liqiang888 committed on
Commit bc7b305 · verified · 1 Parent(s): be638b6

Delete data_modeling/evaluation

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. data_modeling/evaluation/amazon-employee-access-challenge_eval.py +0 -37
  2. data_modeling/evaluation/bike-sharing-demand_eval.py +0 -29
  3. data_modeling/evaluation/cat-in-the-dat-ii_eval.py +0 -25
  4. data_modeling/evaluation/cat-in-the-dat_eval.py +0 -25
  5. data_modeling/evaluation/commonlitreadabilityprize_eval.py +0 -28
  6. data_modeling/evaluation/conways-reverse-game-of-life-2020_eval.py +0 -34
  7. data_modeling/evaluation/covid19-global-forecasting-week-1_eval.py +0 -28
  8. data_modeling/evaluation/covid19-global-forecasting-week-2_eval.py +0 -28
  9. data_modeling/evaluation/covid19-global-forecasting-week-3_eval.py +0 -28
  10. data_modeling/evaluation/covid19-global-forecasting-week-4_eval.py +0 -28
  11. data_modeling/evaluation/covid19-global-forecasting-week-5_eval.py +0 -28
  12. data_modeling/evaluation/demand-forecasting-kernels-only_eval.py +0 -30
  13. data_modeling/evaluation/dont-overfit-ii_eval.py +0 -31
  14. data_modeling/evaluation/feedback-prize-english-language-learning_eval.py +0 -37
  15. data_modeling/evaluation/google-quest-challenge_eval.py +0 -45
  16. data_modeling/evaluation/instant-gratification_eval.py +0 -43
  17. data_modeling/evaluation/learning-agency-lab-automated-essay-scoring-2_eval.py +0 -45
  18. data_modeling/evaluation/liverpool-ion-switching_eval.py +0 -46
  19. data_modeling/evaluation/lmsys-chatbot-arena_eval.py +0 -42
  20. data_modeling/evaluation/microsoft-malware-prediction_eval.py +0 -44
  21. data_modeling/evaluation/nlp-getting-started_eval.py +0 -37
  22. data_modeling/evaluation/playground-series-s3e10_eval.py +0 -32
  23. data_modeling/evaluation/playground-series-s3e11_eval.py +0 -27
  24. data_modeling/evaluation/playground-series-s3e12_eval.py +0 -35
  25. data_modeling/evaluation/playground-series-s3e13_eval.py +0 -57
  26. data_modeling/evaluation/playground-series-s3e14_eval.py +0 -30
  27. data_modeling/evaluation/playground-series-s3e16_eval.py +0 -33
  28. data_modeling/evaluation/playground-series-s3e17_eval.py +0 -29
  29. data_modeling/evaluation/playground-series-s3e18_eval.py +0 -29
  30. data_modeling/evaluation/playground-series-s3e19_eval.py +0 -34
  31. data_modeling/evaluation/playground-series-s3e1_eval.py +0 -38
  32. data_modeling/evaluation/playground-series-s3e20_eval.py +0 -36
  33. data_modeling/evaluation/playground-series-s3e22_eval.py +0 -45
  34. data_modeling/evaluation/playground-series-s3e23_eval.py +0 -37
  35. data_modeling/evaluation/playground-series-s3e24_eval.py +0 -32
  36. data_modeling/evaluation/playground-series-s3e25_eval.py +0 -29
  37. data_modeling/evaluation/playground-series-s3e2_eval.py +0 -26
  38. data_modeling/evaluation/playground-series-s3e3_eval.py +0 -35
  39. data_modeling/evaluation/playground-series-s3e4_eval.py +0 -26
  40. data_modeling/evaluation/playground-series-s3e5_eval.py +0 -61
  41. data_modeling/evaluation/playground-series-s3e6_eval.py +0 -28
  42. data_modeling/evaluation/playground-series-s3e7_eval.py +0 -30
  43. data_modeling/evaluation/playground-series-s3e8_eval.py +0 -26
  44. data_modeling/evaluation/playground-series-s3e9_eval.py +0 -42
  45. data_modeling/evaluation/playground-series-s4e1_eval.py +0 -29
  46. data_modeling/evaluation/playground-series-s4e2_eval.py +0 -37
  47. data_modeling/evaluation/playground-series-s4e3_eval.py +0 -50
  48. data_modeling/evaluation/playground-series-s4e4_eval.py +0 -29
  49. data_modeling/evaluation/playground-series-s4e5_eval.py +0 -30
  50. data_modeling/evaluation/playground-series-s4e6_eval.py +0 -31
data_modeling/evaluation/amazon-employee-access-challenge_eval.py DELETED
@@ -1,37 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="ACTION")
-
- args = parser.parse_args()
-
-
-
- # RMSLE helper (defined but unused; the score reported below is ROC AUC)
- def rmsle(y_true, y_pred):
-     return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2))
-
-
- actual = pd.read_csv(args.answer_file)
- submission = pd.read_csv(args.predict_file)
-
- performance = roc_auc_score(actual[args.value], submission[args.value])
-
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/bike-sharing-demand_eval.py DELETED
@@ -1,29 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- # Compute RMSLE
- def rmsle(predicted, actual):
-     sum_log_diff = np.sum((np.log(predicted + 1) - np.log(actual + 1)) ** 2)
-     mean_log_diff = sum_log_diff / len(predicted)
-     return np.sqrt(mean_log_diff)
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="count")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = rmsle(predictions[args.value], answers[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/cat-in-the-dat-ii_eval.py DELETED
@@ -1,25 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import roc_auc_score
-
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="target")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = roc_auc_score(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/cat-in-the-dat_eval.py DELETED
@@ -1,25 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import roc_auc_score
-
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="target")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = roc_auc_score(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/commonlitreadabilityprize_eval.py DELETED
@@ -1,28 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="target")
-
- args = parser.parse_args()
-
- def rmse(targets, predictions):
-     return np.sqrt(((predictions - targets) ** 2).mean())
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = rmse(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/conways-reverse-game-of-life-2020_eval.py DELETED
@@ -1,34 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="generated")
-
- args = parser.parse_args()
-
- actual = pd.read_csv(args.answer_file)
- submission = pd.read_csv(args.predict_file)
-
- # Drop the id column; the remaining columns hold the board values
- submission_values = submission.drop(columns=['id']).values
- actual_values = actual.drop(columns=['id']).values
-
- # Compute the mean absolute error
- performance = np.mean(np.abs(submission_values - actual_values))
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/covid19-global-forecasting-week-1_eval.py DELETED
@@ -1,28 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- def rmsle(predictions, actuals):
-     rmsle_confirmed = np.sqrt(np.mean((np.log1p(predictions['ConfirmedCases']) - np.log1p(actuals['ConfirmedCases'])) ** 2))
-     rmsle_fatalities = np.sqrt(np.mean((np.log1p(predictions['Fatalities']) - np.log1p(actuals['Fatalities'])) ** 2))
-     return (rmsle_confirmed + rmsle_fatalities) / 2
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="count")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = rmsle(predictions, answers)
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/covid19-global-forecasting-week-2_eval.py DELETED
@@ -1,28 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- def rmsle(predictions, actuals):
-     rmsle_confirmed = np.sqrt(np.mean((np.log1p(predictions['ConfirmedCases']) - np.log1p(actuals['ConfirmedCases'])) ** 2))
-     rmsle_fatalities = np.sqrt(np.mean((np.log1p(predictions['Fatalities']) - np.log1p(actuals['Fatalities'])) ** 2))
-     return (rmsle_confirmed + rmsle_fatalities) / 2
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="count")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(os.path.join(args.path, args.name, args.answer_file))
- predictions = pd.read_csv(os.path.join(args.path, args.name, args.predict_file))
-
- performance = rmsle(predictions, answers)
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/covid19-global-forecasting-week-3_eval.py DELETED
@@ -1,28 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- def rmsle(predictions, actuals):
-     rmsle_confirmed = np.sqrt(np.mean((np.log1p(predictions['ConfirmedCases']) - np.log1p(actuals['ConfirmedCases'])) ** 2))
-     rmsle_fatalities = np.sqrt(np.mean((np.log1p(predictions['Fatalities']) - np.log1p(actuals['Fatalities'])) ** 2))
-     return (rmsle_confirmed + rmsle_fatalities) / 2
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="count")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(os.path.join(args.path, args.name, args.answer_file))
- predictions = pd.read_csv(os.path.join(args.path, args.name, args.predict_file))
-
- performance = rmsle(predictions, answers)
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/covid19-global-forecasting-week-4_eval.py DELETED
@@ -1,28 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- def rmsle(predictions, actuals):
-     rmsle_confirmed = np.sqrt(np.mean((np.log1p(predictions['ConfirmedCases']) - np.log1p(actuals['ConfirmedCases'])) ** 2))
-     rmsle_fatalities = np.sqrt(np.mean((np.log1p(predictions['Fatalities']) - np.log1p(actuals['Fatalities'])) ** 2))
-     return (rmsle_confirmed + rmsle_fatalities) / 2
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="count")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(os.path.join(args.path, args.name, args.answer_file))
- predictions = pd.read_csv(os.path.join(args.path, args.name, args.predict_file))
-
- performance = rmsle(predictions, answers)
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/covid19-global-forecasting-week-5_eval.py DELETED
@@ -1,28 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- def rmsle(predictions, actuals):
-     rmsle_confirmed = np.sqrt(np.mean((np.log1p(predictions['ConfirmedCases']) - np.log1p(actuals['ConfirmedCases'])) ** 2))
-     rmsle_fatalities = np.sqrt(np.mean((np.log1p(predictions['Fatalities']) - np.log1p(actuals['Fatalities'])) ** 2))
-     return (rmsle_confirmed + rmsle_fatalities) / 2
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="count")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(os.path.join(args.path, args.name, args.answer_file))
- predictions = pd.read_csv(os.path.join(args.path, args.name, args.predict_file))
-
- performance = rmsle(predictions, answers)
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/demand-forecasting-kernels-only_eval.py DELETED
@@ -1,30 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- # SMAPE metric
- def smape(y_true, y_pred):
-     return 100/len(y_true) * np.sum(2 * np.abs(y_pred - y_true) / (np.abs(y_true) + np.abs(y_pred)))
-
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="sales")
-
- args = parser.parse_args()
-
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = smape(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/dont-overfit-ii_eval.py DELETED
@@ -1,31 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="target")
-
- args = parser.parse_args()
-
- actual = pd.read_csv(os.path.join(args.path, args.name, args.answer_file))
- submission = pd.read_csv(os.path.join(args.path, args.name, args.predict_file))
-
-
- # Compute ROC AUC
- performance = roc_auc_score(actual[args.value], submission[args.value])
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/feedback-prize-english-language-learning_eval.py DELETED
@@ -1,37 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
- parser.add_argument('--value', type=str, default="place_id")
-
- args = parser.parse_args()
-
- actual = pd.read_csv(os.path.join(args.path, args.name, args.answer_file))
- submission = pd.read_csv(os.path.join(args.path, args.name, args.predict_file))
-
- def mcrmse(y_true, y_pred):
-     """
-     Compute the Mean Columnwise Root Mean Squared Error (MCRMSE).
-     """
-     assert y_true.shape == y_pred.shape, "The shapes of true and predicted values do not match"
-     columnwise_rmse = np.sqrt(((y_true - y_pred) ** 2).mean(axis=0))
-     return columnwise_rmse.mean()
-
- # Extract the ground-truth labels and the predictions
- actual_values = actual.iloc[:, 1:].values  # assumes the first column of the answer file is text_id, followed by the label values
- predicted_values = submission.iloc[:, 1:].values  # assumes the first column of the submission is text_id, followed by the predicted values
-
- # Compute MCRMSE
- performance = mcrmse(actual_values, predicted_values)
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/google-quest-challenge_eval.py DELETED
@@ -1,45 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from scipy.stats import spearmanr
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="place_id")
-
- args = parser.parse_args()
-
- actual = pd.read_csv(os.path.join(args.path, args.name, args.answer_file))
- submission = pd.read_csv(os.path.join(args.path, args.name, args.predict_file))
- def mean_spearmanr(y_true, y_pred):
-     """
-     Compute Spearman's rank correlation coefficient for each column and return the mean.
-     """
-     assert y_true.shape == y_pred.shape, "The shapes of true and predicted values do not match"
-     correlations = []
-     for col in range(y_true.shape[1]):
-         corr, _ = spearmanr(y_true[:, col], y_pred[:, col])
-         correlations.append(corr)
-     return sum(correlations) / len(correlations)
-
-
- # Extract the ground-truth labels and the predictions
- actual_values = actual.iloc[:, 1:].values  # assumes the first column of the answer file is qa_id, followed by the label values
- predicted_values = submission.iloc[:, 1:].values  # assumes the first column of the submission is qa_id, followed by the predicted values
- # Compute the mean column-wise Spearman correlation
- performance = mean_spearmanr(actual_values, predicted_values)
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/instant-gratification_eval.py DELETED
@@ -1,43 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="target")
-
- args = parser.parse_args()
-
- # MAE helper (unused; the score reported below is ROC AUC)
- def mean_absolute_error(y_true, y_pred):
-     return np.mean(np.abs(y_pred - y_true))
-
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- y_true = answers[args.value].values
- y_pred = predictions[args.value].values
-
-
-
-
- performance = roc_auc_score(y_true, y_pred)
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
-
-
-
data_modeling/evaluation/learning-agency-lab-automated-essay-scoring-2_eval.py DELETED
@@ -1,45 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
- from sklearn.metrics import cohen_kappa_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="score")
-
- args = parser.parse_args()
-
- # MAE helper (unused; the score reported below is quadratic weighted kappa)
- def mean_absolute_error(y_true, y_pred):
-     return np.mean(np.abs(y_pred - y_true))
-
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- answers = answers.sort_values('essay_id')
- predictions = predictions.sort_values('essay_id')
-
-
- y_true = answers[args.value].values
- y_pred = predictions[args.value].values
-
-
-
-
- performance = cohen_kappa_score(y_true, y_pred, weights='quadratic')
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/liverpool-ion-switching_eval.py DELETED
@@ -1,46 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
- from sklearn.metrics import cohen_kappa_score
- from sklearn.metrics import f1_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="open_channels")
-
- args = parser.parse_args()
-
- # MAE helper (unused; the score reported below is macro F1)
- def mean_absolute_error(y_true, y_pred):
-     return np.mean(np.abs(y_pred - y_true))
-
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- answers = answers.sort_values('time')
- predictions = predictions.sort_values('time')
-
-
- y_true = answers[args.value].values
- y_pred = predictions[args.value].values
-
-
-
-
- performance = f1_score(y_true, y_pred, average='macro')
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/lmsys-chatbot-arena_eval.py DELETED
@@ -1,42 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import log_loss
-
- from sklearn.metrics import roc_auc_score
- from sklearn.metrics import cohen_kappa_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="score")
-
- args = parser.parse_args()
-
- # MAE helper (unused; the score reported below is multiclass log loss)
- def mean_absolute_error(y_true, y_pred):
-     return np.mean(np.abs(y_pred - y_true))
-
-
- actual = pd.read_csv(args.answer_file)
- submission = pd.read_csv(args.predict_file)
-
- # Extract the actual and predicted values
- actual_values = actual[['winner_model_a', 'winner_model_b', 'winner_tie']].values
- predicted_values = submission[['winner_model_a', 'winner_model_b', 'winner_tie']].values
-
-
-
- performance = log_loss(actual_values, predicted_values)
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/microsoft-malware-prediction_eval.py DELETED
@@ -1,44 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="HasDetections")
-
- args = parser.parse_args()
-
- # MAE helper (unused; the score reported below is ROC AUC)
- def mean_absolute_error(y_true, y_pred):
-     return np.mean(np.abs(y_pred - y_true))
-
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
- answers.sort_values(by=["MachineIdentifier"])
- predictions.sort_values(by=['MachineIdentifier'])
- y_true = answers[args.value].values
- y_pred = predictions[args.value].values
-
-
-
-
- performance = roc_auc_score(y_true, y_pred)
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
-
-
-
data_modeling/evaluation/nlp-getting-started_eval.py DELETED
@@ -1,37 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import f1_score
-
-
- # Multiclass log loss (defined but unused; the score reported below is F1)
- def multiclass_logloss(actuals, predictions):
-     epsilon = 1e-15  # avoid numerical problems in the log
-     predictions = np.clip(predictions, epsilon, 1 - epsilon)  # clip probabilities so the log stays finite
-     predictions /= predictions.sum(axis=1)[:, np.newaxis]  # renormalize so each row sums to 1
-     log_pred = np.log(predictions)
-     loss = -np.sum(actuals * log_pred) / len(actuals)
-     return loss
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="target")
-
- args = parser.parse_args()
-
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = f1_score(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e10_eval.py DELETED
@@ -1,32 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import log_loss
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Class")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
- answers.sort_values(by=['id'])
- predictions.sort_values(by=['id'])
- if "Strength" in predictions:
-     performance = log_loss(answers[args.value], predictions["Strength"])
- else:
-     performance = log_loss(answers[args.value], predictions[args.value])
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e11_eval.py DELETED
@@ -1,27 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="cost")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = np.sqrt(mean_squared_log_error(answers[args.value], predictions[args.value]))
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e12_eval.py DELETED
@@ -1,35 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="target")
-
- args = parser.parse_args()
-
-
- actual = pd.read_csv(args.answer_file)
- submission = pd.read_csv(args.predict_file)
-
- actual.sort_values(by=['id'])
- submission.sort_values(by=['id'])
-
- # Compute ROC AUC
- performance = roc_auc_score(actual[args.value], submission[args.value])
-
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e13_eval.py DELETED
@@ -1,57 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', default='', type=str, required=False)
- parser.add_argument('--name', default='', type=str, required=False)
- parser.add_argument('--answer_file', default='/Users/tencentintern/PycharmProjects/DSBench/kaggle_data/data_filted_csv/answers/playground-series-s3e13/test_answer.csv', type=str, required=False)
- parser.add_argument('--predict_file', default='/Users/tencentintern/PycharmProjects/DSBench/kaggle_data/data_filted_csv/answers/playground-series-s3e13/test_answer.csv', type=str, required=False)
-
- parser.add_argument('--value', type=str, default="prognosis")
-
- args = parser.parse_args()
-
-
- actual = pd.read_csv(args.answer_file)
- submission = pd.read_csv(args.predict_file)
-
- actual.sort_values(by=['id'])
- submission.sort_values(by=['id'])
-
-
- def mpa_at_3(actual, predictions):
-     """
-     Calculate Mean Percentage Agreement at 3 (MPA@3).
-
-     Parameters:
-     actual (list): List of actual prognosis values.
-     predictions (list of lists): List of lists containing up to 3 predicted prognosis values.
-
-     Returns:
-     float: The MPA@3 score.
-     """
-     total = len(actual)
-     score = 0.0
-
-     for act, preds in zip(actual, predictions):
-         preds = preds.split()
-         if act in preds[:3]:
-             score += 1
-
-     return score / total
-
- # Compute MPA@3
- performance = mpa_at_3(actual[args.value], submission[args.value])
- print(performance)
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e14_eval.py DELETED
@@ -1,30 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="yield")
-
- args = parser.parse_args()
-
- # Compute MAE
- def mean_absolute_error(y_true, y_pred):
-     return np.mean(np.abs(y_pred - y_true))
-
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = mean_absolute_error(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e16_eval.py DELETED
@@ -1,33 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="yield")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- answers.sort_values(by=['id'])
- predictions.sort_values(by=['id'])
- if 'Age' in predictions:
-     performance = mean_absolute_error(answers['Age'], predictions['Age'])
- else:
-     performance = mean_absolute_error(answers['Age'], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e17_eval.py DELETED
@@ -1,29 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Machine failure")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = roc_auc_score(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e18_eval.py DELETED
@@ -1,29 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Machine failure")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = (roc_auc_score(answers['EC1'], predictions['EC1']) + roc_auc_score(answers['EC2'], predictions['EC2'])) / 2
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e19_eval.py DELETED
@@ -1,34 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Machine failure")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- # Extract the predicted and actual values
- predicted_values = predictions['num_sold'].values
- actual_values = answers['num_sold'].values  # ground truth from the answers file
-
-
- smape = np.mean(2 * np.abs(predicted_values - actual_values) / (np.abs(actual_values) + np.abs(predicted_values)))
- performance = smape
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e1_eval.py DELETED
@@ -1,38 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
- from sklearn.metrics import mean_squared_error
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="MedHouseVal")
-
- args = parser.parse_args()
-
-
-
- # RMSLE helper (defined but unused; the score reported below is RMSE)
- def rmsle(y_true, y_pred):
-     return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2))
-
-
- actual = pd.read_csv(args.answer_file)
- submission = pd.read_csv(args.predict_file)
-
- performance = np.sqrt(mean_squared_error(actual[args.value], submission[args.value]))
-
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e20_eval.py DELETED
@@ -1,36 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
- from math import sqrt
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Machine failure")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- answers.sort_values(by=['ID_LAT_LON_YEAR_WEEK'])
- predictions.sort_values(by=['ID_LAT_LON_YEAR_WEEK'])
- # Extract the predicted and actual values
- predicted_values = predictions['emission'].values
- actual_values = answers['emission'].values  # ground truth from the answers file
-
-
- rmse = sqrt(mean_squared_error(actual_values, predicted_values))
- performance = rmse
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e22_eval.py DELETED
@@ -1,45 +0,0 @@
- from sklearn.metrics import f1_score
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="outcome")
-
- args = parser.parse_args()
-
- # MAE helper (unused; the score reported below is micro F1)
- def mean_absolute_error(y_true, y_pred):
-     return np.mean(np.abs(y_pred - y_true))
-
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
- answers.sort_values(by=["id"])
- predictions.sort_values(by=['id'])
- y_true = answers[args.value].values
- y_pred = predictions[args.value].values
-
-
-
-
- performance = f1_score(y_true, y_pred, average='micro')
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
-
-
-
data_modeling/evaluation/playground-series-s3e23_eval.py DELETED
@@ -1,37 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="defects")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- answers.sort_values(by=['id'])
- predictions.sort_values(by=['id'])
- # Extract the predicted and actual values
- predicted_values = predictions['defects'].values
- actual_values = answers['defects'].values  # ground truth from the answers file
-
- # Compute ROC AUC
- auc = roc_auc_score(actual_values, predicted_values)
-
- performance = auc
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e24_eval.py DELETED
@@ -1,32 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="smoking")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- # Extract the predicted probabilities and actual labels
- predicted_probabilities = predictions['smoking'].values
- actual_labels = answers['smoking'].values
-
- performance = roc_auc_score(actual_labels, predicted_probabilities)
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e25_eval.py DELETED
@@ -1,29 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import roc_auc_score
- from sklearn.metrics import median_absolute_error
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Hardness")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = median_absolute_error(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
-
-
data_modeling/evaluation/playground-series-s3e2_eval.py DELETED
@@ -1,26 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import roc_auc_score
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="stroke")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = roc_auc_score(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e3_eval.py DELETED
@@ -1,35 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Attrition")
-
- args = parser.parse_args()
-
-
- actual = pd.read_csv(args.answer_file)
- submission = pd.read_csv(args.predict_file)
-
- actual.sort_values(by=actual.columns[0])
- submission.sort_values(by=submission.columns[0])
-
- # Compute ROC AUC
- performance = roc_auc_score(actual[args.value], submission[args.value])
-
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e4_eval.py DELETED
@@ -1,26 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import roc_auc_score
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Class")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = roc_auc_score(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e5_eval.py DELETED
@@ -1,61 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="quality")
-
- args = parser.parse_args()
-
-
- actual = pd.read_csv(args.answer_file)
- submission = pd.read_csv(args.predict_file)
-
- actual.sort_values(by=['Id'])
- submission.sort_values(by=['Id'])
-
- def quadratic_weighted_kappa(actual, predicted, N):
-     O = np.zeros((N, N), dtype=int)
-     for a, p in zip(actual, predicted):
-         O[a][p] += 1
-
-     w = np.zeros((N, N))
-     for i in range(N):
-         for j in range(N):
-             w[i][j] = ((i - j) ** 2) / ((N - 1) ** 2)
-
-     actual_hist = np.zeros(N)
-     for a in actual:
-         actual_hist[a] += 1
-
-     pred_hist = np.zeros(N)
-     for p in predicted:
-         pred_hist[p] += 1
-
-     E = np.outer(actual_hist, pred_hist)
-     E = E / E.sum() * O.sum()
-
-     num = (w * O).sum()
-     den = (w * E).sum()
-
-     return 1 - num / den
-
- # Compute the quadratic weighted kappa
- performance = quadratic_weighted_kappa(actual[args.value], submission[args.value], 10)
-
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e6_eval.py DELETED
@@ -1,28 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="price")
-
- args = parser.parse_args()
-
- def rmse(targets, predictions):
-     return np.sqrt(((predictions - targets) ** 2).mean())
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = rmse(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e7_eval.py DELETED
@@ -1,30 +0,0 @@
-
-
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="booking_status")
-
- args = parser.parse_args()
-
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = roc_auc_score(answers[args.value], predictions[args.value])
-
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
data_modeling/evaluation/playground-series-s3e8_eval.py DELETED
@@ -1,26 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="price")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = np.sqrt(mean_squared_error(answers[args.value], predictions[args.value]))
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
 
data_modeling/evaluation/playground-series-s3e9_eval.py DELETED
@@ -1,42 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Strength")
-
- args = parser.parse_args()
-
- actual = pd.read_csv(args.answer_file)
- submission = pd.read_csv(args.predict_file)
-
- # Align both frames on id so rows are compared consistently (sort_values is not in-place).
- actual = actual.sort_values(by=['id'])
- submission = submission.sort_values(by=['id'])
-
- def calculate_rmse(actual, predicted):
-     actual = np.array(actual)
-     predicted = np.array(predicted)
-     mse = np.mean((actual - predicted) ** 2)
-     rmse = np.sqrt(mse)
-     return rmse
-
- # Score: root mean squared error between the answer file and the submission.
- performance = calculate_rmse(actual[args.value], submission[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
 
data_modeling/evaluation/playground-series-s4e1_eval.py DELETED
@@ -1,29 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Exited")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = roc_auc_score(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
 
data_modeling/evaluation/playground-series-s4e2_eval.py DELETED
@@ -1,37 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import accuracy_score
-
- # Multiclass log loss over one-hot actuals and predicted probabilities.
- def multiclass_logloss(actuals, predictions):
-     epsilon = 1e-15  # avoid numerical issues in the logarithm
-     predictions = np.clip(predictions, epsilon, 1 - epsilon)  # keep probabilities away from 0 and 1
-     predictions /= predictions.sum(axis=1)[:, np.newaxis]  # renormalise so each row sums to 1
-     log_pred = np.log(predictions)
-     loss = -np.sum(actuals * log_pred) / len(actuals)
-     return loss
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="NObeyesdad")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- # Final score here is accuracy; multiclass_logloss above is left unused.
- performance = accuracy_score(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
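The multiclass_logloss helper (unused in the final score here) should mirror scikit-learn's log_loss when the actuals are one-hot encoded; a small sketch with hypothetical arrays:

    import numpy as np
    from sklearn.metrics import log_loss

    # Hypothetical one-hot actuals (3 samples, 3 classes) and predicted probabilities.
    actuals = np.array([[1, 0, 0],
                        [0, 1, 0],
                        [0, 0, 1]])
    probs = np.array([[0.7, 0.2, 0.1],
                      [0.3, 0.5, 0.2],
                      [0.1, 0.2, 0.7]])

    # multiclass_logloss(actuals, probs) and this call should agree closely.
    print(log_loss(actuals.argmax(axis=1), probs, labels=[0, 1, 2]))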
 
data_modeling/evaluation/playground-series-s4e3_eval.py DELETED
@@ -1,50 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import accuracy_score
- from sklearn.metrics import roc_auc_score
-
- # Multiclass log loss over one-hot actuals and predicted probabilities (unused below).
- def multiclass_logloss(actuals, predictions):
-     epsilon = 1e-15  # avoid numerical issues in the logarithm
-     predictions = np.clip(predictions, epsilon, 1 - epsilon)  # keep probabilities away from 0 and 1
-     predictions /= predictions.sum(axis=1)[:, np.newaxis]  # renormalise so each row sums to 1
-     log_pred = np.log(predictions)
-     loss = -np.sum(actuals * log_pred) / len(actuals)
-     return loss
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="NObeyesdad")
-
- args = parser.parse_args()
-
- actual = pd.read_csv(args.answer_file)
- submission = pd.read_csv(args.predict_file)
-
- # Defect categories to score.
- categories = ['Pastry', 'Z_Scratch', 'K_Scatch', 'Stains', 'Dirtiness', 'Bumps', 'Other_Faults']
-
- # Compute the ROC AUC for each category separately.
- auc_scores = {}
- for category in categories:
-     y_true = actual[category].values
-     y_pred = submission[category].values
-     auc_scores[category] = roc_auc_score(y_true, y_pred)
-
- # Final score is the unweighted mean of the per-category AUCs.
- performance = sum(auc_scores.values()) / len(auc_scores)
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
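Averaging the per-category AUCs, as this script does, is equivalent to a macro-averaged ROC AUC over the label matrix; a sketch with hypothetical data using the same column names:

    import numpy as np
    import pandas as pd
    from sklearn.metrics import roc_auc_score

    categories = ['Pastry', 'Z_Scratch', 'K_Scatch', 'Stains', 'Dirtiness', 'Bumps', 'Other_Faults']

    rng = np.random.default_rng(0)
    truth = pd.DataFrame(rng.integers(0, 2, size=(100, 7)), columns=categories)  # hypothetical 0/1 labels
    preds = pd.DataFrame(rng.random(size=(100, 7)), columns=categories)          # hypothetical probabilities

    # Matches the mean of the seven per-column roc_auc_score values from the script.
    print(roc_auc_score(truth[categories], preds[categories], average='macro'))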
 
data_modeling/evaluation/playground-series-s4e4_eval.py DELETED
@@ -1,29 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Rings")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = np.sqrt(mean_squared_log_error(answers[args.value], predictions[args.value]))
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
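The score written here is the root mean squared logarithmic error (RMSLE); spelled out with hypothetical values:

    import numpy as np
    from sklearn.metrics import mean_squared_log_error

    y_true = np.array([4.0, 7.0, 10.0, 12.0])  # hypothetical ring counts
    y_pred = np.array([5.0, 6.5, 11.0, 10.0])  # hypothetical predictions

    # Both expressions should print the same value:
    # RMSLE = sqrt(mean((log1p(pred) - log1p(true))^2)).
    print(np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2)))
    print(np.sqrt(mean_squared_log_error(y_true, y_pred)))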
 
data_modeling/evaluation/playground-series-s4e5_eval.py DELETED
@@ -1,30 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
- from sklearn.metrics import r2_score
-
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="FloodProbability")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = r2_score(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))
 
data_modeling/evaluation/playground-series-s4e6_eval.py DELETED
@@ -1,31 +0,0 @@
- import os.path
-
- import numpy as np
- import pandas as pd
- import argparse
- from sklearn.metrics import mean_squared_error
- from sklearn.metrics import mean_squared_log_error
- from sklearn.metrics import mean_absolute_error
- from sklearn.metrics import r2_score
-
- from sklearn.metrics import accuracy_score
- from sklearn.metrics import roc_auc_score
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--path', type=str, required=True)
- parser.add_argument('--name', type=str, required=True)
- parser.add_argument('--answer_file', type=str, required=True)
- parser.add_argument('--predict_file', type=str, required=True)
-
- parser.add_argument('--value', type=str, default="Target")
-
- args = parser.parse_args()
-
- answers = pd.read_csv(args.answer_file)
- predictions = pd.read_csv(args.predict_file)
-
- performance = accuracy_score(answers[args.value], predictions[args.value])
-
- with open(os.path.join(args.path, args.name, "result.txt"), "w") as f:
-     f.write(str(performance))