Joash2024 committed
Commit c1a3f6d · 1 Parent(s): dcd4f06

feat: add monitoring module

Files changed (1)
  1. monitoring.py +97 -0
monitoring.py ADDED
@@ -0,0 +1,97 @@
+ import time
+ from datetime import datetime
+ import json
+ import os
+ from collections import defaultdict
+ import threading
+ import numpy as np
+
+ class PerformanceMonitor:
+     def __init__(self, metrics_file="metrics.json"):
+         self.metrics_file = metrics_file
+         self.metrics = defaultdict(list)
+         self.lock = threading.Lock()
+         self._load_metrics()
+
+     def _load_metrics(self):
+         """Load existing metrics from file"""
+         if os.path.exists(self.metrics_file):
+             try:
+                 with open(self.metrics_file, 'r') as f:
+                     self.metrics.update(json.load(f))
+             except json.JSONDecodeError:
+                 pass
+
+     def _save_metrics(self):
+         """Save metrics to file"""
+         with self.lock:
+             with open(self.metrics_file, 'w') as f:
+                 json.dump(dict(self.metrics), f)
+
+     def record_response_time(self, model_id, duration):
+         """Record response time for a model"""
+         with self.lock:
+             self.metrics[f"{model_id}_response_times"].append({
+                 'timestamp': datetime.now().isoformat(),
+                 'duration': duration
+             })
+         self._save_metrics()
+
+     def record_success(self, model_id, success):
+         """Record success/failure for a model"""
+         with self.lock:
+             self.metrics[f"{model_id}_success_rate"].append({
+                 'timestamp': datetime.now().isoformat(),
+                 'success': success
+             })
+         self._save_metrics()
+
+     def record_problem_type(self, problem_type):
+         """Record usage of different problem types"""
+         with self.lock:
+             self.metrics['problem_types'].append({
+                 'timestamp': datetime.now().isoformat(),
+                 'type': problem_type
+             })
+         self._save_metrics()
+
+     def get_statistics(self):
+         """Calculate and return performance statistics"""
+         stats = {}
+
+         # Response time statistics
+         for model in ['base', 'finetuned']:
+             times = [x['duration'] for x in self.metrics.get(f"{model}_response_times", [])]
+             if times:
+                 stats[f"{model}_avg_response_time"] = np.mean(times)
+                 stats[f"{model}_max_response_time"] = np.max(times)
+                 stats[f"{model}_min_response_time"] = np.min(times)
+
+         # Success rate statistics
+         for model in ['base', 'finetuned']:
+             successes = [x['success'] for x in self.metrics.get(f"{model}_success_rate", [])]
+             if successes:
+                 stats[f"{model}_success_rate"] = sum(successes) / len(successes) * 100
+
+         # Problem type distribution
+         problem_types = [x['type'] for x in self.metrics.get('problem_types', [])]
+         if problem_types:
+             type_counts = defaultdict(int)
+             for ptype in problem_types:
+                 type_counts[ptype] += 1
+             total = len(problem_types)
+             stats['problem_type_distribution'] = {
+                 ptype: (count / total) * 100
+                 for ptype, count in type_counts.items()
+             }
+
+         return stats
+
+ def measure_time(func):
+     """Decorator to measure function execution time"""
+     def wrapper(*args, **kwargs):
+         start_time = time.time()
+         result = func(*args, **kwargs)
+         duration = time.time() - start_time
+         return result, duration
+     return wrapper
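
As a rough usage sketch (not part of this commit), the measure_time decorator and PerformanceMonitor might be wired together as below. The model id "finetuned" matches the keys get_statistics() looks for, while solve_problem is a hypothetical stand-in for the call being timed.

    # Usage sketch (hypothetical): time a call, then record the metrics.
    from monitoring import PerformanceMonitor, measure_time

    monitor = PerformanceMonitor(metrics_file="metrics.json")

    @measure_time
    def solve_problem(prompt):
        # placeholder for the real model inference call
        return f"answer to: {prompt}"

    result, duration = solve_problem("2 + 2")  # wrapper returns (result, elapsed seconds)
    monitor.record_response_time("finetuned", duration)
    monitor.record_success("finetuned", True)  # booleans sum cleanly in get_statistics()
    monitor.record_problem_type("arithmetic")

    print(monitor.get_statistics())
    # e.g. {'finetuned_avg_response_time': ..., 'finetuned_success_rate': 100.0,
    #       'problem_type_distribution': {'arithmetic': 100.0}}

Because every record_* call rewrites metrics.json, the recorded history also survives restarts and is reloaded by _load_metrics() on the next run.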