Spaces:
mervenoyan committed · Commit b76ba92 · Parent(s): b9f93e2
improvements
app.py
CHANGED
@@ -22,12 +22,6 @@ from sklearn.pipeline import make_pipeline
 
 
 
-# Example settings
-n_samples = 300
-outliers_fraction = 0.15
-n_outliers = int(outliers_fraction * n_samples)
-n_inliers = n_samples - n_outliers
-
 #### MODELS
 
 def get_groundtruth_model(X, labels):
@@ -39,21 +33,26 @@ def get_groundtruth_model(X, labels):
     return Dummy(labels)
 ############
 # Define datasets
-
-DATA_MAPPING = {
-    "Central Blob":make_blobs(centers=[[0, 0], [0, 0]], cluster_std=0.5, **blobs_params)[0],
-    "Two Blobs": make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5], **blobs_params)[0],
-    "Blob with Noise": make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, 0.3], **blobs_params)[0],
-    "Moons": 4.0
-    * (
-        make_moons(n_samples=n_samples, noise=0.05, random_state=0)[0]
-        - np.array([0.5, 0.25])
-    ),
-    "Noise": 14.0 * (np.random.RandomState(42).rand(n_samples, 2) - 0.5),
-}
+# Example settings
 
 
-NAME_CLF_MAPPING = {"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
+
+
+#### PLOT
+FIGSIZE = 10,10
+figure = plt.figure(figsize=(25, 10))
+i = 1
+
+
+
+
+def train_models(input_data, outliers_fraction, n_samples, clf_name):
+    # n_samples=300
+    # outliers_fraction = 0.15
+    n_outliers = int(outliers_fraction * n_samples)
+    n_inliers = n_samples - n_outliers
+    blobs_params = dict(random_state=0, n_samples=n_inliers, n_features=2)
+    NAME_CLF_MAPPING = {"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
                         "One-Class SVM": svm.OneClassSVM(nu=outliers_fraction, kernel="rbf", gamma=0.1),
                         "One-Class SVM (SGD)":make_pipeline(
                             Nystroem(gamma=0.1, random_state=42, n_components=150),
@@ -67,15 +66,19 @@ NAME_CLF_MAPPING = {"Robust covariance": EllipticEnvelope(contamination=outliers
                         ),
                         "Isolation Forest": IsolationForest(contamination=outliers_fraction, random_state=42),
                         "Local Outlier Factor": LocalOutlierFactor(n_neighbors=35, contamination=outliers_fraction),
-}
-
-
-
-
-
-
-
-DATASETS = [
+    }
+    DATA_MAPPING = {
+        "Central Blob":make_blobs(centers=[[0, 0], [0, 0]], cluster_std=0.5, **blobs_params)[0],
+        "Two Blobs": make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5], **blobs_params)[0],
+        "Blob with Noise": make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, 0.3], **blobs_params)[0],
+        "Moons": 4.0
+        * (
+            make_moons(n_samples=n_samples, noise=0.05, random_state=0)[0]
+            - np.array([0.5, 0.25])
+        ),
+        "Noise": 14.0 * (np.random.RandomState(42).rand(n_samples, 2) - 0.5),
+    }
+    DATASETS = [
         make_blobs(centers=[[0, 0], [0, 0]], cluster_std=0.5, **blobs_params)[0],
         make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5], **blobs_params)[0],
         make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, 0.3], **blobs_params)[0],
@@ -85,21 +88,8 @@ DATASETS = [
             - np.array([0.5, 0.25])
         ),
         14.0 * (np.random.RandomState(42).rand(n_samples, 2) - 0.5),
-]
-
-
-
-###########
-
-#### PLOT
-FIGSIZE = 10,10
-figure = plt.figure(figsize=(25, 10))
-i = 1
-
-
-
-
-def train_models(selected_data, clf_name):
+    ]
+
     xx, yy = np.meshgrid(np.linspace(-7, 7, 150), np.linspace(-7, 7, 150))
     clf = NAME_CLF_MAPPING[clf_name]
     plt.figure(figsize=(len(NAME_CLF_MAPPING) * 2 + 4, 12.5))
@@ -107,7 +97,7 @@ def train_models(selected_data, clf_name):
 
     plot_num = 1
     rng = np.random.RandomState(42)
-    X = DATA_MAPPING[selected_data]
+    X = DATA_MAPPING[input_data]
    X = np.concatenate([X, rng.uniform(low=-6, high=6, size=(n_outliers, 2))], axis=0)
 
     t0 = time.time()
@@ -161,11 +151,14 @@ with gr.Blocks() as demo:
     gr.Markdown(f"## {title}")
     gr.Markdown(description)
 
-    input_models =
+    input_models = ["Robust covariance","One-Class SVM","One-Class SVM (SGD)","Isolation Forest",
+                    "Local Outlier Factor"]
     input_data = gr.Radio(
         choices=["Central Blob", "Two Blobs", "Blob with Noise", "Moons", "Noise"],
         value="Moons"
     )
+    n_samples = gr.Slider(minimum=100, maximum=500, step=25, label="Number of Samples")
+    outliers_fraction = gr.Slider(minimum=0.1, maximum=0.9, step=0.1, label="Fraction of Outliers")
     counter = 0
 
 
@@ -176,7 +169,9 @@ with gr.Blocks() as demo:
             input_model = input_models[counter]
             plot = gr.Plot(label=input_model)
            fn = partial(train_models, clf_name=input_model)
-            input_data.change(fn=fn, inputs=[input_data], outputs=plot)
+            input_data.change(fn=fn, inputs=[input_data, outliers_fraction, n_samples], outputs=plot)
+            n_samples.change(fn=fn, inputs=[input_data, outliers_fraction, n_samples], outputs=plot)
+            outliers_fraction.change(fn=fn, inputs=[input_data, outliers_fraction, n_samples], outputs=plot)
             counter += 1
 
 demo.launch(enable_queue=True, debug=True)
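The core of the change is the wiring in the last two hunks: the dataset radio and the two new sliders are passed positionally to `train_models`, while `functools.partial` pins `clf_name` for each plot, and rebuilding `DATA_MAPPING`/`NAME_CLF_MAPPING` inside the function means every `.change` event sees the current slider values. A minimal, self-contained sketch of that wiring pattern follows; the function name `toy_outlier_plot`, the single `IsolationForest` detector, and the slider ranges are illustrative stand-ins, not the Space's actual code.

```python
from functools import partial

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import IsolationForest


def toy_outlier_plot(dataset_name, outliers_fraction, n_samples, clf_name):
    # Rebuild data and model on every call so the slider values take effect.
    n_samples = int(n_samples)                      # sliders deliver floats
    n_outliers = int(outliers_fraction * n_samples)
    rng = np.random.RandomState(42)
    X = np.concatenate([
        rng.normal(scale=0.5, size=(n_samples - n_outliers, 2)),  # inliers
        rng.uniform(low=-6, high=6, size=(n_outliers, 2)),        # outliers
    ])
    y_pred = IsolationForest(contamination=outliers_fraction,
                             random_state=42).fit_predict(X)
    fig, ax = plt.subplots()
    ax.scatter(X[:, 0], X[:, 1], c=y_pred, cmap="coolwarm", s=10)
    ax.set_title(f"{clf_name} on {dataset_name}")
    return fig


with gr.Blocks() as demo:
    dataset = gr.Radio(choices=["Blob"], value="Blob", label="Dataset")
    n_samples = gr.Slider(minimum=100, maximum=500, step=25, value=300,
                          label="Number of Samples")
    # Kept <= 0.5 so it stays a valid `contamination` for IsolationForest.
    outliers_fraction = gr.Slider(minimum=0.05, maximum=0.5, step=0.05, value=0.15,
                                  label="Fraction of Outliers")
    plot = gr.Plot(label="Isolation Forest")
    # partial() pins clf_name; the .change inputs fill the remaining positional args.
    fn = partial(toy_outlier_plot, clf_name="Isolation Forest")
    for ctrl in (dataset, n_samples, outliers_fraction):
        ctrl.change(fn=fn, inputs=[dataset, outliers_fraction, n_samples], outputs=plot)

demo.launch()
```

The same partial-plus-shared-inputs idiom is what lets the Space register one plot per detector while reusing a single training function.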
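For reference, the bookkeeping that now lives inside `train_models` ties the amount of injected uniform noise to the `contamination`/`nu` handed to every estimator, so each detector is told the true outlier share; with the old module-level defaults removed in the first hunk (`n_samples = 300`, `outliers_fraction = 0.15`) that is 45 outliers on top of 255 inliers. A standalone sketch of that relationship, assuming `EllipticEnvelope` as the detector rather than the Space's full model mapping:

```python
import numpy as np
from sklearn.covariance import EllipticEnvelope

# Same arithmetic as train_models: the detector is told the true outlier share.
n_samples, outliers_fraction = 300, 0.15
n_outliers = int(outliers_fraction * n_samples)   # 45
n_inliers = n_samples - n_outliers                # 255

rng = np.random.RandomState(42)
X = np.concatenate([
    rng.normal(scale=0.5, size=(n_inliers, 2)),          # tight inlier blob
    rng.uniform(low=-6, high=6, size=(n_outliers, 2)),   # injected uniform noise
])

clf = EllipticEnvelope(contamination=outliers_fraction).fit(X)
print((clf.predict(X) == -1).sum())   # roughly 45 points flagged as outliers (-1)
```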