Spaces:
Sleeping
Sleeping
victormiller
committed on
Update results.py
Browse files — results.py +6 -5
results.py
CHANGED
@@ -1,11 +1,13 @@
|
|
1 |
from fasthtml.common import *
|
2 |
from fasthtml.components import *
|
3 |
|
4 |
-
|
|
|
|
|
|
|
5 |
quality_est = P("We took one of the model-based data quality evaluation strategies adopted by [DataComp-LM](https://arxiv.org/abs/2406.11794), which used perplexity filtering as a candidate for quality filtering. DataComp-LM followed [CCNet’s](https://arxiv.org/abs/1911.00359) practice to use a 5-gram Kneser-Ney model as implemented in the [KenLM](https://github.com/kpu/kenlm) library for efficient perplexity calculation. Following this practice, we estimated data quality by taking a KenLM model (from [edugp/kenlm](https://huggingface.co/edugp/kenlm)) trained on English Wikipedia data to compute perplexity on data with different duplication patterns. Lower perplexity is regarded as a signal of higher quality.")
|
6 |
sampling_strat = P("We started from a processed Common Crawl (CC) ablation dataset divided by the number of duplicates of each document. For each CC dump, we have different buckets each holding chunks of document with different duplicate count ranges (1-1, 2-5, 6-10, 11-100, 101-1000, 1001-30000000). We sampled the first 10k documents from each chunk with their meta data.")
|
7 |
-
|
8 |
-
perp_v_buckets_img = Img(src="images/prep-diff-buckets-global.png")
|
9 |
perp_v_years = P("Taking the same data, we can convert it into a graph indicating the yearly trend. For most buckets, the average perplexity of dumps from more recent years seem to be lower than that of former years.")
|
10 |
perp_v_years_img = Img(src="images/prep-across-diff-year-global-dup-buckets.png")
|
11 |
perp_v_docdup = P("We can also break each bucket into distinct document counts. The graph becomes a bit noisy at the end because of insufficient samples with larger duplication counts.")
|
@@ -33,8 +35,7 @@ def results():
|
|
33 |
H3("Sampling Strategy"),
|
34 |
sampling_strat,
|
35 |
H3("Perplexity vs Buckets"),
|
36 |
-
|
37 |
-
perp_v_buckets_img,
|
38 |
H3("Perplexity vs Years"),
|
39 |
perp_v_years,
|
40 |
perp_v_years_img,
|
|
|
1 |
from fasthtml.common import *
|
2 |
from fasthtml.components import *
|
3 |
|
4 |
+
perp1_div = Div(
|
5 |
+
perp_v_buckets = P("For each bucket, we aggregated all the chunks that belong to a single year and calculated the average perplexity for each (bucket, year) data point."),
|
6 |
+
perp_v_buckets_img = Img(src="images/prep-diff-buckets-global.png"),
|
7 |
+
)
|
8 |
quality_est = P("We took one of the model-based data quality evaluation strategies adopted by [DataComp-LM](https://arxiv.org/abs/2406.11794), which used perplexity filtering as a candidate for quality filtering. DataComp-LM followed [CCNet’s](https://arxiv.org/abs/1911.00359) practice to use a 5-gram Kneser-Ney model as implemented in the [KenLM](https://github.com/kpu/kenlm) library for efficient perplexity calculation. Following this practice, we estimated data quality by taking a KenLM model (from [edugp/kenlm](https://huggingface.co/edugp/kenlm)) trained on English Wikipedia data to compute perplexity on data with different duplication patterns. Lower perplexity is regarded as a signal of higher quality.")
|
9 |
sampling_strat = P("We started from a processed Common Crawl (CC) ablation dataset divided by the number of duplicates of each document. For each CC dump, we have different buckets each holding chunks of document with different duplicate count ranges (1-1, 2-5, 6-10, 11-100, 101-1000, 1001-30000000). We sampled the first 10k documents from each chunk with their meta data.")
|
10 |
+
|
|
|
11 |
perp_v_years = P("Taking the same data, we can convert it into a graph indicating the yearly trend. For most buckets, the average perplexity of dumps from more recent years seem to be lower than that of former years.")
|
12 |
perp_v_years_img = Img(src="images/prep-across-diff-year-global-dup-buckets.png")
|
13 |
perp_v_docdup = P("We can also break each bucket into distinct document counts. The graph becomes a bit noisy at the end because of insufficient samples with larger duplication counts.")
|
|
|
35 |
H3("Sampling Strategy"),
|
36 |
sampling_strat,
|
37 |
H3("Perplexity vs Buckets"),
|
38 |
+
perp1_div,
|
|
|
39 |
H3("Perplexity vs Years"),
|
40 |
perp_v_years,
|
41 |
perp_v_years_img,
|