merve (HF staff) committed on
Commit 1fea8fa · 1 Parent(s): 06870e1

Update app.py

Files changed (1): app.py +3 -3
app.py CHANGED
@@ -253,7 +253,7 @@ with gr.Blocks(title=title) as demo:
     gr.Markdown(" **[Demo is based on sklearn docs found here](https://scikit-learn.org/stable/auto_examples/linear_model/plot_ard.html#sphx-glr-auto-examples-linear-model-plot-ard-py)** <br>")
 
 
-    with gr.Tab("# Plot true and estimated coefficients"):
+    with gr.Tab("Plot true and estimated coefficients"):
 
         with gr.Row():
             n_iter = gr.Slider(value=5, minimum=5, maximum=50, step=1, label="n_iterations")
@@ -266,7 +266,7 @@ with gr.Blocks(title=title) as demo:
     One can observe that with the added noise, none of the models can perfectly recover the coefficients of the original model. All models have more than 10 non-zero coefficients,
     where only 10 are useful. The Bayesian Ridge Regression manages to recover most of the coefficients, while the ARD is more conservative.
     """)
-    with gr.Tab("# Plot marginal log likelihoods"):
+    with gr.Tab("Plot marginal log likelihoods"):
         with gr.Row():
             n_iter = gr.Slider(value=5, minimum=5, maximum=50, step=1, label="n_iterations")
             btn = gr.Button(value="Plot marginal log likelihoods")
@@ -277,7 +277,7 @@ with gr.Blocks(title=title) as demo:
     Both ARD and Bayesian Ridge minimized the log-likelihood up to an arbitrary cutoff defined by the n_iter parameter.
     """
     )
-    with gr.Tab("# Plot bayesian regression with polynomial features"):
+    with gr.Tab("Plot bayesian regression with polynomial features"):
         with gr.Row():
             degree = gr.Slider(value=5, minimum=5, maximum=50, step=1, label="n_degrees")
             btn = gr.Button(value="Plot bayesian regression with polynomial features")
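The change itself is cosmetic: gr.Tab labels are rendered as plain text rather than Markdown, so the leading "#" appeared literally in each tab title instead of producing a heading. A minimal sketch of the corrected pattern, assuming a trimmed-down stand-in for app.py (the title value is a placeholder and the plotting callbacks are omitted; neither is the app's actual code):

import gradio as gr

title = "Comparing Linear Bayesian Regressors"  # placeholder; app.py defines its own title

with gr.Blocks(title=title) as demo:
    # Plain-text label: no "#" prefix, since tab labels are not Markdown.
    with gr.Tab("Plot marginal log likelihoods"):
        with gr.Row():
            n_iter = gr.Slider(value=5, minimum=5, maximum=50, step=1, label="n_iterations")
            btn = gr.Button(value="Plot marginal log likelihoods")

demo.launch()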
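For context on the first tab's description text, the comparison it summarizes follows the linked sklearn example: generate a sparse regression problem, fit both Bayesian regressors, and inspect how many coefficients each recovers. A hedged sketch under those assumptions (the parameter values mirror the sklearn docs, not necessarily app.py):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import ARDRegression, BayesianRidge

# 100 features but only 10 informative ones, plus noise, as the demo text describes.
X, y, true_w = make_regression(
    n_samples=100, n_features=100, n_informative=10,
    noise=8, coef=True, random_state=42,
)

ard = ARDRegression().fit(X, y)
brr = BayesianRidge().fit(X, y)

# Neither model recovers the true weights exactly; ARD prunes more
# coefficients toward zero than Bayesian Ridge does.
for name, w in [("true", true_w), ("ARD", ard.coef_), ("BayesianRidge", brr.coef_)]:
    print(f"{name:>13}: {int(np.sum(np.abs(w) > 1e-3))} non-zero coefficients")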