{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "yqiXEj_uL8kv",
"outputId": "b3591701-bb63-4496-f90d-50d958b32b32"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
" Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
" Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
" Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"
]
}
],
"source": [
"!pip install -q gradio\n",
"!pip install -q git+https://github.com/huggingface/transformers.git\n"
]
},
{
"cell_type": "code",
"source": [
"import gradio as gr\n",
"import tensorflow as tf\n",
"from transformers import TFGPT2LMHeadModel,GPT2Tokenizer"
],
"metadata": {
"id": "NWyCNUJIMp58"
},
"execution_count": 19,
"outputs": []
},
{
"cell_type": "code",
"source": [
"tokenizer = GPT2Tokenizer.from_pretrained (\"gpt2\")\n",
"model = TFGPT2LMHeadModel.from_pretrained (\"gpt2\" ,pad_token_id=tokenizer.eos_token_id)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "uGE4z27oMuZx",
"outputId": "a26407d4-2628-44d4-be16-f3826a043eb8"
},
"execution_count": 10,
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"All PyTorch model weights were used when initializing TFGPT2LMHeadModel.\n",
"\n",
"All the weights of TFGPT2LMHeadModel were initialized from the PyTorch model.\n",
"If your task is similar to the task the model of the checkpoint was trained on, you can already use TFGPT2LMHeadModel for predictions without further training.\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"def generate_text(input_Prompt):\n",
" input_ids = tokenizer.encode(input_Prompt, return_tensors='tf')\n",
" beam_output = model.generate(input_ids, max_length=100, num_beams=5, no_repeat_ngram_size=2, early_stopping=False)\n",
" output = tokenizer.decode(beam_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)\n",
" return \".\".join(output.split(\".\")[:-1]) + \".\"\n"
],
"metadata": {
"id": "hoKSOw9eMvQt"
},
"execution_count": 16,
"outputs": []
},
{
"cell_type": "code",
"source": [
"output_text = gr.Textbox()\n",
"\n",
"gr. Interface(generate_text,\"textbox\", output_text, title=\"GPT-2\",\n",
"\n",
"description=\"OpenAI's GPT-2 is an unsupervised language model that \\ can generate coherent text. Go ahead and input a sentence and see what it completes \\ it with! Takes around 20s to run.\").launch()"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 648
},
"id": "cM-5NqQ-M1dn",
"outputId": "c52b7b5d-43b1-4bc7-aebe-761ddb8be371"
},
"execution_count": 17,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Setting queue=True in a Colab notebook requires sharing enabled. Setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
"\n",
"Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
"Running on public URL: https://ac6c205dbfaa7333aa.gradio.live\n",
"\n",
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
]
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"<IPython.core.display.HTML object>"
],
"text/html": [
"<div><iframe src=\"https://ac6c205dbfaa7333aa.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
]
},
"metadata": {}
},
{
"output_type": "execute_result",
"data": {
"text/plain": []
},
"metadata": {},
"execution_count": 17
}
]
}
]
}