kwabs22 committed
Commit 932fd0e · 1 Parent(s): 093cc88

Passing variables in workflow

Files changed (1): app.py (+100 -63)
app.py CHANGED
@@ -12,6 +12,7 @@ from threading import Thread
 import time
 import psutil
 from sentence_transformers import SentenceTransformer
+import textwrap
 
 #Imported Long Variables - comment for each move to search
 from relatively_constant_variables import *
@@ -756,6 +757,22 @@ def refresh_file_explorer():
 
 #-----------------------------------------------------------------------------------------------------------------------------------
 
+LinPEWFprevious_messages = []
+
+def LinPEWFformat_prompt(current_prompt, prev_messages):
+    formatted_prompt = textwrap.dedent("""
+    Previous prompts and responses:
+    {history}
+
+    Current prompt:
+    {current}
+
+    Please respond to the current prompt, taking into account the context from previous prompts and responses.
+    """).strip()
+
+    history = "\n\n".join(f"Prompt {i+1}: {msg}" for i, msg in enumerate(prev_messages))
+    return formatted_prompt.format(history=history, current=current_prompt)
+
 #-----------------------------------------------------------------------------------------------------------------------------------
 
 with gr.Blocks() as demo:
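For reference, the new helper is pure Python and can be sanity-checked outside the Space. A minimal sketch, restating the helper so it runs standalone; the two history entries are invented stand-ins for what the click handler (added in a later hunk) appends. Note that because prompts and responses share one list, responses are also labeled "Prompt N:" in the formatted history:

import textwrap

LinPEWFprevious_messages = []

def LinPEWFformat_prompt(current_prompt, prev_messages):
    formatted_prompt = textwrap.dedent("""
    Previous prompts and responses:
    {history}

    Current prompt:
    {current}

    Please respond to the current prompt, taking into account the context from previous prompts and responses.
    """).strip()
    history = "\n\n".join(f"Prompt {i+1}: {msg}" for i, msg in enumerate(prev_messages))
    return formatted_prompt.format(history=history, current=current_prompt)

# Two turns of invented history, then format a third prompt against it.
LinPEWFprevious_messages.append("Suggest a story premise about a chef")
LinPEWFprevious_messages.append("Response: A chef inherits a failing diner.")
print(LinPEWFformat_prompt("List five timeline events for that premise", LinPEWFprevious_messages))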
@@ -771,9 +788,65 @@ with gr.Blocks() as demo:
     with gr.Accordion("Config and Asset Assistance - Click to open", open=False):
         with gr.Accordion("Themes to consider before you attempt config", open=False):
             gr.HTML("Copy-pasting any old config to an LLM and asking it to remix is the easiest <br>To bake a 'Moral of the story' in, you have to be very deliberate")
-        with gr.Accordion("New Config Proto Assist - Trying to abstract the process into one workflow is beyond me, so multiple paths to the goal (config) is the aim now", open=False):
         gr.HTML("UI can be media and all items can have media")
-            with gr.Tab("List to Empty Config with Edit support - Linear"):
+        with gr.Accordion("Qwen 0.5B as Space Guide Tests", open=False):
+            with gr.Tab("General FAQ Attempt"):
+                FAQMainOutput = gr.TextArea(placeholder='Output will show here', value='')
+                FAQCustomButtonInput = gr.TextArea(lines=1, placeholder='Prompt goes here')
+
+                for category_name, category_prompts in FAQAllprompts.items():
+                    with gr.Accordion(f"General {category_name} Pattern based", open=False):
+                        with gr.Group():
+                            for index, (prompt, _) in enumerate(category_prompts):
+                                button = gr.Button(prompt)
+                                button.click(llmguide_generate_response, inputs=[FAQCustomButtonInput, gr.State(index), gr.State(category_name)], outputs=FAQMainOutput)
+
+            with gr.Tab("General RAG (Pathfinder?) Attempt"):
+                gr.HTML("https://huggingface.co/spaces/mteb/leaderboard - source for SOTA embedders - currently using all-MiniLM-L6-v2")
+                gr.HTML("Placeholder for a weak RAG-type character interaction test, i.e. input for a JSON 'Knowledge Base'")
+                gr.Interface(
+                    fn=process_query,
+                    inputs=[
+                        gr.Textbox(lines=2, placeholder="Enter your question here..."),
+                        gr.Checkbox(label="Use RAG"),
+                        gr.Checkbox(label="Stream output")
+                    ],
+                    outputs=[
+                        gr.Textbox(label="Generated Response"),
+                        gr.Textbox(label="Tokens per second"),
+                        gr.Textbox(label="RAM Usage"),
+                        gr.Textbox(label="Referenced Documents")
+                    ],
+                    title="RAG/Non-RAG Q&A System",
+                    description="Ask a question with or without using RAG. The response is generated using a GPU-accelerated model. RAM usage and referenced document IDs (for RAG) are logged."
+                )
+
+            with gr.Tab("Any Request to Qwen2-0.5B"):
+                gr.HTML("Placeholder for https://huggingface.co/h2oai/h2o-danube3-500m-chat-GGUF and https://huggingface.co/OuteAI/Lite-Mistral-150M-v2-Instruct as alternatives")
+                gr.HTML("https://huggingface.co/spaces/HuggingFaceTB/SmolLM-135M-Instruct-WebGPU - 135M model to be tested as an alternative (and everything up to 1.5B - how to delete a model in your code?) - Need to go over the dataset to see how to prompt it - https://huggingface.co/datasets/HuggingFaceTB/smollm-corpus")
+                gr.HTML("Placeholder for Qwen2 72B as an alternative - use a checkbox and a gradio client API call")
+                gr.Markdown("# Qwen-0.5B-Instruct Language Model")
+                gr.Markdown("This demo uses the Qwen-0.5B-Instruct model to generate responses based on your input.")
+                gr.HTML("Example prompts: <br>I am writing a story about a chef. Please write dishes to appear on the menu. <br>What are the most common decisions that a chef story would include? <br>What are the kinds of problems that a chef story would include? <br>What are the kinds of out-of-reach goals that a chef story would include? <br>Continue this config - paste any complete block of the config")
+
+                with gr.Row():
+                    with gr.Column():
+                        llmguide_prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
+                        llmguide_stream_checkbox = gr.Checkbox(label="Enable streaming")
+                        llmguide_submit_button = gr.Button("Generate")
+
+                    with gr.Column():
+                        llmguide_output = gr.Textbox(lines=10, label="Generated Response")
+                        llmguide_tokens_per_second = gr.Textbox(label="Tokens per Second")
+
+                llmguide_submit_button.click(
+                    llmguide_generate_response,
+                    inputs=[llmguide_prompt, llmguide_stream_checkbox],
+                    outputs=[llmguide_output, llmguide_tokens_per_second],
+                )
+
+        with gr.Accordion("New Config Proto Assist - Trying to abstract the process into one workflow is beyond me, so multiple paths to the goal (config) is the aim now", open=False):
+            with gr.Tab("Linear - List to Empty Config with Edit support"):
                 with gr.Accordion("Can copy in the Test Example State Machine tab - only linear path for now", open=False):
                     gr.Markdown("# Story and Timeline Generator")
                     gr.Markdown("Click the button to generate a random timeline and story based on UI elements and story events. <br>Ask an LLM to use this to write a story around")
@@ -812,20 +885,40 @@ with gr.Blocks() as demo:
 
                     generate_button.click(generate_story_and_timeline, inputs=[generate_no_story_timeline_points, generate_no_ui_timeline_points, timeline_num_lists_slider, timeline_items_per_list_slider, timeline_include_existing_games, timeline_include_multiplayer], outputs=[timeline_output_with_assets, story_output, game_structure_output_text_with_media, timeline_output_text, timeline_selected_lists_text]) #, generate_no_media_timeline_points, generate_with_media_check], outputs=[timeline_output_with_assets, timeline_output, story_output, game_structure_output_text_with_media, game_structure_output_text])
 
-            with gr.Tab("Prompt Engineering as basis for ideation process"):
+            with gr.Tab("Linear - Prompt Engineering as basis for ideation process"):
                 gr.HTML("Current assisted workflow idea - Story timeline event suggestions (LLM / premade list) | Merging events with premade mermaid structures (LLM + story text + mermaid text) | Edit mermaid till satisfied (LLM + story text) | Ask LLM to convert to config (LLM + JSON text) | Edit config (LLM / user, with format assistance or not) | Playtest and go back to mermaid or config if there are problems")
                 gr.HTML("Interactive movie (UI interaction or no progress) vs Branching Paths (Maze)")
                 gr.HTML("Things that can change the workflow - Asset First (make the asset and make the transitions using an LLM), Export First (custom JS config, PlayCanvas, Unreal Engine reverse-engineered to this space's config?), Game Mechanics First (e.g. player stats, inventory and NPCs not implemented yet, so traversal-type games work best, aka graph-like structures)")
                 gr.HTML("Config writing = remix an old one, ask an LLM to make one, endless combination testing using the prompt engineering above, or writing by hand (prompt engineering on yourself)")
                 gr.HTML("Can use song lyrics as thematic source")
                 gr.HTML("Placeholder for each below prompt getting a Textbox")
-                for item in Storycraftprompts:
+                # for item in Storycraftprompts:
+                #     input = gr.State(item)
+                #     output = gr.Textbox("", label=item)
+                #     outputbtn = gr.Button(item).click(fn=llmguide_generate_response, inputs=input, outputs=output)
+                for i, item in enumerate(Storycraftprompts, 1):
                     input = gr.State(item)
-                    output = gr.Textbox("", label=item)
-                    outputbtn = gr.Button(item).click(fn=llmguide_generate_response, inputs=input, outputs=output)
+                    previous_input = gr.State(lambda: LinPEWFprevious_messages)
+                    output = gr.Textbox("", label=f"Output {i}")
+
+                    def LinPEWF_update_and_generate(prompt, prev_msgs):
+                        prev_msgs.append(prompt)
+                        formatted_prompt = LinPEWFformat_prompt(prompt, prev_msgs)
+                        response = llmguide_generate_response(formatted_prompt)
+                        prev_msgs.append(f"Response: {response}")
+                        return response
+
+                    outputbtn = gr.Button(f"Generate {i}").click(
+                        fn=LinPEWF_update_and_generate,
+                        inputs=[input, previous_input],
+                        outputs=output
+                    )
+
+                    LinPEWFprevious_messages.append(item)
+
 
             #with gr.Accordion("Decisions / Timeline Creation to Story to Config Conversation", open=False):
-            with gr.Tab("Decisions / Timeline Creation to Story to Config Conversation"):
+            with gr.Tab("Branching - Decisions / Timeline Creation to Story to Config Conversation"):
                 gr.HTML("Structures for interesting timeline progression")
                 gr.HTML("Placeholder - Considerations - Story from the perspective of the main character or an NPC in the LLM-generated story")
                 mermaideditoriframebtn = gr.Button("Load Mermaid Editor")
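One behavior of the chained loop above worth flagging: LinPEWFprevious_messages is appended to at UI build time, once per prompt, so the very first click on any "Generate" button already sees the full prompt list in its history; each click then appends the clicked prompt again plus the response. A minimal sketch of the build-time side of that, with invented prompt strings standing in for Storycraftprompts:

LinPEWFprevious_messages = []
Storycraftprompts = ["Premise", "Timeline", "Config"]  # invented stand-ins

for i, item in enumerate(Storycraftprompts, 1):
    # ... gr.State / gr.Textbox / gr.Button would be created here at build time ...
    LinPEWFprevious_messages.append(item)

print(LinPEWFprevious_messages)  # ['Premise', 'Timeline', 'Config'] before any click

Whether seeding the history with every prompt up front is intended, or each button should only see prompts answered so far, is left open by this code; the module-level list is also shared across all sessions, and the gr.State(lambda: LinPEWFprevious_messages) presumably relies on Gradio re-evaluating callable component values per session load.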
@@ -853,62 +946,6 @@ with gr.Blocks() as demo:
             with gr.Tab("Themes"):
                 gr.HTML("")
 
-        with gr.Accordion("Qwen 0.5B as Space Guide Tests", open=False):
-            with gr.Tab("General FAQ Attempt"):
-                FAQMainOutput = gr.TextArea(placeholder='Output will show here', value='')
-                FAQCustomButtonInput = gr.TextArea(lines=1, placeholder='Prompt goes here')
-
-                for category_name, category_prompts in FAQAllprompts.items():
-                    with gr.Accordion(f"General {category_name} Pattern based", open=False):
-                        with gr.Group():
-                            for index, (prompt, _) in enumerate(category_prompts):
-                                button = gr.Button(prompt)
-                                button.click(llmguide_generate_response, inputs=[FAQCustomButtonInput, gr.State(index), gr.State(category_name)], outputs=FAQMainOutput)
-
-            with gr.Tab("General RAG (Pathfinder?) Attempt"):
-                gr.HTML("https://huggingface.co/spaces/mteb/leaderboard - source for SOTA embedders - currently using all-MiniLM-L6-v2")
-                gr.HTML("Placeholder for a weak RAG-type character interaction test, i.e. input for a JSON 'Knowledge Base'")
-                gr.Interface(
-                    fn=process_query,
-                    inputs=[
-                        gr.Textbox(lines=2, placeholder="Enter your question here..."),
-                        gr.Checkbox(label="Use RAG"),
-                        gr.Checkbox(label="Stream output")
-                    ],
-                    outputs=[
-                        gr.Textbox(label="Generated Response"),
-                        gr.Textbox(label="Tokens per second"),
-                        gr.Textbox(label="RAM Usage"),
-                        gr.Textbox(label="Referenced Documents")
-                    ],
-                    title="RAG/Non-RAG Q&A System",
-                    description="Ask a question with or without using RAG. The response is generated using a GPU-accelerated model. RAM usage and referenced document IDs (for RAG) are logged."
-                )
-
-            with gr.Tab("Any Request to Qwen2-0.5B"):
-                gr.HTML("Placeholder for https://huggingface.co/h2oai/h2o-danube3-500m-chat-GGUF and https://huggingface.co/OuteAI/Lite-Mistral-150M-v2-Instruct as alternatives")
-                gr.HTML("https://huggingface.co/spaces/HuggingFaceTB/SmolLM-135M-Instruct-WebGPU - 135M model to be tested as an alternative (and everything up to 1.5B - how to delete a model in your code?) - Need to go over the dataset to see how to prompt it - https://huggingface.co/datasets/HuggingFaceTB/smollm-corpus")
-                gr.HTML("Placeholder for Qwen2 72B as an alternative - use a checkbox and a gradio client API call")
-                gr.Markdown("# Qwen-0.5B-Instruct Language Model")
-                gr.Markdown("This demo uses the Qwen-0.5B-Instruct model to generate responses based on your input.")
-                gr.HTML("Example prompts: <br>I am writing a story about a chef. Please write dishes to appear on the menu. <br>What are the most common decisions that a chef story would include? <br>What are the kinds of problems that a chef story would include? <br>What are the kinds of out-of-reach goals that a chef story would include? <br>Continue this config - paste any complete block of the config")
-
-                with gr.Row():
-                    with gr.Column():
-                        llmguide_prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
-                        llmguide_stream_checkbox = gr.Checkbox(label="Enable streaming")
-                        llmguide_submit_button = gr.Button("Generate")
-
-                    with gr.Column():
-                        llmguide_output = gr.Textbox(lines=10, label="Generated Response")
-                        llmguide_tokens_per_second = gr.Textbox(label="Tokens per Second")
-
-                llmguide_submit_button.click(
-                    llmguide_generate_response,
-                    inputs=[llmguide_prompt, llmguide_stream_checkbox],
-                    outputs=[llmguide_output, llmguide_tokens_per_second],
-                )
-
         with gr.Accordion("Existing Config Crafting Progression - click to open", open=False):
             with gr.Accordion("Test for config to gradio components order - ignore for now", open=False):
                 gr.HTML("Placeholder for changing the render below to the one above for new config but with the ability to upload files aka the media field should be file uploader / dropdowns for all files that have been uploaded")
 