# import streamlit as st
# from transformers import pipeline
#
# # Load the SQLCoder model
# sql_generator = pipeline('text-generation', model='defog/sqlcoder')
#
# st.title('SQL Table Extractor')
#
# # Text input for SQL query
# user_sql = st.text_input("Enter your SQL statement", "SELECT * FROM my_table WHERE condition;")
#
# # Button to parse SQL
# if st.button('Extract Tables'):
#     # Generate SQL or parse directly
#     results = sql_generator(user_sql)
#     # Assuming results contain SQL, extract table names (this part may require custom logic based on output)
#     tables = extract_tables_from_sql(results)
#     # Display extracted table names
#     st.write('Extracted Tables:', tables)
#
# def extract_tables_from_sql(sql):
#     # Dummy function: Implement logic to parse table names from SQL
#     return ["my_table"]  # Example output

# import streamlit as st
# from transformers import pipeline
#
# # Load the NER model
# ner = pipeline("ner", model="dbmdz/bert-large-cased-finetuned-conll03-english", grouped_entities=True)
#
# st.title('Hello World NER Parser')
#
# # User input for text
# user_input = st.text_area("Enter a sentence to parse for named entities:", "John Smith lives in San Francisco.")
#
# # Parse entities
# if st.button('Parse'):
#     entities = ner(user_input)
#     # Display extracted entities
#     for entity in entities:
#         st.write(f"Entity: {entity['word']}, Entity Type: {entity['entity_group']}")

import streamlit as st
from transformers import pipeline

# Load CodeBERT model as a feature extractor
# (Note: You may need to adjust the task if using CodeBERT for other specific purposes)
codebert = pipeline("feature-extraction", model="microsoft/codebert-base")

st.title('CodeBERT Feature Extractor')

# User input for text
user_input = st.text_area("Enter code or text to extract features:", "SELECT * FROM users;")

# Extract features
if st.button('Extract Features'):
    features = codebert(user_input)
    # Display extracted features (example: show size of feature vector for demonstration)
    st.write('Number of features extracted:', len(features[0][0]))
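
# --- Optional extension (illustrative sketch, not part of the app above) ---
# The feature-extraction pipeline returns a nested list shaped
# [batch][tokens][hidden_size]; for CodeBERT the hidden size is 768, which is
# what len(features[0][0]) reports. If a single fixed-size vector for the whole
# input is needed, one common approach is to mean-pool the per-token embeddings.
# The helper below is a sketch assuming numpy is available.
#
# import numpy as np
#
# def mean_pool(features):
#     """Average the per-token embeddings into one vector for the input."""
#     token_embeddings = np.array(features[0])   # shape: (num_tokens, hidden_size)
#     return token_embeddings.mean(axis=0)       # shape: (hidden_size,)
#
# # Example usage inside the button handler:
# # pooled = mean_pool(features)
# # st.write('Pooled vector dimensionality:', pooled.shape[0])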