Spaces:
Sleeping
Sleeping
import requests
import json
import streamlit as st

# Module-level placeholder; each handler below assigns its own local
# `response` object, so this value is never actually read.
response = None
def create_model(name, modelname, prompt_data):
    """Create a new model on the API server from a base model and a system prompt.

    Args:
        name: Name for the new model.
        modelname: Existing base model (used in the Modelfile ``FROM`` line).
        prompt_data: System prompt text embedded in the Modelfile.

    Returns:
        A human-readable status string: success message, or the server's
        error text prefixed with ``error:``.

    NOTE(review): reads module-level globals ``inp_url`` and ``headers``,
    which are set only after the user submits a valid URL.
    """
    payload = json.dumps({
        "name": name,
        "modelfile": f"FROM {modelname}\nSYSTEM {prompt_data}",
    })
    response = requests.post(inp_url + "create", headers=headers, data=payload)
    if response.status_code == 200:
        return "model created successfully"
    # Bug fix: the original `return "error:", response.text` produced a
    # 2-tuple, which st.write rendered as a tuple instead of a message.
    return f"error: {response.text}"
def view_models():
    """Return the names of all models available on the API server.

    Returns:
        list[str]: model names extracted from the ``/tags`` endpoint payload.

    Raises:
        requests.HTTPError: if the server responds with a non-2xx status
            (the original crashed inside ``json.loads`` with a confusing
            decode error instead).
    """
    response = requests.get(inp_url + "tags", headers=headers)
    # Fail loudly on HTTP errors rather than attempting to parse an error body.
    response.raise_for_status()
    payload = response.json()
    return [model['name'] for model in payload['models']]
def generate_response(model_nm, prompt):
    """Send the accumulated prompt history to the model and return its reply.

    Args:
        model_nm: Name of the model to query.
        prompt: New user prompt; appended to the module-level ``history``
            so each call carries the full conversation so far.

    Returns:
        The model's response text on success, otherwise an ``error:``-prefixed
        string containing the server's error body.
    """
    history.append(prompt)
    # Join every prompt so far so the model sees the whole conversation.
    final_prompt = "\n".join(history)
    data = {
        "model": model_nm,
        "prompt": final_prompt,
        "stream": False,
    }
    response = requests.post(inp_url + "generate", headers=headers,
                             data=json.dumps(data))
    if response.status_code == 200:
        return json.loads(response.text)['response']
    # Bug fix: the original `return "error:", response.text` returned a tuple.
    return f"error: {response.text}"
def home(inputurl):
    """Probe the user-supplied base URL and derive the API base URL.

    Args:
        inputurl: Base URL entered by the user.

    Returns:
        tuple: ``(status_code, api_url)`` on a completed probe, or
        ``(None, None)`` when *inputurl* is empty.  The original returned a
        bare ``None`` in that case, which broke tuple unpacking at the
        call site.
    """
    # Bug fix: the original tested the global `input_url` instead of the
    # `inputurl` parameter it was given.
    if not inputurl:
        return None, None
    api_url = url(inputurl)
    response = requests.get(inputurl, headers=headers)
    if response.status_code == 200:
        st.write("Main URL set successfully!")
    return response.status_code, api_url
def create():
    """Render the model-creation page: gather inputs and create the model."""
    st.title("Create a model")
    new_name = st.text_input("Model name", key='name')
    base_model = st.text_input("From which model, you need to generate new model?", key='model')
    prompt_text = st.text_area("text for prompting", key='file')
    clicked = st.button("Create")
    # Only fire when every field is filled in and the button was pressed.
    if clicked and new_name and base_model and prompt_text:
        st.write(create_model(new_name, base_model, prompt_text))
def view():
    """Render a button that lists every available model when pressed."""
    if st.button("show models"):
        st.write(view_models())
def generate():
    """Render the response-generation page: model name + prompt -> reply."""
    chosen_model = st.text_input("model name", key="model1")
    user_text = st.text_area("input text", key="inp")
    submitted = st.button("submit")
    if submitted and user_text:
        st.write(generate_response(chosen_model, user_text))
def main():
    """Render the sidebar navigation and dispatch to the selected page."""
    pages = {
        "create model": create,
        "Generate response": generate,
    }
    # Bug fix: the original used `with st.sidebar.title(...)` — but title()
    # returns a plain element, not a container.  Render the title, then
    # place the model-list button inside the sidebar container proper.
    st.sidebar.title("Navigation")
    with st.sidebar:
        view()
    selection = st.sidebar.radio("Go to", list(pages.keys()))
    pages[selection]()
# --- Module-level UI widgets and shared state ------------------------------
input_url = st.text_input("provide your url to hook the models")


def url(url1):
    """Return the API base URL derived from the given host URL.

    Bug fix: the original lambda ignored its argument and read the global
    ``input_url``; using the parameter keeps every existing call site
    (all of which pass ``input_url``) working while making the helper honest.
    """
    return f"{url1}/api/"


headers = {"Content-Type": 'application/json'}
history = []   # accumulated prompts, shared by generate_response for context
payload = {}   # unused placeholder; kept so existing references don't break
st.info("""
# Model Generation App
This model is developed using LLAMA \n
This app allows users to create, view, and generate responses from trained models. \n
To Utilize the model operations, Please run the llama model in local and share the public url here.
If the public URL is valid the you can able to perform some operations
## Create a model
Enter the name of the new model, the name of the model you want to use as the base, and the prompt data. Click the "Create" button to create the new model.
## View models
Click the "Show models" button to view all available models.
## Generate response
Enter the name of the model you want to generate a response from, and the input value. Click the "Submit" button to generate the response.
Note: This app requires a valid URL to connect to the API server.
""")
# Validate the URL and launch the app only once a URL has been entered.
if input_url:
    try:
        # inp_url is read as a global by create_model / view_models /
        # generate_response, so it must be assigned here before main() runs.
        inp_url = url(input_url)
        # home() performs the probe GET itself; the original issued a second,
        # redundant request here whose result was discarded.
        url_response, url_input = home(input_url)
        if url_response == 200:
            main()
        else:
            # Bug fix: st.chat_message() takes a role name, not a message;
            # report the failure with st.error instead.
            st.error("invalid input")
    except requests.exceptions.MissingSchema as e:
        st.error(f"Invalid URL schema: {e}")
    except requests.exceptions.RequestException as e:
        st.error(f"An error occurred: {e}")