Hugging Face Spaces build log: Runtime error
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, Conversation, pipeline

# Load pre-trained conversational models and their tokenizers.
# NOTE(review): the original list used Hub repo ids that do not exist
# ("bard", "gpt-3", "gpt-j", "Turing NLG") and loaded them through
# AutoModelForSeq2SeqLM, which cannot load decoder-only checkpoints such as
# GPT-Neo -- both caused the Space's runtime error.  Replaced with valid,
# decoder-only conversational checkpoints loaded via AutoModelForCausalLM.


def _load(repo_id):
    """Return one (name, tokenizer, model) tuple for *repo_id*.

    The 3-tuple layout matches what the rest of the script indexes into.
    """
    return (
        repo_id,
        AutoTokenizer.from_pretrained(repo_id),
        AutoModelForCausalLM.from_pretrained(repo_id),
    )


models_and_tokenizers = [
    _load("microsoft/DialoGPT-small"),
    _load("microsoft/DialoGPT-medium"),
    _load("microsoft/DialoGPT-large"),
]
# Create the conversational pipeline for the first model.
# Each entry of models_and_tokenizers is a (name, tokenizer, model) tuple,
# so the model lives at index 2 and the tokenizer at index 1.  The original
# passed the tokenizer as `model=` and the repo-name string as `tokenizer=`.
conversational_pipeline = pipeline(
    "conversational",
    model=models_and_tokenizers[0][2],
    tokenizer=models_and_tokenizers[0][1],
)
# Define a function to handle conversation with multiple models.
def handle_conversation(models, prompt):
    """Send *prompt* to every model and collect one reply per model.

    Parameters
    ----------
    models : list of (name, tokenizer, model) tuples
        Loaded conversational checkpoints, as built in
        ``models_and_tokenizers``.
    prompt : str
        The user message forwarded to each model.

    Returns
    -------
    list[str]
        The latest generated response from each model, in list order.
    """
    responses = []
    # Each entry is a 3-tuple; the original unpacked only two values
    # ("for model, tokenizer in models"), which raises ValueError on the
    # first iteration.
    for name, tokenizer, model in models:
        conversation = Conversation(prompt)
        result = pipeline("conversational", model=model, tokenizer=tokenizer)(conversation)
        responses.append(result.generated_responses[-1])
    return responses
# Demo input: replace this snippet with the code you want to analyze.
user_code = """
def reverse_prompt_engineer(code):
    # TODO: Reverse prompt engineer the code
    return None
"""

# Query every configured model and show what each one suggested.
reverse_prompt = f"Now I want you to reverse prompt engineer the {user_code}. Give me a single prompt that would create a similar output."
model_responses = handle_conversation(models_and_tokenizers, reverse_prompt)
print(model_responses)

# Tell the user how to drive the script directly.
print("To use this tool, simply paste your code snippet into the `user_code` variable and then run the code. The tool will then generate a prompt that can be used to create similar code.")
# Create the Gradio interface.
# NOTE(review): the original passed `fn=handle_conversation` directly, but
# that function takes two arguments (models, prompt) while `inputs="text"`
# supplies exactly one string -- every submission raised a TypeError, and
# the raw list return did not fit `outputs="text"`.  Bind the model list
# here and join the replies into one text blob.
def _gradio_fn(code_snippet):
    """Adapter: single text in, single text out, for gr.Interface."""
    replies = handle_conversation(
        models_and_tokenizers,
        f"Now I want you to reverse prompt engineer the {code_snippet}. Give me a single prompt that would create a similar output.",
    )
    return "\n\n".join(replies)


app = gr.Interface(
    fn=_gradio_fn,
    inputs="text",
    outputs="text",
    title="Reverse Prompt Engineer",
    description="Generate a prompt that can be used to create similar code.",
)
app.launch()