# NOTE: the three lines below ("Spaces: / Runtime error / Runtime error") are
# residue from the web page this notebook was scraped from, not notebook code.
# -*- coding: utf-8 -*-
"""Academic_Papers_Writing_translat.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/github/So-AI-love/chatgpt-prompts-for-academic-writing/blob/main/Auto_Making/Academic_Papers_Writing_translat.ipynb

## Automatically write a paper with ChatGPT and this GitHub prompt-instruction set:
https://github.com/ahmetbersoz/chatgpt-prompts-for-academic-writing
"""
# --- Environment setup (Colab shell magics; not plain Python) ---
# OpenAI client + retry helper + DOCX/PDF tooling.
!pip install openai
!pip install backoff
!pip install docx2pdf
!pip install python-docx
!pip install django
# Set up Colab for running Streamlit: localtunnel exposes the port,
# icanhazip prints the public IP to use as the tunnel password.
!npm install localtunnel
!pip install -q dl-translate
!curl ipv4.icanhazip.com
# LibreOffice is used headless later to convert DOCX -> PDF.
!sudo apt-get update
!sudo apt-get install libreoffice
#!sudo apt-get install abiword
!sudo apt install libreoffice-writer
# Define main variables.
# Each is deliberately the literal string "{NAME}": the f-string prompts below
# then still contain a "{NAME}" marker, which generate_content() /
# extract_main_variables() later detect and fill with generated text.
TOPIC = "{TOPIC}"
RESEARCH_DOMAIN = "{RESEARCH_DOMAIN}"
PARAGRAPH = "{PARAGRAPH}"
PARAGRAPHS = "{PARAGRAPHS}"
TOPIC_SENTENCE = "{TOPIC_SENTENCE}"
LANGUAGE = "{LANGUAGE}"
ABSTRACT_PARAGRAPH = "{ABSTRACT_PARAGRAPH}"
BIBLIOGRAPHY = "{BIBLIOGRAPHY}"
THEORY1 = "{THEORY1}"
THEORY2 = "{THEORY2}"
RESEARCH_QUESTIONS = "{RESEARCH_QUESTIONS}"
ACTION = "{ACTION}"
RESULT_PARAGRAPHS = "{RESULT_PARAGRAPHS}"
DATE = "{DATE}"
NUMBER_OF_DAYS_MONTHS_YEARS = "{NUMBER_OF_DAYS_MONTHS_YEARS}"
| # List of prompts for generating academic papers | |
| prompts = [ | |
| # Improving Language | |
| f"Write a counterargument to the following claim: '{PARAGRAPH}'", | |
| f"Rewrite this in an academic voice: '{PARAGRAPH}'", | |
| f"Expand these notes: '{PARAGRAPH}'", | |
| f"Provide me a list of words and phrases which were repeatedly / more than 3 times used: '{PARAGRAPHS}'", | |
| f"Provide me a list of synonyms for '{PARAGRAPH}' and evaluate them in the context of '{PARAGRAPH}'", | |
| f"Act as a language expert, proofread my paper on '{TOPIC_SENTENCE}' while putting a focus on grammar and punctuation.", | |
| f"In the context of '{RESEARCH_DOMAIN}' translate '{PARAGRAPH}' into the '{LANGUAGE}' language.", | |
| # Brainstorming | |
| f"Find a research topic for a PhD in the area of '{TOPIC}'", | |
| f"Write a detailed proposal on the following research topic. Make sure it is free from plagiarism. '{PARAGRAPH}'", | |
| f"Identify gaps in the literature on '{TOPIC_SENTENCE}'", | |
| f"Generate 10 academic research questions about '{PARAGRAPHS}'", | |
| f"Generate a list of research hypotheses related to '{TOPIC_SENTENCE}'", | |
| f"Identify potential areas for future research in the context of this '{TOPIC_SENTENCE}'", | |
| f"Suggest novel applications of '{TOPIC_SENTENCE}' within '{RESEARCH_DOMAIN}'", | |
| # Title/Topic Sentence | |
| f"Suggest 5 titles for the following abstract: '{ABSTRACT_PARAGRAPH}'", | |
| f"Write a topic sentence for this paragraph: '{PARAGRAPH}'", | |
| # Keywords | |
| f"Provide 5 keywords for this: '{PARAGRAPHS}'", | |
| # Abstract | |
| f"Generate an abstract for a scientific paper based on this information for: '{PARAGRAPHS}'", | |
| # Outline | |
| f"Generate an outline for '{TOPIC_SENTENCE}'", | |
| f"I want to write a journal article about '{TOPIC_SENTENCE}'. Give me an outline for the article that I can use as a starting point.", | |
| # Introduction | |
| f"Come up with an introduction for the following research topic: '{TOPIC_SENTENCE}'", | |
| # Literature Review | |
| f"Conduct a literature review on '{TOPIC_SENTENCE}' and provide review paper references", | |
| f"Provide me with references and links to papers in '{PARAGRAPH}'", | |
| f"Summarize the scholarly literature including in-text citations on '{PARAGRAPHS}'", | |
| f"Write this in standard Harvard referencing '{PARAGRAPH}'", | |
| f"Convert this '{BIBLIOGRAPHY}' from MLA to APA style.", | |
| f"Compare and contrast '{THEORY1}' and '{THEORY2}' in the context of '{RESEARCH_DOMAIN}'", | |
| # Methodology | |
| f"Create objectives and methodology for '{TOPIC_SENTENCE}'", | |
| f"Write a detailed methodology for the topic: '{TOPIC_SENTENCE}'", | |
| f"Analyze the strengths and weaknesses of this methodology: '{PARAGRAPHS}'", | |
| f"Write objectives for this study: '{TOPIC_SENTENCE}'", | |
| f"What are the limitations of using '{TOPIC_SENTENCE}' in '{RESEARCH_DOMAIN}'?", | |
| f"Create a recipe for the methods used in this '{PARAGRAPHS}'", | |
| f"Suggest interdisciplinary approaches to '{TOPIC_SENTENCE}'", | |
| f"Explain how qualitative/quantitative research methods can be used to address '{RESEARCH_QUESTIONS}'", | |
| f"Recommend best practices for data collection and analysis in '{TOPIC_SENTENCE}'", | |
| # Experiments | |
| f"Design an experiment that '{ACTION}'", | |
| # Results | |
| f"Write a result section for the following paragraphs. Please write this in the third person. '{PARAGRAPHS}'", | |
| # Discussion | |
| f"Discuss this results: '{RESULT_PARAGRAPHS}'", | |
| # Conclusion | |
| f"Generate a conclusion for this: '{PARAGRAPHS}'", | |
| f"Give recommendations and conclusion for: '{PARAGRAPHS}'", | |
| # Future Works | |
| f"Can you suggest 3 directions for future research on this topic: '{PARAGRAPH}'", | |
| # Plan/Presentation | |
| f"Develop a research plan for: '{TOPIC_SENTENCE}'", | |
| f"Write a schedule for completion in '{TOPIC_SENTENCE}' in NUMBER OF DAYS MONTHS YEARS which is '{NUMBER_OF_DAYS_MONTHS_YEARS}'", | |
| f"The deadline for the submission of the first draft is '{DATE}'. Give me a week-by-week breakdown so I can plan my writing better.", | |
| f"Write a sensational press release for this research: '{PARAGRAPHS}'", | |
| f"Make this more persuasive: '{PARAGRAPH}'", | |
| f"Write 3 tweets about this research? '{PARAGRAPHS}'", | |
| ] | |
# Concrete paper topic for this run (replaces the "{TOPIC}" placeholder above).
TOPIC = f"Understanding the Cycle of Domestic Violence against iranian women: The Influence of changes in the type of violence that continues in the aftermath of violence"
#TOPIC = f" "
"""# Functions for saving as PDF and DOCX:
👇🌱
"""
# LibreOffice (headless) performs the DOCX -> PDF conversion below.
!sudo apt-get update
!sudo apt-get install libreoffice
#!sudo apt-get install abiword
!sudo apt install libreoffice-writer
import subprocess

def convert_docx_to_pdf0(docx_path, pdf_path):
    """Convert a DOCX file to PDF with AbiWord (unused fallback variant).

    NOTE(review): `pdf_path` is passed to --to-dir, so it must be a
    directory, not a file path — confirm before reviving this helper.
    """
    subprocess.call(['abiword', '--to=pdf', docx_path, '--to-dir', pdf_path])

def convert_docx_to_pdf(docx_path, pdf_path):
    """Convert `docx_path` to PDF via headless LibreOffice.

    `pdf_path` is the OUTPUT DIRECTORY (--outdir); the PDF keeps the
    DOCX base name.  Errors are not checked (subprocess.call ignores
    the exit status).
    """
    command = ['libreoffice', '--headless', '--convert-to', 'pdf', '--outdir', pdf_path, docx_path]
    subprocess.call(command)
import os
import subprocess
from docx import Document

def save_academic_paper(topic, prompt_my):
    """Append `prompt_my` as a paragraph to /content/<topic>.docx
    (creating the file if needed) and convert it to PDF in /content/output/.

    NOTE: this local-disk version is redefined by a Google-Drive version
    later in the notebook; only the most recently executed cell wins.
    """
    # Define the path to the DOCX file
    docx_path = f"/content/{topic}.docx"
    # Open the existing document, or start a fresh one.
    if os.path.isfile(docx_path):
        doc = Document(docx_path)
    else:
        doc = Document()
    # Add the generated text to the document
    #print ("____&&&&&&&&&&\n",prompt_my)
    doc.add_paragraph(prompt_my)
    # Save the document
    doc.save(docx_path)
    # Convert the DOCX file to a PDF
    convert_docx_to_pdf(docx_path, "/content/output/")
from google.colab import drive
import os
import subprocess
from docx import Document
from django.utils.text import slugify

# Mount Google Drive so generated papers persist across Colab sessions.
drive.mount('/content/drive')
def save_academic_paper(topic, prompt_my):
    """Append `prompt_my` as a paragraph to a DOCX in Google Drive named
    after the slugified `topic`, then convert the DOCX to PDF.

    Fixes: the original computed `docx_path` from the raw topic BEFORE
    calling slugify(topic), so the slug was never used in the filename
    (unsafe characters could land in the path); it also used os.mkdir,
    which fails when parent directories are missing.
    """
    # Folder in the mounted Google Drive that collects all output files.
    folder_path = r"/content/drive/My Drive/ChatGPT_Paper_wrting/"
    pdf_dir = r"/content/drive/My Drive/ChatGPT_Paper_wrting/"
    # Create the folder (and any missing parents); no-op if it exists.
    os.makedirs(folder_path, exist_ok=True)
    # Slugify BEFORE building the path so the filename is filesystem-safe.
    topic = slugify(topic)
    docx_path = f"{folder_path}{topic}.docx"
    # Open the existing document, or start a fresh one.
    if os.path.isfile(docx_path):
        doc = Document(docx_path)
    else:
        doc = Document()
    # Add the generated text and persist the document.
    doc.add_paragraph(prompt_my)
    doc.save(docx_path)
    # Convert the saved DOCX to a PDF in the same Drive folder.
    convert_docx_to_pdf(docx_path, pdf_dir)
#save_academic_paper("Persian_"+'topic','\n**'+'choice_translated_prompt'+'**\n'+"choice_translated")
# Build a short, underscore-joined name from the topic...
topic = TOPIC[:20].replace(" ", "_")
# ...which is immediately overwritten by a slug of the topic.
# NOTE(review): [:-5] drops the last 5 characters of TOPIC — confirm that
# truncation is intended and not a leftover from stripping an extension.
topic = slugify(TOPIC[:-5])
docx_path = f"{topic}.docx"
print(docx_path)
# Translation stack (googletrans needs these pinned HTTP libraries).
#!pip install googletrans==4.0.0-rc1
!pip install httpcore==0.15.0 httpx pymongo googletrans
!pip install httpx==0.24.1
| """#I have tried to update the prompt by ChatGPT itself 👇👇""" | |
def extract_main_variables(prompts, variable):
    """Scan `prompts` for "{NAME}" placeholders and return the template
    variables as a dict.

    Every known variable starts as None; if a "{TOPIC}" placeholder is
    found in any prompt, it is bound to `variable`.

    Fixes: the original used `re` although `import re` only appears in a
    later cell (a local import makes the function self-contained), and it
    wrote None entries for ANY placeholder name found, silently adding
    unknown keys; the result is now restricted to the known variable set.

    Returns: dict[str, str | None]
    """
    import re  # local: the module-level `import re` lives in a later cell

    known = (
        'TOPIC', 'RESEARCH_DOMAIN', 'PARAGRAPH', 'PARAGRAPHS',
        'TOPIC_SENTENCE', 'LANGUAGE', 'ABSTRACT_PARAGRAPH', 'BIBLIOGRAPHY',
        'THEORY1', 'THEORY2', 'RESEARCH_QUESTIONS', 'ACTION',
        'RESULT_PARAGRAPHS', 'DATE', 'NUMBER_OF_DAYS_MONTHS_YEARS',
    )
    main_variables = dict.fromkeys(known)
    for prompt in prompts:
        for var in re.findall(r'\{(\w+)\}', prompt):
            if var == 'TOPIC':
                main_variables[var] = variable
    return main_variables
import re

def generate_prompt_from_response(previous_response, main_variables):
    """Derive a follow-up prompt from the tail of a model response.

    Takes the final 50 characters of `previous_response` and substitutes
    every bound variable value back with its "{NAME}" placeholder so the
    result can be reused as a template.  Unbound (None) variables are
    skipped.
    """
    tail = previous_response[-50:]
    for name, bound_value in main_variables.items():
        if bound_value is None:
            continue
        tail = tail.replace(bound_value, f"{{{name}}}")
    return tail
# Example usage of the two helpers above.
#TOPIC =f"strategies for increase the compassion in iranian Women movment and ist relationship with the fatigue of compassion in light triad personality" #f" the dark triad in psychology and it's relation with {goal}"
#TOPIC = f"Understanding the Cycle of Domestic Violence against iranian women: The Influence of Economic Power"
prompts_1 = [
    f"Find a research topic for a PhD in the area of '{TOPIC}'",
]
#prompts_2 = [
# "I need to find information on renewable energy for a research paper that will be 1,500 words and must include eight sources.",
# "The research paper should cover the latest advancements in renewable energy technology."
#]
# Bind TOPIC into the variable table, then rebuild a prompt from a fake response.
main_variables = extract_main_variables(prompts,TOPIC)
print(main_variables)
previous_response = "Previous GPT response"
new_prompt = generate_prompt_from_response(previous_response, main_variables)
print(new_prompt)
print ( prompts)
| """#Updating the prompt by ChatGPT:👇👇""" | |
def generate_prompt_update_0(prompt, previous_content, main_variables):
    """Ask the completion model to rewrite `prompt` in light of
    `previous_content` and return the stripped rewritten text.

    Only the TOPIC placeholder is substituted before sending; the other
    variables are left untouched.
    """
    model_engine = "text-davinci-003"
    max_tokens = 2048
    # Substitute the bound TOPIC value into the template, if present.
    topic_value = main_variables.get('TOPIC')
    if topic_value is not None:
        prompt = prompt.replace("{TOPIC}", topic_value)
    # Build the rewrite instruction from previous content + prompt.
    instruction = f"Given the previous content:\n\n{previous_content}\n\nUpdate the prompt: {prompt}"
    completion = client.completions.create(
        model=model_engine,
        prompt=instruction,
        max_tokens=max_tokens,
        temperature=0.3,
        n=1,
        frequency_penalty=0,
    )
    # Return the first choice's text without surrounding whitespace.
    return completion.choices[0].text.strip()
from openai import RateLimitError
from backoff import on_exception, expo

def generate_prompt_update_a1(prompt_my):
    """Send `prompt_my` to gpt-3.5-turbo-instruct and return the raw
    Completion object (callers read `.choices[i].text`)."""
    return client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt_my,
        max_tokens=2048,
        temperature=0.3,
        n=1,
        frequency_penalty=0,
    )
import random
import time
import openai
from openai import RateLimitError
import os
from openai import OpenAI

# SECURITY FIX: the original pinned a hard-coded OpenAI API key here.
# Any key committed to source control is compromised and must be rotated.
# Read it from the environment instead.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
)
def retry_with_exponential_backoff(
    func,
    initial_delay: float = 1,
    exponential_base: float = 2,
    jitter: bool = True,
    max_retries: int = 10,
    errors: tuple = (RateLimitError,),
):
    """Wrap `func` so the exception types in `errors` trigger retries
    with exponentially growing, optionally jittered sleeps.

    Raises a plain Exception once `max_retries` is exceeded; any error
    type not listed in `errors` propagates immediately.
    """
    def wrapper(*args, **kwargs):
        attempts = 0
        delay = initial_delay
        # Loop until success, an unlisted error, or too many retries.
        while True:
            try:
                return func(*args, **kwargs)
            except errors:
                attempts += 1
                if attempts > max_retries:
                    raise Exception(
                        f"Maximum number of retries ({max_retries}) exceeded."
                    )
                # Grow the delay; jitter multiplies by a factor in [1, 2).
                delay *= exponential_base * (1 + jitter * random.random())
                time.sleep(delay)
            except Exception as e:
                # Anything not in `errors` is re-raised unchanged.
                raise e
    return wrapper
from openai import RateLimitError
from backoff import on_exception, expo

def generate_prompt_update(prompt_my):
    """Low-temperature (0.1) completion call for prompt rewriting;
    returns the raw Completion object."""
    return client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt_my,
        max_tokens=1024,
        temperature=0.1,
        n=1,
        frequency_penalty=0,
    )
def generate_academic_paper(prompt_my):
    """Generate paper text for `prompt_my` with gpt-3.5-turbo-instruct
    and return the raw Completion object.

    Fix: the original called `client.completion.create`, which does not
    exist on the openai>=1.0 client (AttributeError on every call); the
    correct accessor — used by every sibling helper in this notebook —
    is `client.completions.create`.
    """
    completion = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt_my,
        max_tokens=2048,
        temperature=0.3,
        n=1,
        frequency_penalty=0,
    )
    return completion
def generate_content(prompts, variables, TOPIC, perviuse_content=""):
    """For every variable name appearing in a prompt, ask the model for a
    short (<10 words) value, then substitute the "{NAME}" markers in all
    prompts with the generated values.

    Args:
        prompts: list of prompt templates containing "{NAME}" markers.
        variables: list of variable NAMES to look for (substring match).
        TOPIC: topic string given to the model as context.
        perviuse_content: (sic) previous model output used as extra context.

    Returns: list of prompts with known markers replaced.

    NOTE(review): `if var in prompt` is a plain substring test, so e.g.
    'PARAGRAPH' also matches inside 'PARAGRAPHS'; and only the LAST
    choice's text is kept per variable — confirm both are intended.
    """
    # Generate content for each variable
    variable_contents = {}
    for prompt in prompts:
        for var in variables:
            if var in prompt:
                print(f"\n---prompt is ----\n{prompt}")
                prompt_my = f"Generate content for the variable '{var}' in the context of the topic '{TOPIC}'. Please consider the result must be in less than 10 words. The prompt is '{prompt}'. Also the more information for understanding better content is '{perviuse_content}'"
                content = generate_prompt_update_a1(prompt_my)
                for choice in content.choices:
                    print("\n Result is :"+choice.text)
                    variable_contents[var] = choice.text
                print(f"variable_contents[{var}] is: {variable_contents[var]}")
    # Replace the variables in the prompts with the generated content
    prompt_new = []
    for prompt in prompts:
        for var, content in variable_contents.items():
            prompt = prompt.replace(f"{{{var}}}", variable_contents[var])
        prompt_new.append(prompt)
    print("\n --- Updated prompt is :\n"+str(prompt_new))
    return prompt_new
# Define main variables
#TOPIC = "strategies for increase the compassion in iranian Women movment and ist relationship with the fatigue of compassion in light triad personality"
# Names that generate_content() will look for inside the prompt templates.
variables = ["RESEARCH_DOMAIN", "PARAGRAPH", "PARAGRAPHS", "TOPIC_SENTENCE", "LANGUAGE", "ABSTRACT_PARAGRAPH", "BIBLIOGRAPHY", "THEORY1", "THEORY2", "RESEARCH_QUESTIONS", "ACTION", "RESULT_PARAGRAPHS", "DATE", "NUMBER_OF_DAYS_MONTHS_YEARS"]
# List of prompts
#prompts = ["prompt1", "prompt2", "prompt3"] # replace with your actual prompts
#perviuse_content= "test"
# Call the function and store the returned prompts
#updated_prompts = generate_content(prompts, variables, TOPIC, perviuse_content)
#print("Updated Prompts:", updated_prompts)
# Import the necessary libraries and set up the API key
#!pip install openai
import json
#TOPIC =f"strategies for increase the compassion in iranian Women movment and ist relationship with the fatigue of compassion in light triad personality"
#TOPIC = f"Understanding the Cycle of Domestic Violence against iranian women: The Influence of Economic Power"
#topic = f"The Importance of Focusing on Waste Collection in a Waste-Filled World"
import openai
import os
from openai import OpenAI
# SECURITY: commented-out hard-coded API keys redacted — rotate any key
# that was ever committed here.
#openai.api_key = "<redacted-leaked-key>"  # "your_openai_api_key"
#client = OpenAI(
#    # defaults to os.environ.get("OPENAI_API_KEY")
#    api_key = "<redacted-leaked-key>",
#)
#openai.api_key = os.getenv('<redacted-leaked-key>')
# Define the prompts (note: these use the lowercase `topic` slug, not TOPIC).
prompts3 = [
    f"Find a research topic for a PhD in the area of '{topic}'",
    f"Write a detailed proposal on the following research '{topic}'. Make Sure it is free from plagiarism. ",
    f"Identify gaps in the literature on '{topic}'",
    "Generate 10 academic research questions about Perviuse action",
    f"Generate a list of research hypotheses related to '{topic}'"
]
from googletrans import Translator

# Create a Translator object (googletrans; superseded later by dl-translate).
translator = Translator()
from openai import RateLimitError
from backoff import on_exception, expo

def generate_academic_paper_a0(prompt_my):
    """Completion call used by the batched paper generator; returns the
    raw Completion object."""
    return client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt_my,
        max_tokens=2048,
        temperature=0.3,
        n=1,
        frequency_penalty=0,
    )
import time, random
from openai import OpenAI

# SECURITY FIX: hard-coded (leaked) API key removed — read it from the
# environment and rotate the committed key.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
)
def generate_papers(prompts, perviuse_content, perviuse_try_numner):
    """Resumable batched paper generation: for each prompt (starting at
    index `perviuse_try_numner`), fill its variables via generate_content,
    query the model, save the result to DOCX/PDF, and feed the last output
    back in as context for the next prompt.

    Returns: (list of generated texts, updated resume index).

    NOTE(review): `list({prompt})` wraps the single prompt in a set and
    back into a list — presumably just "a one-element list"; confirm.
    """
    choice_text_all = []
    for i in range(perviuse_try_numner, len(prompts), 20):
        # Slice the prompts list to get the next 20 prompts
        print("I is ", i, " Len of prompt Is:", len(prompts))
        batch = prompts[i:i+20]
        print("batch is ", batch)
        for prompt in batch:
            # Print the prompt
            print("prompt is ", list({prompt}))
            # Fill the "{NAME}" markers using the previous output as context.
            updated_prompts = generate_content(list({prompt}), variables, TOPIC, perviuse_content)
            print("Updated Prompts:", updated_prompts)
            # Random pause to stay under the API rate limit.
            time.sleep(random.randint(22, 40))
            response = generate_academic_paper_a0(updated_prompts)
            print("\nGenerated Academic Paper:")
            print("========================\n")
            for choice in response.choices:
                print(choice.text)
                # Remember the latest output; it becomes the next context.
                choice_in_loop = choice.text
                choice_text_all.append(choice.text)
                #save_academic_paper(topic,'\n--------**\n'+updated_prompts+'/n-------**\n'+choice.text)
                save_academic_paper(TOPIC[:10000], '\n--------**\n'+''.join(updated_prompts)+'/n-------**\n'+choice.text)
                if hasattr(choice, 'choices'):
                    extract_text(choice)
            # NOTE(review): raises NameError if response.choices is empty.
            perviuse_content = choice_in_loop
            print("\n end of loop")
            print("========================\n")
            time.sleep(random.randint(22, 40))
            perviuse_try_numner = perviuse_try_numner+1
    return choice_text_all, perviuse_try_numner
#TOPIC =f"strategies for increase the compassion in iranian Women movment and ist relationship with the fatigue of compassion in light triad personality" #f" the dark triad in psychology and it's relation with {goal}"
#TOPIC = f"Understanding the Cycle of Domestic Violence against iranian women: The Influence of Economic Power"
# Generate academic papers for the given prompts.
def main_generate_papers(prompts, perviuse_content, perviuse_try_numner):
    """Drive generate_papers(), (re)initialising the resume state.

    Fixes: the original did not parse — `else if` is not Python (`elif`),
    `=` was used where a comparison belongs, and `Len` is not a name
    (`len`).  The trailing call also referenced undefined variables; it
    now passes explicit initial state.
    """
    if not perviuse_try_numner:
        # No resume state yet: start from the first prompt.
        perviuse_try_numner = 0
        perviuse_content = ['fist step']
    elif perviuse_try_numner >= len(prompts):
        # Past the last prompt: wrap around and start over.
        perviuse_try_numner = 0
    generate_papers(prompts, perviuse_content, perviuse_try_numner)
    return perviuse_content, perviuse_try_numner

main_generate_papers(prompts, ['fist step'], 0)
"""#Only update the Prompt by the help of ChatGPT:👇👇"""
# Define main variables
#TOPIC = "strategies for increase the compassion in iranian Women movment and ist relationship with the fatigue of compassion in light triad personality"
# Names to look for inside the prompt templates (substring match).
variables = ["RESEARCH_DOMAIN", "PARAGRAPH", "PARAGRAPHS", "TOPIC_SENTENCE", "LANGUAGE", "ABSTRACT_PARAGRAPH", "BIBLIOGRAPHY", "THEORY1", "THEORY2", "RESEARCH_QUESTIONS", "ACTION", "RESULT_PARAGRAPHS", "DATE", "NUMBER_OF_DAYS_MONTHS_YEARS"]
# Module-level demo: generate content for each variable found in `prompts`.
variable_contents = {}
for prompt in prompts:
    for var in variables:
        if var in prompt:
            prompt = f"Generate content for the variable '{var}' based on the topic '{TOPIC}'. For this prompt '{prompt}'"
            content= generate_academic_paper(prompt)
            # Keep the (last) choice's text as this variable's value.
            for choice in content.choices:
                print(choice.text)
                variable_contents[var] = choice.text
            print("variable_contents[var] is: ",variable_contents[var])
# Replace the variables in the prompts with the generated content
for prompt in prompts:
    for var, content in variable_contents.items():
        prompt = prompt.replace(f"{{{var}}}", content)
    print("Updated Prompt:", prompt)
def generate_prompt_update_a3(prompt, previous_content):
    """Return `prompt` rewritten by the completion model to account for
    `previous_content`; the result is stripped of surrounding whitespace."""
    model_engine = "text-davinci-003"
    max_tokens = 2048
    # Build the rewrite instruction from previous content + prompt.
    instruction = (
        f"Given the previous content:\n\n{previous_content}\n\n"
        f"Update the prompt: {prompt}"
    )
    completion = client.completions.create(
        model=model_engine,
        prompt=instruction,
        max_tokens=max_tokens,
        temperature=0.3,
        n=1,
        frequency_penalty=0,
    )
    return completion.choices[0].text.strip()
# Inside your loop.
# Fix: the original called `generate_prompt_update_2`, which is defined
# nowhere in this notebook (NameError on first iteration); the helper
# defined directly above is `generate_prompt_update_a3`.
for prompt in prompts:
    print("Original Prompt:", prompt)
    response = generate_academic_paper_a2(prompt)
    previous_content = response.choices[0].text.strip()
    updated_prompt = generate_prompt_update_a3(prompt, previous_content)
    print("Updated Prompt:", updated_prompt)
    # Continue with the rest of your processing
    # ...
"""# with no translation for become faster answer:👇👇🙏"""
# Import the necessary libraries and set up the API key
#!pip install openai
import json
#TOPIC =f"strategies for increase the compassion in iranian Women movment and ist relationship with the fatigue of compassion in light triad personality"
#TOPIC = f"Understanding the Cycle of Domestic Violence against iranian women: The Influence of Economic Power"
#topic = f"The Importance of Focusing on Waste Collection in a Waste-Filled World"
import openai
import os
from openai import OpenAI

# SECURITY FIX: hard-coded (leaked) API key removed — read it from the
# environment and rotate the committed key.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
)
# Define the prompts (lowercase `topic` slug, not TOPIC).
prompts3 = [
    f"Find a research topic for a PhD in the area of '{topic}'",
    f"Write a detailed proposal on the following research '{topic}'. Make Sure it is free from plagiarism. ",
    f"Identify gaps in the literature on '{topic}'",
    "Generate 10 academic research questions about Perviuse action",
    f"Generate a list of research hypotheses related to '{topic}'"
]
from googletrans import Translator
# Create a Translator object
translator = Translator()
# Function to generate an academic paper
from openai import RateLimitError
from backoff import on_exception, expo

def generate_academic_paper_a2(prompt_my):
    """Completion call used by the no-translation pipeline; returns the
    raw Completion object."""
    return client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt_my,
        max_tokens=2048,
        temperature=0.3,
        n=1,
        frequency_penalty=0,
    )
# Accumulates every generated text across calls (module-level state).
choice_text_all=[]
import time

# Function to generate academic papers for given prompts
# (no-translation variant: saves only the English DOCX/PDF).
def generate_papers(prompts):
    """Run each prompt through the model in windows of 20, print and save
    every result, and return the accumulated texts.

    NOTE(review): `extract_text` is referenced but never defined in this
    notebook; the `hasattr(choice, 'choices')` guard presumably keeps it
    from ever being called — confirm.
    """
    for i in range(0, len(prompts), 20):
        # Slice the prompts list to get the next 20 prompts
        print("I is ",i," Len of prompt Is:", len(prompts))
        batch = prompts[i:i+20]
        print("batch is ",batch)
        # Generate papers for the next 20 prompts
        for prompt in batch:
            # Print the prompt
            print("prompt is ", prompt)
            response = generate_academic_paper_a2(prompt)
            print("\nGenerated Academic Paper:")
            print("========================\n")
            for choice in response.choices:
                print(choice.text)
                choice_text_all.append(choice.text)
                # Translation disabled in this variant:
                # choice_translated = translate_to_persian(choice.text)
                # choice_translated_prompt = translate_to_persian(prompt)
                # print("\n-----\n Translated is ",choice_translated)
                # save_academic_paper("Persian_"+topic,'\n**'+choice_translated_prompt+'**\n'+choice_translated)
                save_academic_paper(topic,'\n--------**\n'+prompt+'/n-------**\n'+choice.text)
                # Recursively call the function for the nested Completion objects
                if hasattr(choice, 'choices'):
                    extract_text(choice)
            print("\n end of loop")
            print("========================\n")
        # Wait for a short period of time before sending the next batch of prompts
        time.sleep(40)
    return choice_text_all
# Generate academic papers for the given prompts
generate_papers(prompts)
| # SETUP COLAB for run Streamlit | |
| !npm install localtunnel | |
| !curl ipv4.icanhazip.com | |
| """# DL Translate | |
| A deep learning-based translation library built on Huggingface transformers and Facebook's mBART-Large | |
| https://colab.research.google.com/github/xhluca/dl-translate/blob/main/demos/colab_demo.ipynb#scrollTo=qdefSjR_YIiG | |
| """ | |
| pip install -q dl-translate | |
| import dl_translate as dlt | |
| mt = dlt.TranslationModel('mbart50') | |
import dl_translate as dlt

def translate_to_persian(text):
    """Translate English `text` to Persian using the module-level
    mbart50 model `mt` (loaded once above)."""
    # Initialize the translation model
    #mt = dlt.TranslationModel('mbart50')
    return mt.translate(text, source=dlt.lang.ENGLISH, target=dlt.lang.PERSIAN)
# Import the necessary libraries and set up the API key
#!pip install openai
import json
#TOPIC =f"strategies for increase the compassion in iranian Women movment and ist relationship with the fatigue of compassion in light triad personality"
#topic = f"The Importance of Focusing on Waste Collection in a Waste-Filled World"
import openai
import os
from openai import OpenAI

# SECURITY FIX: hard-coded (leaked) API key removed — read it from the
# environment and rotate the committed key.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
)
# Define the prompts (lowercase `topic` slug, not TOPIC).
prompts3 = [
    f"Find a research topic for a PhD in the area of '{topic}'",
    f"Write a detailed proposal on the following research '{topic}'. Make Sure it is free from plagiarism. ",
    f"Identify gaps in the literature on '{topic}'",
    "Generate 10 academic research questions about Perviuse action",
    f"Generate a list of research hypotheses related to '{topic}'"
]
from googletrans import Translator
# Create a Translator object
translator = Translator()
# Function to generate an academic paper
from openai import RateLimitError
from backoff import on_exception, expo

def generate_academic_paper_5(prompt_my):
    """Completion call used by the translating pipeline; returns the raw
    Completion object."""
    return client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt_my,
        max_tokens=2048,
        temperature=0.3,
        n=1,
        frequency_penalty=0,
    )
| choice_text_all=[] | |
| import time | |
| # Function to generate academic papers for given prompts | |
| # Function to generate academic papers for given prompts | |
def generate_papers(prompts):
    """Generate, translate, and save academic-paper text for each prompt.

    Prompts are processed in batches with a short pause between batches to
    stay under the OpenAI API rate limit.  Each completion is printed,
    appended to the module-level ``choice_text_all`` list, translated to
    Persian, and both versions are persisted via ``save_academic_paper``.

    Args:
        prompts: Sequence of prompt strings.

    Returns:
        Tuple ``(choice_text_all, translations)`` where ``translations`` is
        the list of Persian translations produced during this call.

    Note:
        Depends on ``generate_academic_paper``, ``translate_to_persian``,
        ``save_academic_paper``, ``extract_text`` and ``topic`` defined
        elsewhere in this file.
    """
    # BUGFIX: the original stepped `range` by 20 but sliced only 10 prompts
    # per batch, silently skipping half of the prompt list.
    BATCH_SIZE = 10
    translations = []
    for i in range(0, len(prompts), BATCH_SIZE):
        print("I is ", i, " Len of prompt Is:", len(prompts))
        batch = prompts[i:i + BATCH_SIZE]
        print("batch is ", batch)
        for prompt in batch:
            print("prompt is ", prompt)
            response = generate_academic_paper(prompt)
            print("\nGenerated Academic Paper:")
            print("========================\n")
            for choice in response.choices:
                print(choice.text)
                choice_text_all.append(choice.text)
                # Translate both the generated text and its prompt to Persian.
                choice_translated = translate_to_persian(choice.text)
                choice_translated_prompt = translate_to_persian(prompt)
                translations.append(choice_translated)
                print("\n-----\n Translated is ", choice_translated)
                save_academic_paper("Persian_" + topic, '\n**' + choice_translated_prompt + '**\n' + choice_translated)
                save_academic_paper(topic, '\n**' + prompt + '**\n' + choice.text)
                # Recurse into nested Completion objects, if any.
                if hasattr(choice, 'choices'):
                    extract_text(choice)
            print("\n end of loop")
            print("========================\n")
        # Brief pause before the next batch to avoid rate limiting.
        time.sleep(2)
    # BUGFIX: the original returned `choice.translated`, a nonexistent
    # attribute (AttributeError), and raised NameError for empty `prompts`.
    return choice_text_all, translations
# Generate academic papers for the given prompts
# (module-level side effect: runs the whole generation pipeline here).
# NOTE(review): `prompts3` appears to be defined later in this file —
# confirm it is in scope before this line executes.
generate_papers(prompts3)
| """#For solving queta prompt of Openai API; | |
| 👇👇🌱 | |
| """ | |
import os
import random
import time

from openai import OpenAI

# SECURITY: the original hard-coded (and thereby leaked) an OpenAI API key
# here.  Never commit secrets to source control; read the key from the
# environment instead.  The leaked key must be revoked.
client = OpenAI(
    # defaults to os.environ.get("OPENAI_API_KEY")
    api_key=os.environ.get("OPENAI_API_KEY"),
)
def retry_with_exponential_backoff(
    func,
    initial_delay: float = 1,
    exponential_base: float = 2,
    jitter: bool = True,
    max_retries: int = 10,
    errors: tuple = None,
):
    """Retry a function with exponential backoff.

    Args:
        func: Callable to wrap.
        initial_delay: Seconds to wait after the first failure.
        exponential_base: Multiplier applied to the delay on each retry.
        jitter: If True, randomize each delay to avoid thundering herds.
        max_retries: Maximum number of retries before giving up.
        errors: Tuple of exception classes that trigger a retry.  Defaults
            to ``(openai.RateLimitError,)``.

    Returns:
        The wrapped callable.

    Raises:
        Exception: When more than ``max_retries`` retries are needed.
    """
    # BUGFIX: the original default was `openai.error.RateLimitError` — the
    # `openai.error` namespace was removed in openai>=1.0 (this file imports
    # the new SDK), so merely *defining* this function raised
    # AttributeError.  Resolve the default lazily to the new-SDK class.
    def wrapper(*args, **kwargs):
        retry_errors = errors if errors is not None else (RateLimitError,)
        num_retries = 0
        delay = initial_delay
        # Loop until success, max_retries is hit, or a non-retryable error.
        while True:
            try:
                return func(*args, **kwargs)
            except retry_errors:
                num_retries += 1
                if num_retries > max_retries:
                    raise Exception(
                        f"Maximum number of retries ({max_retries}) exceeded."
                    )
                # Grow the delay exponentially, with optional jitter.
                delay *= exponential_base * (1 + jitter * random.random())
                time.sleep(delay)
            except Exception:
                # Anything not listed in `retry_errors` propagates at once.
                raise
    return wrapper
def generate_academic_paper_0(prompt_my):
    """Generate one completion for a prompt via the OpenAI Completions API.

    Args:
        prompt_my: Prompt string sent to the model.

    Returns:
        The raw completion object from ``client.completions.create``.

    Note:
        Relies on a module-level ``client`` (an ``openai.OpenAI`` instance)
        defined elsewhere in this file.
    """
    # NOTE(review): the original assigned unused locals `model_engine` and
    # `max_tokens` (legacy text-davinci-003 leftovers); removed as dead code.
    return client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt_my,
        max_tokens=2048,
        temperature=0.3,
        n=1,
        frequency_penalty=0,
    )
# Import the necessary libraries and set up the API key.
import json
import os

import openai
from openai import OpenAI

# Research topic used to build the prompt list below.
# (The original had a redundant f-prefix on this constant string.)
topic = "The Importance of Focusing on Waste Collection in a Waste-Filled World"

# SECURITY: the original hard-coded (and thereby leaked) an OpenAI API key
# twice in this section.  Read the key from the environment instead; the
# leaked key must be revoked.
openai.api_key = os.environ.get("OPENAI_API_KEY")
client = OpenAI(
    # defaults to os.environ.get("OPENAI_API_KEY")
    api_key=os.environ.get("OPENAI_API_KEY"),
)
# Define the prompts used to draft each section of the research write-up.
prompts3 = [
    f"Find a research topic for a PhD in the area of '{topic}'",
    f"Write a detailed proposal on the following research '{topic}'. Make Sure it is free from plagiarism. ",
    f"Identify gaps in the literature on '{topic}'",
    # BUGFIX: "Perviuse action" was a typo that would confuse the model.
    "Generate 10 academic research questions about the previous action",
    f"Generate a list of research hypotheses related to '{topic}'"
]
| # Function to generate an academic paper | |
| from openai import RateLimitError | |
| from backoff import on_exception, expo | |
def generate_academic_paper_6(prompt_my):
    """Generate one completion for a prompt via the OpenAI Completions API.

    Args:
        prompt_my: Prompt string sent to the model.

    Returns:
        The raw completion object from ``client.completions.create``.

    Note:
        Relies on a module-level ``client`` (an ``openai.OpenAI`` instance)
        defined elsewhere in this file.
    """
    # NOTE(review): the original assigned unused locals `model_engine` and
    # `max_tokens` (legacy text-davinci-003 leftovers); removed as dead code.
    return client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt_my,
        max_tokens=2048,
        temperature=0.3,
        n=1,
        frequency_penalty=0,
    )
# Module-level accumulator for generated completion text.
# NOTE(review): this re-binds the name defined earlier in the file,
# discarding any text collected so far — confirm that is intended.
choice_text_all=[]
import time
# Function to generate academic papers for given prompts
def generate_papers(prompts):
    """Generate and save academic-paper text for each prompt.

    Prompts are processed in batches with a pause between batches to
    respect API rate limits.  Each completion is printed, appended to the
    module-level ``choice_text_all`` list, and persisted via
    ``save_academic_paper``.

    Args:
        prompts: Sequence of prompt strings.

    Returns:
        The module-level ``choice_text_all`` list.

    Note:
        Depends on ``generate_academic_paper``, ``save_academic_paper``,
        ``extract_text`` and ``TOPIC`` defined elsewhere in this file.
    """
    # BUGFIX: the original stepped `range` by 20 but sliced only 10 prompts
    # per batch, silently skipping half of the prompt list.
    BATCH_SIZE = 10
    for i in range(0, len(prompts), BATCH_SIZE):
        print("I is ", i, " Len of prompt Is:", len(prompts))
        batch = prompts[i:i + BATCH_SIZE]
        print("batch is ", batch)
        for prompt in batch:
            print("prompt is ", prompt)
            response = generate_academic_paper(prompt)
            print("\nGenerated Academic Paper:")
            print("========================\n")
            for choice in response.choices:
                print(choice.text)
                choice_text_all.append(choice.text)
                save_academic_paper(TOPIC, '\n**' + prompt + '**\n' + choice.text)
                # Recurse into nested Completion objects, if any.
                if hasattr(choice, 'choices'):
                    extract_text(choice)
            print("\n end of loop")
            print("========================\n")
        # Brief pause before the next batch to avoid rate limiting.
        time.sleep(2)
    # BUGFIX: the original returned `choice.text_all` — a nonexistent
    # attribute (AttributeError), and raised NameError for empty `prompts`.
    return choice_text_all
# Generate academic papers for the given prompts
# (module-level side effect: runs the full generation pipeline here).
generate_papers(prompts3)
def render_index_page():
    """Render the landing page: a form that asks for a paper topic."""
    # Inline template string — no separate template file is required.
    page_markup = """
    <!DOCTYPE html>
    <html>
    <head>
        <title>Generate Academic Papers</title>
    </head>
    <body>
        <h1>Generate Academic Papers</h1>
        <form method="POST">
            <label for="topic">Topic:</label>
            <input type="text" id="topic" name="topic" required>
            <input type="submit" value="Generate">
        </form>
    </body>
    </html>
    """
    return render_template_string(page_markup)
def render_result_page(topic, docx_file, pdf_file, choice_text_all):
    """Render the result page for a finished generation run.

    Args:
        topic: The topic the papers were generated for.
        docx_file: Filename of the DOCX download offered to the user.
        pdf_file: Filename of the PDF download offered to the user.
        choice_text_all: The generated text to display on the page.

    Returns:
        The rendered HTML response.
    """
    # Inline f-string template; download links point at the /download route.
    page_markup = f"""
    <!DOCTYPE html>
    <html>
    <head>
        <title>Result</title>
    </head>
    <body>
        <h1>Result</h1>
        <p>Academic papers have been generated for the topic: {topic}</p>
        <a href="/download/{docx_file}">Download DOCX</a>
        <a href="/download/{pdf_file}">Download PDF</a>
        <h2>Generated Text:</h2>
        <p>{choice_text_all}</p>
    </body>
    </html>
    """
    return render_template_string(page_markup)
# app.py
# BUGFIX: `render_template_string` is called by render_index_page and
# render_result_page above but was never imported, causing a NameError.
from flask import Flask, request, render_template, render_template_string
# NOTE(review): `academic_paper_generator` is an external module — confirm
# it exists; this import shadows the `generate_papers` defined in this file.
from academic_paper_generator import generate_papers

app = Flask(__name__)
# BUGFIX: the view was never registered with Flask — without a route
# decorator, visiting "/" returns 404 and `home` is dead code.
@app.route('/', methods=['GET', 'POST'])
def home():
    """Serve the index form on GET; generate papers for the topic on POST.

    Returns:
        Rendered ``result.html`` after generating papers for the submitted
        topic, or ``index.html`` for a plain GET request.
    """
    if request.method == 'POST':
        topic = request.form.get('topic')
        prompts3 = [
            f"Find a research topic for a PhD in the area of '{topic}'",
            f"Write a detailed proposal on the following research '{topic}'. Make Sure it is free from plagiarism. ",
            f"Identify gaps in the literature on '{topic}'",
            # BUGFIX: "Perviuse action" was a typo that would confuse the model.
            "Generate 10 academic research questions about the previous action",
            f"Generate a list of research hypotheses related to '{topic}'"
        ]
        generate_papers(prompts3)
        return render_template('result.html', topic=topic)
    else:
        return render_template('index.html')
# Run the Flask development server when executed as a script.
# NOTE(review): debug=True is for local development only — do not use in
# production deployments.
if __name__ == '__main__':
    app.run(debug=True)