Spaces:
Runtime error
Runtime error
Commit ·
2175889
1
Parent(s): 6738c00
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
This is the main file that launches the GUI interface for the software.
'''
import warnings
from huggingface_hub import InferenceClient
import gradio as gr

# Silence library warnings so they don't clutter the Space logs.
warnings.filterwarnings('ignore')

# Initialize the language model
# NOTE(review): this is a remote inference client, not a local pipeline —
# every call to generate_script performs a network request.
generator = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
| 10 |
+
def generate_script(host_name, listener_location, causes_climate_change, co2_level, effects_climate_change,
                    sea_level_rise, warming_rate, potential_solutions, individual_role, call_to_action,
                    TOPIC="climate change", DESCRIPTION=""):
    """Generate a radio-show script about TOPIC via the hosted language model.

    Args:
        host_name: Name of the radio host, spliced into the intro.
        listener_location: Audience location, used in several templates.
        causes_climate_change: Text describing the causes of TOPIC.
        co2_level: Current atmospheric CO2 level (number or text).
        effects_climate_change: Text describing the effects of TOPIC.
        sea_level_rise: Sea-level rise rate per year.
        warming_rate: Warming rate per decade.
        potential_solutions: Text describing mitigation steps.
        individual_role: Currently unused in the prompt; accepted so the
            Gradio input list lines up with this signature.
        call_to_action: Currently unused in the prompt (see above).
        TOPIC: Subject of the script. Defaults so the function stays
            callable when the GUI supplies only the first ten inputs.
        DESCRIPTION: Extra context appended to the prompt.

    Returns:
        The generated script, a warning string if any section exceeds the
        200-word target, or an error string (also appended to
        ./error_log.txt) if generation fails.
    """
    try:
        # Variables and template definitions...
        introduction_template = f"{host_name}, good morning! This is {listener_location}'s local radio station. Today we're talking about an issue that affects us all - {TOPIC}. It's a pressing issue that requires our immediate attention..."
        causes_template = f"The causes of {TOPIC} are {causes_climate_change}. Today, the level of CO2 in our atmosphere is {co2_level}, which is concerning..."
        effects_template = f"These activities result in {effects_climate_change}, leading to drastic changes in our environment. For instance, sea levels are rising at a rate of {sea_level_rise} per year, and global temperatures are increasing at a rate of {warming_rate} per decade..."
        solutions_template = f"But don't worry, there are solutions. {potential_solutions} are all steps we can take to mitigate these effects..."
        role_template = f"Each one of us plays a role in combating {TOPIC}. Even small actions can make a big difference. In fact, our location, {listener_location}, is particularly vulnerable to {TOPIC} due to its geographical features..."
        action_template = f"So, {listener_location}, why wait? Start taking steps today towards a greener future. Support local businesses that prioritize sustainability, reduce your carbon footprint, and voice your opinion to policy makers..."
        summary_template = f"In conclusion, {TOPIC} is a serious issue that requires our immediate attention. But by understanding its causes, effects, and potential solutions, we can all play a part in mitigating its impact. Thank you for joining us today, and remember, every small action counts!"

        # Combine templates based on the DESCRIPTION
        prompt_template = f"""{introduction_template} {causes_template} {effects_template} {solutions_template} {role_template} {action_template} {summary_template}
TOPIC: {TOPIC}. DESCRIPTION: {DESCRIPTION}"""

        # Generate the script using the language model
        response = generator.text_generation(prompt_template)
        # BUG FIX: InferenceClient.text_generation returns a plain str by
        # default; calling .get() on it raised AttributeError, so every call
        # fell into the except branch. Handle str first, then keep the
        # list/dict fallbacks for raw endpoint payloads.
        if isinstance(response, str):
            script = response
        elif isinstance(response, list):
            script = response[0].get('generated_text', '')
        else:
            script = response.get('generated_text', '')

        # Split the script into sections (one per output line) and warn if
        # any section exceeds the 200-word target length.
        sections = script.split("\n")
        word_counts = [len(section.split()) for section in sections]
        for i, count in enumerate(word_counts):
            if count > 200:
                return f"Warning: Section {i + 1} exceeds the target word count. You may need to shorten this section."
        return script
    except Exception as e:
        error_message = f"Error: {e}"
        # Save error log to a file so failures survive Space restarts.
        with open("./error_log.txt", "a") as log_file:
            log_file.write(error_message + "\n")
        return error_message
|
| 46 |
+
# Gradio interface setup: one input component per generate_script parameter.
# BUG FIX: the original list had only 10 components while generate_script
# declares 12 parameters, so Gradio could not invoke the function. Topic and
# Description inputs are added to complete the mapping.
iface = gr.Interface(fn=generate_script,
                     inputs=[gr.Textbox(label="Host Name", value="John"),
                             gr.Textbox(label="Listener Location", value="City"),
                             gr.Textbox(label="Causes Climate Change", value="human activities"),
                             gr.Number(label="CO2 Level", value=400),
                             gr.Textbox(label="Effects Climate Change", value="rising temperatures"),
                             gr.Number(label="Sea Level Rise", value=0.1),
                             gr.Number(label="Warming Rate", value=0.2),
                             gr.Textbox(label="Potential Solutions", value="renewable energy"),
                             gr.Textbox(label="Individual Role", value="reduce carbon footprint"),
                             gr.Textbox(label="Call To Action", value="act now"),
                             gr.Textbox(label="Topic", value="climate change"),
                             gr.Textbox(label="Description", value="")],
                     outputs="text")

# Launch the interface
iface.launch(debug=True)
|