jsakshi commited on
Commit
a639353
·
verified ·
1 Parent(s): d9ef514

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -41
app.py CHANGED
@@ -349,8 +349,6 @@ with gr.Blocks(title="Blog Generator & Publisher") as app:
349
 
350
  if __name__ == "__main__":
351
  app.launch()'''
352
-
353
-
354
  import gradio as gr
355
  import os
356
  import time
@@ -358,69 +356,60 @@ import tempfile
358
  import requests
359
  from PIL import Image
360
  from io import BytesIO
361
- import markdown
362
  import re
363
- import json
364
- import random
365
  from datetime import datetime
 
366
 
367
- # Hugging Face Inference API endpoints
 
 
 
 
368
  TEXT_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
369
  IMAGE_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
370
- HEADERS = {"Authorization": "HF_TOKEN"} # Replace with your token
371
 
372
  def generate_blog_content(topic, tone="professional", length="medium"):
373
  current_date = datetime.now().strftime("%B %d, %Y")
374
  reading_time = {"short": "5-8", "medium": "8-12", "long": "15-20"}[length]
375
 
376
- prompt = f"""Write a comprehensive blog post about {topic} with this structure:
377
- # [Main Title]
378
- ## [Subtitle]
379
- **Published**: {current_date}
380
- **Reading time**: {reading_time} minutes
381
-
382
- ### Table of Contents
383
- - [Introduction](#introduction)
384
- - [Section 1](#section-1)
385
- - [Section 2](#section-2)
386
- - [Conclusion](#conclusion)
387
-
388
- ## Introduction
389
- [Engaging opening paragraph with statistics]
390
-
391
- ## Section 1: [Section Title]
392
- [Detailed content with examples and data]
393
-
394
- ## Section 2: [Section Title]
395
- [Technical details or case studies]
396
-
397
- ## Conclusion
398
- [Summary and final thoughts]
399
-
400
- Use {tone} tone and include:
401
  - Markdown formatting
402
- - Bullet points
403
- - Realistic statistics
404
- - 3 subsections per section"""
405
 
406
  payload = {
407
  "inputs": prompt,
408
  "parameters": {
409
- "max_length": 2000,
410
  "temperature": 0.7,
411
- "do_sample": True
412
  }
413
  }
414
 
415
  try:
416
  response = requests.post(TEXT_API_URL, headers=HEADERS, json=payload)
 
 
 
 
 
 
 
417
  response.raise_for_status()
 
418
  result = response.json()
419
- if isinstance(result, list) and len(result) > 0:
420
- return result[0]['generated_text'].split(prompt)[-1].strip()
421
- return f"Error: {result.get('error', 'Unknown error')}"
422
  except Exception as e:
423
- return f"Error generating content: {str(e)}"
 
424
 
425
  def generate_featured_image(topic):
426
  prompt = f"Professional digital illustration for blog about {topic}, high quality, trending on artstation"
 
349
 
350
  if __name__ == "__main__":
351
  app.launch()'''
 
 
352
  import gradio as gr
353
  import os
354
  import time
 
356
  import requests
357
  from PIL import Image
358
  from io import BytesIO
 
359
  import re
 
 
360
  from datetime import datetime
361
+ from dotenv import load_dotenv
362
 
363
# Load credentials from a local .env file into the process environment.
load_dotenv()

# Hugging Face configuration.
# NOTE(review): the .env variable is named "hftken" (sic). That spelling is
# kept for backward compatibility, but the conventional name HF_TOKEN is
# accepted as a fallback so a standard .env file also works.
HF_TOKEN = os.getenv("hftken") or os.getenv("HF_TOKEN")

TEXT_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
IMAGE_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"

# The HF Inference API requires a "Bearer <token>" Authorization header.
HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
371
 
372
def generate_blog_content(topic, tone="professional", length="medium"):
    """Generate a Markdown blog post about *topic* via the HF Inference API.

    Parameters:
        topic: Subject of the blog post, interpolated into the prompt.
        tone: Writing tone interpolated into the prompt.
        length: One of "short" / "medium" / "long"; selects the advertised
            reading time. Any other value raises KeyError (unchanged contract).

    Returns:
        The generated Markdown text, or a string starting with "Error:" on
        any failure (network error, HTTP error, unexpected API response).
    """
    current_date = datetime.now().strftime("%B %d, %Y")
    reading_time = {"short": "5-8", "medium": "8-12", "long": "15-20"}[length]

    # Mistral instruction format: the prompt is "<s>[INST] ... [/INST]".
    # The closing </s> token terminates the *model's* answer, so it must not
    # be placed in the prompt (doing so tells the model the turn is over).
    prompt = f"""<s>[INST] Write a professional blog post about {topic} with this structure:
- Title and subtitle
- Introduction with statistics
- 2 main sections with subsections
- Conclusion
- Markdown formatting
- Published date: {current_date}
- Reading time: {reading_time} minutes
Use {tone} tone [/INST]"""

    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 1024,
            "temperature": 0.7,
            "return_full_text": False,  # exclude the prompt from the output
        },
    }

    response = None  # sentinel so the except-branch can report the body safely
    try:
        response = requests.post(TEXT_API_URL, headers=HEADERS, json=payload)

        # 503 means the model is still loading. Wait the advertised time —
        # defaulting if the field is absent and capping it so a bogus
        # estimate cannot hang the app — then retry once.
        if response.status_code == 503:
            estimate = response.json().get("estimated_time", 20)
            time.sleep(min(estimate, 120))
            response = requests.post(TEXT_API_URL, headers=HEADERS, json=payload)

        response.raise_for_status()
        result = response.json()

        # A successful response is a list of {"generated_text": ...} dicts;
        # anything else (e.g. an {"error": ...} dict) is reported as an
        # error string instead of raising IndexError/KeyError.
        if isinstance(result, list) and result:
            return result[0]["generated_text"]
        return f"Error: {result.get('error', 'Unexpected API response')}"
    except Exception as e:
        detail = response.text if response is not None else ""
        return f"Error: {str(e)} - {detail}"
413
 
414
  def generate_featured_image(topic):
415
  prompt = f"Professional digital illustration for blog about {topic}, high quality, trending on artstation"