Ahmed-El-Sharkawy committed on
Commit
af60f36
·
verified ·
1 Parent(s): e0e6103

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -8
app.py CHANGED
@@ -4,8 +4,10 @@ import torch
4
  import torch.nn as nn
5
  import torchvision.transforms as transforms
6
  import torchvision.models as models
7
- from transformers import AutoTokenizer, AutoModelForCausalLM
8
  import os
 
 
 
9
 
10
  # Set device
11
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -66,27 +68,70 @@ preprocess = transforms.Compose([
66
  transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # ImageNet normalization
67
  ])
68
 
 
69
# Load Meta's LLaMA model for generating product descriptions.
def load_llama():
    """Load the LLaMA tokenizer and causal LM and move the model to `device`.

    Returns:
        tuple: ``(tokenizer, model)`` — the Hugging Face tokenizer and the
        causal language model placed on the globally selected device.
    """
    model_name = "meta-llama/Llama-3.2-1B-Instruct"
    # Gated repository: the auth token is read from the environment.
    token = os.getenv("HUGGINGFACE_TOKEN")
    # `use_auth_token=` is deprecated in recent transformers releases;
    # `token=` is the current keyword for the same purpose.
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=token).to(device)
    return tokenizer, model

# Load once at import time so every request reuses the same instances.
llama_tokenizer, llama_model = load_llama()
78
 
79
# Generate product description using LLaMA.
def generate_description(category, subclass):
    """Generate a short marketing description for a product.

    Args:
        category: Top-level product category (e.g. a classifier label).
        subclass: Specific product type within the category.

    Returns:
        str: The generated description text, with the prompt excluded.
    """
    prompt = f"Generate a detailed and engaging product description for a {category} of type {subclass}."

    inputs = llama_tokenizer.encode(prompt, return_tensors="pt").to(device)
    # `max_new_tokens` bounds only the generated continuation. The previous
    # `max_length=100` counted the prompt tokens as well, so generation
    # could be truncated (or warn) depending on prompt length.
    outputs = llama_model.generate(
        inputs,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )
    # Decode only the newly generated tokens: decoding `outputs[0]` whole
    # (as before) echoes the instruction prompt at the start of the result.
    description = llama_tokenizer.decode(
        outputs[0][inputs.shape[-1]:], skip_special_tokens=True
    )
    return description
88
 
89
 
 
 
 
 
 
 
 
 
 
 
 
90
  def classify_image(image):
91
  # Open the image using PIL
92
  image = Image.fromarray(image)
 
4
  import torch.nn as nn
5
  import torchvision.transforms as transforms
6
  import torchvision.models as models
 
7
  import os
8
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline
9
+ import torch
10
+ import gc
11
 
12
  # Set device
13
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
68
  transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # ImageNet normalization
69
  ])
70
 
71
+
72
# Load Meta's LLaMA model for generating product descriptions.
def load_llama():
    """Load the LLaMA tokenizer and causal LM and move the model to `device`.

    Returns:
        tuple: ``(tokenizer, model)``.
    """
    model_name = "meta-llama/Llama-3.2-1B-Instruct"
    # Gated repository: the auth token is read from the environment.
    token = os.getenv("HUGGINGFACE_TOKEN")
    # `use_auth_token=` is deprecated in recent transformers releases;
    # `token=` is the current keyword for the same purpose.
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=token).to(device)
    # NOTE(review): the previous version also constructed a
    # `pipeline("text-generation", ...)` here, but the result was never
    # used or returned — removed as dead work (generation below calls
    # `model.generate` directly).
    return tokenizer, model

# Load once at import time so every request reuses the same instances.
llama_tokenizer, llama_model = load_llama()
86
 
87
# Generate product description using external data and structured format.
def generate_description(category, subclass, data_path='data for product description.txt'):
    """Generate a structured product description grounded in reference data.

    Args:
        category: Top-level product category.
        subclass: Specific product type within the category.
        data_path: Path to the reference-data text file. Defaulted to the
            previously hard-coded path so existing callers are unaffected.

    Returns:
        str: The generated description, without the echoed prompt.

    Raises:
        OSError: If the reference-data file cannot be read.
    """
    # Read the grounding data that the prompt embeds verbatim.
    with open(data_path, 'r', encoding='utf-8') as file:
        file_content = file.read()

    prompt = f"""
[Data]
{file_content}
Role: You are a product description content writer with 10 years of experience in the market. Generate a product description for a {subclass} in the {category} category based on the [Data] provided.
Follow the [Instructions] strictly:
[Instructions]
- Create a detailed product description for a {subclass} in the {category} category based on the [Data].
- Use the structured format below, making each section clear and concise.
- Highlight key product features, technical specifications, and the target audience.
"""

    input_ids = llama_tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    # Keep only `max_new_tokens`: the previous call passed BOTH
    # `max_length=7000` and `max_new_tokens=2000`, and transformers ignores
    # `max_length` (with a warning) when `max_new_tokens` is set.
    generated_texts = llama_model.generate(
        inputs=input_ids,
        max_new_tokens=2000,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )

    # Decode only the newly generated tokens. Decoding the whole sequence
    # (as before) returned the entire prompt — including the full contents
    # of the data file — as the leading part of the "description".
    description = llama_tokenizer.decode(
        generated_texts[0][input_ids.shape[-1]:], skip_special_tokens=True
    )

    # Clean up GPU memory and Python garbage between requests.
    torch.cuda.empty_cache()
    gc.collect()

    return description
122
 
123
 
124
+ # # Generate product description using LLaMA
125
+ # def generate_description(category, subclass):
126
+ # prompt = f"Generate a detailed and engaging product description for a {category} of type {subclass}."
127
+
128
+ # inputs = llama_tokenizer.encode(prompt, return_tensors="pt").to(device)
129
+ # outputs = llama_model.generate(inputs, max_length=100, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
130
+ # description = llama_tokenizer.decode(outputs[0], skip_special_tokens=True)
131
+
132
+ # return description
133
+
134
+
135
  def classify_image(image):
136
  # Open the image using PIL
137
  image = Image.fromarray(image)