Ashar086 committed on
Commit
85ea247
·
verified ·
1 Parent(s): b7307ed

Update ai_processing.py

Browse files
Files changed (1) hide show
  1. ai_processing.py +46 -10
ai_processing.py CHANGED
@@ -1,8 +1,30 @@
1
- import random
2
- from llama_cpp import Llama
3
 
4
  # Initialize Llama model
5
- llm = Llama(model_path="path/to/llama-3.2-model.bin")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
  def generate_company_profile(user_data):
8
  prompt = f"""
@@ -17,13 +39,27 @@ def generate_company_profile(user_data):
17
  Company Profile:
18
  """
19
 
20
- response = llm(prompt, max_tokens=200)
21
- return response['choices'][0]['text'].strip()
22
 
23
  def calculate_fundraising_score(user_data):
24
- # In a real implementation, this would use more sophisticated analysis
25
- # For this example, we'll use a random score
26
- return random.randint(50, 95)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  def generate_recommendations(user_data):
29
  prompt = f"""
@@ -38,6 +74,6 @@ def generate_recommendations(user_data):
38
  Recommendations:
39
  """
40
 
41
- response = llm(prompt, max_tokens=300)
42
- recommendations = response['choices'][0]['text'].strip().split('\n')
43
  return [rec.strip() for rec in recommendations if rec.strip()]
 
1
+ import torch
2
+ from transformers import MllamaForConditionalGeneration, AutoProcessor
3
 
4
  # Initialize Llama model
5
+ model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
6
+ model = MllamaForConditionalGeneration.from_pretrained(
7
+ model_id,
8
+ torch_dtype=torch.bfloat16,
9
+ device_map="auto",
10
+ )
11
+ processor = AutoProcessor.from_pretrained(model_id)
12
+
13
+ def generate_text(prompt, max_new_tokens=200):
14
+ messages = [
15
+ {"role": "user", "content": [
16
+ {"type": "text", "text": prompt}
17
+ ]}
18
+ ]
19
+ input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
20
+ inputs = processor(
21
+ input_text,
22
+ add_special_tokens=False,
23
+ return_tensors="pt"
24
+ ).to(model.device)
25
+
26
+ output = model.generate(**inputs, max_new_tokens=max_new_tokens)
27
+ return processor.decode(output[0], skip_special_tokens=True)
28
 
29
  def generate_company_profile(user_data):
30
  prompt = f"""
 
39
  Company Profile:
40
  """
41
 
42
+ return generate_text(prompt, max_new_tokens=200)
 
43
 
44
  def calculate_fundraising_score(user_data):
45
+ prompt = f"""
46
+ Based on the following company information, provide a fundraising probability score between 0 and 100:
47
+
48
+ Project Description: {user_data['project_description']}
49
+ Industry: {user_data['industry']}
50
+ Target Market: {user_data['market']}
51
+ Location: {user_data['location']}
52
+ Number of Founders: {len(user_data['founders_info'])}
53
+
54
+ Fundraising Probability Score (0-100):
55
+ """
56
+
57
+ response = generate_text(prompt, max_new_tokens=10)
58
+ try:
59
+ score = int(response.strip())
60
+ return max(0, min(100, score)) # Ensure the score is between 0 and 100
61
+ except ValueError:
62
+ return 50 # Default score if parsing fails
63
 
64
  def generate_recommendations(user_data):
65
  prompt = f"""
 
74
  Recommendations:
75
  """
76
 
77
+ response = generate_text(prompt, max_new_tokens=300)
78
+ recommendations = response.strip().split('\n')
79
  return [rec.strip() for rec in recommendations if rec.strip()]