# First cell: Install required dependencies
!pip install transformers accelerate gradio --quiet

# Second cell: Import required libraries and check GPU
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr

# Check for GPU availability
if torch.cuda.is_available():
    device = torch.device("cuda")
    print("Using GPU:", torch.cuda.get_device_name(0))
    print(f"Memory Available: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
else:
    device = torch.device("cpu")
    print("Using CPU")

# Third cell: Load model and tokenizer
def initialize_model():
    model_name = "microsoft/Phi-3.5-mini-instruct"
    print(f"Loading {model_name}...")

    # The tokenizer loads the same way regardless of device
    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        trust_remote_code=True
    )

    if device.type == "cuda":
        # Half precision keeps the ~3.8B-parameter model within typical
        # Colab GPU memory; device_map="auto" lets accelerate place it
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True
        )
    else:
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            trust_remote_code=True
        ).to(device)

    return model, tokenizer

try:
    model, tokenizer = initialize_model()

    # Create the generation pipeline. When the model was loaded with
    # device_map="auto", it is already placed on the GPU, so passing a
    # `device` argument here as well would conflict; the pipeline picks
    # up the model's placement on its own.
    problem_solver_pipeline = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer
    )
    print("Model loaded successfully!")
except Exception as e:
    print(f"Error loading model: {str(e)}")
    raise
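
# Optional: on GPUs with limited memory, the model can instead be loaded in
# 4-bit via bitsandbytes. A minimal sketch, not part of the flow above
# (assumes `pip install bitsandbytes`); swap it into initialize_model() in
# place of the float16 load if needed:
#
# from transformers import BitsAndBytesConfig
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_compute_dtype=torch.float16
# )
# model = AutoModelForCausalLM.from_pretrained(
#     model_name,
#     quantization_config=bnb_config,
#     device_map="auto",
#     trust_remote_code=True
# )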

# Fourth cell: Define analysis function with improved prompting for Phi-3.5
def analyze_idea(idea, max_new_tokens=500, temperature=0.7):
    """
    Analyze an input idea with the Phi-3.5 model and return a formatted
    problem/solution breakdown.
    """
    if not idea.strip():
        return "Please enter an idea to analyze."
    
    prompt = f"""Instruction: Analyze the following business idea and provide a structured analysis identifying core problems and their solutions.

Input idea: "{idea}"

Please structure your response in the following format:
1. List the main problems that could arise
2. Provide specific solutions for each problem
3. Give a brief summary of the overall analysis

Response:"""
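
    # Note: Phi-3.5-mini-instruct is a chat model, so wrapping this prompt
    # with tokenizer.apply_chat_template([...], tokenize=False,
    # add_generation_prompt=True) might yield better-structured output;
    # the plain instruction prompt above also works and is kept for simplicity.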

    try:
        # Generate the response. max_new_tokens bounds the completion itself,
        # so a long prompt cannot eat into the response budget the way
        # max_length (which counts prompt tokens too) would.
        response = problem_solver_pipeline(
            prompt,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            top_p=0.9
        )

        output = response[0]["generated_text"]

        # The pipeline returns prompt + completion; strip the prompt prefix
        # rather than using a substring replace, which could also hit text
        # inside the completion.
        if output.startswith(prompt):
            output = output[len(prompt):]

        # Format the final output
        formatted_output = f"""#### Input Idea:
"{idea}"

#### Analysis:
{output.strip()}"""

        return formatted_output

    except Exception as e:
        return f"An error occurred: {str(e)}"
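
# Quick sanity check (optional; the example idea is made up for illustration):
# print(analyze_idea("A meal-prep delivery service for college students",
#                    max_new_tokens=200, temperature=0.7))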

# Fifth cell: Create and launch Gradio interface
def create_gradio_interface():
    interface = gr.Interface(
        fn=analyze_idea,
        inputs=[
            gr.Textbox(
                lines=5,
                placeholder="Enter your business idea here. For example: 'A mobile app that connects local food trucks with nearby customers in real-time.'",
                label="Your Business Idea"
            ),
            gr.Slider(
                minimum=100,
                maximum=1000,
                value=500,
                step=50,
                label="Response Length (max new tokens)"
            ),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                label="Creativity (Temperature)"
            )
        ],
        outputs=gr.Textbox(
            label="Analysis Results",
            lines=12
        ),
        title="Business Idea Analyzer powered by Phi-3.5",
        description="Enter your business idea, and this AI-powered tool will analyze potential problems, suggest solutions, and provide a summary.",
        examples=[
            ["An AI-powered platform for personalized workout recommendations based on real-time fitness tracking data.", 500, 0.7],
            ["A subscription service for sustainable, package-free household products with local delivery.", 500, 0.7],
            ["A marketplace connecting local artists with businesses looking for unique office artwork.", 500, 0.7]
        ]
    )
    return interface

# Launch the interface. share=True creates a temporary public URL (useful in
# Colab); debug=True surfaces generation errors in the notebook output.
interface = create_gradio_interface()
interface.launch(share=True, debug=True)