looda3131 commited on
Commit
9f66999
·
1 Parent(s): 1f5ce6d

طيب قم بإعداده ليعمل بشكل صحيح على Hugging Face بدون التعديل في كود التطبيق

Browse files
Dockerfile CHANGED
@@ -1,10 +1,26 @@
1
- FROM node:18-alpine
 
2
  WORKDIR /app
3
  COPY package*.json ./
4
  RUN npm install
5
  COPY . .
6
  RUN npm run build
 
 
 
 
 
7
  ENV NODE_ENV=production
8
- # Hugging Face يتطلب المنفذ 7860
 
 
 
 
 
 
 
 
 
9
  EXPOSE 7860
10
- CMD ["npm", "start", "--", "-p", "7860"]
 
 
1
+ # Build Stage
2
+ FROM node:20-slim AS builder
3
  WORKDIR /app
4
  COPY package*.json ./
5
  RUN npm install
6
  COPY . .
7
  RUN npm run build
8
+
9
+ # Production Stage
10
+ FROM node:20-slim AS runner
11
+ WORKDIR /app
12
+
13
  ENV NODE_ENV=production
14
+ ENV PORT=7860
15
+
16
+ # Next.js collects completely anonymous telemetry data about general usage.
17
+ # Learn more here: https://nextjs.org/telemetry
18
+ ENV NEXT_TELEMETRY_DISABLED=1
19
+
20
+ COPY --from=builder /app/public ./public
21
+ COPY --from=builder /app/.next/standalone ./
22
+ COPY --from=builder /app/.next/static ./.next/static
23
+
24
  EXPOSE 7860
25
+
26
+ CMD ["node", "server.js"]
README.md CHANGED
@@ -1,8 +1,16 @@
1
  ---
2
- title: My Next App
3
  emoji: 🚀
4
  colorFrom: blue
5
  colorTo: purple
6
  sdk: docker
7
  pinned: false
8
- ---
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: AvadoraWorld
3
  emoji: 🚀
4
  colorFrom: blue
5
  colorTo: purple
6
  sdk: docker
7
  pinned: false
8
+ ---
9
+
10
+ # AvadoraWorld (أفادورا وورلد)
11
+
12
+ AI-Powered World & Radiant Chat Experience.
13
+
14
+ ## Deployment on Hugging Face
15
+ This application is configured to run on Hugging Face Spaces using Docker.
16
+ Make sure to set your environment variables (Secrets) in the HF Space settings if needed, although the primary keys are embedded as per instructions.
next.config.ts CHANGED
@@ -2,6 +2,7 @@
2
  import type {NextConfig} from 'next';
3
 
4
  const nextConfig: NextConfig = {
 
5
  typescript: {
6
  ignoreBuildErrors: true,
7
  },
 
2
  import type {NextConfig} from 'next';
3
 
4
  const nextConfig: NextConfig = {
5
+ output: 'standalone', // Essential for Docker/HuggingFace deployment
6
  typescript: {
7
  ignoreBuildErrors: true,
8
  },
src/ai/flows/ai-generate-post-ideas.ts CHANGED
@@ -2,7 +2,7 @@
2
  'use server';
3
  /**
4
  * @fileOverview توليد منشورات "صدى الأثير" في avadoraworld.
5
- * يتم تثبيت العدد عند 30 منشوراً مع سجلات تتبع كاملة.
6
  */
7
  import { safeGenerateContent } from '@/lib/ai-client';
8
  import type { GeneratePostIdeasInput, GeneratePostIdeasOutput } from './types';
@@ -34,7 +34,7 @@ export async function generatePostIdeas(
34
  const language = input.language || 'ar';
35
  const FIXED_COUNT = 30; // تثبيت العدد عند 30 منشوراً كما هو مأمور
36
 
37
- console.log(`[POST_GEN]: Initializing generation for ${FIXED_COUNT} posts in AvadoraWorld... Engine: ${input.aiEngine || 'default'}`);
38
 
39
  const prompt = promptTemplate(FIXED_COUNT, location, language, input.age);
40
 
@@ -42,18 +42,18 @@ export async function generatePostIdeas(
42
  const { output, model } = await safeGenerateContent(prompt, input.aiEngine as any);
43
 
44
  if (!output || !Array.isArray(output.posts)) {
45
- console.error("[POST_GEN_ERROR]: AI output missing 'posts' array. Model used:", model);
46
  throw new Error("AI_INVALID_OUTPUT_STRUCTURE");
47
  }
48
 
49
- console.log(`[POST_GEN_SUCCESS]: Generated ${output.posts.length} posts successfully using model: ${model}`);
50
 
51
  return {
52
  posts: output.posts.slice(0, FIXED_COUNT),
53
  modelUsed: model
54
  } as GeneratePostIdeasOutput;
55
  } catch (error: any) {
56
- console.error(`[POST_GEN_CRITICAL_FAILURE]: ${error.message}`);
57
  throw error;
58
  }
59
  }
 
2
  'use server';
3
  /**
4
  * @fileOverview توليد منشورات "صدى الأثير" في avadoraworld.
5
+ * يتم تثبيت العدد عند 30 منشوراً مع سجلات تتبع كاملة (Logs).
6
  */
7
  import { safeGenerateContent } from '@/lib/ai-client';
8
  import type { GeneratePostIdeasInput, GeneratePostIdeasOutput } from './types';
 
34
  const language = input.language || 'ar';
35
  const FIXED_COUNT = 30; // تثبيت العدد عند 30 منشوراً كما هو مأمور
36
 
37
+ console.log(`[AVADORA_LOG]: Initializing generation for ${FIXED_COUNT} posts. Preference Engine: ${input.aiEngine || 'primary'}`);
38
 
39
  const prompt = promptTemplate(FIXED_COUNT, location, language, input.age);
40
 
 
42
  const { output, model } = await safeGenerateContent(prompt, input.aiEngine as any);
43
 
44
  if (!output || !Array.isArray(output.posts)) {
45
+ console.error("[AVADORA_ERROR]: AI output missing 'posts' array. Model: ", model);
46
  throw new Error("AI_INVALID_OUTPUT_STRUCTURE");
47
  }
48
 
49
+ console.log(`[AVADORA_SUCCESS]: Generated ${output.posts.length} posts via ${model}`);
50
 
51
  return {
52
  posts: output.posts.slice(0, FIXED_COUNT),
53
  modelUsed: model
54
  } as GeneratePostIdeasOutput;
55
  } catch (error: any) {
56
+ console.error(`[AVADORA_CRITICAL]: Post generation failed - ${error.message}`);
57
  throw error;
58
  }
59
  }
src/lib/gemini-client.ts CHANGED
@@ -1,11 +1,14 @@
1
 
2
  /**
3
  * @fileOverview المحرك الرئيسي للذكاء الاصطناعي (avadoraworld)
4
- * يدعم: gemini-2.5-flash-lite (أساسي)، Groq (احتياطي)، OpenRouter (متقدم).
 
 
 
5
  */
6
 
7
  const GEMINI_KEY = "AIzaSyA_0i-0yCk9m6ehCIZ87_CKbUMrwlea-_s";
8
- const GEMINI_MODEL = "gemini-2.5-flash-lite"; // الطراز المأمور به حصرياً
9
 
10
  const GROQ_KEY = "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";
11
  const GROQ_MODELS = ["llama-3.3-70b-versatile", "mixtral-8x7b-32768"];
@@ -26,7 +29,7 @@ function extractJsonFromText(text: string): string {
26
  }
27
 
28
  async function callGemini(prompt: string): Promise<string> {
29
- console.log(`[AI_PROVIDER]: Connecting to ${GEMINI_MODEL}...`);
30
  try {
31
  const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/${GEMINI_MODEL}:generateContent?key=${GEMINI_KEY}`, {
32
  method: 'POST',
@@ -39,22 +42,22 @@ async function callGemini(prompt: string): Promise<string> {
39
 
40
  if (!response.ok) {
41
  const err = await response.text();
42
- console.error(`[AI_ERROR_GEMINI]: ${response.status} - ${err}`);
43
  throw new Error(`Gemini Error: ${response.status}`);
44
  }
45
 
46
  const data = await response.json();
47
  const text = data.candidates?.[0]?.content?.parts?.[0]?.text || "";
48
- console.log(`[AI_RESPONSE_GEMINI]: Received response length: ${text.length}`);
49
  return text;
50
  } catch (e: any) {
51
- console.error(`[GEMINI_FETCH_FAIL]: ${e.message}`);
52
  throw e;
53
  }
54
  }
55
 
56
  async function callGroq(prompt: string, model: string): Promise<string> {
57
- console.log(`[AI_PROVIDER]: Connecting to Groq (${model})...`);
58
  try {
59
  const response = await fetch("https://api.groq.com/openai/v1/chat/completions", {
60
  method: 'POST',
@@ -71,22 +74,21 @@ async function callGroq(prompt: string, model: string): Promise<string> {
71
 
72
  if (!response.ok) {
73
  const err = await response.text();
74
- console.error(`[AI_ERROR_GROQ]: ${response.status} - ${err}`);
75
- throw new Error(`Groq Error: ${response.status}`);
76
  }
77
 
78
  const data = await response.json();
79
  const text = data.choices?.[0]?.message?.content || "";
80
- console.log(`[AI_RESPONSE_GROQ]: Received response length: ${text.length}`);
81
  return text;
82
  } catch (e: any) {
83
- console.error(`[GROQ_FETCH_FAIL]: ${e.message}`);
84
  throw e;
85
  }
86
  }
87
 
88
  async function callOpenRouter(prompt: string, model: string): Promise<string> {
89
- console.log(`[AI_PROVIDER]: Connecting to OpenRouter (${model})...`);
90
  try {
91
  const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
92
  method: 'POST',
@@ -94,7 +96,7 @@ async function callOpenRouter(prompt: string, model: string): Promise<string> {
94
  'Authorization': `Bearer ${OPENROUTER_KEY}`,
95
  'Content-Type': 'application/json',
96
  'HTTP-Referer': 'https://avadoraworld.app',
97
- 'X-Title': 'Avadora World'
98
  },
99
  body: JSON.stringify({
100
  model: model,
@@ -105,22 +107,21 @@ async function callOpenRouter(prompt: string, model: string): Promise<string> {
105
 
106
  if (!response.ok) {
107
  const err = await response.text();
108
- console.error(`[AI_ERROR_OPENROUTER]: ${response.status} - ${err}`);
109
- throw new Error(`OpenRouter Error: ${response.status}`);
110
  }
111
 
112
  const data = await response.json();
113
  const text = data.choices?.[0]?.message?.content || "";
114
- console.log(`[AI_RESPONSE_OPENROUTER]: Received response length: ${text.length}`);
115
  return text;
116
  } catch (e: any) {
117
- console.error(`[OPENROUTER_FETCH_FAIL]: ${e.message}`);
118
  throw e;
119
  }
120
  }
121
 
122
  export async function askAI(prompt: string, preferredEngine: 'primary' | 'fallback' | 'advanced' = 'primary'): Promise<{ success: true, answer: string, model: string }> {
123
- console.log(`[AI_CHAIN]: Initializing chain for preference: ${preferredEngine}`);
124
 
125
  let providers: { fn: () => Promise<string>, name: string }[] = [];
126
 
@@ -148,15 +149,15 @@ export async function askAI(prompt: string, preferredEngine: 'primary' | 'fallba
148
  try {
149
  const answer = await provider.fn();
150
  if (answer) {
151
- console.log(`[AI_CHAIN_SUCCESS]: Resolved via ${provider.name}`);
152
  return { success: true, answer, model: provider.name };
153
  }
154
  } catch (e: any) {
155
- console.warn(`[AI_CHAIN_RETRY]: ${provider.name} failed: ${e.message}`);
156
  }
157
  }
158
 
159
- throw new Error("All AI providers in fallback chain failed. Please check your keys and logs.");
160
  }
161
 
162
  export const safeGenerateContent = async (prompt: string, aiEngine: 'primary' | 'fallback' | 'advanced' = 'primary'): Promise<{ output: any, model: string }> => {
@@ -165,12 +166,12 @@ export const safeGenerateContent = async (prompt: string, aiEngine: 'primary' |
165
 
166
  try {
167
  const parsed = JSON.parse(cleanedJsonText);
168
- console.log(`[JSON_PARSE_SUCCESS]: Successfully parsed content from ${result.model}`);
169
  return { output: parsed, model: result.model };
170
  } catch (error: any) {
171
- console.error("[JSON_PARSE_ERROR]: Failed to parse JSON ->", error.message);
172
- console.error("[RAW_OUTPUT_CAUSING_ERROR]:", result.answer);
173
- throw new Error("AI response was not valid JSON. Check console for raw data.");
174
  }
175
  };
176
 
 
1
 
2
  /**
3
  * @fileOverview المحرك الرئيسي للذكاء الاصطناعي (avadoraworld)
4
+ * المحرك المأمور به: gemini-2.5-flash-lite (أساسي)
5
+ * مفتاح Gemini: AIzaSyA_0i-0yCk9m6ehCIZ87_CKbUMrwlea-_s
6
+ * مفتاح Groq: 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
7
+ * مفتاح OpenRouter: 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
8
  */
9
 
10
  const GEMINI_KEY = "AIzaSyA_0i-0yCk9m6ehCIZ87_CKbUMrwlea-_s";
11
+ const GEMINI_MODEL = "gemini-2.5-flash-lite";
12
 
13
  const GROQ_KEY = "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";
14
  const GROQ_MODELS = ["llama-3.3-70b-versatile", "mixtral-8x7b-32768"];
 
29
  }
30
 
31
  async function callGemini(prompt: string): Promise<string> {
32
+ console.log(`[AVADORA_AI]: Attempting ${GEMINI_MODEL} via Google API...`);
33
  try {
34
  const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/${GEMINI_MODEL}:generateContent?key=${GEMINI_KEY}`, {
35
  method: 'POST',
 
42
 
43
  if (!response.ok) {
44
  const err = await response.text();
45
+ console.error(`[AVADORA_ERROR_GEMINI]: ${response.status} - ${err}`);
46
  throw new Error(`Gemini Error: ${response.status}`);
47
  }
48
 
49
  const data = await response.json();
50
  const text = data.candidates?.[0]?.content?.parts?.[0]?.text || "";
51
+ console.log(`[AVADORA_SUCCESS_GEMINI]: Received response (Length: ${text.length})`);
52
  return text;
53
  } catch (e: any) {
54
+ console.warn(`[AVADORA_RETRY_GEMINI]: Failed - ${e.message}`);
55
  throw e;
56
  }
57
  }
58
 
59
  async function callGroq(prompt: string, model: string): Promise<string> {
60
+ console.log(`[AVADORA_AI]: Attempting Groq (${model})...`);
61
  try {
62
  const response = await fetch("https://api.groq.com/openai/v1/chat/completions", {
63
  method: 'POST',
 
74
 
75
  if (!response.ok) {
76
  const err = await response.text();
77
+ throw new Error(`Groq Error: ${response.status} - ${err}`);
 
78
  }
79
 
80
  const data = await response.json();
81
  const text = data.choices?.[0]?.message?.content || "";
82
+ console.log(`[AVADORA_SUCCESS_GROQ]: Received response (Length: ${text.length})`);
83
  return text;
84
  } catch (e: any) {
85
+ console.warn(`[AVADORA_RETRY_GROQ]: Failed - ${e.message}`);
86
  throw e;
87
  }
88
  }
89
 
90
  async function callOpenRouter(prompt: string, model: string): Promise<string> {
91
+ console.log(`[AVADORA_AI]: Attempting OpenRouter (${model})...`);
92
  try {
93
  const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
94
  method: 'POST',
 
96
  'Authorization': `Bearer ${OPENROUTER_KEY}`,
97
  'Content-Type': 'application/json',
98
  'HTTP-Referer': 'https://avadoraworld.app',
99
+ 'X-Title': 'AvadoraWorld'
100
  },
101
  body: JSON.stringify({
102
  model: model,
 
107
 
108
  if (!response.ok) {
109
  const err = await response.text();
110
+ throw new Error(`OpenRouter Error: ${response.status} - ${err}`);
 
111
  }
112
 
113
  const data = await response.json();
114
  const text = data.choices?.[0]?.message?.content || "";
115
+ console.log(`[AVADORA_SUCCESS_OR]: Received response (Length: ${text.length})`);
116
  return text;
117
  } catch (e: any) {
118
+ console.warn(`[AVADORA_RETRY_OR]: Failed - ${e.message}`);
119
  throw e;
120
  }
121
  }
122
 
123
  export async function askAI(prompt: string, preferredEngine: 'primary' | 'fallback' | 'advanced' = 'primary'): Promise<{ success: true, answer: string, model: string }> {
124
+ console.log(`[AVADORA_DIAGNOSTICS]: Processing chain for preference: ${preferredEngine}`);
125
 
126
  let providers: { fn: () => Promise<string>, name: string }[] = [];
127
 
 
149
  try {
150
  const answer = await provider.fn();
151
  if (answer) {
152
+ console.log(`[AVADORA_CHAIN_RESOLVED]: Request satisfied via ${provider.name}`);
153
  return { success: true, answer, model: provider.name };
154
  }
155
  } catch (e: any) {
156
+ console.error(`[AVADORA_CHAIN_FAIL]: Provider ${provider.name} failed. Attempting next...`);
157
  }
158
  }
159
 
160
+ throw new Error("AVADORA_FATAL: All AI providers failed. Check console diagnostic logs.");
161
  }
162
 
163
  export const safeGenerateContent = async (prompt: string, aiEngine: 'primary' | 'fallback' | 'advanced' = 'primary'): Promise<{ output: any, model: string }> => {
 
166
 
167
  try {
168
  const parsed = JSON.parse(cleanedJsonText);
169
+ console.log(`[AVADORA_PARSE_SUCCESS]: Content generated from ${result.model}`);
170
  return { output: parsed, model: result.model };
171
  } catch (error: any) {
172
+ console.error("[AVADORA_PARSE_ERROR]: Invalid JSON structure received from AI.");
173
+ console.error("[AVADORA_RAW_DATA]:", result.answer);
174
+ throw new Error("AI response was not valid JSON.");
175
  }
176
  };
177