legends810 committed on
Commit
e18955d
·
verified ·
1 Parent(s): cb422de

Update app/api/ask-ai/route.ts

Browse files
Files changed (1) hide show
  1. app/api/ask-ai/route.ts +2 -48
app/api/ask-ai/route.ts CHANGED
@@ -1,14 +1,12 @@
1
  /* eslint-disable @typescript-eslint/no-explicit-any */
2
  import type { NextRequest } from "next/server";
3
  import { NextResponse } from "next/server";
4
- import { headers } from "next/headers";
5
  import { GoogleGenerativeAI } from "@google/generative-ai";
6
 
7
  import {
8
  DIVIDER,
9
  FOLLOW_UP_SYSTEM_PROMPT,
10
  INITIAL_SYSTEM_PROMPT,
11
- MAX_REQUESTS_PER_IP,
12
  NEW_PAGE_END,
13
  NEW_PAGE_START,
14
  REPLACE_END,
@@ -16,11 +14,8 @@ import {
16
  UPDATE_PAGE_START,
17
  UPDATE_PAGE_END,
18
  } from "@/lib/prompts";
19
- import MY_TOKEN_KEY from "@/lib/get-cookie-name";
20
  import { Page } from "@/types";
21
 
22
- const ipAddresses = new Map();
23
-
24
  // Gemini Models Configuration
25
  const GEMINI_MODELS = {
26
  "gemini-2.5-flash": "gemini-2.5-flash",
@@ -30,9 +25,6 @@ const GEMINI_MODELS = {
30
  };
31
 
32
  export async function POST(request: NextRequest) {
33
- const authHeaders = await headers();
34
- const userToken = request.cookies.get(MY_TOKEN_KEY())?.value;
35
-
36
  const body = await request.json();
37
  const { prompt, model, redesignMarkdown, previousPrompts, pages } = body;
38
 
@@ -55,24 +47,7 @@ export async function POST(request: NextRequest) {
55
  // Validate model
56
  const selectedModel = GEMINI_MODELS[model as keyof typeof GEMINI_MODELS] || "gemini-2.5-flash";
57
 
58
- // Rate limiting for non-authenticated users
59
- const ip = authHeaders.get("x-forwarded-for")?.includes(",")
60
- ? authHeaders.get("x-forwarded-for")?.split(",")[1].trim()
61
- : authHeaders.get("x-forwarded-for");
62
-
63
- if (!userToken) {
64
- ipAddresses.set(ip, (ipAddresses.get(ip) || 0) + 1);
65
- if (ipAddresses.get(ip) > MAX_REQUESTS_PER_IP) {
66
- return NextResponse.json(
67
- {
68
- ok: false,
69
- openLogin: true,
70
- message: "Log In to continue using the service",
71
- },
72
- { status: 429 }
73
- );
74
- }
75
- }
76
 
77
  try {
78
  const encoder = new TextEncoder();
@@ -158,9 +133,6 @@ export async function POST(request: NextRequest) {
158
  }
159
 
160
  export async function PUT(request: NextRequest) {
161
- const authHeaders = await headers();
162
- const userToken = request.cookies.get(MY_TOKEN_KEY())?.value;
163
-
164
  const body = await request.json();
165
  const { prompt, previousPrompts, selectedElementHtml, model, pages, files } = body;
166
 
@@ -183,24 +155,7 @@ export async function PUT(request: NextRequest) {
183
  // Validate model
184
  const selectedModel = GEMINI_MODELS[model as keyof typeof GEMINI_MODELS] || "gemini-2.5-flash";
185
 
186
- // Rate limiting for non-authenticated users
187
- const ip = authHeaders.get("x-forwarded-for")?.includes(",")
188
- ? authHeaders.get("x-forwarded-for")?.split(",")[1].trim()
189
- : authHeaders.get("x-forwarded-for");
190
-
191
- if (!userToken) {
192
- ipAddresses.set(ip, (ipAddresses.get(ip) || 0) + 1);
193
- if (ipAddresses.get(ip) > MAX_REQUESTS_PER_IP) {
194
- return NextResponse.json(
195
- {
196
- ok: false,
197
- openLogin: true,
198
- message: "Log In to continue using the service",
199
- },
200
- { status: 429 }
201
- );
202
- }
203
- }
204
 
205
  try {
206
  const genAI = new GoogleGenerativeAI(geminiApiKey);
@@ -393,7 +348,6 @@ export async function PUT(request: NextRequest) {
393
  position = replaceEndIndex + REPLACE_END.length;
394
  }
395
 
396
- // Update the main HTML if it's the index page
397
  const mainPageIndex = updatedPages.findIndex(p => p.path === '/' || p.path === '/index' || p.path === 'index');
398
  if (mainPageIndex !== -1) {
399
  updatedPages[mainPageIndex].html = newHtml;
 
1
  /* eslint-disable @typescript-eslint/no-explicit-any */
2
  import type { NextRequest } from "next/server";
3
  import { NextResponse } from "next/server";
 
4
  import { GoogleGenerativeAI } from "@google/generative-ai";
5
 
6
  import {
7
  DIVIDER,
8
  FOLLOW_UP_SYSTEM_PROMPT,
9
  INITIAL_SYSTEM_PROMPT,
 
10
  NEW_PAGE_END,
11
  NEW_PAGE_START,
12
  REPLACE_END,
 
14
  UPDATE_PAGE_START,
15
  UPDATE_PAGE_END,
16
  } from "@/lib/prompts";
 
17
  import { Page } from "@/types";
18
 
 
 
19
  // Gemini Models Configuration
20
  const GEMINI_MODELS = {
21
  "gemini-2.5-flash": "gemini-2.5-flash",
 
25
  };
26
 
27
  export async function POST(request: NextRequest) {
 
 
 
28
  const body = await request.json();
29
  const { prompt, model, redesignMarkdown, previousPrompts, pages } = body;
30
 
 
47
  // Validate model
48
  const selectedModel = GEMINI_MODELS[model as keyof typeof GEMINI_MODELS] || "gemini-2.5-flash";
49
 
50
+ // Rate limiting and login checks have been removed.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
  try {
53
  const encoder = new TextEncoder();
 
133
  }
134
 
135
  export async function PUT(request: NextRequest) {
 
 
 
136
  const body = await request.json();
137
  const { prompt, previousPrompts, selectedElementHtml, model, pages, files } = body;
138
 
 
155
  // Validate model
156
  const selectedModel = GEMINI_MODELS[model as keyof typeof GEMINI_MODELS] || "gemini-2.5-flash";
157
 
158
+ // Rate limiting and login checks have been removed.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
 
160
  try {
161
  const genAI = new GoogleGenerativeAI(geminiApiKey);
 
348
  position = replaceEndIndex + REPLACE_END.length;
349
  }
350
 
 
351
  const mainPageIndex = updatedPages.findIndex(p => p.path === '/' || p.path === '/index' || p.path === 'index');
352
  if (mainPageIndex !== -1) {
353
  updatedPages[mainPageIndex].html = newHtml;