|
|
import axios from "axios"; |
|
|
import { getDefaultHeaders } from "./session"; |
|
|
|
|
|
|
|
|
// True when the app is served from a Hugging Face Space deployment
// (hostname like "*.hf.space"); used to pick the API base URL below.
const isHFSpace = window.location.hostname.includes("hf.space");

// Base URL for backend calls. On HF Spaces the backend is served from
// the same origin, so an empty string (relative URLs) is used; otherwise
// fall back to the Vite env var or the local dev server.
const API_URL = isHFSpace
  ? ""
  : import.meta.env.VITE_API_URL || "http://localhost:8000";
|
|
|
|
|
|
|
|
// Shared axios instance for all backend calls.
// The original declared `baseURL` twice (a literal key plus a conditional
// spread that overrode it on HF Spaces); collapsed into one ternary with
// identical behavior: Spaces talk to the page's own origin, everything
// else uses the configured API_URL.
const api = axios.create({
  baseURL: isHFSpace ? window.location.origin : API_URL,
});
|
|
|
|
|
|
|
|
// Attach a session-ID guard to every outgoing request.
// A few routes may legitimately run before a session exists; every other
// request must carry an "x-session-id" header or it is rejected locally
// before ever hitting the network.
api.interceptors.request.use((config) => {
  // Routes that are allowed to run without a session.
  const noSessionRoutes = [
    "/api/universe/generate",
    "/api/generate-image",
    "/api/text-to-speech",
  ];

  const requiresSession = !noSessionRoutes.includes(config.url);
  if (requiresSession && !config.headers["x-session-id"]) {
    throw new Error("Session ID is required for this request");
  }

  return config;
});
|
|
|
|
|
|
|
|
// Normalize an axios failure into a single user-facing Error.
// Logs full diagnostic details to the console, then throws a message
// that depends on which stage of the request failed. Never returns
// normally — callers use `return handleApiError(error)` purely so the
// compiler sees a terminal statement.
const handleApiError = (error) => {
  console.error("API Error:", {
    status: error.response?.status,
    statusText: error.response?.statusText,
    data: error.response?.data,
    config: {
      method: error.config?.method,
      url: error.config?.url,
      data: error.config?.data,
    },
  });

  if (error.response) {
    // The server answered with an error status: prefer its own message.
    const { data, status, statusText } = error.response;
    throw new Error(data?.message || `Erreur ${status}: ${statusText}`);
  }

  if (error.request) {
    // The request went out but no response ever came back.
    throw new Error("Aucune réponse du serveur");
  }

  // The request never left: something failed while building it.
  throw new Error(
    "Une erreur est survenue lors de la configuration de la requête"
  );
};
|
|
|
|
|
|
|
|
// --- Module-level narration audio state (shared by the helpers below) ---
let audioContext = null; // lazily-created Web Audio context (see initAudioContext)
let audioSource = null; // currently playing AudioBufferSourceNode, if any
let isSoundEnabled = true; // user toggle for narration playback
let hasUserInteraction = false; // browsers block audio until a user gesture occurs
|
|
|
|
|
|
|
|
// Lazily create (or reuse) the shared AudioContext.
// Returns null when the user has not interacted yet (browser autoplay
// policy forbids audio before a gesture) or when construction fails.
// Resumes the context if it was created in a suspended state.
const initAudioContext = () => {
  if (!hasUserInteraction) {
    console.warn("Audio context cannot be initialized before user interaction");
    return null;
  }

  if (audioContext) {
    return audioContext;
  }

  try {
    const AudioCtor = window.AudioContext || window.webkitAudioContext;
    audioContext = new AudioCtor();
    if (audioContext.state === "suspended") {
      audioContext.resume();
    }
  } catch (error) {
    console.error("Failed to initialize audio context:", error);
    return null;
  }

  return audioContext;
};
|
|
|
|
|
|
|
|
// Record that the user has interacted with the page (unlocking audio),
// and resume the audio context if it was created earlier but suspended.
const handleUserInteraction = () => {
  hasUserInteraction = true;
  if (audioContext?.state === "suspended") {
    audioContext.resume();
  }
};
|
|
|
|
|
|
|
|
// Enable or disable narration sound. Disabling stops any in-flight
// playback and suspends the audio context to release the output device.
// Fix: the original called audioSource.stop() and audioContext.suspend()
// unguarded — stop() throws InvalidStateError when the source never
// started or already stopped. Guarded here for consistency with
// stopNarration, which already wraps both calls in try/catch.
const setSoundEnabled = (enabled) => {
  isSoundEnabled = enabled;
  if (enabled) {
    return;
  }
  if (audioSource) {
    try {
      audioSource.stop();
    } catch (error) {
      console.warn("Error stopping narration:", error);
    }
    audioSource = null;
  }
  if (audioContext) {
    try {
      audioContext.suspend();
    } catch (error) {
      console.warn("Error suspending audio context:", error);
    }
  }
};
|
|
|
|
|
|
|
|
// Public story API: chat-driven narrative progression plus narration
// audio playback, built on the shared axios instance and the
// module-level Web Audio state above.
export const storyApi = {
  // Start (or restart) a story for the given session.
  // Returns the backend's chat payload (first story segment + choices).
  start: async (sessionId) => {
    try {
      const response = await api.post(
        "/api/chat",
        {
          message: "restart",
        },
        {
          headers: getDefaultHeaders(sessionId),
        }
      );
      return response.data;
    } catch (error) {
      return handleApiError(error);
    }
  },

  // Advance the story by selecting one of the proposed choices.
  makeChoice: async (choiceId, sessionId) => {
    try {
      const response = await api.post(
        "/api/chat",
        {
          message: "choice",
          choice_id: choiceId,
        },
        {
          headers: getDefaultHeaders(sessionId),
        }
      );
      return response.data;
    } catch (error) {
      return handleApiError(error);
    }
  },

  // Advance the story with free-form text written by the player.
  makeCustomChoice: async (customText, sessionId) => {
    try {
      const response = await api.post(
        "/api/chat",
        {
          message: "custom_choice",
          custom_text: customText,
        },
        {
          headers: getDefaultHeaders(sessionId),
        }
      );
      return response.data;
    } catch (error) {
      return handleApiError(error);
    }
  },

  // Generate an illustration for the given prompt.
  // sessionId is optional: /api/generate-image is on the interceptor's
  // no-session allow-list, so headers are only attached when provided.
  generateImage: async (
    prompt,
    width = 512,
    height = 512,
    sessionId = null
  ) => {
    try {
      const config = {
        prompt,
        width,
        height,
      };

      const options = {};
      if (sessionId) {
        options.headers = getDefaultHeaders(sessionId);
      }

      const response = await api.post("/api/generate-image", config, options);
      return response.data;
    } catch (error) {
      return handleApiError(error);
    }
  },

  // Fetch TTS audio for `text` and play it through the Web Audio context.
  // Resolves when playback finishes (or immediately when sound is off /
  // no user gesture has occurred yet). Note the repeated isSoundEnabled
  // checks: sound can be toggled off while an await is in flight, so the
  // flag is re-checked after each asynchronous step.
  playNarration: async (text, sessionId) => {
    try {
      // Autoplay policy: bail out silently if sound is off or the user
      // has not interacted with the page yet.
      if (!isSoundEnabled || !hasUserInteraction) {
        return;
      }

      // Stop any narration that is still playing. NOTE(review): this
      // stop() is unguarded, unlike stopNarration — it can throw if the
      // source already ended; the surrounding try/catch absorbs it.
      if (audioSource) {
        audioSource.stop();
        audioSource = null;
      }

      audioContext = initAudioContext();
      if (!audioContext) {
        return;
      }

      // Request the synthesized speech from the backend.
      const response = await api.post(
        "/api/text-to-speech",
        {
          text,
          voice_id: "21m00Tcm4TlvDq8ikWAM",
        },
        {
          headers: getDefaultHeaders(sessionId),
        }
      );

      if (!response.data.success) {
        throw new Error("Failed to generate audio");
      }

      // Sound may have been disabled while awaiting the network call.
      if (!isSoundEnabled) {
        return;
      }

      // Decode the base64 payload into a raw byte buffer.
      const audioData = atob(response.data.audio_base64);
      const arrayBuffer = new ArrayBuffer(audioData.length);
      const view = new Uint8Array(arrayBuffer);
      for (let i = 0; i < audioData.length; i++) {
        view[i] = audioData.charCodeAt(i);
      }

      const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);

      // Re-check again: decoding is async too.
      if (!isSoundEnabled) {
        return;
      }

      // Wire up a one-shot source node and start playback.
      audioSource = audioContext.createBufferSource();
      audioSource.buffer = audioBuffer;
      audioSource.connect(audioContext.destination);
      audioSource.start(0);

      // Resolve when playback ends (stop() also fires onended, so an
      // external stopNarration call resolves this promise as well).
      return new Promise((resolve) => {
        audioSource.onended = () => {
          audioSource = null;
          resolve();
        };
      });
    } catch (error) {
      console.error("Error playing narration:", error);
      throw error;
    }
  },

  // Stop any playing narration and suspend the audio context.
  // Both operations are best-effort: failures are logged, not thrown.
  stopNarration: () => {
    if (audioSource) {
      try {
        audioSource.stop();
      } catch (error) {
        console.warn("Error stopping narration:", error);
      }
      audioSource = null;
    }
    if (audioContext) {
      try {
        audioContext.suspend();
      } catch (error) {
        console.warn("Error suspending audio context:", error);
      }
    }
  },

  // Re-exported audio helpers so callers can manage audio state
  // through the storyApi object.
  initAudioContext,
  handleUserInteraction,
  setSoundEnabled,
};
|
|
|
|
|
|
|
|
export const WS_URL = import.meta.env.VITE_WS_URL || "ws://localhost:8000/ws"; |
|
|
|
|
|
// Universe-related endpoints. Note that /api/universe/generate is on the
// request interceptor's no-session allow-list, so no headers are needed.
export const universeApi = {
  // Generate a brand-new story universe; returns the backend payload.
  generate: async () => {
    try {
      const { data } = await api.post("/api/universe/generate");
      return data;
    } catch (error) {
      return handleApiError(error);
    }
  },

  // Fetch the list of available visual styles.
  getStyles: async () => {
    try {
      const { data } = await api.get("/api/universe/styles");
      return data;
    } catch (error) {
      return handleApiError(error);
    }
  },
};
|
|
|
|
|
|
|
|
export default api; |
|
|
|