github-actions[bot] committed · Commit 0d60ae9 · 1 parent: fae8ff7

Sync turing folder from GitHub

Files changed:
- turing/api/app.py           +3 -1
- turing/config.py            +28 -0
- turing/modeling/predict.py  +5 -3
turing/api/app.py
CHANGED
@@ -1,17 +1,19 @@
 import base64
+import logging
 import os
 from typing import Literal
 
 from fastapi import FastAPI, HTTPException, Query
 from fastapi.responses import JSONResponse
 import gradio as gr
-from loguru import logger
 
 from turing.api.demo import create_demo
 from turing.api.resource_monitoring import PrometheusBodyMiddleware, instrumentator
 from turing.api.schemas import PredictionRequest, PredictionResponse
 from turing.modeling.predict import ModelInference
 
+logger = logging.getLogger(__name__)
+
 
 def get_logo_b64_src(filename="logo_header.svg"):
     """read SVG and convert it into a string Base64 for HTML."""
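This change swaps loguru's ready-made logger for a standard-library one in the API module. Unlike loguru, logging.getLogger(__name__) emits nothing on its own until a handler is attached somewhere up the hierarchy (here, on the root logger configured in turing/config.py). A minimal standalone sketch of that behaviour, with basicConfig standing in for the project's real root-logger setup:

import logging

# Module-level logger, same pattern as turing/api/app.py after this commit.
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # With no handler configured, records >= WARNING fall back to
    # logging.lastResort (stderr); INFO and DEBUG are silently dropped.
    logger.info("dropped: no handler configured yet")

    # Stand-in for the root-logger configuration done in turing/config.py
    # (illustration only, not the project's actual setup).
    logging.basicConfig(level=logging.INFO, format="%(name)s %(levelname)s %(message)s")
    logger.info("now visible: propagated to the root logger's handler")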
turing/config.py
CHANGED
@@ -1,6 +1,10 @@
+import logging
+import os
 from pathlib import Path
+import sys
 
 from dotenv import load_dotenv
+from logtail import LogtailHandler
 from loguru import logger
 
 # Load environment variables from .env file if it exists
@@ -112,3 +116,27 @@ try:
     logger.add(lambda msg: tqdm.write(msg, end=""), colorize=True)
 except (ModuleNotFoundError, ValueError):
     pass
+
+
+# setup logging for Better Stack using LogtailHandler
+try:
+    better_stack_handler = LogtailHandler(
+        source_token=os.getenv("BETTER_STACK_TOKEN"),
+        host=os.getenv("BETTER_STACK_HOST"),
+    )
+
+    root_logger = logging.getLogger()
+    root_logger.setLevel(logging.INFO)
+
+    console_handler = logging.StreamHandler(sys.stdout)
+    console_handler.setLevel(logging.DEBUG)
+
+    better_stack_handler.setLevel(logging.WARNING)
+
+    root_logger.addHandler(console_handler)
+    root_logger.addHandler(better_stack_handler)
+
+    logging.info("LogtailHandler for Better Stack configured successfully.")
+
+except Exception as e:
+    logging.error(f"Failed to configure LogtailHandler: {e}")
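The new block routes every record through two handlers: the root logger's INFO level acts as the first gate, so DEBUG messages are dropped even though the console handler is set to DEBUG, INFO lands on stdout only, and WARNING and above also reach Better Stack. It assumes BETTER_STACK_TOKEN and BETTER_STACK_HOST are available in the environment (e.g. loaded from .env by load_dotenv). A runnable sketch of the same routing, with a stderr StreamHandler standing in for LogtailHandler so it needs neither the logtail package nor a token:

import logging
import sys

root = logging.getLogger()
root.setLevel(logging.INFO)            # gate 1: DEBUG records never get past this

console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.DEBUG)        # effectively INFO, because of the root level

# Stand-in for LogtailHandler(source_token=..., host=...); the level logic is identical.
better_stack = logging.StreamHandler(sys.stderr)
better_stack.setLevel(logging.WARNING)

root.addHandler(console)
root.addHandler(better_stack)

demo = logging.getLogger("turing.demo")  # hypothetical module name
demo.debug("dropped by the root level")
demo.info("stdout only")
demo.warning("stdout and the Better Stack stand-in")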
turing/modeling/predict.py
CHANGED
@@ -1,8 +1,8 @@
 import importlib
+import logging
 import warnings
 
 import dagshub
-from loguru import logger
 import mlflow
 import numpy as np
 import pandas as pd
@@ -12,6 +12,8 @@ from turing.dataset import DatasetManager
 from turing.modeling.model_selector import get_best_model_info
 from turing.modeling.models.codeBerta import CodeBERTa
 
+logger = logging.getLogger(__name__)
+
 
 class ModelInference:
     # Model Configuration (Fallback Registry)
@@ -120,7 +122,7 @@ class ModelInference:
         mlflow.artifacts.download_artifacts(
             run_id=run_id, artifact_path=artifact_name, dst_path=str(local_path.parent)
         )
-        logger.
+        logger.info(f"Model downloaded and cached at: {local_path}")
 
         return str(local_path)
 
@@ -159,7 +161,7 @@ class ModelInference:
 
         # Load Model and store in cache
         self.loaded_models[language] = model_class(language=language, path=model_path)
-        logger.
+        logger.info(f"Model for {language} loaded into memory.")
 
         model = self.loaded_models[language]
 
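As in app.py, predict.py now logs through a module-level stdlib logger, so the two new logger.info calls only produce output once the root logger from turing/config.py (or any other handler setup) is in place. A short illustration, using basicConfig as a stand-in for that setup and an invented path purely for demonstration:

import logging

# Stand-in for the root-logger setup in turing/config.py (plain console only;
# the real code also attaches a LogtailHandler).
logging.basicConfig(level=logging.INFO, format="%(name)s %(levelname)s %(message)s")

# Same pattern as the committed code: a logger named after the module.
logger = logging.getLogger("turing.modeling.predict")

local_path = "models/python/model.safetensors"  # illustrative path, not from the repo
logger.info(f"Model downloaded and cached at: {local_path}")
# Equivalent lazy form: the message is only built if the record passes the level filter.
logger.info("Model downloaded and cached at: %s", local_path)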