repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated
|---|---|---|---|---|---|---|---|---|
adrien-bon/bevy_ecs_tiled | https://github.com/adrien-bon/bevy_ecs_tiled/blob/82018c14762a9363f6813b41fd4659266a337e3b/examples/helper/anchor.rs | examples/helper/anchor.rs | use bevy::prelude::*;
use bevy_ecs_tilemap::prelude::TilemapAnchor;
#[allow(dead_code)]
/// Rotate the tilemap anchor one step clockwise, also cycling through the
/// `Custom` and `None` variants for completeness.
pub fn rotate_right(anchor: &TilemapAnchor) -> TilemapAnchor {
use TilemapAnchor::*;
match anchor {
TopLeft => TopCenter,
TopCenter => TopRight,
TopRight => CenterRight,
CenterRight => BottomRight,
BottomRight => BottomCenter,
BottomCenter => BottomLeft,
BottomLeft => CenterLeft,
CenterLeft => Center,
Center => Custom(Vec2::splat(0.25)),
Custom(_) => None,
None => TopLeft,
}
}
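// A minimal usage sketch (an assumption based on the match arms above, not
// part of the original example):
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn rotates_clockwise_from_top_left() {
        let next = rotate_right(&TilemapAnchor::TopLeft);
        assert!(matches!(next, TilemapAnchor::TopCenter));
        assert!(matches!(rotate_right(&next), TilemapAnchor::TopRight));
    }
}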
| rust | MIT | 82018c14762a9363f6813b41fd4659266a337e3b | 2026-01-04T20:23:27.337930Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/actix/src/main.rs | api/actix/src/main.rs | use actix_cors::Cors;
use actix_web::{web, App, HttpServer};
#[actix_web::main]
async fn main() -> std::io::Result<()> {
// init env vars
dotenv::dotenv().ok();
// init tracing subscriber
let tracing = tracing_subscriber::fmt()
.with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env());
if cfg!(debug_assertions) {
tracing.pretty().init();
} else {
tracing.json().init();
}
// building address
    let port = std::env::var("PORT").unwrap_or_else(|_| "8080".to_string());
let address = format!("127.0.0.1:{}", port);
// repository
let repo = get_repo().await.expect("Couldn't get the repository");
let repo = web::Data::new(repo);
tracing::info!("Repository initialized");
// starting the server
tracing::info!("🚀🚀🚀 Starting Actix server at {}", address);
// static files
    let static_folder = std::env::var("STATIC_FOLDER").unwrap_or_else(|_| "./front/dist".to_string());
HttpServer::new(move || {
// CORS
let cors = Cors::permissive();
App::new()
.wrap(cors)
.service(
web::scope("/api")
.app_data(repo.clone())
.configure(api_lib::health::service)
.configure(
api_lib::v1::service::<api_lib::film_repository::PostgresFilmRepository>,
),
)
.service(
actix_files::Files::new("/", &static_folder)
.show_files_listing()
.index_file("index.html"),
)
})
.bind(&address)
.unwrap_or_else(|err| {
panic!(
"🔥🔥🔥 Couldn't start the server in port {}: {:?}",
port, err
)
})
.run()
.await
}
async fn get_repo() -> Result<impl api_lib::film_repository::FilmRepository, sqlx::Error> {
let conn_str =
std::env::var("DATABASE_URL").map_err(|e| sqlx::Error::Configuration(Box::new(e)))?;
let pool = sqlx::PgPool::connect(&conn_str).await?;
Ok(api_lib::film_repository::PostgresFilmRepository::new(pool))
}
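// Expected environment for a local run (variable names are the ones read
// above, via `.env` or the process environment; the DATABASE_URL value is an
// illustrative placeholder):
//   PORT=8080
//   STATIC_FOLDER=./front/dist
//   DATABASE_URL=postgres://user:password@localhost:5432/films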
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/lib/src/lib.rs | api/lib/src/lib.rs | pub mod film_repository;
pub mod health;
pub mod v1;
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/lib/src/health.rs | api/lib/src/health.rs | use actix_web::{
web::{self, ServiceConfig},
HttpResponse,
};
pub const API_VERSION: &str = "v0.0.3";
pub fn service(cfg: &mut ServiceConfig) {
cfg.route("/health", web::get().to(health_check));
}
async fn health_check() -> HttpResponse {
HttpResponse::Ok()
.append_header(("health-check", API_VERSION))
.finish()
}
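// Manual smoke test (assuming the Actix binary mounts this service under
// `/api` on 127.0.0.1:8080, as in api/actix/src/main.rs):
//   curl -i http://127.0.0.1:8080/api/health
//   -> 200 OK with a `health-check: v0.0.3` header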
#[cfg(test)]
mod tests {
use actix_web::http::StatusCode;
use super::*;
#[actix_rt::test]
async fn health_check_works() {
let res = health_check().await;
assert!(res.status().is_success());
assert_eq!(res.status(), StatusCode::OK);
let data = res
.headers()
.get("health-check")
.and_then(|h| h.to_str().ok());
assert_eq!(data, Some(API_VERSION));
}
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/lib/src/v1/films.rs | api/lib/src/v1/films.rs | use actix_web::{
web::{self, ServiceConfig},
HttpResponse,
};
use shared::models::{CreateFilm, Film};
use uuid::Uuid;
use crate::film_repository::FilmRepository;
pub fn service<R: FilmRepository>(cfg: &mut ServiceConfig) {
cfg.service(
web::scope("/films")
// GET
.route("", web::get().to(get_all::<R>))
.route("/{film_id}", web::get().to(get::<R>))
// POST
.route("", web::post().to(post::<R>))
// PUT
.route("", web::put().to(put::<R>))
// DELETE
.route("/{film_id}", web::delete().to(delete::<R>)),
);
}
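// Routes registered by `service` above; the `/v1` prefix comes from
// v1/mod.rs and `/api` from the binaries that mount it:
//   GET    /api/v1/films            -> get_all
//   GET    /api/v1/films/{film_id}  -> get
//   POST   /api/v1/films            -> post
//   PUT    /api/v1/films            -> put
//   DELETE /api/v1/films/{film_id}  -> delete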
async fn get_all<R: FilmRepository>(repo: web::Data<R>) -> HttpResponse {
match repo.get_films().await {
Ok(films) => HttpResponse::Ok().json(films),
        Err(e) => HttpResponse::InternalServerError().body(format!("Internal server error: {:?}", e)),
}
}
async fn get<R: FilmRepository>(film_id: web::Path<Uuid>, repo: web::Data<R>) -> HttpResponse {
match repo.get_film(&film_id).await {
Ok(film) => HttpResponse::Ok().json(film),
Err(_) => HttpResponse::NotFound().body("Not found"),
}
}
async fn post<R: FilmRepository>(
create_film: web::Json<CreateFilm>,
repo: web::Data<R>,
) -> HttpResponse {
match repo.create_film(&create_film).await {
Ok(film) => HttpResponse::Ok().json(film),
Err(e) => {
HttpResponse::InternalServerError().body(format!("Internal server error: {:?}", e))
}
}
}
async fn put<R: FilmRepository>(film: web::Json<Film>, repo: web::Data<R>) -> HttpResponse {
match repo.update_film(&film).await {
Ok(film) => HttpResponse::Ok().json(film),
        Err(e) => HttpResponse::NotFound().body(format!("Not found: {:?}", e)),
}
}
async fn delete<R: FilmRepository>(film_id: web::Path<Uuid>, repo: web::Data<R>) -> HttpResponse {
match repo.delete_film(&film_id).await {
Ok(film) => HttpResponse::Ok().json(film),
Err(e) => {
HttpResponse::InternalServerError().body(format!("Internal server error: {:?}", e))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::film_repository::MockFilmRepository;
use actix_web::body::to_bytes;
use chrono::Utc;
pub fn create_test_film(id: Uuid, title: String) -> Film {
Film {
id,
title,
director: "Director test name".to_string(),
year: 2001,
poster: "Poster test name".to_string(),
created_at: Some(Utc::now()),
updated_at: None,
}
}
#[actix_rt::test]
async fn get_all_works() {
let film_id = uuid::Uuid::new_v4();
let film_title1 = "Film test title1";
let film_title2 = "Film test title2";
let mut repo = MockFilmRepository::default();
repo.expect_get_films().returning(move || {
let film = create_test_film(film_id, film_title1.to_string());
let film2 = create_test_film(film_id, film_title2.to_string());
Ok(vec![film, film2])
});
let result = get_all(web::Data::new(repo)).await;
let body = to_bytes(result.into_body()).await.unwrap();
let films = serde_json::from_slice::<'_, Vec<Film>>(&body).unwrap();
assert_eq!(films.len(), 2);
assert_eq!(films[0].title, film_title1);
assert_eq!(films[1].title, film_title2);
}
#[actix_rt::test]
async fn get_works() {
let film_id = uuid::Uuid::new_v4();
let film_title = "Film test title";
let mut repo = MockFilmRepository::default();
repo.expect_get_film().returning(move |id| {
let film = create_test_film(*id, film_title.to_string());
Ok(film)
});
let result = get(web::Path::from(film_id), web::Data::new(repo)).await;
let body = to_bytes(result.into_body()).await.unwrap();
let film = serde_json::from_slice::<'_, Film>(&body).unwrap();
assert_eq!(film.id, film_id);
assert_eq!(film.title, film_title);
}
#[actix_rt::test]
async fn create_works() {
let film_id = uuid::Uuid::new_v4();
let title = "Film test title";
let create_film = CreateFilm {
title: title.to_string(),
director: "Director test name".to_string(),
year: 2001,
poster: "Poster test name".to_string(),
};
let mut repo = MockFilmRepository::default();
repo.expect_create_film().returning(move |create_film| {
Ok(Film {
id: film_id,
title: create_film.title.to_owned(),
director: create_film.director.to_owned(),
year: create_film.year,
poster: create_film.poster.to_owned(),
created_at: Some(Utc::now()),
updated_at: None,
})
});
let result = post(web::Json(create_film), web::Data::new(repo)).await;
let body = to_bytes(result.into_body()).await.unwrap();
let film = serde_json::from_slice::<'_, Film>(&body).unwrap();
assert_eq!(film.id, film_id);
assert_eq!(film.title, title);
}
#[actix_rt::test]
async fn update_works() {
let film_id = uuid::Uuid::new_v4();
let film_title = "Film test title";
let new_film = create_test_film(film_id, film_title.to_string());
let mut repo = MockFilmRepository::default();
repo.expect_update_film()
.returning(|film| Ok(film.to_owned()));
let result = put(web::Json(new_film), web::Data::new(repo)).await;
let body = to_bytes(result.into_body()).await.unwrap();
let film = serde_json::from_slice::<'_, Film>(&body).unwrap();
assert_eq!(film.id, film_id);
assert_eq!(film.title, film_title);
}
#[actix_rt::test]
async fn delete_works() {
let film_id = uuid::Uuid::new_v4();
let mut repo = MockFilmRepository::default();
repo.expect_delete_film().returning(|id| Ok(id.to_owned()));
let result = delete(web::Path::from(film_id), web::Data::new(repo)).await;
let body = to_bytes(result.into_body()).await.unwrap();
let uuid = serde_json::from_slice::<'_, Uuid>(&body).unwrap();
assert_eq!(uuid, film_id);
}
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/lib/src/v1/mod.rs | api/lib/src/v1/mod.rs | use actix_web::web::{self, ServiceConfig};
use crate::film_repository::FilmRepository;
mod films;
pub fn service<R: FilmRepository>(cfg: &mut ServiceConfig) {
cfg.service(web::scope("/v1").configure(films::service::<R>));
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/lib/src/film_repository/memory_film_repository.rs | api/lib/src/film_repository/memory_film_repository.rs | use std::{collections::HashMap, sync::RwLock};
use async_trait::async_trait;
use chrono::Utc;
use shared::models::{CreateFilm, Film};
use uuid::Uuid;
use super::{FilmRepository, FilmResult};
pub struct MemoryFilmRepository {
films: RwLock<HashMap<Uuid, Film>>,
}
impl MemoryFilmRepository {
pub fn new() -> Self {
Self {
films: RwLock::new(HashMap::new()),
}
}
}
impl Default for MemoryFilmRepository {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl FilmRepository for MemoryFilmRepository {
async fn get_films(&self) -> FilmResult<Vec<Film>> {
let result = self
.films
.read()
.map(|films| films.clone().into_values().collect::<Vec<_>>())
.map_err(|e| format!("An error happened while trying to read films: {}", e));
if result.is_err() {
tracing::error!("Couldn't retrive a films");
}
result
}
async fn get_film(&self, film_id: &uuid::Uuid) -> FilmResult<Film> {
let result = self
.films
.read()
.map_err(|e| format!("An error happened while trying to read films: {}", e))
.and_then(|films| {
films
.get(film_id)
.cloned()
.ok_or_else(|| format!("Couldn't find film: {}", film_id))
});
if result.is_err() {
tracing::error!("Couldn't retrive a film with id {}", film_id);
}
result
}
async fn create_film(&self, create_film: &CreateFilm) -> FilmResult<Film> {
match self.films.write() {
Ok(mut films) => {
let new_film = Film {
id: uuid::Uuid::new_v4(),
title: create_film.title.clone(),
director: create_film.director.clone(),
year: create_film.year,
poster: create_film.poster.clone(),
created_at: Some(Utc::now()),
updated_at: None,
};
films.insert(new_film.id, new_film.clone());
tracing::trace!("Film with id {} correctly created", new_film.id);
Ok(new_film)
}
Err(e) => {
let err = format!("An error happened while trying to update film: {}", e);
tracing::error!(err);
Err(err)
}
}
}
async fn update_film(&self, film: &Film) -> FilmResult<Film> {
match self.films.write() {
Ok(mut films) => {
let old_film = films.get_mut(&film.id);
match old_film {
Some(old_film) => {
let mut updated_film = film.to_owned();
updated_film.created_at = old_film.created_at;
updated_film.updated_at = Some(Utc::now());
films.insert(film.id, updated_film.clone());
tracing::debug!("Film with id {} correctly updated", film.id);
Ok(updated_film)
}
None => {
let err = format!("Film with id {} does not exist", film.id);
tracing::error!(err);
Err(err)
}
}
}
Err(e) => {
let err = format!("An error happened while trying to update film: {}", e);
tracing::error!(err);
Err(err)
}
}
}
async fn delete_film(&self, film_id: &uuid::Uuid) -> FilmResult<Uuid> {
match self.films.write() {
Ok(mut films) => {
films.remove(film_id);
Ok(film_id.to_owned())
}
Err(e) => {
let err = format!("An error happened while trying to delete film: {}", e);
tracing::error!(err);
Err(err)
}
}
}
}
#[cfg(test)]
mod tests {
use super::MemoryFilmRepository;
use crate::film_repository::FilmRepository;
use shared::models::{CreateFilm, Film};
use std::{collections::HashMap, sync::RwLock};
fn create_test_film(id: &'static str) -> Film {
Film {
id: uuid::Uuid::new_v4(),
title: format!("title-{}", id),
director: format!("director-{}", id),
poster: format!("poster-{}", id),
year: 2001,
created_at: Some(chrono::Utc::now()),
updated_at: None,
}
}
fn create_test_create_film(id: &'static str) -> CreateFilm {
CreateFilm {
title: format!("title-{}", id),
director: format!("director-{}", id),
poster: format!("poster-{}", id),
year: 2001,
}
}
#[actix_rt::test]
async fn repo_must_be_empty_on_new() {
let repo = MemoryFilmRepository::new();
let result = repo.get_films().await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(result.len(), 0);
}
#[actix_rt::test]
async fn repo_must_be_empty_on_default() {
let repo = MemoryFilmRepository::default();
let result = repo.get_films().await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(result.len(), 0);
}
#[actix_rt::test]
async fn get_films_works() {
let store = RwLock::new(HashMap::new());
let film1 = create_test_film("1");
let film2 = create_test_film("2");
{
let mut store = store.write().unwrap();
store.insert(film1.id, film1.clone());
store.insert(film2.id, film2.clone());
}
let repo = MemoryFilmRepository { films: store };
let result = repo.get_films().await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(result.len(), 2);
assert!(result.iter().any(|f| f.id == film1.id));
assert!(result.iter().any(|f| f.id == film2.id));
}
#[actix_rt::test]
async fn get_film_works() {
let store = RwLock::new(HashMap::new());
let film = create_test_film("1");
store.write().unwrap().insert(film.id, film.clone());
let repo = MemoryFilmRepository { films: store };
let result = repo.get_film(&film.id).await;
assert!(result.is_ok());
assert_eq!(result.unwrap(), film);
}
    #[actix_rt::test]
    async fn get_film_fails_if_film_is_not_present() {
        let film = create_test_film("2");
        let repo = MemoryFilmRepository::default();
        let result = repo.get_film(&film.id).await;
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.contains("Couldn't find film"));
    }
#[actix_rt::test]
async fn create_film_works() {
let store = RwLock::new(HashMap::new());
let create_film = create_test_create_film("1");
let repo = MemoryFilmRepository { films: store };
let result = repo.create_film(&create_film).await;
assert!(result.is_ok());
        let created_film = result.unwrap();
        assert_eq!(created_film.title, create_film.title);
        assert_eq!(created_film.director, create_film.director);
        assert_eq!(created_film.poster, create_film.poster);
        assert_eq!(created_film.year, create_film.year);
        assert!(created_film.created_at.is_some());
}
#[actix_rt::test]
async fn update_film_works() {
let store = RwLock::new(HashMap::new());
let film = create_test_film("1");
store.write().unwrap().insert(film.id, film.clone());
let mut film_update = film.clone();
film_update.title = "new-title".to_string();
film_update.year = 2002;
let repo = MemoryFilmRepository { films: store };
let result = repo.update_film(&film_update).await;
assert!(result.is_ok());
        let updated_film = result.unwrap();
        assert_eq!(updated_film.id, film.id);
        assert_ne!(updated_film.title, film.title);
        assert_eq!(updated_film.title, film_update.title);
        assert_eq!(updated_film.director, film.director);
        assert_eq!(updated_film.poster, film.poster);
        assert_ne!(updated_film.year, film.year);
        assert_eq!(updated_film.year, film_update.year);
        assert_eq!(updated_film.created_at, film.created_at);
        assert!(updated_film.updated_at.is_some());
assert!(film.updated_at.is_none());
}
#[actix_rt::test]
    async fn update_film_fails_if_film_is_not_present() {
let store = RwLock::new(HashMap::new());
let film = create_test_film("1");
store.write().unwrap().insert(film.id, film.clone());
let film_update = create_test_film("2");
let repo = MemoryFilmRepository { films: store };
let result = repo.update_film(&film_update).await;
assert!(result.is_err());
let err = result.unwrap_err();
assert!(err.contains("does not exist"));
}
#[actix_rt::test]
async fn delete_film_works() {
let store = RwLock::new(HashMap::new());
let film = create_test_film("1");
store.write().unwrap().insert(film.id, film.clone());
let repo = MemoryFilmRepository { films: store };
let result = repo.delete_film(&film.id).await;
assert!(result.is_ok());
assert_eq!(result.unwrap(), film.id);
}
#[actix_rt::test]
async fn delete_film_does_not_fail_if_film_is_not_present() {
let repo = MemoryFilmRepository::default();
let id = uuid::Uuid::new_v4();
let result = repo.delete_film(&id).await;
assert!(result.is_ok());
assert_eq!(result.unwrap(), id);
}
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/lib/src/film_repository/postgres_film_repository.rs | api/lib/src/film_repository/postgres_film_repository.rs | use async_trait::async_trait;
use shared::models::{CreateFilm, Film};
use uuid::Uuid;
use super::{FilmRepository, FilmResult};
pub struct PostgresFilmRepository {
pool: sqlx::PgPool,
}
impl PostgresFilmRepository {
pub fn new(pool: sqlx::PgPool) -> Self {
Self { pool }
}
}
#[async_trait]
impl FilmRepository for PostgresFilmRepository {
async fn get_films(&self) -> FilmResult<Vec<Film>> {
sqlx::query_as::<_, Film>(
r#"
SELECT id, title, director, year, poster, created_at, updated_at
FROM films
"#,
)
.fetch_all(&self.pool)
.await
.map_err(|e| e.to_string())
}
async fn get_film(&self, film_id: &uuid::Uuid) -> FilmResult<Film> {
sqlx::query_as::<_, Film>(
r#"
SELECT id, title, director, year, poster, created_at, updated_at
FROM films
WHERE id = $1
"#,
)
.bind(film_id)
.fetch_one(&self.pool)
.await
.map_err(|e| e.to_string())
}
async fn create_film(&self, create_film: &CreateFilm) -> FilmResult<Film> {
sqlx::query_as::<_, Film>(
r#"
INSERT INTO films (title, director, year, poster)
VALUES ($1, $2, $3, $4)
RETURNING id, title, director, year, poster, created_at, updated_at
"#,
)
.bind(&create_film.title)
.bind(&create_film.director)
.bind(create_film.year as i16)
.bind(&create_film.poster)
.fetch_one(&self.pool)
.await
.map_err(|e| e.to_string())
}
async fn update_film(&self, film: &Film) -> FilmResult<Film> {
sqlx::query_as::<_, Film>(
r#"
UPDATE films
SET title = $2, director = $3, year = $4, poster = $5
WHERE id = $1
RETURNING id, title, director, year, poster, created_at, updated_at
"#,
)
.bind(film.id)
.bind(&film.title)
.bind(&film.director)
.bind(film.year as i16)
.bind(&film.poster)
.fetch_one(&self.pool)
.await
.map_err(|e| e.to_string())
}
async fn delete_film(&self, film_id: &uuid::Uuid) -> FilmResult<Uuid> {
sqlx::query_scalar::<_, Uuid>(
r#"
DELETE FROM films
WHERE id = $1
RETURNING id
"#,
)
.bind(film_id)
.fetch_one(&self.pool)
.await
.map_err(|e| e.to_string())
}
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/lib/src/film_repository/mod.rs | api/lib/src/film_repository/mod.rs | mod memory_film_repository;
mod postgres_film_repository;
pub use memory_film_repository::MemoryFilmRepository;
pub use postgres_film_repository::PostgresFilmRepository;
use async_trait::async_trait;
use shared::models::{CreateFilm, Film};
use uuid::Uuid;
pub type FilmError = String;
pub type FilmResult<T> = Result<T, FilmError>;
#[cfg_attr(test, mockall::automock)]
#[async_trait]
pub trait FilmRepository: Send + Sync + 'static {
async fn get_films(&self) -> FilmResult<Vec<Film>>;
async fn get_film(&self, id: &Uuid) -> FilmResult<Film>;
    async fn create_film(&self, create_film: &CreateFilm) -> FilmResult<Film>;
    async fn update_film(&self, film: &Film) -> FilmResult<Film>;
async fn delete_film(&self, id: &Uuid) -> FilmResult<Uuid>;
}
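// `#[cfg_attr(test, mockall::automock)]` generates `MockFilmRepository` for
// tests; a typical expectation, as used by the handler tests in v1/films.rs:
//   let mut repo = MockFilmRepository::default();
//   repo.expect_get_films().returning(|| Ok(vec![]));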
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/lib/tests/health.rs | api/lib/tests/health.rs | mod integration {
use actix_web::{http::StatusCode, App};
use api_lib::health::{service, API_VERSION};
#[actix_rt::test]
async fn health_check_works() {
let app = App::new().configure(service);
let mut app = actix_web::test::init_service(app).await;
let req = actix_web::test::TestRequest::get()
.uri("/health")
.to_request();
let res = actix_web::test::call_service(&mut app, req).await;
assert!(res.status().is_success());
assert_eq!(res.status(), StatusCode::OK);
let data = res
.headers()
.get("health-check")
.and_then(|h| h.to_str().ok());
assert_eq!(data, Some(API_VERSION));
}
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/lib/tests/v1.rs | api/lib/tests/v1.rs | mod integration {
use actix_web::{http::StatusCode, web, App};
use api_lib::film_repository::{FilmRepository, MemoryFilmRepository};
use shared::models::{CreateFilm, Film};
fn create_test_film(id: &'static str) -> Film {
Film {
id: uuid::Uuid::new_v4(),
title: format!("title-{}", id),
director: format!("director-{}", id),
poster: format!("poster-{}", id),
year: 2001,
created_at: Some(chrono::Utc::now()),
updated_at: None,
}
}
fn create_test_create_film(id: &'static str) -> CreateFilm {
CreateFilm {
title: format!("title-{}", id),
director: format!("director-{}", id),
poster: format!("poster-{}", id),
year: 2001,
}
}
#[actix_rt::test]
async fn get_films_works() {
let repo = MemoryFilmRepository::default();
let create_film1 = create_test_create_film("1");
let create_film2 = create_test_create_film("2");
let _ = repo.create_film(&create_film1).await;
let _ = repo.create_film(&create_film2).await;
let repo = web::Data::new(repo);
let app = App::new()
.app_data(repo.clone())
.configure(api_lib::health::service)
.configure(api_lib::v1::service::<MemoryFilmRepository>);
let mut app = actix_web::test::init_service(app).await;
let req = actix_web::test::TestRequest::get()
.uri("/v1/films")
.to_request();
let res = actix_web::test::call_service(&mut app, req).await;
assert!(res.status().is_success());
assert_eq!(res.status(), StatusCode::OK);
let films: Vec<Film> = actix_web::test::read_body_json(res).await;
assert_eq!(films.len(), 2);
}
#[actix_rt::test]
async fn get_film_works() {
let repo = MemoryFilmRepository::default();
let create_film = create_test_create_film("1");
let film = repo
.create_film(&create_film)
.await
.expect("create film failed");
let repo = web::Data::new(repo);
let app = App::new()
.app_data(repo.clone())
.configure(api_lib::health::service)
.configure(api_lib::v1::service::<MemoryFilmRepository>);
let mut app = actix_web::test::init_service(app).await;
let req = actix_web::test::TestRequest::get()
.uri(&format!("/v1/films/{}", film.id))
.to_request();
let res = actix_web::test::call_service(&mut app, req).await;
assert!(res.status().is_success());
assert_eq!(res.status(), StatusCode::OK);
let result: Film = actix_web::test::read_body_json(res).await;
assert_eq!(result.id, film.id);
assert_eq!(result.title, create_film.title);
assert_eq!(result.director, create_film.director);
assert_eq!(result.poster, create_film.poster);
assert_eq!(result.year, create_film.year);
assert!(result.created_at.is_some());
assert!(result.updated_at.is_none());
}
#[actix_rt::test]
    async fn get_film_fails_if_film_is_not_present() {
let repo = MemoryFilmRepository::default();
let film = create_test_film("1");
let repo = web::Data::new(repo);
let app = App::new()
.app_data(repo.clone())
.configure(api_lib::health::service)
.configure(api_lib::v1::service::<MemoryFilmRepository>);
let mut app = actix_web::test::init_service(app).await;
let req = actix_web::test::TestRequest::get()
.uri(&format!("/v1/films/{}", film.id))
.to_request();
let res = actix_web::test::call_service(&mut app, req).await;
assert!(res.status().is_client_error());
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn create_film_works() {
let repo = MemoryFilmRepository::default();
let create_film = create_test_create_film("1");
let repo = web::Data::new(repo);
let app = App::new()
.app_data(repo.clone())
.configure(api_lib::health::service)
.configure(api_lib::v1::service::<MemoryFilmRepository>);
let mut app = actix_web::test::init_service(app).await;
let req = actix_web::test::TestRequest::post()
.uri("/v1/films")
.set_json(create_film.clone())
.to_request();
let res = actix_web::test::call_service(&mut app, req).await;
assert!(res.status().is_success());
assert_eq!(res.status(), StatusCode::OK);
        let created_film: Film = actix_web::test::read_body_json(res).await;
        assert_eq!(created_film.title, create_film.title);
        assert_eq!(created_film.director, create_film.director);
        assert_eq!(created_film.poster, create_film.poster);
        assert_eq!(created_film.year, create_film.year);
        assert!(created_film.created_at.is_some());
        assert!(created_film.updated_at.is_none());
}
#[actix_rt::test]
async fn update_film_works() {
let repo = MemoryFilmRepository::default();
let create_film = create_test_create_film("1");
        let created_film = repo.create_film(&create_film).await.unwrap();
let repo = web::Data::new(repo);
let app = App::new()
.app_data(repo.clone())
.configure(api_lib::health::service)
.configure(api_lib::v1::service::<MemoryFilmRepository>);
let mut app = actix_web::test::init_service(app).await;
        let mut film_update = created_film.clone();
film_update.title = "new-title".to_string();
film_update.year = 2002;
let req = actix_web::test::TestRequest::put()
.uri("/v1/films")
.set_json(&film_update)
.to_request();
let res = actix_web::test::call_service(&mut app, req).await;
assert!(res.status().is_success());
assert_eq!(res.status(), StatusCode::OK);
        let updated_film: Film = actix_web::test::read_body_json(res).await;
        assert_eq!(updated_film.id, created_film.id);
        assert_ne!(updated_film.title, created_film.title);
        assert_eq!(updated_film.title, film_update.title);
        assert_eq!(updated_film.director, created_film.director);
        assert_eq!(updated_film.poster, created_film.poster);
        assert_ne!(updated_film.year, created_film.year);
        assert_eq!(updated_film.year, film_update.year);
        assert_eq!(updated_film.created_at, created_film.created_at);
        assert!(updated_film.updated_at.is_some());
}
#[actix_rt::test]
    async fn update_film_fails_if_film_is_not_present() {
let repo = MemoryFilmRepository::default();
let film = create_test_film("1");
let repo = web::Data::new(repo);
let app = App::new()
.app_data(repo.clone())
.configure(api_lib::health::service)
.configure(api_lib::v1::service::<MemoryFilmRepository>);
let mut app = actix_web::test::init_service(app).await;
let req = actix_web::test::TestRequest::put()
.uri("/v1/films")
.set_json(&film)
.to_request();
let res = actix_web::test::call_service(&mut app, req).await;
assert!(res.status().is_client_error());
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn delete_film_works() {
let repo = MemoryFilmRepository::default();
let create_film = create_test_create_film("1");
let film = repo
.create_film(&create_film)
.await
.expect("create film failed");
let repo = web::Data::new(repo);
let app = App::new()
.app_data(repo.clone())
.configure(api_lib::health::service)
.configure(api_lib::v1::service::<MemoryFilmRepository>);
let mut app = actix_web::test::init_service(app).await;
let req = actix_web::test::TestRequest::delete()
.uri(&format!("/v1/films/{}", film.id))
.set_json(create_film.clone())
.to_request();
let res = actix_web::test::call_service(&mut app, req).await;
assert!(res.status().is_success());
assert_eq!(res.status(), StatusCode::OK);
let deleted_id: uuid::Uuid = actix_web::test::read_body_json(res).await;
assert_eq!(deleted_id, film.id);
}
#[actix_rt::test]
async fn delete_film_does_not_fail_if_film_is_not_present() {
let repo = MemoryFilmRepository::default();
let film = create_test_film("1");
let repo = web::Data::new(repo);
let app = App::new()
.app_data(repo.clone())
.configure(api_lib::health::service)
.configure(api_lib::v1::service::<MemoryFilmRepository>);
let mut app = actix_web::test::init_service(app).await;
let req = actix_web::test::TestRequest::delete()
.uri(&format!("/v1/films/{}", film.id))
.set_json(film.clone())
.to_request();
let res = actix_web::test::call_service(&mut app, req).await;
assert!(res.status().is_success());
assert_eq!(res.status(), StatusCode::OK);
let deleted_id: uuid::Uuid = actix_web::test::read_body_json(res).await;
assert_eq!(deleted_id, film.id);
}
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/api/shuttle/src/main.rs | api/shuttle/src/main.rs | use actix_files::Files;
use actix_web::web::{self, ServiceConfig};
use shuttle_actix_web::ShuttleActixWeb;
use shuttle_runtime::CustomError;
use sqlx::Executor;
#[shuttle_runtime::main]
async fn actix_web(
#[shuttle_shared_db::Postgres] pool: sqlx::PgPool,
) -> ShuttleActixWeb<impl FnOnce(&mut ServiceConfig) + Send + Clone + 'static> {
// initialize the database if not already initialized
pool.execute(include_str!("../../db/schema.sql"))
.await
.map_err(CustomError::new)?;
    // create a film repository; in this case, backed by Postgres
let film_repository = api_lib::film_repository::PostgresFilmRepository::new(pool);
let film_repository = web::Data::new(film_repository);
// start the service
let config = move |cfg: &mut ServiceConfig| {
cfg.service(
web::scope("/api")
.app_data(film_repository)
.configure(api_lib::health::service)
.configure(
api_lib::v1::service::<api_lib::film_repository::PostgresFilmRepository>,
),
)
.service(Files::new("/", "static").index_file("index.html"));
};
Ok(config.into())
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/front/src/main.rs | front/src/main.rs | #![allow(non_snake_case)]
// import the prelude to get access to the `rsx!` macro and the `Scope` and `Element` types
mod components;
mod models;
use components::{FilmCard, FilmModal, Footer, Header};
use dioxus::prelude::*;
use models::FilmModalVisibility;
use shared::models::Film;
const API_ENDPOINT: &str = "api/v1";
fn films_endpoint() -> String {
let window = web_sys::window().expect("no global `window` exists");
let location = window.location();
let host = location.host().expect("should have a host");
let protocol = location.protocol().expect("should have a protocol");
let endpoint = format!("{}//{}/{}", protocol, host, API_ENDPOINT);
format!("{}/films", endpoint)
}
async fn get_films() -> Vec<Film> {
log::info!("Getting films {}", films_endpoint());
reqwest::get(&films_endpoint())
.await
.unwrap()
.json::<Vec<Film>>()
.await
.unwrap()
}
fn main() {
wasm_logger::init(wasm_logger::Config::default().module_prefix("front"));
// launch the web app
dioxus_web::launch(App);
}
// Root component: renders the film list and the create/update modal.
fn App(cx: Scope) -> Element {
use_shared_state_provider(cx, || FilmModalVisibility(false));
let is_modal_visible = use_shared_state::<FilmModalVisibility>(cx).unwrap();
let films = use_state::<Option<Vec<Film>>>(cx, || None);
let selected_film = use_state::<Option<Film>>(cx, || None);
let force_get_films = use_state(cx, || ());
{
let films = films.clone();
use_effect(cx, force_get_films, |_| async move {
let existing_films = get_films().await;
if existing_films.is_empty() {
films.set(None);
} else {
films.set(Some(existing_films));
}
});
}
    let delete_film = move |film_id| {
let force_get_films = force_get_films.clone();
cx.spawn({
async move {
let response = reqwest::Client::new()
                    .delete(&format!("{}/{}", &films_endpoint(), film_id))
.send()
.await;
match response {
Ok(_data) => {
log::info!("Film deleted");
force_get_films.set(());
}
Err(err) => {
log::info!("Error deleting film: {:?}", err);
}
}
}
});
};
let create_or_update_film = move |film: Film| {
let force_get_films = force_get_films.clone();
let current_selected_film = selected_film.clone();
let is_modal_visible = is_modal_visible.clone();
cx.spawn({
async move {
let response = if current_selected_film.get().is_some() {
reqwest::Client::new()
.put(&films_endpoint())
.json(&film)
.send()
.await
} else {
reqwest::Client::new()
.post(&films_endpoint())
.json(&film)
.send()
.await
};
match response {
Ok(_data) => {
log::info!("Film created");
current_selected_film.set(None);
is_modal_visible.write().0 = false;
force_get_films.set(());
}
Err(err) => {
log::info!("Error creating film: {:?}", err);
}
}
}
});
};
cx.render(rsx! {
main {
class: "relative z-0 bg-blue-100 w-screen h-auto min-h-screen flex flex-col justify-start items-stretch",
Header {}
section {
class: "md:container md:mx-auto md:py-8 flex-1",
if let Some(films) = films.get() {
rsx!(
ul {
class: "flex flex-row justify-center items-stretch gap-4 flex-wrap",
{films.iter().map(|film| {
rsx!(
FilmCard {
key: "{film.id}",
film: film,
on_edit: move |_| {
selected_film.set(Some(film.clone()));
is_modal_visible.write().0 = true
},
on_delete: move |_| {
delete_film(film.id)
}
}
)
})}
}
)
}
}
Footer {}
}
FilmModal {
film: selected_film.get().clone(),
on_create_or_update: move |new_film| {
create_or_update_film(new_film);
},
on_cancel: move |_| {
selected_film.set(None);
is_modal_visible.write().0 = false;
}
}
})
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/front/src/components/button.rs | front/src/components/button.rs | use dioxus::prelude::*;
use crate::models::ButtonType;
#[component]
pub fn Button<'a>(
cx: Scope<'a>,
button_type: ButtonType,
onclick: EventHandler<'a, MouseEvent>,
children: Element<'a>,
) -> Element {
cx.render(rsx!(button {
class: "text-slate-200 inline-flex items-center border-0 py-1 px-3 focus:outline-none rounded mt-4 md:mt-0 {button_type.to_string()}",
onclick: move |event| onclick.call(event),
children
}))
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/front/src/components/footer.rs | front/src/components/footer.rs | use dioxus::prelude::*;
pub fn Footer(cx: Scope) -> Element {
cx.render(rsx!(
footer {
class: "bg-blue-200 w-full h-16 p-2 box-border gap-6 flex flex-row justify-center items-center text-teal-950",
a {
class: "w-auto h-full",
href: "https://www.devbcn.com/",
target: "_blank",
img {
class: "h-full w-auto",
alt: "DevBcn",
src: "devbcn.png",
"loading": "lazy"
}
}
svg {
fill: "none",
view_box: "0 0 24 24",
stroke_width: "1.5",
stroke: "currentColor",
class: "w-6 h-6",
path {
stroke_linecap: "round",
stroke_linejoin: "round",
d: "M6 18L18 6M6 6l12 12"
}
}
a {
class: "w-auto h-full",
href: "https://www.meetup.com/es-ES/bcnrust/",
target: "_blank",
img {
class: "h-full w-auto",
alt: "BcnRust",
src: "bcnrust.png",
"loading": "lazy"
}
}
}
))
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/front/src/components/film_modal.rs | front/src/components/film_modal.rs | use dioxus::prelude::*;
use shared::models::Film;
use uuid::Uuid;
use crate::components::Button;
use crate::models::{ButtonType, FilmModalVisibility};
#[derive(Props)]
pub struct FilmModalProps<'a> {
on_create_or_update: EventHandler<'a, Film>,
on_cancel: EventHandler<'a, MouseEvent>,
#[props(!optional)]
film: Option<Film>,
}
pub fn FilmModal<'a>(cx: Scope<'a, FilmModalProps>) -> Element<'a> {
let is_modal_visible = use_shared_state::<FilmModalVisibility>(cx).unwrap();
let draft_film = use_state::<Film>(cx, || Film {
title: "".to_string(),
poster: "".to_string(),
director: "".to_string(),
year: 1900,
id: Uuid::new_v4(),
created_at: None,
updated_at: None,
});
{
let draft_film = draft_film.clone();
use_effect(cx, &cx.props.film, |film| async move {
match film {
Some(film) => draft_film.set(film),
None => draft_film.set(Film {
title: "".to_string(),
poster: "".to_string(),
director: "".to_string(),
year: 1900,
id: Uuid::new_v4(),
created_at: None,
updated_at: None,
}),
}
});
}
if !is_modal_visible.read().0 {
return None;
}
cx.render(rsx!(
article {
class: "z-50 w-full h-full fixed top-0 right-0 bg-gray-800 bg-opacity-50 flex flex-col justify-center items-center",
section {
class: "w-1/3 h-auto bg-white rounded-lg flex flex-col justify-center items-center box-border p-6",
header {
class: "mb-4",
h2 {
class: "text-xl text-teal-950 font-semibold",
"🎬 Film"
}
}
form {
class: "w-full flex-1 flex flex-col justify-stretch items-start gap-y-2",
div {
class: "w-full",
label {
class: "text-sm font-semibold",
"Title"
}
input {
class: "w-full border border-gray-300 rounded-lg p-2",
"type": "text",
placeholder: "Enter film title",
value: "{draft_film.get().title}",
oninput: move |evt| {
draft_film.set(Film {
title: evt.value.clone(),
..draft_film.get().clone()
})
}
}
}
div {
class: "w-full",
label {
class: "text-sm font-semibold",
"Director"
}
input {
class: "w-full border border-gray-300 rounded-lg p-2",
"type": "text",
placeholder: "Enter film director",
value: "{draft_film.get().director}",
oninput: move |evt| {
draft_film.set(Film {
director: evt.value.clone(),
..draft_film.get().clone()
})
}
}
}
div {
class: "w-full",
label {
class: "text-sm font-semibold",
"Year"
}
input {
class: "w-full border border-gray-300 rounded-lg p-2",
"type": "number",
placeholder: "Enter film year",
value: "{draft_film.get().year.to_string()}",
oninput: move |evt| {
draft_film.set(Film {
                                    year: evt.value.parse::<u16>().unwrap_or(1900),
..draft_film.get().clone()
})
}
}
}
div {
class: "w-full",
label {
class: "text-sm font-semibold",
"Poster"
}
input {
class: "w-full border border-gray-300 rounded-lg p-2",
"type": "text",
placeholder: "Enter film poster URL",
value: "{draft_film.get().poster}",
oninput: move |evt| {
draft_film.set(Film {
poster: evt.value.clone(),
..draft_film.get().clone()
})
}
}
}
}
footer {
class: "flex flex-row justify-center items-center mt-4 gap-x-2",
Button {
button_type: ButtonType::Secondary,
onclick: move |evt| {
draft_film.set(Film {
title: "".to_string(),
poster: "".to_string(),
director: "".to_string(),
year: 1900,
id: Uuid::new_v4(),
created_at: None,
updated_at: None,
});
cx.props.on_cancel.call(evt)
},
"Cancel"
}
Button {
button_type: ButtonType::Primary,
onclick: move |_| {
cx.props.on_create_or_update.call(draft_film.get().clone());
draft_film.set(Film {
title: "".to_string(),
poster: "".to_string(),
director: "".to_string(),
year: 1900,
id: Uuid::new_v4(),
created_at: None,
updated_at: None,
})
},
"Save film"
}
}
}
}
))
}
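// The empty-draft `Film` literal above is constructed four times; a small
// helper (a sketch, not part of the original component) would remove the
// repetition:
//   fn empty_draft() -> Film {
//       Film {
//           title: String::new(),
//           poster: String::new(),
//           director: String::new(),
//           year: 1900,
//           id: Uuid::new_v4(),
//           created_at: None,
//           updated_at: None,
//       }
//   }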
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/front/src/components/header.rs | front/src/components/header.rs | use dioxus::prelude::*;
use crate::components::Button;
use crate::models::{ButtonType, FilmModalVisibility};
pub fn Header(cx: Scope) -> Element {
let is_modal_visible = use_shared_state::<FilmModalVisibility>(cx).unwrap();
cx.render(rsx!(
header {
class: "sticky top-0 z-10 text-gray-400 bg-blue-300 body-font shadow-md",
div { class: "container mx-auto flex flex-wrap p-0 flex-col md:flex-row justify-between items-center",
a {
class: "flex title-font font-medium items-center text-teal-950 mb-4 md:mb-0",
img {
class: "bg-transparent p-2 animate-jump",
alt: "ferris",
src: "ferris.png",
"loading": "lazy"
}
span { class: "ml-3 text-2xl", "Rusty films"}
}
Button {
button_type: ButtonType::Primary,
onclick: move |_| {
is_modal_visible.write().0 = true;
},
"Add new film"
}
}
}
))
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/front/src/components/mod.rs | front/src/components/mod.rs | mod button;
mod film_card;
mod film_modal;
mod footer;
mod header;
pub use button::Button;
pub use film_card::FilmCard;
pub use film_modal::FilmModal;
pub use footer::Footer;
pub use header::Header;
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/front/src/components/film_card.rs | front/src/components/film_card.rs | use crate::{components::Button, models::ButtonType};
use dioxus::prelude::*;
use shared::models::Film;
#[component]
pub fn FilmCard<'a>(
cx: Scope<'a>,
film: &'a Film,
on_edit: EventHandler<'a, MouseEvent>,
on_delete: EventHandler<'a, MouseEvent>,
) -> Element {
cx.render(rsx!(
li {
class: "film-card md:basis-1/4 p-4 rounded box-border bg-neutral-100 drop-shadow-md transition-all ease-in-out hover:drop-shadow-xl flex-col flex justify-start items-stretch animate-fade animate-duration-500 animate-ease-in-out animate-normal animate-fill-both",
header {
img {
class: "max-h-80 w-auto mx-auto rounded",
src: "{film.poster}"
},
}
section {
class: "flex-1",
h3 {
class: "text-lg font-bold my-3",
"{film.title}"
}
p {
"{film.director}"
}
p {
class: "text-sm text-gray-500",
"{film.year.to_string()}"
}
}
footer {
class: "flex justify-end space-x-2 mt-auto",
Button {
button_type: ButtonType::Secondary,
onclick: move |event| on_delete.call(event),
svg {
fill: "none",
stroke: "currentColor",
stroke_width: "1.5",
view_box: "0 0 24 24",
class: "w-5 h-5",
path {
stroke_linecap: "round",
stroke_linejoin: "round",
d: "M14.74 9l-.346 9m-4.788 0L9.26 9m9.968-3.21c.342.052.682.107 1.022.166m-1.022-.165L18.16 19.673a2.25 2.25 0 01-2.244 2.077H8.084a2.25 2.25 0 01-2.244-2.077L4.772 5.79m14.456 0a48.108 48.108 0 00-3.478-.397m-12 .562c.34-.059.68-.114 1.022-.165m0 0a48.11 48.11 0 013.478-.397m7.5 0v-.916c0-1.18-.91-2.164-2.09-2.201a51.964 51.964 0 00-3.32 0c-1.18.037-2.09 1.022-2.09 2.201v.916m7.5 0a48.667 48.667 0 00-7.5 0"
}
}
}
Button {
button_type: ButtonType::Primary,
onclick: move |event| on_edit.call(event),
svg {
fill: "none",
stroke: "currentColor",
stroke_width: "1.5",
view_box: "0 0 24 24",
class: "w-5 h-5",
path {
stroke_linecap: "round",
stroke_linejoin: "round",
d: "M16.862 4.487l1.687-1.688a1.875 1.875 0 112.652 2.652L6.832 19.82a4.5 4.5 0 01-1.897 1.13l-2.685.8.8-2.685a4.5 4.5 0 011.13-1.897L16.863 4.487zm0 0L19.5 7.125"
}
}
}
}
}
))
}
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/front/src/models/button.rs | front/src/models/button.rs | use std::fmt;
pub enum ButtonType {
Primary,
Secondary,
}
impl fmt::Display for ButtonType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ButtonType::Primary => write!(f, "bg-blue-700 hover:bg-blue-800 active:bg-blue-900"),
ButtonType::Secondary => write!(f, "bg-rose-700 hover:bg-rose-800 active:bg-rose-900"),
}
}
}
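// `Button` in components/button.rs interpolates this `Display` output into
// its class string, so for example:
//   assert_eq!(
//       ButtonType::Primary.to_string(),
//       "bg-blue-700 hover:bg-blue-800 active:bg-blue-900"
//   );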
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/front/src/models/film.rs | front/src/models/film.rs | pub struct FilmModalVisibility(pub bool);
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/front/src/models/mod.rs | front/src/models/mod.rs | mod button;
mod film;
pub use button::ButtonType;
pub use film::FilmModalVisibility;
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/shared/src/lib.rs | shared/src/lib.rs | pub mod models;
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
BcnRust/devbcn-workshop | https://github.com/BcnRust/devbcn-workshop/blob/0f45130131914a5458066a03ca19923807913ca1/shared/src/models.rs | shared/src/models.rs | use serde::{Deserialize, Serialize};
#[cfg_attr(feature = "backend", derive(sqlx::FromRow))]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct Film {
pub id: uuid::Uuid,
pub title: String,
pub director: String,
#[cfg_attr(feature = "backend", sqlx(try_from = "i16"))]
pub year: u16,
pub poster: String,
pub created_at: Option<chrono::DateTime<chrono::Utc>>,
pub updated_at: Option<chrono::DateTime<chrono::Utc>>,
}
#[cfg_attr(feature = "backend", derive(sqlx::FromRow))]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct CreateFilm {
pub title: String,
pub director: String,
#[cfg_attr(feature = "backend", sqlx(try_from = "i16"))]
pub year: u16,
pub poster: String,
}
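// With the serde derives above, `CreateFilm` maps to plain JSON; an
// illustrative request body (values invented for the example):
//   {"title":"Inception","director":"Christopher Nolan","year":2010,
//    "poster":"https://example.com/poster.jpg"}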
| rust | MIT | 0f45130131914a5458066a03ca19923807913ca1 | 2026-01-04T20:24:05.254453Z | false |
rust-embedded-community/usbd-serial | https://github.com/rust-embedded-community/usbd-serial/blob/822ae08a8a31f3be4a47eaf1a0b1149b4c4e5892/src/lib.rs | src/lib.rs | //! CDC-ACM USB serial port implementation for [usb-device](https://crates.io/crates/usb-device).
//!
//! CDC-ACM is a USB class that's supported out of the box by most operating systems and used for
//! implementing modems and generic serial ports. The [`SerialPort`] class
//! implements a stream-like buffered serial port that can be used similarly to a normal UART.
//!
//! The crate also contains [`CdcAcmClass`] which is a lower-level implementation that
//! has less overhead, but requires more care to use correctly.
//!
//! Example
//! =======
//!
//! A full example requires the use of a hardware driver, but the hardware-independent part is as
//! follows:
//!
//! ```no_run
//! # use usb_device::class_prelude::*;
//! # fn dummy(usb_bus: UsbBusAllocator<impl UsbBus>) {
//! use usb_device::prelude::*;
//! use usbd_serial::{SerialPort, USB_CLASS_CDC};
//!
//! let mut serial = SerialPort::new(&usb_bus);
//!
//! let mut usb_dev = UsbDeviceBuilder::new(&usb_bus, UsbVidPid(0x16c0, 0x27dd))
//! .strings(&[StringDescriptors::new(LangID::EN).product("Serial port")])
//! .expect("Failed to set strings")
//! .device_class(USB_CLASS_CDC)
//! .build();
//!
//! loop {
//! if !usb_dev.poll(&mut [&mut serial]) {
//! continue;
//! }
//!
//! let mut buf = [0u8; 64];
//!
//! match serial.read(&mut buf[..]) {
//! Ok(count) => {
//! // count bytes were read to &buf[..count]
//! },
//! Err(UsbError::WouldBlock) => { /* No data received */ },
//! Err(err) => { /* An error occurred */ },
//! };
//!
//! match serial.write(&[0x3a, 0x29]) {
//! Ok(count) => {
//! // count bytes were written
//! },
//! Err(UsbError::WouldBlock) => { /* No data could be written (buffers full) */ },
//! Err(err) => { /* An error occurred */ },
//! };
//! }
//! # }
//! ```
#![no_std]
mod buffer;
mod cdc_acm;
mod io;
mod serial_port;
pub use crate::buffer::DefaultBufferStore;
pub use crate::cdc_acm::*;
pub use crate::serial_port::*;
pub use embedded_io;
pub use usb_device::{Result, UsbError};
| rust | MIT | 822ae08a8a31f3be4a47eaf1a0b1149b4c4e5892 | 2026-01-04T20:24:12.282850Z | false |
rust-embedded-community/usbd-serial | https://github.com/rust-embedded-community/usbd-serial/blob/822ae08a8a31f3be4a47eaf1a0b1149b4c4e5892/src/io.rs | src/io.rs | use super::SerialPort;
use core::borrow::BorrowMut;
use usb_device::bus::UsbBus;
#[derive(Debug)]
pub struct Error(usb_device::UsbError);
impl From<usb_device::UsbError> for Error {
fn from(e: usb_device::UsbError) -> Self {
Self(e)
}
}
impl core::fmt::Display for Error {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{:?}", self)
}
}
impl core::error::Error for Error {}
impl embedded_io::Error for Error {
fn kind(&self) -> embedded_io::ErrorKind {
match self.0 {
usb_device::UsbError::Unsupported => embedded_io::ErrorKind::Unsupported,
usb_device::UsbError::BufferOverflow
| usb_device::UsbError::EndpointOverflow
| usb_device::UsbError::EndpointMemoryOverflow => embedded_io::ErrorKind::OutOfMemory,
_ => embedded_io::ErrorKind::Other,
}
}
}
impl<Bus: UsbBus, RS: BorrowMut<[u8]>, WS: BorrowMut<[u8]>> embedded_io::ErrorType
for SerialPort<'_, Bus, RS, WS>
{
type Error = Error;
}
impl<Bus: UsbBus, RS: BorrowMut<[u8]>, WS: BorrowMut<[u8]>> embedded_io::Read
for SerialPort<'_, Bus, RS, WS>
{
fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
loop {
match self.read(buf).map_err(From::from) {
// We are required by `embedded-io` to continue reading until at least one byte is
// read.
Ok(0) => {}
Err(usb_device::UsbError::WouldBlock) => {}
other => return Ok(other?),
}
}
}
}
impl<Bus: UsbBus, RS: BorrowMut<[u8]>, WS: BorrowMut<[u8]>> embedded_io::ReadReady
for SerialPort<'_, Bus, RS, WS>
{
fn read_ready(&mut self) -> Result<bool, Self::Error> {
self.poll()?;
Ok(self.read_buf.available_read() != 0)
}
}
impl<Bus: UsbBus, RS: BorrowMut<[u8]>, WS: BorrowMut<[u8]>> embedded_io::Write
for SerialPort<'_, Bus, RS, WS>
{
fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
if buf.is_empty() {
return Ok(0);
}
loop {
match self.write(buf) {
// We are required by `embedded-io` to continue writing until at least one byte is
// written.
Ok(0) => {}
Err(usb_device::UsbError::WouldBlock) => {}
other => return Ok(other?),
}
}
}
fn flush(&mut self) -> Result<(), Self::Error> {
self.flush().map_err(From::from)
}
}
impl<Bus: UsbBus, RS: BorrowMut<[u8]>, WS: BorrowMut<[u8]>> embedded_io::WriteReady
for SerialPort<'_, Bus, RS, WS>
{
fn write_ready(&mut self) -> Result<bool, Self::Error> {
Ok(self.write_buf.available_write() != 0)
}
}
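// Usage sketch (assumptions: `serial` is a `SerialPort` whose `poll()` is
// being driven, e.g. from a USB interrupt): the blocking adapters above
// retry on `WouldBlock`, so `embedded_io`'s provided methods apply:
//   use embedded_io::Write;
//   serial.write_all(b"hello")?;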
| rust | MIT | 822ae08a8a31f3be4a47eaf1a0b1149b4c4e5892 | 2026-01-04T20:24:12.282850Z | false |
rust-embedded-community/usbd-serial | https://github.com/rust-embedded-community/usbd-serial/blob/822ae08a8a31f3be4a47eaf1a0b1149b4c4e5892/src/cdc_acm.rs | src/cdc_acm.rs | use core::convert::TryInto;
use core::mem;
use usb_device::Result;
use usb_device::class_prelude::*;
use usb_device::descriptor::lang_id::LangID;
use usb_device::device::DEFAULT_ALTERNATE_SETTING;
/// This should be used as `device_class` when building the `UsbDevice`.
pub const USB_CLASS_CDC: u8 = 0x02;
const USB_CLASS_CDC_DATA: u8 = 0x0a;
const CDC_SUBCLASS_ACM: u8 = 0x02;
const CDC_PROTOCOL_NONE: u8 = 0x00;
const CS_INTERFACE: u8 = 0x24;
const CDC_TYPE_HEADER: u8 = 0x00;
const CDC_TYPE_CALL_MANAGEMENT: u8 = 0x01;
const CDC_TYPE_ACM: u8 = 0x02;
const CDC_TYPE_UNION: u8 = 0x06;
const REQ_SEND_ENCAPSULATED_COMMAND: u8 = 0x00;
#[allow(unused)]
const REQ_GET_ENCAPSULATED_COMMAND: u8 = 0x01;
const REQ_SET_LINE_CODING: u8 = 0x20;
const REQ_GET_LINE_CODING: u8 = 0x21;
const REQ_SET_CONTROL_LINE_STATE: u8 = 0x22;
/// Packet level implementation of a CDC-ACM serial port.
///
/// This class can be used directly and it has the least overhead due to directly reading and
/// writing USB packets with no intermediate buffers, but it will not act like a stream-like serial
/// port. The following constraints must be followed if you use this class directly:
///
/// - `read_packet` must be called with a buffer large enough to hold max_packet_size bytes, and the
/// method will return a `WouldBlock` error if there is no packet to be read.
/// - `write_packet` must not be called with a buffer larger than max_packet_size bytes, and the
/// method will return a `WouldBlock` error if the previous packet has not been sent yet.
/// - If you write a packet that is exactly max_packet_size bytes long, it won't be processed by the
/// host operating system until a subsequent shorter packet is sent. A zero-length packet (ZLP)
/// can be sent if there is no other data to send. This is because USB bulk transactions must be
/// terminated with a short packet, even if the bulk endpoint is used for stream-like data.
pub struct CdcAcmClass<'a, B: UsbBus> {
comm_if: InterfaceNumber,
comm_if_name: Option<(StringIndex, &'static str)>,
comm_ep: EndpointIn<'a, B>,
data_if: InterfaceNumber,
data_if_name: Option<(StringIndex, &'static str)>,
read_ep: EndpointOut<'a, B>,
write_ep: EndpointIn<'a, B>,
line_coding: LineCoding,
dtr: bool,
rts: bool,
}
impl<'a, B: UsbBus> CdcAcmClass<'a, B> {
/// Creates a new CdcAcmClass with the provided UsbBus and max_packet_size in bytes. For
/// full-speed devices, max_packet_size has to be one of 8, 16, 32 or 64.
pub fn new<'alloc: 'a>(
alloc: &'alloc UsbBusAllocator<B>,
max_packet_size: u16,
) -> CdcAcmClass<'a, B> {
Self::new_with_interface_names(alloc, max_packet_size, None, None)
}
/// Creates a new CdcAcmClass with the provided UsbBus and max_packet_size in bytes. For
/// full-speed devices, max_packet_size has to be one of 8, 16, 32 or 64. Additionally,
/// this lets you specify optional names for the CDC interfaces, to better organize composite devices.
pub fn new_with_interface_names<'alloc: 'a>(
alloc: &'alloc UsbBusAllocator<B>,
max_packet_size: u16,
comm_if_name: Option<&'static str>,
data_if_name: Option<&'static str>,
) -> CdcAcmClass<'a, B> {
let comm_if_name = comm_if_name.map(|s| (alloc.string(), s));
let data_if_name = data_if_name.map(|s| (alloc.string(), s));
CdcAcmClass {
comm_if: alloc.interface(),
comm_if_name,
comm_ep: alloc.interrupt(8, 255),
data_if: alloc.interface(),
data_if_name,
read_ep: alloc.bulk(max_packet_size),
write_ep: alloc.bulk(max_packet_size),
line_coding: LineCoding {
stop_bits: StopBits::One,
data_bits: 8,
parity_type: ParityType::None,
data_rate: 9_600,
},
dtr: false,
rts: false,
}
}
/// Gets the maximum packet size in bytes.
pub fn max_packet_size(&self) -> u16 {
// The size is the same for both endpoints.
self.read_ep.max_packet_size()
}
/// Gets the current line coding. The line coding contains information that's mainly relevant
/// for USB to UART serial port emulators, and can be ignored if not relevant.
pub fn line_coding(&self) -> &LineCoding {
&self.line_coding
}
/// Gets the DTR (data terminal ready) state
pub fn dtr(&self) -> bool {
self.dtr
}
/// Gets the RTS (request to send) state
pub fn rts(&self) -> bool {
self.rts
}
/// Writes a single packet into the IN endpoint.
pub fn write_packet(&mut self, data: &[u8]) -> Result<usize> {
self.write_ep.write(data)
}
/// Reads a single packet from the OUT endpoint.
pub fn read_packet(&mut self, data: &mut [u8]) -> Result<usize> {
self.read_ep.read(data)
}
/// Gets the IN endpoint.
pub fn write_ep(&self) -> &EndpointIn<'a, B> {
&self.write_ep
}
/// Mutably gets the IN endpoint.
pub fn write_ep_mut(&mut self) -> &mut EndpointIn<'a, B> {
&mut self.write_ep
}
/// Gets the OUT endpoint.
pub fn read_ep(&self) -> &EndpointOut<'a, B> {
&self.read_ep
}
/// Mutably gets the OUT endpoint.
pub fn read_ep_mut(&mut self) -> &mut EndpointOut<'a, B> {
&mut self.read_ep
}
}
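// Sketch of the short-packet rule from the docs above (assumes `cdc` is a
// `CdcAcmClass` and `data` fits in a single packet):
//   let n = cdc.write_packet(&data)?;
//   if n == cdc.max_packet_size() as usize {
//       // terminate the bulk transfer with a zero-length packet
//       cdc.write_packet(&[])?;
//   }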
impl<B: UsbBus> UsbClass<B> for CdcAcmClass<'_, B> {
fn get_configuration_descriptors(&self, writer: &mut DescriptorWriter) -> Result<()> {
writer.iad(
self.comm_if,
2,
USB_CLASS_CDC,
CDC_SUBCLASS_ACM,
CDC_PROTOCOL_NONE,
None,
)?;
writer.interface_alt(
self.comm_if,
DEFAULT_ALTERNATE_SETTING,
USB_CLASS_CDC,
CDC_SUBCLASS_ACM,
CDC_PROTOCOL_NONE,
self.comm_if_name.map(|n| n.0),
)?;
writer.write(
CS_INTERFACE,
&[
CDC_TYPE_HEADER, // bDescriptorSubtype
0x10,
0x01, // bcdCDC (1.10)
],
)?;
writer.write(
CS_INTERFACE,
&[
CDC_TYPE_ACM, // bDescriptorSubtype
0x00, // bmCapabilities
],
)?;
writer.write(
CS_INTERFACE,
&[
CDC_TYPE_UNION, // bDescriptorSubtype
self.comm_if.into(), // bControlInterface
self.data_if.into(), // bSubordinateInterface
],
)?;
writer.write(
CS_INTERFACE,
&[
CDC_TYPE_CALL_MANAGEMENT, // bDescriptorSubtype
0x00, // bmCapabilities
self.data_if.into(), // bDataInterface
],
)?;
writer.endpoint(&self.comm_ep)?;
writer.interface_alt(
self.data_if,
DEFAULT_ALTERNATE_SETTING,
USB_CLASS_CDC_DATA,
0x00,
0x00,
self.data_if_name.map(|n| n.0),
)?;
writer.endpoint(&self.write_ep)?;
writer.endpoint(&self.read_ep)?;
Ok(())
}
fn get_string(&self, index: StringIndex, _lang_id: LangID) -> Option<&str> {
match (self.comm_if_name, self.data_if_name) {
(Some((i, s)), _) if i == index => Some(s),
(_, Some((i, s))) if i == index => Some(s),
_ => None,
}
}
fn reset(&mut self) {
self.line_coding = LineCoding::default();
self.dtr = false;
self.rts = false;
}
fn control_in(&mut self, xfer: ControlIn<B>) {
let req = xfer.request();
if !(req.request_type == control::RequestType::Class
&& req.recipient == control::Recipient::Interface
&& req.index == u8::from(self.comm_if) as u16)
{
return;
}
match req.request {
// REQ_GET_ENCAPSULATED_COMMAND is not really supported - it will be rejected below.
REQ_GET_LINE_CODING if req.length == 7 => {
xfer.accept(|data| {
data[0..4].copy_from_slice(&self.line_coding.data_rate.to_le_bytes());
data[4] = self.line_coding.stop_bits as u8;
data[5] = self.line_coding.parity_type as u8;
data[6] = self.line_coding.data_bits;
Ok(7)
})
.ok();
}
_ => {
xfer.reject().ok();
}
}
}
fn control_out(&mut self, xfer: ControlOut<B>) {
let req = xfer.request();
if !(req.request_type == control::RequestType::Class
&& req.recipient == control::Recipient::Interface
&& req.index == u8::from(self.comm_if) as u16)
{
return;
}
match req.request {
REQ_SEND_ENCAPSULATED_COMMAND => {
// We don't actually support encapsulated commands but pretend we do for standards
// compatibility.
xfer.accept().ok();
}
REQ_SET_LINE_CODING if xfer.data().len() >= 7 => {
self.line_coding.data_rate =
u32::from_le_bytes(xfer.data()[0..4].try_into().unwrap());
self.line_coding.stop_bits = xfer.data()[4].into();
self.line_coding.parity_type = xfer.data()[5].into();
self.line_coding.data_bits = xfer.data()[6];
xfer.accept().ok();
}
REQ_SET_CONTROL_LINE_STATE => {
self.dtr = (req.value & 0x0001) != 0;
self.rts = (req.value & 0x0002) != 0;
xfer.accept().ok();
}
_ => {
xfer.reject().ok();
}
};
}
}
/// Number of stop bits for LineCoding
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum StopBits {
/// 1 stop bit
One = 0,
/// 1.5 stop bits
OnePointFive = 1,
/// 2 stop bits
Two = 2,
}
impl From<u8> for StopBits {
    fn from(value: u8) -> Self {
        match value {
            0 => StopBits::One,
            1 => StopBits::OnePointFive,
            2 => StopBits::Two,
            // Out-of-range values fall back to one stop bit.
            _ => StopBits::One,
        }
    }
}
/// Parity for LineCoding
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum ParityType {
None = 0,
Odd = 1,
Even = 2,
Mark = 3,
Space = 4,
}
impl From<u8> for ParityType {
fn from(value: u8) -> Self {
if value <= 4 {
unsafe { mem::transmute(value) }
} else {
ParityType::None
}
}
}
/// Line coding parameters
///
/// This is provided by the host for specifying the standard UART parameters such as baud rate. Can
/// be ignored if you don't plan to interface with a physical UART.
pub struct LineCoding {
stop_bits: StopBits,
data_bits: u8,
parity_type: ParityType,
data_rate: u32,
}
impl LineCoding {
/// Gets the number of stop bits for UART communication.
pub fn stop_bits(&self) -> StopBits {
self.stop_bits
}
/// Gets the number of data bits for UART communication.
pub fn data_bits(&self) -> u8 {
self.data_bits
}
/// Gets the parity type for UART communication.
pub fn parity_type(&self) -> ParityType {
self.parity_type
}
/// Gets the data rate in bits per second for UART communication.
pub fn data_rate(&self) -> u32 {
self.data_rate
}
}
impl Default for LineCoding {
fn default() -> Self {
LineCoding {
stop_bits: StopBits::One,
data_bits: 8,
parity_type: ParityType::None,
data_rate: 9_600,
}
}
}
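// --- Illustrative sketch (editor addition, not part of the upstream crate) ---
// The 7-byte wire format parsed in `control_out` above, written out as a
// standalone helper for clarity: little-endian dwDTERate, then bCharFormat,
// bParityType, bDataBits, per the CDC PSTN line-coding layout.
#[allow(dead_code)]
fn parse_line_coding(data: &[u8; 7]) -> LineCoding {
    LineCoding {
        data_rate: u32::from_le_bytes([data[0], data[1], data[2], data[3]]),
        stop_bits: data[4].into(),
        parity_type: data[5].into(),
        data_bits: data[6],
    }
}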
| rust | MIT | 822ae08a8a31f3be4a47eaf1a0b1149b4c4e5892 | 2026-01-04T20:24:12.282850Z | false |
rust-embedded-community/usbd-serial | https://github.com/rust-embedded-community/usbd-serial/blob/822ae08a8a31f3be4a47eaf1a0b1149b4c4e5892/src/serial_port.rs | src/serial_port.rs | use crate::buffer::{Buffer, DefaultBufferStore};
use crate::cdc_acm::*;
use core::borrow::BorrowMut;
use core::slice;
use usb_device::Result;
use usb_device::class_prelude::*;
use usb_device::descriptor::lang_id::LangID;
/// USB (CDC-ACM) serial port with built-in buffering to implement stream-like behavior.
///
/// The RS and WS type arguments specify the storage for the read/write buffers, respectively. By
/// default an internal 128 byte buffer is used for both directions.
pub struct SerialPort<'a, B, RS = DefaultBufferStore, WS = DefaultBufferStore>
where
B: UsbBus,
RS: BorrowMut<[u8]>,
WS: BorrowMut<[u8]>,
{
inner: CdcAcmClass<'a, B>,
pub(crate) read_buf: Buffer<RS>,
pub(crate) write_buf: Buffer<WS>,
write_state: WriteState,
}
/// If this many full size packets have been sent in a row, a short packet will be sent so that the
/// host sees the data in a timely manner.
const SHORT_PACKET_INTERVAL: usize = 10;
/// Keeps track of the type of the last written packet.
enum WriteState {
/// No packets in-flight
Idle,
/// Short packet currently in-flight
Short,
    /// Full packet currently in-flight. A full packet must be followed by a short packet for the host
/// OS to see the transaction. The data is the number of subsequent full packets sent so far. A
/// short packet is forced every SHORT_PACKET_INTERVAL packets so that the OS sees data in a
/// timely manner.
Full(usize),
}
impl<'a, B> SerialPort<'a, B>
where
B: UsbBus,
{
/// Creates a new USB serial port with the provided UsbBus and 128 byte read/write buffers.
pub fn new<'alloc: 'a>(
alloc: &'alloc UsbBusAllocator<B>,
) -> SerialPort<'a, B, DefaultBufferStore, DefaultBufferStore> {
Self::new_with_interface_names(alloc, None, None)
}
/// Same as SerialPort::new, but allows specifying the names of the interfaces
pub fn new_with_interface_names<'alloc: 'a>(
alloc: &'alloc UsbBusAllocator<B>,
comm_if_name: Option<&'static str>,
data_if_name: Option<&'static str>,
) -> SerialPort<'a, B, DefaultBufferStore, DefaultBufferStore> {
SerialPort::new_with_store_and_interface_names(
alloc,
DefaultBufferStore::default(),
DefaultBufferStore::default(),
comm_if_name,
data_if_name,
)
}
}
impl<'a, B, RS, WS> SerialPort<'a, B, RS, WS>
where
B: UsbBus,
RS: BorrowMut<[u8]>,
WS: BorrowMut<[u8]>,
{
/// Creates a new USB serial port with the provided UsbBus and buffer backing stores.
pub fn new_with_store<'alloc: 'a>(
alloc: &'alloc UsbBusAllocator<B>,
read_store: RS,
write_store: WS,
) -> SerialPort<'a, B, RS, WS> {
Self::new_with_store_and_interface_names(alloc, read_store, write_store, None, None)
}
/// Creates a new USB serial port with the provided UsbBus and buffer backing stores.
pub fn new_with_store_and_interface_names<'alloc: 'a>(
alloc: &'alloc UsbBusAllocator<B>,
read_store: RS,
write_store: WS,
comm_if_name: Option<&'static str>,
data_if_name: Option<&'static str>,
) -> SerialPort<'a, B, RS, WS> {
SerialPort {
inner: CdcAcmClass::new_with_interface_names(alloc, 64, comm_if_name, data_if_name),
read_buf: Buffer::new(read_store),
write_buf: Buffer::new(write_store),
write_state: WriteState::Idle,
}
}
/// Gets the current line coding.
pub fn line_coding(&self) -> &LineCoding {
self.inner.line_coding()
}
/// Gets the DTR (data terminal ready) state
pub fn dtr(&self) -> bool {
self.inner.dtr()
}
/// Gets the RTS (request to send) state
pub fn rts(&self) -> bool {
self.inner.rts()
}
/// Writes bytes from `data` into the port and returns the number of bytes written.
///
/// # Errors
///
/// * [`WouldBlock`](usb_device::UsbError::WouldBlock) - No bytes could be written because the
/// buffers are full.
///
/// Other errors from `usb-device` may also be propagated.
pub fn write(&mut self, data: &[u8]) -> Result<usize> {
let count = self.write_buf.write(data);
match self.flush() {
Ok(_) | Err(UsbError::WouldBlock) => {}
Err(err) => {
return Err(err);
}
};
if count == 0 {
Err(UsbError::WouldBlock)
} else {
Ok(count)
}
}
    /// Polls the OUT endpoint and tries to move any received packet into the read buffer.
pub(crate) fn poll(&mut self) -> Result<()> {
let Self {
inner, read_buf, ..
} = self;
read_buf.write_all(inner.max_packet_size() as usize, |buf_data| {
match inner.read_packet(buf_data) {
Ok(c) => Ok(c),
Err(UsbError::WouldBlock) => Ok(0),
Err(err) => Err(err),
}
})?;
Ok(())
}
/// Reads bytes from the port into `data` and returns the number of bytes read.
///
/// # Errors
///
/// * [`WouldBlock`](usb_device::UsbError::WouldBlock) - No bytes available for reading.
///
/// Other errors from `usb-device` may also be propagated.
pub fn read(&mut self, data: &mut [u8]) -> Result<usize> {
// Try to read a packet from the endpoint and write it into the buffer if it fits. Propagate
// errors except `WouldBlock`.
self.poll()?;
if self.read_buf.available_read() == 0 {
// No data available for reading.
return Err(UsbError::WouldBlock);
}
self.read_buf.read(data.len(), |buf_data| {
data[..buf_data.len()].copy_from_slice(buf_data);
Ok(buf_data.len())
})
}
    /// Sends as much as possible of the current write buffer. Returns `Ok` if all previously
    /// written data has been handed off to the hardware buffers, `Err(WouldBlock)` if there is
    /// still data remaining, and another error if sending data to the host fails. Note that even
    /// if this method returns `Ok`, data may still be in hardware buffers on either side.
pub fn flush(&mut self) -> Result<()> {
let buf = &mut self.write_buf;
let inner = &mut self.inner;
let write_state = &mut self.write_state;
let full_count = match *write_state {
WriteState::Full(c) => c,
_ => 0,
};
if buf.available_read() > 0 {
// There's data in the write_buf, so try to write that first.
let max_write_size = if full_count >= SHORT_PACKET_INTERVAL {
inner.max_packet_size() - 1
} else {
inner.max_packet_size()
} as usize;
buf.read(max_write_size, |buf_data| {
// This may return WouldBlock which will be propagated.
inner.write_packet(buf_data)?;
*write_state = if buf_data.len() == inner.max_packet_size() as usize {
WriteState::Full(full_count + 1)
} else {
WriteState::Short
};
Ok(buf_data.len())
})?;
Err(UsbError::WouldBlock)
} else if full_count != 0 {
// Write a ZLP to complete the transaction if there's nothing else to write and the last
// packet was a full one. This may return WouldBlock which will be propagated.
inner.write_packet(&[])?;
*write_state = WriteState::Short;
Err(UsbError::WouldBlock)
} else {
// No data left in writer_buf.
*write_state = WriteState::Idle;
Ok(())
}
}
}
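// --- Illustrative usage sketch (editor addition, not part of the upstream crate) ---
// A typical caller-side loop: write as much as fits in the internal buffer, then
// retry on `WouldBlock` until everything is handed to the hardware. Assumes
// something else (e.g. a USB interrupt) keeps driving `usb_dev.poll(...)` so the
// host can actually drain the endpoint.
#[allow(dead_code)]
fn write_all_blocking<B: UsbBus, RS: BorrowMut<[u8]>, WS: BorrowMut<[u8]>>(
    serial: &mut SerialPort<'_, B, RS, WS>,
    mut data: &[u8],
) -> Result<()> {
    while !data.is_empty() {
        match serial.write(data) {
            Ok(n) => data = &data[n..],
            // Buffer full: spin until the host drains it.
            Err(UsbError::WouldBlock) => {}
            Err(e) => return Err(e),
        }
    }
    Ok(())
}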
impl<B, RS, WS> UsbClass<B> for SerialPort<'_, B, RS, WS>
where
B: UsbBus,
RS: BorrowMut<[u8]>,
WS: BorrowMut<[u8]>,
{
fn get_configuration_descriptors(&self, writer: &mut DescriptorWriter) -> Result<()> {
self.inner.get_configuration_descriptors(writer)
}
fn get_string(&self, index: StringIndex, lang_id: LangID) -> Option<&str> {
self.inner.get_string(index, lang_id)
}
fn reset(&mut self) {
self.inner.reset();
self.read_buf.clear();
self.write_buf.clear();
self.write_state = WriteState::Idle;
}
fn endpoint_in_complete(&mut self, addr: EndpointAddress) {
if addr == self.inner.write_ep().address() {
self.flush().ok();
}
}
fn control_in(&mut self, xfer: ControlIn<B>) {
self.inner.control_in(xfer);
}
fn control_out(&mut self, xfer: ControlOut<B>) {
self.inner.control_out(xfer);
}
}
impl<B, RS, WS> embedded_hal::serial::Write<u8> for SerialPort<'_, B, RS, WS>
where
B: UsbBus,
RS: BorrowMut<[u8]>,
WS: BorrowMut<[u8]>,
{
type Error = UsbError;
fn write(&mut self, word: u8) -> nb::Result<(), Self::Error> {
match <SerialPort<'_, B, RS, WS>>::write(self, slice::from_ref(&word)) {
Ok(0) | Err(UsbError::WouldBlock) => Err(nb::Error::WouldBlock),
Ok(_) => Ok(()),
Err(err) => Err(nb::Error::Other(err)),
}
}
fn flush(&mut self) -> nb::Result<(), Self::Error> {
match <SerialPort<'_, B, RS, WS>>::flush(self) {
Err(UsbError::WouldBlock) => Err(nb::Error::WouldBlock),
Ok(_) => Ok(()),
Err(err) => Err(nb::Error::Other(err)),
}
}
}
impl<B, RS, WS> embedded_hal::serial::Read<u8> for SerialPort<'_, B, RS, WS>
where
B: UsbBus,
RS: BorrowMut<[u8]>,
WS: BorrowMut<[u8]>,
{
type Error = UsbError;
fn read(&mut self) -> nb::Result<u8, Self::Error> {
let mut buf: u8 = 0;
match <SerialPort<'_, B, RS, WS>>::read(self, slice::from_mut(&mut buf)) {
Ok(0) | Err(UsbError::WouldBlock) => Err(nb::Error::WouldBlock),
Ok(_) => Ok(buf),
Err(err) => Err(nb::Error::Other(err)),
}
}
}
| rust | MIT | 822ae08a8a31f3be4a47eaf1a0b1149b4c4e5892 | 2026-01-04T20:24:12.282850Z | false |
rust-embedded-community/usbd-serial | https://github.com/rust-embedded-community/usbd-serial/blob/822ae08a8a31f3be4a47eaf1a0b1149b4c4e5892/src/buffer.rs | src/buffer.rs | use core::borrow::{Borrow, BorrowMut};
use core::cmp;
/// A mediocre buffer that allows for block access without extra copies but memmoves more than
/// necessary.
///
/// `wpos` points to the first byte that can be written; `rpos` points at the next byte that can be read.
///
/// Invariants: 0 <= rpos <= wpos <= data.len()
pub struct Buffer<S: BorrowMut<[u8]>> {
store: S,
rpos: usize,
wpos: usize,
}
impl<S: BorrowMut<[u8]>> Buffer<S> {
pub fn new(store: S) -> Self {
Self {
store,
rpos: 0,
wpos: 0,
}
}
// Clears the buffer
pub fn clear(&mut self) {
self.rpos = 0;
self.wpos = 0;
}
    // Number of bytes available for reading
pub fn available_read(&self) -> usize {
self.wpos - self.rpos
}
    // Number of bytes of space available for writing
pub fn available_write(&self) -> usize {
self.available_write_without_discard() + self.rpos
}
fn available_write_without_discard(&self) -> usize {
self.store.borrow().len() - self.wpos
}
// Writes as much as possible of data to the buffer and returns the number of bytes written
pub fn write(&mut self, data: &[u8]) -> usize {
if data.len() > self.available_write_without_discard() && self.rpos > 0 {
// data doesn't fit in already available space, and there is data to discard
self.discard_already_read_data();
}
let count = cmp::min(self.available_write_without_discard(), data.len());
if count == 0 {
// Buffer is full (or data is empty)
return 0;
}
self.store.borrow_mut()[self.wpos..self.wpos + count].copy_from_slice(&data[..count]);
self.wpos += count;
count
}
    // Reserves max_count bytes of space for writing, and passes a slice pointing to them to a
    // closure for writing. The closure should return the number of bytes actually written and is
    // allowed to write less than max_count. If the callback returns an error, any written data is
    // ignored.
pub fn write_all<E>(
&mut self,
max_count: usize,
f: impl FnOnce(&mut [u8]) -> Result<usize, E>,
) -> Result<usize, E> {
if max_count > self.available_write_without_discard() {
// Data doesn't fit in currently available space
if max_count > self.available_write() {
// Data doesn't fit even if we discard already read data
return Ok(0);
}
self.discard_already_read_data();
}
assert!(self.available_write_without_discard() >= max_count);
f(&mut self.store.borrow_mut()[self.wpos..self.wpos + max_count]).map(|count| {
self.wpos += count;
count
})
}
    // Takes up to max_count bytes from the buffer and passes a slice pointing to them to a closure
    // for reading. The closure should return the number of bytes actually read and is allowed to
    // read less than max_count. If the callback returns an error, the data is not discarded from
    // the buffer.
pub fn read<E>(
&mut self,
max_count: usize,
f: impl FnOnce(&[u8]) -> Result<usize, E>,
) -> Result<usize, E> {
let count = cmp::min(max_count, self.available_read());
f(&self.store.borrow()[self.rpos..self.rpos + count]).map(|count| {
self.rpos += count;
count
})
}
    fn discard_already_read_data(&mut self) {
        // Move the unread region to the front of the store; `copy_within` is
        // memmove-like, so the overlapping ranges are safe.
        let (rpos, wpos) = (self.rpos, self.wpos);
        self.store.borrow_mut().copy_within(rpos..wpos, 0);
        self.wpos -= self.rpos;
        self.rpos = 0;
    }
}
/// Default backing store for the mediocre buffer
pub struct DefaultBufferStore([u8; 128]);
impl Default for DefaultBufferStore {
fn default() -> Self {
Self([0u8; 128])
}
}
impl Borrow<[u8]> for DefaultBufferStore {
fn borrow(&self) -> &[u8] {
&self.0
}
}
impl BorrowMut<[u8]> for DefaultBufferStore {
fn borrow_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
#[cfg(test)]
mod tests {
use core::convert::Infallible;
extern crate std;
const DATA: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
const LEN: usize = 5;
type Buf = crate::buffer::Buffer<[u8; LEN]>;
#[test]
fn write() {
let mut b = Buf::new([0; LEN]);
assert_eq!(b.write(&DATA[0..2]), 2);
assert_eq!(b.available_write(), LEN - 2);
assert_eq!(b.available_read(), 2);
assert_eq!(b.write(&DATA[0..5]), 3);
assert_eq!(b.available_write(), 0);
assert_eq!(b.available_read(), LEN);
}
#[test]
fn read() {
let mut b = Buf::new([0; LEN]);
assert_eq!(b.write(&DATA[0..4]), 4);
b.read(3, |data| {
assert_eq!(data, &DATA[0..3]);
Ok::<usize, Infallible>(3)
})
.unwrap();
b.read(1, |data| {
assert_eq!(data, &DATA[3..4]);
Ok::<usize, Infallible>(1)
})
.unwrap();
b.read(1, |data| {
assert_eq!(data, &[]);
Ok::<usize, Infallible>(1)
})
.unwrap();
}
#[test]
fn clear() {
let mut b = Buf::new([0; LEN]);
b.write(&DATA[0..2]);
b.clear();
assert_eq!(b.available_write(), LEN);
assert_eq!(b.available_read(), 0);
}
#[test]
fn discard() {
let mut b = Buf::new([0; LEN]);
assert_eq!(b.write(&DATA[0..4]), 4);
b.read(2, |data| {
assert_eq!(data, &DATA[0..2]);
Ok::<usize, Infallible>(2)
})
.unwrap();
assert_eq!(b.write(&DATA[4..7]), 3);
b.read(5, |data| {
assert_eq!(data, &DATA[2..7]);
Ok::<usize, Infallible>(5)
})
.unwrap();
assert_eq!(b.available_read(), 0);
}
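    // Editor addition: exercises `write_all`'s reserve-then-commit contract —
    // the closure may fill fewer bytes than it reserved, and an over-large
    // reservation yields Ok(0) without invoking the closure.
    #[test]
    fn write_all_partial() {
        let mut b = Buf::new([0; LEN]);
        // Reserve 4 bytes but only commit 2 of them.
        assert_eq!(
            b.write_all(4, |buf| {
                buf[..2].copy_from_slice(&DATA[0..2]);
                Ok::<usize, Infallible>(2)
            })
            .unwrap(),
            2
        );
        assert_eq!(b.available_read(), 2);
        // Reserving more than the whole store can ever hold returns Ok(0).
        assert_eq!(
            b.write_all(LEN + 1, |_| Ok::<usize, Infallible>(0)).unwrap(),
            0
        );
    }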
}
| rust | MIT | 822ae08a8a31f3be4a47eaf1a0b1149b4c4e5892 | 2026-01-04T20:24:12.282850Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/lib.rs | src/lib.rs | pub mod chat_template;
pub mod models;
pub mod position_embed;
pub mod tokenizer;
pub mod utils;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/api.rs | src/api.rs | use std::pin::pin;
use std::sync::{Arc, OnceLock};
use aha::models::{GenerateModel, ModelInstance, WhichModel, load_model};
use aha::utils::string_to_static_str;
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use rocket::futures::StreamExt;
use rocket::serde::json::Json;
use rocket::{
Request,
futures::Stream,
http::{ContentType, Status},
post,
response::{Responder, stream::TextStream},
};
use tokio::sync::RwLock;
static MODEL: OnceLock<Arc<RwLock<ModelInstance<'static>>>> = OnceLock::new();
pub fn init(model_type: WhichModel, path: String) -> anyhow::Result<()> {
let model_path = string_to_static_str(path);
let model = load_model(model_type, model_path)?;
MODEL.get_or_init(|| Arc::new(RwLock::new(model)));
Ok(())
}
pub(crate) enum Response<R: Stream<Item = String> + Send> {
Stream(TextStream<R>),
Text(String),
Error(String),
}
impl<'r, 'o: 'r, R> Responder<'r, 'o> for Response<R>
where
R: Stream<Item = String> + Send + 'o,
'r: 'o,
{
fn respond_to(self, req: &'r Request<'_>) -> rocket::response::Result<'o> {
match self {
Response::Stream(stream) => stream.respond_to(req),
Response::Text(text) => text.respond_to(req),
Response::Error(e) => {
let mut res = rocket::response::Response::new();
res.set_status(Status::InternalServerError);
res.set_header(ContentType::JSON);
res.set_sized_body(e.len(), std::io::Cursor::new(e));
Ok(res)
}
}
}
}
#[post("/completions", data = "<req>")]
pub(crate) async fn chat(
req: Json<ChatCompletionParameters>,
) -> (ContentType, Response<impl Stream<Item = String> + Send>) {
match req.stream {
Some(false) => {
            let response = {
                // Panics with a clear message if `init` was never called.
                let model_ref = MODEL.get().cloned().expect("model not init");
                model_ref.write().await.generate(req.into_inner())
            };
match response {
Ok(res) => {
let response_str = serde_json::to_string(&res).unwrap();
(ContentType::Text, Response::Text(response_str))
}
Err(e) => (ContentType::Text, Response::Error(e.to_string())),
}
}
_ => {
let text_stream = TextStream! {
            let model_ref = MODEL.get().cloned().expect("model not init");
let mut guard = model_ref.write().await;
let stream_result = guard.generate_stream(req.into_inner());
match stream_result {
Ok(stream) => {
let mut stream = pin!(stream);
while let Some(result) = stream.next().await {
match result {
Ok(chunk) => {
if let Ok(json_str) = serde_json::to_string(&chunk) {
yield format!("data: {}\n\n", json_str);
}
}
Err(e) => {
yield format!("data: {{\"error\": \"{}\"}}\n\n", e);
break;
}
}
}
yield "data: [DONE]\n\n".to_string();
},
Err(e) => {
yield format!("event: error\ndata: {}\n\n", e.to_string());
}
}
};
(ContentType::EventStream, Response::Stream(text_stream))
}
}
}
#[post("/remove_background", data = "<req>")]
pub(crate) async fn remove_background(req: Json<ChatCompletionParameters>) -> (Status, String) {
    let response = {
        let model_ref = MODEL.get().cloned().expect("model not init");
        model_ref.write().await.generate(req.into_inner())
    };
match response {
Ok(res) => {
let response_str = serde_json::to_string(&res).unwrap();
(Status::Ok, response_str)
}
Err(e) => (Status::InternalServerError, e.to_string()),
}
}
#[post("/speech", data = "<req>")]
pub(crate) async fn speech(req: Json<ChatCompletionParameters>) -> (Status, String) {
    let response = {
        let model_ref = MODEL.get().cloned().expect("model not init");
        model_ref.write().await.generate(req.into_inner())
    };
match response {
Ok(res) => {
let response_str = serde_json::to_string(&res).unwrap();
(Status::Ok, response_str)
}
Err(e) => (Status::InternalServerError, e.to_string()),
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/main.rs | src/main.rs | use std::{net::IpAddr, str::FromStr, time::Duration};
use aha::{models::WhichModel, utils::get_default_save_dir};
use clap::Parser;
use modelscope::ModelScope;
use rocket::{
Config,
data::{ByteUnit, Limits},
routes,
};
use tokio::time::sleep;
use crate::api::init;
mod api;
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
#[arg(short, long, default_value = "127.0.0.1")]
address: String,
#[arg(short, long, default_value_t = 10100)]
port: u16,
#[arg(short, long)]
model: WhichModel,
#[arg(long)]
weight_path: Option<String>,
#[arg(long)]
save_dir: Option<String>,
#[arg(long)]
download_retries: Option<u32>,
}
async fn download_model(model_id: &str, save_dir: &str, max_retries: u32) -> anyhow::Result<()> {
let mut attempts = 0u32;
loop {
attempts += 1;
println!(
"Attempting to download model (attempt {}/{})",
attempts, max_retries
);
match ModelScope::download(model_id, save_dir).await {
Ok(()) => {
println!("Model downloaded successfully");
return Ok(());
}
Err(e) => {
if attempts >= max_retries {
return Err(anyhow::anyhow!(
"Failed to download model after {} attempts. Last error: {}",
max_retries,
e
));
}
println!(
"Download failed (attempt {}): {}. Retrying in 2 seconds...",
attempts, e
);
sleep(Duration::from_secs(2)).await;
}
}
}
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let args = Args::parse();
let model_id = match &args.model {
WhichModel::MiniCPM4_0_5B => "OpenBMB/MiniCPM4-0.5B",
WhichModel::Qwen2_5vl3B => "Qwen/Qwen2.5-VL-3B-Instruct",
WhichModel::Qwen2_5vl7B => "Qwen/Qwen2.5-VL-7B-Instruct",
WhichModel::Qwen3vl2B => "Qwen/Qwen3-VL-2B-Instruct",
WhichModel::Qwen3vl4B => "Qwen/Qwen3-VL-4B-Instruct",
WhichModel::Qwen3vl8B => "Qwen/Qwen3-VL-8B-Instruct",
WhichModel::Qwen3vl32B => "Qwen/Qwen3-VL-32B-Instruct",
WhichModel::DeepSeekOCR => "deepseek-ai/DeepSeek-OCR",
WhichModel::HunyuanOCR => "Tencent-Hunyuan/HunyuanOCR",
WhichModel::PaddleOCRVL => "PaddlePaddle/PaddleOCR-VL",
WhichModel::RMBG2_0 => "AI-ModelScope/RMBG-2.0",
WhichModel::VoxCPM => "OpenBMB/VoxCPM-0.5B",
WhichModel::VoxCPM1_5 => "OpenBMB/VoxCPM1.5",
};
let model_path = match &args.weight_path {
Some(path) => path.clone(),
None => {
let save_dir = match &args.save_dir {
Some(dir) => dir.clone(),
None => get_default_save_dir().expect("Failed to get home directory"),
};
let max_retries = args.download_retries.unwrap_or(3);
download_model(model_id, &save_dir, max_retries).await?;
save_dir + "/" + model_id
}
};
init(args.model, model_path)?;
start_http_server(&args).await?;
Ok(())
}
pub(crate) async fn start_http_server(args: &Args) -> anyhow::Result<()> {
let mut builder = rocket::build().configure(Config {
address: IpAddr::from_str(&args.address)?,
port: args.port,
limits: Limits::default()
.limit("string", ByteUnit::Mebibyte(5))
.limit("json", ByteUnit::Mebibyte(5))
.limit("data-form", ByteUnit::Mebibyte(100))
.limit("file", ByteUnit::Mebibyte(100)),
..Config::default()
});
builder = builder.mount("/chat", routes![api::chat]);
// /images/remove_background
builder = builder.mount("/images", routes![api::remove_background]);
    // /audio/speech
builder = builder.mount("/audio", routes![api::speech]);
builder.launch().await?;
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/chat_template/mod.rs | src/chat_template/mod.rs | use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::{Result, anyhow};
use minijinja::{Environment, Value as MiniJinjaValue, context};
use crate::utils::string_to_static_str;
pub fn get_template(path: String) -> Result<String> {
let tokenizer_config_file = path.clone() + "/tokenizer_config.json";
assert!(
std::path::Path::new(&tokenizer_config_file).exists(),
"tokenizer_config.json not exists in model path"
);
let tokenizer_config: serde_json::Value =
serde_json::from_slice(&std::fs::read(tokenizer_config_file)?)
.map_err(|e| anyhow!(format!("load tokenizer_config file error:{}", e)))?;
    let chat_template = tokenizer_config["chat_template"]
        .as_str()
        .ok_or(anyhow!("chat_template is not a string"))?;
    // Fix problematic lines in the template
    let fixed_template = chat_template
        .replace(
            "message.content.startswith('<tool_response>')",
            "message.content is startingwith('<tool_response>')", // use minijinja's `is startingwith` test instead
        )
        .replace(
            "message.content.endswith('</tool_response>')",
            "message.content is endingwith('</tool_response>')", // use minijinja's `is endingwith` test instead
        )
        .replace(
            "content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n')",
            "((content | split('</think>'))[0] | rstrip('\\n') | split('<think>'))[-1] | lstrip('\\n')", // use the custom split/rstrip/lstrip filters instead
        )
        .replace(
            "content.split('</think>')[-1].lstrip('\\n')",
            "(content | split('</think>'))[-1] | lstrip('\\n')", // use the custom filters instead
        )
        .replace(
            "reasoning_content.strip('\\n')",
            "reasoning_content | strip('\\n')", // use the custom filters instead
        )
        .replace(
            "content.lstrip('\\n')",
            "content | lstrip('\\n')", // use the custom filters instead
        );
Ok(fixed_template)
}
pub struct ChatTemplate<'a> {
env: Environment<'a>,
}
impl<'a> ChatTemplate<'a> {
pub fn init(path: &str) -> Result<Self> {
let path: String = path.to_string();
if !std::path::Path::new(&path).exists() {
return Err(anyhow!("model path not found"));
}
let template = match get_template(path.clone()) {
Ok(template) => template,
Err(e) => {
let jinja_path = path + "/chat_template.jinja";
if !std::path::Path::new(&jinja_path).exists() {
return Err(anyhow!(
"get_template err {e} and chat_template.jinja not found"
));
}
std::fs::read_to_string(&jinja_path)
.map_err(|e| anyhow!("Failed to read chat_template.jinja: {}", e))?
}
};
let template = string_to_static_str(template);
        // Load a jinja environment to process the chat_template
        let mut env = Environment::new();
        // Register custom filters
        env.add_filter("tojson", |v: MiniJinjaValue| {
            serde_json::to_string(&v).unwrap()
        });
        env.add_filter("split", |s: String, delimiter: String| {
            s.split(&delimiter)
                .map(|s| s.to_string())
                .collect::<Vec<String>>()
        });
        // lstrip filter
        env.add_filter("lstrip", |s: String, chars: Option<String>| match chars {
            Some(chars_str) => s.trim_start_matches(chars_str.as_str()).to_string(),
            None => s.trim_start().to_string(),
        });
        // rstrip filter
        env.add_filter("rstrip", |s: String, chars: Option<String>| match chars {
            Some(chars_str) => s.trim_end_matches(chars_str.as_str()).to_string(),
            None => s.trim_end().to_string(),
        });
        // strip filter (both ends); the template rewrite in get_template references it
        env.add_filter("strip", |s: String, chars: Option<String>| match chars {
            Some(chars_str) => s
                .trim_start_matches(chars_str.as_str())
                .trim_end_matches(chars_str.as_str())
                .to_string(),
            None => s.trim().to_string(),
        });
        let _ = env.add_template("chat", template);
Ok(Self { env })
}
pub fn apply_chat_template(&self, messages: &ChatCompletionParameters) -> Result<String> {
let context = context! {
messages => &messages.messages,
tools => &messages.tools.as_ref(),
add_generation_prompt => true,
};
let template = self
.env
.get_template("chat")
.map_err(|e| anyhow!(format!("render template error {}", e)))?;
let message_str = template
.render(context)
.map_err(|e| anyhow!(format!("render template error {}", e)))?;
Ok(message_str)
}
}
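// --- Illustrative sketch (editor addition): checks that the custom filters above
// emulate the Python string methods the upstream chat templates rely on. Uses
// minijinja's `render_str`; negative list indexing is assumed to work, as the
// rewritten templates themselves depend on it.
#[cfg(test)]
mod tests {
    use minijinja::{Environment, context};

    #[test]
    fn split_and_lstrip_filters() {
        let mut env = Environment::new();
        env.add_filter("split", |s: String, delimiter: String| {
            s.split(&delimiter)
                .map(|s| s.to_string())
                .collect::<Vec<String>>()
        });
        env.add_filter("lstrip", |s: String, chars: Option<String>| match chars {
            Some(chars_str) => s.trim_start_matches(chars_str.as_str()).to_string(),
            None => s.trim_start().to_string(),
        });
        let out = env
            .render_str(
                "{{ (content | split(','))[-1] | lstrip }}",
                context! { content => "a,  b" },
            )
            .unwrap();
        assert_eq!(out, "b");
    }
}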
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/tokenizer/mod.rs | src/tokenizer/mod.rs | use anyhow::{Ok, Result, anyhow};
use candle_core::{Device, Tensor};
use tokenizers::Tokenizer;
pub struct TokenizerModel {
pub tokenizer: Tokenizer,
}
impl TokenizerModel {
pub fn init(path: &str) -> Result<Self> {
let path = path.to_string();
assert!(
std::path::Path::new(&path).exists(),
"model path file not exists"
);
let tokenizer_file = path.clone() + "/tokenizer.json";
assert!(
std::path::Path::new(&tokenizer_file).exists(),
"tokenizer.json not exists in model path"
);
let tokenizer = Tokenizer::from_file(tokenizer_file)
.map_err(|e| anyhow!(format!("tokenizer from file error{}", e)))?;
Ok(Self { tokenizer })
}
pub fn text_encode_vec(&self, text: String, add_special_token: bool) -> Result<Vec<u32>> {
let token_id = self
.tokenizer
.encode(text, add_special_token)
.map_err(|e| anyhow!(format!("tokenizer encode error: {}", e)))?
.get_ids()
.to_vec();
Ok(token_id)
}
pub fn text_encode(&self, text: String, device: &Device) -> Result<Tensor> {
let token_id = self.text_encode_vec(text, true)?;
let token_tensor = Tensor::from_slice(&token_id, (1, token_id.len()), device)?;
Ok(token_tensor)
}
pub fn token_decode(&self, tokens: Vec<u32>) -> Result<String> {
let decode = self
.tokenizer
.decode(&tokens, true)
.map_err(|e| anyhow!(format!("tokenizer encode error{}", e)))?;
Ok(decode)
}
}
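// --- Illustrative sketch (editor addition): encode/decode round trip ---
// The directory is a hypothetical placeholder; any folder containing a
// HuggingFace-style tokenizer.json works.
#[allow(dead_code)]
fn roundtrip_example() -> Result<String> {
    let tok = TokenizerModel::init("/path/to/model")?;
    let ids = tok.text_encode_vec("hello world".to_string(), true)?;
    tok.token_decode(ids)
}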
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/mod.rs | src/models/mod.rs | pub mod common;
pub mod deepseek_ocr;
pub mod hunyuan_ocr;
pub mod minicpm4;
pub mod paddleocr_vl;
pub mod qwen2_5vl;
pub mod qwen3vl;
pub mod rmbg2_0;
pub mod voxcpm;
use aha_openai_dive::v1::resources::chat::{
ChatCompletionChunkResponse, ChatCompletionParameters, ChatCompletionResponse,
};
use anyhow::Result;
use rocket::futures::Stream;
use crate::models::{
deepseek_ocr::generate::DeepseekOCRGenerateModel,
hunyuan_ocr::generate::HunyuanOCRGenerateModel, minicpm4::generate::MiniCPMGenerateModel,
paddleocr_vl::generate::PaddleOCRVLGenerateModel, qwen2_5vl::generate::Qwen2_5VLGenerateModel,
qwen3vl::generate::Qwen3VLGenerateModel, rmbg2_0::generate::RMBG2_0Model,
voxcpm::generate::VoxCPMGenerate,
};
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
pub enum WhichModel {
#[value(name = "minicpm4-0.5b")]
MiniCPM4_0_5B,
#[value(name = "qwen2.5vl-3b")]
Qwen2_5vl3B,
#[value(name = "qwen2.5vl-7b")]
Qwen2_5vl7B,
#[value(name = "qwen3vl-2b")]
Qwen3vl2B,
#[value(name = "qwen3vl-4b")]
Qwen3vl4B,
#[value(name = "qwen3vl-8b")]
Qwen3vl8B,
#[value(name = "qwen3vl-32b")]
Qwen3vl32B,
#[value(name = "deepseek-ocr")]
DeepSeekOCR,
#[value(name = "hunyuan-ocr")]
HunyuanOCR,
#[value(name = "paddleocr-vl")]
PaddleOCRVL,
#[value(name = "RMBG2.0")]
RMBG2_0,
#[value(name = "voxcpm")]
VoxCPM,
#[value(name = "voxcpm1.5")]
VoxCPM1_5,
}
pub trait GenerateModel {
fn generate(&mut self, mes: ChatCompletionParameters) -> Result<ChatCompletionResponse>;
fn generate_stream(
&mut self,
mes: ChatCompletionParameters,
) -> Result<
Box<
dyn Stream<Item = Result<ChatCompletionChunkResponse, anyhow::Error>>
+ Send
+ Unpin
+ '_,
>,
>;
}
pub enum ModelInstance<'a> {
MiniCPM4(MiniCPMGenerateModel<'a>),
Qwen2_5VL(Qwen2_5VLGenerateModel<'a>),
Qwen3VL(Qwen3VLGenerateModel<'a>),
DeepSeekOCR(DeepseekOCRGenerateModel),
HunyuanOCR(HunyuanOCRGenerateModel<'a>),
PaddleOCRVL(Box<PaddleOCRVLGenerateModel<'a>>),
RMBG2_0(Box<RMBG2_0Model>),
VoxCPM(Box<VoxCPMGenerate>),
}
impl<'a> GenerateModel for ModelInstance<'a> {
fn generate(&mut self, mes: ChatCompletionParameters) -> Result<ChatCompletionResponse> {
match self {
ModelInstance::MiniCPM4(model) => model.generate(mes),
ModelInstance::Qwen2_5VL(model) => model.generate(mes),
ModelInstance::Qwen3VL(model) => model.generate(mes),
ModelInstance::DeepSeekOCR(model) => model.generate(mes),
ModelInstance::HunyuanOCR(model) => model.generate(mes),
ModelInstance::PaddleOCRVL(model) => model.generate(mes),
ModelInstance::RMBG2_0(model) => model.generate(mes),
ModelInstance::VoxCPM(model) => model.generate(mes),
}
}
fn generate_stream(
&mut self,
mes: ChatCompletionParameters,
) -> Result<
Box<
dyn Stream<Item = Result<ChatCompletionChunkResponse, anyhow::Error>>
+ Send
+ Unpin
+ '_,
>,
> {
match self {
ModelInstance::MiniCPM4(model) => model.generate_stream(mes),
ModelInstance::Qwen2_5VL(model) => model.generate_stream(mes),
ModelInstance::Qwen3VL(model) => model.generate_stream(mes),
ModelInstance::DeepSeekOCR(model) => model.generate_stream(mes),
ModelInstance::HunyuanOCR(model) => model.generate_stream(mes),
ModelInstance::PaddleOCRVL(model) => model.generate_stream(mes),
ModelInstance::RMBG2_0(model) => model.generate_stream(mes),
ModelInstance::VoxCPM(model) => model.generate_stream(mes),
}
}
}
pub fn load_model(model_type: WhichModel, path: &str) -> Result<ModelInstance<'_>> {
let model = match model_type {
WhichModel::MiniCPM4_0_5B => {
let model = MiniCPMGenerateModel::init(path, None, None)?;
ModelInstance::MiniCPM4(model)
}
        WhichModel::Qwen2_5vl3B | WhichModel::Qwen2_5vl7B => {
            let model = Qwen2_5VLGenerateModel::init(path, None, None)?;
            ModelInstance::Qwen2_5VL(model)
        }
        WhichModel::Qwen3vl2B
        | WhichModel::Qwen3vl4B
        | WhichModel::Qwen3vl8B
        | WhichModel::Qwen3vl32B => {
            let model = Qwen3VLGenerateModel::init(path, None, None)?;
            ModelInstance::Qwen3VL(model)
        }
WhichModel::DeepSeekOCR => {
let model = DeepseekOCRGenerateModel::init(path, None, None)?;
ModelInstance::DeepSeekOCR(model)
}
WhichModel::HunyuanOCR => {
let model = HunyuanOCRGenerateModel::init(path, None, None)?;
ModelInstance::HunyuanOCR(model)
}
WhichModel::PaddleOCRVL => {
let model = PaddleOCRVLGenerateModel::init(path, None, None)?;
ModelInstance::PaddleOCRVL(Box::new(model))
}
WhichModel::RMBG2_0 => {
let model = RMBG2_0Model::init(path, None, None)?;
ModelInstance::RMBG2_0(Box::new(model))
}
        WhichModel::VoxCPM | WhichModel::VoxCPM1_5 => {
            let model = VoxCPMGenerate::init(path, None, None)?;
            ModelInstance::VoxCPM(Box::new(model))
        }
};
Ok(model)
}
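// --- Illustrative sketch (editor addition): driving the enum through the trait ---
// `params` construction is elided; assume it was deserialized from an OpenAI-style
// JSON request, as the HTTP layer in this repo does.
#[allow(dead_code)]
fn run_once(instance: &mut ModelInstance<'_>, params: ChatCompletionParameters) -> Result<String> {
    let response = instance.generate(params)?;
    Ok(serde_json::to_string(&response)?)
}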
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/paddleocr_vl/config.rs | src/models/paddleocr_vl/config.rs | use candle_nn::Activation;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PaddleOCRVLConfig {
pub compression_ratio: f64,
pub head_dim: usize,
pub hidden_act: Activation,
pub hidden_dropout_prob: f64,
pub hidden_size: usize,
pub ignored_index: i32,
pub image_token_id: u32,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub max_sequence_length: Option<usize>,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub pad_token_id: u32,
pub rms_norm_eps: f64,
pub rope_scaling: PaddleOCRVLRopeScalingConfig,
pub rope_theta: f64,
pub sliding_window: Option<u32>,
pub tie_word_embeddings: bool,
pub torch_dtype: String,
pub use_bias: bool,
pub use_cache: bool,
pub use_flash_attention: bool,
pub video_token_id: u32,
pub vision_config: PaddleOCRVLVisionConfig,
pub vision_start_token_id: u32,
pub vocab_size: usize,
pub weight_share_add_bias: bool,
pub use_3d_rope: bool,
pub rope_is_neox_style: bool,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PaddleOCRVLRopeScalingConfig {
pub mrope_section: Vec<usize>,
pub rope_type: String,
#[serde(rename = "type")]
pub scaling_type: String,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PaddleOCRVLVisionConfig {
pub attention_dropout: f64,
pub hidden_act: Activation,
pub hidden_size: usize,
pub image_size: usize,
pub intermediate_size: usize,
pub layer_norm_eps: f64,
pub num_attention_heads: usize,
pub num_channels: usize,
pub num_hidden_layers: usize,
pub pad_token_id: u32,
pub patch_size: usize,
pub spatial_merge_size: usize,
pub temporal_patch_size: usize,
pub tokens_per_second: usize,
pub torch_dtype: String,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PaddleOCRVLPreprocessorConfig {
pub do_convert_rgb: bool,
pub do_normalize: bool,
pub do_rescale: bool,
pub do_resize: bool,
pub image_mean: Vec<f64>,
pub image_std: Vec<f64>,
pub max_pixels: u32,
pub merge_size: usize,
pub min_pixels: u32,
pub patch_size: usize,
pub resample: u32,
pub rescale_factor: f64,
pub size: Option<SizeConfig>,
pub temporal_patch_size: usize,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct SizeConfig {
pub max_pixels: usize,
pub min_pixels: usize,
}
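// --- Illustrative sketch (editor addition): the configs above are plain serde types ---
// The path is a hypothetical placeholder for a model directory holding config.json.
#[allow(dead_code)]
fn load_config_example() -> anyhow::Result<PaddleOCRVLConfig> {
    let bytes = std::fs::read("/path/to/PaddleOCR-VL/config.json")?;
    Ok(serde_json::from_slice::<PaddleOCRVLConfig>(&bytes)?)
}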
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/paddleocr_vl/processor.rs | src/models/paddleocr_vl/processor.rs | use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
use candle_core::{DType, Device, IndexOp, Shape, Tensor};
use image::DynamicImage;
use crate::{
models::paddleocr_vl::config::PaddleOCRVLPreprocessorConfig,
utils::img_utils::{extract_images, img_smart_resize, img_transform},
};
pub struct PaddleOCRVLProcessor {
process_cfg: PaddleOCRVLPreprocessorConfig,
device: Device,
dtype: DType,
image_token: String,
}
impl PaddleOCRVLProcessor {
pub fn new(
config: PaddleOCRVLPreprocessorConfig,
device: &Device,
dtype: DType,
) -> Result<Self> {
let image_token = "<|IMAGE_PLACEHOLDER|>".to_string();
Ok(Self {
process_cfg: config,
device: device.clone(),
dtype,
image_token,
})
}
pub fn process_img(
&self,
img: &DynamicImage,
img_mean: &Tensor,
img_std: &Tensor,
) -> Result<Tensor> {
let img_h = img.height();
let img_w = img.width();
        // resize h and w to multiples of patch_size * merge_size
let (resize_h, resize_w) = img_smart_resize(
img_h,
img_w,
(self.process_cfg.patch_size * self.process_cfg.merge_size) as u32,
self.process_cfg.min_pixels,
self.process_cfg.max_pixels,
)?;
let img = img.resize_exact(resize_w, resize_h, image::imageops::FilterType::CatmullRom);
let img_tensor = img_transform(&img, img_mean, img_std, &self.device, self.dtype)?;
// (c, h, w) => (1, c, h, w)
let img_tensor = img_tensor.unsqueeze(0)?;
Ok(img_tensor)
}
pub fn process_vision_tensor(&self, img_tensor: &Tensor) -> Result<(Tensor, Tensor)> {
let channel = img_tensor.dim(1)?;
        // img_tensor.dim(0) = 1 and temporal_patch_size = 1, so grid_t = 1
let grid_t = img_tensor.dim(0)? / self.process_cfg.temporal_patch_size;
let grid_h = img_tensor.dim(2)? / self.process_cfg.patch_size;
let grid_w = img_tensor.dim(3)? / self.process_cfg.patch_size;
let shape = Shape::from(vec![
grid_t,
self.process_cfg.temporal_patch_size,
channel,
grid_h,
self.process_cfg.patch_size,
grid_w,
self.process_cfg.patch_size,
]);
let img_tensor = img_tensor.reshape(shape)?;
        // permute to (grid_t, grid_h, grid_w, channel, temporal_patch_size, patch_size, patch_size)
let img_tensor = img_tensor.permute(vec![0, 3, 5, 2, 1, 4, 6])?;
let img_tensor = img_tensor
.reshape((
grid_t * grid_h * grid_w,
channel,
self.process_cfg.patch_size,
self.process_cfg.patch_size,
))?
.contiguous()?;
let grid_thw = Tensor::from_vec(
vec![grid_t as u32, grid_h as u32, grid_w as u32],
(1, 3),
&self.device,
)?;
Ok((img_tensor, grid_thw))
}
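    // Editor note: a worked shape example under assumed values (patch_size = 14,
    // merge_size = 2, temporal_patch_size = 1): a (1, 3, 448, 644) image tensor
    // gives grid (t=1, h=32, w=46), so `process_vision_tensor` returns a patch
    // tensor of shape (1*32*46, 3, 14, 14) plus a (1, 3) grid_thw of [1, 32, 46].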
pub fn process_images(
&self,
imgs: &Vec<DynamicImage>,
img_mean: &Tensor,
img_std: &Tensor,
) -> Result<(Tensor, Tensor)> {
let mut pixel_values_vec = Vec::new();
let mut vision_grid_thws_vec = Vec::new();
for img in imgs {
let img_tensor = self.process_img(img, img_mean, img_std)?;
let (img_tensor, grid_thw) = self.process_vision_tensor(&img_tensor)?;
pixel_values_vec.push(img_tensor);
vision_grid_thws_vec.push(grid_thw);
}
let pixel_values = Tensor::cat(&pixel_values_vec, 0)?;
let vision_grid_thws = Tensor::cat(&vision_grid_thws_vec, 0)?;
Ok((pixel_values, vision_grid_thws))
}
pub fn process_info(
&self,
messages: &ChatCompletionParameters,
text: &str,
) -> Result<(String, Option<Tensor>, Option<Tensor>)> {
let imgs = extract_images(messages)?;
let img_mean = Tensor::from_slice(&self.process_cfg.image_mean, (3, 1, 1), &self.device)?
.to_dtype(self.dtype)?;
let img_std = Tensor::from_slice(&self.process_cfg.image_std, (3, 1, 1), &self.device)?
.to_dtype(self.dtype)?;
let (pixel_values, image_grid_thw) = if !imgs.is_empty() {
let (pixel_values, image_grid_thw) = self.process_images(&imgs, &img_mean, &img_std)?;
(Some(pixel_values), Some(image_grid_thw))
} else {
(None, None)
};
let merge_length = self.process_cfg.merge_size.pow(2);
let mut text = text.to_string();
if let Some(ref image_grid_thw) = image_grid_thw {
let mut index = 0;
while text.contains(&self.image_token) {
let grid_i = image_grid_thw.i(index)?;
let repeat_num =
grid_i.to_vec1::<u32>()?.iter().product::<u32>() as usize / merge_length;
let replace = "<|placeholder|>".repeat(repeat_num);
text = text.replacen(&self.image_token, &replace, 1);
index += 1;
}
text = text.replace("<|placeholder|>", &self.image_token);
}
Ok((text, pixel_values, image_grid_thw))
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/paddleocr_vl/model.rs | src/models/paddleocr_vl/model.rs | use anyhow::{Result, anyhow};
use candle_core::{D, IndexOp, Shape, Tensor};
use candle_nn::{
Conv2d, Embedding, LayerNorm, Linear, Module, RmsNorm, VarBuilder, embedding, linear,
linear_no_bias, rms_norm,
};
use num::integer::Roots;
use crate::{
models::{
common::{
NaiveAttnGateUpDownMLPBlock, NaiveAttnTwoLinearMLPBlock, get_conv2d, get_layer_norm,
},
paddleocr_vl::config::{
PaddleOCRVLConfig, PaddleOCRVLRopeScalingConfig, PaddleOCRVLVisionConfig,
},
},
position_embed::rope::{Qwen2_5VLTextRotaryEmbedding, Qwen2_5VisionRotaryEmbedding},
utils::tensor_utils::{
get_vision_next_indices, interpolate_bilinear, masked_scatter_dim0, nonzero_index,
prepare_causal_attention_mask, zero_index,
},
};
pub struct Projector {
merge_size: usize,
pre_norm: LayerNorm,
linear_1: Linear,
linear_2: Linear,
}
impl Projector {
pub fn new(vb: VarBuilder, config: &PaddleOCRVLConfig) -> Result<Self> {
let merge_size = config.vision_config.spatial_merge_size;
let hidden_size = config.vision_config.hidden_size * merge_size * merge_size;
let pre_norm = get_layer_norm(
vb.pp("pre_norm"),
config.rms_norm_eps,
config.vision_config.hidden_size,
)?;
let linear_1 = linear(hidden_size, hidden_size, vb.pp("linear_1"))?;
let linear_2 = linear(hidden_size, config.hidden_size, vb.pp("linear_2"))?;
Ok(Self {
merge_size,
pre_norm,
linear_1,
linear_2,
})
}
pub fn forward(&self, xs: &Tensor, image_grid_thw: &Tensor) -> Result<Tensor> {
let img_num = image_grid_thw.dim(0)?;
let mut processed_features = vec![];
        let mut start = 0usize;
for i in 0..img_num {
let [t, h, w] = image_grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
                return Err(anyhow!("grid_thw: expected exactly 3 elements"));
};
let end = start + (t * h * w) as usize;
let xs_i = xs.i((start..end, ..))?;
let xs_i = self.pre_norm.forward(&xs_i)?;
let dim = xs_i.dim(1)?;
let shape = Shape::from(vec![
t as usize,
h as usize / self.merge_size,
self.merge_size,
w as usize / self.merge_size,
self.merge_size,
dim,
]);
let xs_i = xs_i
.reshape((t as usize, h as usize, w as usize, dim))?
.reshape(shape)?
.permute((0, 1, 3, 2, 4, 5))?
.reshape((
(t * h * w) as usize / self.merge_size / self.merge_size,
self.merge_size * self.merge_size * dim,
))?;
let xs_i = self.linear_1.forward(&xs_i)?.gelu()?;
let xs_i = self.linear_2.forward(&xs_i)?;
            processed_features.push(xs_i);
            // Advance the slice window to the next image's rows.
            start = end;
}
let xs = Tensor::cat(&processed_features, 0)?;
Ok(xs)
}
}
pub struct SiglipVisionEmbeddings {
embed_dim: usize,
patch_size: usize,
patch_embedding: Conv2d,
num_positions: usize,
position_embedding: Embedding,
packing_position_embedding: Embedding,
}
impl SiglipVisionEmbeddings {
pub fn new(vb: VarBuilder, config: &PaddleOCRVLVisionConfig) -> Result<Self> {
let embed_dim = config.hidden_size;
let image_size = config.image_size;
let patch_size = config.patch_size;
let patch_embedding = get_conv2d(
vb.pp("patch_embedding"),
config.num_channels,
embed_dim,
patch_size,
0,
patch_size,
1,
1,
true,
)?;
let num_positions = (image_size / patch_size).pow(2);
let position_embedding = embedding(num_positions, embed_dim, vb.pp("position_embedding"))?;
let packing_position_embedding =
embedding(32768, embed_dim, vb.pp("packing_position_embedding"))?;
Ok(Self {
embed_dim,
patch_size,
patch_embedding,
num_positions,
position_embedding,
packing_position_embedding,
})
}
fn interpolate_pos_encoding(
&self,
h: usize,
w: usize,
is_after_patchify: bool,
) -> Result<Tensor> {
let (new_height, new_width) = if is_after_patchify {
(h, w)
} else {
(h / self.patch_size, w / self.patch_size)
};
let sqrt_num_positions = self.num_positions.sqrt();
let patch_pos_embed = self
.position_embedding
.embeddings()
.reshape((1, sqrt_num_positions, sqrt_num_positions, self.embed_dim))?
.permute((0, 3, 1, 2))?;
let patch_pos_embed =
interpolate_bilinear(&patch_pos_embed, (new_height, new_width), Some(false))?;
let patch_pos_embed =
patch_pos_embed
.permute((0, 2, 3, 1))?
.reshape((1, (), self.embed_dim))?;
Ok(patch_pos_embed)
}
pub fn forward(
&self,
pixel_values: &Tensor,
position_ids: &Tensor,
image_grid_thw: &Tensor,
interpolate_pos_encoding: bool,
) -> Result<Tensor> {
let (bs, seq_len, c, h, w) = pixel_values.dims5()?;
let pixel_values = pixel_values.reshape((bs * seq_len, c, h, w))?;
let patch_embeds = self.patch_embedding.forward(&pixel_values)?;
// (bs*seq_len, c)
let mut embeddings = patch_embeds.squeeze(D::Minus1)?.squeeze(D::Minus1)?;
if interpolate_pos_encoding {
let mut tmp_embeddings = vec![];
let img_num = image_grid_thw.dim(0)?;
let mut start = 0usize;
for i in 0..img_num {
let [t, h, w] = image_grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
                    return Err(anyhow!("grid_thw: expected exactly 3 elements"));
};
let end = start + (t * h * w) as usize;
let image_embeddings = embeddings.i(start..end)?;
let position_embedding = self
.interpolate_pos_encoding(h as usize, w as usize, true)?
.squeeze(0)?
.repeat((t as usize, 1usize))?;
let image_embeddings = image_embeddings.add(&position_embedding)?;
tmp_embeddings.push(image_embeddings);
start = end;
}
embeddings = Tensor::cat(&tmp_embeddings, 0)?.unsqueeze(0)?; // add bs dim
} else {
let packing_pos_embed = self.packing_position_embedding.forward(position_ids)?;
embeddings = embeddings.add(&packing_pos_embed)?.unsqueeze(0)?;
}
Ok(embeddings)
}
}
pub struct SiglipEncoder {
layers: Vec<NaiveAttnTwoLinearMLPBlock>,
rotary_pos_emb: Qwen2_5VisionRotaryEmbedding,
}
impl SiglipEncoder {
pub fn new(vb: VarBuilder, config: &PaddleOCRVLVisionConfig) -> Result<Self> {
let vb_layers = vb.pp("layers");
let mut layers = vec![];
for i in 0..config.num_hidden_layers {
let layer_i = NaiveAttnTwoLinearMLPBlock::new(
vb_layers.pp(i),
config.hidden_size,
config.num_attention_heads,
None,
None,
true,
"self_attn",
Some("out_proj"),
config.intermediate_size,
config.hidden_act,
true,
"mlp",
"fc1",
"fc2",
config.layer_norm_eps,
"layer_norm1",
"layer_norm2",
)?;
layers.push(layer_i);
}
let head_dim = config.hidden_size / config.num_attention_heads;
let rotary_pos_emb = Qwen2_5VisionRotaryEmbedding::new(head_dim / 2, Some(10000.0));
Ok(Self {
layers,
rotary_pos_emb,
})
}
pub fn forward(&self, xs: &Tensor, image_grid_thw: &Tensor) -> Result<Tensor> {
let mut split_hids = vec![];
let mut split_wids = vec![];
for i in 0..image_grid_thw.dim(0)? {
let [t, h, w] = image_grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
                return Err(anyhow!("grid_thw: expected exactly 3 elements"));
};
let pos_w: Vec<u32> = (0..h).flat_map(|_| 0u32..w).collect();
let pos_w = pos_w.repeat(t as usize);
let pos_w = Tensor::new(pos_w, xs.device())?;
let pos_h: Vec<u32> = (0..h).flat_map(|h| vec![h; w as usize]).collect();
let pos_h = pos_h.repeat(t as usize);
let pos_h = Tensor::new(pos_h, xs.device())?;
split_hids.push(pos_h);
split_wids.push(pos_w);
}
let width_position_ids = Tensor::cat(&split_wids, 0)?;
let height_position_ids = Tensor::cat(&split_hids, 0)?;
let max_grid_size = image_grid_thw.i((.., 1..))?.max_all()?.to_scalar::<u32>()?;
let rope_emb_max_grid = self
.rotary_pos_emb
.forward(max_grid_size as usize, xs.device())?;
let rotary_pos_emb_h = rope_emb_max_grid.index_select(&height_position_ids, 0)?;
let rotary_pos_emb_w = rope_emb_max_grid.index_select(&width_position_ids, 0)?;
let rope_emb = Tensor::cat(&[rotary_pos_emb_h, rotary_pos_emb_w], 1)?.contiguous()?;
let rope_emb = rope_emb.repeat((1, 2))?;
let cos = rope_emb.cos()?;
let sin = rope_emb.sin()?;
let mut xs = xs.clone();
for layer in &self.layers {
xs = layer.forward(&xs, Some(&cos), Some(&sin), None, false)?;
}
Ok(xs)
}
}
pub struct SiglipVisionModel {
embeddings: SiglipVisionEmbeddings,
encoder: SiglipEncoder,
post_layernorm: LayerNorm,
}
impl SiglipVisionModel {
pub fn new(vb: VarBuilder, config: &PaddleOCRVLVisionConfig) -> Result<Self> {
let vb = vb.pp("vision_model");
let embeddings = SiglipVisionEmbeddings::new(vb.pp("embeddings"), config)?;
let encoder = SiglipEncoder::new(vb.pp("encoder"), config)?;
let post_layernorm = get_layer_norm(
vb.pp("post_layernorm"),
config.layer_norm_eps,
config.hidden_size,
)?;
Ok(Self {
embeddings,
encoder,
post_layernorm,
})
}
pub fn forward(
&self,
pixel_values: &Tensor,
image_grid_thw: &Tensor,
position_ids: &Tensor,
interpolate_pos_encoding: bool,
) -> Result<Tensor> {
let xs = self.embeddings.forward(
pixel_values,
position_ids,
image_grid_thw,
interpolate_pos_encoding,
)?;
let xs = self.encoder.forward(&xs, image_grid_thw)?;
let xs = self.post_layernorm.forward(&xs)?;
Ok(xs)
}
}
pub struct Ernie4_5Model {
embed_tokens: Embedding,
layers: Vec<NaiveAttnGateUpDownMLPBlock>,
norm: RmsNorm,
rotary_emb: Qwen2_5VLTextRotaryEmbedding,
rope_scaling: PaddleOCRVLRopeScalingConfig,
}
impl Ernie4_5Model {
pub fn new(vb: VarBuilder, config: &PaddleOCRVLConfig) -> Result<Self> {
let embed_tokens = embedding(config.vocab_size, config.hidden_size, vb.pp("embed_tokens"))?;
let vb_layers = vb.pp("layers");
let mut layers = vec![];
for i in 0..config.num_hidden_layers {
let layer_i = NaiveAttnGateUpDownMLPBlock::new(
vb_layers.pp(i),
config.hidden_size,
config.num_attention_heads,
Some(config.num_key_value_heads),
Some(config.head_dim),
config.use_bias,
"self_attn",
None,
config.intermediate_size,
config.hidden_act,
config.use_bias,
"mlp",
config.rms_norm_eps,
"input_layernorm",
"post_attention_layernorm",
)?;
layers.push(layer_i);
}
let norm = rms_norm(config.hidden_size, config.rms_norm_eps, vb.pp("norm"))?;
let rotary_emb =
Qwen2_5VLTextRotaryEmbedding::new(config.head_dim, config.rope_theta as f32);
Ok(Self {
embed_tokens,
layers,
norm,
rotary_emb,
rope_scaling: config.rope_scaling.clone(),
})
}
pub fn forward(
&mut self,
inputs_embeds: &Tensor,
seqlen_offset: usize,
position_ids: Option<&Tensor>,
) -> Result<Tensor> {
let (b_size, seq_len, _) = inputs_embeds.dims3()?;
let position_ids = match position_ids {
Some(ids) => ids.clone(),
None => Tensor::arange(
seqlen_offset as u32,
(seq_len + seqlen_offset) as u32,
inputs_embeds.device(),
)?
.unsqueeze(0)?
.unsqueeze(0)?
.broadcast_as((3, b_size, seq_len))?,
};
let (cos, sin) = self.rotary_emb.forward(
&position_ids,
inputs_embeds.dtype(),
self.rope_scaling.mrope_section.clone(),
)?;
let mut xs = inputs_embeds.clone();
let attention_mask: Option<Tensor> = {
if seq_len <= 1 {
None
} else {
Some(prepare_causal_attention_mask(
b_size,
seq_len,
0,
xs.device(),
)?)
}
};
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, &cos, &sin, attention_mask.as_ref())?;
}
let xs = xs.apply(&self.norm)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
pub struct PaddleOCRVLModel {
mlp_ar: Projector,
visual: SiglipVisionModel,
model: Ernie4_5Model,
pub cfg: PaddleOCRVLConfig,
lm_head: Linear,
rope_deltas: Option<Tensor>,
}
impl PaddleOCRVLModel {
pub fn new(cfg: PaddleOCRVLConfig, vb: VarBuilder) -> Result<Self> {
let mlp_ar = Projector::new(vb.pp("mlp_AR"), &cfg)?;
let visual = SiglipVisionModel::new(vb.pp("visual"), &cfg.vision_config)?;
let model = Ernie4_5Model::new(vb.pp("model"), &cfg)?;
let vocab_size = cfg.vocab_size;
let lm_head = if cfg.tie_word_embeddings {
Linear::new(model.embed_tokens.embeddings().clone(), None)
} else {
linear_no_bias(cfg.hidden_size, vocab_size, vb.pp("lm_head"))?
};
Ok(Self {
mlp_ar,
visual,
model,
cfg,
lm_head,
rope_deltas: None,
})
}
pub fn get_rope_index(
&self,
input_ids: &Tensor,
image_grid_thw: Option<&Tensor>,
video_grid_thw: Option<&Tensor>,
mask: Option<&Tensor>,
second_per_grid_ts: Option<Vec<f32>>,
) -> Result<(Tensor, Tensor)> {
let spatial_merge_size = self.cfg.vision_config.spatial_merge_size;
let mut mrope_position_deltas: Vec<i64> = Vec::new();
if image_grid_thw.is_some() || video_grid_thw.is_some() {
let total_input_ids = input_ids.clone();
let mask_ = mask
.cloned()
.unwrap_or(Tensor::ones_like(&total_input_ids)?);
let mut position_ids = Tensor::ones(
(3, input_ids.dim(0)?, input_ids.dim(1)?),
input_ids.dtype(),
input_ids.device(),
)?;
let mut image_index = 0;
let mut video_index = 0;
for i in 0..total_input_ids.dim(0)? {
let mut input_ids_i = total_input_ids.i(i)?;
let mask_i = mask_.i(i)?;
                // At inference time the attention_mask is usually all ones; gathering nonzero indices is only needed when padding is present
if mask_i.sum_all()?.to_scalar::<u32>()? != mask_i.dim(0)? as u32 {
let nonzero_idx = nonzero_index(&mask_i)?;
input_ids_i = input_ids_i.gather(&nonzero_idx, 0)?;
}
let mut text_start = 0;
let mut text_end = 0;
let mut thw = vec![];
let mut second_per_grid_t = 0_f32;
let mut llm_pos_ids_list: Vec<Tensor> = Vec::new();
                // index immediately following each vision start token
let vision_indices =
get_vision_next_indices(&input_ids_i, self.cfg.vision_start_token_id);
                match vision_indices {
                    Ok(indices) => {
                        let vision_tokens = input_ids_i.gather(&indices, 0)?.to_vec1::<u32>()?;
                        let vision_indices_vec = indices.to_vec1::<u32>()?;
for (j, &token) in vision_tokens.iter().enumerate() {
if token == self.cfg.image_token_id {
thw = image_grid_thw.unwrap().i(image_index)?.to_vec1::<u32>()?;
image_index += 1;
text_end = vision_indices_vec[j];
second_per_grid_t = 0.0;
}
if token == self.cfg.video_token_id {
thw = video_grid_thw.unwrap().i(video_index)?.to_vec1::<u32>()?;
text_end = vision_indices_vec[j];
second_per_grid_t = match second_per_grid_ts {
None => 1.0,
Some(ref vec) => vec[video_index],
};
video_index += 1;
}
let llm_grid_t = thw[0];
let llm_grid_h = thw[1] / spatial_merge_size as u32;
let llm_grid_w = thw[2] / spatial_merge_size as u32;
let text_len = text_end - text_start;
let start_idx = if !llm_pos_ids_list.is_empty() {
llm_pos_ids_list[llm_pos_ids_list.len() - 1]
.max_all()?
.to_scalar::<u32>()?
+ 1
} else {
0
};
let pos_ids = Tensor::arange(
start_idx,
start_idx + text_len,
input_ids_i.device(),
)?
.unsqueeze(0)?
.broadcast_as((3usize, text_len as usize))?;
llm_pos_ids_list.push(pos_ids);
let range_tensor = Tensor::arange(0, llm_grid_t, input_ids_i.device())?
.unsqueeze(D::Minus1)?;
let expanded_range = range_tensor.broadcast_as((
llm_grid_t as usize,
(llm_grid_h * llm_grid_w) as usize,
))?;
let time_tensor = expanded_range
.broadcast_mul(&Tensor::new(
vec![
(second_per_grid_t
* self.cfg.vision_config.tokens_per_second as f32)
as u32,
],
input_ids_i.device(),
)?)?
.broadcast_add(&Tensor::new(
vec![start_idx + text_len],
input_ids_i.device(),
)?)?;
let t_index = time_tensor.flatten_all()?;
let h_index = Tensor::arange(
start_idx + text_len,
start_idx + text_len + llm_grid_h,
input_ids_i.device(),
)?
.unsqueeze(0)?
.unsqueeze(D::Minus1)?
.broadcast_as((
llm_grid_t as usize,
llm_grid_h as usize,
llm_grid_w as usize,
))?
.flatten_all()?;
let w_index = Tensor::arange(
start_idx + text_len,
start_idx + text_len + llm_grid_w,
input_ids_i.device(),
)?
.unsqueeze(0)?
.unsqueeze(0)?
.broadcast_as((
llm_grid_t as usize,
llm_grid_h as usize,
llm_grid_w as usize,
))?
.flatten_all()?;
let thw_index = Tensor::stack(&[t_index, h_index, w_index], 0)?;
llm_pos_ids_list.push(thw_index);
text_start = text_end + llm_grid_t * llm_grid_h * llm_grid_w;
}
}
Err(e) => {
println!("get vision_indices err: {e}");
}
};
if text_start < input_ids_i.dim(0)? as u32 {
let start_idx = if !llm_pos_ids_list.is_empty() {
llm_pos_ids_list[llm_pos_ids_list.len() - 1]
.max_all()?
.to_scalar::<u32>()?
+ 1
} else {
0
};
let text_len = input_ids_i.dim(0)? as u32 - text_start;
let pos_ids =
Tensor::arange(start_idx, start_idx + text_len, input_ids_i.device())?
.unsqueeze(0)?
.broadcast_as((3usize, text_len as usize))?;
llm_pos_ids_list.push(pos_ids);
}
let llm_position = Tensor::cat(&llm_pos_ids_list, 1)?.reshape((3, 1, ()))?;
position_ids = position_ids
.slice_assign(&[(0..3), (i..i + 1), (0..input_ids.dim(1)?)], &llm_position)?;
let position_deltas = llm_position.max_all()?.to_scalar::<u32>()? as i64 + 1
- input_ids_i.dim(0)? as i64;
mrope_position_deltas.push(position_deltas);
}
let mut mrope_position_deltas = Tensor::new(mrope_position_deltas, input_ids.device())?;
if mrope_position_deltas.rank() == 1 {
mrope_position_deltas = mrope_position_deltas.unsqueeze(0)?;
}
Ok((position_ids.contiguous()?, mrope_position_deltas))
} else if let Some(mask) = mask {
let mut position_ids = mask
.to_dtype(candle_core::DType::F64)?
.cumsum(D::Minus1)?
.to_dtype(candle_core::DType::U32)?
.broadcast_sub(&Tensor::new(vec![1_u32], input_ids.device())?)?;
for i in 0..position_ids.dim(0)? {
let mut position_ids_i = position_ids.i(i)?;
let mask_i = mask.i(i)?;
                // If padding is present, set the padded positions to 1.
                // With bs > 1, sequences may have different lengths and are padded to a common seq_len.
if mask_i.sum_all()?.to_scalar::<u32>()? != mask_i.dim(0)? as u32 {
let zero_indices = zero_index(&mask_i)?;
let replace_1 = Tensor::ones(
zero_indices.dim(0)?,
candle_core::DType::U32,
input_ids.device(),
)?;
position_ids_i = position_ids_i
.scatter(&zero_indices, &replace_1, 0)?
.unsqueeze(0)?;
position_ids = position_ids
.slice_assign(&[(i..i + 1), (0..position_ids.dim(1)?)], &position_ids_i)?;
}
}
position_ids = position_ids
.unsqueeze(0)?
.broadcast_as((3, input_ids.dim(0)?, input_ids.dim(1)?))?
.contiguous()?;
let mut mrope_position_deltas = position_ids
.max(0)?
.max(D::Minus1)?
.broadcast_sub(&Tensor::new(
vec![mask.dim(D::Minus1)? as u32 - 1],
input_ids.device(),
)?)?
.contiguous()?;
if mrope_position_deltas.rank() == 1 {
mrope_position_deltas = mrope_position_deltas.unsqueeze(0)?;
}
Ok((position_ids, mrope_position_deltas))
} else {
let position_ids =
Tensor::arange(0_u32, input_ids.dim(D::Minus1)? as u32, input_ids.device())?
.unsqueeze(0)?
.unsqueeze(0)?
.broadcast_as((3, input_ids.dim(0)?, input_ids.dim(D::Minus1)?))?
.contiguous()?;
let mrope_position_deltas = Tensor::zeros(
(input_ids.dim(0)?, 1),
input_ids.dtype(),
input_ids.device(),
)?;
Ok((position_ids, mrope_position_deltas))
}
}
pub fn forward(
&mut self,
input_ids: &Tensor,
pixel_values: Option<&Tensor>,
image_grid_thw: Option<&Tensor>,
image_mask: &Tensor,
cache_position: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let mut inputs_embeds = self.model.embed_tokens.forward(input_ids)?;
if let Some(pixel_values) = pixel_values
&& let Some(image_grid_thw) = image_grid_thw
{
let pixel_values = pixel_values.unsqueeze(0)?;
let mut siglip_position_ids = vec![];
let mut sample_indices = vec![];
let mut cu_seqlens = vec![0u32];
let img_num = image_grid_thw.dim(0)?;
for i in 0..img_num {
                let [t, h, w] = image_grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
                    return Err(anyhow!("grid_thw: expected exactly 3 elements"));
                };
let numel = h * w;
let image_position_ids =
Tensor::arange(0, numel, pixel_values.device())?.repeat(t as usize)?;
siglip_position_ids.push(image_position_ids);
let indices =
Tensor::new(vec![i as u32; (numel * t) as usize], pixel_values.device())?;
sample_indices.push(indices);
cu_seqlens.push(cu_seqlens[cu_seqlens.len() - 1] + numel * t);
}
let siglip_position_ids = Tensor::cat(&siglip_position_ids, 0)?;
let image_embed =
self.visual
.forward(&pixel_values, image_grid_thw, &siglip_position_ids, true)?;
let image_embed = image_embed.squeeze(0)?;
let image_embed = self.mlp_ar.forward(&image_embed, image_grid_thw)?;
inputs_embeds = masked_scatter_dim0(&inputs_embeds, &image_embed, image_mask)?;
}
let position_ids;
let rope_deltas;
if (cache_position.is_some() && cache_position.unwrap().i(0)?.to_scalar::<u32>()? == 0)
|| self.rope_deltas.is_none()
{
(position_ids, rope_deltas) =
self.get_rope_index(input_ids, image_grid_thw, None, None, None)?;
self.rope_deltas = Some(rope_deltas);
} else {
let (bs, seq_len, _) = inputs_embeds.dims3()?;
let delta = if let Some(cache_position) = cache_position {
cache_position
.i(0)?
.to_dtype(self.rope_deltas.as_ref().unwrap().dtype())?
.broadcast_add(self.rope_deltas.as_ref().unwrap())?
.contiguous()?
.to_dtype(candle_core::DType::U32)?
} else {
Tensor::zeros(1, inputs_embeds.dtype(), inputs_embeds.device())?
};
position_ids = Tensor::arange(0u32, seq_len as u32, input_ids.device())?
.unsqueeze(0)?
.broadcast_as((bs, seq_len))?
.broadcast_add(&delta)?
.unsqueeze(0)?
.broadcast_as((3, bs, seq_len))?
.contiguous()?;
}
let outputs = self
.model
.forward(&inputs_embeds, seqlen_offset, Some(&position_ids))?;
let seq_len = outputs.dim(1)?;
let hidden_state = outputs.narrow(1, seq_len - 1, 1)?;
let logits = self.lm_head.forward(&hidden_state)?;
Ok(logits)
}
pub fn clear_kv_cache(&mut self) {
self.model.clear_kv_cache();
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/paddleocr_vl/mod.rs | src/models/paddleocr_vl/mod.rs | pub mod config;
pub mod generate;
pub mod model;
pub mod processor;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/paddleocr_vl/generate.rs | src/models/paddleocr_vl/generate.rs | use aha_openai_dive::v1::resources::chat::{
ChatCompletionChunkResponse, ChatCompletionParameters, ChatCompletionResponse,
};
use anyhow::{Result, anyhow};
use candle_core::{D, DType, Device, IndexOp, Tensor};
use candle_nn::VarBuilder;
use rocket::async_stream::stream;
use rocket::futures::Stream;
use crate::models::paddleocr_vl::config::{PaddleOCRVLConfig, PaddleOCRVLPreprocessorConfig};
use crate::models::paddleocr_vl::model::PaddleOCRVLModel;
use crate::models::paddleocr_vl::processor::PaddleOCRVLProcessor;
use crate::utils::tensor_utils::get_equal_mask;
use crate::utils::{
build_completion_chunk_response, build_completion_response, find_type_files, get_device,
get_dtype, get_logit_processor,
};
use crate::{chat_template::ChatTemplate, models::GenerateModel, tokenizer::TokenizerModel};
pub struct PaddleOCRVLGenerateModel<'a> {
chat_template: ChatTemplate<'a>,
tokenizer: TokenizerModel,
pre_processor: PaddleOCRVLProcessor,
paddleocr_vl: PaddleOCRVLModel,
cfg: PaddleOCRVLConfig,
device: Device,
end_token_id: u32,
model_name: String,
}
impl<'a> PaddleOCRVLGenerateModel<'a> {
pub fn init(path: &str, device: Option<&Device>, dtype: Option<DType>) -> Result<Self> {
let chat_template = ChatTemplate::init(path)?;
let tokenizer = TokenizerModel::init(path)?;
let config_path = path.to_string() + "/config.json";
let cfg: PaddleOCRVLConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
let device = &get_device(device);
let cfg_dtype = cfg.torch_dtype.as_str();
let dtype = get_dtype(dtype, cfg_dtype);
let processor_cfg_path = path.to_string() + "/preprocessor_config.json";
let processor_cfg: PaddleOCRVLPreprocessorConfig =
serde_json::from_slice(&std::fs::read(processor_cfg_path)?)?;
let pre_processor = PaddleOCRVLProcessor::new(processor_cfg, device, dtype)?;
let end_token_id = 2;
let model_list = find_type_files(path, "safetensors")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&model_list, dtype, device)? };
let paddleocr_vl = PaddleOCRVLModel::new(cfg.clone(), vb)?;
Ok(PaddleOCRVLGenerateModel {
chat_template,
tokenizer,
pre_processor,
paddleocr_vl,
cfg,
device: device.clone(),
end_token_id,
model_name: "paddleocr_vl".to_string(),
})
}
}
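// Illustrative usage sketch (the model path below is an assumption, not part of this repo):
//   let mut m = PaddleOCRVLGenerateModel::init("/models/PaddleOCR-VL", None, None)?;
//   let resp = m.generate(chat_params)?;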
impl<'a> GenerateModel for PaddleOCRVLGenerateModel<'a> {
fn generate(&mut self, mes: ChatCompletionParameters) -> Result<ChatCompletionResponse> {
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor = get_logit_processor(mes.temperature, mes.top_p, None, seed);
let mes_render = self.chat_template.apply_chat_template(&mes)?;
let (replace_text, mut pixel_values, mut image_grid_thw) =
self.pre_processor.process_info(&mes, &mes_render)?;
let mut input_ids = self.tokenizer.text_encode(replace_text, &self.device)?;
let mut seq_len = input_ids.dim(1)?;
let mut seqlen_offset = 0;
let image_mask = get_equal_mask(&input_ids, self.cfg.image_token_id)?;
let mut cache_position = Tensor::ones_like(&input_ids.i(0)?)?
.to_dtype(candle_core::DType::F64)?
.cumsum(D::Minus1)?
.to_dtype(candle_core::DType::U32)?
.broadcast_sub(&Tensor::new(vec![1_u32], input_ids.device())?)?;
let mut generate = Vec::new();
let sample_len = mes.max_tokens.unwrap_or(1024);
for _ in 0..sample_len {
let logits = self.paddleocr_vl.forward(
&input_ids,
pixel_values.as_ref(),
image_grid_thw.as_ref(),
&image_mask,
Some(&cache_position),
seqlen_offset,
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
generate.push(next_token);
if next_token == self.end_token_id {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
image_grid_thw = None;
}
let num_token = generate.len() as u32;
let res = self.tokenizer.token_decode(generate)?;
self.paddleocr_vl.clear_kv_cache();
let response = build_completion_response(res, &self.model_name, Some(num_token));
Ok(response)
}
fn generate_stream(
&mut self,
mes: ChatCompletionParameters,
) -> Result<
Box<
dyn Stream<Item = Result<ChatCompletionChunkResponse, anyhow::Error>>
+ Send
+ Unpin
+ '_,
>,
> {
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor = get_logit_processor(mes.temperature, mes.top_p, None, seed);
let mes_render = self.chat_template.apply_chat_template(&mes)?;
let (replace_text, pixel_values, image_grid_thw) =
self.pre_processor.process_info(&mes, &mes_render)?;
let mut input_ids = self.tokenizer.text_encode(replace_text, &self.device)?;
let mut seq_len = input_ids.dim(1)?;
let mut seqlen_offset = 0;
let image_mask = get_equal_mask(&input_ids, self.cfg.image_token_id)?;
let mut cache_position = Tensor::ones_like(&input_ids.i(0)?)?
.to_dtype(candle_core::DType::F64)?
.cumsum(D::Minus1)?
.to_dtype(candle_core::DType::U32)?
.broadcast_sub(&Tensor::new(vec![1_u32], input_ids.device())?)?;
let sample_len = mes.max_tokens.unwrap_or(1024);
let stream = stream! {
let mut error_tokens = Vec::new();
let mut pixel_values = pixel_values.as_ref();
let mut image_grid_thw = image_grid_thw.as_ref();
for _ in 0..sample_len {
let logits = self.paddleocr_vl.forward(
&input_ids,
pixel_values,
image_grid_thw,
&image_mask,
Some(&cache_position),
seqlen_offset,
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
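                // Incomplete UTF-8 handling: a sampled token can end mid-codepoint, in
                // which case decoding yields U+FFFD. Buffer such tokens (up to 3) and
                // retry decoding together with the next token before emitting a chunk.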
let mut decode_ids = Vec::new();
if !error_tokens.is_empty() {
decode_ids.extend_from_slice(&error_tokens);
}
decode_ids.push(next_token);
                let decoded_token = self.tokenizer.token_decode(decode_ids).map_err(|e| anyhow!("stream decode error: {e}"))?;
if decoded_token.contains("�") {
error_tokens.push(next_token);
if error_tokens.len() > 3 {
error_tokens.clear();
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
image_grid_thw = None;
continue;
}
error_tokens.clear();
let chunk = build_completion_chunk_response(decoded_token, &self.model_name, None, None);
yield Ok(chunk);
if next_token == self.end_token_id {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
image_grid_thw = None;
}
self.paddleocr_vl.clear_kv_cache();
};
Ok(Box::new(Box::pin(stream)))
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/qwen2_5vl/config.rs | src/models/qwen2_5vl/config.rs | use candle_nn::Activation;
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct VisionConfig {
pub depth: usize,
pub hidden_act: Activation,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_heads: usize,
pub in_chans: usize,
pub out_hidden_size: usize,
pub patch_size: usize,
pub spatial_merge_size: usize,
pub spatial_patch_size: usize,
pub window_size: usize,
pub fullatt_block_indexes: Vec<usize>,
pub tokens_per_second: usize,
pub temporal_patch_size: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct RopeScaling {
pub r#type: String,
pub mrope_section: Vec<usize>,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Qwen2_5VLConfig {
pub attention_dropout: f32,
pub bos_token_id: u32,
pub eos_token_id: u32,
pub vision_start_token_id: usize,
pub vision_end_token_id: usize,
pub vision_token_id: usize,
pub image_token_id: usize,
pub video_token_id: usize,
pub hidden_act: Activation,
pub hidden_size: usize,
pub initializer_range: f32,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub max_window_layers: usize,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_theta: f32,
pub sliding_window: usize,
pub tie_word_embeddings: bool,
pub torch_dtype: String,
pub use_sliding_window: bool,
pub vision_config: VisionConfig,
pub rope_scaling: RopeScaling,
pub vocab_size: usize,
}
pub struct VisionSetting {
pub image_factor: u32,
pub min_pixels: u32,
pub max_pixels: u32,
pub max_ratio: u32,
pub temporal_patch_size: usize,
pub patch_size: usize,
pub merge_size: usize,
pub video_min_pixels: u32,
pub video_max_pixels: u32,
pub video_total_pixels: u32,
pub frame_factor: u32,
pub fps: f32,
pub fps_min_frames: u32,
pub fps_max_frames: u32,
pub image_mean: Vec<f32>,
pub image_std: Vec<f32>,
}
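// Defaults mirror Qwen2.5-VL preprocessing: image_factor = patch_size (14) * merge_size (2) = 28,
// and pixel budgets are expressed in 28x28 units (e.g. min_pixels = 4 * 28 * 28 = 3136 pixels).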
impl Default for VisionSetting {
fn default() -> Self {
Self {
image_factor: 28,
min_pixels: 4 * 28 * 28,
max_pixels: 16384 * 28 * 28,
// max_pixels: 1000 * 28 * 28,
max_ratio: 200,
temporal_patch_size: 2,
patch_size: 14,
merge_size: 2,
video_min_pixels: 128 * 28 * 28,
video_max_pixels: 768 * 28 * 28,
video_total_pixels: 24576 * 28 * 28,
frame_factor: 2,
fps: 2.0,
fps_min_frames: 4,
fps_max_frames: 768,
image_mean: vec![0.48145466_f32, 0.4578275f32, 0.40821073f32],
image_std: vec![0.26862954f32, 0.2613026f32, 0.2757771f32],
}
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/qwen2_5vl/processor.rs | src/models/qwen2_5vl/processor.rs | use std::collections::HashMap;
use aha_openai_dive::v1::resources::chat::{
ChatCompletionParameters, ChatMessage, ChatMessageContent, ChatMessageContentPart,
};
use anyhow::{Result, anyhow};
use candle_core::{DType, Device, IndexOp, Shape, Tensor};
#[cfg(feature = "ffmpeg")]
use ffmpeg_next as ffmpeg;
use image::DynamicImage;
use num::integer::lcm;
use crate::{
models::qwen2_5vl::config::VisionSetting,
utils::{
img_utils::get_image,
{ceil_by_factor, floor_by_factor, round_by_factor},
},
};
#[derive(Clone)]
pub struct VisionInput {
pub data: Tensor,
pub grid_thw: Tensor,
}
#[derive(Clone)]
pub struct GeneralInput {
pub replace_text: String,
pub pixel_values: Option<Tensor>,
pub image_grid_thw: Option<Tensor>,
pub pixel_values_video: Option<Tensor>,
pub video_grid_thw: Option<Tensor>,
pub second_per_grid_ts: Option<Vec<f32>>,
}
#[allow(unused)]
pub struct Qwen2_5VLProcessor {
vision_setting: VisionSetting,
device: Device,
dtype: DType,
image_token: String,
video_token: String,
}
impl Qwen2_5VLProcessor {
pub fn new(device: &Device, dtype: DType) -> Result<Self> {
let vision_setting = VisionSetting::default();
let image_token = "<|image_pad|>".to_string();
let video_token = "<|video_pad|>".to_string();
Ok(Self {
vision_setting,
device: device.clone(),
dtype,
image_token,
video_token,
})
}
pub fn extract_vision_info(
&self,
mes: &ChatCompletionParameters,
) -> Result<HashMap<String, Vec<String>>> {
let mut vision_map = HashMap::new();
vision_map.insert("image".to_string(), Vec::new());
vision_map.insert("video".to_string(), Vec::new());
for chat_mes in mes.messages.clone() {
if let ChatMessage::User { content, .. } = chat_mes
&& let ChatMessageContent::ContentPart(part_vec) = content
{
for part in part_vec {
if let ChatMessageContentPart::Image(img_part) = part {
let img_url = img_part.image_url;
vision_map.get_mut("image").unwrap().push(img_url.url);
// } else if let ChatMessageContentPart::Video(video_part) = part {
// let video_url = video_part.video_url;
// vision_map.get_mut("video").unwrap().push(video_url.url);
}
}
}
}
Ok(vision_map)
}
pub fn process_img(
&self,
img: &DynamicImage,
img_mean: &Tensor,
img_std: &Tensor,
) -> Result<Tensor> {
let img_h = img.height();
let img_w = img.width();
        // Resize h and w to multiples of 28 (image_factor)
let (resize_h, resize_w) = smart_resize(img_h, img_w, &self.vision_setting, true, None)?;
let img = img.resize_exact(resize_w, resize_h, image::imageops::FilterType::CatmullRom);
let img_vec = img.to_rgb8().into_raw();
// (h, w, c) => (c, h, w)
let img_tensor = Tensor::from_slice(
&img_vec,
(resize_h as usize, resize_w as usize, 3),
&self.device,
)?
.permute((2, 0, 1))?
.to_dtype(self.dtype)?;
// 0-255 rescale to 0-1
let img_tensor = img_tensor.affine(1.0 / 255.0, 0.)?;
// normalize
let img_tensor = img_tensor.broadcast_sub(img_mean)?.broadcast_div(img_std)?;
// (c, h, w) => (1, c, h, w)
let img_tensor = img_tensor.unsqueeze(0)?;
Ok(img_tensor)
}
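    // Worked shape example (illustrative): a 224x224 RGB image duplicated to t=2 frames
    // yields grid (t=1, h=16, w=16) with temporal_patch_size=2 and patch_size=14,
    // i.e. 256 patch rows of dim 3 * 2 * 14 * 14 = 1176.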
pub fn process_vision_tensor(&self, img_tensor: &Tensor) -> Result<(Tensor, Tensor)> {
        // Ensure the frame count is divisible by `temporal_patch_size`
// img_tensor: (t, c, h, w)
let t = img_tensor.dim(0)?;
let img_tensor = if t % self.vision_setting.temporal_patch_size != 0 {
let repeat_num = self.vision_setting.temporal_patch_size
- t % self.vision_setting.temporal_patch_size;
let repeats = img_tensor.i(t - 1)?.repeat((repeat_num, 1, 1, 1))?;
Tensor::cat(&[img_tensor, &repeats], 0)?
} else {
img_tensor.clone()
};
let channel = img_tensor.dim(1)?;
let grid_t = img_tensor.dim(0)? / self.vision_setting.temporal_patch_size;
let grid_h = img_tensor.dim(2)? / self.vision_setting.patch_size;
let grid_w = img_tensor.dim(3)? / self.vision_setting.patch_size;
let shape = Shape::from(vec![
grid_t,
self.vision_setting.temporal_patch_size,
channel,
grid_h / self.vision_setting.merge_size,
self.vision_setting.merge_size,
self.vision_setting.patch_size,
grid_w / self.vision_setting.merge_size,
self.vision_setting.merge_size,
self.vision_setting.patch_size,
]);
let img_tensor = img_tensor.reshape(shape)?;
        // Permute to shape:
        //   (grid_t,
        //    grid_h / merge_size,
        //    grid_w / merge_size,
        //    merge_size,
        //    merge_size,
        //    channel,
        //    temporal_patch_size,
        //    patch_size,
        //    patch_size)
let img_tensor = img_tensor.permute(vec![0, 3, 6, 4, 7, 2, 1, 5, 8])?;
let img_tensor = img_tensor
.reshape((
grid_t * grid_h * grid_w,
channel
* self.vision_setting.temporal_patch_size
* self.vision_setting.patch_size
* self.vision_setting.patch_size,
))?
.contiguous()?;
let grid_thw = Tensor::from_vec(
vec![grid_t as u32, grid_h as u32, grid_w as u32],
(1, 3),
&self.device,
)?;
Ok((img_tensor, grid_thw))
}
pub fn process_images(
&self,
imgs: Vec<DynamicImage>,
img_mean: &Tensor,
img_std: &Tensor,
) -> Result<VisionInput> {
let mut pixel_values_vec = Vec::new();
let mut vision_grid_thws_vec = Vec::new();
for img in imgs {
let img_tensor = self.process_img(&img, img_mean, img_std)?;
let img_tensor = Tensor::cat(&[&img_tensor, &img_tensor], 0)?.contiguous()?;
let (img_tensor, grid_thw) = self.process_vision_tensor(&img_tensor)?;
pixel_values_vec.push(img_tensor);
vision_grid_thws_vec.push(grid_thw);
}
let pixel_values = Tensor::cat(&pixel_values_vec, 0)?;
let vision_grid_thws = Tensor::cat(&vision_grid_thws_vec, 0)?;
Ok(VisionInput {
data: pixel_values,
grid_thw: vision_grid_thws,
})
}
pub fn process_videos(
&self,
data: Vec<Tensor>,
img_mean: &Tensor,
img_std: &Tensor,
) -> Result<VisionInput> {
let mut pixel_values_vec = Vec::new();
let mut vision_grid_thws_vec = Vec::new();
for single_video in data {
// 0-255 rescale to 0-1
let video_tensor = single_video.to_dtype(self.dtype)?.affine(1.0 / 255.0, 0.)?;
// normalize
let video_tensor = video_tensor
.broadcast_sub(img_mean)?
.broadcast_div(img_std)?
.contiguous()?;
let (video_tensor, video_grid_thw) = self.process_vision_tensor(&video_tensor)?;
pixel_values_vec.push(video_tensor);
vision_grid_thws_vec.push(video_grid_thw);
}
let pixel_values = Tensor::cat(&pixel_values_vec, 0)?.contiguous()?;
let vision_grid_thws = Tensor::cat(&vision_grid_thws_vec, 0)?.contiguous()?;
Ok(VisionInput {
data: pixel_values,
grid_thw: vision_grid_thws,
})
}
#[allow(unused_mut)]
pub fn process_info(
&self,
messages: &ChatCompletionParameters,
text: &str,
) -> Result<GeneralInput> {
let mut pixel_values = None;
let mut image_grid_thw = None;
let mut pixel_values_video = None;
let mut video_grid_thw: Option<Tensor> = None;
let mut second_per_grid_ts = None;
let vision_map = self.extract_vision_info(messages)?;
let img_mean =
Tensor::from_slice(&self.vision_setting.image_mean, (3, 1, 1), &self.device)?
.to_dtype(self.dtype)?;
let img_std = Tensor::from_slice(&self.vision_setting.image_std, (3, 1, 1), &self.device)?
.to_dtype(self.dtype)?;
for (key, vec) in vision_map {
// println!("key: {}, \nvalue: {:?}", key, vec);
if key.eq("image") {
let mut file_vec = Vec::new();
for file in &vec {
let image = get_image(file);
match image {
Ok(img) => file_vec.push(img),
Err(e) => println!("get_image err: {e:?}"),
};
}
if !file_vec.is_empty() {
let vision_input = self.process_images(file_vec, &img_mean, &img_std);
match vision_input {
Ok(img_input) => {
pixel_values = Some(img_input.data);
image_grid_thw = Some(img_input.grid_thw);
}
Err(e) => println!("img process_images err: {e:?}"),
};
}
}
#[cfg(feature = "ffmpeg")]
if key.eq("video") {
let mut file_vec = Vec::new();
for file in &vec {
let video_data = get_video_data(file, &self.vision_setting, &self.device);
match video_data {
Ok(tensor) => file_vec.push(tensor),
Err(e) => println!("get_video_data err: {:?}", e),
};
}
if !file_vec.is_empty() {
let vision_input = self.process_videos(file_vec, &img_mean, &img_std);
match vision_input {
Ok(video_input) => {
let video_num = video_input.grid_thw.dim(0)?;
pixel_values_video = Some(video_input.data);
video_grid_thw = Some(video_input.grid_thw);
let second_per_grid = vec![
self.vision_setting.temporal_patch_size
as f32
/ self.vision_setting.fps;
video_num
];
second_per_grid_ts = Some(second_per_grid);
}
Err(e) => println!("video process_videos err: {:?}", e),
};
}
}
}
let merge_length = self.vision_setting.merge_size.pow(2);
let mut text = text.to_string();
if let Some(ref image_grid_thw) = image_grid_thw {
let mut index = 0;
while text.contains(&self.image_token) {
let grid_i = image_grid_thw.i(index)?;
let repeat_num =
grid_i.to_vec1::<u32>()?.iter().product::<u32>() as usize / merge_length;
let replace = "<|placeholder|>".repeat(repeat_num);
text = text.replacen(&self.image_token, &replace, 1);
index += 1;
}
text = text.replace("<|placeholder|>", &self.image_token);
}
#[cfg(feature = "ffmpeg")]
if let Some(ref video_grid_thw) = video_grid_thw {
let mut index = 0;
while text.contains(&self.video_token) {
let grid_i = video_grid_thw.i(index)?;
let repeat_num =
grid_i.to_vec1::<u32>()?.iter().product::<u32>() as usize / merge_length;
let replace = "<|placeholder|>".repeat(repeat_num);
text = text.replacen(&self.video_token, &replace, 1);
index += 1;
}
text = text.replace("<|placeholder|>", &self.video_token);
}
let input = GeneralInput {
replace_text: text,
pixel_values,
image_grid_thw,
pixel_values_video,
video_grid_thw,
second_per_grid_ts,
};
Ok(input)
}
}
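// Example (illustrative): a 1000x700 input with image_factor = 28 rounds to 1008x700
// (1000 / 28 = 35.7 -> rounds to 36 -> 1008; 700 is already a multiple of 28). If the
// result overshoots max_pixels (or undershoots min_pixels), both sides are rescaled by
// sqrt(beta) and re-aligned to the factor.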
pub fn smart_resize(
img_h: u32,
img_w: u32,
vision_setting: &VisionSetting,
is_img: bool,
video_ratio: Option<u32>,
) -> Result<(u32, u32)> {
if std::cmp::max(img_h, img_w) / std::cmp::min(img_h, img_w) > vision_setting.max_ratio {
return Err(anyhow!(format!(
"absolute aspect ratio mush be smaller than {}, got {}",
vision_setting.max_ratio,
std::cmp::max(img_h, img_w) / std::cmp::min(img_h, img_w)
)));
}
let mut image_factor = vision_setting.image_factor;
if let Some(ratio) = video_ratio {
image_factor = lcm(image_factor, ratio);
}
let mut h_bar = std::cmp::max(image_factor, round_by_factor(img_h, image_factor));
let mut w_bar = std::cmp::max(image_factor, round_by_factor(img_w, image_factor));
let (min_pixels, max_pixels) = if is_img {
(vision_setting.min_pixels, vision_setting.max_pixels)
} else {
(
vision_setting.video_min_pixels,
vision_setting.video_max_pixels,
)
};
if h_bar * w_bar > max_pixels {
let beta = ((img_h * img_w) as f32 / max_pixels as f32).sqrt();
h_bar = floor_by_factor(img_h as f32 / beta, image_factor);
w_bar = floor_by_factor(img_w as f32 / beta, image_factor);
} else if h_bar * w_bar < min_pixels {
let beta = (min_pixels as f32 / (img_h * img_w) as f32).sqrt();
h_bar = ceil_by_factor(img_h as f32 * beta, image_factor);
w_bar = ceil_by_factor(img_w as f32 * beta, image_factor);
}
Ok((h_bar, w_bar))
}
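// Frame-sampling sketch: target nframes = total_frames / src_fps * fps (2.0 by default),
// clamped to [fps_min_frames, fps_max_frames] and rounded to frame_factor; decoded
// frames are then kept once every `sample_interval` frames.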
#[cfg(feature = "ffmpeg")]
pub fn get_video_data(
file: &String,
vision_setting: &VisionSetting,
device: &Device,
) -> Result<Tensor> {
ffmpeg::init().map_err(|e| anyhow!(format!("Failed to initialize ffmpeg: {}", e)))?;
let mut ictx = ffmpeg::format::input(&file)
.map_err(|e| anyhow!(format!("Failed to open video file: {}", e)))?;
let input = ictx
.streams()
.best(ffmpeg::media::Type::Video)
.ok_or_else(|| anyhow!(format!("No video stream found")))?;
let video_stream_index = input.index();
let context_decoder = ffmpeg::codec::context::Context::from_parameters(input.parameters())
.map_err(|e| anyhow!(format!("Failed to crate decoder context: {}", e)))?;
let mut decoder = context_decoder
.decoder()
.video()
.map_err(|e| anyhow!(format!("Failed to decoder video: {}", e)))?;
let video_h = decoder.height();
let video_w = decoder.width();
let format = decoder.format();
let frames = input.frames();
let rate = (input.rate().0 as f32 / input.rate().1 as f32).round() as u32;
    // Sample two frames per second
let min_frames = ceil_by_factor(
vision_setting.fps_min_frames as f32,
vision_setting.frame_factor,
);
let max_frames = floor_by_factor(
vision_setting.fps_max_frames as f32,
vision_setting.frame_factor,
);
let nframes = (frames as f32 / rate as f32 * vision_setting.fps) as u32;
let nframes = std::cmp::min(std::cmp::max(nframes, min_frames), max_frames);
let nframes = round_by_factor(nframes, vision_setting.frame_factor);
let sample_interval = (frames as f32 / nframes as f32).round() as u32;
let mut frame_id = 0_u32;
    // When reshaping frames through the scaler, width and height must be multiples of 16,
    // otherwise the scaled frame comes out corrupted. So the resize target is computed
    // using the least common multiple of 16 and image_factor.
let (resize_h, resize_w) = smart_resize(video_h, video_w, vision_setting, false, Some(16))?;
let mut scaler = ffmpeg::software::scaling::context::Context::get(
format,
video_w,
video_h,
ffmpeg::format::Pixel::RGB24,
resize_w,
resize_h,
ffmpeg::software::scaling::flag::Flags::BILINEAR
| ffmpeg::software::scaling::flag::Flags::ACCURATE_RND,
)
.map_err(|e| anyhow!(format!("Failed to crate scaler: {}", e)))?;
let mut frames_vec = Vec::new();
let mut receive_and_process_decoded_frames =
|decoder: &mut ffmpeg::decoder::Video| -> Result<()> {
let mut decoded = ffmpeg::frame::Video::empty();
while decoder.receive_frame(&mut decoded).is_ok() {
if frame_id.is_multiple_of(sample_interval) {
let mut rgb_frame = ffmpeg::frame::Video::empty();
scaler
.run(&decoded, &mut rgb_frame)
.map_err(|e| anyhow!(format!("Failed to scaler run decoded: {}", e)))?;
// save_file(&rgb_frame, frame_id as usize);
let frame_data = rgb_frame.data(0);
let frame_tensor = Tensor::from_slice(
frame_data,
(resize_h as usize, resize_w as usize, 3),
device,
)?
.permute((2, 0, 1))?;
frames_vec.push(frame_tensor);
}
frame_id += 1;
}
Ok(())
};
for (stream, packet) in ictx.packets() {
if stream.index() == video_stream_index {
decoder
.send_packet(&packet)
.map_err(|e| anyhow!(format!("Failed to send packet: {}", e)))?;
receive_and_process_decoded_frames(&mut decoder)?;
}
}
decoder
.send_eof()
.map_err(|e| anyhow!(format!("Failed to decoder.send_eof(): {}", e)))?;
receive_and_process_decoded_frames(&mut decoder)?;
if frames_vec.is_empty() {
return Err(anyhow!("No frames extracted from video".to_string()));
}
// (t, c, h, w)
let frames_tensor = Tensor::stack(&frames_vec, 0)?.contiguous()?;
Ok(frames_tensor)
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/qwen2_5vl/model.rs | src/models/qwen2_5vl/model.rs | use anyhow::{Result, anyhow};
use candle_core::{D, DType, Device, IndexOp, Tensor};
use candle_nn::{Init, Linear, Module, RmsNorm, VarBuilder, linear, linear_no_bias, rms_norm};
use crate::{
models::{
common::{GateUpDownMLP, eager_attention_forward},
qwen2_5vl::config::{Qwen2_5VLConfig, RopeScaling},
},
position_embed::rope::{
Qwen2_5VLTextRotaryEmbedding, Qwen2_5VisionRotaryEmbedding, apply_rotary_pos_emb,
apply_rotary_pos_emb_vision,
},
utils::tensor_utils::{
get_equal_mask, get_vision_next_indices, masked_scatter_dim0, nonzero_index,
prepare_causal_attention_mask, safe_arg_sort_last_dim, zero_index,
},
};
pub struct Qwen2_5VisionPatchEmbed {
conv3d_weight: Tensor,
}
impl Qwen2_5VisionPatchEmbed {
pub fn new(cfg: &Qwen2_5VLConfig, vb: VarBuilder) -> Result<Self> {
let patch_size = cfg.vision_config.patch_size;
let temporal_patch_size = cfg.vision_config.temporal_patch_size;
let in_channels = cfg.vision_config.in_chans;
let embed_dim = cfg.vision_config.hidden_size;
// conv3d weight key: visual.patch_embed.proj.weight, value: Tensor[dims 1280, 3, 2, 14, 14; bf16, cuda:0]
// (1280, 3, 2, 14, 14) -> (1280, 1176) -> (1176, 1280)
let conv3d_weight = vb
.get_with_hints(
(
embed_dim,
in_channels,
temporal_patch_size,
patch_size,
patch_size,
),
"proj.weight",
Init::Const(1.),
)?
.flatten(1, 4)?
.t()?;
Ok(Self { conv3d_weight })
}
pub fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
// hidden_states shape: (grid_t*grid_h*grid_w, c*temporal_patch_size*patch_size*patch_size)
// ((), 1176) matmul (1176, 1280) -> ((), 1280)
let hidden_states = hidden_states.matmul(&self.conv3d_weight)?;
Ok(hidden_states)
}
}
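// Note: the processor has already cut pixels into non-overlapping
// (temporal_patch_size x patch_size x patch_size) patches, so a Conv3d whose stride
// equals its kernel size degenerates into a single matmul against the flattened
// kernel - which is exactly what `forward` above computes.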
pub struct Qwen2_5VLPatchMerger {
hidden_size: usize,
ln_q: RmsNorm,
mlp_0: Linear,
mlp_2: Linear,
}
impl Qwen2_5VLPatchMerger {
pub fn new(cfg: &Qwen2_5VLConfig, vb: VarBuilder) -> Result<Self> {
let hidden_size =
cfg.vision_config.hidden_size * (cfg.vision_config.spatial_merge_size.pow(2));
let ln_q = rms_norm(
cfg.vision_config.hidden_size,
cfg.rms_norm_eps,
vb.pp("ln_q"),
)?;
let mlp_0 = linear(hidden_size, hidden_size, vb.pp("mlp.0"))?;
let mlp_2 = linear(
hidden_size,
cfg.vision_config.out_hidden_size,
vb.pp("mlp.2"),
)?;
Ok(Self {
hidden_size,
ln_q,
mlp_0,
mlp_2,
})
}
}
impl Module for Qwen2_5VLPatchMerger {
fn forward(&self, xs: &Tensor) -> candle_core::Result<Tensor> {
let xs = xs.apply(&self.ln_q)?.reshape(((), self.hidden_size))?;
let xs = xs.apply(&self.mlp_0)?.gelu()?.apply(&self.mlp_2)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct Qwen2_5VLVisionAttention {
qkv: Linear,
proj: Linear,
num_heads: usize,
// scale: Tensor,
scale: f64,
}
impl Qwen2_5VLVisionAttention {
fn new(cfg: &Qwen2_5VLConfig, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.vision_config.hidden_size;
let num_heads = cfg.vision_config.num_heads;
let head_dim = hidden_size / num_heads;
let qkv = linear(hidden_size, hidden_size * 3, vb.pp("qkv"))?;
let proj = linear(hidden_size, hidden_size, vb.pp("proj"))?;
// let scale = Tensor::new(vec![1f32 / (head_dim as f32).sqrt()], vb.device())?
// .to_dtype(vb.dtype())?;
let scale = 1f64 / (head_dim as f64).sqrt();
Ok(Self {
qkv,
proj,
num_heads,
scale,
})
}
fn forward(
&self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: &Tensor,
) -> Result<Tensor> {
// xs: (seq_len, hidden_size)
let seq_length = xs.dim(0)?;
// (seq_len, hidden_size) -> (seq_len, hidden_size*3)
// -> (seq_len, 3, num_heads, head_dim) -> (3, seq_len, num_heads, head_dim)
let qkv_states = xs
.apply(&self.qkv)?
.reshape((seq_length, 3, self.num_heads, ()))?
.permute((1, 0, 2, 3))?;
// (seq_len, num_heads, head_dim)
let query_states = qkv_states.i(0)?.contiguous()?;
let key_states = qkv_states.i(1)?.contiguous()?;
let value_states = qkv_states.i(2)?.contiguous()?;
let (query_states, key_states) =
apply_rotary_pos_emb_vision(&query_states, &key_states, cos, sin)?;
//(seq_len, num_heads, head_dim) -> (num_heads, seq_len, head_dim) -> (1, num_heads, seq_len, head_dim)
let query_states = query_states.transpose(0, 1)?.unsqueeze(0)?.contiguous()?;
let key_states = key_states.transpose(0, 1)?.unsqueeze(0)?.contiguous()?;
let value_states = value_states.transpose(0, 1)?.unsqueeze(0)?.contiguous()?;
let attn_output = eager_attention_forward(
&query_states,
&key_states,
&value_states,
None,
Some(attention_mask),
self.scale,
)?;
//(1, seq_len, n_head, dim) -> (seq_len, n_head, dim)
let attn_output = attn_output
.squeeze(0)?
.reshape((seq_length, ()))?
.contiguous()?;
        let attn_output = attn_output.apply(&self.proj)?;
        Ok(attn_output)
}
}
#[derive(Debug, Clone)]
struct Qwen2_5VLVisionBlock {
attn: Qwen2_5VLVisionAttention,
mlp: GateUpDownMLP,
norm1: RmsNorm,
norm2: RmsNorm,
}
impl Qwen2_5VLVisionBlock {
fn new(cfg: &Qwen2_5VLConfig, vb: VarBuilder) -> Result<Self> {
let attn = Qwen2_5VLVisionAttention::new(cfg, vb.pp("attn"))?;
let mlp = GateUpDownMLP::new(
vb.pp("mlp"),
cfg.vision_config.hidden_size,
cfg.vision_config.intermediate_size,
cfg.vision_config.hidden_act,
true,
)?;
let norm1 = rms_norm(
cfg.vision_config.hidden_size,
cfg.rms_norm_eps,
vb.pp("norm1"),
)?;
let norm2 = rms_norm(
cfg.vision_config.hidden_size,
cfg.rms_norm_eps,
vb.pp("norm2"),
)?;
Ok(Self {
attn,
mlp,
norm1,
norm2,
})
}
fn forward(
&self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: &Tensor,
) -> Result<Tensor> {
let residual = xs;
let xs = self.norm1.forward(xs)?;
let xs = self.attn.forward(&xs, cos, sin, attention_mask)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.norm2)?.apply(&self.mlp)?;
let xs = (residual + xs)?;
Ok(xs)
}
}
pub struct Qwen2_5VLVisionModel {
spatial_merge_size: usize,
patch_size: usize,
fullatt_block_indexes: Vec<usize>,
window_size: usize,
spatial_merge_unit: usize,
patch_embed: Qwen2_5VisionPatchEmbed,
rotary_pos_emb: Qwen2_5VisionRotaryEmbedding,
blocks: Vec<Qwen2_5VLVisionBlock>,
merger: Qwen2_5VLPatchMerger,
dtype: DType,
}
impl Qwen2_5VLVisionModel {
pub fn new(cfg: &Qwen2_5VLConfig, vb: VarBuilder) -> Result<Self> {
let spatial_merge_size = cfg.vision_config.spatial_merge_size;
let patch_size = cfg.vision_config.patch_size;
let fullatt_block_indexes = cfg.vision_config.fullatt_block_indexes.clone();
let window_size = cfg.vision_config.window_size;
let spatial_merge_unit = spatial_merge_size * spatial_merge_size;
let head_dim = cfg.vision_config.hidden_size / cfg.vision_config.num_heads;
let patch_embed = Qwen2_5VisionPatchEmbed::new(cfg, vb.pp("patch_embed"))?;
let rotary_pos_emb = Qwen2_5VisionRotaryEmbedding::new(head_dim / 2, None);
let mut blocks = Vec::new();
let vb_blocks = vb.pp("blocks");
for i in 0..cfg.vision_config.depth {
let block = Qwen2_5VLVisionBlock::new(cfg, vb_blocks.pp(i))?;
blocks.push(block);
}
let merger = Qwen2_5VLPatchMerger::new(cfg, vb.pp("merger"))?;
let dtype = vb.dtype();
Ok(Self {
spatial_merge_size,
patch_size,
fullatt_block_indexes,
window_size,
spatial_merge_unit,
patch_embed,
rotary_pos_emb,
blocks,
merger,
dtype,
})
}
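    // 2D RoPE index sketch (illustrative): for grid (t=1, h=4, w=4) with
    // spatial_merge_size=2, the (h, w) pairs are emitted in merge-window order, so the
    // first four pairs are (0,0), (0,1), (1,0), (1,1) - one complete 2x2 merge cell.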
pub fn rot_pos_emb(&self, grid_thw: &Tensor) -> Result<Tensor> {
let mut pos_ids = Vec::new();
for i in 0..grid_thw.dim(0)? {
            let [t, h, w] = grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
                return Err(anyhow!("grid_thw: expected exactly 3 elements"));
            };
// hpos_ids shape (h, w)
let hpos_ids = Tensor::arange(0, h, grid_thw.device())?
.unsqueeze(1)?
.expand((h as usize, w as usize))?;
let hpos_ids = hpos_ids.reshape((
h as usize / self.spatial_merge_size,
self.spatial_merge_size,
w as usize / self.spatial_merge_size,
self.spatial_merge_size,
))?;
let hpos_ids = hpos_ids.permute((0, 2, 1, 3))?.flatten_all()?;
let wpos_ids = Tensor::arange(0, w, grid_thw.device())?
.unsqueeze(0)?
.expand((h as usize, w as usize))?;
let wpos_ids = wpos_ids.reshape((
h as usize / self.spatial_merge_size,
self.spatial_merge_size,
w as usize / self.spatial_merge_size,
self.spatial_merge_size,
))?;
let wpos_ids = wpos_ids.permute((0, 2, 1, 3))?.flatten_all()?;
// thw_pos_ids shape (h*w, 2)
let thw_pos_ids =
Tensor::stack(&[&hpos_ids, &wpos_ids], D::Minus1)?.repeat((t as usize, 1))?;
pos_ids.push(thw_pos_ids);
}
let pos_ids = Tensor::cat(&pos_ids, 0)?.contiguous()?;
let max_grid_size = grid_thw.i((.., 1..))?.max_all()?.to_scalar::<u32>()?;
let rotary_pos_emb_full = self
.rotary_pos_emb
.forward(max_grid_size as usize, grid_thw.device())?;
        // contiguous() is essential here - without it, index_select returns wrong values.
        // Hard-won debugging lesson: after any dimension-indexing op, calling contiguous() is the safe default.
        // Column 0 holds the h-axis indices.
        let pos_ids_h = pos_ids.i((.., 0))?.contiguous()?;
        // Column 1 holds the w-axis indices.
        let pos_ids_w = pos_ids.i((.., 1))?.contiguous()?;
let rotary_pos_emb_h = rotary_pos_emb_full.index_select(&pos_ids_h, 0)?;
let rotary_pos_emb_w = rotary_pos_emb_full.index_select(&pos_ids_w, 0)?;
        // Each patch combines the positional encodings of both its h index and w index
let rotary_pos_emb = Tensor::cat(&[rotary_pos_emb_h, rotary_pos_emb_w], 1)?.contiguous()?;
Ok(rotary_pos_emb)
}
pub fn get_window_index(&self, grid_thw: &Tensor) -> Result<(Tensor, Tensor)> {
let mut window_index = Vec::new();
let mut cu_window_seqlens = vec![0];
let mut window_index_id = 0_i64;
let vit_merger_window_size =
(self.window_size / self.spatial_merge_size / self.patch_size) as u32;
for i in 0..grid_thw.dim(0)? {
            let [grid_t, grid_h, grid_w] = grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
                return Err(anyhow!("grid_thw: expected exactly 3 elements"));
            };
let llm_grid_h = grid_h / self.spatial_merge_size as u32;
let llm_grid_w = grid_w / self.spatial_merge_size as u32;
            // -100 is used below as the padding sentinel, so an integer dtype is needed.
            // candle does not define an i32 DType, hence i64 here.
let mut index = Tensor::arange(
window_index_id,
window_index_id + (grid_t * llm_grid_h * llm_grid_w) as i64,
grid_thw.device(),
)?
.reshape((grid_t as usize, llm_grid_h as usize, llm_grid_w as usize))?
.contiguous()?;
            // The Python transformers implementation is:
            //   let pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size;
            // The trailing `% vit_merger_window_size` added here ensures no padding when
            // llm_grid_h divides vit_merger_window_size evenly. In principle no pad should
            // be needed in that case; it is unclear why transformers does it differently.
let pad_h = (vit_merger_window_size - llm_grid_h % vit_merger_window_size)
% vit_merger_window_size;
let pad_w = (vit_merger_window_size - llm_grid_w % vit_merger_window_size)
% vit_merger_window_size;
let num_window_h = (llm_grid_h + pad_h) / vit_merger_window_size;
let num_window_w = (llm_grid_w + pad_w) / vit_merger_window_size;
if pad_h > 0 {
let pad_h_t = Tensor::new(vec![-100_i64], grid_thw.device())?
.broadcast_as((grid_t as usize, pad_h as usize, llm_grid_w as usize))?
.contiguous()?;
index = Tensor::cat(&[&index, &pad_h_t], 1)?;
}
if pad_w > 0 {
let pad_w_t = Tensor::new(vec![-100_i64], grid_thw.device())?
.broadcast_as((
grid_t as usize,
(llm_grid_h + pad_h) as usize,
pad_w as usize,
))?
.contiguous()?;
index = Tensor::cat(&[&index, &pad_w_t], 2)?;
}
let index_padded = index
.reshape((
grid_t as usize,
num_window_h as usize,
vit_merger_window_size as usize,
num_window_w as usize,
vit_merger_window_size as usize,
))?
.permute((0, 1, 3, 2, 4))?;
let index_padded = index_padded
.reshape((
grid_t as usize,
(num_window_h * num_window_w) as usize,
vit_merger_window_size as usize,
vit_merger_window_size as usize,
))?
.contiguous()?;
let is_pad = Tensor::new(vec![-100_i64], grid_thw.device())?;
let seqlens = index_padded
.broadcast_ne(&is_pad)?
.sum((2, 3))?
.flatten_all()?;
let index_padded = index_padded.flatten_all()?;
let not_pad = index_padded.broadcast_ne(&is_pad)?.to_vec1::<u8>()?;
let indices: Vec<u32> = not_pad
.iter()
.enumerate()
.filter_map(|(idx, &val)| if val != 0 { Some(idx as u32) } else { None })
.collect();
let indices_tensor = Tensor::from_slice(&indices, indices.len(), grid_thw.device())?;
let index_new = index_padded.gather(&indices_tensor, 0)?;
window_index.push(index_new);
let seq_len_last = cu_window_seqlens[cu_window_seqlens.len() - 1];
            // cumsum errors on i64 tensors, so compute in F64 and cast back to i64
let cu_seqlens_tmp = seqlens
.to_dtype(candle_core::DType::F64)?
.cumsum(0)?
.to_dtype(candle_core::DType::I64)?
.broadcast_mul(&Tensor::new(
vec![self.spatial_merge_unit as i64],
grid_thw.device(),
)?)?
.broadcast_add(&Tensor::new(vec![seq_len_last], grid_thw.device())?)?;
cu_window_seqlens.extend_from_slice(&cu_seqlens_tmp.to_vec1::<i64>()?);
window_index_id += (grid_t * llm_grid_h * llm_grid_w) as i64;
}
let window_index_tensor = Tensor::cat(&window_index, 0)?;
let cu_window_seqlens_tensor = Tensor::from_slice(
&cu_window_seqlens,
cu_window_seqlens.len(),
grid_thw.device(),
)?
.to_dtype(candle_core::DType::U32)?;
Ok((window_index_tensor, cu_window_seqlens_tensor))
}
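    // Block-diagonal mask sketch: cu_seqlens = [0, 4, 6] yields two attention blocks
    // (rows/cols 0..4 and 4..6); every position outside its block stays at -inf.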
pub fn get_attention_mask(
&self,
cu_seqlens: &Tensor,
seq_len: usize,
device: &Device,
dtype: DType,
) -> Result<Tensor> {
let mut attention_mask =
Tensor::new(f32::NEG_INFINITY, device)?.broadcast_as((1, seq_len, seq_len))?;
for i in 1..cu_seqlens.dim(0)? {
let start = cu_seqlens.i(i - 1)?.to_scalar::<u32>()? as usize;
let end = cu_seqlens.i(i)?.to_scalar::<u32>()? as usize;
let block_size = end - start;
let zeros =
Tensor::zeros((1, block_size, block_size), candle_core::DType::F32, device)?;
attention_mask =
attention_mask.slice_assign(&[(0..1), (start..end), (start..end)], &zeros)?;
}
let attention_mask = attention_mask.to_dtype(dtype)?.contiguous()?;
Ok(attention_mask)
}
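    // forward(): tokens are permuted into window order (via window_index) so that each
    // attention window is contiguous, then restored to raster order at the end through
    // the inverse permutation (arg-sort of window_index).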
pub fn forward(&self, hidden_states: &Tensor, grid_thw: &Tensor) -> Result<Tensor> {
// hidden_states: (seq_len, hidden_size)
// grid_thw: (num_images_or_videos, 3), temporal, height, width
let hidden_states = hidden_states.to_dtype(self.dtype)?;
let hidden_states = self.patch_embed.forward(&hidden_states)?;
let rotary_pos_emb = self.rot_pos_emb(grid_thw)?;
let (window_index, cu_window_seqlens) = self.get_window_index(grid_thw)?;
let seq_len = hidden_states.dim(0)?;
let hidden_states = hidden_states
.reshape((
seq_len / self.spatial_merge_unit,
self.spatial_merge_unit,
(),
))?
.contiguous()?;
let hidden_states = hidden_states.index_select(&window_index, 0)?;
let mut hidden_states = hidden_states.reshape((seq_len, ()))?;
let rotary_pos_emb = rotary_pos_emb.reshape((
seq_len / self.spatial_merge_unit,
self.spatial_merge_unit,
(),
))?;
let rotary_pos_emb = rotary_pos_emb.index_select(&window_index, 0)?;
let rotary_pos_emb = rotary_pos_emb.reshape((seq_len, ()))?;
let emb = Tensor::cat(&[&rotary_pos_emb, &rotary_pos_emb], D::Minus1)?;
let cos = emb.cos()?.to_dtype(hidden_states.dtype())?;
let sin = emb.sin()?.to_dtype(hidden_states.dtype())?;
let cu_seqlens = grid_thw.i((.., 1))?.mul(&grid_thw.i((.., 2))?)?;
let grid_t = grid_thw.i((.., 0))?.to_vec1::<u32>()?;
let mut cu_seqlens_repeat = Vec::new();
for (index, t) in grid_t.iter().enumerate() {
cu_seqlens_repeat.push(cu_seqlens.i(index)?.repeat(*t as usize)?);
}
let cu_seqlens_full = Tensor::cat(&cu_seqlens_repeat, 0)?.flatten_all()?;
let cu_seqlens = cu_seqlens_full
.to_dtype(DType::F64)?
.cumsum(0)?
.to_dtype(DType::U32)?;
let pad_zero = Tensor::from_vec(vec![0_u32], 1, hidden_states.device())?;
let cu_seqlens = Tensor::cat(&[&pad_zero, &cu_seqlens], D::Minus1)?;
let attention_mask_window = self.get_attention_mask(
&cu_window_seqlens,
seq_len,
hidden_states.device(),
hidden_states.dtype(),
)?;
let attention_mask_full = self.get_attention_mask(
&cu_seqlens,
seq_len,
hidden_states.device(),
hidden_states.dtype(),
)?;
let mut attention_mask;
for (layer_num, block) in self.blocks.iter().enumerate() {
if self.fullatt_block_indexes.contains(&layer_num) {
attention_mask = attention_mask_full.clone();
} else {
attention_mask = attention_mask_window.clone();
}
hidden_states = block.forward(&hidden_states, &cos, &sin, &attention_mask)?;
}
let hidden_states = self.merger.forward(&hidden_states)?;
let reverse_indices = safe_arg_sort_last_dim(&window_index, true)?;
let hidden_states = hidden_states.index_select(&reverse_indices, 0)?;
Ok(hidden_states)
}
}
#[derive(Debug, Clone)]
struct Qwen2_5VLTextAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Qwen2_5VLTextAttention {
fn new(cfg: &Qwen2_5VLConfig, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_size / num_heads;
let q_proj = linear(hidden_size, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear(hidden_size, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear(hidden_size, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(hidden_size, hidden_size, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size,
kv_cache: None,
})
}
fn forward(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
apply_rotary_pos_emb(&query_states, &key_states, cos, sin, false)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_output = eager_attention_forward(
&query_states,
&key_states,
&value_states,
Some(self.num_kv_groups),
attention_mask,
scale,
)?;
let attn_output = attn_output.reshape((b_sz, q_len, self.hidden_size))?;
let attn_output = attn_output.apply(&self.o_proj)?;
Ok(attn_output)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct Qwen2_5VLTextDecoderLayer {
self_attn: Qwen2_5VLTextAttention,
mlp: GateUpDownMLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl Qwen2_5VLTextDecoderLayer {
fn new(cfg: &Qwen2_5VLConfig, vb: VarBuilder) -> Result<Self> {
let self_attn = Qwen2_5VLTextAttention::new(cfg, vb.pp("self_attn"))?;
let mlp = GateUpDownMLP::new(
vb.pp("mlp"),
cfg.hidden_size,
cfg.intermediate_size,
cfg.hidden_act,
false,
)?;
let input_layernorm =
rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = rms_norm(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, cos, sin, attention_mask)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
let xs = (residual + xs)?;
Ok(xs)
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Qwen2_5VLTextModel {
pub embed_tokens: candle_nn::Embedding,
layers: Vec<Qwen2_5VLTextDecoderLayer>,
norm: RmsNorm,
rotary_emb: Qwen2_5VLTextRotaryEmbedding,
dtype: DType,
rope_scaling: RopeScaling,
}
impl Qwen2_5VLTextModel {
pub fn new(cfg: &Qwen2_5VLConfig, vb: VarBuilder) -> Result<Self> {
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?;
let head_dim = cfg.hidden_size / cfg.num_attention_heads;
let rotary_emb = Qwen2_5VLTextRotaryEmbedding::new(head_dim, cfg.rope_theta);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = Qwen2_5VLTextDecoderLayer::new(cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("norm"))?;
let rope_scaling = cfg.rope_scaling.clone();
Ok(Self {
embed_tokens,
layers,
norm,
rotary_emb,
dtype: vb.dtype(),
rope_scaling,
})
}
pub fn forward(
&mut self,
inputs_embeds: &Tensor,
seqlen_offset: usize,
position_ids: Option<&Tensor>,
) -> Result<Tensor> {
let (b_size, seq_len, _) = inputs_embeds.dims3()?;
let position_ids = match position_ids {
Some(ids) => ids.clone(),
None => Tensor::arange(
seqlen_offset as u32,
(seq_len + seqlen_offset) as u32,
inputs_embeds.device(),
)?
.unsqueeze(0)?
.unsqueeze(0)?
.broadcast_as((3, b_size, seq_len))?,
};
let (cos, sin) = self.rotary_emb.forward(
&position_ids,
self.dtype,
self.rope_scaling.mrope_section.clone(),
)?;
let mut xs = inputs_embeds.clone();
let attention_mask: Option<Tensor> = {
if seq_len <= 1 {
None
} else {
Some(prepare_causal_attention_mask(
b_size,
seq_len,
0,
xs.device(),
)?)
}
};
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, &cos, &sin, attention_mask.as_ref())?;
}
let xs = xs.apply(&self.norm)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
pub struct Qwen2_5VLModel {
visual: Qwen2_5VLVisionModel,
model: Qwen2_5VLTextModel,
pub cfg: Qwen2_5VLConfig,
lm_head: Linear,
rope_deltas: Option<Tensor>,
}
impl Qwen2_5VLModel {
pub fn new(cfg: Qwen2_5VLConfig, vb: VarBuilder) -> Result<Self> {
let visual = Qwen2_5VLVisionModel::new(&cfg, vb.pp("visual"))?;
let model = Qwen2_5VLTextModel::new(&cfg, vb.pp("model"))?;
let vocab_size = cfg.vocab_size;
let lm_head = if cfg.tie_word_embeddings {
Linear::new(model.embed_tokens.embeddings().clone(), None)
} else {
linear_no_bias(cfg.hidden_size, vocab_size, vb.pp("lm_head"))?
};
Ok(Self {
visual,
model,
cfg,
lm_head,
rope_deltas: None,
})
}
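    // Same M-RoPE indexing scheme as sketched for PaddleOCR-VL earlier in this dump:
    // one position row per (t, h, w) axis, text tokens advancing all three together,
    // and video grids additionally scaled by second_per_grid_t * tokens_per_second.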
pub fn get_rope_index(
&self,
input_ids: &Tensor,
image_grid_thw: Option<&Tensor>,
video_grid_thw: Option<&Tensor>,
mask: Option<&Tensor>,
second_per_grid_ts: Option<Vec<f32>>,
) -> Result<(Tensor, Tensor)> {
let spatial_merge_size = self.cfg.vision_config.spatial_merge_size;
let mut mrope_position_deltas: Vec<i64> = Vec::new();
if image_grid_thw.is_some() || video_grid_thw.is_some() {
let total_input_ids = input_ids.clone();
let mask_ = mask
.cloned()
.unwrap_or(Tensor::ones_like(&total_input_ids)?);
let mut position_ids = Tensor::ones(
(3, input_ids.dim(0)?, input_ids.dim(1)?),
input_ids.dtype(),
input_ids.device(),
)?;
let mut image_index = 0;
let mut video_index = 0;
for i in 0..total_input_ids.dim(0)? {
let mut input_ids_i = total_input_ids.i(i)?;
let mask_i = mask_.i(i)?;
                // At inference time, if attention_mask is all ones, gathering non-zero indices is unnecessary
if mask_i.sum_all()?.to_scalar::<u32>()? != mask_i.dim(0)? as u32 {
let nonzero_idx = nonzero_index(&mask_i)?;
input_ids_i = input_ids_i.gather(&nonzero_idx, 0)?;
}
let mut text_start = 0;
let mut text_end = 0;
let mut thw = vec![];
let mut second_per_grid_t = 0_f32;
let mut llm_pos_ids_list: Vec<Tensor> = Vec::new();
                // Indices immediately after each vision-start token
let vision_indices =
get_vision_next_indices(&input_ids_i, self.cfg.vision_start_token_id as u32);
match vision_indices {
                    Ok(indices) => {
                        let vision_tokens = input_ids_i.gather(&indices, 0)?.to_vec1::<u32>()?;
                        let vision_indices_vec = indices.to_vec1::<u32>()?;
for (j, &token) in vision_tokens.iter().enumerate() {
if token == self.cfg.image_token_id as u32 {
thw = image_grid_thw.unwrap().i(image_index)?.to_vec1::<u32>()?;
image_index += 1;
text_end = vision_indices_vec[j];
second_per_grid_t = 0.0;
}
if token == self.cfg.video_token_id as u32 {
thw = video_grid_thw.unwrap().i(video_index)?.to_vec1::<u32>()?;
text_end = vision_indices_vec[j];
second_per_grid_t = match second_per_grid_ts {
None => 1.0,
Some(ref vec) => vec[video_index],
};
video_index += 1;
}
let llm_grid_t = thw[0];
let llm_grid_h = thw[1] / spatial_merge_size as u32;
let llm_grid_w = thw[2] / spatial_merge_size as u32;
let text_len = text_end - text_start;
let start_idx = if !llm_pos_ids_list.is_empty() {
llm_pos_ids_list[llm_pos_ids_list.len() - 1]
.max_all()?
.to_scalar::<u32>()?
+ 1
} else {
0
};
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | true |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/qwen2_5vl/mod.rs | src/models/qwen2_5vl/mod.rs | pub mod config;
pub mod generate;
pub mod model;
pub mod processor;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/qwen2_5vl/generate.rs | src/models/qwen2_5vl/generate.rs | use aha_openai_dive::v1::resources::chat::{
ChatCompletionChunkResponse, ChatCompletionParameters, ChatCompletionResponse,
};
use anyhow::{Result, anyhow};
use candle_core::{D, DType, Device, IndexOp, Tensor};
use candle_nn::VarBuilder;
use rocket::async_stream::stream;
use rocket::futures::Stream;
use crate::models::qwen2_5vl::config::Qwen2_5VLConfig;
use crate::utils::{
build_completion_chunk_response, build_completion_response, find_type_files, get_device,
get_dtype, get_logit_processor,
};
use crate::{
chat_template::ChatTemplate,
models::{
GenerateModel,
qwen2_5vl::{model::Qwen2_5VLModel, processor::Qwen2_5VLProcessor},
},
tokenizer::TokenizerModel,
};
pub struct Qwen2_5VLGenerateModel<'a> {
chat_template: ChatTemplate<'a>,
tokenizer: TokenizerModel,
pre_processor: Qwen2_5VLProcessor,
qwen2_5_vl: Qwen2_5VLModel,
device: Device,
endoftext_id: u32,
im_end_id: u32,
model_name: String,
}
impl<'a> Qwen2_5VLGenerateModel<'a> {
pub fn init(path: &str, device: Option<&Device>, dtype: Option<DType>) -> Result<Self> {
let chat_template = ChatTemplate::init(path)?;
let tokenizer = TokenizerModel::init(path)?;
let config_path = path.to_string() + "/config.json";
let cfg: Qwen2_5VLConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
let device = &get_device(device);
let cfg_dtype = cfg.torch_dtype.as_str();
let dtype = get_dtype(dtype, cfg_dtype);
let pre_processor = Qwen2_5VLProcessor::new(device, dtype)?;
let endoftext_id = cfg.bos_token_id;
let im_end_id = cfg.eos_token_id;
// let model_list = find_safetensors_files(&path)?;
let model_list = find_type_files(path, "safetensors")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&model_list, dtype, device)? };
let qwen2_5_vl = Qwen2_5VLModel::new(cfg, vb)?;
Ok(Qwen2_5VLGenerateModel {
chat_template,
tokenizer,
pre_processor,
qwen2_5_vl,
device: device.clone(),
endoftext_id,
im_end_id,
model_name: "qwen2.5vl".to_string(),
})
}
}
impl<'a> GenerateModel for Qwen2_5VLGenerateModel<'a> {
fn generate(&mut self, mes: ChatCompletionParameters) -> Result<ChatCompletionResponse> {
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor = get_logit_processor(mes.temperature, mes.top_p, None, seed);
let mes_render = self.chat_template.apply_chat_template(&mes)?;
let input = self.pre_processor.process_info(&mes, &mes_render)?;
let mut input_ids = self
.tokenizer
.text_encode(input.replace_text.clone(), &self.device)?;
let mut seq_len = input_ids.dim(1)?;
let mut seqlen_offset = 0;
let mut pixel_values = input.pixel_values.as_ref();
let image_grid_thw = input.image_grid_thw.as_ref();
let mut pixel_values_video = input.pixel_values_video.as_ref();
let video_grid_thw = input.video_grid_thw.as_ref();
let second_per_grid_ts = input.second_per_grid_ts.clone();
let mut mask = Tensor::ones_like(&input_ids)?;
let mut cache_position = Tensor::ones_like(&input_ids.i(0)?)?
.to_dtype(candle_core::DType::F64)?
.cumsum(D::Minus1)?
.to_dtype(candle_core::DType::U32)?
.broadcast_sub(&Tensor::new(vec![1_u32], input_ids.device())?)?;
let mut generate = Vec::new();
let sample_len = mes.max_tokens.unwrap_or(1024);
for _ in 0..sample_len {
let logits = self.qwen2_5_vl.forward(
&input_ids,
pixel_values,
image_grid_thw,
pixel_values_video,
video_grid_thw,
&mask,
Some(&cache_position),
seqlen_offset,
second_per_grid_ts.clone(),
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
generate.push(next_token);
if next_token == self.endoftext_id || next_token == self.im_end_id {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
            let append_mask = Tensor::ones((1, 1), mask.dtype(), &self.device)?;
            mask = Tensor::cat(&[mask, append_mask], 1)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
pixel_values_video = None;
}
let num_token = generate.len() as u32;
let res = self.tokenizer.token_decode(generate)?;
self.qwen2_5_vl.clear_kv_cache();
let response = build_completion_response(res, &self.model_name, Some(num_token));
Ok(response)
}
fn generate_stream(
&mut self,
mes: ChatCompletionParameters,
) -> Result<
Box<
dyn Stream<Item = Result<ChatCompletionChunkResponse, anyhow::Error>>
+ Send
+ Unpin
+ '_,
>,
> {
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor = get_logit_processor(mes.temperature, mes.top_p, None, seed);
let mes_render = self.chat_template.apply_chat_template(&mes)?;
let input = self.pre_processor.process_info(&mes, &mes_render)?;
let mut input_ids = self
.tokenizer
.text_encode(input.replace_text.clone(), &self.device)?;
let mut seq_len = input_ids.dim(1)?;
let mut seqlen_offset = 0;
let pixel_values = input.pixel_values.clone();
let image_grid_thw = input.image_grid_thw.clone();
let pixel_values_video = input.pixel_values_video.clone();
let video_grid_thw = input.video_grid_thw.clone();
let second_per_grid_ts = input.second_per_grid_ts.clone();
let mut mask = Tensor::ones_like(&input_ids)?;
let mut cache_position = Tensor::ones_like(&input_ids.i(0)?)?
.to_dtype(candle_core::DType::F64)?
.cumsum(D::Minus1)?
.to_dtype(candle_core::DType::U32)?
.broadcast_sub(&Tensor::new(vec![1_u32], input_ids.device())?)?;
let sample_len = mes.max_tokens.unwrap_or(512);
let stream = stream! {
let mut error_tokens = Vec::new();
let mut pixel_values = pixel_values.as_ref();
let image_grid_thw = image_grid_thw.as_ref();
let mut pixel_values_video = pixel_values_video.as_ref();
let video_grid_thw = video_grid_thw.as_ref();
let mut tool_call_id = None;
let mut tool_call_content = String::new();
for _ in 0..sample_len {
let logits = self.qwen2_5_vl.forward(
&input_ids,
pixel_values,
image_grid_thw,
pixel_values_video,
video_grid_thw,
&mask,
Some(&cache_position),
seqlen_offset,
second_per_grid_ts.clone(),
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
let mut decode_ids = Vec::new();
if !error_tokens.is_empty() {
decode_ids.extend_from_slice(&error_tokens);
}
decode_ids.push(next_token);
                let decoded_token = self.tokenizer.token_decode(decode_ids).map_err(|e| anyhow!(format!("stream decode error: {e}")))?;
if decoded_token.contains("�") {
error_tokens.push(next_token);
if error_tokens.len() > 3 {
error_tokens.clear();
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
                    let append_mask = Tensor::ones((1, 1), mask.dtype(), &self.device)?;
                    mask = Tensor::cat(&[mask, append_mask], 1)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
pixel_values_video = None;
continue;
}
error_tokens.clear();
                // handle special tokens and tool calls
match decoded_token.as_str() {
"<tool_call>" => {
                        // a tool call starts here
tool_call_id = Some(uuid::Uuid::new_v4().to_string());
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
pixel_values_video = None;
continue;
}
"</tool_call>" => {
                        // the tool call ends here
let chunk = build_completion_chunk_response(
decoded_token,
&self.model_name,
tool_call_id.clone(),
Some(tool_call_content.clone())
);
tool_call_id = None;
tool_call_content = String::new();
yield Ok(chunk);
}
_ => {
if tool_call_id.is_some() {
                            // while inside a tool call, accumulate its content
tool_call_content.push_str(&decoded_token);
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
pixel_values_video = None;
continue;
} else {
                            // plain text output
let chunk = build_completion_chunk_response(
decoded_token,
&self.model_name,
None,
None
);
yield Ok(chunk);
}
}
}
// let chunk = build_completion_chunk_response(decoded_token, &self.model_name, None, None);
// yield Ok(chunk);
if next_token == self.endoftext_id || next_token == self.im_end_id {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
                let append_mask = Tensor::ones((1, 1), mask.dtype(), &self.device)?;
                mask = Tensor::cat(&[mask, append_mask], 1)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
pixel_values_video = None;
}
self.qwen2_5_vl.clear_kv_cache();
};
Ok(Box::new(Box::pin(stream)))
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/voxcpm/minicpm4.rs | src/models/voxcpm/minicpm4.rs | use anyhow::{Ok, Result, anyhow};
use candle_core::{D, DType, Device, Tensor};
use candle_nn::{Embedding, Module, RmsNorm, VarBuilder, embedding, rms_norm};
use crate::{
models::{
common::{GateUpDownMLP, NaiveAttention},
voxcpm::config::VoxMiniCPM4Config,
},
position_embed::rope::compute_default_rope_parameters,
utils::tensor_utils::prepare_causal_attention_mask,
};
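// LongRoPE-style rotary embedding for MiniCPM4: inverse frequencies are
// rescaled per dimension by `short_factor` (or `long_factor` once the cached
// length exceeds `original_max_position_embeddings`), and cos/sin are scaled
// by sqrt(1 + ln(max/orig) / ln(orig)) to compensate for the extended
// context. The cos/sin tables are cached and regrown on demand.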
pub struct MiniCPMLongRoPE {
short_factor: Vec<f32>,
long_factor: Vec<f32>,
original_max_position_embeddings: usize,
max_seq_len_cached: usize,
scaling_factor: f64,
inv_freq: Tensor,
cos_cached: Tensor,
sin_cached: Tensor,
device: Device,
dtype: DType,
}
impl MiniCPMLongRoPE {
pub fn new(cfg: &VoxMiniCPM4Config, device: &Device, dtype: DType) -> Result<Self> {
let head_dim = cfg.hidden_size / cfg.num_attention_heads;
let rope_theta = cfg.rope_theta;
let short_factor = cfg.rope_scaling.short_factor.clone();
        let long_factor = cfg.rope_scaling.long_factor.clone();
let original_max_position_embeddings = cfg.rope_scaling.original_max_position_embeddings;
let max_position_embeddings = cfg.max_position_embeddings;
let scale = max_position_embeddings as f64 / original_max_position_embeddings as f64;
let scaling_factor =
(1.0 + scale.ln() / (original_max_position_embeddings as f64).ln()).sqrt();
let inv_freq = compute_default_rope_parameters(head_dim, rope_theta);
let inv_freq = Tensor::from_slice(&inv_freq, (1, inv_freq.len()), device)?;
let max_seq_len_cached = max_position_embeddings;
let t = Tensor::arange(0.0_f32, max_position_embeddings as f32, device)?
.reshape((max_position_embeddings, 1))?;
// short_factor.len() = 32
// head_dim = 1024 / 16 = 64, inv_freq.len() = 32
let ext_factors = Tensor::from_slice(&short_factor, (1, short_factor.len()), device)?;
let ext_factors = Tensor::ones_like(&ext_factors)?.div(&ext_factors)?;
// (seq_len, 1) matmul (1, 32) -> (seq_len, 32) * (1, 32)-> (seq_len, 32)
let freqs = t.matmul(&ext_factors)?.broadcast_mul(&inv_freq)?;
let emb = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
let cos_cached = emb.cos()?.affine(scaling_factor, 0.0)?.to_dtype(dtype)?;
let sin_cached = emb.sin()?.affine(scaling_factor, 0.0)?.to_dtype(dtype)?;
Ok(Self {
short_factor,
long_factor,
original_max_position_embeddings,
max_seq_len_cached,
scaling_factor,
inv_freq,
cos_cached,
sin_cached,
device: device.clone(),
dtype,
})
}
pub fn update_cos_sin_cache(&mut self, seqlen: usize) -> Result<()> {
self.max_seq_len_cached = seqlen;
let t = Tensor::arange(0.0_f32, seqlen as f32, &self.device)?.reshape((seqlen, 1))?;
let mut ext_factors = Tensor::from_slice(
&self.short_factor,
(1, self.short_factor.len()),
&self.device,
)?;
if seqlen > self.original_max_position_embeddings {
ext_factors =
Tensor::from_slice(&self.long_factor, (1, self.long_factor.len()), &self.device)?;
}
let ext_factors = Tensor::ones_like(&ext_factors)?.div(&ext_factors)?;
let freqs = t.matmul(&ext_factors)?.broadcast_mul(&self.inv_freq)?;
let emb = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
let cos_cached = emb
.cos()?
.affine(self.scaling_factor, 0.0)?
.to_dtype(self.dtype)?;
let sin_cached = emb
.sin()?
.affine(self.scaling_factor, 0.0)?
.to_dtype(self.dtype)?;
self.cos_cached = cos_cached;
self.sin_cached = sin_cached;
Ok(())
}
pub fn forward(&mut self, pos_offset: usize, seqlen: usize) -> Result<(Tensor, Tensor)> {
if pos_offset + seqlen > self.max_seq_len_cached {
self.update_cos_sin_cache(pos_offset + seqlen)?;
}
let cos = self.cos_cached.narrow(0, pos_offset, seqlen)?;
let sin = self.sin_cached.narrow(0, pos_offset, seqlen)?;
Ok((cos, sin))
}
}
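// One MiniCPM decoder layer: RMSNorm -> self-attention -> RMSNorm -> MLP.
// With `use_mup`, each residual branch is scaled by
// scale_depth / sqrt(num_hidden_layers) before the residual add.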
pub struct MiniCPMDecoderLayer {
self_attn: NaiveAttention,
mlp: GateUpDownMLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
scale_depth: f32,
num_hidden_layers: usize,
use_mup: bool,
}
impl MiniCPMDecoderLayer {
pub fn new(vb: VarBuilder, cfg: &VoxMiniCPM4Config) -> Result<Self> {
let self_attn = NaiveAttention::new(
vb.pp("self_attn"),
cfg.hidden_size,
cfg.num_attention_heads,
cfg.num_key_value_heads,
None,
false,
None,
)?;
let mlp = GateUpDownMLP::new(
vb.pp("mlp"),
cfg.hidden_size,
cfg.intermediate_size,
candle_nn::Activation::Silu,
false,
)?;
let input_layernorm =
rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = rms_norm(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
scale_depth: cfg.scale_depth,
num_hidden_layers: cfg.num_hidden_layers,
use_mup: cfg.use_mup,
})
}
pub fn forward(
&self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let residual = xs.clone();
let xs = self.input_layernorm.forward(xs)?;
let xs = self
.self_attn
.forward(&xs, Some(cos), Some(sin), attention_mask, true)?;
let xs = if self.use_mup {
(residual
+ xs.affine(
self.scale_depth as f64 / (self.num_hidden_layers as f64).sqrt(),
0.0,
))?
} else {
(residual + xs)?
};
let residual = xs.clone();
let xs = xs.apply(&self.post_attention_layernorm)?;
let xs = xs.apply(&self.mlp)?;
let xs = if self.use_mup {
(residual
+ xs.affine(
self.scale_depth as f64 / (self.num_hidden_layers as f64).sqrt(),
0.0,
))?
} else {
(residual + xs)?
};
Ok(xs)
}
pub fn forward_with_cache(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let residual = xs.clone();
let xs = self.input_layernorm.forward(xs)?;
let xs = self
.self_attn
.forward_with_cache(&xs, cos, sin, attention_mask, true)?;
let xs = if self.use_mup {
(residual
+ xs.affine(
self.scale_depth as f64 / (self.num_hidden_layers as f64).sqrt(),
0.0,
)?)?
} else {
(residual + xs)?
};
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
let xs = if self.use_mup {
(residual
+ xs.affine(
self.scale_depth as f64 / (self.num_hidden_layers as f64).sqrt(),
0.0,
)?)?
} else {
(residual + xs)?
};
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache();
}
}
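// MiniCPM4 transformer stack. `embed_tokens` only exists when vocab_size > 0;
// VoxCPM reuses this struct with vocab_size == 0 as a generic transformer
// over precomputed embeddings (local encoder, DiT decoder, residual LM).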
pub struct MiniCPMModel {
// cfg: VoxMiniCPM4Config,
pub embed_tokens: Option<Embedding>,
layers: Vec<MiniCPMDecoderLayer>,
norm: RmsNorm,
rope_emb: MiniCPMLongRoPE,
}
impl MiniCPMModel {
pub fn new(vb: VarBuilder, cfg: VoxMiniCPM4Config) -> Result<Self> {
// let vb = vb.pp("model");
let embed_tokens = if cfg.vocab_size > 0 {
Some(embedding(
cfg.vocab_size,
cfg.hidden_size,
vb.pp("embed_tokens"),
)?)
} else {
None
};
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_layers = vb.pp("layers");
for i in 0..cfg.num_hidden_layers {
let layer = MiniCPMDecoderLayer::new(vb_layers.pp(i), &cfg)?;
layers.push(layer);
}
let norm = rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("norm"))?;
let rope_emb = MiniCPMLongRoPE::new(&cfg, vb.device(), vb.dtype())?;
Ok(Self {
// cfg,
embed_tokens,
layers,
norm,
rope_emb,
})
}
pub fn forward(
&mut self,
input_embeds: &Tensor,
position_id: usize,
is_causal: bool,
) -> Result<Tensor> {
let (bs, seq_len, _) = input_embeds.dims3()?;
let attention_mask: Option<Tensor> = {
if !is_causal || seq_len <= 1 {
None
} else {
Some(prepare_causal_attention_mask(
bs,
seq_len,
0,
input_embeds.device(),
)?)
}
};
let (cos, sin) = self.rope_emb.forward(position_id, seq_len)?;
let mut hidden_states = input_embeds.clone();
for decode_layer in &self.layers {
hidden_states =
decode_layer.forward(&hidden_states, &cos, &sin, attention_mask.as_ref())?;
}
hidden_states = self.norm.forward(&hidden_states)?;
Ok(hidden_states)
}
pub fn forward_with_cache(
&mut self,
input_embeds: &Tensor,
position_id: usize,
) -> Result<Tensor> {
let input_embeds = match input_embeds.rank() {
2 => input_embeds.unsqueeze(1)?,
3 => input_embeds.clone(),
            _ => return Err(anyhow!("MiniCPMModel input_embeds rank is illegal")),
};
let (bs, seq_len, _) = input_embeds.dims3()?;
let attention_mask: Option<Tensor> = {
if seq_len <= 1 {
None
} else {
Some(prepare_causal_attention_mask(
bs,
seq_len,
0,
input_embeds.device(),
)?)
}
};
let (cos, sin) = self.rope_emb.forward(position_id, seq_len)?;
let mut hidden_states = input_embeds.clone();
for decode_layer in &mut self.layers {
hidden_states = decode_layer.forward_with_cache(
&hidden_states,
&cos,
&sin,
attention_mask.as_ref(),
)?;
}
hidden_states = self.norm.forward(&hidden_states)?;
Ok(hidden_states)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/voxcpm/config.rs | src/models/voxcpm/config.rs | #[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct VoxRopeScalingConfig {
pub r#type: String,
pub long_factor: Vec<f32>,
pub short_factor: Vec<f32>,
pub original_max_position_embeddings: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct VoxMiniCPM4Config {
pub bos_token_id: u32,
pub eos_token_id: u32,
pub hidden_size: usize,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_theta: f32,
pub rope_scaling: VoxRopeScalingConfig,
pub vocab_size: usize,
pub scale_emb: f32,
pub dim_model_base: usize,
pub scale_depth: f32,
pub use_mup: bool,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct VoxCPMEncoderConfig {
pub hidden_dim: usize,
pub ffn_dim: usize,
pub num_heads: usize,
pub num_layers: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct CfmConfig {
pub sigma_min: f32,
pub solver: String,
pub t_scheduler: String,
pub inference_cfg_rate: f32,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct VoxCPMDitConfig {
pub hidden_dim: usize,
pub ffn_dim: usize,
pub num_heads: usize,
pub num_layers: usize,
pub cfm_config: CfmConfig,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct AudioVaeConfig {
pub encoder_dim: usize,
pub encoder_rates: Vec<usize>,
pub latent_dim: usize,
pub decoder_dim: usize,
pub decoder_rates: Vec<usize>,
pub sample_rate: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct VoxCPMConfig {
pub lm_config: VoxMiniCPM4Config,
pub patch_size: usize,
pub feat_dim: usize,
pub scalar_quantization_latent_dim: usize,
pub scalar_quantization_scale: usize,
pub residual_lm_num_layers: usize,
pub encoder_config: VoxCPMEncoderConfig,
pub dit_config: VoxCPMDitConfig,
pub audio_vae_config: Option<AudioVaeConfig>,
pub max_length: usize,
pub dtype: String,
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/voxcpm/model.rs | src/models/voxcpm/model.rs | use std::{cmp::max, collections::HashMap, f64};
use anyhow::{Ok, Result};
use candle_core::{D, DType, Device, IndexOp, Tensor};
use candle_nn::{Linear, Module, VarBuilder, linear, linear_no_bias};
use candle_transformers::models::deepseek2::SplitOp;
use crate::{
models::voxcpm::{
audio_vae::AudioVAE,
config::{CfmConfig, VoxCPMConfig, VoxMiniCPM4Config},
minicpm4::MiniCPMModel,
tokenizer::SingleChineseTokenizer,
},
utils::{audio_utils::load_audio_with_resample, tensor_utils::linspace},
};
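// Finite-scalar-quantization bottleneck: project in, squash with tanh,
// round to a uniform grid of 2 * scale + 1 levels per dimension, then
// project back out, giving a discrete latent behind a continuous interface.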
pub struct ScalarQuantizationLayer {
scale: usize,
in_proj: Linear,
out_proj: Linear,
}
impl ScalarQuantizationLayer {
pub fn new(
vb: VarBuilder,
in_dim: usize,
out_dim: usize,
laten_dim: usize,
scale: usize,
) -> Result<Self> {
let in_proj = linear(in_dim, laten_dim, vb.pp("in_proj"))?;
let out_proj = linear(laten_dim, out_dim, vb.pp("out_proj"))?;
Ok(Self {
scale,
in_proj,
out_proj,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.in_proj.forward(xs)?;
let xs = xs.tanh()?;
let xs = xs
.affine(self.scale as f64, 0.0)?
.round()?
.affine(1.0 / self.scale as f64, 0.0)?;
let xs = self.out_proj.forward(&xs)?;
Ok(xs)
}
}
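// Sinusoidal timestep embedding: emb(t) = [sin(scale*t*w_i), cos(scale*t*w_i)]
// with w_i = exp(-ln(10000) * i / (dim/2 - 1)), i in 0..dim/2.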
pub struct SinusoidalPosEmb {
dim: usize,
}
impl SinusoidalPosEmb {
pub fn new(dim: usize) -> Result<Self> {
assert_eq!(dim % 2, 0, "SinusoidalPosEmb requires dim to be even");
Ok(Self { dim })
}
pub fn forward(&self, x: &Tensor, scale: usize) -> Result<Tensor> {
let x = if x.rank() < 1 {
x.unsqueeze(0)?
} else {
x.clone()
};
let half_dim = self.dim / 2;
let dif = 10000.0_f64.ln() / (half_dim - 1) as f64;
let emb = Tensor::arange(0.0, half_dim as f32, x.device())?
.affine(-dif, 0.0)?
.exp()?
.to_dtype(x.dtype())?;
let emb = x
.unsqueeze(1)?
.contiguous()?
.affine(scale as f64, 0.0)?
.matmul(&emb.unsqueeze(0)?.contiguous()?)?;
let emb = Tensor::cat(&[emb.sin()?, emb.cos()?], D::Minus1)?;
Ok(emb)
}
}
pub struct TimestepEmbedding {
linear_1: Linear,
linear_2: Linear,
}
impl TimestepEmbedding {
pub fn new(
vb: VarBuilder,
in_channels: usize,
time_embed_dim: usize,
out_dim: Option<usize>,
) -> Result<Self> {
let linear_1 = linear(in_channels, time_embed_dim, vb.pp("linear_1"))?;
let time_embed_dim_out = if let Some(out_dim) = out_dim {
out_dim
} else {
time_embed_dim
};
let linear_2 = linear(time_embed_dim, time_embed_dim_out, vb.pp("linear_2"))?;
Ok(Self { linear_1, linear_2 })
}
pub fn forward(&self, sample: &Tensor) -> Result<Tensor> {
let sample = self.linear_1.forward(sample)?.silu()?;
let sample = self.linear_2.forward(&sample)?;
Ok(sample)
}
}
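// Local diffusion transformer: noisy latents and the conditioning prefix are
// projected to the decoder width, the (timestep + delta-timestep) embedding
// is added to the semantic vector `mu`, and the sequence [mu + t, cond, x]
// is run through a MiniCPM decoder; only the trailing positions past the
// prefix are projected back to feature space.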
pub struct VoxCPMLocDiT {
in_proj: Linear,
cond_proj: Linear,
out_proj: Linear,
time_embeddings: SinusoidalPosEmb,
time_mlp: TimestepEmbedding,
delta_time_mlp: TimestepEmbedding,
decoder: MiniCPMModel,
// config: VoxMiniCPM4Config,
// in_channels: usize,
}
impl VoxCPMLocDiT {
pub fn new(vb: VarBuilder, config: VoxMiniCPM4Config, in_channels: usize) -> Result<Self> {
let in_proj = linear(in_channels, config.hidden_size, vb.pp("in_proj"))?;
let cond_proj = linear(in_channels, config.hidden_size, vb.pp("cond_proj"))?;
let out_proj = linear(config.hidden_size, in_channels, vb.pp("out_proj"))?;
let time_embeddings = SinusoidalPosEmb::new(config.hidden_size)?;
let time_mlp = TimestepEmbedding::new(
vb.pp("time_mlp"),
config.hidden_size,
config.hidden_size,
None,
)?;
let delta_time_mlp = TimestepEmbedding::new(
vb.pp("delta_time_mlp"),
config.hidden_size,
config.hidden_size,
None,
)?;
assert_eq!(config.vocab_size, 0, "vocab_size must be 0 for local DiT");
let decoder = MiniCPMModel::new(vb.pp("decoder"), config.clone())?;
Ok(Self {
in_proj,
cond_proj,
out_proj,
time_embeddings,
time_mlp,
delta_time_mlp,
decoder,
// config,
// in_channels,
})
}
pub fn forward(
&mut self,
x: &Tensor,
mu: &Tensor,
t: &Tensor,
cond: &Tensor,
dt: &Tensor,
) -> Result<Tensor> {
let x = self.in_proj.forward(&x.transpose(1, 2)?.contiguous()?)?;
let cond = self
.cond_proj
.forward(&cond.transpose(1, 2)?.contiguous()?)?;
let prefix = cond.dim(1)?;
let t = self.time_embeddings.forward(t, 1000)?.to_dtype(x.dtype())?;
let t = self.time_mlp.forward(&t)?;
let dt = self
.time_embeddings
.forward(dt, 1000)?
.to_dtype(x.dtype())?;
let dt = self.delta_time_mlp.forward(&dt)?;
let t = t.add(&dt)?;
let x = Tensor::cat(&[mu.add(&t)?.unsqueeze(1)?, cond, x], 1)?;
let hidden = self.decoder.forward(&x, 0, false)?;
let select_len = hidden.dims()[1] - (prefix + 1);
let hidden = hidden.narrow(1, prefix + 1, select_len)?;
let hidden = self.out_proj.forward(&hidden)?;
let hidden = hidden.transpose(1, 2)?.contiguous()?;
Ok(hidden)
}
}
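// Conditional flow matching sampler: draw Gaussian noise, build a
// sway-sampling time schedule, then integrate the learned velocity field
// with an explicit Euler solver under classifier-free guidance. With
// `use_cfg_zero_star`, the negative branch is rescaled by
// s* = <pos, neg> / ||neg||^2 and the first ~4% of steps emit zero velocity.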
pub struct UnifiedCFM {
// solver: String,
// sigma_min: f32,
// t_scheduler: String,
in_channels: usize,
mean_mode: bool,
estimator: VoxCPMLocDiT,
}
impl UnifiedCFM {
pub fn new(
in_channels: usize,
_cfm_params: CfmConfig,
estimator: VoxCPMLocDiT,
mean_mode: bool,
) -> Result<Self> {
// let solver = cfm_params.solver;
// let sigma_min = cfm_params.sigma_min;
// let t_scheduler = cfm_params.t_scheduler;
Ok(Self {
// solver,
// sigma_min,
// t_scheduler,
in_channels,
mean_mode,
estimator,
})
}
pub fn forward(
&mut self,
mu: &Tensor,
n_timesteps: usize,
patch_size: usize,
cond: &Tensor,
temperature: f64,
cfg_value: f64,
sway_sampling_coef: f64,
use_cfg_zero_star: bool,
) -> Result<Tensor> {
let (b, _) = mu.dims2()?;
let t = patch_size;
let dtype = mu.dtype();
let z = Tensor::randn(0.0f32, 1.0, (b, self.in_channels, t), mu.device())?
.to_dtype(dtype)?
.affine(temperature, 0.0)?;
let t_span = linspace(1.0, 0.0, n_timesteps + 1, mu.device())?.to_dtype(dtype)?;
let t_span = t_span
.affine(f64::consts::PI / 2.0, 0.0)?
.cos()?
.affine(1.0, -1.0)?
.add(&t_span)?
.affine(sway_sampling_coef, 0.0)?
.add(&t_span)?;
let x = self.solve_euler(&z, &t_span, mu, cond, cfg_value, use_cfg_zero_star)?;
Ok(x)
}
pub fn optimized_scale(
&self,
positive_flat: &Tensor,
negative_flat: &Tensor,
) -> Result<Tensor> {
let dot_product = positive_flat.mul(negative_flat)?.sum_keepdim(1)?;
let squared_norm = negative_flat.powf(2.0)?.sum_keepdim(1)?.affine(1.0, 1e-8)?;
let st_star = dot_product.div(&squared_norm)?;
Ok(st_star)
}
pub fn solve_euler(
&mut self,
x: &Tensor,
t_span: &Tensor,
mu: &Tensor,
cond: &Tensor,
cfg_value: f64,
use_cfg_zero_star: bool,
) -> Result<Tensor> {
let mut t = t_span.i(0)?;
let mut dt = t.sub(&t_span.i(1)?)?;
let mut sol = Vec::new();
let t_span_len = t_span.dim(0)?;
let zero_init_steps = max(1, (t_span_len as f32 * 0.04) as usize);
let mut dphi_dt;
let mut x = x.clone();
for step in 1..t_span_len {
if use_cfg_zero_star && step <= zero_init_steps {
// dphi_dt = Tensor::zeros(1, t_span.dtype(), t_span.device())?;
dphi_dt = x.zeros_like()?;
} else {
let b = x.dim(0)?;
// let x_in = Tensor::zeros((2*b, self.in_channels, x.dim(2)?), x.dtype(), x.device())?;
let x_in = Tensor::cat(&[x.clone(), x.clone()], 0)?;
let mu_in = Tensor::zeros((b, mu.dim(1)?), x.dtype(), x.device())?;
let mu_in = Tensor::cat(&[mu.clone(), mu_in], 0)?;
let t_in = t.broadcast_as(2 * b)?;
let dt_in = if self.mean_mode {
dt.broadcast_as(2 * b)?
} else {
Tensor::zeros(2 * b, x.dtype(), x.device())?
};
let cond_in = Tensor::cat(&[cond, cond], 0)?;
dphi_dt = self
.estimator
.forward(&x_in, &mu_in, &t_in, &cond_in, &dt_in)?;
let split = dphi_dt.split(&[b, b], 0)?;
dphi_dt = split[0].clone();
let cfg_dphi_dt = split[1].clone();
let mut st_star = Tensor::ones(1, x.dtype(), x.device())?;
if use_cfg_zero_star {
let positive_flat = dphi_dt.reshape((b, ()))?;
let negative_flat = cfg_dphi_dt.reshape((b, ()))?;
st_star = self.optimized_scale(&positive_flat, &negative_flat)?;
let mut vec_shape = vec![b];
let vec_shape1 = vec![1; dphi_dt.rank() - 1];
vec_shape.extend_from_slice(&vec_shape1);
st_star = st_star.reshape(vec_shape)?;
}
let cfg = cfg_dphi_dt.broadcast_mul(&st_star)?;
                dphi_dt = cfg.add(&dphi_dt.sub(&cfg)?.affine(cfg_value, 0.0)?)?; // CFG-combined velocity prediction for this step
            }
            x = x.broadcast_sub(&dphi_dt.broadcast_mul(&dt)?)?; // one Euler denoising step
t = t.sub(&dt)?;
sol.push(x.clone());
if step < t_span_len - 1 {
dt = t.sub(&t_span.i(step + 1)?)?;
}
}
let ret = sol[sol.len() - 1].clone();
Ok(ret)
}
}
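// Patch-level encoder: a learned special token is prepended to each patch of
// audio features, each patch runs through a small MiniCPM encoder, and the
// output at the special-token position is kept as a CLS-style patch summary.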
pub struct VoxCPMLocEnc {
special_token: Tensor,
in_proj: Linear,
encoder: MiniCPMModel,
hidden_size: usize,
}
impl VoxCPMLocEnc {
pub fn new(vb: VarBuilder, config: VoxMiniCPM4Config, input_dim: usize) -> Result<Self> {
let special_token = vb.get((1, 1, 1, config.hidden_size), "special_token")?;
let in_proj = linear(input_dim, config.hidden_size, vb.pp("in_proj"))?;
assert_eq!(
config.vocab_size, 0,
"vocab_size must be 0 for local encoder"
);
let hidden_size = config.hidden_size;
let encoder = MiniCPMModel::new(vb.pp("encoder"), config)?;
Ok(Self {
special_token,
in_proj,
encoder,
hidden_size,
})
}
pub fn forward(&mut self, x: &Tensor) -> Result<Tensor> {
let (b, t, _, _) = x.dims4()?;
let x = self.in_proj.forward(x)?;
let special_tokens = self.special_token.expand((b, t, 1, self.hidden_size))?;
let x = Tensor::cat(&[special_tokens, x], 2)?;
let (b, t, p, c) = x.dims4()?;
let x = x.reshape((b * t, p, c))?;
let outputs = self.encoder.forward(&x, 0, false)?;
let cls_output = outputs.i((.., 0, ..))?;
let cls_output = cls_output.reshape((b, t, c))?;
Ok(cls_output)
}
}
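// End-to-end VoxCPM TTS model: text tokens and patch-encoded audio latents
// feed the base LM, a smaller residual LM refines its (FSQ-quantized) hidden
// states, and both are projected into the flow-matching DiT decoder that
// predicts the next latent patch; the audio VAE turns the predicted latents
// back into a waveform. A stop head on the LM hidden state ends generation.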
pub struct VoxCPMModel {
config: VoxCPMConfig,
patch_size: usize,
audio_start_token: usize,
// audio_end_token: usize,
chunk_size: usize,
sample_rate: usize,
tokenizer: SingleChineseTokenizer,
audio_vae: AudioVAE,
base_lm: MiniCPMModel,
residual_lm: MiniCPMModel,
feat_encoder: VoxCPMLocEnc,
feat_decoder: UnifiedCFM,
fsq_layer: ScalarQuantizationLayer,
enc_to_lm_proj: Linear,
lm_to_dit_proj: Linear,
res_to_dit_proj: Linear,
stop_proj: Linear,
stop_head: Linear,
device: Device,
dtype: DType,
}
impl VoxCPMModel {
pub fn new(
vb: VarBuilder,
config: VoxCPMConfig,
tokenizer: SingleChineseTokenizer,
audio_vae: AudioVAE,
) -> Result<Self> {
let base_lm = MiniCPMModel::new(vb.pp("base_lm"), config.lm_config.clone())?;
let audio_start_token = 101usize;
// let audio_end_token = 102usize;
let mut residual_lm_config = config.lm_config.clone();
residual_lm_config.num_hidden_layers = config.residual_lm_num_layers;
residual_lm_config.vocab_size = 0;
let residual_lm = MiniCPMModel::new(vb.pp("residual_lm"), residual_lm_config)?;
let mut encoder_config = config.lm_config.clone();
encoder_config.hidden_size = config.encoder_config.hidden_dim;
encoder_config.intermediate_size = config.encoder_config.ffn_dim;
encoder_config.num_attention_heads = config.encoder_config.num_heads;
encoder_config.num_hidden_layers = config.encoder_config.num_layers;
encoder_config.vocab_size = 0;
let feat_encoder =
VoxCPMLocEnc::new(vb.pp("feat_encoder"), encoder_config, config.feat_dim)?;
let mut decoder_config = config.lm_config.clone();
decoder_config.hidden_size = config.dit_config.hidden_dim;
decoder_config.intermediate_size = config.dit_config.ffn_dim;
decoder_config.num_attention_heads = config.dit_config.num_heads;
decoder_config.num_hidden_layers = config.dit_config.num_layers;
decoder_config.vocab_size = 0;
let estimator = VoxCPMLocDiT::new(
vb.pp("feat_decoder.estimator"),
decoder_config,
config.feat_dim,
)?;
let feat_decoder = UnifiedCFM::new(
config.feat_dim,
config.dit_config.cfm_config.clone(),
estimator,
false,
)?;
let fsq_layer = ScalarQuantizationLayer::new(
vb.pp("fsq_layer"),
config.lm_config.hidden_size,
config.lm_config.hidden_size,
config.scalar_quantization_latent_dim,
config.scalar_quantization_scale,
)?;
let enc_to_lm_proj = linear(
config.encoder_config.hidden_dim,
config.lm_config.hidden_size,
vb.pp("enc_to_lm_proj"),
)?;
let lm_to_dit_proj = linear(
config.lm_config.hidden_size,
config.dit_config.hidden_dim,
vb.pp("lm_to_dit_proj"),
)?;
let res_to_dit_proj = linear(
config.lm_config.hidden_size,
config.dit_config.hidden_dim,
vb.pp("res_to_dit_proj"),
)?;
let stop_proj = linear(
config.lm_config.hidden_size,
config.lm_config.hidden_size,
vb.pp("stop_proj"),
)?;
let stop_head = linear_no_bias(config.lm_config.hidden_size, 2, vb.pp("stop_head"))?;
let patch_size = config.patch_size;
Ok(Self {
config,
patch_size,
audio_start_token,
// audio_end_token,
chunk_size: audio_vae.chunk_size,
sample_rate: audio_vae.sample_rate,
tokenizer,
audio_vae,
base_lm,
residual_lm,
feat_encoder,
feat_decoder,
fsq_layer,
enc_to_lm_proj,
lm_to_dit_proj,
res_to_dit_proj,
stop_proj,
stop_head,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
pub fn generate(
&mut self,
target_text: String,
prompt_text: Option<String>,
prompt_wav_path: Option<String>,
min_len: usize,
max_len: usize,
inference_timesteps: usize,
cfg_value: f64,
// retry_badcase: bool,
retry_badcase_ratio_threshold: f64,
) -> Result<Tensor> {
let (text_token, text_mask, audio_feat, audio_mask) = match prompt_wav_path {
None => {
let text_token = self.tokenizer.encode(target_text.clone())?;
let text_token = Tensor::from_slice(&text_token, text_token.len(), &self.device)?;
let audio_start = Tensor::new(vec![self.audio_start_token as u32], &self.device)?;
let text_token = Tensor::cat(&[text_token, audio_start], D::Minus1)?;
let text_length = text_token.dim(0)?;
let audio_feat = Tensor::zeros(
(text_length, self.patch_size, self.audio_vae.latent_dim),
DType::F32,
&self.device,
)?;
let text_mask = Tensor::ones(text_length, self.dtype, &self.device)?;
let audio_mask = Tensor::zeros(text_length, self.dtype, &self.device)?;
(text_token, text_mask, audio_feat, audio_mask)
}
Some(path) => {
let text = prompt_text.unwrap_or("".to_string()) + &target_text;
let text_token = self.tokenizer.encode(text)?;
let text_token = Tensor::from_slice(&text_token, text_token.len(), &self.device)?;
let audio_start = Tensor::new(vec![self.audio_start_token as u32], &self.device)?;
let text_token = Tensor::cat(&[text_token, audio_start], D::Minus1)?;
let text_length = text_token.dim(0)?;
let mut audio =
load_audio_with_resample(&path, self.device.clone(), Some(self.sample_rate))?;
let patch_len = self.patch_size * self.chunk_size;
if audio.dim(1)? % patch_len != 0 {
audio = audio.pad_with_zeros(
D::Minus1,
// 0,
// patch_len - audio.dim(1)? % patch_len,
patch_len - audio.dim(1)? % patch_len,
0,
)?;
}
let audio_feat = self.audio_vae.encode(&audio, Some(self.sample_rate))?;
let audio_feat = audio_feat
.reshape((self.audio_vae.latent_dim, (), self.patch_size))?
.permute((1, 2, 0))?;
// let dim0 = audio_feat.dim(0)? - 1;
// let audio_feat = audio_feat.i(..dim0)?;
let audio_length = audio_feat.dim(0)?;
let text_pad_token = Tensor::zeros(audio_length, DType::U32, &self.device)?;
let text_token = Tensor::cat(&[text_token, text_pad_token], D::Minus1)?;
let audio_pad_feat = Tensor::zeros(
(text_length, self.patch_size, self.audio_vae.latent_dim),
audio_feat.dtype(),
&self.device,
)?;
let audio_feat = Tensor::cat(&[audio_pad_feat, audio_feat], 0)?;
let text_mask = Tensor::cat(
&[
Tensor::ones(text_length, self.dtype, &self.device)?,
Tensor::zeros(audio_length, self.dtype, &self.device)?,
],
D::Minus1,
)?;
let audio_mask = Tensor::cat(
&[
Tensor::zeros(text_length, self.dtype, &self.device)?,
Tensor::ones(audio_length, self.dtype, &self.device)?,
],
D::Minus1,
)?;
(text_token, text_mask, audio_feat, audio_mask)
}
};
let target_text_length = self.tokenizer.encode(target_text)?.len();
// let max_len = if retry_badcase {
// (target_text_length as f64 * retry_badcase_ratio_threshold + 10.0) as usize
// } else {
// max_len
// };
let max_len = max_len
.min((target_text_length as f64 * retry_badcase_ratio_threshold + 10.0) as usize);
let decode_audio = self._generate(
&text_token,
&text_mask,
&audio_feat,
&audio_mask,
min_len,
max_len,
inference_timesteps,
cfg_value,
)?;
Ok(decode_audio)
}
fn _generate(
&mut self,
text_token: &Tensor,
text_mask: &Tensor,
audio_feat: &Tensor,
audio_mask: &Tensor,
min_len: usize,
max_len: usize,
inference_timesteps: usize,
cfg_value: f64,
) -> Result<Tensor> {
let text_token = text_token.unsqueeze(0)?;
let text_mask = text_mask.unsqueeze(0)?;
let audio_feat = audio_feat.unsqueeze(0)?.to_dtype(self.dtype)?;
let audio_mask = audio_mask.unsqueeze(0)?;
let latent_pred = self.inference(
&text_token,
&text_mask,
&audio_feat,
&audio_mask,
min_len,
max_len,
inference_timesteps,
cfg_value,
)?;
let decode_audio = self
.audio_vae
.decode(&latent_pred.to_dtype(DType::F32)?)?
.squeeze(1)?;
let decode_audio_len = decode_audio.dim(D::Minus1)? - 640 - 640;
let decode_audio = decode_audio.narrow(D::Minus1, 640, decode_audio_len)?;
Ok(decode_audio)
}
fn inference(
&mut self,
text: &Tensor,
text_mask: &Tensor,
feat: &Tensor,
feat_mask: &Tensor,
min_len: usize,
max_len: usize,
inference_timesteps: usize,
cfg_value: f64,
) -> Result<Tensor> {
let (_, t, _, _) = feat.dims4()?;
let feat_embed = self.feat_encoder.forward(feat)?; // [b, t, h_feat]
let feat_embed = self.enc_to_lm_proj.forward(&feat_embed)?;
let scale_emb = if self.config.lm_config.use_mup {
self.config.lm_config.scale_emb
} else {
1.0
};
let text_embed = self
.base_lm
.embed_tokens
.as_ref()
.unwrap()
.forward(text)?
.affine(scale_emb as f64, 0.0)?;
let combined_embed = text_mask
.unsqueeze(D::Minus1)?
.broadcast_mul(&text_embed)?
.add(&feat_mask.unsqueeze(D::Minus1)?.broadcast_mul(&feat_embed)?)?;
let mut prefix_feat_cond = feat.i((.., t - 1, ..))?;
let mut pred_feat_seq = Vec::new();
let mut position_id = 0;
let mut seq_len = t;
let enc_outputs = self
.base_lm
.forward_with_cache(&combined_embed, position_id)?;
let enc_outputs = self
.fsq_layer
.forward(&enc_outputs)?
.broadcast_mul(&feat_mask.unsqueeze(D::Minus1)?)?
.add(&enc_outputs.broadcast_mul(&text_mask.unsqueeze(D::Minus1)?)?)?;
let mut lm_hidden = enc_outputs.i((.., t - 1, ..))?;
let input_embeds =
enc_outputs.add(&feat_mask.unsqueeze(D::Minus1)?.broadcast_mul(&feat_embed)?)?;
let residual_enc_outputs = self
.residual_lm
.forward_with_cache(&input_embeds, position_id)?;
let mut residual_hidden = residual_enc_outputs.i((.., t - 1, ..))?;
for i in 0..max_len {
let dit_hidden_1 = self.lm_to_dit_proj.forward(&lm_hidden)?; // [b, h_dit]
let dit_hidden_2 = self.res_to_dit_proj.forward(&residual_hidden)?; // [b, h_dit]
let dit_hidden = dit_hidden_1.add(&dit_hidden_2)?;
let cond = prefix_feat_cond.transpose(1, 2)?.contiguous()?;
let pred_feat = self
.feat_decoder
.forward(
&dit_hidden,
inference_timesteps,
self.patch_size,
&cond,
1.0,
cfg_value,
1.0,
true,
)?
.transpose(1, 2)?; // [b, p, d]
let curr_embed = self.feat_encoder.forward(&pred_feat.unsqueeze(1)?)?; // [b, 1, c]
let curr_embed = self.enc_to_lm_proj.forward(&curr_embed)?;
pred_feat_seq.push(pred_feat.unsqueeze(1)?);
prefix_feat_cond = pred_feat;
let stop_flag = self.stop_proj.forward(&lm_hidden)?.silu()?;
let stop_flag = self
.stop_head
.forward(&stop_flag)?
.argmax(D::Minus1)?
.i(0)?
.to_scalar::<u32>()?;
if i > min_len && stop_flag == 1 {
break;
}
position_id += seq_len;
seq_len = 1;
lm_hidden = self
.base_lm
.forward_with_cache(&curr_embed.i((.., 0, ..))?, position_id)?
.squeeze(1)?;
lm_hidden = self.fsq_layer.forward(&lm_hidden)?;
residual_hidden = self
.residual_lm
.forward_with_cache(&lm_hidden.add(&curr_embed.i((.., 0, ..))?)?, position_id)?
.squeeze(1)?;
}
let pred_seq = Tensor::cat(&pred_feat_seq, 1)?; // (b, t, p, d)
let (b, _, _, d) = pred_seq.dims4()?;
let feat_pred = pred_seq
.permute((0, 3, 1, 2))?
.reshape((b, d, ()))?
.contiguous()?;
self.base_lm.clear_kv_cache();
self.residual_lm.clear_kv_cache();
Ok(feat_pred)
}
pub fn build_prompt_cache(
&mut self,
prompt_text: String,
prompt_wav_path: String,
) -> Result<HashMap<String, Tensor>> {
let text_token = self.tokenizer.encode(prompt_text)?;
let text_token = Tensor::from_slice(&text_token, text_token.len(), &self.device)?;
let mut audio = load_audio_with_resample(
&prompt_wav_path,
self.device.clone(),
Some(self.sample_rate),
)?;
let patch_len = self.patch_size * self.chunk_size;
if audio.dim(1)? % patch_len != 0 {
audio = audio.pad_with_zeros(D::Minus1, 0, patch_len - audio.dim(1)? % patch_len)?;
}
let audio_feat = self.audio_vae.encode(&audio, Some(self.sample_rate))?;
let audio_feat = audio_feat
.reshape((self.audio_vae.latent_dim, (), self.patch_size))?
.permute((1, 2, 0))?;
let dim0 = audio_feat.dim(0)? - 1;
let audio_feat = audio_feat.i(..dim0)?;
let mut hashmap = HashMap::new();
hashmap.insert("text_token".to_string(), text_token);
hashmap.insert("audio_feat".to_string(), audio_feat);
Ok(hashmap)
}
pub fn generate_with_prompt_cache(
&mut self,
target_text: String,
prompt_cache: HashMap<String, Tensor>,
min_len: usize,
max_len: usize,
inference_timesteps: usize,
cfg_value: f64,
retry_badcase: bool,
retry_badcase_ratio_threshold: f64,
) -> Result<Tensor> {
let target_text_token = self.tokenizer.encode(target_text.clone())?;
let target_text_token =
Tensor::from_slice(&target_text_token, target_text_token.len(), &self.device)?;
let text_token = match prompt_cache.get("text_token") {
Some(token) => Tensor::cat(&[token, &target_text_token], 0)?,
None => target_text_token,
};
let audio_start = Tensor::new(vec![self.audio_start_token as u32], &self.device)?;
let text_token = Tensor::cat(&[text_token, audio_start], D::Minus1)?;
let text_length = text_token.dim(0)?;
let (audio_length, audio_feat) = match prompt_cache.get("audio_feat") {
Some(feat) => (feat.dim(0)?, Some(feat.clone())),
None => (0, None),
};
let (text_token, text_mask, audio_feat, audio_mask) = if audio_length > 0 {
let audio_feat = audio_feat.unwrap();
let audio_length = audio_feat.dim(0)?;
let text_pad_token = Tensor::zeros(audio_length, DType::U32, &self.device)?;
let text_token = Tensor::cat(&[text_token, text_pad_token], D::Minus1)?;
let audio_pad_feat = Tensor::zeros(
(text_length, self.patch_size, self.audio_vae.latent_dim),
audio_feat.dtype(),
&self.device,
)?;
let audio_feat = Tensor::cat(&[audio_pad_feat, audio_feat], 0)?;
let text_mask = Tensor::cat(
&[
Tensor::ones(text_length, self.dtype, &self.device)?,
Tensor::zeros(audio_length, self.dtype, &self.device)?,
],
D::Minus1,
)?;
let audio_mask = Tensor::cat(
&[
Tensor::zeros(text_length, self.dtype, &self.device)?,
Tensor::ones(audio_length, self.dtype, &self.device)?,
],
D::Minus1,
)?;
(text_token, text_mask, audio_feat, audio_mask)
} else {
let audio_feat = Tensor::zeros(
(text_length, self.patch_size, self.audio_vae.latent_dim),
DType::F32,
&self.device,
)?;
let text_mask = Tensor::ones(text_length, self.dtype, &self.device)?;
let audio_mask = Tensor::zeros(text_length, self.dtype, &self.device)?;
(text_token, text_mask, audio_feat, audio_mask)
};
let target_text_length = self.tokenizer.encode(target_text)?.len();
let max_len = if retry_badcase {
(target_text_length as f64 * retry_badcase_ratio_threshold + 10.0) as usize
} else {
max_len
};
let decode_audio = self._generate(
&text_token,
&text_mask,
&audio_feat,
&audio_mask,
min_len,
max_len,
inference_timesteps,
cfg_value,
)?;
Ok(decode_audio)
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/voxcpm/mod.rs | src/models/voxcpm/mod.rs | pub mod audio_vae;
pub mod config;
pub mod generate;
pub mod minicpm4;
pub mod model;
pub mod tokenizer;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/voxcpm/audio_vae.rs | src/models/voxcpm/audio_vae.rs | use anyhow::{Ok, Result};
use candle_core::{D, Tensor};
use candle_nn::{Conv1d, Conv1dConfig, ConvTranspose1d, ConvTranspose1dConfig, Module, VarBuilder};
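// 1D convolution made causal by left-padding the input with `padding * 2`
// zeros instead of padding symmetrically, so no output frame depends on
// future samples.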
pub struct CausalConv1d {
conv1d: Conv1d,
padding: usize,
}
impl CausalConv1d {
pub fn new(
weight: Tensor,
bias: Option<Tensor>,
padding: usize,
dilation: usize,
groups: usize,
stride: usize,
) -> Result<Self> {
let config = Conv1dConfig {
padding: 0,
stride,
dilation,
groups,
cudnn_fwd_algo: None,
};
let conv1d = Conv1d::new(weight, bias, config);
Ok(Self { conv1d, padding })
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x_pad = x.pad_with_zeros(D::Minus1, self.padding * 2, 0)?;
let x = self.conv1d.forward(&x_pad)?;
Ok(x)
}
}
pub struct CausalConvTranspose1d {
conv_transpose1d: ConvTranspose1d,
padding: usize,
output_padding: usize,
}
// transposed-conv output arithmetic:
// zeros inserted between elements: stride - 1
// implicit pad on both sides: k - p - 1
// output length: (h - 1) * s - 2p + k
// here (internal p = 0, k = 2s): (h + 1) * s
impl CausalConvTranspose1d {
pub fn new(
weight: Tensor,
bias: Option<Tensor>,
padding: usize,
dilation: usize,
output_padding: usize,
groups: usize,
stride: usize,
) -> Result<Self> {
let config = ConvTranspose1dConfig {
padding: 0,
output_padding: 0,
stride,
dilation,
groups,
};
let conv_transpose1d = ConvTranspose1d::new(weight, bias, config);
Ok(Self {
conv_transpose1d,
padding,
output_padding,
})
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.conv_transpose1d.forward(x)?;
let last_dim = x.dim(D::Minus1)?;
let select_num = last_dim - (self.padding * 2 - self.output_padding);
let x = x.narrow(D::Minus1, 0, select_num)?;
Ok(x)
}
}
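// Causal conv with weight normalization resolved at load time:
// w = g * v / ||v||, where `weight_g` and `weight_v` are read straight from
// the checkpoint, so no renormalization is needed during inference.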
pub struct WNCausalConv1d {
conv: CausalConv1d,
}
impl WNCausalConv1d {
pub fn new(
vb: VarBuilder,
in_c: usize,
out_c: usize,
kernel_size: usize,
dilation: usize,
padding: usize,
groups: usize,
stride: usize,
) -> Result<Self> {
let in_c = in_c / groups;
let weight_g = vb.get((out_c, 1, 1), "weight_g")?;
let weight_v = vb.get((out_c, in_c, kernel_size), "weight_v")?;
let bias = vb.get(out_c, "bias").ok();
let weight_norm = weight_v.sqr()?.sum_keepdim(1)?.sum_keepdim(2)?.sqrt()?;
let normalized_weight = weight_v.broadcast_div(&weight_norm)?;
let scaled_weight = normalized_weight.broadcast_mul(&weight_g)?;
let conv = CausalConv1d::new(scaled_weight, bias, padding, dilation, groups, stride)?;
Ok(Self { conv })
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.conv.forward(x)?;
Ok(x)
}
}
pub struct WNCausalConvTranspose1d {
conv_transpose: CausalConvTranspose1d,
}
impl WNCausalConvTranspose1d {
pub fn new(
vb: VarBuilder,
in_c: usize,
out_c: usize,
dilation: usize,
kernel_size: usize,
padding: usize,
output_padding: usize,
groups: usize,
stride: usize,
) -> Result<Self> {
let in_c = in_c / groups;
let weight_g = vb.get((in_c, 1, 1), "weight_g")?;
let weight_v = vb.get((in_c, out_c, kernel_size), "weight_v")?;
let bias = vb.get(out_c, "bias").ok();
let weight_norm = weight_v.sqr()?.sum_keepdim(1)?.sum_keepdim(2)?.sqrt()?;
let normalized_weight = weight_v.broadcast_div(&weight_norm)?;
let scaled_weight = normalized_weight.broadcast_mul(&weight_g)?;
let conv_transpose = CausalConvTranspose1d::new(
scaled_weight,
bias,
padding,
dilation,
output_padding,
groups,
stride,
)?;
Ok(Self { conv_transpose })
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.conv_transpose.forward(x)?;
Ok(x)
}
}
pub struct Snake1d {
alpha: Tensor,
}
impl Snake1d {
pub fn new(vb: VarBuilder, channels: usize) -> Result<Self> {
let alpha = vb.get((1, channels, 1), "alpha")?;
Ok(Self { alpha })
}
// x + sin(alpha*x)^2 / alpha
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
let dims = x.dims();
let x = x.reshape((dims[0], dims[1], ()))?;
let alpha_ = self.alpha.affine(1.0, 1e-9)?.recip()?;
let alpha_ = x
.broadcast_mul(&self.alpha)?
.sin()?
.powf(2.0)?
.broadcast_mul(&alpha_)?;
let x = x.add(&alpha_)?;
let x = x.reshape(dims)?;
Ok(x)
}
}
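// Residual unit: Snake -> dilated causal conv -> Snake -> 1x1 conv, with the
// shortcut trimmed to the branch length before the residual add.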
pub struct CausalResidualUnit {
// pad: usize,
block0: Snake1d,
block1: WNCausalConv1d,
block2: Snake1d,
block3: WNCausalConv1d,
}
impl CausalResidualUnit {
pub fn new(
vb: VarBuilder,
dim: usize,
dilation: usize,
kernel: usize,
groups: usize,
) -> Result<Self> {
let pad = ((kernel - 1) * dilation) / 2;
let block0 = Snake1d::new(vb.pp("block.0"), dim)?;
let block1 =
WNCausalConv1d::new(vb.pp("block.1"), dim, dim, kernel, dilation, pad, groups, 1)?;
let block2 = Snake1d::new(vb.pp("block.2"), dim)?;
let block3 = WNCausalConv1d::new(vb.pp("block.3"), dim, dim, 1, 1, 0, 1, 1)?;
Ok(Self {
// pad,
block0,
block1,
block2,
block3,
})
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
// let orig_dim = x.dims();
let last_dim_x = x.dim(D::Minus1)?;
let mut res_x = x.clone();
let y = self.block0.forward(x)?;
let y = self.block1.forward(&y)?;
let y = self.block2.forward(&y)?;
let y = self.block3.forward(&y)?;
// let dim = y.dims();
let last_dim_y = y.dim(D::Minus1)?;
let pad = (last_dim_x - last_dim_y) / 2;
if pad > 0 {
res_x = res_x.narrow(D::Minus1, pad, last_dim_y)?;
}
let x = y.add(&res_x)?;
Ok(x)
}
}
pub struct CausalEncoderBlock {
block0: CausalResidualUnit,
block1: CausalResidualUnit,
block2: CausalResidualUnit,
block3: Snake1d,
block4: WNCausalConv1d,
}
impl CausalEncoderBlock {
pub fn new(
vb: VarBuilder,
in_dim: Option<usize>,
out_dim: usize,
stride: usize,
groups: usize,
) -> Result<Self> {
let in_dim = match in_dim {
Some(d) => d,
None => out_dim / 2,
};
let block0 = CausalResidualUnit::new(vb.pp("block.0"), in_dim, 1, 7, groups)?;
let block1 = CausalResidualUnit::new(vb.pp("block.1"), in_dim, 3, 7, groups)?;
let block2 = CausalResidualUnit::new(vb.pp("block.2"), in_dim, 9, 7, groups)?;
let block3 = Snake1d::new(vb.pp("block.3"), in_dim)?;
let padding = (stride as f32 / 2.0).ceil() as usize;
let block4 = WNCausalConv1d::new(
vb.pp("block.4"),
in_dim,
out_dim,
2 * stride,
1,
padding,
1,
stride,
)?;
Ok(Self {
block0,
block1,
block2,
block3,
block4,
})
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.block0.forward(x)?;
let x = self.block1.forward(&x)?;
let x = self.block2.forward(&x)?;
let x = self.block3.forward(&x)?;
let x = self.block4.forward(&x)?;
Ok(x)
}
}
pub struct CausalEncoder {
block0: WNCausalConv1d,
blocks: Vec<CausalEncoderBlock>,
fc_mu: WNCausalConv1d,
fc_logvar: WNCausalConv1d,
}
impl CausalEncoder {
pub fn new(
vb: VarBuilder,
d_model: usize,
laten_dim: usize,
strides: Vec<usize>,
depthwise: bool,
) -> Result<Self> {
let mut d_model = d_model;
let mut groups;
let block0 = WNCausalConv1d::new(vb.pp("block.0"), 1, d_model, 7, 1, 3, 1, 1)?;
let vb_block = vb.pp("block");
let mut blocks = Vec::new();
for (i, stride) in strides.iter().enumerate() {
d_model *= 2;
groups = if depthwise { d_model / 2 } else { 1 };
let block_i =
CausalEncoderBlock::new(vb_block.pp(i + 1), None, d_model, *stride, groups)?;
blocks.push(block_i);
}
let fc_mu = WNCausalConv1d::new(vb.pp("fc_mu"), d_model, laten_dim, 3, 1, 1, 1, 1)?;
let fc_logvar = WNCausalConv1d::new(vb.pp("fc_logvar"), d_model, laten_dim, 3, 1, 1, 1, 1)?;
Ok(Self {
block0,
blocks,
fc_mu,
fc_logvar,
})
}
pub fn forward(&self, x: &Tensor) -> Result<(Tensor, Tensor, Tensor)> {
let mut hidden_state = self.block0.forward(x)?;
for block_i in &self.blocks {
hidden_state = block_i.forward(&hidden_state)?;
}
let mu = self.fc_mu.forward(&hidden_state)?;
let logvar = self.fc_logvar.forward(&hidden_state)?;
Ok((hidden_state, mu, logvar))
}
}
pub struct NoiseBlock {
linear: WNCausalConv1d,
}
impl NoiseBlock {
pub fn new(vb: VarBuilder, dim: usize) -> Result<Self> {
let linear = WNCausalConv1d::new(vb.pp("linear"), dim, dim, 1, 1, 0, 1, 1)?;
Ok(Self { linear })
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
let (bs, _, t) = x.dims3()?;
let noise = Tensor::randn(0.0_f32, 1.0, (bs, 1, t), x.device())?.to_dtype(x.dtype())?;
let h = self.linear.forward(x)?;
let n = h.broadcast_mul(&noise)?;
let x = x.add(&n)?;
Ok(x)
}
}
pub struct CausalDecoderBlock {
block0: Snake1d,
block1: WNCausalConvTranspose1d,
block2: CausalResidualUnit,
block3: CausalResidualUnit,
block4: CausalResidualUnit,
}
impl CausalDecoderBlock {
pub fn new(
vb: VarBuilder,
input_dim: usize,
output_dim: usize,
stride: usize,
groups: usize,
) -> Result<Self> {
let block0 = Snake1d::new(vb.pp("block.0"), input_dim)?;
let padding = (stride as f32 / 2.0).ceil() as usize;
let block1 = WNCausalConvTranspose1d::new(
vb.pp("block.1"),
input_dim,
output_dim,
1,
2 * stride,
padding,
stride % 2,
1,
stride,
)?;
let block2 = CausalResidualUnit::new(vb.pp("block.2"), output_dim, 1, 7, groups)?;
let block3 = CausalResidualUnit::new(vb.pp("block.3"), output_dim, 3, 7, groups)?;
let block4 = CausalResidualUnit::new(vb.pp("block.4"), output_dim, 9, 7, groups)?;
Ok(Self {
block0,
block1,
block2,
block3,
block4,
})
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.block0.forward(x)?;
let x = self.block1.forward(&x)?;
let x = self.block2.forward(&x)?;
let x = self.block3.forward(&x)?;
let x = self.block4.forward(&x)?;
Ok(x)
}
}
pub struct CausalDecoder {
model0: WNCausalConv1d,
model1: WNCausalConv1d,
models: Vec<CausalDecoderBlock>,
model_minus_2: Snake1d,
model_minus_1: WNCausalConv1d,
}
impl CausalDecoder {
pub fn new(
vb: VarBuilder,
input_channel: usize,
channels: usize,
rates: Vec<usize>,
d_out: usize,
depthwise: bool,
) -> Result<Self> {
let model0 = WNCausalConv1d::new(
vb.pp("model.0"),
input_channel,
input_channel,
7,
1,
3,
input_channel,
1,
)?;
let model1 = WNCausalConv1d::new(vb.pp("model.1"), input_channel, channels, 1, 1, 0, 1, 1)?;
let vb_model = vb.pp("model");
let mut output_dim = channels;
let mut models = Vec::new();
for (i, stride) in rates.iter().enumerate() {
let input_dim = channels / 2_usize.pow(i as u32);
output_dim = channels / 2_usize.pow((i + 1) as u32);
let groups = if depthwise { output_dim } else { 1 };
let model_i = CausalDecoderBlock::new(
vb_model.pp(i + 2),
input_dim,
output_dim,
*stride,
groups,
)?;
models.push(model_i);
}
let idx = rates.len() + 2;
let model_minus_2 = Snake1d::new(vb_model.pp(idx), output_dim)?;
let model_minus_1 =
WNCausalConv1d::new(vb_model.pp(idx + 1), output_dim, d_out, 7, 1, 3, 1, 1)?;
Ok(Self {
model0,
model1,
models,
model_minus_2,
model_minus_1,
})
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.model0.forward(x)?;
let mut x = self.model1.forward(&x)?;
for model_i in &self.models {
x = model_i.forward(&x)?;
}
let x = self.model_minus_2.forward(&x)?;
let x = self.model_minus_1.forward(&x)?;
let x = x.tanh()?;
Ok(x)
}
}
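// Convolutional audio VAE. `hop_length` (== `chunk_size`) is the product of
// the encoder strides, i.e. waveform samples per latent frame; `encode`
// returns only the posterior mean `mu`.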
pub struct AudioVAE {
// encoder_dim: usize,
// encoder_rates: Vec<usize>,
// decoder_dim: usize,
// decoder_rates: Vec<usize>,
pub latent_dim: usize,
hop_length: usize,
encoder: CausalEncoder,
decoder: CausalDecoder,
pub sample_rate: usize,
pub chunk_size: usize,
}
impl AudioVAE {
pub fn new(
vb: VarBuilder,
encoder_dim: usize,
encoder_rates: Vec<usize>,
laten_dim: Option<usize>,
decoder_dim: usize,
decoder_rates: Vec<usize>,
sample_rate: usize,
) -> Result<Self> {
let latent_dim = match laten_dim {
Some(d) => d,
None => encoder_dim * (2_usize.pow(encoder_rates.len() as u32)),
};
let hop_length = encoder_rates.iter().product();
let encoder = CausalEncoder::new(
vb.pp("encoder"),
encoder_dim,
latent_dim,
encoder_rates.clone(),
true,
)?;
let decoder = CausalDecoder::new(
vb.pp("decoder"),
latent_dim,
decoder_dim,
decoder_rates.clone(),
1,
true,
)?;
let chunk_size = hop_length;
Ok(Self {
// encoder_dim,
// encoder_rates,
// decoder_dim,
// decoder_rates,
latent_dim,
hop_length,
encoder,
decoder,
sample_rate,
chunk_size,
})
}
pub fn preprocess(&self, audio_data: &Tensor, sample_rate: Option<usize>) -> Result<Tensor> {
let sample_rate = match sample_rate {
Some(r) => r,
None => self.sample_rate,
};
assert_eq!(sample_rate, self.sample_rate);
let pad_to = self.hop_length;
let length = audio_data.dim(D::Minus1)?;
let right_pad = (length as f32 / pad_to as f32).ceil() as usize * pad_to - length;
let audio_data = audio_data.pad_with_zeros(D::Minus1, 0, right_pad)?;
Ok(audio_data)
}
pub fn decode(&self, z: &Tensor) -> Result<Tensor> {
let x = self.decoder.forward(z)?;
Ok(x)
}
pub fn encode(&self, audio_data: &Tensor, sample_rate: Option<usize>) -> Result<Tensor> {
let audio_data = match audio_data.rank() {
2 => audio_data.unsqueeze(1)?,
_ => audio_data.clone(),
};
let audio_data = self.preprocess(&audio_data, sample_rate)?;
let (_, mu, _) = self.encoder.forward(&audio_data)?;
Ok(mu)
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/voxcpm/tokenizer.rs | src/models/voxcpm/tokenizer.rs | use anyhow::{Ok, Result, anyhow};
use tokenizers::Tokenizer;
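// Tokenizer wrapper that forces per-character tokenization for Chinese: any
// vocab entry of two or more CJK ideographs (U+4E00..=U+9FFF) is split back
// into single characters before id lookup, so Chinese text maps to one token
// per character.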
pub struct SingleChineseTokenizer {
tokenizer: Tokenizer,
multichar_tokens: Vec<String>,
}
impl SingleChineseTokenizer {
pub fn new(path: &str) -> Result<Self> {
let path = path.to_string();
assert!(
std::path::Path::new(&path).exists(),
"model path file not exists"
);
let tokenizer_file = path.clone() + "/tokenizer.json";
assert!(
std::path::Path::new(&tokenizer_file).exists(),
"tokenizer.json not exists in model path"
);
let tokenizer = Tokenizer::from_file(tokenizer_file)
.map_err(|e| anyhow!(format!("tokenizer from file error{e}")))?;
let mut multichar_tokens = Vec::new();
for (token, _) in tokenizer.get_vocab(false) {
let len = token.chars().count();
if len >= 2 {
let is_chinese = token.chars().all(|c| {
let c_ = c as u32;
(0x4E00..=0x9FFF).contains(&c_)
});
if is_chinese {
multichar_tokens.push(token);
}
}
}
Ok(Self {
tokenizer,
multichar_tokens,
})
}
pub fn encode(&self, text: String) -> Result<Vec<u32>> {
let encode = self
.tokenizer
.encode(text, false)
.map_err(|e| anyhow!(format!("tokenizer encode error: {e}")))?;
let tokens = encode.get_tokens();
// println!("tokens: {:?}", tokens);
let mut split_character = Vec::new();
for token in tokens {
let clean_token = token.replace("▁", "");
if self.multichar_tokens.contains(&clean_token) {
let chars: Vec<String> = clean_token.chars().map(|c| c.to_string()).collect();
split_character.extend(chars);
} else {
split_character.push(token.clone());
}
}
// println!("split_character: {:?}", split_character);
let ids: Vec<u32> = split_character
.iter()
.filter_map(|c| self.tokenizer.token_to_id(c))
.collect();
Ok(ids)
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/voxcpm/generate.rs | src/models/voxcpm/generate.rs | use std::collections::HashMap;
use aha_openai_dive::v1::resources::chat::{
ChatCompletionChunkResponse, ChatCompletionParameters, ChatCompletionResponse,
};
use anyhow::{Ok, Result};
use base64::{Engine, prelude::BASE64_STANDARD};
use candle_core::{DType, Device, Tensor, pickle::read_all_with_key};
use candle_nn::VarBuilder;
use rocket::futures::{Stream, stream};
use crate::{
models::{
GenerateModel,
voxcpm::{
audio_vae::AudioVAE,
config::{AudioVaeConfig, VoxCPMConfig},
model::VoxCPMModel,
tokenizer::SingleChineseTokenizer,
},
},
utils::{
audio_utils::{extract_audio_url, get_audio_wav_u8},
build_audio_completion_response, extract_metadata_value, extract_user_text,
find_type_files, get_device, get_dtype,
},
};
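// High-level VoxCPM wrapper: the audio VAE weights come from .pth
// checkpoints (under the "state_dict" key) and the LM/DiT weights from .bin
// or .safetensors; the model name is derived from the VAE sample rate
// (16 kHz -> "VoxCPM", otherwise "VoxCPM1.5").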
pub struct VoxCPMGenerate {
voxcpm: VoxCPMModel,
prompt_cache: Option<HashMap<String, Tensor>>,
sample_rate: usize,
model_name: String,
}
impl VoxCPMGenerate {
pub fn init(path: &str, device: Option<&Device>, dtype: Option<DType>) -> Result<Self> {
let device = &get_device(device);
let config_path = path.to_string() + "/config.json";
let config: VoxCPMConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
let model_list = find_type_files(path, "pth")?;
// println!(" pth model_list: {:?}", model_list);
let mut dict_to_hashmap = HashMap::new();
let mut vae_dtype = candle_core::DType::F32;
for m in model_list {
let dict = read_all_with_key(m, Some("state_dict"))?;
vae_dtype = dict[0].1.dtype();
for (k, v) in dict {
// println!("key: {}, tensor shape: {:?}", k, v);
dict_to_hashmap.insert(k, v);
}
}
let vb_vae = VarBuilder::from_tensors(dict_to_hashmap, vae_dtype, device);
let audio_config = match config.audio_vae_config.clone() {
Some(config) => config,
None => AudioVaeConfig {
encoder_dim: 128,
encoder_rates: vec![2, 5, 8, 8],
latent_dim: 64,
decoder_dim: 1536,
decoder_rates: vec![8, 8, 5, 2],
sample_rate: 16000,
},
};
let model_name = if audio_config.sample_rate == 16000 {
"VoxCPM".to_string()
} else {
"VoxCPM1.5".to_string()
};
let audio_vae = AudioVAE::new(
vb_vae,
audio_config.encoder_dim,
audio_config.encoder_rates.clone(),
Some(audio_config.latent_dim),
audio_config.decoder_dim,
audio_config.decoder_rates.clone(),
audio_config.sample_rate,
)?;
let cfg_dtype = config.dtype.as_str();
let m_dtype = get_dtype(dtype, cfg_dtype);
let model_list = find_type_files(path, "bin")?;
        // VoxCPM 0.5B model weights ship as .bin files; VoxCPM 1.5 ships as .safetensors
let vb_voxcpm = if model_list.is_empty() {
let model_list = find_type_files(path, "safetensors")?;
unsafe { VarBuilder::from_mmaped_safetensors(&model_list, m_dtype, device)? }
} else {
            dict_to_hashmap = HashMap::new();
for m in model_list {
let dict = read_all_with_key(m, Some("state_dict"))?;
for (k, v) in dict {
// println!("key: {}, tensor shape: {:?}", k, v);
dict_to_hashmap.insert(k, v);
}
}
VarBuilder::from_tensors(dict_to_hashmap, m_dtype, device)
};
let tokenizer = SingleChineseTokenizer::new(path)?;
let voxcpm = VoxCPMModel::new(vb_voxcpm, config, tokenizer, audio_vae)?;
Ok(Self {
voxcpm,
prompt_cache: None,
sample_rate: audio_config.sample_rate,
model_name,
})
}
pub fn build_prompt_cache(
&mut self,
prompt_text: String,
prompt_wav_path: String,
) -> Result<()> {
let cache = self
.voxcpm
.build_prompt_cache(prompt_text, prompt_wav_path)?;
self.prompt_cache = Some(cache);
Ok(())
}
pub fn generate_use_prompt_cache(
&mut self,
target_text: String,
min_len: usize,
max_len: usize,
inference_timesteps: usize,
cfg_value: f64,
retry_badcase: bool,
retry_badcase_ratio_threshold: f64,
) -> Result<Tensor> {
let audio = match &self.prompt_cache {
Some(cache) => {
let prompt_cache = cache.clone();
self.voxcpm.generate_with_prompt_cache(
target_text,
prompt_cache,
min_len,
max_len,
inference_timesteps,
cfg_value,
retry_badcase,
retry_badcase_ratio_threshold,
)?
}
None => self.generate_simple(target_text)?,
};
Ok(audio)
}
pub fn generate_with_prompt_simple(
&mut self,
target_text: String,
prompt_text: Option<String>,
prompt_wav_path: Option<String>,
) -> Result<Tensor> {
let audio = self.inference(
target_text,
prompt_text,
prompt_wav_path,
2,
1000,
10,
2.0,
// false,
6.0,
)?;
Ok(audio)
}
pub fn generate_simple(&mut self, target_text: String) -> Result<Tensor> {
// let audio = self.generate(target_text, None, None, 2, 100, 10, 2.0, false, 6.0)?;
let audio = self.inference(target_text, None, None, 2, 100, 10, 2.0, 6.0)?;
Ok(audio)
}
pub fn inference(
&mut self,
target_text: String,
prompt_text: Option<String>,
prompt_wav_path: Option<String>,
min_len: usize,
max_len: usize,
inference_timesteps: usize,
cfg_value: f64,
// retry_badcase: bool,
retry_badcase_ratio_threshold: f64,
) -> Result<Tensor> {
let audio = self.voxcpm.generate(
target_text,
prompt_text,
prompt_wav_path,
min_len,
max_len,
inference_timesteps,
cfg_value,
// retry_badcase,
retry_badcase_ratio_threshold,
)?;
Ok(audio)
}
}
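// Minimal end-to-end sketch, assuming a local checkpoint directory and a
// reference wav (both paths are hypothetical):
//
//     let mut tts = VoxCPMGenerate::init("./models/voxcpm", None, None)?;
//     tts.build_prompt_cache("reference text".into(), "./ref.wav".into())?;
//     let audio = tts.generate_use_prompt_cache(
//         "text to synthesize".into(), 2, 1000, 10, 2.0, true, 6.0)?;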
impl GenerateModel for VoxCPMGenerate {
fn generate(&mut self, mes: ChatCompletionParameters) -> Result<ChatCompletionResponse> {
let prompt_text = extract_metadata_value::<String>(&mes.metadata, "prompt_text");
let min_len = extract_metadata_value::<usize>(&mes.metadata, "min_len").unwrap_or(2);
let max_len = extract_metadata_value::<usize>(&mes.metadata, "max_len").unwrap_or(4096);
let inference_timesteps =
extract_metadata_value::<usize>(&mes.metadata, "inference_timesteps").unwrap_or(10);
let cfg_value = extract_metadata_value::<f64>(&mes.metadata, "cfg_value").unwrap_or(2.0);
let retry_badcase_ratio_threshold =
extract_metadata_value::<f64>(&mes.metadata, "retry_badcase_ratio_threshold")
.unwrap_or(6.0);
let target_text = extract_user_text(&mes)?;
let prompt_wav = extract_audio_url(&mes)?;
let prompt_wav_path = if !prompt_wav.is_empty() {
Some(prompt_wav[0].clone())
} else {
None
};
let audio = self.voxcpm.generate(
target_text,
prompt_text,
prompt_wav_path,
min_len,
max_len,
inference_timesteps,
cfg_value,
retry_badcase_ratio_threshold,
)?;
let wav_u8 = get_audio_wav_u8(&audio, self.sample_rate as u32)?;
let base64_audio = BASE64_STANDARD.encode(wav_u8);
let response = build_audio_completion_response(&base64_audio, &self.model_name);
Ok(response)
}
#[allow(unused_variables)]
fn generate_stream(
&mut self,
mes: ChatCompletionParameters,
) -> Result<
Box<
dyn Stream<Item = Result<ChatCompletionChunkResponse, anyhow::Error>>
+ Send
+ Unpin
+ '_,
>,
> {
let error_stream = stream::once(async {
Err(anyhow::anyhow!(format!(
"{} model not support stream",
self.model_name
))) as Result<ChatCompletionChunkResponse, anyhow::Error>
});
Ok(Box::new(Box::pin(error_stream)))
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/rmbg2_0/model.rs | src/models/rmbg2_0/model.rs | use anyhow::{Result, anyhow};
use candle_core::{D, DType, Device, IndexOp, Shape, Tensor};
use candle_nn::{
Activation, BatchNorm, Conv2d, Init, LayerNorm, Linear, Module, ModuleT, VarBuilder, linear,
linear_no_bias, ops::sigmoid,
};
use crate::{
models::common::{
TwoLinearMLP, deform_conv2d_kernel, get_batch_norm, get_conv2d, get_layer_norm,
},
utils::tensor_utils::{
get_equal_mask, index_select_2d, interpolate_bilinear, split_tensor_with_size,
},
};
struct PatchEmbed {
proj: Conv2d,
norm: Option<LayerNorm>,
patch_size: usize,
embed_dim: usize,
}
impl PatchEmbed {
pub fn new(
vb: VarBuilder,
in_chans: usize,
embed_dim: usize,
patch_size: usize,
patch_norm: bool,
) -> Result<Self> {
let proj = get_conv2d(
vb.pp("proj"),
in_chans,
embed_dim,
patch_size,
0,
patch_size,
1,
1,
true,
)?;
let norm = if patch_norm {
Some(get_layer_norm(vb.pp("norm"), 1e-5, embed_dim)?)
} else {
None
};
Ok(Self {
patch_size,
proj,
norm,
embed_dim,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (_, _, h, w) = xs.dims4()?;
let mut xs = xs.clone();
if w % self.patch_size != 0 {
xs = xs.pad_with_zeros(3, 0, self.patch_size - w % self.patch_size)?;
}
if h % self.patch_size != 0 {
xs = xs.pad_with_zeros(2, 0, self.patch_size - h % self.patch_size)?;
}
xs = self.proj.forward(&xs)?;
if self.norm.is_some() {
let (_, _, ph, pw) = xs.dims4()?;
xs = xs.flatten_from(2)?.transpose(1, 2)?;
xs = self.norm.as_ref().unwrap().forward(&xs)?;
xs = xs.transpose(1, 2)?.reshape(((), self.embed_dim, ph, pw))?;
}
Ok(xs)
}
}
pub struct WindowAttention {
num_heads: usize,
relative_position_bias: Tensor,
qkv: Linear,
proj: Linear,
scaling: f64,
}
impl WindowAttention {
pub fn new(
vb: VarBuilder,
dim: usize,
num_heads: usize,
qkv_bias: bool,
window_size: (usize, usize),
) -> Result<Self> {
let head_dim = dim / num_heads;
let scaling = 1.0 / (head_dim as f64).sqrt();
let qkv = if qkv_bias {
linear(dim, dim * 3, vb.pp("qkv"))?
} else {
linear_no_bias(dim, dim * 3, vb.pp("qkv"))?
};
let proj = linear(dim, dim, vb.pp("proj"))?;
let relative_position_bias_table = vb.get_with_hints(
((2 * window_size.0 - 1) * (2 * window_size.1 - 1), num_heads),
"relative_position_bias_table",
Init::Const(0.),
        )?; // ((2*Wh-1) * (2*Ww-1), nH)
let coords_h = Tensor::arange(0f32, window_size.0 as f32, vb.device())?
.unsqueeze(1)?
.broadcast_as(window_size)?;
let coords_w = Tensor::arange(0f32, window_size.1 as f32, vb.device())?
.unsqueeze(0)?
.broadcast_as(window_size)?;
        let coords = Tensor::stack(&[coords_h, coords_w], 0)?.flatten_from(1)?; // (2, wh*ww)
let coords1 = coords.unsqueeze(2)?;
let coords2 = coords.unsqueeze(1)?;
let relative_coords = coords1
.broadcast_sub(&coords2)?
.permute((1, 2, 0))?
.contiguous()?; // (wh*ww, wh*ww, 2)
let relative_coords_0 = relative_coords
.i((.., .., 0))?
.affine(1.0, window_size.0 as f64 - 1.0)?;
let relative_coords_1 = relative_coords
.i((.., .., 1))?
.affine(1.0, window_size.1 as f64 - 1.0)?;
let relative_coords_0 = relative_coords_0.affine(2.0 * window_size.1 as f64 - 1.0, 0.0)?;
let relative_position_index = relative_coords_0
.add(&relative_coords_1)?
.to_dtype(candle_core::DType::U32)?;
let relative_position_bias =
index_select_2d(&relative_position_bias_table, &relative_position_index)?;
Ok(Self {
num_heads,
relative_position_bias,
qkv,
proj,
scaling,
})
}
pub fn forward(&self, xs: &Tensor, attn_mask: Option<&Tensor>) -> Result<Tensor> {
let (b, seq_len, _) = xs.dims3()?;
// (3, B, n_head, h*w, head_dim)
let qkv = self
.qkv
.forward(xs)?
.reshape((b, seq_len, 3, self.num_heads, ()))?
.permute((2, 0, 3, 1, 4))?
.contiguous()?;
let query_states = qkv.i(0)?.contiguous()?;
let key_states = qkv.i(1)?.contiguous()?;
let value_states = qkv.i(2)?.contiguous()?;
let attn_bias = self
.relative_position_bias
.permute((2, 0, 1))?
.contiguous()?
.unsqueeze(0)?;
let query_states = (query_states * self.scaling)?;
let attn_weights = query_states.matmul(&key_states.transpose(D::Minus2, D::Minus1)?)?;
let attn_weights = attn_weights.broadcast_add(&attn_bias)?;
let attn_weights = match attn_mask {
None => attn_weights,
Some(mask) => {
let nw: usize = mask.dim(0)?;
let attn_weights = attn_weights
.reshape((b / nw, nw, self.num_heads, seq_len, seq_len))?
.broadcast_add(
&mask
.unsqueeze(1)?
.unsqueeze(0)?
.to_dtype(attn_weights.dtype())?,
)?;
attn_weights.reshape(((), self.num_heads, seq_len, seq_len))?
}
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&value_states)?;
//(b, n_head, seq_len, dim) -> (b, seq_len, n_head, dim)
let xs = attn_output.transpose(1, 2)?.contiguous()?;
// (b, h*w, n_head, dim)
let xs = xs.reshape((b, seq_len, ()))?;
let xs = self.proj.forward(&xs)?;
Ok(xs)
}
}
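// Worked example of the relative-position indexing above for a 2x2 window:
// offsets (dx, dy) range over [-1, 1], the shifts map them to [0, 2], and the
// flat index (dx + Wh - 1) * (2*Ww - 1) + (dy + Ww - 1) assigns each of the
// (Wh*Ww)^2 = 16 token pairs one of the (2*Wh-1) * (2*Ww-1) = 9 bias-table rows.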
fn window_partition(x: &Tensor, window_size: usize) -> Result<Tensor> {
let (b, h, w, c) = x.dims4()?;
let x = x.reshape((
b,
h / window_size,
window_size,
w / window_size,
window_size,
c,
))?;
let windows =
x.permute((0, 1, 3, 2, 4, 5))?
.contiguous()?
.reshape(((), window_size, window_size, c))?;
Ok(windows)
}
fn window_reverse(windows: &Tensor, window_size: usize, pad_hw: (usize, usize)) -> Result<Tensor> {
let (hp, wp) = pad_hw;
let b = windows.dim(0)? / (hp * wp / window_size / window_size);
let last_dim = windows.dim(D::Minus1)?;
let x = windows.reshape(&[
b,
hp / window_size,
wp / window_size,
window_size,
window_size,
last_dim,
])?;
let x = x
.permute((0, 1, 3, 2, 4, 5))?
.contiguous()?
.reshape((b, hp, wp, ()))?;
Ok(x)
}
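// Shape round trip for the two helpers above with window_size = 4: a
// (1, 8, 8, C) feature map partitions into (4, 4, 4, C) windows (2x2 windows
// per image), and window_reverse with pad_hw = (8, 8) restores (1, 8, 8, C).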
struct SwinTransformerBlock {
norm1: LayerNorm,
attn: WindowAttention,
norm2: LayerNorm,
mlp: TwoLinearMLP,
window_size: usize,
shift_size: usize,
}
impl SwinTransformerBlock {
pub fn new(
vb: VarBuilder,
dim: usize,
num_heads: usize,
mlp_ratio: f32,
qkv_bias: bool,
act: Activation,
window_size: usize,
shift_size: usize,
) -> Result<Self> {
let norm1 = get_layer_norm(vb.pp("norm1"), 1e-5, dim)?;
let attn = WindowAttention::new(
vb.pp("attn"),
dim,
num_heads,
qkv_bias,
(window_size, window_size),
)?;
let norm2 = get_layer_norm(vb.pp("norm2"), 1e-5, dim)?;
let mlp_dim = (dim as f32 * mlp_ratio) as usize;
let mlp = TwoLinearMLP::new(vb.pp("mlp"), dim, mlp_dim, act, true, "fc1", "fc2")?;
Ok(Self {
norm1,
attn,
norm2,
mlp,
window_size,
shift_size,
})
}
pub fn forward(
&self,
xs: &Tensor,
mask_matrix: Option<&Tensor>,
h: usize,
w: usize,
) -> Result<Tensor> {
let (b, seq_len, c) = xs.dims3()?;
assert_eq!(
seq_len,
h * w,
"swin transformer block sq_len not equal to h*w"
);
let shortcut = xs.clone();
let xs = self.norm1.forward(xs)?;
let xs = xs.reshape((b, h, w, c))?;
let pad_h = (self.window_size - h % self.window_size) % self.window_size;
let pad_w = (self.window_size - w % self.window_size) % self.window_size;
let xs = xs.pad_with_zeros(1, 0, pad_h)?;
let xs = xs.pad_with_zeros(2, 0, pad_w)?;
let (_, hp, wp, _) = xs.dims4()?;
let (shifted_x, attn_mask) = if self.shift_size > 0 {
(
xs.roll(-(self.shift_size as i32), 1)?
.roll(-(self.shift_size as i32), 2)?,
mask_matrix,
)
} else {
(xs, None)
};
let xs = window_partition(&shifted_x, self.window_size)?;
let xs = xs.reshape(((), self.window_size * self.window_size, c))?;
let xs = self.attn.forward(&xs, attn_mask)?;
let xs = window_reverse(&xs, self.window_size, (hp, wp))?;
let mut xs = if self.shift_size > 0 {
xs.roll(self.shift_size as i32, 1)?
.roll(self.shift_size as i32, 2)?
} else {
xs
};
if pad_h > 0 || pad_w > 0 {
xs = xs.i((.., 0..h, 0..w, ..))?.contiguous()?;
}
let xs = xs.reshape((b, h * w, c))?;
let x = shortcut.add(&xs)?;
let x = x.add(&self.mlp.forward(&self.norm2.forward(&x)?)?)?;
Ok(x)
}
}
struct PatchMerging {
reduction: Linear,
norm: LayerNorm,
}
impl PatchMerging {
pub fn new(vb: VarBuilder, dim: usize) -> Result<Self> {
let reduction = linear_no_bias(4 * dim, 2 * dim, vb.pp("reduction"))?;
let norm = get_layer_norm(vb.pp("norm"), 1e-5, 4 * dim)?;
Ok(Self { reduction, norm })
}
pub fn forward(&self, xs: &Tensor, h: usize, w: usize) -> Result<Tensor> {
let (b, l, c) = xs.dims3()?;
assert_eq!(l, h * w, "input feature has wrong size");
let mut xs = xs.reshape((b, h, w, c))?;
let pad_input = (h % 2 == 1) || (w % 2 == 1);
if pad_input {
xs = xs
.pad_with_zeros(2, 0, w % 2)?
.pad_with_zeros(1, 0, h % 2)?;
}
let shape = Shape::from_dims(&[b, h / 2, 2, w / 2, 2, c]);
let xs = xs.reshape(shape)?;
let x0 = xs.i((.., .., 0, .., 0, ..))?;
let x1 = xs.i((.., .., 1, .., 0, ..))?;
let x2 = xs.i((.., .., 0, .., 1, ..))?;
let x3 = xs.i((.., .., 1, .., 1, ..))?;
let xs = Tensor::cat(&[x0, x1, x2, x3], D::Minus1)?;
let xs = xs.reshape((b, (), 4 * c))?;
let xs = self.norm.forward(&xs)?;
let xs = self.reduction.forward(&xs)?;
Ok(xs)
}
}
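// PatchMerging shape example: (B, 64, C) tokens for an 8x8 grid are regrouped
// into the four 2x2-strided sub-grids, concatenated to (B, 16, 4C), normalized,
// then projected down to (B, 16, 2C) by the bias-free reduction linear.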
struct BasicLayer {
window_size: usize,
shift_size: usize,
blocks: Vec<SwinTransformerBlock>,
downsample: Option<PatchMerging>,
}
impl BasicLayer {
pub fn new(
vb: VarBuilder,
dim: usize,
depth: usize,
num_heads: usize,
window_size: usize,
mlp_ratio: f32,
qkv_bias: bool,
downsample: bool,
) -> Result<Self> {
let shift_size = window_size / 2;
let mut blocks = vec![];
let vb_blocks = vb.pp("blocks");
for i in 0..depth {
let block_shift_size = if i % 2 == 0 { 0usize } else { shift_size };
let block = SwinTransformerBlock::new(
vb_blocks.pp(i),
dim,
num_heads,
mlp_ratio,
qkv_bias,
Activation::Gelu,
window_size,
block_shift_size,
)?;
blocks.push(block);
}
let downsample = if downsample {
Some(PatchMerging::new(vb.pp("downsample"), dim)?)
} else {
None
};
Ok(Self {
window_size,
shift_size,
blocks,
downsample,
})
}
pub fn forward(
&self,
xs: &Tensor,
h: usize,
w: usize,
) -> Result<(Tensor, usize, usize, Tensor, usize, usize)> {
let hp = (h as f32 / self.window_size as f32).ceil() as usize * self.window_size;
let wp = (w as f32 / self.window_size as f32).ceil() as usize * self.window_size;
let mut img_mask = Tensor::zeros((1, hp, wp, 1), xs.dtype(), xs.device())?;
let h_slices = [
(0usize, hp - self.window_size),
(hp - self.window_size, hp - self.shift_size),
(hp - self.shift_size, hp),
];
let w_slices = [
(0usize, wp - self.window_size),
(wp - self.window_size, wp - self.shift_size),
(wp - self.shift_size, wp),
];
let mut cnt = 0f64;
for (h_start, h_end) in h_slices {
for (w_start, w_end) in w_slices {
let mask_value = Tensor::zeros(
(1, h_end - h_start, w_end - w_start, 1),
xs.dtype(),
xs.device(),
)?
.affine(1.0, cnt)?;
img_mask = img_mask.slice_assign(
&[(0..1), (h_start..h_end), (w_start..w_end), (0..1)],
&mask_value,
)?;
cnt += 1.0;
}
}
let mask_windows = window_partition(&img_mask, self.window_size)?;
let mask_windows = mask_windows.reshape(((), self.window_size * self.window_size))?;
let attn_mask = mask_windows
.unsqueeze(1)?
.broadcast_sub(&mask_windows.unsqueeze(2)?)?;
let equal_zero_mask = get_equal_mask(&attn_mask, 0)?;
let attn_mask = equal_zero_mask.where_cond(
&Tensor::new(0f32, xs.device())?.broadcast_as(equal_zero_mask.shape())?,
&Tensor::new(-100f32, xs.device())?.broadcast_as(equal_zero_mask.shape())?,
)?;
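        // Each (h_slice, w_slice) pair above labels one of nine regions with a
        // distinct cnt; after the cyclic shift, token pairs whose labels differ
        // came from different original windows, so their logits receive the -100
        // additive mask and are softmaxed away inside the attention blocks.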
let mut xs = xs.clone();
for block in &self.blocks {
xs = block.forward(&xs, Some(&attn_mask), h, w)?;
}
let (xs_down, wh, ww) = match self.downsample.as_ref() {
Some(down) => {
let xs_down = down.forward(&xs, h, w)?;
// let wh = (h + 1) / 2;
// let ww = (w + 1) / 2;
let wh = h.div_ceil(2);
let ww = w.div_ceil(2);
(xs_down, wh, ww)
}
None => (xs.clone(), h, w),
};
Ok((xs, h, w, xs_down, wh, ww))
}
}
pub struct SwinTransformer {
patch_embed: PatchEmbed,
num_layers: usize,
// pos_drop: Dropout,
layers: Vec<BasicLayer>,
norms: Vec<LayerNorm>,
out_indices: Vec<usize>,
num_features: Vec<usize>,
}
impl SwinTransformer {
pub fn new(
vb: VarBuilder,
patch_size: usize,
in_channels: usize,
embed_dim: usize,
depths: Vec<usize>,
num_heads: Vec<usize>,
window_size: usize,
mlp_ratio: f32,
qkv_bias: bool,
patch_norm: bool,
out_indices: Vec<usize>,
) -> Result<Self> {
let patch_embed = PatchEmbed::new(
vb.pp("patch_embed"),
in_channels,
embed_dim,
patch_size,
patch_norm,
)?;
let num_layers = depths.len();
let mut layers = vec![];
let vb_layers = vb.pp("layers");
let mut num_features = vec![];
for i in 0..num_layers {
let downsample = i < num_layers - 1;
let dim_i = embed_dim * 2usize.pow(i as u32);
num_features.push(dim_i);
let layer_i = BasicLayer::new(
vb_layers.pp(i),
dim_i,
depths[i],
num_heads[i],
window_size,
mlp_ratio,
qkv_bias,
downsample,
)?;
layers.push(layer_i);
}
let mut norms = vec![];
for i in out_indices.clone() {
let layer_i = get_layer_norm(vb.pp(format!("norm{i}")), 1e-5, num_features[i])?;
norms.push(layer_i);
}
Ok(Self {
num_layers,
patch_embed,
layers,
norms,
out_indices,
num_features,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Vec<Tensor>> {
let xs = self.patch_embed.forward(xs)?;
let (_, _, mut wh, mut ww) = xs.dims4()?;
let mut outs = vec![];
let mut xs = xs.flatten_from(2)?.transpose(1, 2)?;
let mut norm_idx = 0;
for i in 0..self.num_layers {
let layer = &self.layers[i];
let (x_out, h, w, xs_, wh_, ww_) = layer.forward(&xs, wh, ww)?;
xs = xs_.clone();
wh = wh_;
ww = ww_;
if self.out_indices.contains(&i) {
let norm_layer = &self.norms[norm_idx];
norm_idx += 1;
let x_out = norm_layer.forward(&x_out)?;
let out = x_out
.reshape(((), h, w, self.num_features[i]))?
.permute((0, 3, 1, 2))?
.contiguous()?;
outs.push(out);
}
}
Ok(outs)
}
}
#[allow(unused)]
struct DeformableConv2d {
offset_conv: Conv2d,
modulator_conv: Conv2d,
regular_conv: Conv2d,
stride: usize,
padding: usize,
ks: usize,
}
#[allow(unused)]
impl DeformableConv2d {
pub fn new(
vb: VarBuilder,
in_c: usize,
out_c: usize,
kernel_size: usize,
stride: usize,
padding: usize,
bias: bool,
) -> Result<Self> {
let offset_conv = get_conv2d(
vb.pp("offset_conv"),
in_c,
2 * kernel_size * kernel_size,
kernel_size,
padding,
stride,
1,
1,
true,
)?;
let modulator_conv = get_conv2d(
vb.pp("modulator_conv"),
in_c,
kernel_size * kernel_size,
kernel_size,
padding,
stride,
1,
1,
true,
)?;
let regular_conv = get_conv2d(
vb.pp("regular_conv"),
in_c,
out_c,
kernel_size,
0,
kernel_size,
1,
1,
bias,
)?;
Ok(Self {
offset_conv,
modulator_conv,
regular_conv,
stride,
padding,
ks: kernel_size,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.forward_use_kernel(xs)
// if self.ks > 1 {
// self.forward_use_kernel(xs)
// } else {
// self.forward_use_tensor(xs)
// }
}
pub fn forward_use_kernel(&self, xs: &Tensor) -> Result<Tensor> {
let offset = self.offset_conv.forward(xs)?; // (b, 2*k*k, out_h, out_w)
let modulator = sigmoid(&self.modulator_conv.forward(xs)?)?
.affine(2.0, 0.0)?
.contiguous()?;
let out = deform_conv2d_kernel(
xs,
self.regular_conv.weight(),
self.regular_conv.bias(),
&offset,
Some(&modulator),
self.stride,
self.padding,
)?;
Ok(out)
}
pub fn forward_use_tensor(&self, xs: &Tensor) -> Result<Tensor> {
let offset = self.offset_conv.forward(xs)?; // (b, 2*k*k, out_h, out_w)
let modulator = sigmoid(&self.modulator_conv.forward(xs)?)?
.affine(2.0, 0.0)?
.contiguous()?;
let n = offset.dim(1)? / 2;
let xs = if self.padding > 0 {
xs.pad_with_zeros(2, self.padding, self.padding)?
.pad_with_zeros(3, self.padding, self.padding)?
} else {
xs.clone()
};
let offset = if self.ks > 3 {
offset.to_device(&Device::Cpu)?
} else {
offset
};
let p = self.get_p(&offset)?;
// drop(offset);
// (b, h, w, 2n)
let p = p.permute((0, 2, 3, 1))?.contiguous()?;
let q_lt = p.floor()?;
let q_rb = (&q_lt + 1.0)?;
let (_, _, in_h, in_w) = xs.dims4()?;
let in_h = in_h as f64;
let in_w = in_w as f64;
        // Handle the x and y coordinates separately
let p_x = p.narrow(3, 0, n)?.clamp(0.0, in_h - 1.0)?;
let p_y = p.narrow(3, n, n)?.clamp(0.0, in_w - 1.0)?;
// drop(p);
let q_lt_x = q_lt.narrow(3, 0, n)?.clamp(0.0, in_h - 1.0)?;
let q_lt_y = q_lt.narrow(3, n, n)?.clamp(0.0, in_w - 1.0)?;
let q_rb_x = q_rb.narrow(3, 0, n)?.clamp(0.0, in_h - 1.0)?;
let q_rb_y = q_rb.narrow(3, n, n)?.clamp(0.0, in_w - 1.0)?;
// drop(q_lt);
// drop(q_rb);
        // Convert to integer indices
let q_lt_x_idx = q_lt_x.to_dtype(DType::U32)?;
let q_lt_y_idx = q_lt_y.to_dtype(DType::U32)?;
let q_rb_x_idx = q_rb_x.to_dtype(DType::U32)?;
let q_rb_y_idx = q_rb_y.to_dtype(DType::U32)?;
        // Compute the bilinear interpolation weights
let p_sub_lt_x = (&p_x - &q_lt_x)?;
let one_sub_lt_x = (1.0 - &p_sub_lt_x)?;
let p_sub_lt_y = (&p_y - &q_lt_y)?;
let one_sub_lt_y = (1.0 - &p_sub_lt_y)?;
// drop(q_lt_x);
// drop(q_lt_y);
// drop(q_rb_x);
// drop(q_rb_y);
let g_lt = (&one_sub_lt_x * &one_sub_lt_y)?;
let g_rb = (&p_sub_lt_x * &p_sub_lt_y)?;
let g_lb = (&one_sub_lt_x * &p_sub_lt_y)?;
let g_rt = (&p_sub_lt_x * &one_sub_lt_y)?;
// drop(p_sub_lt_x);
// drop(one_sub_lt_x);
// drop(p_sub_lt_y);
// drop(one_sub_lt_y);
let xs = if self.ks > 3 {
xs.to_device(&Device::Cpu)?
} else {
xs
};
        // Sample the features at the four corner points
let x_q_lt = self.get_x_q(&xs, &q_lt_x_idx, &q_lt_y_idx)?;
let x_q_rb = self.get_x_q(&xs, &q_rb_x_idx, &q_rb_y_idx)?;
let x_q_lb = self.get_x_q(&xs, &q_lt_x_idx, &q_rb_y_idx)?;
let x_q_rt = self.get_x_q(&xs, &q_rb_x_idx, &q_lt_y_idx)?;
// drop(q_lt_x_idx);
// drop(q_lt_y_idx);
// drop(q_rb_x_idx);
// drop(q_rb_y_idx);
        // Bilinear interpolation
let x_offset = g_lt.unsqueeze(1)?.broadcast_mul(&x_q_lt)?;
// drop(g_lt);
// drop(x_q_lt);
let x_offset = x_offset.add(&g_rb.unsqueeze(1)?.broadcast_mul(&x_q_rb)?)?;
// drop(g_rb);
// drop(x_q_rb);
let x_offset = x_offset.add(&g_lb.unsqueeze(1)?.broadcast_mul(&x_q_lb)?)?;
// drop(g_lb);
// drop(x_q_lb);
let x_offset = x_offset.add(&g_rt.unsqueeze(1)?.broadcast_mul(&x_q_rt)?)?;
// drop(g_rt);
// drop(x_q_rt);
// (bs, n, h, w) -> (bs, h, w, n) -> (bs, 1, h, w, n)
let m = modulator.permute((0, 2, 3, 1))?.unsqueeze(1)?;
let x_offset = x_offset.to_device(m.device())?.broadcast_mul(&m)?;
let x_offset = self.reshape_x_offset(&x_offset, self.ks)?;
let xs = self.regular_conv.forward(&x_offset)?;
Ok(xs)
}
fn reshape_x_offset(&self, xs: &Tensor, ks: usize) -> Result<Tensor> {
let (b, c, h, w, _) = xs.dims5()?;
let xs = xs.reshape((b, c, h, w, ks, ks))?;
let xs = xs.permute((0, 1, 2, 4, 3, 5))?;
let xs = xs.reshape((b, c, h * ks, w * ks))?;
let x_offset = xs.contiguous()?;
Ok(x_offset)
}
fn get_x_q(&self, xs: &Tensor, q_x: &Tensor, q_y: &Tensor) -> Result<Tensor> {
let (b, h, w, n) = q_x.dims4()?;
let padded_w = xs.dim(3)?;
let c = xs.dim(1)?;
        // Flatten the input: (b, c, H, W) -> (b, c, H*W)
let xs_flat = xs.flatten_from(2)?;
        // Compute flat indices: idx = x * padded_w + y
let index = q_x.affine(padded_w as f64, 0.0)?.add(q_y)?; // (b, h, w, n)
        // Expand dims to match the channel count
let index = index
.unsqueeze(1)?
.expand((b, c, h, w, n))?
.flatten_from(2)?; // (b, c, h*w*n)
        // Gather the features
let xs = xs_flat.gather(&index, 2)?.reshape((b, c, h, w, n))?;
Ok(xs)
}
fn get_p_n(&self, n: usize, dtype: DType, device: &Device) -> Result<Tensor> {
let ks = self.ks as f32;
let range = Tensor::arange_step(-(ks - 1.0) / 2.0, (ks - 1.0) / 2.0 + 1.0, 1.0, device)?;
        // Example with ks = 3: the kernel offsets are
// [(-1, -1), (-1, 0), (-1, 1)
// (0, -1), (0, 0), (0, 1)
// (1, -1), (1, 0), (1, 1)]
// range: [-1, 0, 1]
// unsqueeze(1) -> [[-1], [0], [1]]
        // broadcast_as(3, 3) -> [[-1, -1, -1], [0, 0, 0], [1, 1, 1]]
// flatten_all -> [-1, -1, -1, 0, 0, 0, 1, 1, 1]
let p_n_x = range
.unsqueeze(1)?
.broadcast_as((self.ks, self.ks))?
.flatten_all()?;
// range: [-1, 0, 1]
// unsqueeze(0) -> [[-1, 0, 1]]
        // broadcast_as(3, 3) -> [[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]
// flatten_all -> [-1, 0, 1, -1, 0, 1, -1, 0, 1]
let p_n_y = range
.unsqueeze(0)?
.broadcast_as((self.ks, self.ks))?
.flatten_all()?;
let p = Tensor::cat(&[p_n_x, p_n_y], 0)?
.reshape((1, 2 * n, 1, 1))?
.to_dtype(dtype)?
.contiguous()?;
Ok(p)
}
fn get_p_0(
&self,
h: usize,
w: usize,
n: usize,
dtype: DType,
device: &Device,
) -> Result<Tensor> {
        // Example: an input feature map with h = w = 5 and padding = 1 gives a
        // padded map of hp = wp = 7; the output feature map is h = w = 5.
        // Coordinates of the padded input feature map:
// [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6)
// (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6)
// (2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6)
// (3, 0), (3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6)
// (4, 0), (4, 1), (4, 2), (4, 3), (4, 4), (4, 5), (4, 6)
// (5, 0), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6)
// (6, 0), (6, 1), (6, 2), (6, 3), (6, 4), (6, 5), (6, 6)
let start = self.padding as f32;
// let start = 0.0f32;
let p_0_x = Tensor::arange_step(
start,
start + h as f32 * self.stride as f32,
self.stride as f32,
device,
)?
.unsqueeze(1)?
.broadcast_as((h, w))?
.reshape((1, 1, h, w))?
.repeat((1, n, 1, 1))?;
let p_0_y = Tensor::arange_step(
start,
start + w as f32 * self.stride as f32,
self.stride as f32,
device,
)?
.unsqueeze(0)?
.broadcast_as((h, w))?
.reshape((1, 1, h, w))?
.repeat((1, n, 1, 1))?;
let p_0 = Tensor::cat(&[p_0_x, p_0_y], 1)?
.to_dtype(dtype)?
.contiguous()?;
Ok(p_0)
}
fn get_p(&self, offset: &Tensor) -> Result<Tensor> {
let (_, n, h, w) = offset.dims4()?;
let n = n / 2;
// (1, 2n, 1, 1)
let p_n = self.get_p_n(n, offset.dtype(), offset.device())?;
// (1, 2n, h, w)
let p_0 = self.get_p_0(h, w, n, offset.dtype(), offset.device())?;
let p = p_0
.broadcast_add(&p_n)?
.broadcast_add(offset)?
.contiguous()?;
Ok(p)
}
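    // In short: p = p_0 + p_n + offset. For every output location, the base grid
    // coordinate (p_0) plus the fixed kernel offsets (p_n) plus the learned
    // per-location offsets give the fractional sampling coordinates that the
    // bilinear interpolation above evaluates the input at.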
}
struct _ASPPModuleDeformable {
atrous_conv: DeformableConv2d,
bn: BatchNorm,
}
impl _ASPPModuleDeformable {
pub fn new(
vb: VarBuilder,
in_c: usize,
out_c: usize,
kernel_size: usize,
padding: usize,
) -> Result<Self> {
let atrous_conv = DeformableConv2d::new(
vb.pp("atrous_conv"),
in_c,
out_c,
kernel_size,
1,
padding,
false,
)?;
let bn = get_batch_norm(vb.pp("bn"), 1e-5, out_c)?;
Ok(Self { atrous_conv, bn })
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.atrous_conv.forward(xs)?;
let xs = self.bn.forward_t(&xs, false)?.relu()?;
Ok(xs)
}
}
struct ASPPDeformable {
aspp1: _ASPPModuleDeformable,
// aspp_deforms: Vec<_ASPPModuleDeformable>,
aspp_deforms_0: _ASPPModuleDeformable,
aspp_deforms_1: _ASPPModuleDeformable,
aspp_deforms_2: _ASPPModuleDeformable,
// avgpool2d + conv2d + BatchNorm2d + relu
global_avg_pool_1: Conv2d,
global_avg_pool_2: BatchNorm,
conv1: Conv2d,
bn1: BatchNorm,
}
impl ASPPDeformable {
pub fn new(
vb: VarBuilder,
in_c: usize,
out_c: usize,
parallel_block_sizes: Vec<usize>,
) -> Result<Self> {
let in_channelster = 256;
let aspp1 = _ASPPModuleDeformable::new(vb.pp("aspp1"), in_c, in_channelster, 1, 0)?;
let vb_aspp_deforms = vb.pp("aspp_deforms");
let aspp_deforms_0 = _ASPPModuleDeformable::new(
vb_aspp_deforms.pp(0),
in_c,
in_channelster,
parallel_block_sizes[0],
parallel_block_sizes[0] / 2,
)?;
let aspp_deforms_1 = _ASPPModuleDeformable::new(
vb_aspp_deforms.pp(1),
in_c,
in_channelster,
parallel_block_sizes[1],
parallel_block_sizes[1] / 2,
)?;
let aspp_deforms_2 = _ASPPModuleDeformable::new(
vb_aspp_deforms.pp(2),
in_c,
in_channelster,
parallel_block_sizes[2],
parallel_block_sizes[2] / 2,
)?;
let global_avg_pool_1 = get_conv2d(
vb.pp("global_avg_pool.1"),
in_c,
in_channelster,
1,
0,
1,
1,
1,
false,
)?;
let global_avg_pool_2 = get_batch_norm(vb.pp("global_avg_pool.2"), 1e-5, in_channelster)?;
let conv1 = get_conv2d(
vb.pp("conv1"),
in_channelster * (2 + parallel_block_sizes.len()),
out_c,
1,
0,
1,
1,
1,
false,
)?;
let bn1 = get_batch_norm(vb.pp("bn1"), 1e-5, out_c)?;
Ok(Self {
aspp1,
aspp_deforms_0,
aspp_deforms_1,
aspp_deforms_2,
global_avg_pool_1,
global_avg_pool_2,
conv1,
bn1,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let x1 = self.aspp1.forward(xs)?;
let x_aspp_deforms_0 = self.aspp_deforms_0.forward(xs)?;
let x_aspp_deforms_1 = self.aspp_deforms_1.forward(xs)?;
let x_aspp_deforms_2 = self.aspp_deforms_2.forward(xs)?;
let (_, _, h, w) = xs.dims4()?;
assert_eq!(h, w, "avg_pool2d h, w mus be equal");
let x5 = xs.avg_pool2d(h)?;
let x5 = self.global_avg_pool_1.forward(&x5)?;
let x5 = self.global_avg_pool_2.forward_t(&x5, false)?.relu()?;
let (_, _, h, w) = x1.dims4()?;
let x5 = interpolate_bilinear(&x5, (h, w), Some(true))?;
let xs = Tensor::cat(
&[x1, x_aspp_deforms_0, x_aspp_deforms_1, x_aspp_deforms_2, x5],
1,
)?;
let xs = self.conv1.forward(&xs)?;
let xs = self.bn1.forward_t(&xs, false)?.relu()?;
Ok(xs)
}
}
struct BasicDecBlk {
conv_in: Conv2d,
dec_att: ASPPDeformable,
conv_out: Conv2d,
bn_in: BatchNorm,
bn_out: BatchNorm,
}
impl BasicDecBlk {
pub fn new(vb: VarBuilder, in_c: usize, out_c: usize) -> Result<Self> {
let inter_channels = 64;
let conv_in = get_conv2d(vb.pp("conv_in"), in_c, inter_channels, 3, 1, 1, 1, 1, true)?;
let dec_att = ASPPDeformable::new(vb.pp("dec_att"), inter_channels, inter_channels, vec![
1, 3, 7,
])?;
let conv_out = get_conv2d(
vb.pp("conv_out"),
inter_channels,
out_c,
3,
1,
1,
1,
1,
true,
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | true |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/rmbg2_0/mod.rs | src/models/rmbg2_0/mod.rs | pub mod generate;
pub mod model;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/rmbg2_0/generate.rs | src/models/rmbg2_0/generate.rs | use std::io::Cursor;
use aha_openai_dive::v1::resources::chat::{
ChatCompletionChunkResponse, ChatCompletionParameters, ChatCompletionResponse,
};
use anyhow::Result;
use base64::{Engine, prelude::BASE64_STANDARD};
use candle_core::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use image::RgbaImage;
use rayon::prelude::*;
use rocket::futures::{Stream, stream};
use crate::{
models::{GenerateModel, rmbg2_0::model::BiRefNet},
utils::{
build_img_completion_response, find_type_files, get_device, get_dtype,
img_utils::{extract_images, float_tensor_to_dynamic_image, img_transform_with_resize},
},
};
pub struct RMBG2_0Model {
model: BiRefNet,
h: u32,
w: u32,
img_mean: Tensor,
img_std: Tensor,
device: Device,
dtype: DType,
model_name: String,
}
impl RMBG2_0Model {
pub fn init(path: &str, device: Option<&Device>, dtype: Option<DType>) -> Result<Self> {
let device = get_device(device);
let dtype = get_dtype(dtype, "float32");
let model_list = find_type_files(path, "safetensors")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&model_list, dtype, &device)? };
let model = BiRefNet::new(vb)?;
let img_mean =
Tensor::from_slice(&[0.485, 0.456, 0.406], (3, 1, 1), &device)?.to_dtype(dtype)?;
let img_std =
Tensor::from_slice(&[0.229, 0.224, 0.225], (3, 1, 1), &device)?.to_dtype(dtype)?;
Ok(Self {
model,
h: 1024,
w: 1024,
img_mean,
img_std,
device,
dtype,
model_name: "RMBG2.0".to_string(),
})
}
#[cfg(test)]
pub fn h(&self) -> u32 {
self.h
}
#[cfg(test)]
pub fn w(&self) -> u32 {
self.w
}
#[cfg(test)]
pub fn img_mean(&self) -> &Tensor {
&self.img_mean
}
#[cfg(test)]
pub fn img_std(&self) -> &Tensor {
&self.img_std
}
#[cfg(test)]
pub fn device(&self) -> &Device {
&self.device
}
#[cfg(test)]
pub fn dtype(&self) -> DType {
self.dtype
}
#[cfg(test)]
pub fn model(&self) -> &BiRefNet {
&self.model
}
pub fn inference(&self, mes: ChatCompletionParameters) -> Result<Vec<RgbaImage>> {
let imgs = extract_images(&mes)?;
if imgs.is_empty() {
return Ok(vec![]);
}
        // Parallel preprocessing: extract original sizes and RGB data, convert to tensors
let preprocessed: Vec<_> = imgs
.par_iter()
.map(|img| {
let height = img.height();
let width = img.width();
let rgb_img = img.to_rgb8();
let tensor = img_transform_with_resize(
img,
self.h,
self.w,
&self.img_mean,
&self.img_std,
&self.device,
self.dtype,
);
(rgb_img, height, width, tensor)
})
.collect();
        // Check the preprocessing results for errors
let mut tensors = Vec::with_capacity(preprocessed.len());
let mut meta: Vec<_> = Vec::with_capacity(preprocessed.len());
for (rgb_img, height, width, tensor_result) in preprocessed {
let tensor = tensor_result?;
tensors.push(tensor);
meta.push((rgb_img, height, width));
}
        // Batched inference: merge all images into a single batch
        // to guobin211: thanks for the contributed code! However, the deformable
        // convolution in this model currently only supports batch_size = 1, so
        // inference still loops over the images one by one QaQ
// let batch_tensor = Tensor::stack(&tensors, 0)?;
// let batch_output = self.model.forward(&batch_tensor)?;
let mut batch_output = vec![];
for img_tensor in tensors {
let output = self.model.forward(&img_tensor.unsqueeze(0)?)?.squeeze(0)?;
batch_output.push(output);
}
        // Parallel post-processing: produce the RGBA images
let results: Vec<Result<RgbaImage>> = meta
.into_par_iter()
.enumerate()
.map(|(i, (rgb_img, height, width))| {
// let rmbg_tensor = batch_output.i(i)?;
let rmbg_tensor = &batch_output[i];
let alpha_img = float_tensor_to_dynamic_image(rmbg_tensor)?;
let alpha_img =
alpha_img.resize_exact(width, height, image::imageops::FilterType::CatmullRom);
let alpha_gray = alpha_img.to_luma8();
let rgb_raw = rgb_img.as_raw();
let alpha_raw = alpha_gray.as_raw();
let pixel_count = (width * height) as usize;
let mut rgba_raw = vec![0u8; pixel_count * 4];
                // Write the pixel chunks in parallel
rgba_raw
.par_chunks_mut(4)
.enumerate()
.for_each(|(idx, chunk)| {
let src = idx * 3;
chunk[0] = rgb_raw[src];
chunk[1] = rgb_raw[src + 1];
chunk[2] = rgb_raw[src + 2];
chunk[3] = alpha_raw[idx];
});
RgbaImage::from_raw(width, height, rgba_raw)
.ok_or_else(|| anyhow::anyhow!("Failed to create RGBA image"))
})
.collect();
results.into_iter().collect()
}
}
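// Hedged usage sketch (the path and the request construction are hypothetical;
// the ChatCompletionParameters must carry at least one image content part):
//
//     let mut model = RMBG2_0Model::init("./models/rmbg2.0", None, None)?;
//     let cutouts = model.inference(request)?; // Vec<RgbaImage>, alpha = matte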
impl GenerateModel for RMBG2_0Model {
fn generate(&mut self, mes: ChatCompletionParameters) -> Result<ChatCompletionResponse> {
let rmbg_png = self.inference(mes)?;
let mut base64_vec = vec![];
for img in rmbg_png {
let mut png_bytes = Vec::new();
img.write_to(&mut Cursor::new(&mut png_bytes), image::ImageFormat::Png)?;
let base64_string = BASE64_STANDARD.encode(png_bytes);
base64_vec.push(base64_string);
}
let response = build_img_completion_response(&base64_vec, &self.model_name);
Ok(response)
}
#[allow(unused_variables)]
fn generate_stream(
&mut self,
mes: ChatCompletionParameters,
) -> Result<
Box<
dyn Stream<Item = Result<ChatCompletionChunkResponse, anyhow::Error>>
+ Send
+ Unpin
+ '_,
>,
> {
let error_stream = stream::once(async {
Err(anyhow::anyhow!(format!(
"{} model not support stream",
self.model_name
))) as Result<ChatCompletionChunkResponse, anyhow::Error>
});
Ok(Box::new(Box::pin(error_stream)))
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/qwen3vl/config.rs | src/models/qwen3vl/config.rs | use candle_nn::Activation;
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Size {
pub longest_edge: usize,
pub shortest_edge: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct PreprocessorConfig {
pub size: Size,
pub patch_size: usize,
pub temporal_patch_size: usize,
pub merge_size: usize,
pub image_mean: Vec<f32>,
pub image_std: Vec<f32>,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct RopeScaling {
pub rope_type: String,
pub mrope_section: Vec<usize>,
pub mrope_interleaved: bool,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Qwen3VLTextConfig {
pub attention_bias: bool,
pub attention_dropout: f32,
pub bos_token_id: usize,
pub dtype: String,
pub eos_token_id: usize,
pub head_dim: usize,
pub hidden_act: Activation,
pub hidden_size: usize,
pub initializer_range: f32,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_scaling: RopeScaling,
pub rope_theta: f32,
pub use_cache: bool,
pub vocab_size: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Qwen3VLVisionConfig {
pub deepstack_visual_indexes: Vec<usize>,
pub depth: usize,
pub hidden_act: Activation,
pub hidden_size: usize,
pub in_channels: usize,
pub initializer_range: f32,
pub intermediate_size: usize,
pub num_heads: usize,
pub num_position_embeddings: usize,
pub out_hidden_size: usize,
pub patch_size: usize,
pub spatial_merge_size: usize,
pub temporal_patch_size: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Qwen3VLConfig {
pub image_token_id: usize,
pub text_config: Qwen3VLTextConfig,
pub tie_word_embeddings: bool,
pub video_token_id: usize,
pub vision_config: Qwen3VLVisionConfig,
pub vision_end_token_id: usize,
pub vision_start_token_id: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Qwen3VLGenerationConfig {
pub bos_token_id: usize,
pub pad_token_id: usize,
pub do_sample: bool,
pub eos_token_id: Vec<usize>,
pub top_p: f32,
pub top_k: usize,
pub temperature: f32,
pub repetition_penalty: f32,
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/qwen3vl/processor.rs | src/models/qwen3vl/processor.rs | use std::collections::HashMap;
use aha_openai_dive::v1::resources::chat::{
ChatCompletionParameters, ChatMessage, ChatMessageContent, ChatMessageContentPart,
};
use anyhow::{Result, anyhow};
use candle_core::{DType, Device, IndexOp, Shape, Tensor};
#[cfg(feature = "ffmpeg")]
use ffmpeg_next as ffmpeg;
use image::DynamicImage;
use num::integer::lcm;
use crate::{
models::qwen3vl::config::PreprocessorConfig,
utils::{
ceil_by_factor, floor_by_factor,
img_utils::{get_image, img_smart_resize, img_transform},
round_by_factor,
},
};
#[derive(Clone)]
pub struct VisionInput {
pub data: Tensor,
pub grid_thw: Tensor,
}
#[derive(Clone)]
pub struct GeneralInput {
pub replace_text: String,
pub pixel_values: Option<Tensor>,
pub image_grid_thw: Option<Tensor>,
pub pixel_values_video: Option<Tensor>,
pub video_grid_thw: Option<Tensor>,
}
#[allow(unused)]
#[derive(Debug, Clone)]
pub struct VideoMetadata {
total_num_frames: u32,
fps: f32,
width: u32,
height: u32,
duration: f32,
frame_indices: Vec<u32>,
}
#[allow(unused)]
pub struct Qwen3VLProcessor {
img_process_cfg: PreprocessorConfig,
video_process_cfg: PreprocessorConfig,
device: Device,
dtype: DType,
image_token: String,
video_token: String,
vision_start_token: String,
vision_end_token: String,
fps: u32,
min_frames: u32,
max_frames: u32,
}
impl Qwen3VLProcessor {
pub fn new(path: &str, device: &Device, dtype: DType) -> Result<Self> {
let path = path.to_string();
assert!(
std::path::Path::new(&path).exists(),
"model path file not exists"
);
let img_process_cfg_file = path.clone() + "/preprocessor_config.json";
assert!(
std::path::Path::new(&img_process_cfg_file).exists(),
"preprocessor_config.json not exists in model path"
);
let img_process_cfg: PreprocessorConfig =
serde_json::from_slice(&std::fs::read(img_process_cfg_file)?)?;
let video_process_cfg_file = path.clone() + "/video_preprocessor_config.json";
assert!(
std::path::Path::new(&video_process_cfg_file).exists(),
"video_preprocessor_config.json not exists in model path"
);
let video_process_cfg: PreprocessorConfig =
serde_json::from_slice(&std::fs::read(video_process_cfg_file)?)?;
let image_token = "<|image_pad|>".to_string();
let video_token = "<|video_pad|>".to_string();
let vision_start_token = "<|vision_start|>".to_string();
let vision_end_token = "<|vision_end|>".to_string();
Ok(Self {
img_process_cfg,
video_process_cfg,
device: device.clone(),
dtype,
image_token,
video_token,
vision_start_token,
vision_end_token,
fps: 2,
min_frames: 4,
max_frames: 768,
})
}
pub fn extract_vision_info(
&self,
mes: &ChatCompletionParameters,
) -> Result<HashMap<String, Vec<String>>> {
let mut vision_map = HashMap::new();
vision_map.insert("image".to_string(), Vec::new());
vision_map.insert("video".to_string(), Vec::new());
for chat_mes in mes.messages.clone() {
if let ChatMessage::User { content, .. } = chat_mes
&& let ChatMessageContent::ContentPart(part_vec) = content
{
for part in part_vec {
if let ChatMessageContentPart::Image(img_part) = part {
let img_url = img_part.image_url;
vision_map.get_mut("image").unwrap().push(img_url.url);
} else if let ChatMessageContentPart::Video(video_part) = part {
let video_url = video_part.video_url;
vision_map.get_mut("video").unwrap().push(video_url.url);
}
}
}
}
Ok(vision_map)
}
pub fn process_img(
&self,
img: &DynamicImage,
img_mean: &Tensor,
img_std: &Tensor,
) -> Result<Tensor> {
let img_h = img.height();
let img_w = img.width();
        // Resize h, w to multiples of patch_size * merge_size (32 here)
let (resize_h, resize_w) = img_smart_resize(
img_h,
img_w,
(self.img_process_cfg.patch_size * self.img_process_cfg.merge_size) as u32,
self.img_process_cfg.size.shortest_edge as u32,
self.img_process_cfg.size.longest_edge as u32,
)?;
let img = img.resize_exact(resize_w, resize_h, image::imageops::FilterType::CatmullRom);
let img_tensor = img_transform(&img, img_mean, img_std, &self.device, self.dtype)?;
// (c, h, w) => (1, c, h, w)
let img_tensor = img_tensor.unsqueeze(0)?;
Ok(img_tensor)
}
pub fn process_vision_tensor(&self, img_tensor: &Tensor) -> Result<(Tensor, Tensor)> {
// Check that data have `num_frames` divisible by `temporal_patch_size`
// img_tensor: (t, c, h, w)
let t = img_tensor.dim(0)?;
let img_tensor = if t % self.img_process_cfg.temporal_patch_size != 0 {
let repeat_num = self.img_process_cfg.temporal_patch_size
- t % self.img_process_cfg.temporal_patch_size;
let repeats = img_tensor.i(t - 1)?.repeat((repeat_num, 1, 1, 1))?;
Tensor::cat(&[img_tensor, &repeats], 0)?
} else {
img_tensor.clone()
};
let channel = img_tensor.dim(1)?;
let grid_t = img_tensor.dim(0)? / self.img_process_cfg.temporal_patch_size;
let grid_h = img_tensor.dim(2)? / self.img_process_cfg.patch_size;
let grid_w = img_tensor.dim(3)? / self.img_process_cfg.patch_size;
let shape = Shape::from(vec![
grid_t,
self.img_process_cfg.temporal_patch_size,
channel,
grid_h / self.img_process_cfg.merge_size,
self.img_process_cfg.merge_size,
self.img_process_cfg.patch_size,
grid_w / self.img_process_cfg.merge_size,
self.img_process_cfg.merge_size,
self.img_process_cfg.patch_size,
]);
let img_tensor = img_tensor.reshape(shape)?;
// shape to // grid_t,
// grid_h / merge_size,
// grid_w / merge_size,
// merge_size,
// merge_size,
// channel,
// temporal_patch_size,
// patch_size,
// patch_size,
let img_tensor = img_tensor.permute(vec![0, 3, 6, 4, 7, 2, 1, 5, 8])?;
let img_tensor = img_tensor
.reshape((
grid_t * grid_h * grid_w,
channel
* self.img_process_cfg.temporal_patch_size
* self.img_process_cfg.patch_size
* self.img_process_cfg.patch_size,
))?
.contiguous()?;
let grid_thw = Tensor::from_vec(
vec![grid_t as u32, grid_h as u32, grid_w as u32],
(1, 3),
&self.device,
)?;
Ok((img_tensor, grid_thw))
}
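    // Worked example for the patchification above with patch_size = 16,
    // temporal_patch_size = 2, merge_size = 2: a (2, 3, 1024, 1024) input yields
    // grid_thw = (1, 64, 64) and a (4096, 1536) patch matrix, since
    // 1 * 64 * 64 = 4096 rows and 3 * 2 * 16 * 16 = 1536 features per patch.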
pub fn process_images(
&self,
imgs: Vec<DynamicImage>,
img_mean: &Tensor,
img_std: &Tensor,
) -> Result<VisionInput> {
let mut pixel_values_vec = Vec::new();
let mut vision_grid_thws_vec = Vec::new();
for img in imgs {
let img_tensor = self.process_img(&img, img_mean, img_std)?;
let img_tensor = Tensor::cat(&[&img_tensor, &img_tensor], 0)?.contiguous()?;
let (img_tensor, grid_thw) = self.process_vision_tensor(&img_tensor)?;
pixel_values_vec.push(img_tensor);
vision_grid_thws_vec.push(grid_thw);
}
let pixel_values = Tensor::cat(&pixel_values_vec, 0)?;
let vision_grid_thws = Tensor::cat(&vision_grid_thws_vec, 0)?;
Ok(VisionInput {
data: pixel_values,
grid_thw: vision_grid_thws,
})
}
pub fn process_videos(
&self,
data: Vec<Tensor>,
img_mean: &Tensor,
img_std: &Tensor,
) -> Result<VisionInput> {
let mut pixel_values_vec = Vec::new();
let mut vision_grid_thws_vec = Vec::new();
for single_video in data {
// 0-255 rescale to 0-1
let video_tensor = single_video.to_dtype(self.dtype)?.affine(1.0 / 255.0, 0.)?;
// normalize
let video_tensor = video_tensor
.broadcast_sub(img_mean)?
.broadcast_div(img_std)?
.contiguous()?;
let (video_tensor, video_grid_thw) = self.process_vision_tensor(&video_tensor)?;
pixel_values_vec.push(video_tensor);
vision_grid_thws_vec.push(video_grid_thw);
}
let pixel_values = Tensor::cat(&pixel_values_vec, 0)?.contiguous()?;
let vision_grid_thws = Tensor::cat(&vision_grid_thws_vec, 0)?.contiguous()?;
Ok(VisionInput {
data: pixel_values,
grid_thw: vision_grid_thws,
})
}
#[allow(unused)]
fn calculate_timestamps(
&self,
frames_indices: Vec<u32>,
fps: f32,
t_merge_size: usize,
) -> Result<Vec<f32>> {
let indices = if !frames_indices.len().is_multiple_of(t_merge_size) {
let mut frames_indices = frames_indices.clone();
let last = frames_indices[frames_indices.len() - 1];
let pad_len = t_merge_size - frames_indices.len() % t_merge_size;
for _ in 0..pad_len {
frames_indices.push(last);
}
frames_indices
} else {
frames_indices.clone()
};
let timestamps: Vec<f32> = indices.iter().map(|&x| x as f32 / fps).collect();
let mut stamps = Vec::new();
for i in (0..timestamps.len()).step_by(t_merge_size) {
let stamp = (timestamps[i] + timestamps[i + t_merge_size - 1]) / 2.0;
stamps.push(stamp);
}
Ok(stamps)
}
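    // Example: frame_indices = [0, 2, 4, 6] at fps = 2.0 with t_merge_size = 2
    // give per-frame timestamps [0.0, 1.0, 2.0, 3.0], which average pairwise to
    // the merged stamps [0.5, 2.5].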
#[allow(unused)]
pub fn process_info(
&self,
messages: &ChatCompletionParameters,
text: &str,
) -> Result<GeneralInput> {
let mut pixel_values = None;
let mut image_grid_thw = None;
let mut pixel_values_video = None;
let mut video_grid_thw: Option<Tensor> = None;
let mut video_metadata: Option<Vec<VideoMetadata>> = None;
let vision_map = self.extract_vision_info(messages)?;
let img_mean =
Tensor::from_slice(&self.img_process_cfg.image_mean, (3, 1, 1), &self.device)?
.to_dtype(self.dtype)?;
let img_std = Tensor::from_slice(&self.img_process_cfg.image_std, (3, 1, 1), &self.device)?
.to_dtype(self.dtype)?;
for (key, vec) in vision_map {
// println!("key: {}, \nvalue: {:?}", key, vec);
if key.eq("image") {
let mut file_vec = Vec::new();
for file in &vec {
let image = get_image(file);
match image {
Ok(img) => file_vec.push(img),
Err(e) => println!("get_image err: {e:?}"),
};
}
if !file_vec.is_empty() {
let vision_input = self.process_images(file_vec, &img_mean, &img_std);
match vision_input {
Ok(img_input) => {
pixel_values = Some(img_input.data);
image_grid_thw = Some(img_input.grid_thw);
}
Err(e) => println!("img process_images err: {e:?}"),
};
}
}
#[cfg(feature = "ffmpeg")]
if key.eq("video") {
let mut file_vec = Vec::new();
let mut video_infos = Vec::new();
for file in &vec {
let video_data = get_video_data(
file,
self.video_process_cfg.patch_size as u32,
self.video_process_cfg.temporal_patch_size as u32,
self.video_process_cfg.merge_size as u32,
self.fps,
self.min_frames,
self.max_frames,
self.video_process_cfg.size.shortest_edge as u32,
self.video_process_cfg.size.longest_edge as u32,
&self.device,
);
match video_data {
Ok((tensor, video_info)) => {
file_vec.push(tensor);
video_infos.push(video_info);
}
Err(e) => println!("get_video_data err: {:?}", e),
};
}
if !file_vec.is_empty() {
let vision_input = self.process_videos(file_vec, &img_mean, &img_std);
match vision_input {
Ok(video_input) => {
pixel_values_video = Some(video_input.data);
video_grid_thw = Some(video_input.grid_thw);
video_metadata = Some(video_infos);
}
Err(e) => println!("video process_videos err: {:?}", e),
};
}
}
}
let merge_length = self.img_process_cfg.merge_size.pow(2);
let mut text = text.to_string();
if let Some(ref image_grid_thw) = image_grid_thw {
let mut index = 0;
while text.contains(&self.image_token) {
let grid_i = image_grid_thw.i(index)?;
let repeat_num =
grid_i.to_vec1::<u32>()?.iter().product::<u32>() as usize / merge_length;
let replace = "<|placeholder|>".repeat(repeat_num);
text = text.replacen(&self.image_token, &replace, 1);
index += 1;
}
text = text.replace("<|placeholder|>", &self.image_token);
}
#[cfg(feature = "ffmpeg")]
if let Some(ref video_grid_thw) = video_grid_thw {
let mut index = 0;
while text.contains(&self.video_token) {
let grid_i = video_grid_thw.i(index)?;
let video_info = &video_metadata.as_ref().unwrap()[index];
let curr_timestamp = self.calculate_timestamps(
video_info.frame_indices.clone(),
video_info.fps,
self.img_process_cfg.merge_size,
)?;
let mut video_placeholder = "".to_string();
let [t, h, w] = grid_i.to_vec1::<u32>()?[..] else {
                    return Err(anyhow!(format!("grid_thw: expected exactly 3 elements")));
};
let frame_seqlen = h * w / merge_length as u32;
for frame_idx in 0..t {
let curr_time = curr_timestamp[frame_idx as usize];
video_placeholder += format!("<{:.1} seconds>", curr_time).as_str();
video_placeholder += self.vision_start_token.as_str();
video_placeholder += "<|placeholder|>".repeat(frame_seqlen as usize).as_str();
video_placeholder += self.vision_end_token.as_str();
}
let three_token = format!(
"{}{}{}",
self.vision_start_token, self.video_token, self.vision_end_token
);
if text.contains(&three_token) {
text = text.replacen(&three_token, &video_placeholder, 1);
} else {
text = text.replacen(&self.video_token, &video_placeholder, 1);
}
index += 1;
}
text = text.replace("<|placeholder|>", &self.video_token);
}
let input = GeneralInput {
replace_text: text,
pixel_values,
image_grid_thw,
pixel_values_video,
video_grid_thw,
};
Ok(input)
}
}
pub fn video_smart_resize(
num_frames: u32,
height: u32,
width: u32,
temporal_factor: u32,
factor: u32,
min_pixels: u32,
max_pixels: u32,
video_ratio: Option<u32>,
) -> Result<(u32, u32)> {
if num_frames < temporal_factor {
return Err(anyhow!(format!(
"{num_frames} must be larger than temporal_factor {temporal_factor}"
)));
}
if height < factor || width < factor {
return Err(anyhow!(format!(
"height:{height} or width:{width} must be larger than factor:{factor}"
)));
}
if std::cmp::max(height, width) / std::cmp::min(height, width) > 200 {
return Err(anyhow!(format!(
"absolute aspect ratio mush be smaller than {}, got {}",
200,
std::cmp::max(height, width) / std::cmp::min(height, width)
)));
}
let mut image_factor = factor;
if let Some(ratio) = video_ratio {
image_factor = lcm(image_factor, ratio);
}
let mut h_bar = round_by_factor(height, image_factor);
let mut w_bar = round_by_factor(width, image_factor);
let t_bar = round_by_factor(num_frames, temporal_factor);
if t_bar * h_bar * w_bar > max_pixels {
let beta = ((num_frames * height * width) as f32 / max_pixels as f32).sqrt();
h_bar = std::cmp::max(
image_factor,
floor_by_factor(height as f32 / beta, image_factor),
);
w_bar = std::cmp::max(
image_factor,
floor_by_factor(width as f32 / beta, image_factor),
);
} else if t_bar * h_bar * w_bar < min_pixels {
let beta = (min_pixels as f32 / (num_frames * height * width) as f32).sqrt();
h_bar = ceil_by_factor(height as f32 * beta, image_factor);
w_bar = ceil_by_factor(width as f32 * beta, image_factor);
}
Ok((h_bar, w_bar))
}
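// Hedged sanity-check sketch for video_smart_resize (the concrete numbers are
// illustrative assumptions, not values from the original test suite): whichever
// branch runs, the floor/ceil/round_by_factor helpers keep both outputs
// multiples of the effective factor, here lcm(32, 16) = 32.
#[cfg(test)]
mod video_smart_resize_tests {
    use super::video_smart_resize;

    #[test]
    fn outputs_are_multiples_of_the_factor() -> anyhow::Result<()> {
        // 16 frames of 720p video, patch factor 32, a pixel budget that forces
        // the downscale branch, and the extra scaler constraint of 16.
        let (h, w) =
            video_smart_resize(16, 720, 1280, 2, 32, 128 * 28 * 28, 768 * 28 * 28, Some(16))?;
        assert_eq!(h % 32, 0);
        assert_eq!(w % 32, 0);
        Ok(())
    }
}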
#[cfg(feature = "ffmpeg")]
pub fn get_video_data(
file: &String,
patch_size: u32,
temporal_patch_size: u32,
merge_size: u32,
fps: u32,
min_frames: u32,
max_frames: u32,
min_pixels: u32,
max_pixels: u32,
device: &Device,
) -> Result<(Tensor, VideoMetadata)> {
ffmpeg::init().map_err(|e| anyhow!(format!("Failed to initialize ffmpeg: {}", e)))?;
let mut ictx = ffmpeg::format::input(&file)
.map_err(|e| anyhow!(format!("Failed to open video file: {}", e)))?;
let input = ictx
.streams()
.best(ffmpeg::media::Type::Video)
.ok_or_else(|| anyhow!(format!("No video stream found")))?;
let video_stream_index = input.index();
let context_decoder = ffmpeg::codec::context::Context::from_parameters(input.parameters())
.map_err(|e| anyhow!(format!("Failed to crate decoder context: {}", e)))?;
let mut decoder = context_decoder
.decoder()
.video()
.map_err(|e| anyhow!(format!("Failed to decoder video: {}", e)))?;
let video_h = decoder.height();
let video_w = decoder.width();
let format = decoder.format();
let frames = input.frames();
let rate = input.rate().0 as f32 / input.rate().1 as f32;
let duration = frames as f32 * 1.0 / rate;
    // sample `fps` frames per second (2 by default)
let nframes = (frames as f32 / rate * fps as f32).round() as u32;
let nframes = std::cmp::min(
std::cmp::min(std::cmp::max(nframes, min_frames), max_frames),
frames as u32,
);
let sample_interval = (frames as f32 / nframes as f32).round() as u32;
let mut frame_indices = Vec::new();
let mut frame_id = 0_u32;
    // When frames are rescaled through the scaler, width and height must be
    // multiples of 16, otherwise the rescaled frames come out corrupted. So the
    // resize target size uses the least common multiple of 16 and image_factor.
let (resize_h, resize_w) = video_smart_resize(
nframes,
video_h,
video_w,
temporal_patch_size,
patch_size * merge_size,
min_pixels,
max_pixels,
Some(16),
)?;
let mut scaler = ffmpeg::software::scaling::context::Context::get(
format,
video_w,
video_h,
ffmpeg::format::Pixel::RGB24,
resize_w,
resize_h,
ffmpeg::software::scaling::flag::Flags::BILINEAR
| ffmpeg::software::scaling::flag::Flags::ACCURATE_RND,
)
.map_err(|e| anyhow!(format!("Failed to crate scaler: {}", e)))?;
let mut frames_vec = Vec::new();
let mut receive_and_process_decoded_frames =
|decoder: &mut ffmpeg::decoder::Video| -> Result<()> {
let mut decoded = ffmpeg::frame::Video::empty();
while decoder.receive_frame(&mut decoded).is_ok() {
if frame_id.is_multiple_of(sample_interval) {
frame_indices.push(frame_id);
let mut rgb_frame = ffmpeg::frame::Video::empty();
scaler
.run(&decoded, &mut rgb_frame)
.map_err(|e| anyhow!(format!("Failed to scaler run decoded: {}", e)))?;
// save_file(&rgb_frame, frame_id as usize);
let frame_data = rgb_frame.data(0);
let frame_tensor = Tensor::from_slice(
frame_data,
(resize_h as usize, resize_w as usize, 3),
device,
)?
.permute((2, 0, 1))?;
frames_vec.push(frame_tensor);
}
frame_id += 1;
}
Ok(())
};
for (stream, packet) in ictx.packets() {
if stream.index() == video_stream_index {
decoder
.send_packet(&packet)
.map_err(|e| anyhow!(format!("Failed to send packet: {}", e)))?;
receive_and_process_decoded_frames(&mut decoder)?;
}
}
decoder
.send_eof()
.map_err(|e| anyhow!(format!("Failed to decoder.send_eof(): {}", e)))?;
receive_and_process_decoded_frames(&mut decoder)?;
if frames_vec.is_empty() {
return Err(anyhow!("No frames extracted from video".to_string()));
}
// (t, c, h, w)
let frames_tensor = Tensor::stack(&frames_vec, 0)?.contiguous()?;
let video_info = VideoMetadata {
total_num_frames: frames as u32,
fps: rate,
width: video_w,
height: video_h,
duration,
frame_indices,
};
Ok((frames_tensor, video_info))
}
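// Frame-sampling example for get_video_data: a 10 s clip at 30 fps has 300
// frames; with fps = 2 the target is round(300 / 30 * 2) = 20 frames, so
// sample_interval = round(300 / 20) = 15 and every 15th decoded frame is kept.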
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/qwen3vl/model.rs | src/models/qwen3vl/model.rs | use anyhow::{Result, anyhow};
use candle_core::{D, DType, IndexOp, Shape, Tensor};
use candle_nn::{
Activation, Embedding, Init, LayerNorm, Linear, Module, RmsNorm, VarBuilder, embedding, linear,
linear_no_bias, rms_norm,
};
use crate::{
models::{
common::{GateUpDownMLP, TwoLinearMLP, eager_attention_forward, get_layer_norm},
qwen3vl::config::{Qwen3VLConfig, Qwen3VLTextConfig, Qwen3VLVisionConfig},
},
position_embed::rope::{
Qwen2_5VisionRotaryEmbedding, Qwen3VLTextRotaryEmbedding, apply_rotary_pos_emb,
apply_rotary_pos_emb_vision,
},
utils::tensor_utils::{
bitor_tensor, get_vision_next_indices, linspace, mask_index_add, masked_scatter_dim0,
nonzero_index, prepare_causal_attention_mask, prod_tensor_last_dim, split_tensor,
zero_index,
},
};
pub struct Qwen3VLVisionPatchEmbed {
conv3d_weight: Tensor,
conv3d_bias: Tensor,
}
impl Qwen3VLVisionPatchEmbed {
pub fn new(cfg: &Qwen3VLVisionConfig, vb: VarBuilder) -> Result<Self> {
let patch_size = cfg.patch_size;
let temporal_patch_size = cfg.temporal_patch_size;
let in_channels = cfg.in_channels;
let embed_dim = cfg.hidden_size;
// conv3d weight key: visual.patch_embed.proj.weight, value: Tensor[dims 1024, 3, 2, 16, 16; bf16, cuda:0]
// (1024, 3, 2, 16, 16) -> (1024, 1536) -> (1536, 1024)
let conv3d_weight = vb
.get_with_hints(
(
embed_dim,
in_channels,
temporal_patch_size,
patch_size,
patch_size,
),
"proj.weight",
Init::Const(1.),
)?
.flatten(1, 4)?
.t()?;
// (1024) -> (1, 1024)
let conv3d_bias = vb
.get_with_hints((embed_dim,), "proj.bias", Init::Const(0.))?
.unsqueeze(0)?;
Ok(Self {
conv3d_weight,
conv3d_bias,
})
}
pub fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
// hidden_states shape: (grid_t*grid_h*grid_w, c*temporal_patch_size*patch_size*patch_size)
// ((), 1536) matmul (1536, 1024) -> ((), 1024)
let hidden_states = hidden_states.matmul(&self.conv3d_weight)?;
let hidden_states = hidden_states.broadcast_add(&self.conv3d_bias)?;
Ok(hidden_states)
}
}
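// Why a matmul instead of a real Conv3d: the patch-embed "convolution" uses a
// stride equal to its kernel size, so windows never overlap and the op
// degenerates into a linear projection of pre-flattened patches. With the
// shapes in the comment above (in_channels = 3, temporal_patch_size = 2,
// patch_size = 16) each patch vector has 3 * 2 * 16 * 16 = 1536 elements,
// projected to embed_dim = 1024. A minimal shape check (illustrative sketch,
// not part of the model):
//
//     let patches = Tensor::zeros((64, 1536), DType::BF16, &device)?;
//     let out = patch_embed.forward(&patches)?;
//     assert_eq!(out.dims(), &[64, 1024]);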
pub struct Qwen3VLVisionPatchMerger {
hidden_size: usize,
use_postshuffle_norm: bool,
norm: LayerNorm,
linear_fc1: Linear,
act_fn: Activation,
linear_fc2: Linear,
}
impl Qwen3VLVisionPatchMerger {
pub fn new(
config: &Qwen3VLVisionConfig,
vb: VarBuilder,
use_postshuffle_norm: bool,
) -> Result<Self> {
let hidden_size = config.hidden_size * config.spatial_merge_size.pow(2);
let norm_size = if use_postshuffle_norm {
hidden_size
} else {
config.hidden_size
};
let norm = get_layer_norm(vb.pp("norm"), 1e-6, norm_size)?;
let linear_fc1 = linear(hidden_size, hidden_size, vb.pp("linear_fc1"))?;
let act_fn = Activation::Gelu;
let linear_fc2 = linear(hidden_size, config.out_hidden_size, vb.pp("linear_fc2"))?;
Ok(Self {
hidden_size,
use_postshuffle_norm,
norm,
linear_fc1,
act_fn,
linear_fc2,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = if self.use_postshuffle_norm {
xs.reshape(((), self.hidden_size))?
} else {
xs.clone()
};
let xs = self.norm.forward(&xs)?.reshape(((), self.hidden_size))?;
let xs = self
.linear_fc2
.forward(&self.act_fn.forward(&self.linear_fc1.forward(&xs)?)?)?;
Ok(xs)
}
}
pub struct Qwen3VLVisionAttention {
num_heads: usize,
qkv: Linear,
proj: Linear,
scaling: f64,
}
impl Qwen3VLVisionAttention {
pub fn new(config: Qwen3VLVisionConfig, vb: VarBuilder) -> Result<Self> {
let hidden_size = config.hidden_size;
let num_heads = config.num_heads;
let head_dim = hidden_size / num_heads;
let qkv = linear(hidden_size, hidden_size * 3, vb.pp("qkv"))?;
let proj = linear(hidden_size, hidden_size, vb.pp("proj"))?;
let scaling = 1.0 / (head_dim as f64).sqrt();
Ok(Self {
num_heads,
qkv,
proj,
scaling,
})
}
pub fn forward(
&self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
cu_seqlens: &Tensor,
) -> Result<Tensor> {
// xs: (seq_len, hidden_size)
let seq_length = xs.dim(0)?;
// (seq_len, hidden_size) -> (seq_len, hidden_size*3)
// -> (seq_len, 3, num_heads, head_dim)
// -> (3, seq_len, num_heads, head_dim)
let qkv_states = xs
.apply(&self.qkv)?
.reshape((seq_length, 3, self.num_heads, ()))?
.permute((1, 0, 2, 3))?;
// (seq_len, num_heads, head_dim)
let query_states = qkv_states.i(0)?.contiguous()?;
let key_states = qkv_states.i(1)?.contiguous()?;
let value_states = qkv_states.i(2)?.contiguous()?;
let (query_states, key_states) =
apply_rotary_pos_emb_vision(&query_states, &key_states, cos, sin)?;
// (seq_len, num_heads, head_dim) -> (num_heads, seq_len, head_dim) -> (1, num_heads, seq_len, head_dim)
let query_states = query_states.transpose(0, 1)?.unsqueeze(0)?.contiguous()?;
let key_states = key_states.transpose(0, 1)?.unsqueeze(0)?.contiguous()?;
let value_states = value_states.transpose(0, 1)?.unsqueeze(0)?.contiguous()?;
let cu_last_id = cu_seqlens.dim(0)? - 1;
let lengths = cu_seqlens.i(1..)?.sub(&cu_seqlens.i(..cu_last_id)?)?;
let chunks: Vec<usize> = lengths
.to_vec1::<u32>()?
.iter()
.map(|&x| x as usize)
.collect();
let q_splits = split_tensor(&query_states, &chunks, 2)?;
let k_splits = split_tensor(&key_states, &chunks, 2)?;
let v_splits = split_tensor(&value_states, &chunks, 2)?;
let mut attn_outputs = Vec::new();
for (q, (k, v)) in q_splits.iter().zip(k_splits.iter().zip(v_splits.iter())) {
let output = eager_attention_forward(q, k, v, None, None, self.scaling)?;
attn_outputs.push(output);
}
let attn_output = Tensor::cat(&attn_outputs, 1)?;
let attn_output = attn_output.reshape((seq_length, ()))?.contiguous()?;
        let attn_output = attn_output.apply(&self.proj)?;
        Ok(attn_output)
}
}
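// Note on `cu_seqlens`: vision attention must not cross image/frame
// boundaries, so instead of building a block-diagonal mask the q/k/v tensors
// are split along the sequence dimension into per-window chunks that are
// attended independently and concatenated back. Illustrative example:
// cu_seqlens = [0, 4, 10] yields chunk lengths [4, 6], i.e. tokens 0..4
// attend only among themselves and tokens 4..10 likewise.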
pub struct Qwen3VLVisionBlock {
norm1: LayerNorm,
norm2: LayerNorm,
attn: Qwen3VLVisionAttention,
mlp: TwoLinearMLP,
}
impl Qwen3VLVisionBlock {
pub fn new(config: Qwen3VLVisionConfig, vb: VarBuilder) -> Result<Self> {
let norm1 = get_layer_norm(vb.pp("norm1"), 1e-6, config.hidden_size)?;
let norm2 = get_layer_norm(vb.pp("norm2"), 1e-6, config.hidden_size)?;
let attn = Qwen3VLVisionAttention::new(config.clone(), vb.pp("attn"))?;
let mlp = TwoLinearMLP::new(
vb.pp("mlp"),
config.hidden_size,
config.intermediate_size,
config.hidden_act,
true,
"linear_fc1",
"linear_fc2",
)?;
Ok(Self {
norm1,
norm2,
attn,
mlp,
})
}
pub fn forward(
&self,
xs: &Tensor,
cu_seqlens: &Tensor,
cos: &Tensor,
sin: &Tensor,
) -> Result<Tensor> {
let residual = xs.clone();
let xs = self.norm1.forward(xs)?;
let xs = self.attn.forward(&xs, cos, sin, cu_seqlens)?;
let xs = (residual + xs)?;
let residual = xs.clone();
let xs = self.mlp.forward(&self.norm2.forward(&xs)?)?;
let xs = (residual + xs)?;
Ok(xs)
}
}
pub struct Qwen3VLVisionModel {
spatial_merge_size: usize,
patch_embed: Qwen3VLVisionPatchEmbed,
pos_embed: Embedding,
num_grid_per_side: u32,
rotary_pos_emb: Qwen2_5VisionRotaryEmbedding,
blocks: Vec<Qwen3VLVisionBlock>,
merger: Qwen3VLVisionPatchMerger,
deepstack_visual_indexes: Vec<usize>,
deepstack_merger_list: Vec<Qwen3VLVisionPatchMerger>,
dtype: DType,
}
impl Qwen3VLVisionModel {
pub fn new(config: Qwen3VLVisionConfig, vb: VarBuilder) -> Result<Self> {
let spatial_merge_size = config.spatial_merge_size;
let patch_embed = Qwen3VLVisionPatchEmbed::new(&config, vb.pp("patch_embed"))?;
let pos_embed = embedding(
config.num_position_embeddings,
config.hidden_size,
vb.pp("pos_embed"),
)?;
let num_grid_per_side = (config.num_position_embeddings as f32).sqrt() as u32;
let head_dim = config.hidden_size / config.num_heads;
let rotary_pos_emb = Qwen2_5VisionRotaryEmbedding::new(head_dim / 2, None);
let mut blocks = Vec::new();
let vb_blocks = vb.pp("blocks");
for i in 0..config.depth {
let block = Qwen3VLVisionBlock::new(config.clone(), vb_blocks.pp(i))?;
blocks.push(block);
}
let merger = Qwen3VLVisionPatchMerger::new(&config, vb.pp("merger"), false)?;
let deepstack_visual_indexes = config.deepstack_visual_indexes.clone();
let mut deepstack_merger_list = Vec::new();
let vb_deepstack = vb.pp("deepstack_merger_list");
for i in 0..deepstack_visual_indexes.len() {
let merger_i = Qwen3VLVisionPatchMerger::new(&config, vb_deepstack.pp(i), true)?;
deepstack_merger_list.push(merger_i);
}
Ok(Self {
spatial_merge_size,
patch_embed,
pos_embed,
num_grid_per_side,
rotary_pos_emb,
blocks,
merger,
deepstack_visual_indexes,
deepstack_merger_list,
dtype: vb.dtype(),
})
}
pub fn fast_pos_embed_interpolate(&self, grid_thw: &Tensor) -> Result<Tensor> {
let mut idx_list = vec![vec![]; 4];
let mut weight_list = vec![vec![]; 4];
let mut split_idx = vec![];
for i in 0..grid_thw.dim(0)? {
let [_, h, w] = grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
return Err(anyhow!(format!("grid_thw Expected exactly 3 elements")));
};
split_idx.push((h * w) as usize);
let num_grid_per_side_sub_one = (self.num_grid_per_side - 1) as f32;
let h_idxs = linspace(
0.0,
num_grid_per_side_sub_one,
h as usize,
grid_thw.device(),
)?;
let w_idxs = linspace(
0.0,
num_grid_per_side_sub_one,
w as usize,
grid_thw.device(),
)?;
let h_idxs_floor = h_idxs.to_dtype(candle_core::DType::U32)?;
let w_idxs_floor = w_idxs.to_dtype(candle_core::DType::U32)?;
let h_idxs_ceil = h_idxs_floor
.affine(1.0, 1.0)?
.clamp(0u32, num_grid_per_side_sub_one as u32)?;
let w_idxs_ceil = w_idxs_floor
.affine(1.0, 1.0)?
.clamp(0u32, num_grid_per_side_sub_one as u32)?;
let dh = h_idxs
.sub(&h_idxs_floor.to_dtype(h_idxs.dtype())?)?
.unsqueeze(D::Minus1)?;
let dw = w_idxs
.sub(&w_idxs_floor.to_dtype(h_idxs.dtype())?)?
.unsqueeze(0)?;
let base_h = h_idxs_floor
.affine(self.num_grid_per_side as f64, 0.0)?
.unsqueeze(D::Minus1)?;
let base_h_ceil = h_idxs_ceil
.affine(self.num_grid_per_side as f64, 0.0)?
.unsqueeze(D::Minus1)?;
idx_list[0].extend_from_slice(
&base_h
.broadcast_add(&w_idxs_floor.unsqueeze(0)?)?
.flatten_all()?
.to_vec1::<u32>()?,
);
idx_list[1].extend_from_slice(
&base_h
.broadcast_add(&w_idxs_ceil.unsqueeze(0)?)?
.flatten_all()?
.to_vec1::<u32>()?,
);
idx_list[2].extend_from_slice(
&base_h_ceil
.broadcast_add(&w_idxs_floor.unsqueeze(0)?)?
.flatten_all()?
.to_vec1::<u32>()?,
);
idx_list[3].extend_from_slice(
&base_h_ceil
.broadcast_add(&w_idxs_ceil.unsqueeze(0)?)?
.flatten_all()?
.to_vec1::<u32>()?,
);
let one_sub_dh = Tensor::ones_like(&dh)?.sub(&dh)?;
let one_sub_dw = Tensor::ones_like(&dw)?.sub(&dw)?;
weight_list[0].extend_from_slice(
&one_sub_dh
.broadcast_mul(&one_sub_dw)?
.flatten_all()?
.to_vec1::<f32>()?,
);
weight_list[1].extend_from_slice(
&one_sub_dh
.broadcast_mul(&dw)?
.flatten_all()?
.to_vec1::<f32>()?,
);
weight_list[2].extend_from_slice(
&dh.broadcast_mul(&one_sub_dw)?
.flatten_all()?
.to_vec1::<f32>()?,
);
weight_list[3]
.extend_from_slice(&dh.broadcast_mul(&dw)?.flatten_all()?.to_vec1::<f32>()?);
}
let idx_tensor = Tensor::new(idx_list, grid_thw.device())?;
let weight_tensor = Tensor::new(weight_list, grid_thw.device())?.to_dtype(self.dtype)?;
let pos_embeds = self
.pos_embed
.forward(&idx_tensor)?
.broadcast_mul(&weight_tensor.unsqueeze(D::Minus1)?)?;
let patch_pos_embeds = pos_embeds
.i(0)?
.add(&pos_embeds.i(1)?)?
.add(&pos_embeds.i(2)?)?
.add(&pos_embeds.i(3)?)?;
let mut patch_pos_embeds_permute = vec![];
let patch_pos_embeds = split_tensor(&patch_pos_embeds, &split_idx, 0)?;
let merge_size = self.spatial_merge_size;
for (i, pos_embed) in patch_pos_embeds.iter().enumerate() {
let [t, h, w] = grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
return Err(anyhow!(format!("grid_thw Expected exactly 3 elements")));
};
// let pos_embed = &patch_pos_embeds[i];
            let pos_embed_last_dim = pos_embed.dim(D::Minus1)?;
let pos_embed = pos_embed.repeat((t as usize, 1))?;
let shape = Shape::from(vec![
t as usize,
h as usize / merge_size,
merge_size,
w as usize / merge_size,
merge_size,
                pos_embed_last_dim,
]);
let pos_embed = pos_embed
.reshape(shape)?
.permute((0, 1, 3, 2, 4, 5))?
.flatten(0, 4)?;
patch_pos_embeds_permute.push(pos_embed);
}
let patch_pos_embeds = Tensor::cat(&patch_pos_embeds_permute, 0)?;
Ok(patch_pos_embeds)
}
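    // The method above performs bilinear interpolation of the learned position
    // embedding grid: for every target (h, w) location it gathers the four
    // surrounding grid entries (the floor/ceil combinations in idx_list[0..4])
    // and blends them with weights (1-dh)(1-dw), (1-dh)dw, dh(1-dw) and dh*dw.
    // Illustrative example: fractional offsets dh = 0.25, dw = 0.5 give corner
    // weights 0.375, 0.375, 0.125 and 0.125.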
pub fn rot_pos_emb(&self, grid_thw: &Tensor) -> Result<Tensor> {
let merge_size = self.spatial_merge_size;
let max_hw = grid_thw.i((.., 1..))?.max_all()?.to_scalar::<u32>()?;
let freq_table = self
.rotary_pos_emb
.forward(max_hw as usize, grid_thw.device())?;
let mut pos_ids_vec = vec![];
for i in 0..grid_thw.dim(0)? {
let [t, h, w] = grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
return Err(anyhow!(format!("grid_thw Expected exactly 3 elements")));
};
let merged_h = h / merge_size as u32;
let merged_w = w / merge_size as u32;
let blocks_rows = Tensor::arange(0, merged_h, grid_thw.device())?;
let blocks_cols = Tensor::arange(0, merged_w, grid_thw.device())?;
let intra_row = Tensor::arange(0, merge_size as u32, grid_thw.device())?;
let intra_col = Tensor::arange(0, merge_size as u32, grid_thw.device())?;
let row_idx = blocks_rows
.reshape(((), 1, 1, 1))?
.contiguous()?
.affine(merge_size as f64, 0.0)?
.broadcast_add(&intra_row.reshape((1, 1, (), 1))?.contiguous()?)?;
let col_idx = blocks_cols
.reshape((1, (), 1, 1))?
.contiguous()?
.affine(merge_size as f64, 0.0)?
.broadcast_add(&intra_col.reshape((1, 1, 1, ()))?.contiguous()?)?;
let row_idx = row_idx
.expand((merged_h as usize, merged_w as usize, merge_size, merge_size))?
.flatten_all()?;
let col_idx = col_idx
.expand((merged_h as usize, merged_w as usize, merge_size, merge_size))?
.flatten_all()?;
let mut coords = Tensor::stack(&[row_idx, col_idx], D::Minus1)?.contiguous()?;
if t > 1 {
coords = coords.repeat((t as usize, 1))?;
}
pos_ids_vec.push(coords);
}
let pos_ids = Tensor::cat(&pos_ids_vec, 0)?;
let pos_ids_h = pos_ids.i((.., 0))?.contiguous()?;
        // the second column holds the index along the w dimension
let pos_ids_w = pos_ids.i((.., 1))?.contiguous()?;
let rotary_pos_emb_h = freq_table.index_select(&pos_ids_h, 0)?;
let rotary_pos_emb_w = freq_table.index_select(&pos_ids_w, 0)?;
        // each patch fuses positional information from both its h index and its w index
let rotary_pos_emb = Tensor::cat(&[rotary_pos_emb_h, rotary_pos_emb_w], 1)?.contiguous()?;
Ok(rotary_pos_emb)
}
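    // The vision rotary embedding is two-dimensional: each patch is assigned a
    // (row, col) coordinate, the frequency table is indexed once by the row and
    // once by the column, and the two results are concatenated so every patch's
    // rotary phase encodes both spatial coordinates. Rows and columns are
    // enumerated block-by-block (merge_size x merge_size) so that patches
    // belonging to the same merge block stay adjacent in the sequence.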
pub fn forward(
&self,
hidden_states: &Tensor,
grid_thw: &Tensor,
) -> Result<(Tensor, Vec<Tensor>)> {
let hidden_states = self.patch_embed.forward(hidden_states)?;
let pos_embeds = self.fast_pos_embed_interpolate(grid_thw)?;
let hidden_states = hidden_states.broadcast_add(&pos_embeds)?;
let rotary_pos_emb = self.rot_pos_emb(grid_thw)?;
let seq_len = hidden_states.dim(0)?;
let mut hidden_states = hidden_states.reshape((seq_len, ()))?;
let rotary_pos_emb = rotary_pos_emb.reshape((seq_len, ()))?;
let emb = Tensor::cat(&[&rotary_pos_emb, &rotary_pos_emb], D::Minus1)?;
let cos = emb.cos()?;
let sin = emb.sin()?;
let cu_seqlens = grid_thw.i((.., 1))?.mul(&grid_thw.i((.., 2))?)?;
let grid_t = grid_thw.i((.., 0))?.to_vec1::<u32>()?;
let mut cu_seqlens_repeat = Vec::new();
for (index, t) in grid_t.iter().enumerate() {
cu_seqlens_repeat.push(cu_seqlens.i(index)?.repeat(*t as usize)?);
}
let cu_seqlens_full = Tensor::cat(&cu_seqlens_repeat, 0)?.flatten_all()?;
let cu_seqlens = cu_seqlens_full
.to_dtype(DType::F64)?
.cumsum(0)?
.to_dtype(DType::U32)?
.pad_with_zeros(D::Minus1, 1, 0)?;
let mut deepstack_feature_lists = vec![];
        for (layer_num, block) in self.blocks.iter().enumerate() {
            hidden_states = block.forward(&hidden_states, &cu_seqlens, &cos, &sin)?;
            if let Some(index) = self
                .deepstack_visual_indexes
                .iter()
                .position(|&x| x == layer_num)
            {
                let deepstack_feature =
                    self.deepstack_merger_list[index].forward(&hidden_states)?;
                deepstack_feature_lists.push(deepstack_feature);
            }
        }
hidden_states = self.merger.forward(&hidden_states)?;
Ok((hidden_states, deepstack_feature_lists))
}
}
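// "Deepstack" features: the outputs of selected intermediate vision blocks
// (config.deepstack_visual_indexes) are passed through their own PatchMerger
// and returned alongside the final merged features. The text model later adds
// each of these tensors back into its hidden states at the visual token
// positions of the matching early decoder layers (see
// Qwen3VLTextModel::forward), so the LLM sees multi-level visual features
// rather than only the last vision layer.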
pub struct Qwen3VLTextAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
q_norm: RmsNorm,
k_norm: RmsNorm,
num_attention_heads: usize,
num_key_value_heads: usize,
num_kv_groups: usize,
head_dim: usize,
scaling: f64,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Qwen3VLTextAttention {
pub fn new(config: Qwen3VLTextConfig, vb: VarBuilder) -> Result<Self> {
let hidden_size = config.hidden_size;
let num_attention_heads = config.num_attention_heads;
let head_dim = config.head_dim;
let num_key_value_heads = config.num_key_value_heads;
let num_kv_groups = num_attention_heads / num_key_value_heads;
let scaling = 1f64 / f64::sqrt(head_dim as f64);
let (q_proj, k_proj, v_proj, o_proj) = if config.attention_bias {
let q_proj = linear(hidden_size, num_attention_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear(hidden_size, num_key_value_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear(hidden_size, num_key_value_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear(num_attention_heads * head_dim, hidden_size, vb.pp("o_proj"))?;
(q_proj, k_proj, v_proj, o_proj)
} else {
let q_proj =
linear_no_bias(hidden_size, num_attention_heads * head_dim, vb.pp("q_proj"))?;
let k_proj =
linear_no_bias(hidden_size, num_key_value_heads * head_dim, vb.pp("k_proj"))?;
let v_proj =
linear_no_bias(hidden_size, num_key_value_heads * head_dim, vb.pp("v_proj"))?;
let o_proj =
linear_no_bias(num_attention_heads * head_dim, hidden_size, vb.pp("o_proj"))?;
(q_proj, k_proj, v_proj, o_proj)
};
let q_norm = rms_norm(head_dim, config.rms_norm_eps, vb.pp("q_norm"))?;
let k_norm = rms_norm(head_dim, config.rms_norm_eps, vb.pp("k_norm"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
q_norm,
k_norm,
num_attention_heads,
num_key_value_heads,
num_kv_groups,
head_dim,
scaling,
kv_cache: None,
})
}
pub fn forward(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?.reshape((
b_sz,
q_len,
self.num_attention_heads,
self.head_dim,
))?;
let query_states = self.q_norm.forward(&query_states)?.transpose(1, 2)?;
let key_states = self.k_proj.forward(xs)?.reshape((
b_sz,
q_len,
self.num_key_value_heads,
self.head_dim,
))?;
let key_states = self.k_norm.forward(&key_states)?.transpose(1, 2)?;
let value_states = self.v_proj.forward(xs)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
apply_rotary_pos_emb(&query_states, &key_states, cos, sin, false)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let attn_output = eager_attention_forward(
&query_states,
&key_states,
&value_states,
Some(self.num_kv_groups),
attention_mask,
self.scaling,
)?;
let attn_output =
attn_output.reshape((b_sz, q_len, self.num_attention_heads * self.head_dim))?;
let attn_output = attn_output.apply(&self.o_proj)?;
Ok(attn_output)
}
pub fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
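// KV-cache note: each forward pass concatenates the new key/value states onto
// the cached ones along the sequence axis (dim 2), so the cache grows by
// `q_len` per step. Illustrative shapes for one decode step (hypothetical
// sizes): a cached k of (1, num_kv_heads, 42, head_dim) plus a new k of
// (1, num_kv_heads, 1, head_dim) yields (1, num_kv_heads, 43, head_dim).
// `clear_kv_cache` must be called between independent requests.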
pub struct Qwen3VLTextDecoderLayer {
self_attn: Qwen3VLTextAttention,
mlp: GateUpDownMLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl Qwen3VLTextDecoderLayer {
pub fn new(config: Qwen3VLTextConfig, vb: VarBuilder) -> Result<Self> {
let self_attn = Qwen3VLTextAttention::new(config.clone(), vb.pp("self_attn"))?;
let mlp = GateUpDownMLP::new(
vb.pp("mlp"),
config.hidden_size,
config.intermediate_size,
config.hidden_act,
false,
)?;
let input_layernorm = rms_norm(
config.hidden_size,
config.rms_norm_eps,
vb.pp("input_layernorm"),
)?;
let post_attention_layernorm = rms_norm(
config.hidden_size,
config.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
pub fn forward(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let residual = xs.clone();
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, cos, sin, attention_mask)?;
let xs = residual.add(&xs)?;
let residual = xs.clone();
let xs = self.post_attention_layernorm.forward(&xs)?;
let xs = self.mlp.forward(&xs)?;
let xs = residual.add(&xs)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache();
}
}
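// The decoder layer uses the standard pre-norm residual layout:
//
//     x = x + SelfAttn(RMSNorm(x))
//     x = x + MLP(RMSNorm(x))
//
// i.e. normalization is applied to the branch input, not to the residual sum.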
pub struct Qwen3VLTextModel {
embed_tokens: Embedding,
layers: Vec<Qwen3VLTextDecoderLayer>,
norm: RmsNorm,
rotary_emb: Qwen3VLTextRotaryEmbedding,
mrope_section: Vec<usize>,
}
impl Qwen3VLTextModel {
pub fn new(config: Qwen3VLTextConfig, vb: VarBuilder) -> Result<Self> {
let vocab_size = config.vocab_size;
let embed_tokens = embedding(vocab_size, config.hidden_size, vb.pp("embed_tokens"))?;
let mut layers = vec![];
let vb_l = vb.pp("layers");
for layer_idx in 0..config.num_hidden_layers {
let layer = Qwen3VLTextDecoderLayer::new(config.clone(), vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = rms_norm(config.hidden_size, config.rms_norm_eps, vb.pp("norm"))?;
let head_dim = config.head_dim;
let rotary_emb = Qwen3VLTextRotaryEmbedding::new(head_dim, config.rope_theta);
let mrope_section = config.rope_scaling.mrope_section.clone();
Ok(Self {
embed_tokens,
layers,
norm,
rotary_emb,
mrope_section,
})
}
pub fn forward(
&mut self,
inputs_embeds: &Tensor,
seqlen_offset: usize,
position_ids: Option<&Tensor>,
visual_pos_masks: Option<&Tensor>,
deepstack_visual_embeds: Option<Vec<Tensor>>,
) -> Result<Tensor> {
let (b_size, seq_len, _) = inputs_embeds.dims3()?;
let position_ids = match position_ids {
Some(ids) => ids.clone(),
None => Tensor::arange(
seqlen_offset as u32,
(seq_len + seqlen_offset) as u32,
inputs_embeds.device(),
)?
.unsqueeze(0)?
.unsqueeze(0)?
.broadcast_as((3, b_size, seq_len))?,
};
let (cos, sin) = self.rotary_emb.forward(
&position_ids,
inputs_embeds.dtype(),
self.mrope_section.clone(),
)?;
let mut xs = inputs_embeds.clone();
let attention_mask: Option<Tensor> = {
if seq_len <= 1 {
None
} else {
Some(prepare_causal_attention_mask(
b_size,
seq_len,
0,
inputs_embeds.device(),
)?)
}
};
for (layer_idx, layer) in self.layers.iter_mut().enumerate() {
xs = layer.forward(&xs, &cos, &sin, attention_mask.as_ref())?;
if let Some(deepstack_embeds) = deepstack_visual_embeds.as_ref()
&& layer_idx < deepstack_embeds.len()
{
xs = mask_index_add(
&xs.squeeze(0)?,
&visual_pos_masks.unwrap().squeeze(0)?,
&deepstack_embeds[layer_idx],
)?
.unsqueeze(0)?;
}
}
let xs = xs.apply(&self.norm)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
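// Position ids here are multimodal rotary (M-RoPE) ids of shape
// (3, batch, seq_len): one row each for the temporal, height and width axes.
// For pure text the three rows are identical (the arange fallback above), and
// `mrope_section` tells the rotary embedding how many frequency dimensions
// each axis receives; the actual split lives in Qwen3VLTextRotaryEmbedding.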
pub struct Qwen3VLModel {
config: Qwen3VLConfig,
visual: Qwen3VLVisionModel,
language_model: Qwen3VLTextModel,
lm_head: Linear,
rope_deltas: Option<Tensor>,
}
impl Qwen3VLModel {
pub fn new(config: Qwen3VLConfig, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let config = config.clone();
let visual = Qwen3VLVisionModel::new(config.vision_config.clone(), vb_m.pp("visual"))?;
let language_model =
Qwen3VLTextModel::new(config.text_config.clone(), vb_m.pp("language_model"))?;
let lm_head = if config.tie_word_embeddings {
Linear::new(language_model.embed_tokens.embeddings().clone(), None)
} else {
linear_no_bias(
config.text_config.hidden_size,
config.text_config.vocab_size,
vb.pp("lm_head"),
)?
};
Ok(Self {
config,
visual,
language_model,
lm_head,
rope_deltas: None,
})
}
fn get_vision_features(
&self,
pixel_values: &Tensor,
image_grid_thw: &Tensor,
) -> Result<(Vec<Tensor>, Vec<Tensor>)> {
let (image_embeds, deepstack_image_embeds) =
self.visual.forward(pixel_values, image_grid_thw)?;
// torch.prod
let split_sizes: Vec<usize> = prod_tensor_last_dim(image_grid_thw)?
.to_vec1::<u32>()?
.iter()
.map(|&x| x as usize / self.visual.spatial_merge_size.pow(2))
.collect();
let image_embeds = split_tensor(&image_embeds, &split_sizes, 0)?;
Ok((image_embeds, deepstack_image_embeds))
}
fn get_placeholder_mask(&self, input_ids: &Tensor, is_image: bool) -> Result<Tensor> {
let special_token = if is_image {
Tensor::new(vec![self.config.image_token_id as u32], input_ids.device())?
} else {
Tensor::new(vec![self.config.video_token_id as u32], input_ids.device())?
};
let special_mask = input_ids
.broadcast_eq(&special_token)?
.to_dtype(candle_core::DType::U32)?;
Ok(special_mask)
}
fn get_rope_index(
&self,
input_ids: &Tensor,
image_grid_thw: Option<&Tensor>,
video_grid_thw: Option<&Tensor>,
mask: Option<&Tensor>,
) -> Result<(Tensor, Tensor)> {
let video_grid_thw = match video_grid_thw {
Some(thw) => {
let grid_t = thw.i((.., 0))?.to_vec1::<u32>()?;
let mut v_thw_vec = Vec::new();
for (index, t) in grid_t.iter().enumerate() {
let mut thw_i = thw.i(index)?.to_vec1::<u32>()?;
// [12, 30, 50]
// [1, 30, 50]*t
thw_i[0] = 1;
v_thw_vec.push(
Tensor::new(thw_i, thw.device())?
.repeat(*t as usize)?
.reshape((*t as usize, ()))?,
);
}
Some(Tensor::cat(&v_thw_vec, 0)?)
}
None => None,
};
let spatial_merge_size = self.config.vision_config.spatial_merge_size;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | true |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/qwen3vl/mod.rs | src/models/qwen3vl/mod.rs | pub mod config;
pub mod generate;
pub mod model;
pub mod processor;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/qwen3vl/generate.rs | src/models/qwen3vl/generate.rs | use aha_openai_dive::v1::resources::chat::{
ChatCompletionChunkResponse, ChatCompletionParameters, ChatCompletionResponse,
};
use anyhow::{Result, anyhow};
use candle_core::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use rocket::async_stream::stream;
use rocket::futures::Stream;
use crate::{
chat_template::ChatTemplate,
models::{
GenerateModel,
qwen3vl::{
config::{Qwen3VLConfig, Qwen3VLGenerationConfig},
model::Qwen3VLModel,
processor::Qwen3VLProcessor,
},
},
tokenizer::TokenizerModel,
utils::{
build_completion_chunk_response, build_completion_response, find_type_files, get_device,
get_dtype, get_logit_processor,
},
};
pub struct Qwen3VLGenerateModel<'a> {
chat_template: ChatTemplate<'a>,
tokenizer: TokenizerModel,
pre_processor: Qwen3VLProcessor,
qwen3_vl: Qwen3VLModel,
device: Device,
eos_token_id1: u32,
eos_token_id2: u32,
generation_config: Qwen3VLGenerationConfig,
model_name: String,
}
impl<'a> Qwen3VLGenerateModel<'a> {
pub fn init(path: &str, device: Option<&Device>, dtype: Option<DType>) -> Result<Self> {
let chat_template = ChatTemplate::init(path)?;
let tokenizer = TokenizerModel::init(path)?;
let config_path = path.to_string() + "/config.json";
let cfg: Qwen3VLConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
let device = get_device(device);
let cfg_dtype = cfg.text_config.dtype.as_str();
let dtype = get_dtype(dtype, cfg_dtype);
let pre_processor = Qwen3VLProcessor::new(path, &device, dtype)?;
let model_list = find_type_files(path, "safetensors")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&model_list, dtype, &device)? };
let qwen3_vl = Qwen3VLModel::new(cfg, vb)?;
let generation_config_path = path.to_string() + "/generation_config.json";
let generation_config: Qwen3VLGenerationConfig =
serde_json::from_slice(&std::fs::read(generation_config_path)?)?;
Ok(Self {
chat_template,
tokenizer,
pre_processor,
qwen3_vl,
device,
eos_token_id1: generation_config.eos_token_id[0] as u32,
eos_token_id2: generation_config.eos_token_id[1] as u32,
generation_config,
model_name: "qwen3vl".to_string(),
})
}
}
impl<'a> GenerateModel for Qwen3VLGenerateModel<'a> {
fn generate(&mut self, mes: ChatCompletionParameters) -> Result<ChatCompletionResponse> {
let temperature = match mes.temperature {
None => self.generation_config.temperature,
Some(tem) => tem,
};
let top_p = match mes.top_p {
None => self.generation_config.top_p,
Some(top_p) => top_p,
};
let top_k = self.generation_config.top_k;
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor =
get_logit_processor(Some(temperature), Some(top_p), Some(top_k), seed);
let mes_render = self.chat_template.apply_chat_template(&mes)?;
let input = self.pre_processor.process_info(&mes, &mes_render)?;
let mut input_ids = self
.tokenizer
.text_encode(input.replace_text.clone(), &self.device)?;
let mut seq_len = input_ids.dim(1)?;
let mut seqlen_offset = 0;
let mut pixel_values = input.pixel_values.as_ref();
let image_grid_thw = input.image_grid_thw.as_ref();
let mut pixel_values_video = input.pixel_values_video.as_ref();
let video_grid_thw = input.video_grid_thw.as_ref();
let mut cache_position = Tensor::arange(0u32, seq_len as u32, &self.device)?;
let mut generate = Vec::new();
let sample_len = mes.max_tokens.unwrap_or(1024);
for _ in 0..sample_len {
let logits = self.qwen3_vl.forward(
&input_ids,
pixel_values,
image_grid_thw,
pixel_values_video,
video_grid_thw,
Some(&cache_position),
seqlen_offset,
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
generate.push(next_token);
if next_token == self.eos_token_id1 || next_token == self.eos_token_id2 {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
pixel_values_video = None;
}
let num_token = generate.len() as u32;
let res = self.tokenizer.token_decode(generate)?;
self.qwen3_vl.clear_kv_cache();
let response = build_completion_response(res, &self.model_name, Some(num_token));
Ok(response)
}
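    // Decode-loop shape notes: the first iteration runs the full prompt (and
    // any pixel inputs) through the model to prefill the KV cache; every later
    // iteration feeds exactly one token (input_ids of shape (1, 1)) at an
    // advanced `seqlen_offset`, with the pixel inputs set to None because the
    // visual features already live in the cache from the prefill step.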
fn generate_stream(
&mut self,
mes: ChatCompletionParameters,
) -> Result<
Box<
dyn Stream<Item = Result<ChatCompletionChunkResponse, anyhow::Error>>
+ Send
+ Unpin
+ '_,
>,
> {
let temperature = match mes.temperature {
None => self.generation_config.temperature,
Some(tem) => tem,
};
let top_p = match mes.top_p {
None => self.generation_config.top_p,
Some(top_p) => top_p,
};
let top_k = self.generation_config.top_k;
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor =
get_logit_processor(Some(temperature), Some(top_p), Some(top_k), seed);
let mes_render = self.chat_template.apply_chat_template(&mes)?;
let input = self.pre_processor.process_info(&mes, &mes_render)?;
let mut input_ids = self
.tokenizer
.text_encode(input.replace_text.clone(), &self.device)?;
let mut seq_len = input_ids.dim(1)?;
let mut seqlen_offset = 0;
let pixel_values = input.pixel_values.clone();
let image_grid_thw = input.image_grid_thw.clone();
let pixel_values_video = input.pixel_values_video.clone();
let video_grid_thw = input.video_grid_thw.clone();
let mut cache_position = Tensor::arange(0u32, seq_len as u32, &self.device)?;
let sample_len = mes.max_tokens.unwrap_or(1024);
let stream = stream! {
let mut error_tokens = Vec::new();
let mut pixel_values = pixel_values.as_ref();
let image_grid_thw = image_grid_thw.as_ref();
let mut pixel_values_video = pixel_values_video.as_ref();
let video_grid_thw = video_grid_thw.as_ref();
let mut tool_call_id = None;
let mut tool_call_content = String::new();
for _ in 0..sample_len {
let logits = self.qwen3_vl.forward(
&input_ids,
pixel_values,
image_grid_thw,
pixel_values_video,
video_grid_thw,
Some(&cache_position),
seqlen_offset,
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
let mut decode_ids = Vec::new();
if !error_tokens.is_empty() {
decode_ids.extend_from_slice(&error_tokens);
}
decode_ids.push(next_token);
let decoded_token = self.tokenizer.token_decode(decode_ids).map_err(|e| anyhow!(format!("stream decode error{e}")))?;
if decoded_token.contains("�") {
error_tokens.push(next_token);
if error_tokens.len() > 3 {
error_tokens.clear();
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
pixel_values_video = None;
continue;
}
error_tokens.clear();
                // handle special tokens and tool calls
match decoded_token.as_str() {
"<tool_call>" => {
                        // start of a tool call
tool_call_id = Some(uuid::Uuid::new_v4().to_string());
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
pixel_values_video = None;
continue;
}
"</tool_call>" => {
                        // end of the tool call
let chunk = build_completion_chunk_response(
decoded_token,
&self.model_name,
tool_call_id.clone(),
Some(tool_call_content.clone())
);
tool_call_id = None;
tool_call_content = String::new();
yield Ok(chunk);
}
_ => {
if tool_call_id.is_some() {
                            // while inside a tool call, accumulate its content
tool_call_content.push_str(&decoded_token);
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
pixel_values_video = None;
continue;
} else {
                            // normal text output
let chunk = build_completion_chunk_response(
decoded_token,
&self.model_name,
None,
None
);
yield Ok(chunk);
}
}
}
if next_token == self.eos_token_id1 || next_token == self.eos_token_id2 {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
cache_position = Tensor::from_vec(vec![seqlen_offset as u32], 1, &self.device)?;
pixel_values = None;
pixel_values_video = None;
}
self.qwen3_vl.clear_kv_cache();
};
Ok(Box::new(Box::pin(stream)))
}
}
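// Streaming UTF-8 note: a single sampled token may cover only part of a
// multi-byte character, in which case decoding yields the replacement
// character "�". The stream above buffers such tokens in `error_tokens` and
// retries decoding with each newly sampled token appended, dropping the buffer
// once more than three tokens accumulate so a genuinely undecodable token
// cannot stall the stream forever.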
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/common/mod.rs | src/models/common/mod.rs | use anyhow::Result;
use candle_core::{D, Tensor};
use candle_nn::{
Activation, BatchNorm, BatchNormConfig, Conv2d, Conv2dConfig, LayerNorm, LayerNormConfig,
Linear, Module, RmsNorm, VarBuilder, batch_norm, conv2d, conv2d_no_bias, layer_norm, linear,
linear_no_bias, rms_norm,
};
use crate::{position_embed::rope::apply_rotary_pos_emb, utils::tensor_utils::repeat_kv};
#[derive(Debug, Clone)]
pub struct GateUpDownMLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl GateUpDownMLP {
pub fn new(
vb: VarBuilder,
hidden_size: usize,
intermediate_size: usize,
act_fn: Activation,
bias: bool,
) -> Result<Self> {
let (gate_proj, up_proj, down_proj) = if bias {
(
linear(hidden_size, intermediate_size, vb.pp("gate_proj"))?,
linear(hidden_size, intermediate_size, vb.pp("up_proj"))?,
linear(intermediate_size, hidden_size, vb.pp("down_proj"))?,
)
} else {
(
linear_no_bias(hidden_size, intermediate_size, vb.pp("gate_proj"))?,
linear_no_bias(hidden_size, intermediate_size, vb.pp("up_proj"))?,
linear_no_bias(intermediate_size, hidden_size, vb.pp("down_proj"))?,
)
};
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn,
})
}
}
impl Module for GateUpDownMLP {
fn forward(&self, xs: &Tensor) -> candle_core::Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
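// This is the SwiGLU-style gated MLP used by most recent LLMs:
//
//     down_proj( act(gate_proj(x)) * up_proj(x) )
//
// the gate branch goes through the activation (typically SiLU) and then
// multiplies the up branch elementwise before the down projection.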
pub struct TwoLinearMLP {
linear1: Linear,
linear2: Linear,
act: Activation,
}
impl TwoLinearMLP {
pub fn new(
vb: VarBuilder,
embedding_dim: usize,
mlp_dim: usize,
act: Activation,
bias: bool,
linear1_pp_name: &str,
linear2_pp_name: &str,
) -> Result<Self> {
let (linear1, linear2) = if bias {
(
linear(embedding_dim, mlp_dim, vb.pp(linear1_pp_name))?,
linear(mlp_dim, embedding_dim, vb.pp(linear2_pp_name))?,
)
} else {
(
linear_no_bias(embedding_dim, mlp_dim, vb.pp(linear1_pp_name))?,
linear_no_bias(mlp_dim, embedding_dim, vb.pp(linear2_pp_name))?,
)
};
Ok(Self {
linear1,
linear2,
act,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs
.apply(&self.linear1)?
.apply(&self.act)?
.apply(&self.linear2)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
// pub struct AttentionNobias {
pub struct NaiveAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
middle_size: usize,
kv_cache: Option<(Tensor, Tensor)>,
}
impl NaiveAttention {
pub fn new(
vb: VarBuilder,
hidden_size: usize,
num_attention_heads: usize,
num_key_value_heads: usize,
head_dim: Option<usize>,
bias: bool,
o_proj_pp_name: Option<&str>,
) -> Result<Self> {
let num_kv_groups = num_attention_heads / num_key_value_heads;
let head_dim = match head_dim {
None => hidden_size / num_attention_heads,
Some(dim) => dim,
};
let o_proj_pp_name = o_proj_pp_name.unwrap_or("o_proj");
let (q_proj, k_proj, v_proj, o_proj) = if bias {
(
linear(hidden_size, num_attention_heads * head_dim, vb.pp("q_proj"))?,
linear(hidden_size, num_key_value_heads * head_dim, vb.pp("k_proj"))?,
linear(hidden_size, num_key_value_heads * head_dim, vb.pp("v_proj"))?,
linear(
num_attention_heads * head_dim,
hidden_size,
vb.pp(o_proj_pp_name),
)?,
)
} else {
(
linear_no_bias(hidden_size, num_attention_heads * head_dim, vb.pp("q_proj"))?,
linear_no_bias(hidden_size, num_key_value_heads * head_dim, vb.pp("k_proj"))?,
linear_no_bias(hidden_size, num_key_value_heads * head_dim, vb.pp("v_proj"))?,
linear_no_bias(
num_attention_heads * head_dim,
hidden_size,
vb.pp(o_proj_pp_name),
)?,
)
};
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads: num_attention_heads,
num_kv_heads: num_key_value_heads,
num_kv_groups,
head_dim,
middle_size: num_attention_heads * head_dim,
kv_cache: None,
})
}
pub fn forward(
&self,
xs: &Tensor,
cos: Option<&Tensor>,
sin: Option<&Tensor>,
attention_mask: Option<&Tensor>,
tof32: bool,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) = if let Some(cos) = cos
&& let Some(sin) = sin
{
apply_rotary_pos_emb(&query_states, &key_states, cos, sin, tof32)?
} else {
(query_states, key_states)
};
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_output = eager_attention_forward(
&query_states,
&key_states,
&value_states,
Some(self.num_kv_groups),
attention_mask,
scale,
)?;
let attn_output = attn_output.reshape((b_sz, q_len, self.middle_size))?;
let attn_output = attn_output.apply(&self.o_proj)?;
Ok(attn_output)
}
pub fn forward_with_cache(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
tof32: bool,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
apply_rotary_pos_emb(&query_states, &key_states, cos, sin, tof32)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_output = eager_attention_forward(
&query_states,
&key_states,
&value_states,
Some(self.num_kv_groups),
attention_mask,
scale,
)?;
let attn_output = attn_output.reshape((b_sz, q_len, self.middle_size))?;
let attn_output = attn_output.apply(&self.o_proj)?;
Ok(attn_output)
}
pub fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
pub struct NaiveAttnTwoLinearMLPBlock {
self_attn: NaiveAttention,
mlp: TwoLinearMLP,
input_layernorm: LayerNorm,
post_attention_layernorm: LayerNorm,
}
impl NaiveAttnTwoLinearMLPBlock {
pub fn new(
vb: VarBuilder,
hidden_size: usize,
num_attention_heads: usize,
num_key_value_heads: Option<usize>,
head_dim: Option<usize>,
attn_bias: bool,
attn_pp_name: &str,
o_proj_pp_name: Option<&str>,
intermediate_size: usize,
hidden_act: Activation,
mlp_bias: bool,
mlp_pp_name: &str,
linear1_pp_name: &str,
linear2_pp_name: &str,
norm_eps: f64,
input_norm_pp_name: &str,
post_norm_pp_name: &str,
) -> Result<Self> {
let num_key_value_heads = match num_key_value_heads {
Some(heads) => heads,
None => num_attention_heads,
};
let self_attn = NaiveAttention::new(
vb.pp(attn_pp_name),
hidden_size,
num_attention_heads,
num_key_value_heads,
head_dim,
attn_bias,
o_proj_pp_name,
)?;
let mlp = TwoLinearMLP::new(
vb.pp(mlp_pp_name),
hidden_size,
intermediate_size,
hidden_act,
mlp_bias,
linear1_pp_name,
linear2_pp_name,
)?;
let input_layernorm = get_layer_norm(vb.pp(input_norm_pp_name), norm_eps, hidden_size)?;
let post_attention_layernorm =
get_layer_norm(vb.pp(post_norm_pp_name), norm_eps, hidden_size)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
pub fn forward(
&self,
xs: &Tensor,
cos: Option<&Tensor>,
sin: Option<&Tensor>,
attention_mask: Option<&Tensor>,
tof32: bool,
) -> Result<Tensor> {
let residual = xs.clone();
let xs = self.input_layernorm.forward(xs)?;
let xs = self
.self_attn
.forward(&xs, cos, sin, attention_mask, tof32)?;
let residual = residual.add(&xs)?;
let xs = self.post_attention_layernorm.forward(&residual)?;
let xs = self.mlp.forward(&xs)?;
let xs = residual.add(&xs)?;
Ok(xs)
}
}
pub struct NaiveAttnGateUpDownMLPBlock {
self_attn: NaiveAttention,
mlp: GateUpDownMLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl NaiveAttnGateUpDownMLPBlock {
pub fn new(
vb: VarBuilder,
hidden_size: usize,
num_attention_heads: usize,
num_key_value_heads: Option<usize>,
head_dim: Option<usize>,
attn_bias: bool,
attn_pp_name: &str,
o_proj_pp_name: Option<&str>,
intermediate_size: usize,
hidden_act: Activation,
mlp_bias: bool,
mlp_pp_name: &str,
norm_eps: f64,
input_norm_pp_name: &str,
post_norm_pp_name: &str,
) -> Result<Self> {
let num_key_value_heads = match num_key_value_heads {
Some(heads) => heads,
None => num_attention_heads,
};
let self_attn = NaiveAttention::new(
vb.pp(attn_pp_name),
hidden_size,
num_attention_heads,
num_key_value_heads,
head_dim,
attn_bias,
o_proj_pp_name,
)?;
let mlp = GateUpDownMLP::new(
vb.pp(mlp_pp_name),
hidden_size,
intermediate_size,
hidden_act,
mlp_bias,
)?;
let input_layernorm = rms_norm(hidden_size, norm_eps, vb.pp(input_norm_pp_name))?;
let post_attention_layernorm = rms_norm(hidden_size, norm_eps, vb.pp(post_norm_pp_name))?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
pub fn forward(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let residual = xs.clone();
let xs = self.input_layernorm.forward(xs)?;
let xs = self
.self_attn
.forward_with_cache(&xs, cos, sin, attention_mask, false)?;
let residual = residual.add(&xs)?;
let xs = self.post_attention_layernorm.forward(&residual)?;
let xs = self.mlp.forward(&xs)?;
let xs = residual.add(&xs)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
pub fn eager_attention_forward(
query_states: &Tensor,
key_states: &Tensor,
value_states: &Tensor,
num_key_value_groups: Option<usize>,
attention_mask: Option<&Tensor>,
scaling: f64,
) -> Result<Tensor> {
// input q shape:(b, num_head, seq_len, dim)
// input k/v shape:(b, num_kv_head, seq_len, dim)
let key_states = match num_key_value_groups {
Some(g) => repeat_kv(key_states.clone(), g)?.contiguous()?,
None => key_states.clone(),
};
let value_states = match num_key_value_groups {
Some(g) => repeat_kv(value_states.clone(), g)?.contiguous()?,
None => value_states.clone(),
};
let query_states = query_states.contiguous()?;
let key_states = key_states.contiguous()?;
let value_states = value_states.contiguous()?;
let attn_output = {
#[cfg(not(feature = "flash-attn"))]
{
let attn_weights = query_states.matmul(&key_states.transpose(D::Minus2, D::Minus1)?)?;
let attn_weights = (attn_weights * scaling)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(&mask.to_dtype(attn_weights.dtype())?)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
}
#[cfg(feature = "flash-attn")]
{
// use flash-attn,
// flash-attn shape: (bs, seq_len, num_head, head_dim)
let query_states = query_states.transpose(1, 2)?;
let key_states = key_states.transpose(1, 2)?;
let value_states = value_states.transpose(1, 2)?;
let attn_output = candle_flash_attn::flash_attn(
&query_states,
&key_states,
&value_states,
scaling as f32,
attention_mask.is_some(),
)?
.transpose(1, 2)?;
attn_output
}
};
//(b, n_head, seq_len, dim) -> (b, seq_len, n_head, dim)
let attn_output = attn_output.transpose(1, 2)?.contiguous()?;
Ok(attn_output)
}
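// Grouped-query attention note: when `num_key_value_groups` is Some(g), every
// k/v head is repeated g times so the head counts match the queries.
// Illustrative example (hypothetical sizes): with 8 query heads and 2 kv
// heads, g = 4 and k/v grow from (b, 2, s, d) to (b, 8, s, d). The flash-attn
// path transposes to (b, s, heads, d) because that is the layout
// candle_flash_attn::flash_attn expects, then transposes back afterwards.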
pub fn get_conv2d(
vb: VarBuilder,
in_c: usize,
out_c: usize,
kernel_size: usize,
padding: usize,
stride: usize,
dilation: usize,
groups: usize,
bias: bool,
) -> Result<Conv2d> {
let cfg = Conv2dConfig {
padding,
stride,
dilation,
groups,
cudnn_fwd_algo: None,
};
let conv2d = if bias {
conv2d(in_c, out_c, kernel_size, cfg, vb)?
} else {
conv2d_no_bias(in_c, out_c, kernel_size, cfg, vb)?
};
Ok(conv2d)
}
pub fn get_layer_norm(vb: VarBuilder, eps: f64, dim: usize) -> Result<LayerNorm> {
let ln_config = LayerNormConfig {
eps,
remove_mean: true, // true for layernorm, false for RMSNorm
affine: true, // true for with bias, false for without bias
};
let norm = layer_norm(dim, ln_config, vb)?;
Ok(norm)
}
pub fn get_batch_norm(vb: VarBuilder, eps: f64, dim: usize) -> Result<BatchNorm> {
let bn_config = BatchNormConfig {
eps,
remove_mean: true,
affine: true,
momentum: 0.1,
};
let norm = batch_norm(dim, bn_config, vb)?;
Ok(norm)
}
pub fn deform_conv2d_kernel(
input: &Tensor,
weight: &Tensor,
bias: Option<&Tensor>,
offset: &Tensor,
mask: Option<&Tensor>,
stride: usize,
padding: usize,
) -> Result<Tensor> {
    // dilation is not handled, and batch size is assumed to be 1
let (_, in_c, in_h, in_w) = input.dims4()?;
let (out_channel, _, ker_h, ker_w) = weight.dims4()?;
let out_h = ((in_h + 2 * padding - ker_h) / stride) + 1;
let out_w = ((in_w + 2 * padding - ker_w) / stride) + 1;
let num_kernels = in_c * out_h * out_w;
let mask_vec = if let Some(mask) = mask {
Some(mask.squeeze(0)?.to_vec3::<f32>()?)
} else {
None
};
let offset_vec = offset.squeeze(0)?.to_vec3::<f32>()?;
let input_vec = input.squeeze(0)?.to_vec3::<f32>()?;
let mut columns_vec = vec![vec![0.0f32; out_h * out_w]; in_c * ker_h * ker_w];
for index in 0..num_kernels {
let out_x = index % out_w;
let out_y = (index / out_w) % out_h;
let in_c = index / (out_w * out_h);
let out_c = in_c * ker_h * ker_w;
for i in 0..ker_h {
for j in 0..ker_w {
let mask_idx = i * ker_w + j;
let offset_idx = 2 * mask_idx;
                let mask_value = match &mask_vec {
                    Some(mv) => mv[mask_idx][out_y][out_x],
                    None => 1.0,
                };
let offset_h = offset_vec[offset_idx][out_y][out_x];
let offset_w = offset_vec[offset_idx + 1][out_y][out_x];
let y = ((out_y * stride - padding) + i) as f32 + offset_h;
let x = ((out_x * stride - padding) + j) as f32 + offset_w;
let val = if y <= -1.0 || in_h as f32 <= y || x <= -1.0 || in_w as f32 <= x {
0.0
} else {
let h_low = y.floor();
let w_low = x.floor();
let h_high = h_low + 1.0;
let w_high = w_low + 1.0;
let lh = y - h_low;
let lw = x - w_low;
let hh = 1.0 - lh;
let hw = 1.0 - lw;
let w1 = hh * hw;
let w2 = hh * lw;
let w3 = lh * hw;
let w4 = lh * lw;
let v1 = if h_low >= 0.0 && w_low >= 0.0 {
input_vec[in_c][h_low as usize][w_low as usize]
} else {
0.0
};
let v2 = if h_low >= 0.0 && w_high <= (in_w - 1) as f32 {
input_vec[in_c][h_low as usize][w_high as usize]
} else {
0.0
};
let v3 = if h_high <= (in_h - 1) as f32 && w_low >= 0.0 {
input_vec[in_c][h_high as usize][w_low as usize]
} else {
0.0
};
let v4 = if h_high <= (in_h - 1) as f32 && w_high <= (in_w - 1) as f32 {
input_vec[in_c][h_high as usize][w_high as usize]
} else {
0.0
};
w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
};
columns_vec[out_c + i * ker_w + j][out_y * out_w + out_x] = mask_value * val;
}
}
}
let columns = Tensor::new(columns_vec, weight.device())?;
let mut out =
weight
.flatten_from(1)?
.matmul(&columns)?
.reshape((1, out_channel, out_h, out_w))?;
if let Some(bias) = bias {
out = out.broadcast_add(bias)?;
}
Ok(out)
}
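// Sketch of the algorithm above: for every output location and kernel tap, a
// learned (offset_h, offset_w) shifts the sampling point to a fractional
// coordinate; the input is read there via bilinear interpolation (weights
// w1..w4 over the four neighbouring pixels), optionally scaled by the
// modulation mask, and written into an im2col-style `columns` matrix, after
// which a single matmul with the flattened weight produces the output map.
// Bilinear example: sampling at (y, x) = (1.25, 2.5) blends the pixels at
// rows 1-2, cols 2-3 with weights 0.375, 0.375, 0.125 and 0.125.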
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/deepseek_ocr/config.rs | src/models/deepseek_ocr/config.rs | #[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct DeepseekV2Config {
pub bos_token_id: u32,
pub eos_token_id: u32,
pub first_k_dense_replace: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub kv_lora_rank: Option<usize>,
pub lm_head: bool,
pub max_position_embeddings: usize,
pub moe_intermediate_size: usize,
#[serde(default = "default_moe_layer_freq")]
pub moe_layer_freq: usize,
#[serde(default = "default_routed_scaling_factor")]
pub routed_scaling_factor: f64,
#[serde(default = "default_scoring_func")]
pub scoring_func: String,
#[serde(default = "default_aux_loss_alpha")]
pub aux_loss_alpha: f32,
#[serde(default = "default_true")]
pub seq_aux: bool,
#[serde(default = "default_false")]
pub norm_topk_prob: bool,
pub n_group: usize,
pub n_routed_experts: usize,
pub n_shared_experts: usize,
pub num_attention_heads: usize,
pub num_experts_per_tok: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub q_lora_rank: Option<usize>,
pub qk_nope_head_dim: usize,
pub qk_rope_head_dim: usize,
pub rm_head: bool,
pub topk_group: usize,
pub topk_method: String,
pub torch_dtype: String,
pub use_mla: bool,
pub v_head_dim: usize,
pub vocab_size: usize,
#[serde(default = "default_rms_norm_eps")]
pub rms_norm_eps: f64,
}
fn default_moe_layer_freq() -> usize {
1
}
fn default_routed_scaling_factor() -> f64 {
1.0
}
fn default_scoring_func() -> String {
"softmax".to_string()
}
fn default_aux_loss_alpha() -> f32 {
0.001
}
fn default_true() -> bool {
true
}
fn default_false() -> bool {
false
}
fn default_rms_norm_eps() -> f64 {
1e-6
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct ProjectorConfig {
pub input_dim: usize,
pub model_type: String,
pub n_embed: usize,
pub projector_type: String,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct ClipL14_224 {
pub heads: usize,
pub image_size: usize,
pub layers: usize,
pub patch_size: usize,
pub width: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct SamVitB {
pub downsample_channels: Vec<usize>,
pub global_attn_indexes: Vec<usize>,
pub heads: usize,
pub layers: usize,
pub width: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Width {
#[serde(rename = "clip-l-14-224")]
pub clip_l_14_224: ClipL14_224,
pub sam_vit_b: SamVitB,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct DeepseekOCRVisionConfig {
pub image_size: usize,
pub mlp_ratio: f32,
pub width: Width,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct DeepseekOCRConfig {
pub language_config: DeepseekV2Config,
pub projector_config: ProjectorConfig,
pub torch_dtype: String,
pub vision_config: DeepseekOCRVisionConfig,
pub bos_token_id: u32,
pub eos_token_id: u32,
pub first_k_dense_replace: u32,
pub hidden_size: usize,
pub intermediate_size: usize,
pub kv_lora_rank: Option<usize>,
pub lm_head: bool,
pub max_position_embeddings: usize,
pub moe_intermediate_size: usize,
pub n_group: usize,
pub n_routed_experts: usize,
pub n_shared_experts: usize,
pub num_attention_heads: usize,
pub num_experts_per_tok: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub q_lora_rank: Option<usize>,
pub qk_nope_head_dim: usize,
pub qk_rope_head_dim: usize,
pub rm_head: bool,
pub topk_group: usize,
pub topk_method: String,
pub use_mla: bool,
pub v_head_dim: usize,
pub vocab_size: usize,
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/deepseek_ocr/processor.rs | src/models/deepseek_ocr/processor.rs | use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
use candle_core::{DType, Device, Tensor};
use crate::utils::img_utils::dynamic_preprocess;
use crate::{
tokenizer::TokenizerModel,
utils::{
extract_mes,
img_utils::{extract_images, img_transform, resize_with_edge_padding},
},
};
pub struct DeepseekOCRProcessor {
device: Device,
dtype: DType,
image_token: String,
image_token_id: u32,
patch_size: u32,
downsample_ratio: u32,
}
impl DeepseekOCRProcessor {
pub fn new(device: &Device, dtype: DType) -> Result<Self> {
Ok(Self {
device: device.clone(),
dtype,
image_token: "<image>".to_string(),
image_token_id: 128815,
patch_size: 16,
downsample_ratio: 4,
})
}
fn get_prompt(&self, mes_vec: Vec<(String, String)>) -> Result<String> {
let sep = "\n";
let sep2 = "";
let mut ret = "".to_string();
for (i, (_, message)) in mes_vec.iter().enumerate() {
            if !message.is_empty() {
if i % 2 == 0 {
ret = ret + message + sep;
} else {
ret = ret + message + sep2;
}
}
}
ret = ret.trim().to_string();
Ok(ret)
}
pub fn process_info(
&self,
mes: &ChatCompletionParameters,
tokenizer: &TokenizerModel,
base_size: u32,
image_size: u32,
crop_mode: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor)> {
let imgs = extract_images(mes)?;
let mes_vec = extract_mes(mes)?;
let prompt = self.get_prompt(mes_vec.clone())?;
let text_splits: Vec<&str> = prompt.split(&self.image_token).collect();
let img_mean =
Tensor::from_slice(&[0.5, 0.5, 0.5], (3, 1, 1), &self.device)?.to_dtype(self.dtype)?;
let img_std =
Tensor::from_slice(&[0.5, 0.5, 0.5], (3, 1, 1), &self.device)?.to_dtype(self.dtype)?;
let mut images_list = Vec::new();
let mut images_crop_list = Vec::new();
let mut images_seq_mask = vec![0u32];
let mut tokenized_id = vec![0u32];
let mut images_spatial_crop = Vec::new();
for (text_seq, image) in text_splits.iter().zip(imgs) {
if !text_seq.is_empty() {
let token_ids = tokenizer.text_encode_vec(text_seq.to_string(), false)?;
tokenized_id.extend_from_slice(&token_ids);
let seq_mask = vec![0u32; token_ids.len()];
images_seq_mask.extend_from_slice(&seq_mask);
}
if crop_mode {
let mut images_crop_raw = Vec::new();
let crop_ratio = if image.height() <= 640 && image.width() <= 640 {
(1u32, 1u32)
} else {
let (img_crop, ratio) = dynamic_preprocess(&image, image_size, false)?;
images_crop_raw = img_crop.clone();
ratio
};
                let global_view =
                    resize_with_edge_padding(&image, base_size, base_size, [127u8; 3]);
                let global_img_trans =
                    img_transform(&global_view, &img_mean, &img_std, &self.device, self.dtype)?;
images_list.push(global_img_trans);
images_spatial_crop.push(vec![crop_ratio.0, crop_ratio.1]);
if crop_ratio.0 > 1 || crop_ratio.1 > 1 {
for img in images_crop_raw {
let img_t =
img_transform(&img, &img_mean, &img_std, &self.device, self.dtype)?;
images_crop_list.push(img_t);
}
}
let num_queries = image_size / self.patch_size / self.downsample_ratio;
let num_queries_base = base_size / self.patch_size / self.downsample_ratio;
let mut token_repeat = num_queries_base.pow(2) + num_queries_base + 1;
if crop_ratio.0 > 1 || crop_ratio.1 > 1 {
token_repeat += (num_queries * crop_ratio.0 + 1) * (num_queries * crop_ratio.1);
}
let tokenized_image = vec![self.image_token_id; token_repeat as usize];
tokenized_id.extend_from_slice(&tokenized_image);
let seq_mask = vec![1u32; tokenized_image.len()];
images_seq_mask.extend_from_slice(&seq_mask);
} else {
let global_view = if image_size <= 640 {
image.resize_exact(
image_size,
image_size,
image::imageops::FilterType::CatmullRom,
)
} else {
resize_with_edge_padding(&image, image_size, image_size, [127u8; 3])
};
let global_img_trans =
img_transform(&global_view, &img_mean, &img_std, &self.device, self.dtype)?;
images_list.push(global_img_trans);
images_spatial_crop.push(vec![1, 1]);
let num_queries = image_size / self.patch_size / self.downsample_ratio;
let token_repeat = num_queries.pow(2) + num_queries + 1;
let tokenized_image = vec![self.image_token_id; token_repeat as usize];
tokenized_id.extend_from_slice(&tokenized_image);
let seq_mask = vec![1u32; tokenized_image.len()];
images_seq_mask.extend_from_slice(&seq_mask);
}
}
let token_ids =
tokenizer.text_encode_vec(text_splits[text_splits.len() - 1].to_string(), false)?;
tokenized_id.extend_from_slice(&token_ids);
let seq_mask = vec![0u32; token_ids.len()];
images_seq_mask.extend_from_slice(&seq_mask);
let input_ids = Tensor::new(tokenized_id, &self.device)?.unsqueeze(0)?;
let image_seq_mask = Tensor::new(images_seq_mask, &self.device)?.unsqueeze(0)?;
let (images_ori, images_spatial_crop_t, image_crop) = if images_list.is_empty() {
let images_ori = Tensor::zeros(
(1usize, 3usize, image_size as usize, image_size as usize),
self.dtype,
&self.device,
)?;
let images_spatial_crop_t = Tensor::zeros((1, 2), DType::F64, &self.device)?;
let image_crop = Tensor::zeros(
(1usize, 3usize, base_size as usize, base_size as usize),
self.dtype,
&self.device,
)?;
(images_ori, images_spatial_crop_t, image_crop)
} else {
let images_ori = Tensor::stack(&images_list, 0)?;
let images_spatial_crop_t = Tensor::new(images_spatial_crop, &self.device)?;
let image_crop = if !images_crop_list.is_empty() {
Tensor::stack(&images_crop_list, 0)?
} else {
Tensor::zeros(
(1usize, 3usize, base_size as usize, base_size as usize),
self.dtype,
&self.device,
)?
};
(images_ori, images_spatial_crop_t, image_crop)
};
Ok((
input_ids,
images_ori,
image_crop,
image_seq_mask,
images_spatial_crop_t,
))
}
}
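// Visual token budget: with patch_size = 16 and downsample_ratio = 4, an image
// side of `image_size` pixels yields n = image_size / 16 / 4 query tokens per
// side. The placeholder count n*n + n + 1 looks like one extra token per row
// plus a trailing separator (an inference from the arithmetic above, not
// verified against the upstream tokenizer); when cropping kicks in, the tiles
// add a further (n * crop_ratio.0 + 1) * (n * crop_ratio.1) placeholders on
// top of the global view.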
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/deepseek_ocr/model.rs | src/models/deepseek_ocr/model.rs | use anyhow::Result;
use candle_core::{D, IndexOp, Tensor};
use candle_nn::{
Activation, Conv2d, Embedding, Init, LayerNorm, Linear, Module, RmsNorm, VarBuilder, embedding,
linear, linear_no_bias,
ops::{sigmoid, softmax},
rms_norm,
};
use candle_transformers::models::segment_anything::LayerNorm2d;
use crate::{
models::{
common::{
GateUpDownMLP, NaiveAttention, TwoLinearMLP, eager_attention_forward, get_conv2d,
get_layer_norm,
},
deepseek_ocr::config::{DeepseekOCRConfig, DeepseekV2Config},
},
position_embed::rope::RoPE,
utils::tensor_utils::{
index_select_2d, interpolate_bicubic, interpolate_linear_1d, masked_scatter_dim0, nonzero,
onehot, prepare_causal_attention_mask, quick_gelu, topk,
},
};
pub struct PatchEmbed {
proj: Conv2d,
}
impl PatchEmbed {
pub fn new(
vb: VarBuilder,
in_chans: usize,
embed_dim: usize,
kernel_size: usize,
stride: usize,
padding: usize,
) -> Result<Self> {
let proj = get_conv2d(
vb.pp("proj"),
in_chans,
embed_dim,
kernel_size,
padding,
stride,
1,
1,
true,
)?;
Ok(Self { proj })
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.proj.forward(xs)?;
let xs = xs.permute((0, 2, 3, 1))?;
Ok(xs)
}
}
pub struct Attention {
num_heads: usize,
// head_dim: usize,
qkv: Linear,
proj: Linear,
scaling: f64,
use_rel_pos: bool,
rel_pos_h: Option<Tensor>,
rel_pos_w: Option<Tensor>,
}
impl Attention {
pub fn new(
vb: VarBuilder,
dim: usize,
num_heads: usize,
qkv_bias: bool,
use_rel_pos: bool,
input_size: Option<(usize, usize)>,
) -> Result<Self> {
let head_dim = dim / num_heads;
let scaling = 1.0 / (head_dim as f64).sqrt();
let qkv = if qkv_bias {
linear(dim, dim * 3, vb.pp("qkv"))?
} else {
linear_no_bias(dim, dim * 3, vb.pp("qkv"))?
};
let proj = linear(dim, dim, vb.pp("proj"))?;
let mut rel_pos_h = None;
let mut rel_pos_w = None;
if use_rel_pos {
if input_size.is_none() {
return Err(anyhow::anyhow!(
"Input size must be provided if using relative positional encoding."
));
}
let input_size = input_size.unwrap();
let h_len = 2 * input_size.0 - 1;
let w_len = 2 * input_size.1 - 1;
rel_pos_h = Some(vb.get_with_hints((h_len, head_dim), "rel_pos_h", Init::Const(0.))?);
rel_pos_w = Some(vb.get_with_hints((w_len, head_dim), "rel_pos_w", Init::Const(0.))?);
}
Ok(Self {
num_heads,
// head_dim,
qkv,
proj,
scaling,
use_rel_pos,
rel_pos_h,
rel_pos_w,
})
}
fn get_rel_pos(&self, q_size: usize, k_size: usize, rel_pos: &Tensor) -> Result<Tensor> {
let max_rel_dist = 2 * std::cmp::max(q_size, k_size) - 1;
let rel_pos_resized = if rel_pos.dim(0)? != max_rel_dist {
let rel_pos_t = rel_pos
.to_dtype(candle_core::DType::F32)?
.t()?
.unsqueeze(0)?
.contiguous()?;
let rel_pos_resized = interpolate_linear_1d(&rel_pos_t, max_rel_dist, None)?;
rel_pos_resized
.squeeze(0)?
.t()?
.contiguous()?
.to_dtype(rel_pos.dtype())?
} else {
rel_pos.clone()
};
let q_coords = Tensor::arange(0 as f32, q_size as f32, rel_pos.device())?
.unsqueeze(D::Minus1)?
.affine((k_size as f64 / q_size as f64).max(1.0), 0.0)?;
let k_coords = Tensor::arange(0 as f32, k_size as f32, rel_pos.device())?
.unsqueeze(0)?
.affine((q_size as f64 / k_size as f64).max(1.0), 0.0)?;
let relative_coords = q_coords
.broadcast_sub(&k_coords)?
.affine(1.0, (k_size - 1) as f64)?
.affine((q_size as f64 / k_size as f64).max(1.0), 0.0)?;
let relative_coords = relative_coords
.to_dtype(candle_core::DType::U32)?
.contiguous()?;
let rel_pos_resized = rel_pos_resized.contiguous()?;
let res = index_select_2d(&rel_pos_resized, &relative_coords)?;
Ok(res)
}
fn add_decomposed_rel_pos(
&self,
q: &Tensor,
rel_pos_h: &Tensor,
rel_pos_w: &Tensor,
q_size: (usize, usize),
k_size: (usize, usize),
) -> Result<(Tensor, Tensor)> {
let (q_h, q_w) = q_size;
let (k_h, k_w) = k_size;
let rh = self.get_rel_pos(q_h, k_h, rel_pos_h)?; // (q_h, k_h, dim)
let rw = self.get_rel_pos(q_w, k_w, rel_pos_w)?; // (q_w, k_w, dim)
let (b, _, dim) = q.dims3()?;
let r_q = q.reshape((b, q_h, q_w, dim))?.contiguous()?;
let r_q_ = r_q.unsqueeze(D::Minus2)?; // (b, q_h, q_w, 1, dim)
// rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
// rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
let rh_ = rh.unsqueeze(1)?.unsqueeze(0)?; // (1, h, 1, k, dim)
let rel_h = r_q_.broadcast_mul(&rh_)?.sum(D::Minus1)?;
let rw_ = rw.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, w, k, dim)
let rel_w = r_q_.broadcast_mul(&rw_)?.sum(D::Minus1)?;
let rel_h = rel_h
.unsqueeze(D::Minus1)?
.reshape((b, q_h * q_w, k_h, 1))?;
let rel_w = rel_w
.unsqueeze(D::Minus2)?
.reshape((b, q_h * q_w, 1, k_w))?;
Ok((rel_h, rel_w))
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, h, w, _) = xs.dims4()?;
// (3, B, n_head, h*w, head_dim)
let qkv = self
.qkv
.forward(xs)?
.reshape((b, h * w, 3, self.num_heads, ()))?
.permute((2, 0, 3, 1, 4))?
.contiguous()?;
let query_states = qkv.i(0)?.contiguous()?;
let key_states = qkv.i(1)?.contiguous()?;
let value_states = qkv.i(2)?.contiguous()?;
let xs = if self.use_rel_pos {
let q_reshape = query_states.reshape((b * self.num_heads, h * w, ()))?;
let (rel_h, rel_w) = self.add_decomposed_rel_pos(
&q_reshape,
self.rel_pos_h.as_ref().unwrap(),
self.rel_pos_w.as_ref().unwrap(),
(h, w),
(h, w),
)?;
let (_, rel_h_dim1, rel_h_dim2, rel_h_dim3) = rel_h.dims4()?;
let rel_h = rel_h.reshape((b, self.num_heads, rel_h_dim1, rel_h_dim2, rel_h_dim3))?;
let (_, rel_w_dim1, rel_w_dim2, rel_w_dim3) = rel_w.dims4()?;
let rel_w = rel_w.reshape((b, self.num_heads, rel_w_dim1, rel_w_dim2, rel_w_dim3))?;
let attn_bias = rel_h.broadcast_add(&rel_w)?.reshape((
b,
self.num_heads,
rel_h_dim1,
rel_h_dim2 * rel_w_dim3,
))?;
eager_attention_forward(
&query_states,
&key_states,
&value_states,
None,
Some(&attn_bias),
self.scaling,
)?
} else {
eager_attention_forward(
&query_states,
&key_states,
&value_states,
None,
None,
self.scaling,
)?
};
// (b, h*w, n_head, dim)
let xs = xs.reshape((b, h * w, ()))?.reshape((b, h, w, ()))?;
let xs = self.proj.forward(&xs)?;
Ok(xs)
}
}
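// A plain-arithmetic sketch of the lookup index computed by `get_rel_pos`
// above for the case q_size == k_size == n (which is how `forward` calls it,
// always with (h, w) for both query and key): the index into the
// (2n - 1)-row table is (i - j) + (n - 1), covering 0..=2n-2. The general
// rescaling by max(q/k, 1) collapses to a no-op in this case; the assertion
// below only checks the square case.
#[cfg(test)]
mod rel_pos_index_sketch {
    fn rel_index(i: i64, j: i64, n: i64) -> i64 {
        // q_coords - k_coords, shifted into the non-negative range by (n - 1)
        i - j + (n - 1)
    }

    #[test]
    fn square_case_covers_the_whole_table() {
        let n = 4i64;
        let table_len = 2 * n - 1;
        for i in 0..n {
            for j in 0..n {
                assert!((0..table_len).contains(&rel_index(i, j, n)));
            }
        }
        assert_eq!(rel_index(n - 1, 0, n), table_len - 1); // max offset
        assert_eq!(rel_index(0, n - 1, n), 0); // min offset
    }
}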
pub struct Block {
norm1: LayerNorm,
attn: Attention,
norm2: LayerNorm,
mlp: TwoLinearMLP,
window_size: usize,
}
impl Block {
pub fn new(
vb: VarBuilder,
dim: usize,
num_heads: usize,
mlp_ratio: f32,
qkv_bias: bool,
eps: f64,
act: Activation,
use_rel_pos: bool,
// rel_pos_zero_init: bool,
window_size: usize,
input_size: Option<(usize, usize)>,
) -> Result<Self> {
let norm1 = get_layer_norm(vb.pp("norm1"), eps, dim)?;
let input_size = if window_size == 0 {
input_size
} else {
Some((window_size, window_size))
};
let attn = Attention::new(
vb.pp("attn"),
dim,
num_heads,
qkv_bias,
use_rel_pos,
input_size,
)?;
let norm2 = get_layer_norm(vb.pp("norm2"), eps, dim)?;
let mlp_dim = (dim as f32 * mlp_ratio) as usize;
let mlp = TwoLinearMLP::new(vb.pp("mlp"), dim, mlp_dim, act, true, "lin1", "lin2")?;
Ok(Self {
norm1,
attn,
norm2,
mlp,
window_size,
})
}
pub fn window_partition(
&self,
x: &Tensor,
window_size: usize,
) -> Result<(Tensor, (usize, usize))> {
let (b, h, w, c) = x.dims4()?;
let pad_h = (window_size - h % window_size) % window_size;
let pad_w = (window_size - w % window_size) % window_size;
let x = if pad_h > 0 || pad_w > 0 {
let x = x.pad_with_zeros(1, 0, pad_h)?;
x.pad_with_zeros(2, 0, pad_w)?
} else {
x.clone()
};
let hp = h + pad_h;
let wp = w + pad_w;
let x = x.reshape((
b,
hp / window_size,
window_size,
wp / window_size,
window_size,
c,
))?;
let windows = x.permute((0, 1, 3, 2, 4, 5))?.contiguous()?.reshape((
(),
window_size,
window_size,
c,
))?;
Ok((windows, (hp, wp)))
}
pub fn window_unpartition(
&self,
windows: &Tensor,
window_size: usize,
pad_hw: (usize, usize),
hw: (usize, usize),
) -> Result<Tensor> {
let (hp, wp) = pad_hw;
let (h, w) = hw;
let b = windows.dim(0)? / (hp * wp / window_size / window_size);
let last_dim = windows.dim(D::Minus1)?;
let x = windows.reshape(&[
b,
hp / window_size,
wp / window_size,
window_size,
window_size,
last_dim,
])?;
let mut x = x
.permute((0, 1, 3, 2, 4, 5))?
.contiguous()?
.reshape((b, hp, wp, ()))?;
if hp > h || wp > w {
x = x.i((.., 0..h, 0..w, ..))?
}
Ok(x)
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let shortcut = xs.clone();
let xs = self.norm1.forward(xs)?;
let xs = if self.window_size > 0 {
let h = xs.dim(1)?;
let w = xs.dim(2)?;
let (x, (hp, wp)) = self.window_partition(&xs, self.window_size)?;
let x = self.attn.forward(&x)?;
self.window_unpartition(&x, self.window_size, (hp, wp), (h, w))?
} else {
self.attn.forward(&xs)?
};
let x = shortcut.add(&xs)?;
let x = x.add(&self.mlp.forward(&self.norm2.forward(&x)?)?)?;
Ok(x)
}
}
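// A shape-bookkeeping sketch for `window_partition` above: the input is
// zero-padded on the bottom/right so h and w become multiples of
// `window_size`, and the number of windows is b * (hp/ws) * (wp/ws).
// Pure arithmetic on example sizes; no tensors involved.
#[cfg(test)]
mod window_partition_sketch {
    fn padded(dim: usize, ws: usize) -> usize {
        dim + (ws - dim % ws) % ws
    }

    #[test]
    fn padding_and_window_count() {
        let (b, h, w, ws) = (2usize, 37, 41, 14);
        let (hp, wp) = (padded(h, ws), padded(w, ws));
        assert_eq!((hp % ws, wp % ws), (0, 0));
        assert_eq!((hp, wp), (42, 42));
        let num_windows = b * (hp / ws) * (wp / ws);
        assert_eq!(num_windows, 2 * 3 * 3);
    }
}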
pub struct Neck {
conv2d_0: Conv2d,
layernorm_1: LayerNorm2d,
conv2d_2: Conv2d,
layernorm_3: LayerNorm2d,
}
impl Neck {
pub fn new(vb: VarBuilder, embed_dim: usize, out_chans: usize) -> Result<Self> {
let conv2d_0 = get_conv2d(vb.pp("0"), embed_dim, out_chans, 1, 0, 1, 1, 1, false)?;
let layernorm_1 = LayerNorm2d::new(out_chans, 0.000001, vb.pp("1"))?;
let conv2d_2 = get_conv2d(vb.pp("2"), out_chans, out_chans, 3, 1, 1, 1, 1, false)?;
let layernorm_3 = LayerNorm2d::new(out_chans, 0.000001, vb.pp("3"))?;
Ok(Self {
conv2d_0,
layernorm_1,
conv2d_2,
layernorm_3,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.conv2d_0.forward(xs)?;
let xs = self.layernorm_1.forward(&xs)?;
let xs = self.conv2d_2.forward(&xs)?;
let xs = self.layernorm_3.forward(&xs)?;
Ok(xs)
}
}
pub struct ImageEncoderViT {
// img_size: usize,
patch_embed: PatchEmbed,
pos_embed: Option<Tensor>,
blocks: Vec<Block>,
neck: Neck,
net_2: Conv2d,
net_3: Conv2d,
}
impl ImageEncoderViT {
pub fn new(
vb: VarBuilder,
img_size: usize,
patch_size: usize,
in_chans: usize,
embed_dim: usize,
depth: usize,
num_heads: usize,
mlp_ratio: f32,
out_chans: usize,
qkv_bias: bool,
act: Activation,
use_abs_pos: bool,
use_rel_pos: bool,
// rel_pos_zero_init: bool,
window_size: usize,
global_attn_indexes: Vec<usize>,
) -> Result<Self> {
let patch_embed = PatchEmbed::new(
vb.pp("patch_embed"),
in_chans,
embed_dim,
patch_size,
patch_size,
0,
)?;
let pos_embed = if use_abs_pos {
Some(vb.get_with_hints(
(1, img_size / patch_size, img_size / patch_size, embed_dim),
"pos_embed",
Init::Const(0.),
)?)
} else {
None
};
let mut blocks = Vec::new();
let vb_blocks = vb.pp("blocks");
for i in 0..depth {
let window_size = if global_attn_indexes.contains(&i) {
0
} else {
window_size
};
let block = Block::new(
vb_blocks.pp(i),
embed_dim,
num_heads,
mlp_ratio,
qkv_bias,
1e-6,
act,
use_rel_pos,
// rel_pos_zero_init,
window_size,
Some((img_size / patch_size, img_size / patch_size)),
)?;
blocks.push(block);
}
let neck = Neck::new(vb.pp("neck"), embed_dim, out_chans)?;
let net_2 = get_conv2d(vb.pp("net_2"), 256, 512, 3, 1, 2, 1, 1, false)?;
let net_3 = get_conv2d(vb.pp("net_3"), 512, 1024, 3, 1, 2, 1, 1, false)?;
Ok(Self {
// img_size,
patch_embed,
pos_embed,
blocks,
neck,
net_2,
net_3,
})
}
fn get_abs_pos_sam(&self, abs_pos: &Tensor, tgt_size: usize) -> Result<Tensor> {
let src_size = abs_pos.dim(1)?;
if src_size != tgt_size {
let old_pos_embed = abs_pos.permute((0, 3, 1, 2))?;
let new_pos_embed = interpolate_bicubic(
&old_pos_embed,
(tgt_size, tgt_size),
Some(true),
Some(false),
)?;
let new_pos_embed = new_pos_embed.permute((0, 2, 3, 1))?;
Ok(new_pos_embed)
} else {
Ok(abs_pos.clone())
}
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut x = self.patch_embed.forward(xs)?;
if self.pos_embed.is_some() {
let dim1 = x.dim(1)?;
let pos = self.get_abs_pos_sam(self.pos_embed.as_ref().unwrap(), dim1)?;
x = x.broadcast_add(&pos)?;
}
for blk in &self.blocks {
x = blk.forward(&x)?;
}
let x = x.permute((0, 3, 1, 2))?;
let x = self.neck.forward(&x)?;
let x = self.net_2.forward(&x)?;
let x = self.net_3.forward(&x)?;
Ok(x)
}
}
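// A quick sanity sketch of the spatial sizes flowing through
// `ImageEncoderViT::forward` above, using the standard conv output formula
// floor((in + 2*pad - k) / stride) + 1. Kernel/pad/stride values are read
// from the constructors above; the input size (1024) and patch size (16)
// are illustrative assumptions, not values read from a config.
#[cfg(test)]
mod sam_encoder_shape_sketch {
    fn conv_out(input: usize, k: usize, pad: usize, stride: usize) -> usize {
        (input + 2 * pad - k) / stride + 1
    }

    #[test]
    fn downsampling_chain() {
        let (img, patch) = (1024usize, 16usize);
        let tokens = img / patch; // patch_embed: stride = kernel = patch_size
        assert_eq!(tokens, 64);
        // neck convs are 1x1 (pad 0) and 3x3 (pad 1), both stride 1: size kept
        assert_eq!(conv_out(tokens, 1, 0, 1), 64);
        assert_eq!(conv_out(tokens, 3, 1, 1), 64);
        // net_2 / net_3 are 3x3, pad 1, stride 2: each halves the grid
        let after_net2 = conv_out(tokens, 3, 1, 2);
        let after_net3 = conv_out(after_net2, 3, 1, 2);
        assert_eq!((after_net2, after_net3), (32, 16));
    }
}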
pub struct CLIPVisionEmbeddings {
class_embedding: Tensor,
patch_embedding: Conv2d,
// position_embedding: Embedding,
// position_ids: Tensor,
pos_embeds: Tensor,
embed_dim: usize,
}
impl CLIPVisionEmbeddings {
pub fn new(
vb: VarBuilder,
hidden_size: usize,
image_size: usize,
patch_size: usize,
num_channels: usize,
) -> Result<Self> {
let class_embedding =
vb.get_with_hints(hidden_size, "class_embedding", Init::Const(0.0))?;
let patch_embedding = get_conv2d(
vb.pp("patch_embedding"),
num_channels,
hidden_size,
patch_size,
0,
patch_size,
1,
1,
false,
)?;
let num_patches = (image_size / patch_size).pow(2);
let num_positions = num_patches + 1;
let position_embedding =
embedding(num_positions, hidden_size, vb.pp("position_embedding"))?;
let position_ids = Tensor::arange(0u32, num_positions as u32, vb.device())?;
let pos_embeds = position_embedding.forward(&position_ids)?;
Ok(Self {
class_embedding,
patch_embedding,
// position_embedding,
// position_ids,
pos_embeds,
embed_dim: hidden_size,
})
}
fn get_abs_pos(&self, tgt_size: usize) -> Result<Tensor> {
// println!("self.pos_embeds: {:?}", self.pos_embeds);
let abs_pos_new = self.pos_embeds.clone();
let (len, dim) = abs_pos_new.dims2()?;
let src_size = ((len - 1) as f32).sqrt() as usize;
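        // tgt_size arrives as 1 + n_patches (cls token included); flooring
        // the square root below still recovers the patch-grid edge n, since
        // floor(sqrt(n^2 + 1)) == n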
let tgt_size = (tgt_size as f32).sqrt() as usize;
let pos_embeds = if src_size != tgt_size {
let cls_token = abs_pos_new.i(0)?.unsqueeze(0)?;
let old_pos_embed = abs_pos_new.i(1..)?;
let old_pos_embed = old_pos_embed
.reshape((1, src_size, src_size, dim))?
.permute((0, 3, 1, 2))?
.contiguous()?;
let new_pos_embed = interpolate_bicubic(
&old_pos_embed,
(tgt_size, tgt_size),
Some(true),
Some(false),
)?;
let new_pos_embed = new_pos_embed
.permute((0, 2, 3, 1))?
.reshape((tgt_size * tgt_size, dim))?;
Tensor::cat(&[cls_token, new_pos_embed], 0)?.unsqueeze(0)?
} else {
self.pos_embeds.clone()
};
Ok(pos_embeds)
}
pub fn forward(&self, pixel_values: &Tensor, patch_embeds: Option<&Tensor>) -> Result<Tensor> {
let bs = pixel_values.dim(0)?;
let patch_embeds = match patch_embeds {
Some(t) => t.clone(),
None => self.patch_embedding.forward(pixel_values)?,
};
let patch_embeds = patch_embeds.flatten(2, D::Minus1)?.transpose(1, 2)?;
let class_embeds = self.class_embedding.expand((bs, 1, self.embed_dim))?;
let embeddings = Tensor::cat(&[class_embeds, patch_embeds], 1)?;
let pos_embeds = self.get_abs_pos(embeddings.dim(1)?)?;
let embeddings = embeddings.broadcast_add(&pos_embeds)?;
Ok(embeddings)
}
}
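// A bookkeeping sketch for `get_abs_pos` above: the stored table has
// 1 + src*src rows (cls token plus a square patch grid), and the target grid
// edge is recovered as floor(sqrt(seq_len)). Because seq_len = 1 + tgt*tgt,
// the extra cls slot does not disturb the recovered edge. Pure arithmetic;
// no tensors involved.
#[cfg(test)]
mod clip_pos_grid_sketch {
    fn grid_edge(seq_len: usize) -> usize {
        (seq_len as f32).sqrt() as usize
    }

    #[test]
    fn cls_token_does_not_shift_the_grid() {
        for tgt in 1usize..=64 {
            assert_eq!(grid_edge(tgt * tgt + 1), tgt);
        }
    }
}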
pub struct NoTPAttention {
num_heads: usize,
head_dim: usize,
qkv_proj: Linear,
out_proj: Linear,
scaling: f64,
}
impl NoTPAttention {
pub fn new(vb: VarBuilder, hidden_size: usize, num_heads: usize) -> Result<Self> {
let qkv_proj = linear(hidden_size, hidden_size * 3, vb.pp("qkv_proj"))?;
let out_proj = linear(hidden_size, hidden_size, vb.pp("out_proj"))?;
let head_dim = hidden_size / num_heads;
let scaling = 1.0 / (head_dim as f64).sqrt();
Ok(Self {
num_heads,
head_dim,
qkv_proj,
out_proj,
scaling,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (bs, seq_len, _) = xs.dims3()?;
let qkv = self.qkv_proj.forward(xs)?;
let qkv = qkv
.reshape((bs, seq_len, 3, self.num_heads, self.head_dim))?
.permute((2, 0, 3, 1, 4))?;
let q = qkv.i(0)?.contiguous()?;
let k = qkv.i(1)?.contiguous()?;
let v = qkv.i(2)?.contiguous()?;
let output = eager_attention_forward(&q, &k, &v, None, None, self.scaling)?;
let output = output.reshape((bs, seq_len, ()))?;
let output = self.out_proj.forward(&output)?;
Ok(output)
}
}
pub struct NoTPFeedForward {
fc1: Linear,
fc2: Linear,
}
impl NoTPFeedForward {
pub fn new(vb: VarBuilder, dim: usize, hidden_dim: usize) -> Result<Self> {
let fc1 = linear(dim, hidden_dim, vb.pp("fc1"))?;
let fc2 = linear(hidden_dim, dim, vb.pp("fc2"))?;
Ok(Self { fc1, fc2 })
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let output = self.fc1.forward(xs)?;
let output = quick_gelu(&output)?;
let output = self.fc2.forward(&output)?;
Ok(output)
}
}
pub struct NoTPTransformerBlock {
self_attn: NoTPAttention,
mlp: NoTPFeedForward,
layer_norm1: LayerNorm,
layer_norm2: LayerNorm,
}
impl NoTPTransformerBlock {
pub fn new(
vb: VarBuilder,
hidden_size: usize,
num_heads: usize,
ffn_hidden_size: usize,
eps: f64,
) -> Result<Self> {
let self_attn = NoTPAttention::new(vb.pp("self_attn"), hidden_size, num_heads)?;
let mlp = NoTPFeedForward::new(vb.pp("mlp"), hidden_size, ffn_hidden_size)?;
let layer_norm1 = get_layer_norm(vb.pp("layer_norm1"), eps, hidden_size)?;
let layer_norm2 = get_layer_norm(vb.pp("layer_norm2"), eps, hidden_size)?;
Ok(Self {
self_attn,
mlp,
layer_norm1,
layer_norm2,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let x = self.layer_norm1.forward(xs)?;
let x = self.self_attn.forward(&x)?;
let res = x.add(xs)?;
let x = self.layer_norm2.forward(&res)?;
let x = self.mlp.forward(&x)?;
let out = x.add(&res)?;
Ok(out)
}
}
pub struct NoTPTransformer {
layers: Vec<NoTPTransformerBlock>,
}
impl NoTPTransformer {
pub fn new(
vb: VarBuilder,
num_layers: usize,
hidden_size: usize,
num_heads: usize,
ffn_hidden_size: usize,
eps: f64,
) -> Result<Self> {
let mut layers = Vec::new();
let vb_layers = vb.pp("layers");
for i in 0..num_layers {
let blocks = NoTPTransformerBlock::new(
vb_layers.pp(i),
hidden_size,
num_heads,
ffn_hidden_size,
eps,
)?;
layers.push(blocks);
}
Ok(Self { layers })
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut x = xs.clone();
for layer in &self.layers {
x = layer.forward(&x)?;
}
Ok(x)
}
}
pub struct VitModel {
embeddings: CLIPVisionEmbeddings,
transformer: NoTPTransformer,
pre_layrnorm: LayerNorm,
}
impl VitModel {
pub fn new(
vb: VarBuilder,
image_size: usize,
patch_size: usize,
num_channels: usize,
num_layers: usize,
hidden_size: usize,
num_heads: usize,
ffn_hidden_size: usize,
eps: f64,
) -> Result<Self> {
let embeddings = CLIPVisionEmbeddings::new(
vb.pp("embeddings"),
hidden_size,
image_size,
patch_size,
num_channels,
)?;
let transformer = NoTPTransformer::new(
vb.pp("transformer"),
num_layers,
hidden_size,
num_heads,
ffn_hidden_size,
eps,
)?;
let pre_layrnorm = get_layer_norm(vb.pp("pre_layrnorm"), eps, hidden_size)?;
Ok(Self {
embeddings,
transformer,
pre_layrnorm,
})
}
pub fn forward(&self, xs: &Tensor, patch_embeds: Option<&Tensor>) -> Result<Tensor> {
let x = self.embeddings.forward(xs, patch_embeds)?;
let hidden_states = self.pre_layrnorm.forward(&x)?;
let output = self.transformer.forward(&hidden_states)?;
Ok(output)
}
}
pub struct MoEGate {
top_k: usize,
// n_routed_experts: usize,
routed_scaling_factor: f64,
scoring_func: String,
// alpha: f32,
// seq_aux: bool,
topk_method: String,
// n_group: usize,
// topk_group: usize,
norm_topk_prob: bool,
// gating_dim: usize,
linear: Linear,
}
impl MoEGate {
pub fn new(vb: VarBuilder, config: &DeepseekV2Config) -> Result<Self> {
let linear = linear_no_bias(config.hidden_size, config.n_routed_experts, vb)?;
Ok(Self {
top_k: config.num_experts_per_tok,
// n_routed_experts: config.n_routed_experts,
routed_scaling_factor: config.routed_scaling_factor,
scoring_func: config.scoring_func.clone(),
// alpha: config.aux_loss_alpha,
// seq_aux: config.seq_aux,
topk_method: config.topk_method.clone(),
// n_group: config.n_group,
// topk_group: config.topk_group,
norm_topk_prob: config.norm_topk_prob,
// gating_dim: config.hidden_size,
linear,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<(Tensor, Tensor)> {
let (_, _, dim) = xs.dims3()?;
let xs = xs.reshape(((), dim))?;
let logits = self
.linear
.forward(&xs)?
.to_dtype(candle_core::DType::F32)?;
let scores = if self.scoring_func == "softmax" {
softmax(&logits, D::Minus1)?
} else if self.scoring_func == "sigmoid" {
sigmoid(&logits)?
} else {
return Err(anyhow::anyhow!(format!(
"insupportable scoring function for MoE gating: {}",
self.scoring_func
)));
};
let (topk_weight, topk_idx) = if self.topk_method == "greedy" {
topk(&scores, self.top_k)?
} else {
return Err(anyhow::anyhow!(format!(
"insupportable topk_method function for MoE gating: {}",
self.topk_method
)));
};
let topk_weight = if self.top_k > 1 && self.norm_topk_prob {
topk_weight
.broadcast_div(&topk_weight.sum_keepdim(D::Minus1)?.affine(1.0, 1e-20)?)?
.affine(self.routed_scaling_factor, 0.0)?
} else {
topk_weight.affine(self.routed_scaling_factor, 0.0)?
};
let topk_weight = topk_weight.to_dtype(xs.dtype())?;
Ok((topk_idx, topk_weight))
}
}
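// A scalar sketch of the "greedy" gating path in `MoEGate::forward` above:
// softmax the router logits, take the top-k, optionally renormalize the kept
// weights to sum to ~1 (with the same 1e-20 guard), then scale by
// routed_scaling_factor. Plain f64 math on a tiny score vector; the logits
// and routed_scaling_factor = 1.0 below are illustrative assumptions.
#[cfg(test)]
mod moe_gate_sketch {
    fn softmax(xs: &[f64]) -> Vec<f64> {
        let m = xs.iter().cloned().fold(f64::MIN, f64::max);
        let exps: Vec<f64> = xs.iter().map(|x| (x - m).exp()).collect();
        let z: f64 = exps.iter().sum();
        exps.iter().map(|e| e / z).collect()
    }

    #[test]
    fn topk_weights_are_renormalized_then_scaled() {
        let scores = softmax(&[2.0, 0.5, 1.0, -1.0]);
        // top-2 by score: experts 0 and 2
        let (idx, raw): (Vec<usize>, Vec<f64>) =
            vec![(0, scores[0]), (2, scores[2])].into_iter().unzip();
        let denom: f64 = raw.iter().sum::<f64>() + 1e-20;
        let routed_scaling_factor = 1.0;
        let weights: Vec<f64> = raw.iter().map(|w| w / denom * routed_scaling_factor).collect();
        assert_eq!(idx, vec![0, 2]);
        assert!((weights.iter().sum::<f64>() - 1.0).abs() < 1e-9);
        assert!(weights[0] > weights[1]);
    }
}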
pub struct DeepseekV2MoE {
// num_experts_per_tok: usize,
// ep_size: usize,
// experts_per_rank: usize,
// ep_rank: usize,
experts: Vec<GateUpDownMLP>,
gate: MoEGate,
shared_experts: GateUpDownMLP,
}
impl DeepseekV2MoE {
pub fn new(vb: VarBuilder, config: &DeepseekV2Config) -> Result<Self> {
// let ep_size = 1;
// let experts_per_rank = config.n_routed_experts;
// let ep_rank = 0;
let mut experts = Vec::new();
let vb_experts = vb.pp("experts");
for i in 0..config.n_routed_experts {
let mlp = GateUpDownMLP::new(
vb_experts.pp(i),
config.hidden_size,
config.moe_intermediate_size,
Activation::Silu,
false,
)?;
experts.push(mlp);
}
let gate = MoEGate::new(vb.pp("gate"), config)?;
let shared_experts = GateUpDownMLP::new(
vb.pp("shared_experts"),
config.hidden_size,
config.moe_intermediate_size * config.n_shared_experts,
Activation::Silu,
false,
)?;
Ok(Self {
// num_experts_per_tok: config.num_experts_per_tok,
// ep_size,
// experts_per_rank,
// ep_rank,
experts,
gate,
shared_experts,
})
}
fn moe_infer(&self, xs: &Tensor, topk_idx: &Tensor, topk_weight: &Tensor) -> Result<Tensor> {
let expert_mask = onehot(topk_idx, self.experts.len())?
.permute((2, 1, 0))?
.to_dtype(candle_core::DType::U32)?;
let expert_hit = expert_mask.sum((D::Minus1, D::Minus2))?;
let expert_hit_vec = expert_hit.to_vec1::<u32>()?;
let expert_hit_vec: Vec<usize> = expert_hit_vec
.iter()
.enumerate()
.filter_map(|(i, &val)| if val > 0 { Some(i) } else { None })
.collect();
let mut final_xs = xs.zeros_like()?;
for i in expert_hit_vec {
let expert = &self.experts[i];
let tokens = expert_mask.i(i)?;
let (topk_id, token_id) = nonzero(&tokens)?;
let token_id_tensor = Tensor::new(token_id.as_slice(), xs.device())?;
let select_tokens = xs.index_select(&token_id_tensor, 0)?;
let select_xs = expert.forward(&select_tokens)?;
let select_weight = topk_weight.index_select(&token_id_tensor, 0)?.gather(
&Tensor::new(topk_id.as_slice(), xs.device())?.unsqueeze(D::Minus1)?,
D::Minus1,
)?;
let select_xs = select_xs.broadcast_mul(&select_weight)?;
final_xs = final_xs.index_add(&token_id_tensor, &select_xs, 0)?;
}
Ok(final_xs)
}
}
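// A plain-Rust sketch of the dispatch performed by `moe_infer` above: the
// one-hot / permute / nonzero dance boils down to "for each expert with at
// least one hit, collect the (token, slot) pairs routed to it, run the
// expert on those tokens, and scatter-add the weighted outputs back into the
// original rows". Here the "expert" is just a scalar function so only the
// routing logic itself is checked; all values are illustrative.
#[cfg(test)]
mod moe_dispatch_sketch {
    #[test]
    fn tokens_are_scattered_back_to_their_rows() {
        // topk_idx[token][slot] = expert id, topk_weight mirrors the shape
        let topk_idx = [[0usize, 2], [1, 2]];
        let topk_weight = [[0.7f64, 0.3], [0.6, 0.4]];
        let xs = [1.0f64, 10.0]; // one scalar "hidden state" per token
        // each expert multiplies its input by (expert id + 1)
        let expert = |e: usize, x: f64| (e as f64 + 1.0) * x;

        let mut out = [0.0f64; 2];
        for e in 0..3 {
            for (tok, slots) in topk_idx.iter().enumerate() {
                for (slot, &idx) in slots.iter().enumerate() {
                    if idx == e {
                        out[tok] += topk_weight[tok][slot] * expert(e, xs[tok]);
                    }
                }
            }
        }
        // token 0: 0.7 * (1 * 1.0) + 0.3 * (3 * 1.0) = 1.6
        // token 1: 0.6 * (2 * 10.0) + 0.4 * (3 * 10.0) = 24.0
        assert!((out[0] - 1.6).abs() < 1e-12);
        assert!((out[1] - 24.0).abs() < 1e-12);
    }
}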
impl Module for DeepseekV2MoE {
fn forward(&self, xs: &Tensor) -> candle_core::Result<Tensor> {
let identity = xs.clone();
let (bs, seq_len, embedding_dim) = xs.dims3()?;
let (topk_idx, topk_weight) = self
.gate
.forward(xs)
.map_err(|e| candle_core::Error::Msg(format!("{e}")))?;
let xs = xs.reshape((bs * seq_len, embedding_dim))?;
let xs = self
.moe_infer(&xs, &topk_idx, &topk_weight)
.map_err(|e| candle_core::Error::Msg(format!("{e}")))?;
let xs = xs.reshape((bs, seq_len, embedding_dim))?;
let xs_shared_experts = self.shared_experts.forward(&identity)?;
let xs = xs.add(&xs_shared_experts)?;
Ok(xs)
}
}
pub enum DeepseekV2Proj {
MOE(DeepseekV2MoE),
MLP(GateUpDownMLP),
}
impl DeepseekV2Proj {
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
DeepseekV2Proj::MLP(model) => {
let xs = model.forward(xs)?;
Ok(xs)
}
DeepseekV2Proj::MOE(model) => {
let xs = model.forward(xs)?;
Ok(xs)
}
}
}
}
pub struct DeepseekV2DecoderLayer {
self_attn: NaiveAttention,
mlp: DeepseekV2Proj,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DeepseekV2DecoderLayer {
pub fn new(vb: VarBuilder, config: &DeepseekV2Config, layer_id: usize) -> Result<Self> {
let self_attn = NaiveAttention::new(
vb.pp("self_attn"),
config.hidden_size,
config.num_attention_heads,
config.num_key_value_heads,
None,
false,
None,
)?;
let mlp = if layer_id >= config.first_k_dense_replace
&& layer_id.is_multiple_of(config.moe_layer_freq)
{
DeepseekV2Proj::MOE(DeepseekV2MoE::new(vb.pp("mlp"), config)?)
} else {
DeepseekV2Proj::MLP(GateUpDownMLP::new(
vb.pp("mlp"),
config.hidden_size,
config.intermediate_size,
Activation::Silu,
false,
)?)
};
let input_layernorm = rms_norm(
config.hidden_size,
config.rms_norm_eps,
vb.pp("input_layernorm"),
)?;
let post_attention_layernorm = rms_norm(
config.hidden_size,
config.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
pub fn forward(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let residual = xs.clone();
let xs = self.input_layernorm.forward(xs)?;
let xs = self
.self_attn
.forward_with_cache(&xs, cos, sin, attention_mask, false)?;
let residual = residual.add(&xs)?;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | true |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/deepseek_ocr/mod.rs | src/models/deepseek_ocr/mod.rs | pub mod config;
pub mod generate;
pub mod model;
pub mod processor;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/deepseek_ocr/generate.rs | src/models/deepseek_ocr/generate.rs | use aha_openai_dive::v1::resources::chat::{
ChatCompletionChunkResponse, ChatCompletionParameters, ChatCompletionResponse,
};
use anyhow::{Result, anyhow};
use candle_core::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use rocket::async_stream::stream;
use rocket::futures::Stream;
use crate::{
models::{
GenerateModel,
deepseek_ocr::{
config::DeepseekOCRConfig, model::DeepseekOCRModel, processor::DeepseekOCRProcessor,
},
},
tokenizer::TokenizerModel,
utils::{
build_completion_chunk_response, build_completion_response, find_type_files, get_device,
get_dtype, get_logit_processor,
},
};
pub struct DeepseekOCRGenerateModel {
tokenizer: TokenizerModel,
processor: DeepseekOCRProcessor,
deepseekocr_model: DeepseekOCRModel,
bos_token_id: u32,
eos_token_id: u32,
device: Device,
size: Vec<u32>,
model_name: String,
}
impl DeepseekOCRGenerateModel {
pub fn init(path: &str, device: Option<&Device>, dtype: Option<DType>) -> Result<Self> {
let tokenizer = TokenizerModel::init(path)?;
let config_path = path.to_string() + "/config.json";
let cfg: DeepseekOCRConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
let cfg_dtype = cfg.language_config.torch_dtype.clone();
let device = &get_device(device);
let dtype = get_dtype(dtype, &cfg_dtype);
let processor = DeepseekOCRProcessor::new(device, dtype)?;
let eos_token_id = cfg.eos_token_id;
let bos_token_id = cfg.bos_token_id;
let model_list = find_type_files(path, "safetensors")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&model_list, dtype, device)? };
let deepseekocr_model = DeepseekOCRModel::new(vb, cfg)?;
let size = vec![512u32, 640, 1024, 1280];
Ok(Self {
tokenizer,
processor,
deepseekocr_model,
bos_token_id,
eos_token_id,
device: device.clone(),
size,
model_name: "deepseek-ocr".to_string(),
})
}
}
impl GenerateModel for DeepseekOCRGenerateModel {
fn generate(&mut self, mes: ChatCompletionParameters) -> Result<ChatCompletionResponse> {
let base_size = if let Some(map) = &mes.metadata
&& map.contains_key("base_size")
{
let size = map.get("base_size").unwrap();
let size = size.parse::<u32>().unwrap_or(640);
if self.size.contains(&size) { size } else { 640 }
} else {
640
};
let image_size = if let Some(map) = &mes.metadata
&& map.contains_key("image_size")
{
let size = map.get("image_size").unwrap();
let size = size.parse::<u32>().unwrap_or(640);
if self.size.contains(&size) { size } else { 640 }
} else {
640
};
let crop_mode = if let Some(map) = &mes.metadata
&& map.contains_key("crop_mode")
{
let size = map.get("crop_mode").unwrap();
size.parse::<bool>().unwrap_or(false)
} else {
false
};
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor = get_logit_processor(mes.temperature, mes.top_p, None, seed);
let (mut input_ids, images_ori, image_crop, images_seq_mask, images_spatial_crop_t) = self
.processor
.process_info(&mes, &self.tokenizer, base_size, image_size, crop_mode)?;
let mut images_ori = Some(&images_ori);
let mut image_crop = Some(&image_crop);
let mut images_seq_mask = Some(&images_seq_mask);
let mut images_spatial_crop_t = Some(&images_spatial_crop_t);
let mut seqlen_offset = 0;
let mut seq_len = input_ids.dim(1)?;
let mut generate = Vec::new();
let sample_len = mes.max_tokens.unwrap_or(1024);
for _ in 0..sample_len {
let logits = self.deepseekocr_model.forward(
&input_ids,
images_ori,
image_crop,
images_seq_mask,
images_spatial_crop_t,
seqlen_offset,
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
generate.push(next_token);
if next_token == self.bos_token_id || next_token == self.eos_token_id {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
images_ori = None;
image_crop = None;
images_seq_mask = None;
images_spatial_crop_t = None;
}
let num_token = generate.len() as u32;
let res = self.tokenizer.token_decode(generate)?;
self.deepseekocr_model.clear_kv_cache();
let response = build_completion_response(res, &self.model_name, Some(num_token));
Ok(response)
}
fn generate_stream(
&mut self,
mes: ChatCompletionParameters,
) -> Result<
Box<
dyn Stream<Item = Result<ChatCompletionChunkResponse, anyhow::Error>>
+ Send
+ Unpin
+ '_,
>,
> {
let base_size = if let Some(map) = &mes.metadata
&& map.contains_key("base_size")
{
let size = map.get("base_size").unwrap();
let size = size.parse::<u32>().unwrap_or(640);
if self.size.contains(&size) { size } else { 640 }
} else {
640
};
let image_size = if let Some(map) = &mes.metadata
&& map.contains_key("image_size")
{
let size = map.get("image_size").unwrap();
let size = size.parse::<u32>().unwrap_or(640);
if self.size.contains(&size) { size } else { 640 }
} else {
640
};
let crop_mode = if let Some(map) = &mes.metadata
&& map.contains_key("crop_mode")
{
let size = map.get("crop_mode").unwrap();
size.parse::<bool>().unwrap_or(false)
} else {
false
};
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor = get_logit_processor(mes.temperature, mes.top_p, None, seed);
let (mut input_ids, images_ori, image_crop, images_seq_mask, images_spatial_crop_t) = self
.processor
.process_info(&mes, &self.tokenizer, base_size, image_size, crop_mode)?;
let mut seqlen_offset = 0;
let mut seq_len = input_ids.dim(1)?;
let sample_len = mes.max_tokens.unwrap_or(1024);
let stream = stream! {
let mut error_tokens = Vec::new();
let mut images_ori = Some(&images_ori);
let mut image_crop = Some(&image_crop);
let mut images_seq_mask = Some(&images_seq_mask);
let mut images_spatial_crop_t = Some(&images_spatial_crop_t);
for _ in 0..sample_len {
let logits = self.deepseekocr_model.forward(
&input_ids,
images_ori,
image_crop,
images_seq_mask,
images_spatial_crop_t,
seqlen_offset,
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
let mut decode_ids = Vec::new();
if !error_tokens.is_empty() {
decode_ids.extend_from_slice(&error_tokens);
}
decode_ids.push(next_token);
let decoded_token = self.tokenizer.token_decode(decode_ids).map_err(|e| anyhow!(format!("stream decode error{e}")))?;
if decoded_token.contains("�") {
error_tokens.push(next_token);
if error_tokens.len() > 3 {
error_tokens.clear();
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
images_ori = None;
image_crop = None;
images_seq_mask = None;
images_spatial_crop_t = None;
continue;
}
error_tokens.clear();
let chunk = build_completion_chunk_response(decoded_token, &self.model_name, None, None);
yield Ok(chunk);
if next_token == self.bos_token_id || next_token == self.eos_token_id {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
images_ori = None;
image_crop = None;
images_seq_mask = None;
images_spatial_crop_t = None;
}
self.deepseekocr_model.clear_kv_cache();
};
Ok(Box::new(Box::pin(stream)))
}
}
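// A string-level sketch of the incremental-decode buffering used in
// `generate_stream` above: when a token boundary splits a multi-byte UTF-8
// character, a lossy decode yields U+FFFD, so the partial token ids are held
// back and re-decoded together with the next token. Here raw byte chunks
// stand in for token ids; the real loop additionally caps the buffer at 3
// entries before giving up.
#[cfg(test)]
mod stream_decode_sketch {
    #[test]
    fn partial_utf8_is_held_back_until_complete() {
        // "你" is three bytes; split it across two "tokens"
        let bytes = "你".as_bytes();
        let (first, second) = bytes.split_at(1);

        let partial = String::from_utf8_lossy(first);
        assert!(partial.contains('\u{FFFD}')); // would be buffered, not yielded

        let mut buffered = first.to_vec();
        buffered.extend_from_slice(second);
        let complete = String::from_utf8_lossy(&buffered);
        assert_eq!(complete, "你"); // buffer flushed, chunk yielded
    }
}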
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/minicpm4/config.rs | src/models/minicpm4/config.rs | use candle_nn::Activation;
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct RopeScalingConfig {
pub rope_type: String,
pub long_factor: Vec<f32>,
pub short_factor: Vec<f32>,
pub original_max_position_embeddings: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct MiniCPM4Config {
pub bos_token_id: u32,
pub eos_token_id: Vec<u32>,
pub hidden_act: Activation,
pub hidden_size: usize,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_scaling: RopeScalingConfig,
pub torch_dtype: String,
pub vocab_size: usize,
pub scale_emb: f64,
pub dim_model_base: usize,
pub scale_depth: f32,
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/minicpm4/model.rs | src/models/minicpm4/model.rs | use anyhow::{Ok, Result};
use candle_core::{D, Device, Tensor};
use candle_nn::{Embedding, Linear, Module, RmsNorm, VarBuilder, embedding, rms_norm};
use crate::{
models::{
common::{GateUpDownMLP, NaiveAttention},
minicpm4::config::MiniCPM4Config,
},
position_embed::rope::compute_default_rope_parameters,
utils::tensor_utils::prepare_causal_attention_mask,
};
pub struct MiniCPMLongRoPE {
short_factor: Vec<f32>,
long_factor: Vec<f32>,
original_max_position_embeddings: usize,
max_seq_len_cached: usize,
scaling_factor: f64,
inv_freq: Tensor,
cos_cached: Tensor,
sin_cached: Tensor,
device: Device,
}
impl MiniCPMLongRoPE {
pub fn new(cfg: &MiniCPM4Config, device: &Device) -> Result<Self> {
let head_dim = cfg.hidden_size / cfg.num_attention_heads;
let rope_theta = 10000.0;
let short_factor = cfg.rope_scaling.short_factor.clone();
        let long_factor = cfg.rope_scaling.long_factor.clone();
let original_max_position_embeddings = cfg.rope_scaling.original_max_position_embeddings;
let max_position_embeddings = cfg.max_position_embeddings;
let scale = max_position_embeddings as f64 / original_max_position_embeddings as f64;
let scaling_factor =
(1.0 + scale.ln() / (original_max_position_embeddings as f64).ln()).sqrt();
let inv_freq = compute_default_rope_parameters(head_dim, rope_theta);
let inv_freq = Tensor::from_slice(&inv_freq, (1, inv_freq.len()), device)?;
let max_seq_len_cached = max_position_embeddings;
let t = Tensor::arange(0.0_f32, max_position_embeddings as f32, device)?
.reshape((max_position_embeddings, 1))?;
// short_factor.len() = 32
// head_dim = 1024 / 16 = 64, inv_freq.len() = 32
let ext_factors = Tensor::from_slice(&short_factor, (1, short_factor.len()), device)?;
let ext_factors = Tensor::ones_like(&ext_factors)?.div(&ext_factors)?;
        // (seq_len, 1) matmul (1, 32) -> (seq_len, 32); broadcast_mul (1, 32) -> (seq_len, 32)
let freqs = t.matmul(&ext_factors)?.broadcast_mul(&inv_freq)?;
let emb = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
let cos_cached = emb.cos()?.affine(scaling_factor, 0.0)?;
let sin_cached = emb.sin()?.affine(scaling_factor, 0.0)?;
Ok(Self {
short_factor,
long_factor,
original_max_position_embeddings,
max_seq_len_cached,
scaling_factor,
inv_freq,
cos_cached,
sin_cached,
device: device.clone(),
})
}
pub fn update_cos_sin_cache(&mut self, seqlen: usize) -> Result<()> {
self.max_seq_len_cached = seqlen;
let t = Tensor::arange(0.0_f32, seqlen as f32, &self.device)?.reshape((seqlen, 1))?;
let mut ext_factors = Tensor::from_slice(
&self.short_factor,
(1, self.short_factor.len()),
&self.device,
)?;
if seqlen > self.original_max_position_embeddings {
ext_factors =
Tensor::from_slice(&self.long_factor, (1, self.long_factor.len()), &self.device)?;
}
let ext_factors = Tensor::ones_like(&ext_factors)?.div(&ext_factors)?;
let freqs = t.matmul(&ext_factors)?.broadcast_mul(&self.inv_freq)?;
let emb = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
let cos_cached = emb.cos()?.affine(self.scaling_factor, 0.0)?;
let sin_cached = emb.sin()?.affine(self.scaling_factor, 0.0)?;
self.cos_cached = cos_cached;
self.sin_cached = sin_cached;
Ok(())
}
pub fn forward(&mut self, pos_offset: usize, seqlen: usize) -> Result<(Tensor, Tensor)> {
if pos_offset + seqlen > self.max_seq_len_cached {
self.update_cos_sin_cache(pos_offset + seqlen)?;
}
let cos = self.cos_cached.narrow(0, pos_offset, seqlen)?;
let sin = self.sin_cached.narrow(0, pos_offset, seqlen)?;
Ok((cos, sin))
}
}
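// A scalar check of the LongRoPE attention-scaling factor computed in
// `MiniCPMLongRoPE::new` above: sqrt(1 + ln(max/orig) / ln(orig)), applied
// to both cos and sin caches. The concrete lengths (32768 extended from
// 4096) are illustrative assumptions, not values read from a config.
#[cfg(test)]
mod longrope_scale_sketch {
    #[test]
    fn scaling_factor_formula() {
        let (max_pos, orig) = (32768.0f64, 4096.0f64);
        let scale = max_pos / orig; // 8x context extension
        let factor = (1.0 + scale.ln() / orig.ln()).sqrt();
        // ln 8 / ln 4096 = 3/12 = 0.25, so factor = sqrt(1.25)
        assert!((factor - 1.25f64.sqrt()).abs() < 1e-12);
        assert!(factor > 1.0); // caches are scaled up slightly
    }
}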
pub struct MiniCPMDecoderLayer {
self_attn: NaiveAttention,
mlp: GateUpDownMLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
scale_depth: f32,
num_hidden_layers: usize,
}
impl MiniCPMDecoderLayer {
pub fn new(vb: VarBuilder, cfg: &MiniCPM4Config) -> Result<Self> {
let self_attn = NaiveAttention::new(
vb.pp("self_attn"),
cfg.hidden_size,
cfg.num_attention_heads,
cfg.num_key_value_heads,
None,
false,
None,
)?;
let mlp = GateUpDownMLP::new(
vb.pp("mlp"),
cfg.hidden_size,
cfg.intermediate_size,
cfg.hidden_act,
false,
)?;
let input_layernorm =
rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = rms_norm(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
scale_depth: cfg.scale_depth,
num_hidden_layers: cfg.num_hidden_layers,
})
}
pub fn forward(
&self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let residual = xs.clone();
let xs = self.input_layernorm.forward(xs)?;
let xs = self
.self_attn
.forward(&xs, Some(cos), Some(sin), attention_mask, true)?;
let xs = (residual
+ xs.affine(
self.scale_depth as f64 / (self.num_hidden_layers as f64).sqrt(),
0.0,
))?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
let xs = (residual
+ xs.affine(
self.scale_depth as f64 / (self.num_hidden_layers as f64).sqrt(),
0.0,
)?)?;
Ok(xs)
}
pub fn forward_with_cache(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let residual = xs.clone();
let xs = self.input_layernorm.forward(xs)?;
let xs = self
.self_attn
.forward_with_cache(&xs, cos, sin, attention_mask, true)?;
let xs = (residual
+ xs.affine(
self.scale_depth as f64 / (self.num_hidden_layers as f64).sqrt(),
0.0,
))?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
let xs = (residual
+ xs.affine(
self.scale_depth as f64 / (self.num_hidden_layers as f64).sqrt(),
0.0,
)?)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache();
}
}
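// A scalar check of the residual scaling applied twice per decoder layer
// above: each branch output is multiplied by scale_depth / sqrt(num_layers)
// before being added back to the residual. The concrete values
// (scale_depth = 1.4, 32 layers) are illustrative assumptions.
#[cfg(test)]
mod depth_scale_sketch {
    #[test]
    fn residual_branch_is_damped() {
        let (scale_depth, num_layers) = (1.4f64, 32.0f64);
        let factor = scale_depth / num_layers.sqrt();
        assert!((factor - 0.24748737341529164).abs() < 1e-12);
        assert!(factor < 1.0); // branches are damped, stabilizing deep stacks
    }
}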
pub struct MiniCPMModel {
cfg: MiniCPM4Config,
embed_tokens: Embedding,
layers: Vec<MiniCPMDecoderLayer>,
norm: RmsNorm,
rope_emb: MiniCPMLongRoPE,
lm_head: Linear,
}
impl MiniCPMModel {
pub fn new(vb: VarBuilder, cfg: MiniCPM4Config) -> Result<Self> {
let vb = vb.pp("model");
let embed_tokens = embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?;
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_layers = vb.pp("layers");
for i in 0..cfg.num_hidden_layers {
let layer = MiniCPMDecoderLayer::new(vb_layers.pp(i), &cfg)?;
layers.push(layer);
}
let norm = rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("norm"))?;
let rope_emb = MiniCPMLongRoPE::new(&cfg, vb.device())?;
let lm_head = Linear::new(embed_tokens.embeddings().clone(), None);
Ok(Self {
cfg,
embed_tokens,
layers,
norm,
rope_emb,
lm_head,
})
}
pub fn forward(&mut self, input_ids: &Tensor, position_id: usize) -> Result<Tensor> {
let (bs, seq_len) = input_ids.dims2()?;
let input_embeds = self
.embed_tokens
.forward(input_ids)?
.affine(self.cfg.scale_emb, 0.0)?;
let attention_mask: Option<Tensor> = {
if seq_len <= 1 {
None
} else {
Some(prepare_causal_attention_mask(
bs,
seq_len,
0,
input_ids.device(),
)?)
}
};
let (cos, sin) = self.rope_emb.forward(position_id, seq_len)?;
let mut hidden_states = input_embeds;
for decode_layer in &self.layers {
hidden_states =
decode_layer.forward(&hidden_states, &cos, &sin, attention_mask.as_ref())?;
}
hidden_states = self.norm.forward(&hidden_states)?;
let hidden_state = hidden_states.narrow(1, seq_len - 1, 1)?;
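        // output logits are scaled down by hidden_size / dim_model_base;
        // note this is integer division, which only matches the intended
        // ratio when hidden_size is an exact multiple of dim_model_base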
let hidden_state = hidden_state.affine(
1.0 / (self.cfg.hidden_size / self.cfg.dim_model_base) as f64,
0.0,
)?;
let logits = self.lm_head.forward(&hidden_state)?;
Ok(logits)
}
pub fn forward_with_cache(&mut self, input_ids: &Tensor, position_id: usize) -> Result<Tensor> {
let (bs, seq_len) = input_ids.dims2()?;
let input_embeds = self
.embed_tokens
.forward(input_ids)?
.affine(self.cfg.scale_emb, 0.0)?;
let attention_mask: Option<Tensor> = {
if seq_len <= 1 {
None
} else {
Some(prepare_causal_attention_mask(
bs,
seq_len,
0,
input_ids.device(),
)?)
}
};
let (cos, sin) = self.rope_emb.forward(position_id, seq_len)?;
let mut hidden_states = input_embeds;
for decode_layer in &mut self.layers {
hidden_states = decode_layer.forward_with_cache(
&hidden_states,
&cos,
&sin,
attention_mask.as_ref(),
)?;
}
hidden_states = self.norm.forward(&hidden_states)?;
let hidden_state = hidden_states.narrow(1, seq_len - 1, 1)?;
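        // same output scaling as in `forward`; the integer division assumes
        // hidden_size is a multiple of dim_model_base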
let hidden_state = hidden_state.affine(
1.0 / (self.cfg.hidden_size / self.cfg.dim_model_base) as f64,
0.0,
)?;
let logits = self.lm_head.forward(&hidden_state)?;
Ok(logits)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/minicpm4/mod.rs | src/models/minicpm4/mod.rs | pub mod config;
pub mod generate;
pub mod model;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/minicpm4/generate.rs | src/models/minicpm4/generate.rs | use aha_openai_dive::v1::resources::chat::{
ChatCompletionChunkResponse, ChatCompletionParameters, ChatCompletionResponse,
};
use anyhow::{Result, anyhow};
use candle_core::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use rocket::async_stream::stream;
use rocket::futures::Stream;
use crate::models::minicpm4::config::MiniCPM4Config;
use crate::models::minicpm4::model::MiniCPMModel;
// use crate::models::GenerateStream;
use crate::utils::{
build_completion_chunk_response, build_completion_response, find_type_files, get_device,
get_dtype, get_logit_processor,
};
use crate::{chat_template::ChatTemplate, models::GenerateModel, tokenizer::TokenizerModel};
pub struct MiniCPMGenerateModel<'a> {
chat_template: ChatTemplate<'a>,
tokenizer: TokenizerModel,
minicpm: MiniCPMModel,
device: Device,
endoftext_id: u32,
im_end_id: u32,
model_name: String,
}
impl<'a> MiniCPMGenerateModel<'a> {
pub fn init(path: &str, device: Option<&Device>, dtype: Option<DType>) -> Result<Self> {
let chat_template = ChatTemplate::init(path)?;
let tokenizer = TokenizerModel::init(path)?;
let config_path = path.to_string() + "/config.json";
let cfg: MiniCPM4Config = serde_json::from_slice(&std::fs::read(config_path)?)?;
let device = &get_device(device);
let cfg_dtype = cfg.torch_dtype.as_str();
let dtype = get_dtype(dtype, cfg_dtype);
let endoftext_id = cfg.eos_token_id[0];
let im_end_id = cfg.eos_token_id[1];
let model_list = find_type_files(path, "safetensors")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&model_list, dtype, device)? };
let minicpm = MiniCPMModel::new(vb, cfg)?;
Ok(MiniCPMGenerateModel {
chat_template,
tokenizer,
minicpm,
device: device.clone(),
endoftext_id,
im_end_id,
model_name: "minicpm4".to_string(),
})
}
}
impl<'a> GenerateModel for MiniCPMGenerateModel<'a> {
fn generate(&mut self, mes: ChatCompletionParameters) -> Result<ChatCompletionResponse> {
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor = get_logit_processor(mes.temperature, mes.top_p, None, seed);
let mes_render = self.chat_template.apply_chat_template(&mes)?;
let mut input_ids = self.tokenizer.text_encode(mes_render, &self.device)?;
let mut seq_len = input_ids.dim(1)?;
let mut seqlen_offset = 0;
let mut generate = Vec::new();
let sample_len = mes.max_tokens.unwrap_or(2048);
for _ in 0..sample_len {
let logits = self.minicpm.forward_with_cache(&input_ids, seqlen_offset)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
generate.push(next_token);
if next_token == self.endoftext_id || next_token == self.im_end_id {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
}
let num_token = generate.len() as u32;
let res = self.tokenizer.token_decode(generate)?;
self.minicpm.clear_kv_cache();
let response = build_completion_response(res, &self.model_name, Some(num_token));
Ok(response)
}
fn generate_stream(
&mut self,
mes: ChatCompletionParameters,
) -> Result<
Box<
dyn Stream<Item = Result<ChatCompletionChunkResponse, anyhow::Error>>
+ Send
+ Unpin
+ '_,
>,
> {
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor = get_logit_processor(mes.temperature, mes.top_p, None, seed);
let mes_render = self.chat_template.apply_chat_template(&mes)?;
let mut input_ids = self.tokenizer.text_encode(mes_render, &self.device)?;
let mut seq_len = input_ids.dim(1)?;
let mut seqlen_offset = 0;
let sample_len = mes.max_tokens.unwrap_or(512);
let stream = stream! {
let mut error_tokens = Vec::new();
for _ in 0..sample_len {
let logits = self.minicpm.forward_with_cache(
&input_ids,
seqlen_offset,
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
let mut decode_ids = Vec::new();
if !error_tokens.is_empty(){
decode_ids.extend_from_slice(&error_tokens);
}
decode_ids.push(next_token);
let decoded_token = self.tokenizer.token_decode(decode_ids).map_err(|e| anyhow!(format!("stream decode error{e}")))?;
if decoded_token.contains("�") {
error_tokens.push(next_token);
if error_tokens.len() > 3 {
error_tokens.clear();
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
continue;
}
error_tokens.clear();
let chunk = build_completion_chunk_response(decoded_token, &self.model_name, None, None);
yield Ok(chunk);
if next_token == self.endoftext_id || next_token == self.im_end_id {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
}
self.minicpm.clear_kv_cache();
};
Ok(Box::new(Box::pin(stream)))
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/hunyuan_ocr/config.rs | src/models/hunyuan_ocr/config.rs | use candle_nn::Activation;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HunYuanVLConfig {
pub attention_bias: bool,
pub attention_dropout: f64,
pub attention_head_dim: usize,
pub bos_token_id: u32,
pub eod_token_id: u32,
pub eos_token_id: u32,
pub head_dim: usize,
pub hidden_act: Activation,
pub hidden_size: usize,
pub image_start_token_id: u32,
pub image_end_token_id: u32,
pub image_token_id: u32,
pub image_newline_token_id: u32,
pub initializer_range: f64,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub mlp_bias: bool,
pub norm_type: String,
pub num_attention_heads: usize,
pub num_experts: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub org_vocab_size: usize,
pub pad_id: i32,
pub pad_token_id: i32,
pub pretraining_tp: i32,
pub rms_norm_eps: f64,
pub rope_scaling: HunYuanVLRopeScaling,
pub rope_theta: f64,
pub routed_scaling_factor: f64,
pub sep_token_id: u32,
pub text_end_id: u32,
pub text_start_id: u32,
pub tie_word_embeddings: bool,
pub dtype: String,
pub use_cache: bool,
pub use_qk_norm: bool,
pub use_cla: bool,
pub vision_config: HunYuanVLVisionConfig,
pub vocab_size: usize,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HunYuanVLRopeScaling {
pub alpha: f64,
pub beta_fast: i32,
pub beta_slow: i32,
pub factor: f64,
pub mscale: f64,
pub mscale_all_dim: f64,
#[serde(rename = "type")]
pub type_field: String,
pub xdrope_section: Vec<usize>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HunYuanVLVisionConfig {
pub add_patchemb_bias: bool,
pub attention_dropout: f64,
pub cat_extra_token: i32,
pub hidden_act: Activation,
pub hidden_dropout: f64,
pub hidden_size: usize,
pub img_max_token_num: usize,
pub intermediate_size: usize,
pub interpolate_mode: String,
pub max_image_size: usize,
pub max_vit_seq_len: usize,
pub num_attention_heads: usize,
pub num_channels: usize,
pub num_hidden_layers: usize,
pub out_hidden_size: usize,
pub patch_size: usize,
pub rms_norm_eps: f64,
pub spatial_merge_size: usize,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct HunyuanOCRGenerationConfig {
pub bos_token_id: usize,
pub pad_token_id: usize,
pub do_sample: bool,
pub eos_token_id: Vec<usize>,
pub top_p: f32,
pub top_k: usize,
pub temperature: f32,
pub repetition_penalty: f32,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct HunyuanOCRPreprocessorConfig {
pub min_pixels: usize,
pub max_pixels: usize,
pub patch_size: usize,
pub resample: usize,
pub temporal_patch_size: usize,
pub merge_size: usize,
pub image_mean: Vec<f32>,
pub image_std: Vec<f32>,
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/hunyuan_ocr/processor.rs | src/models/hunyuan_ocr/processor.rs | use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
use candle_core::{DType, Device, IndexOp, Shape, Tensor};
use image::DynamicImage;
use crate::{
models::hunyuan_ocr::config::HunyuanOCRPreprocessorConfig,
tokenizer::TokenizerModel,
utils::{
img_utils::{extract_images, img_smart_resize, img_transform},
tensor_utils::{get_eq_indices, get_equal_mask},
},
};
pub struct HunyuanVLProcessor {
image_token_id: u32,
image_token: String,
placeholder_token: String,
process_cfg: HunyuanOCRPreprocessorConfig,
device: Device,
dtype: DType,
}
impl HunyuanVLProcessor {
pub fn new(path: &str, device: &Device, dtype: DType) -> Result<Self> {
let path = path.to_string();
assert!(
std::path::Path::new(&path).exists(),
"model path file not exists"
);
let process_cfg_file = path.clone() + "/preprocessor_config.json";
assert!(
std::path::Path::new(&process_cfg_file).exists(),
"preprocessor_config.json not exists in model path"
);
let process_cfg: HunyuanOCRPreprocessorConfig =
serde_json::from_slice(&std::fs::read(process_cfg_file)?)?;
let image_token_id = 120120u32;
let image_token = "<|hy_place▁holder▁no▁102|>".to_string();
let placeholder_token = "<|hy_place▁holder▁no▁799|>".to_string();
// let pad_id = 120002u32;
Ok(Self {
image_token_id,
image_token,
placeholder_token,
process_cfg,
device: device.clone(),
dtype,
})
}
pub fn process_img(
&self,
img: &DynamicImage,
img_mean: &Tensor,
img_std: &Tensor,
) -> Result<Tensor> {
let img_h = img.height();
let img_w = img.width();
        // resize h and w to multiples of patch_size * merge_size
let (resize_h, resize_w) = img_smart_resize(
img_h,
img_w,
(self.process_cfg.patch_size * self.process_cfg.merge_size) as u32,
self.process_cfg.min_pixels as u32,
self.process_cfg.max_pixels as u32,
)?;
let img = img.resize_exact(resize_w, resize_h, image::imageops::FilterType::CatmullRom);
let img_tensor = img_transform(&img, img_mean, img_std, &self.device, self.dtype)?;
// (c, h, w) => (1, c, h, w)
let img_tensor = img_tensor.unsqueeze(0)?;
Ok(img_tensor)
}
pub fn process_vision_tensor(&self, img_tensor: &Tensor) -> Result<(Tensor, Tensor)> {
let channel = img_tensor.dim(1)?;
        // img_tensor.dim(0) = 1 and temporal_patch_size = 1, so grid_t = 1
let grid_t = img_tensor.dim(0)? / self.process_cfg.temporal_patch_size;
let grid_h = img_tensor.dim(2)? / self.process_cfg.patch_size;
let grid_w = img_tensor.dim(3)? / self.process_cfg.patch_size;
let shape = Shape::from(vec![
grid_t,
channel,
grid_h / self.process_cfg.merge_size,
self.process_cfg.merge_size,
self.process_cfg.patch_size,
grid_w / self.process_cfg.merge_size,
self.process_cfg.merge_size,
self.process_cfg.patch_size,
]);
let img_tensor = img_tensor.reshape(shape)?;
        // permute to (grid_t, grid_h / merge_size, merge_size,
        //             grid_w / merge_size, merge_size,
        //             channel, patch_size, patch_size)
let img_tensor = img_tensor.permute(vec![0, 2, 3, 5, 6, 1, 4, 7])?;
let img_tensor = img_tensor
.reshape((
grid_t * grid_h * grid_w,
channel * self.process_cfg.patch_size * self.process_cfg.patch_size,
))?
.contiguous()?;
let grid_thw = Tensor::from_vec(
vec![grid_t as u32, grid_h as u32, grid_w as u32],
(1, 3),
&self.device,
)?;
Ok((img_tensor, grid_thw))
}
pub fn process_images(
&self,
imgs: &Vec<DynamicImage>,
img_mean: &Tensor,
img_std: &Tensor,
) -> Result<(Tensor, Tensor)> {
let mut pixel_values_vec = Vec::new();
let mut vision_grid_thws_vec = Vec::new();
for img in imgs {
let img_tensor = self.process_img(img, img_mean, img_std)?;
let (img_tensor, grid_thw) = self.process_vision_tensor(&img_tensor)?;
pixel_values_vec.push(img_tensor);
vision_grid_thws_vec.push(grid_thw);
}
let pixel_values = Tensor::cat(&pixel_values_vec, 0)?;
let vision_grid_thws = Tensor::cat(&vision_grid_thws_vec, 0)?;
Ok((pixel_values, vision_grid_thws))
}
pub fn process_info(
&self,
messages: &ChatCompletionParameters,
tokenizer: &TokenizerModel,
text: &str,
) -> Result<HunyuanData> {
let imgs = extract_images(messages)?;
let img_mean = Tensor::from_slice(&self.process_cfg.image_mean, (3, 1, 1), &self.device)?
.to_dtype(self.dtype)?;
let img_std = Tensor::from_slice(&self.process_cfg.image_std, (3, 1, 1), &self.device)?
.to_dtype(self.dtype)?;
let (pixel_values, image_grid_thw) = if !imgs.is_empty() {
let (pixel_values, image_grid_thw) = self.process_images(&imgs, &img_mean, &img_std)?;
(Some(pixel_values), Some(image_grid_thw))
} else {
(None, None)
};
let mut image_tokens_cumsum = vec![0];
let mut text = text.to_string();
if !imgs.is_empty()
&& let Some(grid_thw) = image_grid_thw.as_ref()
{
let mut index = 0;
while text.contains(&self.image_token) {
let grid_i = grid_thw.i(index)?;
let grid_h = grid_i.i(1)?.to_scalar::<u32>()?;
let grid_w = grid_i.i(2)?.to_scalar::<u32>()?;
let patch_h = grid_h / self.process_cfg.merge_size as u32;
let patch_w = grid_w / self.process_cfg.merge_size as u32;
let num_image_tokens = patch_h * (patch_w + 1) + 2;
let num_id = image_tokens_cumsum[image_tokens_cumsum.len() - 1] + num_image_tokens;
image_tokens_cumsum.push(num_id);
let replace = self.placeholder_token.repeat(num_image_tokens as usize);
text = text.replacen(&self.image_token, &replace, 1);
index += 1;
}
}
text = text.replace(&self.placeholder_token, &self.image_token);
let input_ids = tokenizer.text_encode(text, &self.device)?;
let seq_len = input_ids.dim(1)?;
let position_ids = Tensor::arange(0, seq_len as u32, &self.device)?;
let mut position_ids_w = Tensor::arange(0, seq_len as u32, &self.device)?;
let mut position_ids_h = Tensor::arange(0, seq_len as u32, &self.device)?;
let mut position_ids_t = Tensor::arange(0, seq_len as u32, &self.device)?;
if !imgs.is_empty()
&& let Some(grid_thw) = image_grid_thw.as_ref()
{
let image_token_pos_indices = get_eq_indices(&input_ids.i(0)?, self.image_token_id)?;
for i in 0..grid_thw.dim(0)? {
let grid_i = grid_thw.i(i)?;
let grid_h = grid_i.i(1)?.to_scalar::<u32>()?;
let grid_w = grid_i.i(2)?.to_scalar::<u32>()?;
let patch_h = grid_h / self.process_cfg.merge_size as u32;
let patch_w = grid_w / self.process_cfg.merge_size as u32;
let start_pos = image_token_pos_indices
.i(image_tokens_cumsum[i] as usize)?
.to_scalar::<u32>()? as usize
+ 1;
let replace_num = ((patch_w + 1) * patch_h) as usize;
let pos_w: Vec<u32> = (0..patch_h).flat_map(|_| 0u32..patch_w + 1).collect();
position_ids_w = position_ids_w.slice_assign(
&[start_pos..start_pos + replace_num],
&Tensor::new(pos_w, &self.device)?,
)?;
let pos_h: Vec<u32> = (0..patch_h)
.flat_map(|h| vec![h; (patch_w + 1) as usize])
.collect();
position_ids_h = position_ids_h.slice_assign(
&[start_pos..start_pos + replace_num],
&Tensor::new(pos_h, &self.device)?,
)?;
position_ids_t = position_ids_t.slice_assign(
&[start_pos..start_pos + replace_num],
&Tensor::new(vec![0u32; replace_num], &self.device)?,
)?;
}
}
let position_ids = Tensor::stack(
&[position_ids, position_ids_h, position_ids_w, position_ids_t],
0,
)?
.unsqueeze(0)?;
let image_mask = get_equal_mask(&input_ids, self.image_token_id)?;
let data = HunyuanData {
input_ids,
position_ids,
image_mask,
pixel_values,
image_grid_thw,
};
Ok(data)
}
}
pub struct HunyuanData {
pub input_ids: Tensor,
pub position_ids: Tensor,
pub image_mask: Tensor,
pub pixel_values: Option<Tensor>,
pub image_grid_thw: Option<Tensor>,
}
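// A plain-arithmetic sketch of the placeholder expansion in `process_info`
// above: each image contributes patch_h * (patch_w + 1) + 2 tokens, where
// patch_h / patch_w are the grid dims divided by merge_size — one extra
// token per row (presumably a newline marker) plus two delimiters. The
// concrete grid (grid_h = 32, grid_w = 48, merge_size = 2) is an
// illustrative assumption, not a value read from a checkpoint config.
#[cfg(test)]
mod hunyuan_token_count_sketch {
    fn num_image_tokens(grid_h: u32, grid_w: u32, merge_size: u32) -> u32 {
        let patch_h = grid_h / merge_size;
        let patch_w = grid_w / merge_size;
        patch_h * (patch_w + 1) + 2
    }

    #[test]
    fn expansion_matches_the_formula_above() {
        // 16 rows of (24 + 1) tokens plus begin/end delimiters = 402
        assert_eq!(num_image_tokens(32, 48, 2), 402);
    }
}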
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/hunyuan_ocr/model.rs | src/models/hunyuan_ocr/model.rs | use anyhow::{Result, anyhow};
use candle_core::{D, IndexOp, Tensor};
use candle_nn::{
Conv2d, Embedding, Init, Linear, Module, RmsNorm, VarBuilder, embedding, linear,
linear_no_bias, rms_norm,
};
use crate::{
models::{
common::{GateUpDownMLP, NaiveAttnTwoLinearMLPBlock, eager_attention_forward, get_conv2d},
hunyuan_ocr::config::{HunYuanVLConfig, HunYuanVLVisionConfig},
},
position_embed::rope::{RoPE, apply_rotary_pos_emb, get_xd_cos_sin},
utils::tensor_utils::{
interpolate_bilinear, masked_scatter_dim0, prepare_causal_attention_mask, split_tensor,
},
};
pub struct HunYuanVisionPatchEmbed {
patch_embedding: Conv2d,
// position_embedding: Embedding,
num_channels: usize,
patch_size: usize,
// num_positions: usize,
// position_edge: usize,
embed_dim: usize,
patch_pos_embed: Tensor,
}
impl HunYuanVisionPatchEmbed {
pub fn new(vb: VarBuilder, config: &HunYuanVLVisionConfig) -> Result<Self> {
let patch_embedding = get_conv2d(
vb.pp("patch_embedding"),
config.num_channels,
config.hidden_size,
config.patch_size,
0,
config.patch_size,
1,
1,
true,
)?;
let num_channels = config.num_channels;
let patch_size = config.patch_size;
let position_edge = config.max_image_size / patch_size;
let num_positions = (position_edge).pow(2) + 1;
let embed_dim = config.hidden_size;
let position_embedding = embedding(num_positions, embed_dim, vb.pp("position_embedding"))?;
let patch_pos_embed = position_embedding
.embeddings()
.i(1..)?
.reshape((1, position_edge, position_edge, embed_dim))?
.permute((0, 3, 1, 2))?;
Ok(Self {
patch_embedding,
// position_embedding,
num_channels,
patch_size,
// num_positions,
// position_edge,
embed_dim,
patch_pos_embed,
})
}
pub fn forward(&self, pixel_values: &Tensor, grid_thw: &Tensor) -> Result<Tensor> {
let (num_patches, _) = pixel_values.dims2()?;
let pixel_values = pixel_values.reshape((
num_patches,
self.num_channels,
self.patch_size,
self.patch_size,
))?;
let patch_embeds = self.patch_embedding.forward(&pixel_values)?;
let patch_embeds = patch_embeds
.squeeze(D::Minus1)?
.squeeze(D::Minus1)?
.unsqueeze(0)?;
let mut patch_pos_embed_list = vec![];
let img_num = grid_thw.dim(0)?;
for i in 0..img_num {
let grid_i = grid_thw.i(i)?;
let grid_h = grid_i.i(1)?.to_scalar::<u32>()? as usize;
let grid_w = grid_i.i(2)?.to_scalar::<u32>()? as usize;
let patch_pos_embed_ =
interpolate_bilinear(&self.patch_pos_embed, (grid_h, grid_w), Some(false))?;
let patch_pos_embed_ = patch_pos_embed_
.reshape((self.embed_dim, ()))?
.transpose(0, 1)?
.unsqueeze(0)?;
patch_pos_embed_list.push(patch_pos_embed_);
}
let patch_pos_embed = Tensor::cat(&patch_pos_embed_list, 1)?;
let embedding = patch_embeds.add(&patch_pos_embed)?;
Ok(embedding)
}
}
pub struct HunYuanVisionPatchMerger {
proj_0: Conv2d,
proj_2: Conv2d,
mlp: Linear,
image_newline: Tensor,
image_begin: Tensor,
image_end: Tensor,
// image_sep: Tensor,
before_rms: RmsNorm,
after_rms: RmsNorm,
}
impl HunYuanVisionPatchMerger {
pub fn new(vb: VarBuilder, config: &HunYuanVLVisionConfig) -> Result<Self> {
let proj_0 = get_conv2d(
vb.pp("proj.0"),
config.hidden_size,
config.hidden_size * 2,
config.spatial_merge_size,
0,
config.spatial_merge_size,
1,
1,
true,
)?;
let proj_2 = get_conv2d(
vb.pp("proj.2"),
config.hidden_size * 2,
config.hidden_size * 4,
1,
0,
1,
1,
1,
true,
)?;
let mlp = linear(config.hidden_size * 4, config.out_hidden_size, vb.pp("mlp"))?;
let image_newline =
vb.get_with_hints(config.hidden_size * 4, "image_newline", Init::Const(0.))?;
let image_begin =
vb.get_with_hints(config.out_hidden_size, "image_begin", Init::Const(0.))?;
let image_end = vb.get_with_hints(config.out_hidden_size, "image_end", Init::Const(0.))?;
// let image_sep = vb.get_with_hints(config.out_hidden_size, "image_sep", Init::Const(0.))?;
let before_rms = rms_norm(config.hidden_size, config.rms_norm_eps, vb.pp("before_rms"))?;
let after_rms = rms_norm(
config.out_hidden_size,
config.rms_norm_eps,
vb.pp("after_rms"),
)?;
Ok(Self {
proj_0,
proj_2,
mlp,
image_newline,
image_begin,
image_end,
// image_sep,
before_rms,
after_rms,
})
}
pub fn forward(&self, xs: &Tensor, size: (usize, usize)) -> Result<Tensor> {
let xs = self.before_rms.forward(xs)?;
let (h, w) = size;
let xs = xs.permute((0, 2, 1))?.reshape((xs.dim(0)?, (), h, w))?;
let xs = self.proj_0.forward(&xs)?.gelu()?;
let xs = self.proj_2.forward(&xs)?;
let (b, c, h, _) = xs.dims4()?;
let image_newline = self
.image_newline
.reshape((1, c, 1, 1))?
.broadcast_as((b, c, h, 1))?
.to_dtype(xs.dtype())?;
let xs = Tensor::cat(&[xs, image_newline], D::Minus1)?;
let xs = xs.reshape((b, c, ()))?.permute((0, 2, 1))?;
let xs = self.mlp.forward(&xs)?;
let begin = self
.image_begin
.reshape((1, 1, ()))?
.broadcast_as((b, 1, xs.dim(D::Minus1)?))?
.to_dtype(xs.dtype())?;
let end = self
.image_end
.reshape((1, 1, ()))?
.broadcast_as((b, 1, xs.dim(D::Minus1)?))?
.to_dtype(xs.dtype())?;
let xs = Tensor::cat(&[begin, xs, end], 1)?;
let xs = self.after_rms.forward(&xs)?;
Ok(xs)
}
}
pub struct HunYuanVisionTransformer {
embeddings: HunYuanVisionPatchEmbed,
layers: Vec<NaiveAttnTwoLinearMLPBlock>,
perceive: HunYuanVisionPatchMerger,
}
impl HunYuanVisionTransformer {
pub fn new(vb: VarBuilder, config: &HunYuanVLVisionConfig) -> Result<Self> {
let embeddings = HunYuanVisionPatchEmbed::new(vb.pp("embeddings"), config)?;
let mut layers = vec![];
let vb_layers = vb.pp("layers");
for i in 0..config.num_hidden_layers {
let layer_i = NaiveAttnTwoLinearMLPBlock::new(
vb_layers.pp(i),
config.hidden_size,
config.num_attention_heads,
None,
None,
true,
"self_attn",
None,
config.intermediate_size,
config.hidden_act,
true,
"mlp",
"dense_h_to_4h",
"dense_4h_to_h",
config.rms_norm_eps,
"input_layernorm",
"post_attention_layernorm",
)?;
layers.push(layer_i);
}
let perceive = HunYuanVisionPatchMerger::new(vb.pp("perceive"), config)?;
Ok(Self {
embeddings,
layers,
perceive,
})
}
pub fn forward(&self, xs: &Tensor, grid_thw: &Tensor) -> Result<Tensor> {
let mut hidden_states = self.embeddings.forward(xs, grid_thw)?;
for layer in &self.layers {
hidden_states = layer.forward(&hidden_states, None, None, None, false)?;
}
        // Per-image token counts (h * w for each grid), used to split the
        // packed hidden states back into per-image chunks.
        let mut cu_seqlens = vec![];
for i in 0..grid_thw.dim(0)? {
let [_, h, w] = grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
                return Err(anyhow!("grid_thw: expected exactly 3 elements"));
};
cu_seqlens.push((h * w) as usize);
}
let split_items = split_tensor(&hidden_states, &cu_seqlens, 1)?;
let mut processed_item = vec![];
for i in 0..grid_thw.dim(0)? {
let [_, h, w] = grid_thw.i(i)?.to_vec1::<u32>()?[..] else {
                return Err(anyhow!("grid_thw: expected exactly 3 elements"));
};
let processed = self
.perceive
.forward(&split_items[i], (h as usize, w as usize))?;
processed_item.push(processed);
}
let xs = Tensor::cat(&processed_item, 1)?;
Ok(xs)
}
}
pub struct HunYuanVLAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
query_layernorm: RmsNorm,
key_layernorm: RmsNorm,
num_attention_heads: usize,
num_key_value_heads: usize,
num_kv_groups: usize,
head_dim: usize,
scaling: f64,
kv_cache: Option<(Tensor, Tensor)>,
}
impl HunYuanVLAttention {
pub fn new(
vb: VarBuilder,
hidden_size: usize,
head_dim: usize,
num_attention_heads: usize,
num_key_value_heads: usize,
attention_bias: bool,
rms_norm_eps: f64,
) -> Result<Self> {
let num_kv_groups = num_attention_heads / num_key_value_heads;
let scaling = 1f64 / f64::sqrt(head_dim as f64);
let (q_proj, k_proj, v_proj, o_proj) = if attention_bias {
let q_proj = linear(hidden_size, num_attention_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear(hidden_size, num_key_value_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear(hidden_size, num_key_value_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear(num_attention_heads * head_dim, hidden_size, vb.pp("o_proj"))?;
(q_proj, k_proj, v_proj, o_proj)
} else {
let q_proj =
linear_no_bias(hidden_size, num_attention_heads * head_dim, vb.pp("q_proj"))?;
let k_proj =
linear_no_bias(hidden_size, num_key_value_heads * head_dim, vb.pp("k_proj"))?;
let v_proj =
linear_no_bias(hidden_size, num_key_value_heads * head_dim, vb.pp("v_proj"))?;
let o_proj =
linear_no_bias(num_attention_heads * head_dim, hidden_size, vb.pp("o_proj"))?;
(q_proj, k_proj, v_proj, o_proj)
};
let query_layernorm = rms_norm(head_dim, rms_norm_eps, vb.pp("query_layernorm"))?;
let key_layernorm = rms_norm(head_dim, rms_norm_eps, vb.pp("key_layernorm"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
query_layernorm,
key_layernorm,
num_attention_heads,
num_key_value_heads,
num_kv_groups,
head_dim,
scaling,
kv_cache: None,
})
}
pub fn forward(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self
.q_proj
.forward(xs)?
.reshape((b_sz, q_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = self
.k_proj
.forward(xs)?
.reshape((b_sz, q_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = self.v_proj.forward(xs)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
apply_rotary_pos_emb(&query_states, &key_states, cos, sin, false)?;
let query_states = self.query_layernorm.forward(&query_states)?;
let key_states = self.key_layernorm.forward(&key_states)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let attn_output = eager_attention_forward(
&query_states,
&key_states,
&value_states,
Some(self.num_kv_groups),
attention_mask,
self.scaling,
)?;
let attn_output =
attn_output.reshape((b_sz, q_len, self.num_attention_heads * self.head_dim))?;
let attn_output = attn_output.apply(&self.o_proj)?;
Ok(attn_output)
}
pub fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
pub struct HunYuanVLDecoderLayer {
self_attn: HunYuanVLAttention,
mlp: GateUpDownMLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl HunYuanVLDecoderLayer {
pub fn new(config: &HunYuanVLConfig, vb: VarBuilder) -> Result<Self> {
let self_attn = HunYuanVLAttention::new(
vb.pp("self_attn"),
config.hidden_size,
config.head_dim,
config.num_attention_heads,
config.num_key_value_heads,
config.attention_bias,
config.rms_norm_eps,
)?;
let mlp = GateUpDownMLP::new(
vb.pp("mlp"),
config.hidden_size,
config.intermediate_size,
config.hidden_act,
false,
)?;
let input_layernorm = rms_norm(
config.hidden_size,
config.rms_norm_eps,
vb.pp("input_layernorm"),
)?;
let post_attention_layernorm = rms_norm(
config.hidden_size,
config.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
pub fn forward(
&mut self,
xs: &Tensor,
cos: &Tensor,
sin: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let residual = xs.clone();
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, cos, sin, attention_mask)?;
let xs = residual.add(&xs)?;
let residual = xs.clone();
let xs = self.post_attention_layernorm.forward(&xs)?;
let xs = self.mlp.forward(&xs)?;
let xs = residual.add(&xs)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache();
}
}
pub struct HunYuanVLTextModel {
embed_tokens: Embedding,
layers: Vec<HunYuanVLDecoderLayer>,
norm: RmsNorm,
rope: RoPE,
xdrope_section: Vec<usize>,
}
impl HunYuanVLTextModel {
pub fn new(vb: VarBuilder, config: &HunYuanVLConfig) -> Result<Self> {
let embed_tokens = embedding(config.vocab_size, config.hidden_size, vb.pp("embed_tokens"))?;
let mut layers = vec![];
let vb_layers = vb.pp("layers");
for i in 0..config.num_hidden_layers {
let layer = HunYuanVLDecoderLayer::new(config, vb_layers.pp(i))?;
layers.push(layer);
}
let norm = rms_norm(config.hidden_size, config.rms_norm_eps, vb.pp("norm"))?;
let base = config.rope_theta
* config
.rope_scaling
.alpha
.powf(config.head_dim as f64 / (config.head_dim - 2) as f64);
let rope = RoPE::new(config.head_dim, base as f32, vb.device())?;
let xdrope_section = config.rope_scaling.xdrope_section.clone();
Ok(Self {
embed_tokens,
layers,
norm,
rope,
xdrope_section,
})
}
pub fn forward(
&mut self,
inputs_embeds: &Tensor,
position_ids: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_size, seq_len, _) = inputs_embeds.dims3()?;
let attention_mask: Option<Tensor> = {
if seq_len <= 1 {
None
} else {
Some(prepare_causal_attention_mask(
b_size,
seq_len,
0,
inputs_embeds.device(),
)?)
}
};
let (cos, sin) = self
.rope
.forward(seqlen_offset, seq_len, inputs_embeds.device())?;
let mut xs = inputs_embeds.clone();
for (i, layer) in self.layers.iter_mut().enumerate() {
if i == 0
&& let Some(position_ids) = position_ids
{
let (cos, sin) =
get_xd_cos_sin(&cos, &sin, position_ids, self.xdrope_section.clone())?;
xs = layer.forward(&xs, &cos, &sin, attention_mask.as_ref())?;
} else {
xs = layer.forward(&xs, &cos, &sin, attention_mask.as_ref())?;
}
}
let xs = self.norm.forward(&xs)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
pub struct HunyuanVLModel {
// config: HunYuanVLConfig,
vit: HunYuanVisionTransformer,
model: HunYuanVLTextModel,
lm_head: Linear,
}
impl HunyuanVLModel {
pub fn new(vb: VarBuilder, config: HunYuanVLConfig) -> Result<Self> {
let vit = HunYuanVisionTransformer::new(vb.pp("vit"), &config.vision_config)?;
let model = HunYuanVLTextModel::new(vb.pp("model"), &config)?;
let lm_head = Linear::new(model.embed_tokens.embeddings().clone(), None);
Ok(Self {
// config,
vit,
model,
lm_head,
})
}
pub fn forward(
&mut self,
input_ids: &Tensor,
pixel_values: Option<&Tensor>,
image_grid_thw: Option<&Tensor>,
image_mask: Option<&Tensor>,
position_ids: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let mut inputs_embeds = self.model.embed_tokens.forward(input_ids)?;
if let Some(pixel_values) = pixel_values
&& let Some(grid_thw) = image_grid_thw
&& let Some(image_mask) = image_mask
{
let image_embeds = self.vit.forward(pixel_values, grid_thw)?.squeeze(0)?;
inputs_embeds = masked_scatter_dim0(&inputs_embeds, &image_embeds, image_mask)?;
}
let outputs = self
.model
.forward(&inputs_embeds, position_ids, seqlen_offset)?;
let seq_len = outputs.dim(1)?;
let hidden_state = outputs.narrow(1, seq_len - 1, 1)?;
let logits = self.lm_head.forward(&hidden_state)?;
Ok(logits)
}
pub fn clear_kv_cache(&mut self) {
self.model.clear_kv_cache();
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/hunyuan_ocr/mod.rs | src/models/hunyuan_ocr/mod.rs | pub mod config;
pub mod generate;
pub mod model;
pub mod processor;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/models/hunyuan_ocr/generate.rs | src/models/hunyuan_ocr/generate.rs | use aha_openai_dive::v1::resources::chat::{
ChatCompletionChunkResponse, ChatCompletionParameters, ChatCompletionResponse,
};
use anyhow::{Result, anyhow};
use candle_core::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use rocket::async_stream::stream;
use rocket::futures::Stream;
use crate::{
chat_template::ChatTemplate,
models::{
GenerateModel,
hunyuan_ocr::{
config::{HunYuanVLConfig, HunyuanOCRGenerationConfig},
model::HunyuanVLModel,
processor::HunyuanVLProcessor,
},
},
tokenizer::TokenizerModel,
utils::{
build_completion_chunk_response, build_completion_response, find_type_files, get_device,
get_dtype, get_logit_processor,
},
};
pub struct HunyuanOCRGenerateModel<'a> {
chat_template: ChatTemplate<'a>,
tokenizer: TokenizerModel,
pre_processor: HunyuanVLProcessor,
hunyuan_vl: HunyuanVLModel,
device: Device,
eos_token_id1: u32,
eos_token_id2: u32,
generation_config: HunyuanOCRGenerationConfig,
model_name: String,
}
impl<'a> HunyuanOCRGenerateModel<'a> {
pub fn init(path: &str, device: Option<&Device>, dtype: Option<DType>) -> Result<Self> {
let chat_template = ChatTemplate::init(path)?;
let tokenizer = TokenizerModel::init(path)?;
let config_path = path.to_string() + "/config.json";
let cfg: HunYuanVLConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
let device = get_device(device);
let cfg_dtype = cfg.dtype.as_str();
let dtype = get_dtype(dtype, cfg_dtype);
let pre_processor = HunyuanVLProcessor::new(path, &device, dtype)?;
let model_list = find_type_files(path, "safetensors")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&model_list, dtype, &device)? };
let hunyuan_vl = HunyuanVLModel::new(vb, cfg.clone())?;
let generation_config_path = path.to_string() + "/generation_config.json";
let generation_config: HunyuanOCRGenerationConfig =
serde_json::from_slice(&std::fs::read(generation_config_path)?)?;
Ok(Self {
chat_template,
tokenizer,
pre_processor,
hunyuan_vl,
device,
eos_token_id1: generation_config.eos_token_id[0] as u32,
eos_token_id2: generation_config.eos_token_id[1] as u32,
generation_config,
model_name: "hunyuan_ocr".to_string(),
})
}
}
impl<'a> GenerateModel for HunyuanOCRGenerateModel<'a> {
fn generate(&mut self, mes: ChatCompletionParameters) -> Result<ChatCompletionResponse> {
let temperature = match mes.temperature {
None => self.generation_config.temperature,
Some(tem) => tem,
};
let top_p = match mes.top_p {
None => self.generation_config.top_p,
Some(top_p) => top_p,
};
let top_k = self.generation_config.top_k;
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor =
get_logit_processor(Some(temperature), Some(top_p), Some(top_k), seed);
let mes_render = self.chat_template.apply_chat_template(&mes)?;
let data = self
.pre_processor
.process_info(&mes, &self.tokenizer, &mes_render)?;
let mut input_ids = data.input_ids;
let mut position_ids = Some(&data.position_ids);
let mut image_mask = Some(&data.image_mask);
let mut pixel_values = data.pixel_values;
let mut image_grid_thw = data.image_grid_thw;
let mut seq_len = input_ids.dim(1)?;
let mut seqlen_offset = 0;
let mut generate: Vec<u32> = Vec::new();
let sample_len = mes.max_tokens.unwrap_or(1024);
for _ in 0..sample_len {
let logits = self.hunyuan_vl.forward(
&input_ids,
pixel_values.as_ref(),
image_grid_thw.as_ref(),
image_mask,
position_ids,
seqlen_offset,
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
generate.push(next_token);
if next_token == self.eos_token_id1 || next_token == self.eos_token_id2 {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
position_ids = None;
image_mask = None;
pixel_values = None;
image_grid_thw = None;
}
let num_token = generate.len() as u32;
let res = self.tokenizer.token_decode(generate)?;
self.hunyuan_vl.clear_kv_cache();
let response = build_completion_response(res, &self.model_name, Some(num_token));
Ok(response)
}
fn generate_stream(
&mut self,
mes: ChatCompletionParameters,
) -> Result<
Box<
dyn Stream<Item = Result<ChatCompletionChunkResponse, anyhow::Error>>
+ Send
+ Unpin
+ '_,
>,
> {
let temperature = match mes.temperature {
None => self.generation_config.temperature,
Some(tem) => tem,
};
let top_p = match mes.top_p {
None => self.generation_config.top_p,
Some(top_p) => top_p,
};
let top_k = self.generation_config.top_k;
let seed = match mes.seed {
None => 34562u64,
Some(s) => s as u64,
};
let mut logit_processor =
get_logit_processor(Some(temperature), Some(top_p), Some(top_k), seed);
let mes_render = self.chat_template.apply_chat_template(&mes)?;
let data = self
.pre_processor
.process_info(&mes, &self.tokenizer, &mes_render)?;
let mut seqlen_offset = 0;
let sample_len = mes.max_tokens.unwrap_or(1024);
let stream = stream! {
let mut error_tokens = Vec::new();
let mut input_ids = data.input_ids;
let mut position_ids = Some(&data.position_ids);
let mut image_mask = Some(&data.image_mask);
let mut pixel_values = data.pixel_values;
let mut image_grid_thw = data.image_grid_thw;
let mut seq_len = input_ids.dim(1)?;
for _ in 0..sample_len {
let logits = self.hunyuan_vl.forward(
&input_ids,
pixel_values.as_ref(),
image_grid_thw.as_ref(),
image_mask,
position_ids,
seqlen_offset,
)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let next_token = logit_processor.sample(&logits)?;
let mut decode_ids = Vec::new();
if !error_tokens.is_empty() {
decode_ids.extend_from_slice(&error_tokens);
}
decode_ids.push(next_token);
                let decoded_token = self.tokenizer.token_decode(decode_ids).map_err(|e| anyhow!("stream decode error: {e}"))?;
if decoded_token.contains("�") {
error_tokens.push(next_token);
if error_tokens.len() > 3 {
error_tokens.clear();
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
position_ids = None;
image_mask = None;
pixel_values = None;
image_grid_thw = None;
continue;
}
error_tokens.clear();
let chunk = build_completion_chunk_response(decoded_token, &self.model_name, None, None);
yield Ok(chunk);
if next_token == self.eos_token_id1 || next_token == self.eos_token_id2 {
break;
}
seqlen_offset += seq_len;
seq_len = 1;
input_ids = Tensor::from_vec(vec![next_token], (1, 1), &self.device)?;
position_ids = None;
image_mask = None;
pixel_values = None;
image_grid_thw = None;
}
self.hunyuan_vl.clear_kv_cache();
};
Ok(Box::new(Box::pin(stream)))
}
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/utils/audio_utils.rs | src/utils/audio_utils.rs | use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::{f64::consts::PI, io::Cursor};
use aha_openai_dive::v1::resources::chat::{
ChatCompletionParameters, ChatCompletionResponse, ChatMessage, ChatMessageContent,
ChatMessageContentPart,
};
use anyhow::{Result, anyhow};
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use candle_core::{D, Device, Tensor};
use candle_nn::{Conv1d, Conv1dConfig, Module};
use hound::{SampleFormat, WavReader};
use num::integer::gcd;
use crate::utils::get_default_save_dir;
// Resampling method enum
#[derive(Debug, Clone, Copy)]
pub enum ResamplingMethod {
SincInterpHann,
SincInterpKaiser,
}
// Zeroth-order modified Bessel function I0
fn i0(x: f32) -> f32 {
let mut result = 1.0;
let mut term = 1.0;
let half_x_sq = x * x / 4.0;
for k in 1..50 {
term = term * half_x_sq / (k * k) as f32;
result += term;
if term < 1e-12 {
break;
}
}
result
}
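// A minimal sketch (not from the original crate): by definition I0(0) = 1,
// and the truncated series above converges quickly for moderate arguments.
#[allow(dead_code)]
fn i0_example() {
    assert!((i0(0.0) - 1.0).abs() < 1e-6);
    // I0 only depends on x*x, so it is even; values grow monotonically in |x|.
    assert!(i0(2.0) > 1.0 && (i0(2.0) - i0(-2.0)).abs() < 1e-6);
}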
// Build the sinc resampling kernel
pub fn get_sinc_resample_kernel(
orig_freq: i64,
new_freq: i64,
gcd_val: i64,
lowpass_filter_width: i64,
rolloff: f64,
resampling_method: ResamplingMethod,
beta: Option<f32>,
device: &Device,
) -> Result<(Tensor, i64)> {
if orig_freq <= 0 || new_freq <= 0 {
return Err(anyhow!("Frequencies must be positive".to_string()));
}
if lowpass_filter_width <= 0 {
        return Err(anyhow!("Low pass filter width should be positive"));
}
let orig_freq = orig_freq / gcd_val;
let new_freq = new_freq / gcd_val;
let base_freq = (orig_freq.min(new_freq) as f64) * rolloff;
let width_f = (lowpass_filter_width as f64) * (orig_freq as f64) / base_freq;
let width = width_f.ceil() as i64;
    // Create the index array, shape [1, 1, 2*width + orig_freq]
let idx = Tensor::arange(-width as f32, (width + orig_freq) as f32, device)?
.affine(1.0 / orig_freq as f64, 0.0)?
.unsqueeze(0)?
.unsqueeze(0)?;
    // Create the time array t, shape [new_freq, 1, idx_len]
let t = Tensor::arange_step(0.0, -new_freq as f32, -1.0, device)?
.affine(1.0 / new_freq as f64, 0.0)?
.unsqueeze(D::Minus1)?
.unsqueeze(D::Minus1)?
.broadcast_add(&idx)?
.affine(base_freq, 0.0)?;
let t = t.clamp(-lowpass_filter_width as f32, lowpass_filter_width as f32)?;
    // Compute the window function
let window = match resampling_method {
ResamplingMethod::SincInterpHann => {
let window_arg = t.affine(PI / (lowpass_filter_width as f64) / 2.0, 0.0)?;
window_arg.cos()?.sqr()?
}
ResamplingMethod::SincInterpKaiser => {
let beta_val = beta.unwrap_or(14.769_656_f32);
let i0_beta = i0(beta_val);
let normalized_t = t.affine(1.0 / lowpass_filter_width as f64, 0.0)?;
let arg = (1.0 - normalized_t.sqr()?)?;
            // Handle the case where arg is negative
let sqrt_arg = arg.relu()?.sqrt()?;
let sqrt_dims = sqrt_arg.dims();
let sqrt_arg_vec = sqrt_arg.flatten_all()?.to_vec1::<f32>()?;
let window_val: Vec<f32> = sqrt_arg_vec
.iter()
.map(|x| i0(beta_val * x) / i0_beta)
.collect();
Tensor::new(window_val, device)?.reshape(sqrt_dims)?
}
};
    // Compute the sinc kernel
let scale = base_freq / (orig_freq as f64);
let t_scaled = t.affine(PI, 0.0)?;
let t_zeros = Tensor::zeros_like(&t_scaled)?;
let t_ones = Tensor::ones_like(&t_scaled)?;
let mask = t_scaled.eq(&t_zeros)?;
let sinc = mask.where_cond(&t_ones, &t_scaled.sin()?.div(&t_scaled)?)?;
let kernels = sinc.mul(&window)?.affine(scale, 0.0)?;
Ok((kernels, width))
}
// Apply the sinc resampling kernel
pub fn apply_sinc_resample_kernel(
waveform: &Tensor,
orig_freq: i64,
new_freq: i64,
gcd_val: i64,
kernel: &Tensor,
width: i64,
) -> Result<Tensor> {
let orig_freq = orig_freq / gcd_val;
let new_freq = new_freq / gcd_val;
    // Get the waveform shape
let dims = waveform.dims();
let waveform_flat = waveform.reshape(((), dims[dims.len() - 1]))?;
let (num_wavs, length) = waveform_flat.dims2()?;
let padded_waveform =
waveform.pad_with_zeros(D::Minus1, width as usize, (width + orig_freq) as usize)?;
    // Add a channel dimension: [batch_size, 1, padded_length]
let waveform_3d = padded_waveform.unsqueeze(1)?;
let config = Conv1dConfig {
padding: 0,
stride: orig_freq as usize,
dilation: 1,
groups: 1,
cudnn_fwd_algo: None,
};
let conv1d = Conv1d::new(kernel.clone(), None, config);
// 执行卷积
// kernel形状: [new_freq_reduced, 1, kernel_len]
// 输出形状: [batch_size, new_freq_reduced, output_length]
let conv_output = conv1d.forward(&waveform_3d)?;
    // Transpose and reshape to [batch_size, output_length * new_freq_reduced]
let conv_transposed = conv_output.transpose(1, 2)?.reshape((num_wavs, ()))?;
    // Compute the target length
let target_length = ((new_freq as f64 * length as f64) / orig_freq as f64).ceil() as usize;
    // Truncate to the target length
let resampled_flat =
conv_transposed.narrow(1, 0, target_length.min(conv_transposed.dim(1)?))?;
let mut new_dims = dims.to_vec();
let last_dim = new_dims.len() - 1;
new_dims[last_dim] = resampled_flat.dim(1)?;
    // Restore the original batch shape
let resampled = resampled_flat.reshape(new_dims)?;
Ok(resampled)
}
// Main resampling function
pub fn resample(
waveform: &Tensor,
orig_freq: i64,
new_freq: i64,
lowpass_filter_width: i64,
rolloff: f64,
resampling_method: ResamplingMethod,
beta: Option<f32>,
) -> Result<Tensor> {
if orig_freq <= 0 || new_freq <= 0 {
return Err(anyhow!("Frequencies must be positive".to_string(),));
}
if orig_freq == new_freq {
return Ok(waveform.clone());
}
let gcd_val = gcd(orig_freq, new_freq);
let device = waveform.device();
let (kernel, width) = get_sinc_resample_kernel(
orig_freq,
new_freq,
gcd_val,
lowpass_filter_width,
rolloff,
resampling_method,
beta,
device,
)?;
let t = apply_sinc_resample_kernel(waveform, orig_freq, new_freq, gcd_val, &kernel, width)?;
Ok(t)
}
// Simplified convenience wrapper
pub fn resample_simple(waveform: &Tensor, orig_freq: i64, new_freq: i64) -> Result<Tensor> {
resample(
waveform,
orig_freq,
new_freq,
6,
0.99,
ResamplingMethod::SincInterpHann,
None,
)
}
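// A minimal usage sketch (assumed rates and signal, not from the original
// crate): downsample one second of a synthetic 440 Hz sine from 44.1 kHz to
// 16 kHz with `resample_simple`. Shapes follow the (channels, samples) layout
// used by `load_audio` below.
#[allow(dead_code)]
fn resample_example() -> Result<()> {
    let device = Device::Cpu;
    let (sr_in, sr_out) = (44_100i64, 16_000i64);
    let samples: Vec<f32> = (0..sr_in)
        .map(|n| (2.0 * PI as f32 * 440.0 * n as f32 / sr_in as f32).sin())
        .collect();
    let waveform = Tensor::from_vec(samples, (1, sr_in as usize), &device)?;
    let resampled = resample_simple(&waveform, sr_in, sr_out)?;
    // Output length is ceil(len * new_freq / orig_freq) = 16_000 here.
    assert_eq!(resampled.dims(), &[1, 16_000]);
    Ok(())
}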
pub fn load_audio_from_url(url: &str) -> Result<PathBuf> {
tokio::task::block_in_place(|| {
let client = reqwest::blocking::Client::new();
let response = client.get(url).send()?;
if !response.status().is_success() {
return Err(anyhow::anyhow!(
"Failed to download file: {}",
response.status()
));
}
let temp_dir = get_default_save_dir().expect("Failed to get home directory");
let temp_dir = PathBuf::from(temp_dir);
let temp_path = temp_dir.join("temp_audio.wav");
let mut file = std::fs::File::create(&temp_path)?;
let mut content = Cursor::new(response.bytes()?);
std::io::copy(&mut content, &mut file)?;
// Return the temp directory to keep it alive until the function ends
Ok(temp_path)
})
}
pub fn get_audio_path(path_str: &str) -> Result<PathBuf> {
if path_str.starts_with("http://") || path_str.starts_with("https://") {
// Download file from network
load_audio_from_url(path_str)
} else if path_str.starts_with("file://") {
// Convert file:// URL to local path
let path = url::Url::parse(path_str)?;
let path = path.to_file_path();
let path = match path {
Ok(path) => path,
Err(_) => {
let mut path = path_str.to_owned();
path = path.split_off(7);
PathBuf::from(path)
}
};
Ok(path)
} else if path_str.starts_with("data:audio") && path_str.contains("base64,") {
let data: Vec<&str> = path_str.split("base64,").collect();
let data = data[1];
let temp_dir = get_default_save_dir().expect("Failed to get home directory");
let temp_dir = PathBuf::from(temp_dir);
let temp_path = temp_dir.join("temp_audio.wav");
save_audio_from_base64(data, &temp_path)?;
Ok(temp_path)
} else {
Err(anyhow::anyhow!("get audio path error {}", path_str))
}
}
pub fn load_audio(path: &str, device: Device) -> Result<(Tensor, usize)> {
let audio_path = get_audio_path(path)?;
let mut reader = WavReader::open(audio_path)?;
let spec = reader.spec();
let samples: Vec<f32> = match spec.sample_format {
SampleFormat::Int => {
            // Convert integer samples to floats in [-1.0, 1.0]
// println!("spec.bits_per_sample: {}", spec.bits_per_sample);
match spec.bits_per_sample {
8 => reader
.samples::<i8>()
.map(|s| s.map(|sample| sample as f32 / i8::MAX as f32))
.collect::<Result<Vec<_>, _>>()?,
16 => reader
.samples::<i16>()
.map(|s| s.map(|sample| sample as f32 / i16::MAX as f32))
.collect::<Result<Vec<_>, _>>()?,
24 => reader
.samples::<i32>()
.map(|s| s.map(|sample| sample as f32 / 8388607.0))
.collect::<Result<Vec<_>, _>>()?,
_ => {
return Err(anyhow::anyhow!(
"Unsupported bit depth: {}",
spec.bits_per_sample
));
}
}
}
SampleFormat::Float => {
            // Read float samples directly
reader.samples::<f32>().collect::<Result<Vec<_>, _>>()?
}
};
let sample_rate = spec.sample_rate;
let mut audio_tensor = Tensor::from_slice(
&samples,
(
samples.len() / spec.channels as usize,
spec.channels as usize,
),
&device,
)?
.t()?;
if spec.channels > 1 {
        // Average over the channel dimension, reducing it to 1
audio_tensor = audio_tensor.mean_keepdim(0)?;
}
Ok((audio_tensor, sample_rate as usize))
}
pub fn load_audio_with_resample(
path: &str,
device: Device,
target_sample_rate: Option<usize>,
) -> Result<Tensor> {
let (mut audio, sr) = load_audio(path, device)?;
if let Some(target_sample_rate) = target_sample_rate
&& target_sample_rate != sr
{
audio = resample_simple(&audio, sr as i64, target_sample_rate as i64)?;
}
Ok(audio)
}
pub fn save_wav(audio: &Tensor, save_path: &str, sample_rate: u32) -> Result<()> {
let spec = hound::WavSpec {
channels: 1,
sample_rate,
bits_per_sample: 16,
sample_format: hound::SampleFormat::Int,
};
assert_eq!(audio.dim(0)?, 1, "audio channel must be 1");
let max = audio.abs()?.max_all()?;
let max = max.to_scalar::<f32>()?;
let ratio = if max > 1.0 { 32767.0 / max } else { 32767.0 };
let audio = audio.squeeze(0)?;
let audio_vec = audio.to_vec1::<f32>()?;
let mut writer = hound::WavWriter::create(save_path, spec).unwrap();
for i in audio_vec {
let sample_i16 = (i * ratio).round() as i16;
writer.write_sample(sample_i16).unwrap();
}
writer.finalize().unwrap();
Ok(())
}
pub fn get_audio_wav_u8(audio: &Tensor, sample_rate: u32) -> Result<Vec<u8>> {
let spec = hound::WavSpec {
channels: 1,
sample_rate,
bits_per_sample: 16,
sample_format: hound::SampleFormat::Int,
};
assert_eq!(audio.dim(0)?, 1, "audio channel must be 1");
let max = audio.abs()?.max_all()?;
let max = max.to_scalar::<f32>()?;
let ratio = if max > 1.0 { 32767.0 / max } else { 32767.0 };
let audio = audio.squeeze(0)?;
let audio_vec = audio.to_vec1::<f32>()?;
let mut cursor = Cursor::new(Vec::new());
let mut writer = hound::WavWriter::new(&mut cursor, spec)?;
for i in audio_vec {
let sample_i16 = (i * ratio).round() as i16;
writer.write_sample(sample_i16)?;
}
writer.finalize()?;
let wav_buffer = cursor.into_inner();
Ok(wav_buffer)
}
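// A minimal sketch (synthetic silence, not from the original crate): encode a
// mono tensor as an in-memory 16-bit WAV; a valid RIFF file starts with the
// ASCII bytes "RIFF".
#[allow(dead_code)]
fn wav_bytes_example() -> Result<()> {
    let audio = Tensor::zeros((1, 160), candle_core::DType::F32, &Device::Cpu)?;
    let bytes = get_audio_wav_u8(&audio, 16_000)?;
    assert_eq!(&bytes[..4], b"RIFF");
    Ok(())
}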
pub fn extract_audio_url(mes: &ChatCompletionParameters) -> Result<Vec<String>> {
let mut audio_vec = Vec::new();
for chat_mes in mes.messages.clone() {
if let ChatMessage::User { content, .. } = chat_mes.clone()
&& let ChatMessageContent::ContentPart(part_vec) = content
{
for part in part_vec {
if let ChatMessageContentPart::Audio(audio_part) = part {
let audio_url = audio_part.audio_url;
audio_vec.push(audio_url.url);
}
}
}
// if let ChatMessage::User { content, .. } = chat_mes.clone()
// && let ChatMessageContent::ContentPart(part_vec) = content
// {
// for part in part_vec {
// if let ChatMessageContentPart::Text(text_part) = part {
// let text = text_part.text;
// if text.chars().count() > 0 {
// ret = ret + &text + "\n"
// }
// }
// }
// }
}
Ok(audio_vec)
}
// Extract audio data from a ChatCompletionResponse
pub fn extract_audio_base64_from_response(
response: &ChatCompletionResponse,
) -> Result<Vec<String>> {
let mut audio_data_list = Vec::new();
for choice in &response.choices {
if let ChatMessage::Assistant {
content: Some(ChatMessageContent::ContentPart(parts)),
..
} = &choice.message
{
for part in parts.clone() {
if let ChatMessageContentPart::Audio(audio_part) = part {
// if let Some(audio_data) = &audio_part.audio_url {
// audio_data_list.push(audio_data.data.clone());
// }
let audio_url = audio_part.audio_url;
audio_data_list.push(audio_url.url);
}
}
}
}
Ok(audio_data_list)
}
// Decode base64 audio data and save it to a file
pub fn save_audio_from_base64<P: AsRef<Path>>(base64_data: &str, file_path: P) -> Result<()> {
    // Decode the base64 data
let data: Vec<&str> = base64_data.split("base64,").collect();
let data = data[1];
let decoded_data = BASE64_STANDARD.decode(data)?;
    // Create the file and write the data
let mut file = File::create(file_path)?;
file.write_all(&decoded_data)?;
Ok(())
}
// Combined helper: extract audio from a response and save it to files
pub fn extract_and_save_audio_from_response(
response: &ChatCompletionResponse,
directory: &str,
) -> Result<Vec<String>> {
let audio_data_list = extract_audio_base64_from_response(response)?;
let mut saved_files = Vec::new();
for (index, audio_data) in audio_data_list.iter().enumerate() {
let file_path = format!("{}/audio_{}.wav", directory, index);
save_audio_from_base64(audio_data, &file_path)?;
saved_files.push(file_path);
}
Ok(saved_files)
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/utils/video_utils.rs | src/utils/video_utils.rs | // use std::{fs::File, io::Write};
// use ffmpeg_next as ffmpeg;
// #[allow(unused)]
// fn save_file(
// frame: &ffmpeg::frame::Video,
// index: usize,
// ) -> std::result::Result<(), std::io::Error> {
// let mut file = File::create(format!("frame{}.ppm", index))?;
// file.write_all(format!("P6\n{} {}\n255\n", frame.width(), frame.height()).as_bytes())?;
// file.write_all(frame.data(0))?;
// Ok(())
// }
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/utils/img_utils.rs | src/utils/img_utils.rs | use std::io::Cursor;
use std::{collections::HashSet, path::PathBuf};
use aha_openai_dive::v1::resources::chat::{
ChatCompletionParameters, ChatMessage, ChatMessageContent, ChatMessageContentPart,
};
use anyhow::{Result, anyhow};
use base64::{Engine, engine::general_purpose};
use candle_core::{DType, Device, Tensor};
use image::{DynamicImage, ImageBuffer, ImageReader, Rgb, RgbImage, imageops};
use rayon::prelude::*;
use crate::utils::{ceil_by_factor, floor_by_factor, round_by_factor};
pub fn load_image_from_url(url: &str) -> Result<DynamicImage> {
tokio::task::block_in_place(|| {
let response = reqwest::blocking::get(url)
.map_err(|e| anyhow!(format!("Failed to fetch image from url: {}", e)))?;
let bytes = response
.bytes()
.map_err(|e| anyhow!(format!("Failed to get image bytes: {}", e)))?;
let cursor = Cursor::new(bytes);
let img = ImageReader::new(cursor)
.with_guessed_format()
.map_err(|e| anyhow!(format!("Failed to read image format: {}", e)))?
.decode()
.map_err(|e| anyhow!(format!("Failed to decode image: {}", e)))?;
Ok(img)
})
}
pub fn load_image_from_base64(base64_data: &str) -> Result<DynamicImage> {
let image_data = general_purpose::STANDARD
.decode(base64_data)
.map_err(|e| anyhow!(format!("Failed to decode image: {}", e)))?;
let cursor = Cursor::new(image_data);
let img = ImageReader::new(cursor)
.with_guessed_format()
.map_err(|e| anyhow!(format!("Failed to read image format: {}", e)))?
.decode()
.map_err(|e| anyhow!(format!("Failed to decode image: {}", e)))?;
Ok(img)
}
pub fn get_image(file: &str) -> Result<DynamicImage> {
let mut img = None;
if file.starts_with("http://") || file.starts_with("https://") {
img = Some(load_image_from_url(file)?);
}
if file.starts_with("file://") {
// let mut path = file.to_owned();
// path = path.split_off(7);
let path = url::Url::parse(file)?;
let path = path.to_file_path();
let path = match path {
Ok(path) => path,
Err(_) => {
let mut path = file.to_owned();
path = path.split_off(7);
PathBuf::from(path)
}
};
img = Some(
ImageReader::open(path)
.map_err(|e| anyhow!(format!("Failed to open file: {}", e)))?
.decode()
.map_err(|e| anyhow!(format!("Failed to decode image: {}", e)))?,
);
}
if file.starts_with("data:image") && file.contains("base64,") {
let data: Vec<&str> = file.split("base64,").collect();
let data = data[1];
img = Some(load_image_from_base64(data)?);
}
if let Some(img) = img {
return Ok(img);
}
Err(anyhow!("get image from message failed".to_string()))
}
pub fn extract_image_url(mes: &ChatCompletionParameters) -> Vec<&String> {
let mut img_vec = Vec::new();
for chat_mes in &mes.messages {
if let ChatMessage::User { content, .. } = chat_mes
&& let ChatMessageContent::ContentPart(part_vec) = content
{
for part in part_vec {
if let ChatMessageContentPart::Image(img_part) = part {
img_vec.push(&img_part.image_url.url);
}
}
}
}
img_vec
}
pub fn extract_images(mes: &ChatCompletionParameters) -> Result<Vec<DynamicImage>> {
let img_url_vec = extract_image_url(mes);
    // Download images in parallel
img_url_vec.par_iter().map(|url| get_image(url)).collect()
}
pub fn generate_target_ratios_sorted(min_num: u32, max_num: u32) -> Vec<(u32, u32)> {
let mut target_ratios = HashSet::new();
for n in min_num..=max_num {
for i in 1..=n {
for j in 1..=n {
let product = i * j;
if product <= max_num && product >= min_num {
target_ratios.insert((i, j));
}
}
}
}
// Convert to vector and sort by the product of elements (i*j)
let mut sorted_ratios: Vec<(u32, u32)> = target_ratios.into_iter().collect();
sorted_ratios.sort_by_key(|&(i, j)| i * j);
sorted_ratios
}
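// A minimal sketch (assumed bounds, not from the original crate): with
// (min_num, max_num) = (2, 4) every returned (i, j) grid has a tile count in
// [2, 4], and the list is sorted ascending by that count.
#[allow(dead_code)]
fn target_ratios_example() {
    let ratios = generate_target_ratios_sorted(2, 4);
    assert!(ratios.iter().all(|&(i, j)| (2..=4).contains(&(i * j))));
    assert!(ratios.windows(2).all(|w| w[0].0 * w[0].1 <= w[1].0 * w[1].1));
}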
pub fn find_closest_aspect_ratio(
aspect_ratio: f64,
target_ratios: &[(u32, u32)],
width: u32,
height: u32,
image_size: u32,
) -> (u32, u32) {
let mut best_ratio_diff = f64::INFINITY;
let mut best_ratio = (1, 1);
let area = width * height;
for &ratio in target_ratios {
let target_aspect_ratio = ratio.0 as f64 / ratio.1 as f64;
let ratio_diff = (aspect_ratio - target_aspect_ratio).abs();
if ratio_diff < best_ratio_diff {
best_ratio_diff = ratio_diff;
best_ratio = ratio;
} else if (ratio_diff - best_ratio_diff).abs() < 1e-10 {
            // When multiple candidate ratios have the same aspect-ratio
            // difference, pick the best one based on the image's actual area.
let target_area = 0.5 * (image_size as f64).powi(2) * (ratio.0 * ratio.1) as f64;
if area as f64 > target_area {
best_ratio = ratio;
}
}
}
best_ratio
}
pub fn dynamic_preprocess(
image: &DynamicImage,
image_size: u32,
use_thumbnail: bool,
) -> Result<(Vec<DynamicImage>, (u32, u32))> {
let orig_width = image.width();
let orig_height = image.height();
let aspect_ratio = orig_width as f64 / orig_height as f64;
    // Keep the number of tiles between 2 and 9
let target_ratios = generate_target_ratios_sorted(2, 9);
let target_aspect_ratio = find_closest_aspect_ratio(
aspect_ratio,
&target_ratios,
orig_width,
orig_height,
image_size,
);
let target_width = image_size * target_aspect_ratio.0;
let target_height = image_size * target_aspect_ratio.1;
let blocks = target_aspect_ratio.0 * target_aspect_ratio.1;
let mut resized_img = image.resize_exact(
target_width,
target_height,
image::imageops::FilterType::CatmullRom,
);
let mut processed_images = Vec::new();
let grid_width = target_width / image_size;
for i in 0..blocks {
// Calculate box coordinates
let x1 = (i % grid_width) * image_size;
let y1 = (i / grid_width) * image_size;
// Crop the image
let split_img = resized_img.crop(x1, y1, image_size, image_size);
processed_images.push(split_img);
}
assert_eq!(processed_images.len() as u32, blocks);
if use_thumbnail && processed_images.len() != 1 {
let thumbnail_img = image.resize_exact(
image_size,
image_size,
image::imageops::FilterType::CatmullRom,
);
processed_images.push(thumbnail_img);
}
Ok((processed_images, target_aspect_ratio))
}
pub fn resize_with_edge_padding(
img: &DynamicImage,
width: u32,
height: u32,
color: [u8; 3],
) -> DynamicImage {
    // Resize while preserving the original aspect ratio; the result may not
    // match the requested width/height
let mut img = img.resize(width, height, image::imageops::FilterType::CatmullRom);
    // Pad to the requested width/height with the given fill color
if img.height() != height || img.width() != width {
let (img_h, img_w) = (img.height(), img.width());
let img_buffer = img.to_rgb8();
let mut canvas: ImageBuffer<Rgb<u8>, Vec<u8>> =
RgbImage::from_pixel(width, height, Rgb(color));
let x_offset = (width - img_w) / 2;
let y_offset = (height - img_h) / 2;
imageops::overlay(&mut canvas, &img_buffer, x_offset as i64, y_offset as i64);
img = DynamicImage::ImageRgb8(canvas);
}
img
}
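// A minimal sketch (assumed sizes and fill color, not from the original
// crate): a 2:1 red test image padded onto a gray 224x224 canvas always comes
// back with the requested canvas dimensions.
#[allow(dead_code)]
fn edge_padding_example() {
    let img = DynamicImage::ImageRgb8(RgbImage::from_pixel(100, 50, Rgb([255, 0, 0])));
    let padded = resize_with_edge_padding(&img, 224, 224, [127, 127, 127]);
    assert_eq!((padded.width(), padded.height()), (224, 224));
}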
pub fn img_transform(
img: &DynamicImage,
mean: &Tensor,
std: &Tensor,
device: &Device,
dtype: DType,
) -> Result<Tensor> {
let img_h = img.height();
let img_w = img.width();
let img_vec = img.to_rgb8().into_raw();
// (h, w, c) => (c, h, w)
let img_tensor = Tensor::from_slice(&img_vec, (img_h as usize, img_w as usize, 3), device)?
.permute((2, 0, 1))?
.to_dtype(DType::F32)?;
// 0-255 rescale to 0-1
let img_tensor = img_tensor.affine(1.0 / 255.0, 0.)?;
// normalize
let img_tensor = img_tensor
.broadcast_sub(&mean.to_dtype(DType::F32)?)?
.broadcast_div(&std.to_dtype(DType::F32)?)?
.to_dtype(dtype)?;
Ok(img_tensor)
}
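// A minimal sketch (assumed normalization statistics, not from the original
// crate): a solid 128-gray image normalized with mean = std = 0.5 maps every
// channel to roughly (128/255 - 0.5) / 0.5 ≈ 0.004 in a (3, H, W) tensor.
#[allow(dead_code)]
fn img_transform_example() -> Result<()> {
    let device = Device::Cpu;
    let img = DynamicImage::ImageRgb8(RgbImage::from_pixel(4, 4, Rgb([128, 128, 128])));
    let mean = Tensor::from_slice(&[0.5f32, 0.5, 0.5], (3, 1, 1), &device)?;
    let std = Tensor::from_slice(&[0.5f32, 0.5, 0.5], (3, 1, 1), &device)?;
    let t = img_transform(&img, &mean, &std, &device, DType::F32)?;
    assert_eq!(t.dims(), &[3, 4, 4]);
    Ok(())
}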
pub fn img_smart_resize(
img_h: u32,
img_w: u32,
factor: u32,
min_pixels: u32,
max_pixels: u32,
) -> Result<(u32, u32)> {
if std::cmp::max(img_h, img_w) / std::cmp::min(img_h, img_w) > 200 {
return Err(anyhow!(format!(
"absolute aspect ratio mush be smaller than {}, got {}",
200,
std::cmp::max(img_h, img_w) / std::cmp::min(img_h, img_w)
)));
}
let image_factor = factor;
let mut h_bar = std::cmp::max(image_factor, round_by_factor(img_h, image_factor));
let mut w_bar = std::cmp::max(image_factor, round_by_factor(img_w, image_factor));
if h_bar * w_bar > max_pixels {
let beta = ((img_h * img_w) as f32 / max_pixels as f32).sqrt();
h_bar = std::cmp::max(
image_factor,
floor_by_factor(img_h as f32 / beta, image_factor),
);
w_bar = std::cmp::max(
image_factor,
floor_by_factor(img_w as f32 / beta, image_factor),
);
} else if h_bar * w_bar < min_pixels {
let beta = (min_pixels as f32 / (img_h * img_w) as f32).sqrt();
h_bar = ceil_by_factor(img_h as f32 * beta, image_factor);
w_bar = ceil_by_factor(img_w as f32 * beta, image_factor);
}
Ok((h_bar, w_bar))
}
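// A minimal sketch (assumed pixel bounds, not from the original crate): the
// resized edges are snapped to multiples of `factor` while the total pixel
// count stays within [min_pixels, max_pixels].
#[allow(dead_code)]
fn smart_resize_example() -> Result<()> {
    let factor = 28;
    let (h, w) = img_smart_resize(1023, 767, factor, 56 * 56, 1280 * 28 * 28)?;
    assert_eq!((h % factor, w % factor), (0, 0));
    assert!(h * w <= 1280 * 28 * 28);
    Ok(())
}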
pub fn img_transform_with_resize(
img: &DynamicImage,
h: u32,
w: u32,
mean: &Tensor,
std: &Tensor,
device: &Device,
dtype: DType,
) -> Result<Tensor> {
let img_resize = img.resize_exact(w, h, imageops::FilterType::CatmullRom);
let img_tensor = img_transform(&img_resize, mean, std, device, dtype)?;
Ok(img_tensor)
}
pub fn float_tensor_to_dynamic_image(tensor: &Tensor) -> Result<DynamicImage> {
let tensor = tensor.affine(255.0, 0.0)?.clamp(0.0, 255.0)?;
let tensor_u8 = tensor.to_dtype(DType::U8)?.to_device(&Device::Cpu)?;
let (c, h, w) = tensor_u8.dims3()?;
match c {
1 => {
let tensor_u8 = tensor_u8.reshape((h, w))?;
let data: Vec<u8> = tensor_u8.flatten_all()?.to_vec1()?;
let img = ImageBuffer::from_raw(w as u32, h as u32, data)
.ok_or_else(|| anyhow!("Failed to create image buffer"))?;
Ok(DynamicImage::ImageLuma8(img))
}
3 => {
let tensor_u8 = tensor_u8.permute((1, 2, 0))?;
let data: Vec<u8> = tensor_u8.flatten_all()?.to_vec1()?;
let img = ImageBuffer::from_raw(w as u32, h as u32, data)
.ok_or_else(|| anyhow!("Failed to create image buffer"))?;
Ok(DynamicImage::ImageRgb8(img))
}
_ => Err(anyhow!(format!("Unsupported number of channels: {}", c))),
}
}
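// A minimal sketch (synthetic tensor, not from the original crate): a
// constant CHW float tensor with values in [0, 1] becomes a 2x2 RGB image.
#[allow(dead_code)]
fn tensor_to_image_example() -> Result<()> {
    let t = Tensor::full(0.5f32, (3, 2, 2), &Device::Cpu)?;
    let img = float_tensor_to_dynamic_image(&t)?;
    assert_eq!((img.width(), img.height()), (2, 2));
    Ok(())
}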
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/utils/mod.rs | src/utils/mod.rs | pub mod audio_utils;
pub mod img_utils;
pub mod tensor_utils;
pub mod video_utils;
use std::{fs, process::Command};
use aha_openai_dive::v1::resources::{
chat::{
AudioUrlType, ChatCompletionChoice, ChatCompletionChunkChoice, ChatCompletionChunkResponse,
ChatCompletionParameters, ChatCompletionResponse, ChatMessage, ChatMessageAudioContentPart,
ChatMessageContent, ChatMessageContentPart, ChatMessageImageContentPart, DeltaChatMessage,
DeltaFunction, DeltaToolCall, Function, ImageUrlType, ToolCall,
},
shared::{FinishReason, Usage},
};
use anyhow::Result;
use candle_core::{DType, Device};
use candle_transformers::generation::{LogitsProcessor, Sampling};
use dirs::home_dir;
pub fn get_device(device: Option<&Device>) -> Device {
match device {
Some(d) => d.clone(),
None => {
#[cfg(feature = "cuda")]
{
Device::new_cuda(0).unwrap_or(Device::Cpu)
}
#[cfg(not(feature = "cuda"))]
{
Device::Cpu
}
}
}
}
pub fn get_gpu_sm_arch() -> Result<f32> {
let output = Command::new("nvidia-smi")
.arg("--query-gpu=compute_cap")
.arg("--format=csv,noheader")
.output()
.map_err(|e| anyhow::anyhow!(format!("Failed to execute nvidia-smi: {}", e)))?;
if !output.status.success() {
return Err(anyhow::anyhow!(format!(
"nvidia-smi failed with status: {}\nError: {}",
output.status,
String::from_utf8_lossy(&output.stderr)
)));
}
let output_str = String::from_utf8_lossy(&output.stdout);
let output_str = output_str.trim();
let sm_float = match output_str.parse::<f32>() {
Ok(num) => num,
Err(_) => {
            return Err(anyhow::anyhow!(
                "failed to parse gpu sm arch '{}' as float32",
                output_str
            ));
}
};
Ok(sm_float)
}
pub fn get_dtype(dtype: Option<DType>, cfg_dtype: &str) -> DType {
match dtype {
Some(d) => d,
None => {
#[cfg(feature = "cuda")]
{
match cfg_dtype {
"float32" | "float" => DType::F32,
"float64" | "double" => DType::F64,
"float16" => DType::F16,
"bfloat16" => {
let arch = get_gpu_sm_arch();
match arch {
Err(_) => DType::F16,
Ok(a) => {
                            // Only NVIDIA GPUs with SM arch >= 8.0 support BF16
if a >= 8.0 { DType::BF16 } else { DType::F16 }
}
}
}
"uint8" => DType::U8,
"int8" | "int16" | "int32" | "int64" => DType::I64,
_ => DType::F32,
}
}
#[cfg(not(feature = "cuda"))]
{
match cfg_dtype {
"float32" | "float" => DType::F32,
"float64" | "double" => DType::F64,
"float16" | "bfloat16" => DType::F16, // cpu上bfloat16有问题
"uint8" => DType::U8,
"int8" | "int16" | "int32" | "int64" => DType::I64,
_ => DType::F32,
}
}
}
}
}
pub fn string_to_static_str(s: String) -> &'static str {
Box::leak(s.into_boxed_str())
}
pub fn find_type_files(path: &str, extension_type: &str) -> Result<Vec<String>> {
let mut files = Vec::new();
for entry in std::fs::read_dir(path)? {
let entry = entry?;
let file_path = entry.path();
if file_path.is_file()
&& let Some(extension) = file_path.extension()
&& extension == extension_type
{
files.push(file_path.to_string_lossy().to_string());
}
}
Ok(files)
}
pub fn round_by_factor(num: u32, factor: u32) -> u32 {
let round = (num as f32 / factor as f32).round() as u32;
round * factor
}
pub fn floor_by_factor(num: f32, factor: u32) -> u32 {
let floor = (num / factor as f32).floor() as u32;
floor * factor
}
pub fn ceil_by_factor(num: f32, factor: u32) -> u32 {
let ceil = (num / factor as f32).ceil() as u32;
ceil * factor
}
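// A minimal sketch (assumed values, not from the original crate): the
// *_by_factor helpers snap a value to the nearest, lower, or higher multiple
// of `factor`.
#[allow(dead_code)]
fn by_factor_example() {
    assert_eq!(round_by_factor(30, 28), 28);
    assert_eq!(floor_by_factor(30.0, 28), 28);
    assert_eq!(ceil_by_factor(30.0, 28), 56);
}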
pub fn build_img_completion_response(
base64vec: &Vec<String>,
model_name: &str,
) -> ChatCompletionResponse {
let id = uuid::Uuid::new_v4().to_string();
let mut response = ChatCompletionResponse {
id: Some(id),
choices: vec![],
created: chrono::Utc::now().timestamp() as u32,
model: model_name.to_string(),
service_tier: None,
system_fingerprint: None,
object: "chat.completion".to_string(),
usage: None,
};
    let mut content_part_vec = vec![];
    for img_base64 in base64vec {
        let img_base64_prefix = "data:image/png;base64,".to_string() + img_base64;
let part = ChatMessageContentPart::Image(ChatMessageImageContentPart {
r#type: "image".to_string(),
image_url: ImageUrlType {
url: img_base64_prefix,
detail: None,
},
});
        content_part_vec.push(part);
}
let choice = ChatCompletionChoice {
index: 0,
message: ChatMessage::Assistant {
            content: Some(ChatMessageContent::ContentPart(content_part_vec)),
reasoning_content: None,
refusal: None,
name: None,
audio: None,
tool_calls: None,
},
finish_reason: Some(FinishReason::StopSequenceReached),
logprobs: None,
};
response.choices.push(choice);
response
}
pub fn build_audio_completion_response(
base64_audio: &String,
model_name: &str,
) -> ChatCompletionResponse {
let id = uuid::Uuid::new_v4().to_string();
let mut response = ChatCompletionResponse {
id: Some(id),
choices: vec![],
created: chrono::Utc::now().timestamp() as u32,
model: model_name.to_string(),
service_tier: None,
system_fingerprint: None,
object: "chat.completion".to_string(),
usage: None,
};
let base64_audio = format!("data:audio/wav;base64,{}", base64_audio);
    let content_part_vec = vec![ChatMessageContentPart::Audio(ChatMessageAudioContentPart {
r#type: "audio".to_string(),
audio_url: AudioUrlType {
url: base64_audio.to_string(),
},
})];
let choice = ChatCompletionChoice {
index: 0,
message: ChatMessage::Assistant {
            content: Some(ChatMessageContent::ContentPart(content_part_vec)),
reasoning_content: None,
refusal: None,
name: None,
audio: None,
tool_calls: None,
},
finish_reason: Some(FinishReason::StopSequenceReached),
logprobs: None,
};
response.choices.push(choice);
response
}
pub fn build_completion_response(
res: String,
model_name: &str,
num_tokens: Option<u32>,
) -> ChatCompletionResponse {
let id = uuid::Uuid::new_v4().to_string();
let usage = num_tokens.map(|num| Usage {
prompt_tokens: None,
completion_tokens: None,
total_tokens: num,
prompt_tokens_details: None,
completion_tokens_details: None,
});
let mut response = ChatCompletionResponse {
id: Some(id),
choices: vec![],
created: chrono::Utc::now().timestamp() as u32,
model: model_name.to_string(),
service_tier: None,
system_fingerprint: None,
object: "chat.completion".to_string(),
usage,
};
let choice = if res.contains("<tool_call>") {
let mes: Vec<&str> = res.split("<tool_call>").collect();
let content = mes[0].to_string();
let mut tool_vec = Vec::new();
for (i, m) in mes.iter().enumerate().skip(1) {
let tool_mes = m.replace("</tool_call>", "");
let function = match serde_json::from_str::<serde_json::Value>(&tool_mes) {
Ok(json_value) => {
let name = json_value
.get("name")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.unwrap_or_default();
let arguments = json_value
.get("arguments")
.map(|v| v.to_string())
.unwrap_or_default();
Function { name, arguments }
}
Err(_) => Function {
name: "".to_string(),
arguments: "".to_string(),
},
};
let tool_call = ToolCall {
id: (i - 1).to_string(),
r#type: "function".to_string(),
function,
};
tool_vec.push(tool_call);
}
ChatCompletionChoice {
index: 0,
message: ChatMessage::Assistant {
content: Some(ChatMessageContent::Text(content)),
reasoning_content: None,
refusal: None,
name: None,
audio: None,
tool_calls: Some(tool_vec),
},
finish_reason: Some(FinishReason::ToolCalls),
logprobs: None,
}
} else {
ChatCompletionChoice {
index: 0,
message: ChatMessage::Assistant {
content: Some(ChatMessageContent::Text(res)),
reasoning_content: None,
refusal: None,
name: None,
audio: None,
tool_calls: None,
},
finish_reason: Some(FinishReason::StopSequenceReached),
logprobs: None,
}
};
response.choices.push(choice);
response
}
pub fn build_completion_chunk_response(
res: String,
model_name: &str,
tool_call_id: Option<String>,
tool_call_content: Option<String>,
) -> ChatCompletionChunkResponse {
let id = uuid::Uuid::new_v4().to_string();
let mut response = ChatCompletionChunkResponse {
id: Some(id),
choices: vec![],
created: chrono::Utc::now().timestamp() as u32,
model: model_name.to_string(),
system_fingerprint: None,
object: "chat.completion.chunk".to_string(),
usage: None,
};
let choice = if let Some(tool_call_id) = tool_call_id {
let function = if let Some(content) = tool_call_content {
match serde_json::from_str::<serde_json::Value>(&content) {
Ok(json_value) => {
let name = json_value
.get("name")
.and_then(|v| v.as_str())
.map(|s| s.to_string());
let arguments = json_value.get("arguments").map(|v| v.to_string());
DeltaFunction { name, arguments }
}
Err(_) => DeltaFunction {
name: None,
arguments: Some(content),
},
}
} else {
DeltaFunction {
name: None,
arguments: None,
}
};
ChatCompletionChunkChoice {
index: Some(0),
delta: DeltaChatMessage::Assistant {
content: None,
reasoning_content: None,
refusal: None,
name: None,
tool_calls: Some(vec![DeltaToolCall {
index: Some(0),
id: Some(tool_call_id),
r#type: Some("function".to_string()),
function,
}]),
},
finish_reason: None,
logprobs: None,
}
} else {
ChatCompletionChunkChoice {
index: Some(0),
delta: DeltaChatMessage::Assistant {
content: Some(ChatMessageContent::Text(res)),
reasoning_content: None,
refusal: None,
name: None,
tool_calls: None,
},
finish_reason: None,
logprobs: None,
}
};
response.choices.push(choice);
response
}
pub fn get_logit_processor(
temperature: Option<f32>,
top_p: Option<f32>,
top_k: Option<usize>,
seed: u64,
) -> LogitsProcessor {
let temperature = temperature.and_then(|v| if v < 1e-7 { None } else { Some(v) });
match top_k {
None => LogitsProcessor::new(
seed,
temperature.map(|temp| temp as f64),
top_p.map(|tp| tp as f64),
),
Some(k) => {
let sampling = match temperature {
None => Sampling::ArgMax,
Some(temperature) => match top_p {
None => Sampling::TopK {
k,
temperature: temperature as f64,
},
Some(p) => Sampling::TopKThenTopP {
k,
p: p as f64,
temperature: temperature as f64,
},
},
};
LogitsProcessor::from_sampling(seed, sampling)
}
}
}
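// A minimal usage sketch (assumed sampling settings, not from the original
// crate): build a top-k + top-p sampler and draw one token id from a toy
// three-entry logit vector.
#[allow(dead_code)]
fn logit_processor_example() -> Result<()> {
    let mut lp = get_logit_processor(Some(0.7), Some(0.9), Some(2), 42);
    let logits = candle_core::Tensor::new(&[0.1f32, 2.0, -1.0], &Device::Cpu)?;
    let token = lp.sample(&logits)?;
    assert!(token < 3);
    Ok(())
}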
pub fn extract_mes(mes: &ChatCompletionParameters) -> Result<Vec<(String, String)>> {
let mut mes_vec = Vec::new();
for chat_mes in mes.messages.clone() {
if let ChatMessage::User { content, .. } = chat_mes.clone()
&& let ChatMessageContent::ContentPart(part_vec) = content
{
for part in part_vec {
if let ChatMessageContentPart::Text(text_part) = part {
let text = text_part.text;
mes_vec.push(("<|User|>".to_string(), text));
}
}
} else if let ChatMessage::Assistant { content, .. } = chat_mes.clone()
&& let Some(cont) = content
&& let ChatMessageContent::Text(c) = cont
{
mes_vec.push(("<|Assistant|>".to_string(), c));
}
}
Ok(mes_vec)
}
pub fn extract_metadata_value<T>(
metadata: &Option<std::collections::HashMap<String, String>>,
key: &str,
) -> Option<T>
where
T: std::str::FromStr + Clone + PartialEq,
{
if let Some(map) = metadata
&& let Some(value_str) = map.get(key)
&& let Ok(value) = value_str.parse::<T>()
{
return Some(value);
}
None
}
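// A minimal sketch (assumed key and value, not from the original crate):
// parse a typed value out of an optional metadata map.
#[allow(dead_code)]
fn metadata_example() {
    let mut map = std::collections::HashMap::new();
    map.insert("top_k".to_string(), "40".to_string());
    let top_k: Option<usize> = extract_metadata_value(&Some(map), "top_k");
    assert_eq!(top_k, Some(40));
}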
pub fn extract_user_text(mes: &ChatCompletionParameters) -> Result<String> {
let mut ret = "".to_string();
for chat_mes in mes.messages.clone() {
if let ChatMessage::User { content, .. } = chat_mes.clone()
&& let ChatMessageContent::ContentPart(part_vec) = content
{
for part in part_vec {
if let ChatMessageContentPart::Text(text_part) = part {
let text = text_part.text;
if text.chars().count() > 0 {
ret = ret + &text + "\n"
}
}
}
}
}
ret = ret.trim().to_string();
Ok(ret)
}
pub fn get_default_save_dir() -> Option<String> {
home_dir().map(|mut path| {
path.push(".aha");
if let Err(e) = fs::create_dir_all(&path) {
eprintln!("Failed to create directory {:?}: {}", path, e);
}
path.to_string_lossy().to_string()
})
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/utils/tensor_utils.rs | src/utils/tensor_utils.rs | use anyhow::{Result, anyhow};
use candle_core::{D, DType, Device, IndexOp, Tensor, shape::Dim};
use candle_nn::ops::sigmoid;
pub fn prepare_causal_attention_mask(
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
device: &Device,
) -> Result<Tensor> {
// Sliding window mask?
// let mask: Vec<_> = (0..tgt_len)
// .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
// .collect();
// let mask = Tensor::from_vec(mask, (tgt_len, tgt_len), device)?;
let arange = Tensor::arange(0u32, tgt_len as u32, device)?;
let arange = arange.unsqueeze(1)?.broadcast_as((tgt_len, tgt_len))?;
let upper_triangle = arange.t()?.gt(&arange)?;
let mask = upper_triangle.where_cond(
&Tensor::new(f32::NEG_INFINITY, device)?.broadcast_as(arange.shape())?,
&Tensor::new(0f32, device)?.broadcast_as(arange.shape())?,
)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
let mask = mask
.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(DType::F32)?;
Ok(mask)
}
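// Illustrative sketch (added for clarity; not part of the original file): for a
// 3-token prompt with no offset, the mask is 0 on and below the diagonal and
// negative infinity above it, so each position only attends to itself and the past.
#[cfg(test)]
mod causal_mask_sketch {
    use candle_core::Device;

    #[test]
    fn blocks_future_positions() -> anyhow::Result<()> {
        let m = super::prepare_causal_attention_mask(1, 3, 0, &Device::Cpu)?;
        assert_eq!(m.dims(), [1, 1, 3, 3]);
        let rows = m.squeeze(0)?.squeeze(0)?.to_vec2::<f32>()?;
        assert_eq!(rows[0][0], 0.0);
        assert!(rows[0][1].is_infinite() && rows[0][1] < 0.0);
        assert_eq!(rows[2][1], 0.0);
        Ok(())
    }
}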
pub fn repeat_kv(xs: Tensor, n_rep: usize) -> Result<Tensor> {
if n_rep == 1 {
Ok(xs)
} else {
let (b_sz, n_kv_head, seq_len, head_dim) = xs.dims4()?;
// Using cat is faster than a broadcast as it avoids going through a potentially
// strided copy.
// https://github.com/huggingface/candle/pull/2043
let kv = Tensor::cat(&vec![&xs; n_rep], 2)?.reshape((
b_sz,
n_kv_head * n_rep,
seq_len,
head_dim,
))?;
Ok(kv)
}
}
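// Illustrative sketch (added for clarity; not part of the original file): repeating
// 2 KV heads 3 times yields 6 heads while leaving batch, sequence and head_dim alone.
#[cfg(test)]
mod repeat_kv_sketch {
    use candle_core::{DType, Device, Tensor};

    #[test]
    fn expands_kv_heads() -> anyhow::Result<()> {
        let kv = Tensor::zeros((1, 2, 5, 64), DType::F32, &Device::Cpu)?;
        let out = super::repeat_kv(kv, 3)?;
        assert_eq!(out.dims(), [1, 6, 5, 64]);
        Ok(())
    }
}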
pub fn split_tensor<D: Dim>(t: &Tensor, splits: &[usize], dim: D) -> Result<Vec<Tensor>> {
    // Split a tensor into chunks of the given lengths along `dim`.
    // Example: t: (25), splits: [5, 10, 5, 5], dim: 0
    // returns a Vec of len 4 whose tensors have shapes (5), (10), (5), (5).
let dim = dim.to_index(t.shape(), "split")?;
let mut split_res = Vec::new();
let mut index = 0;
for split in splits {
split_res.push(t.narrow(dim, index, *split)?);
index += *split;
}
Ok(split_res)
}
pub fn split_tensor_with_size<D: Dim>(
t: &Tensor,
splits_size: usize,
dim: D,
) -> Result<Vec<Tensor>> {
    // Split a tensor into equal chunks of `splits_size` along `dim`.
    // Example: t: (25), splits_size: 5, dim: 0
    // returns a Vec of len 5 whose tensors have shapes (5), (5), (5), (5), (5).
let dim = dim.to_index(t.shape(), "split")?;
let mut split_res = Vec::new();
let dim_size = t.dim(dim)?;
assert_eq!(
dim_size % splits_size,
0,
"input tensor dim size % splits_size must be equal to 0"
);
for split in (0..dim_size).step_by(splits_size) {
split_res.push(t.narrow(dim, split, splits_size)?);
}
Ok(split_res)
}
pub fn safe_arg_sort_last_dim(t: &Tensor, ascending: bool) -> Result<Tensor> {
    // On the GPU, arg_sort_last_dim fails once the last dimension exceeds 1024,
    // so tensors larger than that are sorted on the CPU instead.
let last_dim = t.dims()[t.rank() - 1];
if last_dim <= 1024 {
let t = t.arg_sort_last_dim(ascending)?;
Ok(t)
} else {
let cpu_tensor = t.to_device(&Device::Cpu)?;
let sorted_indices = cpu_tensor.arg_sort_last_dim(ascending)?;
let t = sorted_indices.to_device(t.device())?;
Ok(t)
}
}
pub fn nonzero_index_vec(mask: &Tensor) -> Result<Vec<u32>> {
    // Collect the indices of the non-zero elements of the mask into a Vec.
    // Only rank-1 input is supported.
let mut mask = mask.clone();
if mask.dtype() != DType::U32 {
mask = mask.to_dtype(DType::U32)?;
}
match mask.rank() {
0 => Err(anyhow!(format!(
"input rank must > 0, the input tensor rank: {}",
mask.rank()
))),
1 => {
let mask_vector = mask.to_vec1::<u32>()?;
let indices: Vec<u32> = mask_vector
.iter()
.enumerate()
.filter_map(|(idx, &val)| if val != 0 { Some(idx as u32) } else { None })
.collect();
Ok(indices)
}
_ => Err(anyhow!(format!(
"input rank not support, the input tensor rank: {}",
mask.rank()
))),
}
}
pub fn nonzero_index(mask: &Tensor) -> Result<Tensor> {
    // Collect the indices of the non-zero elements of the mask, returned as a Tensor.
let indices_tensor = match mask.rank() {
0 => {
return Err(anyhow!(format!(
"input rank must > 0, the input tensor rank: {}",
mask.rank()
)));
}
1 => {
let index_vec = nonzero_index_vec(mask)?;
Tensor::from_slice(&index_vec, index_vec.len(), mask.device())?
}
_ => {
return Err(anyhow!(format!(
"input rank must == 1, the input tensor rank: {}",
mask.rank()
)));
}
};
Ok(indices_tensor)
}
pub fn zero_index_vec(mask: &Tensor) -> Result<Vec<u32>> {
    // Collect the indices of the zero elements of the mask into a Vec.
    // Only rank-1 input is supported.
let mut mask = mask.clone();
if mask.dtype() != DType::U32 {
mask = mask.to_dtype(DType::U32)?;
}
match mask.rank() {
0 => Err(anyhow!(format!(
"input rank must > 0, the input tensor rank: {}",
mask.rank()
))),
1 => {
let mask_vector = mask.to_vec1::<u32>()?;
let indices: Vec<u32> = mask_vector
.iter()
.enumerate()
.filter_map(|(idx, &val)| if val == 0 { Some(idx as u32) } else { None })
.collect();
Ok(indices)
}
_ => Err(anyhow!(format!(
"input rank not support, the input tensor rank: {}",
mask.rank()
))),
}
}
pub fn zero_index(mask: &Tensor) -> Result<Tensor> {
let index_vec = zero_index_vec(mask)?;
let indices_tensor = Tensor::from_slice(&index_vec, index_vec.len(), mask.device())?;
Ok(indices_tensor)
}
pub fn nonzero_slice(mask: &Tensor) -> Result<Vec<(usize, usize)>> {
    // Find the indices of the non-zero elements of the mask, then group
    // consecutive indices into contiguous ranges.
    // e.g. non-zero indices [0, 3, 4, 5, 8, 9]
    // yield the ranges [(0, 1), (3, 6), (8, 10)],
    // each range half-open: inclusive start, exclusive end.
let mut index_vec = nonzero_index_vec(mask)?;
match index_vec.len() {
0 => Ok(vec![]),
1 => Ok(vec![(index_vec[0] as usize, (index_vec[0] + 1) as usize)]),
_ => {
let mut vec_slice = vec![];
let mut start = index_vec.remove(0);
let mut last = start;
for i in index_vec {
if i == (last + 1) {
last = i;
continue;
} else {
vec_slice.push((start as usize, (last + 1) as usize));
start = i;
last = i;
}
}
vec_slice.push((start as usize, (last + 1) as usize));
Ok(vec_slice)
}
}
}
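// Illustrative sketch (added for clarity; not part of the original file): the mask
// below has non-zero entries at indices [0, 3, 4, 5, 8, 9], which group into the
// half-open ranges described in the comments above.
#[cfg(test)]
mod nonzero_slice_sketch {
    use candle_core::{Device, Tensor};

    #[test]
    fn groups_consecutive_nonzero_indices() -> anyhow::Result<()> {
        let mask = Tensor::new(&[1u32, 0, 0, 1, 1, 1, 0, 0, 1, 1], &Device::Cpu)?;
        assert_eq!(super::nonzero_slice(&mask)?, vec![(0, 1), (3, 6), (8, 10)]);
        Ok(())
    }
}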
pub fn masked_scatter_dim0(original: &Tensor, replace: &Tensor, mask: &Tensor) -> Result<Tensor> {
    // Use the rows of `replace` to overwrite the rows of `original` selected by the
    // non-zero entries of `mask`, substituting row by row.
    // original: rank = 3: (bs, seq_len, hidden_dim)
    // replace: rank = 2: (seq_len, hidden_dim)
    // mask: rank = 2: (bs, seq_len)
    // Inference runs with bs = 1, so squeeze the batch dim for the substitution
    // and unsqueeze it back afterwards.
    if original.dim(0)? != 1 || mask.dim(0)? != 1 {
        return Err(anyhow!(format!(
            "masked_scatter_dim0 original bs: {} or mask bs: {} not equal to 1",
            original.dim(0)?,
            mask.dim(0)?
        )));
    }
let mut original = original.squeeze(0)?;
let mask = mask.squeeze(0)?;
let slices = nonzero_slice(&mask)?;
let mut sub_start = 0usize;
let mut sub_end;
for (start, end) in slices {
sub_end = sub_start + (end - start);
let sub_replace = replace.i((sub_start..sub_end, ..))?;
original = original.slice_assign(&[(start..end), (0..original.dim(1)?)], &sub_replace)?;
sub_start = sub_end;
}
original = original.unsqueeze(0)?;
Ok(original)
}
pub fn get_not_equal_mask(input_ids: &Tensor, token_ids: u32) -> Result<Tensor> {
let image_token_id_tensor = Tensor::new(vec![token_ids], input_ids.device())?;
let mask = input_ids
.broadcast_ne(&image_token_id_tensor)?
.to_dtype(candle_core::DType::U32)?;
Ok(mask)
}
pub fn get_equal_mask(input_ids: &Tensor, token_ids: u32) -> Result<Tensor> {
let image_token_id_tensor =
Tensor::new(vec![token_ids], input_ids.device())?.to_dtype(input_ids.dtype())?;
let mask = input_ids
.broadcast_eq(&image_token_id_tensor)?
.to_dtype(candle_core::DType::U32)?;
Ok(mask)
}
pub fn get_eq_indices(input_ids: &Tensor, token_id: u32) -> Result<Tensor> {
// input_ids -> shape: (seq_len)
let mask = get_equal_mask(input_ids, token_id)?;
let indices = nonzero_index(&mask)?;
Ok(indices)
}
pub fn get_vision_next_indices(input_ids: &Tensor, token_id: u32) -> Result<Tensor> {
// input_ids -> shape: (seq_len)
let indices = get_eq_indices(input_ids, token_id)?;
let indices = indices.broadcast_add(&Tensor::new(vec![1u32], input_ids.device())?)?;
Ok(indices)
}
pub fn linspace(start: f32, end: f32, steps: usize, device: &Device) -> Result<Tensor> {
assert!(steps > 0, "steps must be > 0");
if steps == 1 {
let t = Tensor::from_slice(&[start], 1, device)?;
return Ok(t);
}
let step_size = (end - start) / (steps - 1) as f32;
let data: Vec<f32> = (0..steps).map(|i| start + i as f32 * step_size).collect();
let t = Tensor::from_slice(&data, steps, device)?;
Ok(t)
}
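// Illustrative sketch (added for clarity; not part of the original file): like the
// usual linspace, both endpoints are included, e.g. 5 steps over [0, 1].
#[cfg(test)]
mod linspace_sketch {
    use candle_core::Device;

    #[test]
    fn includes_both_endpoints() -> anyhow::Result<()> {
        let t = super::linspace(0.0, 1.0, 5, &Device::Cpu)?;
        let v = t.to_vec1::<f32>()?;
        for (got, want) in v.iter().zip([0.0f32, 0.25, 0.5, 0.75, 1.0]) {
            assert!((got - want).abs() < 1e-6);
        }
        Ok(())
    }
}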
pub fn bitor_tensor(mask1: &Tensor, mask2: &Tensor) -> Result<Tensor> {
assert!(
mask1.shape() == mask2.shape(),
" bitor_tensor two tensor shape mask be equal"
);
let bitor = mask1.add(mask2)?.ne(&Tensor::zeros_like(mask1)?)?;
Ok(bitor)
}
pub fn prod_tensor_last_dim(t: &Tensor) -> Result<Tensor> {
let prod = match t.rank() {
0 => t.clone(),
1 => {
let data_type = t.dtype();
match data_type {
DType::U8 => {
let t_vec = t.to_vec1::<u8>()?;
let prod = t_vec.iter().product::<u8>();
Tensor::from_slice(&[prod], 1, t.device())?
}
DType::U32 => {
let t_vec = t.to_vec1::<u32>()?;
let prod = t_vec.iter().product::<u32>();
Tensor::from_slice(&[prod], 1, t.device())?
}
DType::I64 => {
let t_vec = t.to_vec1::<i64>()?;
let prod = t_vec.iter().product::<i64>();
Tensor::from_slice(&[prod], 1, t.device())?
}
DType::F64 => {
let t_vec = t.to_vec1::<f64>()?;
let prod = t_vec.iter().product::<f64>();
Tensor::from_slice(&[prod], 1, t.device())?
}
_ => {
let t_vec = t.to_vec1::<f32>()?;
let prod = t_vec.iter().product::<f32>();
Tensor::from_slice(&[prod], 1, t.device())?
}
}
}
2 => {
let data_type = t.dtype();
match data_type {
DType::U8 => {
let t_vec = t.to_vec2::<u8>()?;
let mut prod_vec = vec![];
for v in t_vec.iter() {
let prod = v.iter().product::<u8>();
prod_vec.push(prod);
}
Tensor::new(prod_vec, t.device())?
}
DType::U32 => {
let t_vec = t.to_vec2::<u32>()?;
let mut prod_vec = vec![];
for v in t_vec.iter() {
let prod = v.iter().product::<u32>();
prod_vec.push(prod);
}
Tensor::new(prod_vec, t.device())?
}
DType::I64 => {
let t_vec = t.to_vec2::<i64>()?;
let mut prod_vec = vec![];
for v in t_vec.iter() {
let prod = v.iter().product::<i64>();
prod_vec.push(prod);
}
Tensor::new(prod_vec, t.device())?
}
DType::F64 => {
let t_vec = t.to_vec2::<f64>()?;
let mut prod_vec = vec![];
for v in t_vec.iter() {
let prod = v.iter().product::<f64>();
prod_vec.push(prod);
}
Tensor::new(prod_vec, t.device())?
}
_ => {
let t_vec = t.to_vec2::<f32>()?;
let mut prod_vec = vec![];
for v in t_vec.iter() {
let prod = v.iter().product::<f32>();
prod_vec.push(prod);
}
Tensor::new(prod_vec, t.device())?
}
}
}
_ => {
return Err(anyhow!(format!("can not action this dim")));
}
};
Ok(prod)
}
pub fn mask_index_add(original: &Tensor, mask: &Tensor, add: &Tensor) -> Result<Tensor> {
let visual_nonzero_index = nonzero_index(mask)?;
let xs = original.index_add(&visual_nonzero_index, add, 0)?;
Ok(xs)
}
pub fn compute_1d_coords(
input_size: usize,
output_size: usize,
align_corner: Option<bool>,
) -> Result<Vec<f32>> {
if input_size == 1 {
Ok(vec![0f32; output_size])
} else if let Some(align_) = align_corner
&& align_
{
Ok((0..output_size)
.map(|i| i as f32 * (input_size - 1) as f32 / (output_size - 1) as f32)
.collect())
} else {
Ok((0..output_size)
.map(|i| {
(i as f32 + 0.5) * (input_size as f32 / output_size as f32) - 0.5
// coord.max(0.0).min((input_size - 1) as f32)
})
.collect())
}
}
pub fn interpolate_linear_1d(
t: &Tensor,
target_size: usize,
align_corner: Option<bool>,
) -> Result<Tensor> {
// t: [b, channels, features]
if t.rank() != 3 {
return Err(anyhow::anyhow!(
"Input rank must have equal to 3 dimensions"
));
}
let shape = t.dims();
let orig_size = shape[shape.len() - 1];
if orig_size == target_size {
return Ok(t.clone());
}
let (bs, channels, _) = t.dims3()?;
let mut output = Tensor::zeros((bs, channels, target_size), t.dtype(), t.device())?;
let coords = compute_1d_coords(orig_size, target_size, align_corner)?;
for b in 0..bs {
for c in 0..channels {
let input_slice = t.i((b, c))?;
let mut out_i = Vec::new();
// for x_out in 0..target_size {
for &coord in coords.iter().take(target_size) {
let coord = if coord < 0.0 { 0.0 } else { coord };
let x0 = coord.floor() as usize;
let x1 = std::cmp::min(x0 + 1, orig_size - 1);
let weight = (coord - x0 as f32) as f64;
let value0 = input_slice.get(x0)?;
let value1 = input_slice.get(x1)?;
let interpolated =
(value0.affine(1.0 - weight, 0.0)? + value1.affine(weight, 0.0)?)?;
out_i.push(interpolated);
}
let out_i = Tensor::stack(&out_i, 0)?.unsqueeze(0)?.unsqueeze(0)?;
output = output.slice_assign(&[(b..b + 1), (c..c + 1), (0..target_size)], &out_i)?;
}
}
output = output.contiguous()?;
Ok(output)
}
pub fn interpolate_bilinear(
input: &Tensor,
target_size: (usize, usize),
align_corner: Option<bool>,
) -> Result<Tensor> {
// input: [b, channels, height, width]
if input.rank() != 4 {
return Err(anyhow::anyhow!(
"Input rank must have equal to 4 dimensions [b, c, h, w]"
));
}
let (bs, channels, input_height, input_width) = input.dims4()?;
let (target_height, target_width) = target_size;
// If size is the same, return clone
if input_height == target_height && input_width == target_width {
return Ok(input.clone());
}
let align_corners = align_corner.unwrap_or(false);
// Compute scaling factors
let height_scale = if align_corners && target_height > 1 {
(input_height - 1) as f64 / (target_height - 1) as f64
} else {
input_height as f64 / target_height as f64
};
let width_scale = if align_corners && target_width > 1 {
(input_width - 1) as f64 / (target_width - 1) as f64
} else {
input_width as f64 / target_width as f64
};
let dim0 = bs * channels;
let input_3dim = input.reshape((dim0, input_height, input_width))?;
let input_data = input_3dim.to_dtype(DType::F32)?.to_vec3::<f32>()?;
let mut output_data = vec![vec![vec![0.0f32; target_width]; target_height]; dim0];
for c in 0..dim0 {
for out_y in 0..target_height {
let src_y = if align_corners {
out_y as f64 * height_scale
} else {
(out_y as f64 + 0.5) * height_scale - 0.5
};
let src_y = src_y.max(0.0).min((input_height - 1) as f64);
let y0 = src_y.floor() as usize;
let y1 = (y0 + 1).min(input_height - 1);
let dy = (src_y - y0 as f64) as f32;
for out_x in 0..target_width {
let src_x = if align_corners {
out_x as f64 * width_scale
} else {
(out_x as f64 + 0.5) * width_scale - 0.5
};
let src_x = src_x.max(0.0).min((input_width - 1) as f64);
let x0 = src_x.floor() as usize;
let x1 = (x0 + 1).min(input_width - 1);
let q00 = input_data[c][y0][x0];
let q01 = input_data[c][y0][x1];
let q10 = input_data[c][y1][x0];
let q11 = input_data[c][y1][x1];
let dx = (src_x - x0 as f64) as f32;
let interpolated = q00 * (1.0 - dx) * (1.0 - dy)
+ q01 * dx * (1.0 - dy)
+ q10 * (1.0 - dx) * dy
+ q11 * dx * dy;
output_data[c][out_y][out_x] = interpolated;
}
}
}
let output = Tensor::new(output_data, input.device())?
.reshape((bs, channels, target_height, target_width))?
.to_dtype(input.dtype())?;
Ok(output.contiguous()?)
}
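// Illustrative sketch (added for clarity; not part of the original file): with
// align_corners = true, a 2-wide row upsampled to 4 samples lands exactly on thirds.
#[cfg(test)]
mod interpolate_bilinear_sketch {
    use candle_core::{Device, Tensor};

    #[test]
    fn upsamples_a_row_linearly() -> anyhow::Result<()> {
        let t = Tensor::new(&[0.0f32, 1.0], &Device::Cpu)?.reshape((1, 1, 1, 2))?;
        let out = super::interpolate_bilinear(&t, (1, 4), Some(true))?;
        let v = out.flatten_all()?.to_vec1::<f32>()?;
        for (got, want) in v.iter().zip([0.0f32, 1.0 / 3.0, 2.0 / 3.0, 1.0]) {
            assert!((got - want).abs() < 1e-5);
        }
        Ok(())
    }
}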
fn compute_scale(input_size: usize, output_size: usize, align_corners: bool) -> f64 {
if align_corners && output_size > 1 {
(input_size - 1) as f64 / (output_size - 1) as f64
} else {
input_size as f64 / output_size as f64
}
}
fn bicubic_filter(x: f64) -> f64 {
let a = -0.75;
let x = x.abs();
if x < 1.0 {
((a + 2.0) * x - (a + 3.0)) * x * x + 1.0
} else if x < 2.0 {
(((x - 5.0) * x + 8.0) * x - 4.0) * a
} else {
0.0
}
}
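// Illustrative sketch (added for clarity; not part of the original file): the
// a = -0.75 Keys cubic kernel evaluates to 1 at the sample point and to 0 at the
// neighboring integer offsets, which is what makes it interpolating.
#[cfg(test)]
mod bicubic_filter_sketch {
    #[test]
    fn kernel_boundary_values() {
        assert!((super::bicubic_filter(0.0) - 1.0).abs() < 1e-12);
        assert!(super::bicubic_filter(1.0).abs() < 1e-12);
        assert!(super::bicubic_filter(2.0).abs() < 1e-12);
    }
}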
pub fn interpolate_bicubic_antialias(
input: &Tensor,
batch_size: usize,
channels: usize,
input_height: usize,
input_width: usize,
output_height: usize,
output_width: usize,
height_scale: f64,
width_scale: f64,
align_corners: bool,
) -> Result<Tensor> {
    // Tensor has no to_vec4, so fold bs and channels into a single leading dim first.
let dim0 = batch_size * channels;
let input_3dim = input.reshape((dim0, input_height, input_width))?;
let input_data = input_3dim.to_dtype(DType::F32)?.to_vec3::<f32>()?;
let mut output_data = vec![vec![vec![0.0f32; output_width]; output_height]; dim0];
let support = 2.0 * height_scale.max(width_scale);
for c in 0..dim0 {
for out_y in 0..output_height {
let center_y = if align_corners {
out_y as f64 * height_scale
} else {
(out_y as f64 + 0.5) * height_scale - 0.5
};
let start_y = (center_y - support).ceil() as isize;
let end_y = (center_y + support).floor() as isize;
for out_x in 0..output_width {
let center_x = if align_corners {
out_x as f64 * width_scale
} else {
(out_x as f64 + 0.5) * width_scale - 0.5
};
let mut sum = 0.0;
let mut weight_sum = 0.0;
let start_x = (center_x - support).ceil() as isize;
let end_x = (center_x + support).floor() as isize;
for iy in start_y..end_y {
for ix in start_x..end_x {
if iy >= 0
&& iy < input_height as isize
&& ix >= 0
&& ix < input_width as isize
{
let dx = (ix as f64 - center_x).abs();
let dy = (iy as f64 - center_y).abs();
let wx = bicubic_filter(dx / width_scale.max(1.0));
let wy = bicubic_filter(dy / height_scale.max(1.0));
let weight = (wx * wy) as f32;
sum += input_data[c][iy as usize][ix as usize] * weight;
weight_sum += weight;
}
}
}
if weight_sum > 0.0 {
output_data[c][out_y][out_x] = sum / weight_sum;
} else {
output_data[c][out_y][out_x] = 0.0;
}
}
}
}
let output = Tensor::new(output_data, input.device())?
.reshape((batch_size, channels, output_height, output_width))?
.to_dtype(input.dtype())?;
Ok(output)
}
fn get_cubic_coefficients(t: f64) -> [f64; 4] {
let a = -0.75;
let x1 = t;
let coeff0 = cubic_convolution2(x1 + 1.0, a);
let coeff1 = cubic_convolution1(x1, a);
let x2 = 1.0 - t;
let coeff2 = cubic_convolution1(x2, a);
let coeff3 = cubic_convolution2(x2 + 1.0, a);
[coeff0, coeff1, coeff2, coeff3]
}
// Cubic convolution kernel, inner segment (|x| <= 1)
fn cubic_convolution1(x: f64, a: f64) -> f64 {
((a + 2.0) * x - (a + 3.0)) * x * x + 1.0
}
// Cubic convolution kernel, outer segment (1 < |x| <= 2)
fn cubic_convolution2(x: f64, a: f64) -> f64 {
((a * x - 5.0 * a) * x + 8.0 * a) * x - 4.0 * a
}
fn cubic_interp1d(x0: f32, x1: f32, x2: f32, x3: f32, t: f64) -> f32 {
let coeffs = get_cubic_coefficients(t);
x0 * coeffs[0] as f32 + x1 * coeffs[1] as f32 + x2 * coeffs[2] as f32 + x3 * coeffs[3] as f32
}
pub fn interpolate_bicubic_standard(
input: &Tensor,
batch_size: usize,
channels: usize,
input_height: usize,
input_width: usize,
output_height: usize,
output_width: usize,
height_scale: f64,
width_scale: f64,
align_corners: bool,
) -> Result<Tensor> {
    // Tensor has no to_vec4, so fold bs and channels into a single leading dim first.
let dim0 = batch_size * channels;
let input_3dim = input.reshape((dim0, input_height, input_width))?;
let input_data = input_3dim.to_dtype(DType::F32)?.to_vec3::<f32>()?;
let mut output_data = vec![vec![vec![0.0f32; output_width]; output_height]; dim0];
for c in 0..dim0 {
for out_y in 0..output_height {
let center_y = if align_corners {
out_y as f64 * height_scale
} else {
(out_y as f64 + 0.5) * height_scale - 0.5
};
let in_y = center_y.floor() as isize;
let t_y = center_y - in_y as f64;
for out_x in 0..output_width {
let center_x = if align_corners {
out_x as f64 * width_scale
} else {
(out_x as f64 + 0.5) * width_scale - 0.5
};
let in_x = center_x.floor() as isize;
let t_x = center_x - in_x as f64;
let mut coefficients = [0.0; 4];
// for k in 0..4 {
for (k, coefficients_k) in coefficients.iter_mut().enumerate() {
let row = (in_y - 1 + k as isize)
.max(0)
.min(input_height as isize - 1) as usize;
let x_minus_1 = input_data[c][row]
[(in_x - 1).max(0).min(input_width as isize - 1) as usize];
let x_plus_0 =
input_data[c][row][in_x.max(0).min(input_width as isize - 1) as usize];
let x_plus_1 = input_data[c][row]
[(in_x + 1).max(0).min(input_width as isize - 1) as usize];
let x_plus_2 = input_data[c][row]
[(in_x + 2).max(0).min(input_width as isize - 1) as usize];
// coefficients[k] = cubic_interp1d(x_minus_1, x_plus_0, x_plus_1, x_plus_2, t_x);
*coefficients_k = cubic_interp1d(x_minus_1, x_plus_0, x_plus_1, x_plus_2, t_x);
}
output_data[c][out_y][out_x] = cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
t_y,
);
}
}
}
let output = Tensor::new(output_data, input.device())?
.reshape((batch_size, channels, output_height, output_width))?
.to_dtype(input.dtype())?;
Ok(output)
}
pub fn interpolate_bicubic(
input: &Tensor,
target_size: (usize, usize),
antialias: Option<bool>,
align_corner: Option<bool>,
) -> Result<Tensor> {
if input.rank() != 4 {
return Err(anyhow::anyhow!(
"Input rank must have at least 3 dimensions"
));
}
// if input.dim(0)? != 1 {
// return Err(anyhow::anyhow!("Input batch_size must be 1"));
// }
let (batch_size, channels, input_height, input_width) = input.dims4()?;
let (output_height, output_width) = target_size;
if output_height == input_height && output_width == input_width {
return Ok(input.clone());
}
    let align_corners = align_corner.unwrap_or(false);
let height_scale = compute_scale(input_height, output_height, align_corners);
let width_scale = compute_scale(input_width, output_width, align_corners);
// let input_squeeze = input.squeeze(0)?;
let output = if let Some(antialias_) = antialias
&& antialias_
&& (input_height > output_height || input_width > output_width)
{
interpolate_bicubic_antialias(
input,
batch_size,
channels,
input_height,
input_width,
output_height,
output_width,
height_scale,
width_scale,
align_corners,
)?
} else {
interpolate_bicubic_standard(
input,
batch_size,
channels,
input_height,
input_width,
output_height,
output_width,
height_scale,
width_scale,
align_corners,
)?
};
let output = output.to_dtype(input.dtype())?.to_device(input.device())?;
Ok(output)
}
pub fn index_select_2d(t: &Tensor, index: &Tensor) -> Result<Tensor> {
    if t.rank() != 2 || index.rank() != 2 {
return Err(anyhow::anyhow!("t and index rank must be equal to 2"));
}
let mut res_vec = Vec::new();
let index_dim0 = index.dim(0)?;
for i in 0..index_dim0 {
let index_i = index.i(i)?;
let rel_i = t.index_select(&index_i, 0)?;
res_vec.push(rel_i);
}
let res = Tensor::stack(&res_vec, 0)?;
Ok(res)
}
pub fn quick_gelu(xs: &Tensor) -> Result<Tensor> {
let x = xs.affine(1.702, 0.0)?;
let x = sigmoid(&x)?;
Ok(xs.mul(&x)?)
}
pub fn topk(weight: &Tensor, topk: usize) -> Result<(Tensor, Tensor)> {
let topk_idx = weight
.arg_sort_last_dim(false)?
.narrow(D::Minus1, 0, topk)?
.contiguous()?;
let topk_weight = weight.gather(&topk_idx, D::Minus1)?;
Ok((topk_weight, topk_idx))
}
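// Illustrative sketch (added for clarity; not part of the original file): topk
// returns the k largest weights along the last dim together with their indices,
// in descending order.
#[cfg(test)]
mod topk_sketch {
    use candle_core::{Device, Tensor};

    #[test]
    fn picks_largest_weights_with_indices() -> anyhow::Result<()> {
        let w = Tensor::new(&[0.1f32, 0.4, 0.3, 0.2], &Device::Cpu)?;
        let (vals, idx) = super::topk(&w, 2)?;
        assert_eq!(idx.to_vec1::<u32>()?, vec![1, 2]);
        assert_eq!(vals.to_vec1::<f32>()?, vec![0.4, 0.3]);
        Ok(())
    }
}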
pub fn onehot(input: &Tensor, len: usize) -> Result<Tensor> {
let mut shape = input.dims().to_vec();
shape.push(len);
let expand_input = input.unsqueeze(D::Minus1)?.broadcast_as(shape)?;
let range =
Tensor::arange(0u32, len as u32, input.device())?.broadcast_as(expand_input.dims())?;
let onehot = expand_input.eq(&range)?;
Ok(onehot)
}
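// Illustrative sketch (added for clarity; not part of the original file): onehot
// appends a class dimension of size `len` and marks the matching index, so class
// ids [0, 2] over 3 classes become [[1, 0, 0], [0, 0, 1]] (dtype U8 from eq).
#[cfg(test)]
mod onehot_sketch {
    use candle_core::{Device, Tensor};

    #[test]
    fn encodes_class_indices() -> anyhow::Result<()> {
        let ids = Tensor::new(&[0u32, 2], &Device::Cpu)?;
        let oh = super::onehot(&ids, 3)?;
        assert_eq!(oh.to_vec2::<u8>()?, vec![vec![1u8, 0, 0], vec![0, 0, 1]]);
        Ok(())
    }
}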
pub fn nonzero(input: &Tensor) -> Result<(Vec<u32>, Vec<u32>)> {
assert!(input.rank() == 2, "input rank must be 2!");
let mut topk_ids = Vec::new();
let mut token_ids_all = Vec::new();
let topk = input.dim(0)?;
let input_vec = input.to_vec2::<u32>()?;
for (i, vec) in input_vec.iter().enumerate().take(topk) {
let token_ids: Vec<u32> = vec
.iter()
.enumerate()
.filter_map(|(idx, &val)| if val > 0 { Some(idx as u32) } else { None })
.collect();
let token_len = token_ids.len();
topk_ids.extend_from_slice(&vec![i as u32; token_len]);
token_ids_all.extend_from_slice(&token_ids);
}
Ok((topk_ids, token_ids_all))
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/position_embed/rope.rs | src/position_embed/rope.rs | use anyhow::Result;
use candle_core::{D, DType, Device, IndexOp, Tensor};
use candle_transformers::models::deepseek2::SplitOp;
use crate::utils::tensor_utils::{index_select_2d, split_tensor};
pub fn compute_default_rope_parameters(dim: usize, base: f32) -> Vec<f32> {
let inv_freq: Vec<f32> = (0..dim)
.step_by(2)
.map(|i| 1.0_f32 / base.powf(i as f32 / dim as f32))
.collect();
inv_freq
}
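// Illustrative sketch (added for clarity; not part of the original file): for even
// i the inverse frequencies follow 1 / base^(i / dim), so dim = 4 and base = 10000
// give [1.0, 0.01].
#[cfg(test)]
mod rope_parameters_sketch {
    #[test]
    fn matches_closed_form() {
        let inv_freq = super::compute_default_rope_parameters(4, 10000.0);
        assert_eq!(inv_freq.len(), 2);
        assert!((inv_freq[0] - 1.0).abs() < 1e-6);
        assert!((inv_freq[1] - 0.01).abs() < 1e-6);
    }
}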
pub fn rotate_half(x: &Tensor) -> Result<Tensor> {
let half_dim = x.dim(D::Minus1)? / 2;
let x1 = x.narrow(D::Minus1, 0, half_dim)?;
let x2 = x.narrow(D::Minus1, half_dim, half_dim)?;
let x2 = x2.affine(-1.0, 0.0)?;
let rotate_x = Tensor::cat(&[&x2, &x1], D::Minus1)?.contiguous()?;
Ok(rotate_x)
}
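// Illustrative sketch (added for clarity; not part of the original file): the RoPE
// half-rotation maps [a, b, c, d] to [-c, -d, a, b], i.e. the second half is negated
// and moved to the front.
#[cfg(test)]
mod rotate_half_sketch {
    use candle_core::{Device, Tensor};

    #[test]
    fn negates_and_swaps_halves() -> anyhow::Result<()> {
        let x = Tensor::new(&[1.0f32, 2.0, 3.0, 4.0], &Device::Cpu)?;
        let r = super::rotate_half(&x)?;
        assert_eq!(r.to_vec1::<f32>()?, vec![-3.0, -4.0, 1.0, 2.0]);
        Ok(())
    }
}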
pub fn apply_multimodel_rotary_pos_emb(
q: &Tensor,
k: &Tensor,
cos: &Tensor,
sin: &Tensor,
mrope_section: Vec<usize>,
) -> Result<(Tensor, Tensor)> {
let mrope_section = mrope_section.repeat(2);
let cos_select: Vec<Tensor> = cos
.split(&mrope_section, D::Minus1)?
.iter()
.enumerate()
.map(|(i, m)| m.i(i % 3).unwrap())
.collect();
let cos = Tensor::cat(&cos_select, D::Minus1)?
.unsqueeze(1)?
.contiguous()?;
let sin_select: Vec<Tensor> = sin
.split(&mrope_section, D::Minus1)?
.iter()
.enumerate()
.map(|(i, m)| m.i(i % 3).unwrap())
.collect();
let sin = Tensor::cat(&sin_select, D::Minus1)?
.unsqueeze(1)?
.contiguous()?;
let q_embed = q
.broadcast_mul(&cos)?
.add(&rotate_half(q)?.broadcast_mul(&sin)?)?;
let k_embed = k
.broadcast_mul(&cos)?
.add(&rotate_half(k)?.broadcast_mul(&sin)?)?;
Ok((q_embed, k_embed))
}
pub fn apply_rotary_pos_emb_vision(
q: &Tensor,
k: &Tensor,
cos: &Tensor,
sin: &Tensor,
) -> Result<(Tensor, Tensor)> {
// q, k -> (seq_len, num_heads, head_dim)
// cos, sin -> (seq_len, head_dim) -> (seq_len, 1, head_dim)
let cos = cos.unsqueeze(D::Minus2)?;
let sin = sin.unsqueeze(D::Minus2)?;
let cos = cos.to_dtype(q.dtype())?;
let sin = sin.to_dtype(q.dtype())?;
let q_embed = q
.broadcast_mul(&cos)?
.add(&rotate_half(q)?.broadcast_mul(&sin)?)?;
let k_embed = k
.broadcast_mul(&cos)?
.add(&rotate_half(k)?.broadcast_mul(&sin)?)?;
Ok((q_embed, k_embed))
}
pub fn apply_rotary_pos_emb(
q: &Tensor,
k: &Tensor,
cos: &Tensor,
sin: &Tensor,
tof32: bool,
) -> Result<(Tensor, Tensor)> {
// sin/cos: to (bs, 1, seq_len, head_dim)
// q/k: (bs, n_head, seq_len, head_dim)
let mut cos = cos.clone();
let mut sin = sin.clone();
if cos.rank() == 2 {
// (seq_len, head_dim) -> (1, 1, seq_len, head_dim)
cos = cos.unsqueeze(0)?.unsqueeze(0)?;
sin = sin.unsqueeze(0)?.unsqueeze(0)?;
}
if cos.rank() == 3 {
// (bs, seq_len, head_dim) -> (bs, 1, seq_len, head_dim)
cos = cos.unsqueeze(1)?;
sin = sin.unsqueeze(1)?;
}
let orig_dtype = q.dtype();
let q = if tof32 { &q.to_dtype(DType::F32)? } else { q };
let k = if tof32 { &k.to_dtype(DType::F32)? } else { k };
let cos = cos.to_dtype(q.dtype())?;
let sin = sin.to_dtype(q.dtype())?;
let q_embed = q
.broadcast_mul(&cos)?
.add(&rotate_half(q)?.broadcast_mul(&sin)?)?
.to_dtype(orig_dtype)?;
let k_embed = k
.broadcast_mul(&cos)?
.add(&rotate_half(k)?.broadcast_mul(&sin)?)?
.to_dtype(orig_dtype)?;
Ok((q_embed, k_embed))
}
#[derive(Debug, Clone)]
pub struct Qwen2_5VLTextRotaryEmbedding {
inv_freq: Vec<f32>,
}
impl Qwen2_5VLTextRotaryEmbedding {
pub fn new(dim: usize, theta_base: f32) -> Self {
let inv_freq = compute_default_rope_parameters(dim, theta_base);
Self { inv_freq }
}
pub fn forward(
&self,
position_ids: &Tensor,
dtype: DType,
mrope_section: Vec<usize>,
) -> Result<(Tensor, Tensor)> {
// position_ids shape: (3, bs, position) -> (3, bs, 1, position)
let position_ids_expanded = position_ids
.unsqueeze(D::Minus2)?
.to_dtype(DType::F32)?
.contiguous()?;
// inv_freq Vec<f32> -> Tensor(1, 1, head_dim / 2, 1) -> (3, bs, head_dim / 2, 1)
let inv_freq_expanded = Tensor::from_vec(
self.inv_freq.clone(),
(1, 1, self.inv_freq.len(), 1),
position_ids.device(),
)?
.broadcast_as((3, position_ids.dim(1)?, self.inv_freq.len(), 1))?
.to_dtype(DType::F32)?
.contiguous()?;
// (3, bs, head_dim / 2, 1) matmul (3, bs, 1, position)
// -> (3, bs, head_dim / 2, seq_len) -> (3, bs, seq_len, head_dim / 2)
let freqs = inv_freq_expanded
.matmul(&position_ids_expanded)?
.transpose(2, 3)?;
// let freqs = position_ids_expanded.matmul(&inv_freq_expanded)?;
// (3, bs, seq_len, head_dim / 2) -> (3, bs, seq_len, head_dim)
let emb = Tensor::cat(&[&freqs, &freqs], D::Minus1)?.contiguous()?;
let cos = emb.cos()?;
let sin = emb.sin()?;
let mrope_section = mrope_section.repeat(2);
let cos_select: Vec<Tensor> = cos
.split(&mrope_section, D::Minus1)?
.iter()
.enumerate()
.map(|(i, m)| m.i(i % 3).unwrap())
.collect();
// (bs, seq_len, head_dim) -> (bs, 1, seq_len, head_dim)
let cos = Tensor::cat(&cos_select, D::Minus1)?
.unsqueeze(1)?
.contiguous()?;
let sin_select: Vec<Tensor> = sin
.split(&mrope_section, D::Minus1)?
.iter()
.enumerate()
.map(|(i, m)| m.i(i % 3).unwrap())
.collect();
// (bs, seq_len, head_dim) -> (bs, 1, seq_len, head_dim)
let sin = Tensor::cat(&sin_select, D::Minus1)?
.unsqueeze(1)?
.contiguous()?;
Ok((cos.to_dtype(dtype)?, sin.to_dtype(dtype)?))
}
}
#[derive(Debug, Clone)]
pub struct Qwen2_5VisionRotaryEmbedding {
inv_freq: Vec<f32>,
}
impl Qwen2_5VisionRotaryEmbedding {
pub fn new(dim: usize, theta_base: Option<f32>) -> Self {
let theta_base = theta_base.unwrap_or(10000.0_f32);
let inv_freq = compute_default_rope_parameters(dim, theta_base);
Self { inv_freq }
}
pub fn forward(&self, seqlen: usize, device: &Device) -> Result<Tensor> {
let seq = Tensor::arange(0.0_f32, seqlen as f32, device)?.reshape((seqlen, 1))?;
let inv_freq = Tensor::from_vec(self.inv_freq.clone(), (1, self.inv_freq.len()), device)?;
let freqs = seq.matmul(&inv_freq)?;
Ok(freqs)
}
}
#[derive(Debug, Clone)]
pub struct Qwen3VLTextRotaryEmbedding {
inv_freq: Vec<f32>,
}
impl Qwen3VLTextRotaryEmbedding {
pub fn new(dim: usize, theta_base: f32) -> Self {
let inv_freq = compute_default_rope_parameters(dim, theta_base);
Self { inv_freq }
}
pub fn apply_interleaved_mrope(
&self,
freqs: &Tensor,
mrope_section: Vec<usize>,
) -> Result<Tensor> {
        let mut freqs_t = freqs.i(0)?.contiguous()?; // (3, bs, seq_len, head_dim / 2) -> (bs, seq_len, head_dim / 2)
// for dim in 1..3 {
for (dim, section) in mrope_section.iter().enumerate().skip(1) {
// let length = mrope_section[dim] * 3;
let length = section * 3;
let idx = Tensor::arange_step(dim as u32, length as u32, 3, freqs.device())?;
let src = freqs.i(dim)?.contiguous()?; // (bs, seq_len, head_dim //2)
let src = src.index_select(&idx, D::Minus1)?.contiguous()?;
let idx = idx
.unsqueeze(0)?
.unsqueeze(0)?
.broadcast_as(src.shape())?
.contiguous()?;
freqs_t = freqs_t.scatter(&idx, &src, D::Minus1)?;
}
Ok(freqs_t)
}
pub fn forward(
&self,
position_ids: &Tensor,
dtype: DType,
mrope_section: Vec<usize>,
) -> Result<(Tensor, Tensor)> {
// position_ids shape: (3, bs, position) -> (3, bs, 1, position)
let position_ids = if position_ids.rank() == 2 {
let (bs, len) = position_ids.dims2()?;
position_ids.unsqueeze(0)?.expand((3, bs, len))?
} else {
position_ids.clone()
};
let position_ids_expanded = position_ids
.unsqueeze(D::Minus2)?
.to_dtype(DType::F32)?
.contiguous()?;
// inv_freq Vec<f32> -> Tensor(1, 1, head_dim / 2, 1) -> (3, bs, head_dim / 2, 1)
let inv_freq_expanded = Tensor::from_vec(
self.inv_freq.clone(),
(1, 1, self.inv_freq.len(), 1),
position_ids.device(),
)?
.broadcast_as((3, position_ids.dim(1)?, self.inv_freq.len(), 1))?
.to_dtype(DType::F32)?
.contiguous()?;
// (3, bs, head_dim / 2, 1) matmul (3, bs, 1, position)
// -> (3, bs, head_dim / 2, seq_len) -> (3, bs, seq_len, head_dim / 2)
let freqs = inv_freq_expanded
.matmul(&position_ids_expanded)?
.transpose(2, 3)?;
let freqs = self.apply_interleaved_mrope(&freqs, mrope_section)?;
let emb = Tensor::cat(&[&freqs, &freqs], D::Minus1)?.contiguous()?;
let cos = emb.cos()?;
let sin = emb.sin()?;
Ok((cos.to_dtype(dtype)?, sin.to_dtype(dtype)?))
}
}
pub struct RoPE {
inv_freq: Tensor, // (1, dim / 2)
}
impl RoPE {
pub fn new(dim: usize, theta_base: f32, device: &Device) -> Result<Self> {
let inv_freq = compute_default_rope_parameters(dim, theta_base);
let inv_freq = Tensor::from_slice(&inv_freq, (1, inv_freq.len()), device)?;
Ok(Self { inv_freq })
}
pub fn forward(
&self,
seqlen_offset: usize,
seq_len: usize,
device: &Device,
) -> Result<(Tensor, Tensor)> {
let positions = Tensor::arange(
seqlen_offset as f32,
(seqlen_offset + seq_len) as f32,
device,
)?
.reshape((seq_len, 1))?; // (seq_len, 1)
let freqs = positions.matmul(&self.inv_freq)?; // (seq_len, dim / 2)
let emb = Tensor::cat(&[&freqs, &freqs], D::Minus1)?.contiguous()?; // (seq_len, dim)
let cos = emb.cos()?;
let sin = emb.sin()?;
Ok((cos, sin))
}
}
pub fn get_xd_cos_sin(
cos: &Tensor,
sin: &Tensor,
position_ids: &Tensor,
xdrope_section: Vec<usize>,
) -> Result<(Tensor, Tensor)> {
let x_dim = xdrope_section.len();
// position_ids: (bs, 4, seq_len)
let mut cos_vec = vec![];
let mut sin_vec = vec![];
let bs = position_ids.dim(0)?;
for i in 0..bs {
let pos_i = position_ids.i(i)?;
let cos_i = index_select_2d(cos, &pos_i)?;
let sin_i = index_select_2d(sin, &pos_i)?;
cos_vec.push(cos_i);
sin_vec.push(sin_i);
}
// (bs, 4, seq_len, dim) -> (bs, seq_len, 4, dim)
let cos = Tensor::stack(&cos_vec, 0)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let sin = Tensor::stack(&sin_vec, 0)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let xdrope_section: Vec<usize> = xdrope_section.iter().map(|&i| i * 2).collect();
let cos_select: Vec<Tensor> = split_tensor(&cos, &xdrope_section, D::Minus1)?
.iter()
.enumerate()
.map(|(i, m)| m.i((.., .., i % x_dim)).unwrap())
.collect();
let sin_select: Vec<Tensor> = split_tensor(&sin, &xdrope_section, D::Minus1)?
.iter()
.enumerate()
.map(|(i, m)| m.i((.., .., i % x_dim)).unwrap())
.collect();
let cos = Tensor::cat(&cos_select, D::Minus1)?;
let sin = Tensor::cat(&sin_select, D::Minus1)?;
Ok((cos, sin))
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/src/position_embed/mod.rs | src/position_embed/mod.rs | pub mod rope;
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/messy_test.rs | tests/messy_test.rs | use anyhow::Result;
use candle_core::Tensor;
#[test]
fn messy_test() -> Result<()> {
// RUST_BACKTRACE=1 cargo test -F cuda messy_test -r -- --nocapture
let device = &candle_core::Device::Cpu;
// let path = get_default_save_dir();
let x = Tensor::arange(0.0, 9.0, device)?;
println!("x: {}", x);
// let x = x
// .unsqueeze(0)?
// .unsqueeze(0)?
// .broadcast_as((5, 5, 9))?
// .reshape((5, 5, 3, 3))?;
// println!("x: {}", x);
// let x = x.permute((0, 2, 1, 3))?;
// println!("x: {}", x);
// let x = x.reshape((15, 15))?;
// println!("x: {}", x);
// let xs = Tensor::rand(0.0, 5.0, (1, 1, 3, 3), device)?;
// println!("xs: {}", xs);
// let xs = xs.pad_with_zeros(3, 2, 2)?
// .pad_with_zeros(2, 2, 2)?;
// println!("xs: {}", xs);
// let xs = Tensor::arange(0.0, 25.0, device)?;
// println!("xs: {}", xs);
// let splits = split_tensor_with_size(&xs, 5, 0)?;
// for v in splits {
// println!("v: {}", v);
// }
// let xs = Tensor::arange(0.0, 25.0, device)?.broadcast_as((1, 1, 5, 5))?;
// println!("xs: {}", xs);
// let xs = xs.avg_pool2d(5)?;
// println!("xs: {}", xs);
// let xs = Tensor::rand(0.0, 1.0, (1, 4, 4, 2), device)?;
// println!("xs: {}", xs);
// let shape = Shape::from_dims(&[1, 2, 2, 2, 2, 2]);
// let xs = xs.reshape(shape)?;
// println!("xs: {}", xs);
// let x0 = xs.i((.., .., 0, .., 0, ..))?;
// let x1 = xs.i((.., .., 1, .., 0, ..))?;
// let x2 = xs.i((.., .., 0, .., 1, ..))?;
// let x3 = xs.i((.., .., 1, .., 1, ..))?;
// let xs = Tensor::cat(&[x0, x1, x2, x3], D::Minus1)?;
// println!("xs: {}", xs);
// let xs = xs.reshape((1, (), 4 * 2))?;
// println!("xs: {}", xs);
// let path_str = "file://./assets/img/ocr_test1.png";
// let path = url::Url::from_str(path_str)?;
// let path = path.to_file_path();
// let path = match path {
// Ok(path) => path,
// Err(_) => {
// let mut path = path_str.to_owned();
// path = path.split_off(7);
// PathBuf::from(path)
// }
// };
// println!("to file path: {:?}", path);
// let device = &candle_core::Device::Cpu;
// let t = Tensor::arange(0.0f32, 40.0, device)?.broadcast_as((1, 1, 40, 40))?;
// println!("t: {}", t);
// let i_start = Instant::now();
// let t_inter = interpolate_bilinear(&t, (20, 20), Some(false))?;
// let i_duration = i_start.elapsed();
// println!("Time elapsed in interpolate_bilinear is: {:?}", i_duration);
// println!("t_inter: {}", t_inter);
// let x: Vec<u32> = (0..5).flat_map(|_| 0u32..10).collect();
// let id: Vec<u32> = (0..5).flat_map(|h| vec![h; 10]).collect();
// println!("x: {:?}", id);
// let t = Tensor::randn(0.0f32, 1.0, (1, 768, 64, 64), device)?;
// let t = Tensor::arange(0u32, 10, device)?.broadcast_as((1, 10))?;
// let eq = t.broadcast_eq(&Tensor::new(5u32, device)?)?;
// println!("eq: {}", eq);
// let t = Tensor::arange(0.0f32, 10.0, device)?.broadcast_as((1, 1, 10, 10))?;
// println!("t: {}", t);
// let t_resized = interpolate_bicubic(&t, (5, 5), Some(true), Some(false))?;
// println!("t_resized: {}", t_resized);
// let t1 = Tensor::rand(0.0, 1.0, (1, 5, 5, 10), device)?;
// let t2 = Tensor::rand(0.0, 1.0, (5, 8, 10), device)?;
// let t2 = t2.t()?;
// println!("t2: {:?}", t2);
// let re = t1.broadcast_matmul(&t2)?;
// println!("re: {:?}", re);
// let index = Tensor::arange(0u32, 10u32, device)?;
// let index_2d_vec = vec![index;5];
// let index_2d = Tensor::stack(&index_2d_vec, 0)?;
// println!("index_2d: {}", index_2d);
// let t = Tensor::rand(0.0, 1.0, (20, 8), device)?;
// println!("t: {}", t);
// let res = index_select_2d(&t, &index_2d)?;
// println!("res: {}", res);
// let t = Tensor::arange(0.0, 10.0, device)?
// .unsqueeze(0)?
// .unsqueeze(0)?;
// println!("t: {}", t);
// let t_resized = interpolate_linear(&t, 20, None)?;
// println!("t_resized: {}", t_resized);
// let grid_thw = Tensor::new(vec![vec![3u32, 12, 20], vec![5, 30, 25]], device)?;
// let cu_seqlens = grid_thw.i((.., 1))?.mul(&grid_thw.i((.., 2))?)?;
// let grid_t = grid_thw.i((.., 0))?.to_vec1::<u32>()?;
// println!("cu_seqlens: {}", cu_seqlens);
// println!("cu_seqlens rank: {}", cu_seqlens.rank());
// println!("grid_t: {:?}", grid_t);
// let image_mask = Tensor::new(vec![0u32, 0, 0, 1, 0, 1], device)?;
// let video_mask = Tensor::new(vec![0u32, 1, 0, 1, 0, 1], device)?;
// let visual_mask = bitor_tensor(&image_mask, &video_mask)?;
// println!("visual_mask: {}", visual_mask);
// let x = Tensor::arange_step(0.0_f32, 5., 0.5, &device)?;
// let x_int = x.to_dtype(candle_core::DType::U32)?;
// println!("x: {}", x);
// println!("x_int: {}", x_int);
// let x_affine = x_int.affine(1.0, 1.0)?;
// println!("x_affine: {}", x_affine);
// let x_clamp = x_affine.clamp(0u32, 3u32)?;
// println!("x_clamp: {}", x_clamp);
// let wav_path = "./assets/audio/voice_01.wav";
// let audio_tensor = load_audio_with_resample(wav_path, device, Some(16000))?;
// println!("audio_tensor: {}", audio_tensor);
// let string = "你好啊".to_string();
// let vec_str: Vec<String>= string.chars().map(|c| c.to_string()).collect();
// println!("vec_str: {:?}", vec_str);
// let t = Tensor::rand(-1.0, 1.0, (2, 2), &device)?;
// println!("t: {}", t);
// let re_t = t.recip()?;
// println!("re_t: {}", re_t);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_paddleocr_vl.rs | tests/test_paddleocr_vl.rs | use std::{pin::pin, time::Instant};
use aha::models::{GenerateModel, paddleocr_vl::generate::PaddleOCRVLGenerateModel};
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
use rocket::futures::StreamExt;
#[test]
fn paddleocr_vl_generate() -> Result<()> {
// RUST_BACKTRACE=1 cargo test -F cuda paddleocr_vl_generate -r -- --nocapture
let message = r#"
{
"model": "paddleocr_vl",
"messages": [
{
"role": "user",
"content": [
{
"type": "image",
"image_url":
{
"url": "file://./assets/img/ocr_test1.png"
}
},
{
"type": "text",
"text": "OCR:"
}
]
}
],
"stream": false
}
"#;
let model_path = "/home/jhq/huggingface_model/PaddlePaddle/PaddleOCR-VL/";
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = PaddleOCRVLGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let res = model.generate(mes)?;
let i_duration = i_start.elapsed();
println!("generate: \n {:?}", res);
if res.usage.is_some() {
let num_token = res.usage.as_ref().unwrap().total_tokens;
let duration_secs = i_duration.as_secs_f64();
let tps = num_token as f64 / duration_secs;
println!("Tokens per second (TPS): {:.2}", tps);
}
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
#[tokio::test]
async fn paddleocr_vl_stream() -> Result<()> {
// test with cuda: RUST_BACKTRACE=1 cargo test -F cuda paddleocr_vl_stream -r -- --nocapture
let message = r#"
{
"model": "paddleocr_vl",
"messages": [
{
"role": "user",
"content": [
{
"type": "image",
"image_url":
{
"url": "file://./assets/img/ocr_test1.png"
}
},
{
"type": "text",
"text": "OCR:"
}
]
}
]
}
"#;
let model_path = "/home/jhq/huggingface_model/PaddlePaddle/PaddleOCR-VL/";
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = PaddleOCRVLGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let mut stream = pin!(model.generate_stream(mes)?);
while let Some(item) = stream.next().await {
println!("generate: \n {:?}", item);
}
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_minicpm4.rs | tests/test_minicpm4.rs | use std::{pin::pin, time::Instant};
use aha::models::{GenerateModel, minicpm4::generate::MiniCPMGenerateModel};
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
use rocket::futures::StreamExt;
#[test]
fn minicpm_generate() -> Result<()> {
    // test with cpu (too slow): RUST_BACKTRACE=1 cargo test minicpm_generate -r -- --nocapture
// test with cuda: RUST_BACKTRACE=1 cargo test -F cuda minicpm_generate -r -- --nocapture
// test with cuda+flash-attn: RUST_BACKTRACE=1 cargo test -F cuda,flash-attn minicpm_generate -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/OpenBMB/MiniCPM4-0.5B/";
let message = r#"
{
"temperature": 0.3,
"top_p": 0.8,
"model": "minicpm4",
"messages": [
{
"role": "user",
"content": "你吃饭了没"
}
]
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = MiniCPMGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let result = model.generate(mes)?;
let i_duration = i_start.elapsed();
println!("generate: \n {:?}", result);
if result.usage.is_some() {
let num_token = result.usage.as_ref().unwrap().total_tokens;
let duration_secs = i_duration.as_secs_f64();
let tps = num_token as f64 / duration_secs;
println!("Tokens per second (TPS): {:.2}", tps);
}
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
#[tokio::test]
async fn minicpm_stream() -> Result<()> {
// test with cuda+flash-attn: RUST_BACKTRACE=1 cargo test -F cuda,flash-attn minicpm_stream -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/OpenBMB/MiniCPM4-0.5B/";
let message = r#"
{
"model": "minicpm4",
"messages": [
{
"role": "user",
"content": "你是谁"
}
]
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = MiniCPMGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let mut stream = pin!(model.generate_stream(mes)?);
while let Some(item) = stream.next().await {
println!("generate: \n {:?}", item);
}
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/config_tests.rs | tests/config_tests.rs | use aha::models::{
deepseek_ocr::config::DeepseekOCRConfig, hunyuan_ocr::config::HunYuanVLConfig,
minicpm4::config::MiniCPM4Config, paddleocr_vl::config::PaddleOCRVLConfig,
qwen2_5vl::config::Qwen2_5VLConfig, qwen3vl::config::Qwen3VLConfig,
voxcpm::config::VoxCPMConfig,
};
use anyhow::Result;
#[test]
fn qwen2_5_vl_config() -> Result<()> {
    // cargo test -F cuda,flash-attn qwen2_5_vl_config -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/Qwen/Qwen2.5-VL-3B-Instruct/";
let config_path = model_path.to_string() + "/config.json";
let config: Qwen2_5VLConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
println!("{:?}", config);
Ok(())
}
#[test]
fn minicpm4_config() -> Result<()> {
// cargo test -F cuda,flash-attn minicpm4_config -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/OpenBMB/MiniCPM4-0.5B/";
let config_path = model_path.to_string() + "/config.json";
let config: MiniCPM4Config = serde_json::from_slice(&std::fs::read(config_path)?)?;
println!("{:?}", config);
Ok(())
}
#[test]
fn voxcpm_config() -> Result<()> {
// cargo test -F cuda,flash-attn voxcpm_config -r -- --nocapture
// cargo test -F cuda voxcpm_config -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/openbmb/VoxCPM-0.5B/";
let config_path = model_path.to_string() + "/config.json";
let config: VoxCPMConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
println!("{:?}", config);
Ok(())
}
#[test]
fn voxcpm1_5_config() -> Result<()> {
// cargo test -F cuda voxcpm1_5_config -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/OpenBMB/VoxCPM1.5/";
let config_path = model_path.to_string() + "/config.json";
let config: VoxCPMConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
println!("{:?}", config);
Ok(())
}
#[test]
fn qwen3vl_config() -> Result<()> {
// cargo test -F cuda qwen3vl_config -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/Qwen/Qwen3-VL-4B-Instruct/";
let config_path = model_path.to_string() + "/config.json";
let config: Qwen3VLConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
println!("{:?}", config);
Ok(())
}
#[test]
fn deepseek_ocr_config() -> Result<()> {
// cargo test -F cuda deepseek_ocr_config -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/deepseek-ai/DeepSeek-OCR/";
let config_path = model_path.to_string() + "/config.json";
let config: DeepseekOCRConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
println!("{:?}", config);
Ok(())
}
#[test]
fn hunyuan_ocr_config() -> Result<()> {
// cargo test -F cuda hunyuan_ocr_config -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/Tencent-Hunyuan/HunyuanOCR/";
let config_path = model_path.to_string() + "/config.json";
let config: HunYuanVLConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
println!("{:?}", config);
Ok(())
}
#[test]
fn paddleocr_vl_config() -> Result<()> {
// cargo test -F cuda paddleocr_vl_config -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/PaddlePaddle/PaddleOCR-VL/";
let config_path = model_path.to_string() + "/config.json";
let config: PaddleOCRVLConfig = serde_json::from_slice(&std::fs::read(config_path)?)?;
println!("{:?}", config);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_qwen2_5vl.rs | tests/test_qwen2_5vl.rs | use std::{pin::pin, time::Instant};
use aha::models::{GenerateModel, qwen2_5vl::generate::Qwen2_5VLGenerateModel};
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
use rocket::futures::StreamExt;
#[test]
fn qwen2_5vl_generate() -> Result<()> {
    // test with cpu (too slow): RUST_BACKTRACE=1 cargo test qwen2_5vl_generate -r -- --nocapture
// test with cuda: RUST_BACKTRACE=1 cargo test -F cuda qwen2_5vl_generate -r -- --nocapture
// test with cuda+flash-attn: RUST_BACKTRACE=1 cargo test -F cuda,flash-attn qwen2_5vl_generate -r -- --nocapture
// let device = Device::cuda_if_available(0)?;
// let dtype = DType::BF16;
let model_path = "/home/jhq/huggingface_model/Qwen/Qwen2.5-VL-3B-Instruct/";
let message = r#"
{
"model": "qwen2.5vl",
"messages": [
{
"role": "user",
"content": [
{
"type": "image",
"image_url":
{
"url": "file://./assets/img/ocr_test1.png"
}
},
{
"type": "text",
"text": "请分析图片并提取所有可见文本内容,按从左到右、从上到下的布局,返回纯文本"
}
]
}
]
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = Qwen2_5VLGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let result = model.generate(mes)?;
let i_duration = i_start.elapsed();
println!("generate: \n {:?}", result);
if result.usage.is_some() {
let num_token = result.usage.as_ref().unwrap().total_tokens;
let duration_secs = i_duration.as_secs_f64();
let tps = num_token as f64 / duration_secs;
println!("Tokens per second (TPS): {:.2}", tps);
}
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
#[tokio::test]
async fn qwen2_5vl_stream() -> Result<()> {
// test with cuda+flash-attn: RUST_BACKTRACE=1 cargo test -F cuda,flash-attn qwen2_5vl_stream -r -- --nocapture
// let device = Device::cuda_if_available(0)?;
// let dtype = DType::BF16;
let model_path = "/home/jhq/huggingface_model/Qwen/Qwen2.5-VL-3B-Instruct/";
let message = r#"
{
"model": "qwen2.5vl",
"messages": [
{
"role": "user",
"content": [
{
"type": "image",
"image_url":
{
"url": "file://./assets/img/ocr_test1.png"
}
},
{
"type": "text",
"text": "请分析图片并提取所有可见文本内容,按从左到右、从上到下的布局,返回纯文本"
}
]
}
]
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = Qwen2_5VLGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let mut stream = pin!(model.generate_stream(mes)?);
while let Some(item) = stream.next().await {
println!("generate: \n {:?}", item);
}
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_voxcpm.rs | tests/test_voxcpm.rs | use std::time::Instant;
use aha::{
models::{
GenerateModel,
voxcpm::{generate::VoxCPMGenerate, tokenizer::SingleChineseTokenizer},
},
utils::audio_utils::{extract_and_save_audio_from_response, save_wav},
};
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::{Ok, Result};
#[test]
fn voxcpm_use_message_generate() -> Result<()> {
// RUST_BACKTRACE=1 cargo test -F cuda voxcpm_use_message_generate -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/openbmb/VoxCPM-0.5B/";
let message = r#"
{
"model": "voxcpm",
"messages": [
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url":
{
"url": "https://sis-sample-audio.obs.cn-north-1.myhuaweicloud.com/16k16bit.wav"
}
},
{
"type": "text",
"text": "VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly realistic speech."
}
]
}
],
"metadata": {"prompt_text": "华为致力于把数字世界带给每个人,每个家庭,每个组织,构建万物互联的智能世界。"}
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut voxcpm_generate = VoxCPMGenerate::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let generate = voxcpm_generate.generate(mes)?;
let save_path = extract_and_save_audio_from_response(&generate, "./")?;
for path in save_path {
println!("save audio: {}", path);
}
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
// save_wav(&generate, "voxcpm.wav", 16000)?;
Ok(())
}
#[test]
fn voxcpm_generate() -> Result<()> {
// RUST_BACKTRACE=1 cargo test -F cuda voxcpm_generate -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/openbmb/VoxCPM-0.5B/";
let i_start = Instant::now();
let mut voxcpm_generate = VoxCPMGenerate::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
// let generate = voxcpm_generate.generate_simple("太阳当空照,花儿对我笑,小鸟说早早早".to_string())?;
let generate = voxcpm_generate.inference(
"VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly realistic speech.".to_string(),
Some("啥子小师叔,打狗还要看主人,你再要继续,我,就是你的对手".to_string()),
Some("file://./assets/audio/voice_01.wav".to_string()),
// Some("一定被灰太狼给吃了,我已经为他准备好了花圈了".to_string()),
// Some("file://./assets/audio/voice_05.wav".to_string()),
2,
100,
10,
2.0,
// false,
6.0,
)?;
    // Build the prompt_cache
// let _ = voxcpm_generate.build_prompt_cache(
// "啥子小师叔,打狗还要看主人,你再要继续,我,就是你的对手".to_string(),
// "file://./assets/audio/voice_01.wav".to_string(),
// )?;
    // // Generate speech using the prompt_cache
// let generate = voxcpm_generate.generate_use_prompt_cache(
// "太阳当空照,花儿对我笑,小鸟说早早早".to_string(),
// 2,
// 100,
// 10,
// 2.0,
// false,
// 6.0,
// )?;
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
save_wav(&generate, "voxcpm.wav", 16000)?;
Ok(())
}
#[test]
fn voxcpm_tokenizer() -> Result<()> {
let model_path = "/home/jhq/huggingface_model/openbmb/VoxCPM-0.5B/";
let tokenizer = SingleChineseTokenizer::new(model_path)?;
let ids = tokenizer.encode("你好啊,你吃饭了吗".to_string())?;
println!("ids: {:?}", ids);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_qwen3vl.rs | tests/test_qwen3vl.rs | use std::{pin::pin, time::Instant};
use aha::models::{GenerateModel, qwen3vl::generate::Qwen3VLGenerateModel};
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
use rocket::futures::StreamExt;
#[test]
fn qwen3vl_generate() -> Result<()> {
// test with cuda: RUST_BACKTRACE=1 cargo test -F cuda,ffmpeg qwen3vl_generate -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/Qwen/Qwen3-VL-2B-Instruct/";
let message = r#"
{
"model": "qwen3vl",
"messages": [
{
"role": "user",
"content": [
{
"type": "video",
"video_url":
{
"url": "./assets/video/video_test.mp4"
}
},
{
"type": "text",
"text": "视频中发生了什么?, 现在几点了"
}
]
}
]
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut qwen3vl = Qwen3VLGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let res = qwen3vl.generate(mes)?;
let i_duration = i_start.elapsed();
println!("generate: \n {:?}", res);
if res.usage.is_some() {
let num_token = res.usage.as_ref().unwrap().total_tokens;
let duration_secs = i_duration.as_secs_f64();
let tps = num_token as f64 / duration_secs;
println!("Tokens per second (TPS): {:.2}", tps);
}
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
#[tokio::test]
async fn qwen3vl_stream() -> Result<()> {
// test with cuda: RUST_BACKTRACE=1 cargo test -F cuda,ffmpeg qwen3vl_stream -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/Qwen/Qwen3-VL-2B-Instruct/";
let message = r#"
{
"model": "qwen3vl",
"messages": [
{
"role": "user",
"content": [
{
"type": "video",
"video_url":
{
"url": "./assets/video/video_test.mp4"
}
},
{
"type": "text",
"text": "视频中发生了什么?"
}
]
}
]
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut qwen3vl = Qwen3VLGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let mut stream = pin!(qwen3vl.generate_stream(mes)?);
while let Some(item) = stream.next().await {
println!("generate: \n {:?}", item);
}
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/weight_test.rs | tests/weight_test.rs | use std::collections::HashMap;
use aha::utils::{find_type_files, get_device};
use anyhow::Result;
use candle_core::{Device, pickle::read_all_with_key, safetensors};
use candle_nn::VarBuilder;
#[test]
fn minicpm4_weight() -> Result<()> {
let model_path = "/home/jhq/huggingface_model/OpenBMB/MiniCPM4-0.5B/";
let model_list = find_type_files(model_path, "safetensors")?;
let device = Device::Cpu;
for m in model_list {
let weights = safetensors::load(m, &device)?;
for (key, tensor) in weights.iter() {
println!("=== {} ===", key);
println!("Shape: {:?}", tensor.shape());
println!("DType: {:?}", tensor.dtype());
}
}
Ok(())
}
#[test]
fn voxcpm_weight() -> Result<()> {
let model_path = "/home/jhq/huggingface_model/openbmb/VoxCPM-0.5B/";
let model_list = find_type_files(model_path, "pth")?;
println!("model_list: {:?}", model_list);
let dev = get_device(None);
let mut dict_to_hashmap = HashMap::new();
let mut dtype = candle_core::DType::F16;
for m in model_list {
let dict = read_all_with_key(m, Some("state_dict"))?;
dtype = dict[0].1.dtype();
for (k, v) in dict {
println!("key: {}, tensor shape: {:?}", k, v);
dict_to_hashmap.insert(k, v);
}
}
let vb = VarBuilder::from_tensors(dict_to_hashmap, dtype, &dev);
let contain_key = vb.contains_tensor("encoder.block.4.block.2.block.3.weight_g");
println!(
"contain encoder.block.4.block.2.block.3.weight_g: {}",
contain_key
);
Ok(())
}
#[test]
fn voxcpm1_5_weight() -> Result<()> {
let model_path = "/home/jhq/huggingface_model/OpenBMB/VoxCPM1.5/";
let model_list = find_type_files(model_path, "pth")?;
println!("model_list: {:?}", model_list);
// let dev = get_device(None);
let mut dict_to_hashmap = HashMap::new();
// let mut dtype = candle_core::DType::F32;
for m in model_list {
let dict = read_all_with_key(m, Some("state_dict"))?;
// dtype = dict[0].1.dtype();
for (k, v) in dict {
println!("key: {}, tensor shape: {:?}", k, v);
dict_to_hashmap.insert(k, v);
}
}
Ok(())
}
#[test]
fn qwen3vl_weight() -> Result<()> {
let model_path = "/home/jhq/huggingface_model/Qwen/Qwen3-VL-4B-Instruct/";
let model_list = find_type_files(model_path, "safetensors")?;
let device = Device::Cpu;
for m in &model_list {
let weights = safetensors::load(m, &device)?;
for (key, tensor) in weights.iter() {
println!("=== {} === {:?}", key, tensor.shape());
}
}
println!("model_list: {:?}", model_list);
Ok(())
}
#[test]
fn deepseekocr_weight() -> Result<()> {
let model_path = "/home/jhq/huggingface_model/deepseek-ai/DeepSeek-OCR/";
let model_list = find_type_files(model_path, "safetensors")?;
let device = Device::Cpu;
for m in &model_list {
let weights = safetensors::load(m, &device)?;
for (key, tensor) in weights.iter() {
if key.contains("rel_pos_h") {
println!("=== {} === {:?}", key, tensor.shape());
}
// println!("=== {} === {:?}", key, tensor.shape());
}
}
println!("model_list: {:?}", model_list);
Ok(())
}
#[test]
fn hunyuanocr_weight() -> Result<()> {
let model_path = "/home/jhq/huggingface_model/Tencent-Hunyuan/HunyuanOCR/";
let model_list = find_type_files(model_path, "safetensors")?;
let device = Device::Cpu;
for m in &model_list {
let weights = safetensors::load(m, &device)?;
for (key, tensor) in weights.iter() {
if key.contains(".image_") {
println!("=== {} === {:?}", key, tensor.shape());
}
// println!("=== {} === {:?}", key, tensor.shape());
}
}
println!("model_list: {:?}", model_list);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_rmbg2_0.rs | tests/test_rmbg2_0.rs | use std::time::Instant;
use aha::models::rmbg2_0::generate::RMBG2_0Model;
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
#[test]
fn rmbg2_0_generate() -> Result<()> {
// test with cuda: RUST_BACKTRACE=1 cargo test -F cuda rmbg2_0_generate -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/AI-ModelScope/RMBG-2.0/";
let message = r#"
{
"model": "rmbg2.0",
"messages": [
{
"role": "user",
"content": [
{
"type": "image",
"image_url":
{
"url": "file://./assets/img/gougou.jpg"
}
}
]
}
]
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let model = RMBG2_0Model::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let result = model.inference(mes)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
for (i, img) in result.iter().enumerate() {
let _ = img.save(format!("rmbg_{i}.png"));
}
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_voxcpm1_5.rs | tests/test_voxcpm1_5.rs | use std::time::Instant;
use aha::{
models::{
GenerateModel,
voxcpm::{generate::VoxCPMGenerate, tokenizer::SingleChineseTokenizer},
},
utils::audio_utils::{extract_and_save_audio_from_response, save_wav},
};
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::{Ok, Result};
#[test]
fn voxcpm1_5_use_message_generate() -> Result<()> {
// RUST_BACKTRACE=1 cargo test -F cuda voxcpm1_5_use_message_generate -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/OpenBMB/VoxCPM1.5/";
let message = r#"
{
"model": "voxcpm1.5",
"messages": [
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url":
{
"url": "https://sis-sample-audio.obs.cn-north-1.myhuaweicloud.com/16k16bit.wav"
}
},
{
"type": "text",
"text": "VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly realistic speech."
}
]
}
],
"metadata": {"prompt_text": "华为致力于把数字世界带给每个人,每个家庭,每个组织,构建万物互联的智能世界。"}
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut voxcpm_generate = VoxCPMGenerate::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let generate = voxcpm_generate.generate(mes)?;
let save_path = extract_and_save_audio_from_response(&generate, "./")?;
for path in save_path {
println!("save audio: {}", path);
}
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
#[test]
fn voxcpm1_5_generate() -> Result<()> {
// RUST_BACKTRACE=1 cargo test -F cuda voxcpm1_5_generate -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/OpenBMB/VoxCPM1.5/";
let i_start = Instant::now();
let mut voxcpm_generate = VoxCPMGenerate::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
// let generate = voxcpm_generate.generate_simple("太阳当空照,花儿对我笑,小鸟说早早早".to_string())?;
let generate = voxcpm_generate.inference(
"VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly realistic speech.".to_string(),
Some("啥子小师叔,打狗还要看主人,你再要继续,我就是你的对手".to_string()),
Some("file://./assets/audio/voice_01.wav".to_string()),
// Some("一定被灰太狼给吃了,我已经为他准备好了花圈了".to_string()),
// Some("file://./assets/audio/voice_05.wav".to_string()),
2,
4096,
10,
2.0,
// false,
6.0,
)?;
    // Build the prompt_cache
// let _ = voxcpm_generate.build_prompt_cache(
// "啥子小师叔,打狗还要看主人,你再要继续,我,就是你的对手".to_string(),
// "file://./assets/audio/voice_01.wav".to_string(),
// )?;
    // // Generate speech using the prompt_cache
// let generate = voxcpm_generate.generate_use_prompt_cache(
// "太阳当空照,花儿对我笑,小鸟说早早早".to_string(),
// 2,
// 100,
// 10,
// 2.0,
// false,
// 6.0,
// )?;
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
save_wav(&generate, "voxcpm1_5.wav", 44100)?;
Ok(())
}
#[test]
fn voxcpm1_5_tokenizer() -> Result<()> {
// RUST_BACKTRACE=1 cargo test -F cuda voxcpm1_5_tokenizer -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/OpenBMB/VoxCPM1.5/";
let tokenizer = SingleChineseTokenizer::new(model_path)?;
let ids = tokenizer.encode("你好啊,你吃饭了吗".to_string())?;
println!("ids: {:?}", ids);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_deepseek_ocr.rs | tests/test_deepseek_ocr.rs | use std::{pin::pin, time::Instant};
use aha::models::{GenerateModel, deepseek_ocr::generate::DeepseekOCRGenerateModel};
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
use rocket::futures::StreamExt;
#[test]
fn deepseek_ocr_generate() -> Result<()> {
// RUST_BACKTRACE=1 cargo test -F cuda deepseek_ocr_generate -r -- --nocapture
let message = r#"
{
"model": "deepseek-ocr",
"messages": [
{
"role": "user",
"content": [
{
"type": "image",
"image_url":
{
"url": "file://./assets/img/ocr_test1.png"
}
},
{
"type": "text",
"text": "<image>\nConvert the document to markdown. "
}
]
}
],
"metadata": {"base_size": "640", "image_size": "640", "crop_mode": "false"}
}
"#;
let model_path = "/home/jhq/huggingface_model/deepseek-ai/DeepSeek-OCR/";
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = DeepseekOCRGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let res = model.generate(mes)?;
let i_duration = i_start.elapsed();
println!("generate: \n {:?}", res);
    if let Some(usage) = res.usage.as_ref() {
        let num_token = usage.total_tokens;
        let duration_secs = i_duration.as_secs_f64();
        let tps = num_token as f64 / duration_secs;
        println!("Tokens per second (TPS): {:.2}", tps);
    }
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
#[tokio::test]
async fn deepseek_ocr_stream() -> Result<()> {
// test with cuda: RUST_BACKTRACE=1 cargo test -F cuda deepseek_ocr_stream -r -- --nocapture
let message = r#"
{
"model": "deepseek-ocr",
"messages": [
{
"role": "user",
"content": [
{
"type": "image",
"image_url":
{
"url": "file://./assets/img/ocr_test1.png"
}
},
{
"type": "text",
"text": "<image>\n<|grounding|>Convert the document to markdown. "
}
]
},
{
"role": "assistant",
"content": ""
}
],
"metadata": {"base_size": "640", "image_size": "640", "crop_mode": "false"}
}
"#;
let model_path = "/home/jhq/huggingface_model/deepseek-ai/DeepSeek-OCR/";
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = DeepseekOCRGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let mut stream = pin!(model.generate_stream(mes)?);
while let Some(item) = stream.next().await {
println!("generate: \n {:?}", item);
}
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_hunyuan_ocr.rs | tests/test_hunyuan_ocr.rs | use std::{pin::pin, time::Instant};
use aha::models::{GenerateModel, hunyuan_ocr::generate::HunyuanOCRGenerateModel};
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
use rocket::futures::StreamExt;
#[test]
fn hunyuan_ocr_generate() -> Result<()> {
// RUST_BACKTRACE=1 cargo test -F cuda hunyuan_ocr_generate -r -- --nocapture
let message = r#"
{
"model": "hunyuan-ocr",
"messages": [
{
"role": "user",
"content": [
{
"type": "image",
"image_url":
{
"url": "file://./assets/img/ocr_test1.png"
}
},
{
"type": "text",
"text": "识别图片中的文字"
}
]
}
]
}
"#;
let model_path = "/home/jhq/huggingface_model/Tencent-Hunyuan/HunyuanOCR/";
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = HunyuanOCRGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let res = model.generate(mes)?;
let i_duration = i_start.elapsed();
println!("generate: \n {:?}", res);
    if let Some(usage) = res.usage.as_ref() {
        let num_token = usage.total_tokens;
        let duration_secs = i_duration.as_secs_f64();
        let tps = num_token as f64 / duration_secs;
        println!("Tokens per second (TPS): {:.2}", tps);
    }
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
#[tokio::test]
async fn hunyuan_ocr_stream() -> Result<()> {
// test with cuda: RUST_BACKTRACE=1 cargo test -F cuda hunyuan_ocr_stream -r -- --nocapture
let message = r#"
{
"model": "hunyuan-ocr",
"messages": [
{
"role": "user",
"content": [
{
"type": "image",
"image_url":
{
"url": "file://./assets/img/ocr_test1.png"
}
},
{
"type": "text",
"text": "检测并识别图片中的文字,将文本坐标格式化输出。"
}
]
}
]
}
"#;
let model_path = "/home/jhq/huggingface_model/Tencent-Hunyuan/HunyuanOCR/";
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = HunyuanOCRGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let mut stream = pin!(model.generate_stream(mes)?);
while let Some(item) = stream.next().await {
println!("generate: \n {:?}", item);
}
let i_duration = i_start.elapsed();
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_robo_brain.rs | tests/test_robo_brain.rs | use std::time::Instant;
use aha::models::{GenerateModel, qwen2_5vl::generate::Qwen2_5VLGenerateModel};
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
#[test]
fn robo_brain_generate() -> Result<()> {
// test with cuda: RUST_BACKTRACE=1 cargo test -F cuda robo_brain_generate -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/BAAI/RoboBrain2.0-3B/";
let message = r#"
{
"model": "qwen2.5vl",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "hello RoboBrain"
}
]
}
]
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut model = Qwen2_5VLGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let result = model.generate(mes)?;
let i_duration = i_start.elapsed();
println!("generate: \n {:?}", result);
    if let Some(usage) = result.usage.as_ref() {
        let num_token = usage.total_tokens;
        let duration_secs = i_duration.as_secs_f64();
        let tps = num_token as f64 / duration_secs;
        println!("Tokens per second (TPS): {:.2}", tps);
    }
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_gelab_zero.rs | tests/test_gelab_zero.rs | use std::time::Instant;
use aha::models::{GenerateModel, qwen3vl::generate::Qwen3VLGenerateModel};
use aha_openai_dive::v1::resources::chat::ChatCompletionParameters;
use anyhow::Result;
#[test]
fn gelab_zero_generate() -> Result<()> {
// test with cuda: RUST_BACKTRACE=1 cargo test -F cuda gelab_zero_generate -r -- --nocapture
let model_path = "/home/jhq/huggingface_model/stepfun-ai/GELab-Zero-4B-preview";
let message = r#"
{
"model": "gelab-zero",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "Hello, GELab-Zero!, 现在几点了"
}
]
}
],
"tools": [
{
"type": "function",
"function": {
"name": "get_current_time",
"description": "当你想知道现在的时间时非常有用。",
"parameters": {}
}
},
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "当你想查询指定城市的天气时非常有用。",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "城市或县区,比如北京市、杭州市、余杭区等。"
}
},
"required": ["location"]
}
}
}
],
"tool_choice": null
}
"#;
let mes: ChatCompletionParameters = serde_json::from_str(message)?;
let i_start = Instant::now();
let mut qwen3vl = Qwen3VLGenerateModel::init(model_path, None, None)?;
let i_duration = i_start.elapsed();
println!("Time elapsed in load model is: {:?}", i_duration);
let i_start = Instant::now();
let res = qwen3vl.generate(mes)?;
let i_duration = i_start.elapsed();
println!("generate: \n {:?}", res);
    if let Some(usage) = res.usage.as_ref() {
        let num_token = usage.total_tokens;
        let duration_secs = i_duration.as_secs_f64();
        let tps = num_token as f64 / duration_secs;
        println!("Tokens per second (TPS): {:.2}", tps);
    }
println!("Time elapsed in generate is: {:?}", i_duration);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
jhqxxx/aha | https://github.com/jhqxxx/aha/blob/3ba4de8a7dd578b809fa4676f865a438abba91ab/tests/test_rmbg2_0_perf.rs | tests/test_rmbg2_0_perf.rs | use std::time::Instant;
use anyhow::Result;
use image::{ImageReader, Rgba, RgbaImage};
use rayon::prelude::*;
/// Benchmark comparing pixel-combination strategies.
#[test]
fn test_pixel_combine_performance() -> Result<()> {
// cargo test test_pixel_combine_performance -r -- --nocapture
let img_path = "./assets/img/gougou.jpg";
let img = ImageReader::open(img_path)?.decode()?;
    // Downscale the image to speed up the test
let img = img.resize(1024, 1024, image::imageops::FilterType::Nearest);
let width = img.width();
let height = img.height();
let rgb_img = img.to_rgb8();
    // Simulate an alpha channel
let alpha_gray =
image::GrayImage::from_fn(width, height, |x, y| image::Luma([((x + y) % 256) as u8]));
let iterations = 10;
println!("=== 像素组合性能测试 ===");
println!("图片尺寸: {}x{}", width, height);
println!();
    // Old approach: per-pixel operations
let start = Instant::now();
for _ in 0..iterations {
let mut rgba_img = RgbaImage::new(width, height);
for (x, y, pixel) in rgb_img.enumerate_pixels() {
let alpha_value = alpha_gray.get_pixel(x, y).0[0];
let rgba_pixel = Rgba([pixel.0[0], pixel.0[1], pixel.0[2], alpha_value]);
rgba_img.put_pixel(x, y, rgba_pixel);
}
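        // black_box hints the optimizer not to elide the otherwise-unused result.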
std::hint::black_box(&rgba_img);
}
let old_duration = start.elapsed();
    println!(
        "Old approach (per-pixel): {:?}, average: {:?}",
        old_duration,
        old_duration / iterations
    );
    // New approach: serial indexed assignment
let start = Instant::now();
for _ in 0..iterations {
let rgb_raw = rgb_img.as_raw();
let alpha_raw = alpha_gray.as_raw();
let pixel_count = (width * height) as usize;
let mut rgba_raw = vec![0u8; pixel_count * 4];
for i in 0..pixel_count {
let dst = i * 4;
let src = i * 3;
rgba_raw[dst] = rgb_raw[src];
rgba_raw[dst + 1] = rgb_raw[src + 1];
rgba_raw[dst + 2] = rgb_raw[src + 2];
rgba_raw[dst + 3] = alpha_raw[i];
}
let rgba_img = RgbaImage::from_raw(width, height, rgba_raw).unwrap();
std::hint::black_box(&rgba_img);
}
let serial_duration = start.elapsed();
    println!(
        "New approach (serial indexing): {:?}, average: {:?}",
        serial_duration,
        serial_duration / iterations
    );
    // New approach: parallel chunked writes
let start = Instant::now();
for _ in 0..iterations {
let rgb_raw = rgb_img.as_raw();
let alpha_raw = alpha_gray.as_raw();
let pixel_count = (width * height) as usize;
let mut rgba_raw = vec![0u8; pixel_count * 4];
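        // Each 4-byte RGBA chunk at index i maps to the RGB triple starting at
        // i * 3 and the alpha byte at index i, so the chunks can be filled in
        // parallel without overlapping writes.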
rgba_raw
.par_chunks_mut(4)
.enumerate()
.for_each(|(i, chunk)| {
let src = i * 3;
chunk[0] = rgb_raw[src];
chunk[1] = rgb_raw[src + 1];
chunk[2] = rgb_raw[src + 2];
chunk[3] = alpha_raw[i];
});
let rgba_img = RgbaImage::from_raw(width, height, rgba_raw).unwrap();
std::hint::black_box(&rgba_img);
}
let parallel_duration = start.elapsed();
    println!(
        "New approach (parallel): {:?}, average: {:?}",
        parallel_duration,
        parallel_duration / iterations
    );
let speedup_serial = old_duration.as_secs_f64() / serial_duration.as_secs_f64();
let speedup_parallel = old_duration.as_secs_f64() / parallel_duration.as_secs_f64();
println!();
println!("串行索引 vs 逐像素: {:.2}x", speedup_serial);
println!("并行 vs 逐像素: {:.2}x", speedup_parallel);
Ok(())
}
/// Benchmark comparing image resize performance (serial vs parallel).
#[test]
fn test_image_resize_parallel_vs_serial() -> Result<()> {
// cargo test test_image_resize_parallel_vs_serial -r -- --nocapture
let img_path = "./assets/img/gougou.jpg";
let img = ImageReader::open(img_path)?.decode()?;
    // Downscale the source image to speed up the test
let img = img.resize(2048, 2048, image::imageops::FilterType::Nearest);
let num_images = 4;
let imgs: Vec<_> = (0..num_images).map(|_| img.clone()).collect();
let target_h = 1024u32;
let target_w = 1024u32;
let iterations = 10;
println!("=== 图片 Resize 性能测试 ===");
println!("图片数量: {}", num_images);
println!("原始尺寸: {}x{}", img.width(), img.height());
println!("目标尺寸: {}x{}", target_w, target_h);
println!();
    // Serial resize
let start = Instant::now();
for _ in 0..iterations {
let mut results = Vec::with_capacity(num_images);
for img in &imgs {
let resized =
img.resize_exact(target_w, target_h, image::imageops::FilterType::CatmullRom);
results.push(resized);
}
std::hint::black_box(&results);
}
let serial_duration = start.elapsed();
    println!(
        "Serial resize: {:?}, average: {:?}",
        serial_duration,
        serial_duration / iterations
    );
    // Parallel resize
let start = Instant::now();
for _ in 0..iterations {
let results: Vec<_> = imgs
.par_iter()
.map(|img| {
img.resize_exact(target_w, target_h, image::imageops::FilterType::CatmullRom)
})
.collect();
std::hint::black_box(&results);
}
let parallel_duration = start.elapsed();
    println!(
        "Parallel resize: {:?}, average: {:?}",
        parallel_duration,
        parallel_duration / iterations
    );
let speedup = serial_duration.as_secs_f64() / parallel_duration.as_secs_f64();
println!();
println!("并行 vs 串行: {:.2}x", speedup);
Ok(())
}
/// Benchmark post-processing performance, parallel vs serial (pure image ops).
#[test]
fn test_postprocess_parallel_vs_serial() -> Result<()> {
// cargo test test_postprocess_parallel_vs_serial -r -- --nocapture
let img_path = "./assets/img/gougou.jpg";
let img = ImageReader::open(img_path)?.decode()?;
    // Downscale the image to speed up the test
let img = img.resize(1024, 1024, image::imageops::FilterType::Nearest);
let width = img.width();
let height = img.height();
    // Simulate post-processing inputs for multiple images
let num_images = 4;
let rgb_imgs: Vec<_> = (0..num_images).map(|_| img.to_rgb8()).collect();
let alpha_grays: Vec<_> = (0..num_images)
.map(|_| {
image::GrayImage::from_fn(width, height, |x, y| image::Luma([((x + y) % 256) as u8]))
})
.collect();
let iterations = 10;
println!("=== 后处理阶段性能测试(纯图像操作)===");
println!("图片数量: {}", num_images);
println!("图片尺寸: {}x{}", width, height);
println!();
    // Serial post-processing (for-in loop)
let start = Instant::now();
for _ in 0..iterations {
let mut results = Vec::with_capacity(num_images);
for i in 0..num_images {
let rgb_raw = rgb_imgs[i].as_raw();
let alpha_raw = alpha_grays[i].as_raw();
let pixel_count = (width * height) as usize;
let mut rgba_raw = vec![0u8; pixel_count * 4];
for j in 0..pixel_count {
let dst = j * 4;
let src = j * 3;
rgba_raw[dst] = rgb_raw[src];
rgba_raw[dst + 1] = rgb_raw[src + 1];
rgba_raw[dst + 2] = rgb_raw[src + 2];
rgba_raw[dst + 3] = alpha_raw[j];
}
let rgba_img = RgbaImage::from_raw(width, height, rgba_raw).unwrap();
results.push(rgba_img);
}
std::hint::black_box(&results);
}
let serial_duration = start.elapsed();
    println!(
        "Serial post-processing: {:?}, average: {:?}",
        serial_duration,
        serial_duration / iterations
    );
    // Parallel post-processing (outer parallelism + inner parallelism)
let start = Instant::now();
for _ in 0..iterations {
let results: Vec<_> = (0..num_images)
.into_par_iter()
.map(|i| {
let rgb_raw = rgb_imgs[i].as_raw();
let alpha_raw = alpha_grays[i].as_raw();
let pixel_count = (width * height) as usize;
let mut rgba_raw = vec![0u8; pixel_count * 4];
rgba_raw
.par_chunks_mut(4)
.enumerate()
.for_each(|(j, chunk)| {
let src = j * 3;
chunk[0] = rgb_raw[src];
chunk[1] = rgb_raw[src + 1];
chunk[2] = rgb_raw[src + 2];
chunk[3] = alpha_raw[j];
});
RgbaImage::from_raw(width, height, rgba_raw).unwrap()
})
.collect();
std::hint::black_box(&results);
}
let parallel_duration = start.elapsed();
    println!(
        "Parallel post-processing: {:?}, average: {:?}",
        parallel_duration,
        parallel_duration / iterations
    );
let speedup = serial_duration.as_secs_f64() / parallel_duration.as_secs_f64();
println!();
println!("后处理性能提升: {:.2}x", speedup);
Ok(())
}
/// Benchmark the full image pipeline (resize + pixel merge), serial vs parallel.
#[test]
fn test_full_image_pipeline_parallel_vs_serial() -> Result<()> {
// cargo test test_full_image_pipeline_parallel_vs_serial -r -- --nocapture
let img_path = "./assets/img/gougou.jpg";
let img = ImageReader::open(img_path)?.decode()?;
    // Downscale the image to speed up the test
let img = img.resize(2048, 2048, image::imageops::FilterType::Nearest);
let orig_width = img.width();
let orig_height = img.height();
let num_images = 4;
let imgs: Vec<_> = (0..num_images).map(|_| img.clone()).collect();
let target_h = 1024u32;
let target_w = 1024u32;
    // Simulate alpha masks
let alpha_grays: Vec<_> = (0..num_images)
.map(|_| {
image::GrayImage::from_fn(orig_width, orig_height, |x, y| {
image::Luma([((x + y) % 256) as u8])
})
})
.collect();
let iterations = 10;
println!("=== 完整图像处理流程性能测试 ===");
println!("图片数量: {}", num_images);
println!("原始尺寸: {}x{}", orig_width, orig_height);
println!("处理尺寸: {}x{}", target_w, target_h);
println!();
    // Serial pipeline
let start = Instant::now();
for _ in 0..iterations {
let mut results = Vec::with_capacity(num_images);
for i in 0..num_images {
            // Preprocessing: resize to the model input size
let _resized =
imgs[i].resize_exact(target_w, target_h, image::imageops::FilterType::CatmullRom);
            // Post-processing: merge RGB and alpha
let rgb_img = imgs[i].to_rgb8();
let rgb_raw = rgb_img.as_raw();
let alpha_raw = alpha_grays[i].as_raw();
let pixel_count = (orig_width * orig_height) as usize;
let mut rgba_raw = vec![0u8; pixel_count * 4];
for j in 0..pixel_count {
let dst = j * 4;
let src = j * 3;
rgba_raw[dst] = rgb_raw[src];
rgba_raw[dst + 1] = rgb_raw[src + 1];
rgba_raw[dst + 2] = rgb_raw[src + 2];
rgba_raw[dst + 3] = alpha_raw[j];
}
let rgba_img = RgbaImage::from_raw(orig_width, orig_height, rgba_raw).unwrap();
results.push(rgba_img);
}
std::hint::black_box(&results);
}
let serial_duration = start.elapsed();
    println!(
        "Serial pipeline: {:?}, average: {:?}",
        serial_duration,
        serial_duration / iterations
    );
    // Parallel pipeline
let start = Instant::now();
for _ in 0..iterations {
        // Parallel preprocessing
let _resized: Vec<_> = imgs
.par_iter()
.map(|img| {
img.resize_exact(target_w, target_h, image::imageops::FilterType::CatmullRom)
})
.collect();
        // Parallel post-processing
let results: Vec<_> = (0..num_images)
.into_par_iter()
.map(|i| {
let rgb_img = imgs[i].to_rgb8();
let rgb_raw = rgb_img.as_raw();
let alpha_raw = alpha_grays[i].as_raw();
let pixel_count = (orig_width * orig_height) as usize;
let mut rgba_raw = vec![0u8; pixel_count * 4];
rgba_raw
.par_chunks_mut(4)
.enumerate()
.for_each(|(j, chunk)| {
let src = j * 3;
chunk[0] = rgb_raw[src];
chunk[1] = rgb_raw[src + 1];
chunk[2] = rgb_raw[src + 2];
chunk[3] = alpha_raw[j];
});
RgbaImage::from_raw(orig_width, orig_height, rgba_raw).unwrap()
})
.collect();
std::hint::black_box(&results);
}
let parallel_duration = start.elapsed();
    println!(
        "Parallel pipeline: {:?}, average: {:?}",
        parallel_duration,
        parallel_duration / iterations
    );
let speedup = serial_duration.as_secs_f64() / parallel_duration.as_secs_f64();
println!();
println!("完整流程性能提升: {:.2}x", speedup);
Ok(())
}
| rust | Apache-2.0 | 3ba4de8a7dd578b809fa4676f865a438abba91ab | 2026-01-04T20:22:58.809484Z | false |
fMeow/arangors | https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/user.rs | src/user.rs | use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use typed_builder::TypedBuilder;
#[derive(Debug, Clone, Serialize, Deserialize, TypedBuilder)]
pub struct User {
#[serde(rename = "user")]
pub username: String,
#[serde(rename = "passwd")]
password: Option<String>,
pub active: bool,
pub extra: Option<HashMap<String, Value>>, // change_password: Option<bool>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserResponse {
error: bool,
code: u16,
pub(crate) result: Vec<User>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeleteUserResponse {
error: bool,
code: u16,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserDatabasesGetResponse {
error: bool,
code: u16,
    pub result: Value, // one of two formats depending on the "full" parameter
}
pub enum UserAccessLevel {
None,
ReadWrite,
ReadOnly,
}
pub(crate) fn access_level_enum_to_str(level: UserAccessLevel) -> String {
match level {
UserAccessLevel::None => "none".into(),
UserAccessLevel::ReadWrite => "rw".into(),
UserAccessLevel::ReadOnly => "ro".into(),
}
}
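#[cfg(test)]
mod tests {
    // A minimal sketch, not part of the upstream test suite: it exercises the
    // TypedBuilder derive on `User` and the access-level string mapping above.
    use super::*;

    #[test]
    fn build_user_and_map_access_levels() {
        // All four fields must be set because none of them carries a
        // `#[builder(default)]` attribute.
        let user = User::builder()
            .username("test".to_string())
            .password(None)
            .active(true)
            .extra(None)
            .build();
        assert_eq!(user.username, "test");
        assert!(user.active);
        // The expected strings are exactly those produced by the match above.
        assert_eq!(access_level_enum_to_str(UserAccessLevel::None), "none");
        assert_eq!(access_level_enum_to_str(UserAccessLevel::ReadWrite), "rw");
        assert_eq!(access_level_enum_to_str(UserAccessLevel::ReadOnly), "ro");
    }
}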
| rust | MIT | 4ee57cfdce34a504d94108dedce5abc11809de87 | 2026-01-04T20:24:21.210590Z | false |
fMeow/arangors | https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/lib.rs | src/lib.rs | //! [](https://github.com/fMeow/arangors/actions)
//! [](./LICENSE)
//! [](https://crates.io/crates/arangors)
//! [](https://docs.rs/arangors)
//!
//! `arangors` is an intuitive rust client for [ArangoDB](https://www.arangodb.com/),
//! inspired by [pyArango](https://github.com/tariqdaouda/pyArango).
//!
//! `arangors` enables you to connect with ArangoDB server, access to database,
//! execute AQL query, manage ArangoDB in an easy and intuitive way,
//! both `async` and plain synchronous code with any HTTP ecosystem you love.
//!
//! # Philosophy of arangors
//!
//! `arangors` is targeted at an ergonomic, intuitive and OOP-like API for
//! ArangoDB, offering both top level and low level APIs for users to choose from.
//!
//! Overall architecture of ArangoDB:
//!
//! > databases -> collections -> documents/edges
//!
//! In fact, the design of `arangors` just mimics this architecture, with the
//! slight difference that at the top level there is a connection object on top
//! of databases, containing an HTTP client with authentication information in
//! its HTTP headers.
//!
//! Hierarchy of arangors:
//! > connection -> databases -> collections -> documents/edges
//!
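//! A minimal sketch of walking this hierarchy (reusing the placeholder
//! credentials and the `test_db`/`test_collection` names from the examples
//! below):
//!
//! ```rust, ignore
//! use arangors::Connection;
//!
//! let conn = Connection::establish_jwt("http://localhost:8529", "username", "password").await?;
//! let db = conn.db("test_db").await?;
//! let collection = db.collection("test_collection").await?;
//! ```
//!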
//! # Features & TODO
//!
//! - [X] make connection to ArangoDB
//! - [X] get list of databases and collections
//! - [X] fetch database and collection info
//! - [X] create and delete database or collections
//! - [X] full featured AQL query
//! - [X] Synchronous connection based on `reqwest` and full featured AQL query.
//! - [X] Fill the unimplemented API in `Connection`, `Database`, `Collection` and `Document`.
//! - [X] support both `async` and sync client
//! - [X] Offer a way to use a custom HTTP client ecosystem.
//! - [X] Index Management (since 0.4.3)
//! - [X] Graph Management (since 0.4.5)
//! - [X] User Management (since 0.5.4)
//!
//! # Glance
//!
//! ## Use Different HTTP Ecosystem, Regardless of Async or Sync
//!
//! You can switch to a different HTTP ecosystem with a feature gate, or implement
//! the Client yourself (see examples).
//!
//! Currently supported out-of-the-box ecosystems are:
//! - `reqwest_async`
//! - `reqwest_blocking`
//! - `surf_async`
//!
//! By default, `arangors` uses `reqwest_async` as the underlying HTTP client to
//! connect with ArangoDB. You can switch to another ecosystem via a feature gate:
//!
//! ```toml
//! [dependencies]
//! arangors = { version = "0.6", features = ["surf_async"], default-features = false }
//! ```
//!
//! Or if you want to stick with an ecosystem that is not listed in the
//! feature gates, you can get vanilla `arangors` without any HTTP client
//! dependency:
//!
//! ```toml
//! [dependencies]
//! ## This one is async
//! arangors = { version = "0.6", default-features = false }
//! ## This one is synchronous
//! arangors = { version = "0.6", features = ["blocking"], default-features = false }
//! ```
//!
//! Thanks to `maybe_async`, `arangors` can unify the sync and async APIs and toggle
//! between them with a feature gate. `arangors` adopts an async-first policy.
//!
//! ## Connection
//!
//! There are three ways to establish a connection:
//! - jwt
//! - basic auth
//! - no authentication
//!
//! The `arangors` API provides a method for each.
//!
//! Example:
//!
//! - With authentication
//!
//! ```rust
//! use arangors::Connection;
//!
//! # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)]
//! # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)]
//! # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)]
//! # async fn main() {
//! // (Recommended) Handy functions
//! let conn = Connection::establish_jwt("http://localhost:8529", "username", "password")
//! .await
//! .unwrap();
//! let conn = Connection::establish_basic_auth("http://localhost:8529", "username", "password")
//! .await
//! .unwrap();
//! # }
//! ```
//!
//! - Without authentication
//!
//! **Only use this in an evaluation setting**.
//!
//! ``` rust, ignore
//! use arangors::Connection;
//! let conn = Connection::establish_without_auth("http://localhost:8529").await.unwrap();
//! ```
//!
//! ## Database && Collection
//!
//! To get info or operate on database or collections:
//!
//! ```rust
//! use arangors::Connection;
//!
//! # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)]
//! # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)]
//! # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)]
//! # async fn main() {
//! # let conn = Connection::establish_jwt("http://localhost:8529", "username", "password")
//! # .await
//! # .unwrap();
//! let db = conn.db("test_db").await.unwrap();
//! let collection = db.collection("test_collection").await.unwrap();
//! # }
//! ```
//!
//! ## AQL Query
//!
//! All [AQL](https://www.arangodb.com/docs/stable/aql/index.html) query related functions are associated with a database, as AQL queries
//! are performed at the database level.
//!
//! There are several ways to execute an AQL query, and they can be categorized into two
//! classes:
//!
//! - batch query with cursor
//! - `aql_query_batch`
//! - `aql_next_batch`
//!
//! - query to fetch all results
//! - `aql_str`
//! - `aql_bind_vars`
//! - `aql_query`
//!
//! The latter ones provide a convenient high-level API, whereas batch
//! queries offer more control.
//!
//! ### Typed or Not Typed
//!
//! Note that results from the ArangoDB server, e.g. fetched documents, can be
//! strongly typed given a deserializable struct, or arbitrary JSON objects with
//! `serde_json::Value`.
//!
//! ```rust
//! # use arangors::Connection;
//! # use serde::Deserialize;
//!
//! #[derive(Deserialize, Debug)]
//! struct User {
//! pub username: String,
//! pub password: String,
//! }
//!
//! # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)]
//! # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)]
//! # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)]
//! # async fn main() {
//! # let conn = Connection::establish_jwt("http://localhost:8529", "username", "password")
//! # .await
//! # .unwrap();
//! # let db = conn.db("test_db").await.unwrap();
//! // Typed
//! let resp: Vec<User> = db
//! .aql_str("FOR u IN test_collection RETURN u")
//! .await
//! .unwrap();
//! // Not typed: Arbitrary JSON objects
//! let resp: Vec<serde_json::Value> = db
//! .aql_str("FOR u IN test_collection RETURN u")
//! .await
//! .unwrap();
//! # }
//! ```
//!
//! ### Batch query
//!
//! `arangors` offers a way to handle batch queries manually.
//!
//! Use `aql_query_batch` to get a cursor, and use `aql_next_batch` with the
//! cursor id to fetch the next batch and update the cursor.
//!
//! ```rust
//! # use arangors::{ClientError,Connection, AqlQuery};
//!
//! # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)]
//! # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)]
//! # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)]
//! # async fn main() {
//! # let conn = Connection::establish_jwt("http://localhost:8529", "username", "password")
//! # .await
//! # .unwrap();
//! # let db = conn.db("test_db").await.unwrap();
//! let aql = AqlQuery::builder()
//! .query("FOR u IN @@collection LIMIT 3 RETURN u")
//! .bind_var("@collection", "test_collection")
//! .batch_size(1)
//! .count(true)
//! .build();
//!
//! // fetch the first cursor
//! let mut cursor = db.aql_query_batch(aql).await.unwrap();
//! // see metadata in cursor
//! println!("count: {:?}", cursor.count);
//! println!("cached: {}", cursor.cached);
//! let mut results: Vec<serde_json::Value> = Vec::new();
//! loop {
//! if cursor.more {
//! let id = cursor.id.unwrap().clone();
//! // save data
//! results.extend(cursor.result.into_iter());
//! // update cursor
//! cursor = db.aql_next_batch(id.as_str()).await.unwrap();
//! } else {
//! break;
//! }
//! }
//! println!("{:?}", results);
//! # }
//! ```
//!
//! ### Fetch All Results
//!
//! There are three AQL query functions that fetch all results from
//! ArangoDB. These functions internally fetch batches one after another
//! until all results are retrieved.
//!
//! The functions for fetching all results are listed below:
//!
//! #### `aql_str`
//!
//! This function only accepts an AQL query string.
//!
//! Here is an example of a strongly typed query result with `aql_str`:
//!
//! ```rust
//! # use arangors::Connection;
//! # use serde::Deserialize;
//!
//! #[derive(Deserialize, Debug)]
//! struct User {
//! pub username: String,
//! pub password: String,
//! }
//!
//! # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)]
//! # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)]
//! # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)]
//! # async fn main() {
//! # let conn = Connection::establish_jwt("http://localhost:8529", "username", "password")
//! # .await
//! # .unwrap();
//! # let db = conn.db("test_db").await.unwrap();
//! let result: Vec<User> = db
//! .aql_str(r#"FOR i in test_collection FILTER i.username=="test2" return i"#)
//! .await
//! .unwrap();
//! # }
//! ```
//!
//! #### `aql_bind_vars`
//!
//! This function can be used to start an AQL query with bind variables.
//!
//! ```rust
//! # use serde::{Deserialize, Serialize};
//! # use std::collections::HashMap;
//! use arangors::{Connection, Document};
//!
//! #[derive(Serialize, Deserialize, Debug)]
//! struct User {
//! pub username: String,
//! pub password: String,
//! }
//!
//! # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)]
//! # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)]
//! # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)]
//! # async fn main() {
//! # let conn = Connection::establish_jwt("http://localhost:8529", "username", "password")
//! # .await
//! # .unwrap();
//! # let db = conn.db("test_db").await.unwrap();
//!
//! let mut vars = HashMap::new();
//! let user = User {
//! username: "test".to_string(),
//! password: "test_pwd".to_string(),
//! };
//! vars.insert("user", serde_json::value::to_value(&user).unwrap());
//! let result: Vec<Document<User>> = db
//! .aql_bind_vars(r#"FOR i in test_collection FILTER i==@user return i"#, vars)
//! .await
//! .unwrap();
//! # }
//! ```
//!
//! #### `aql_query`
//!
//! This function offers all the options available to tweak an AQL query.
//! Users have to construct an `AqlQuery` object first, which exposes every
//! option needed to tweak the query: you can set the batch size, add bind
//! vars, limit memory usage, and so on.
//!
//! ```rust
//! use arangors::{AqlQuery, Connection, Cursor, Database};
//! use serde_json::value::Value;
//!
//! # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)]
//! # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)]
//! # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)]
//! # async fn main() {
//! # let conn = Connection::establish_jwt("http://localhost:8529", "username", "password")
//! # .await
//! # .unwrap();
//! # let db = conn.db("test_db").await.unwrap();
//!
//! let aql = AqlQuery::builder()
//! .query("FOR u IN @@collection LIMIT 3 RETURN u")
//! .bind_var("@collection", "test_collection")
//! .batch_size(1)
//! .count(true)
//! .build();
//!
//! let resp: Vec<Value> = db.aql_query(aql).await.unwrap();
//! println!("{:?}", resp);
//! # }
//! ```
//!
//! ## Contributing
//!
//! Contributions and feedback are welcome via the usual GitHub workflow.
//!
//! ## License
//!
//! `arangors` is provided under the MIT license. See [LICENSE](./LICENSE).
//! An ergonomic [ArangoDB](https://www.arangodb.com/) client for Rust.
#[cfg(all(feature = "reqwest_async", feature = "reqwest_blocking"))]
compile_error!(
r#"feature "reqwest_async" and "reqwest_blocking" cannot be set at the same time.
If what you want is "reqwest_blocking", please turn off default features by adding "default-features=false" in your Cargo.toml"#
);
#[cfg(all(feature = "reqwest_async", feature = "surf_async"))]
compile_error!(
r#"feature "reqwest_async" and "surf_async" cannot be set at the same time.
If what you want is "surf_async", please turn off default features by adding "default-features=false" in your Cargo.toml"#
);
#[cfg(all(
feature = "reqwest_async",
feature = "reqwest_blocking",
feature = "surf_async"
))]
compile_error!(
r#"only one of features "reqwest_async", "reqwest_blocking" and "surf_async" can be"#
);
#[cfg(any(
feature = "reqwest_async",
feature = "reqwest_blocking",
feature = "surf_async"
))]
pub use crate::connection::Connection;
pub use crate::{
aql::{AqlOptions, AqlQuery, Cursor},
collection::Collection,
connection::GenericConnection,
database::Database,
document::Document,
error::{ArangoError, ClientError},
};
pub mod analyzer;
pub mod aql;
pub mod client;
pub mod collection;
pub mod connection;
pub mod database;
pub mod document;
pub mod error;
pub mod graph;
pub mod index;
mod query;
mod response;
pub mod transaction;
pub mod user;
pub mod view;
| rust | MIT | 4ee57cfdce34a504d94108dedce5abc11809de87 | 2026-01-04T20:24:21.210590Z | false |
fMeow/arangors | https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/index.rs | src/index.rs | //! This module facilitates the building of new indexes as well as the retrieval
//! of existing indexes in ArangoDB.
//! The following types are supported:
//!
//! * Fulltext
//! * Geo
//! * Hash
//! * Persistent
//! * Skiplist
//! * Ttl (Time to live)
//!
//! An index of type [Primary] cannot be created and is only available for
//! the retrieval of existing indexes, as ArangoDB creates a primary index on
//! every collection.
//! For detailed information about ArangoDB indexes, please check out the
//! official ArangoDB [documentation](https://www.arangodb.com/docs/stable/http/indexes.html).
//!
//! [Primary]: https://www.arangodb.com/docs/stable/http/indexes.html#primary-index
use serde::{Deserialize, Serialize};
use typed_builder::TypedBuilder;
pub(crate) const INDEX_API_PATH: &str = "_api/index";
/// Represents an [`Index`] in ArangoDB. The following types are
/// supported:
/// * Fulltext
/// * Geo
/// * Hash
/// * Persistent
/// * Skiplist
/// * Ttl (Time to live)
///
/// As different settings may be applied to different index types, use the
/// [`settings`] field on the index to specify the exact `type` of the index
/// including the required settings.
///
/// # Example
/// ```
/// # use arangors::Connection;
/// # use arangors::index::{IndexSettings, Index};
///
/// # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)]
/// # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)]
/// # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)]
/// # async fn main() -> Result<(),anyhow::Error>{
/// # let conn = Connection::establish_jwt("http://localhost:8529", "username", "password")
/// # .await
/// # .unwrap();
/// let database = conn.db("test_db").await.unwrap();
///
/// let index = Index::builder()
/// .name("doc_test_index_name")
/// .fields(vec!["password".to_string()])
/// .settings(IndexSettings::Persistent {
/// unique: true,
/// sparse: false,
/// deduplicate: false,
/// })
/// .build();
///
/// let index = database.create_index("test_collection", &index).await?;
/// let delete_result = database.delete_index(&index.id).await.unwrap();
/// # Ok(())
/// # }
/// ```
/// [`Index`]: struct.Index.html
/// [`settings`]: enum.IndexSettings.html
#[derive(Debug, Clone, Serialize, Deserialize, Default, TypedBuilder)]
#[serde(rename_all = "camelCase")]
pub struct Index {
#[builder(default)]
pub fields: Vec<String>,
#[builder(default, setter(into))]
pub name: String,
#[builder(default)]
pub id: String,
#[builder(default)]
pub is_newly_created: Option<bool>,
#[builder(default)]
pub selectivity_estimate: Option<f32>,
#[builder(default)]
pub in_background: Option<bool>,
#[serde(flatten)]
#[builder(default)]
pub settings: IndexSettings,
}
/// Settings for the different index types. This `enum` also sets the index
/// type.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", tag = "type")]
pub enum IndexSettings {
Primary {
unique: bool,
sparse: bool,
},
Persistent {
unique: bool,
sparse: bool,
deduplicate: bool,
},
Edge {
unique: bool,
sparse: bool,
},
Hash {
unique: bool,
sparse: bool,
deduplicate: bool,
},
Skiplist {
unique: bool,
sparse: bool,
deduplicate: bool,
},
#[serde(rename_all = "camelCase")]
Ttl {
expire_after: u32,
},
#[serde(rename_all = "camelCase")]
Geo {
geo_json: bool,
},
#[serde(rename_all = "camelCase")]
Fulltext {
min_length: u32,
},
}
impl Default for IndexSettings {
fn default() -> Self {
IndexSettings::Persistent {
unique: false,
sparse: false,
deduplicate: false,
}
}
}
/// Represents the set of indexes defined on a collection in ArangoDB.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct IndexCollection {
pub indexes: Vec<Index>,
}
/// Response from ArangoDB when deleting an index
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DeleteIndexResponse {
pub id: String,
}
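#[cfg(test)]
mod tests {
    // A minimal sketch, not part of the upstream test suite: it shows how the
    // `type`-tagged `IndexSettings` enum serializes and what the builder's
    // default settings are, based solely on the serde/TypedBuilder attributes
    // declared above.
    use super::*;

    #[test]
    fn ttl_settings_serialize_with_type_tag() {
        let settings = IndexSettings::Ttl { expire_after: 3600 };
        let json = serde_json::to_value(&settings).unwrap();
        // The variant name becomes the camelCase `type` tag and the field is
        // renamed to camelCase as well.
        assert_eq!(json["type"], "ttl");
        assert_eq!(json["expireAfter"], 3600);
    }

    #[test]
    fn index_builder_defaults_to_persistent_settings() {
        let index = Index::builder().name("my_index").build();
        assert!(matches!(
            index.settings,
            IndexSettings::Persistent {
                unique: false,
                sparse: false,
                deduplicate: false
            }
        ));
    }
}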
| rust | MIT | 4ee57cfdce34a504d94108dedce5abc11809de87 | 2026-01-04T20:24:21.210590Z | false |