file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
next-app/src/components/sidebar/labelDropdown.tsx | TypeScript (TSX) | import { Card, CardContent, Grid, Typography } from "@mui/material";
export const LabelDropdown = (props: any) => (
<Card {...props}>
<CardContent>
<Grid container spacing={3}>
<Grid item>
<Typography variant="h5">Label Dropdown</Typography>
</Grid>
</Grid>
</CardContent>
</Card>
);
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
next-app/src/components/sidebar/modelCard.tsx | TypeScript (TSX) | import { Card, CardContent, Grid, Typography } from "@mui/material";
export const ModelCard = (props: any) => (
<Card {...props}>
<CardContent>
<Grid container spacing={3}>
<Grid item>
<Typography variant="h5">Model Card</Typography>
</Grid>
</Grid>
</CardContent>
</Card>
);
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
next-app/src/components/sidebar/sidebarGrid.tsx | TypeScript (TSX) | import { Container, Grid } from "@mui/material";
import { LabelDropdown } from "./labelDropdown";
import { ModelCard } from "./modelCard";
export const SideBarGrid = () => (
<Grid container lg={3} sm={6} xl={3} xs={12} direction={"column"}>
<Grid>
<LabelDropdown />
</Grid>
<br />
<Grid>
<ModelCard />
</Grid>
</Grid>
);
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
next-app/src/pages/_app.tsx | TypeScript (TSX) | import "@/styles/globals.css";
import { Container, Grid } from "@mui/material";
import NavBar from "../components/mainPage/navBar";
import { MetricsGrid } from "../components/metrics/metricsGrid";
import { SideBarGrid } from "../components/sidebar/sidebarGrid";
import { ImageGrid } from "../components/graphs/imageGrid";
const Page = () => (
<>
<NavBar />
<br />
<br />
<Container>
<Grid container>
<Grid>
<SideBarGrid />
</Grid>
<Grid>
<MetricsGrid />
<br />
<br />
<ImageGrid />
</Grid>
</Grid>
</Container>
</>
);
export default Page;
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
next-app/src/pages/index.tsx | TypeScript (TSX) | import React from "react";
import App from "./_app";
export default function Home() {
return <App />;
}
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
next-app/src/styles/globals.css | CSS | /* # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. */
.pretty-container {
border-radius: 8px;
background-color: #f9f9f9;
margin: 5px;
padding: 10px;
position: relative;
box-shadow: 2px 2px 2px lightgrey;
}
.metric-container {
border-radius: 8px;
background-color: #f9f9f9;
margin: 10px;
margin-top: 0%;
padding: 10px;
position: relative;
box-shadow: 2px 2px 2px lightgrey;
}
.metric-card-text {
margin: 0px;
padding: 0px;
font-family: FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif;
color: darkslategray;
}
.model-card-container {
border-radius: 8px;
background-color: #f9f9f9;
margin: 0px;
padding: 0px;
position: relative;
box-shadow: 2px 2px 2px lightgrey;
}
.model-card-text {
margin: 0px;
padding: 1px;
font-family: FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif;
color: darkslategray;
}
.card-title {
margin: 0px;
padding: 0px;
font-family: Ucityweb, sans-serif;
font-weight: normal;
}
/* .app-title {
font-family: Montserrat, sans-serif;
} */
#left-fig .modebar {
margin-top: 10px;
}
#right-fig .modebar {
margin-top: 10px;
}
body {
margin: 0px;
padding: 0px;
}
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
setup.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from setuptools import setup
rootdir = Path(__file__).parent
long_description = (rootdir / "README.md").read_text()
setup(
name="visionpod",
# package_dir={"": "src"},
# packages=["visionpod"],
version="0.0.1",
description="An End to End ML Product Example",
long_description=long_description,
long_description_content_type="text/markdown",
author="Justin Goheen",
license="Apache 2.0",
author_email="",
classifiers=[
"Environment :: Console",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Information Analysis",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
)
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
tests/test_datamodule.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import torch
from visionpod.pipeline.datamodule import PodDataModule
def test_module_not_abstract():
_ = PodDataModule()
def test_prepare_data():
data_module = PodDataModule()
data_module.prepare_data()
networkpath = Path(__file__).parent
projectpath = networkpath.parents[0]
datapath = os.path.join(projectpath, "data", "cache")
assert "PodDataset" in os.listdir(datapath)
def test_setup():
data_module = PodDataModule()
data_module.prepare_data()
data_module.setup()
data_keys = ["train_data", "test_data", "val_data"]
assert all(key in dir(data_module) for key in data_keys)
def test_trainloader():
data_module = PodDataModule()
data_module.prepare_data()
data_module.setup()
loader = data_module.train_dataloader()
sample = loader.dataset[0][0]
assert isinstance(sample, torch.Tensor)
def test_testloader():
data_module = PodDataModule()
data_module.prepare_data()
data_module.setup()
loader = data_module.test_dataloader()
sample = loader.dataset[0][0]
assert isinstance(sample, torch.Tensor)
def test_valloader():
data_module = PodDataModule()
data_module.prepare_data()
data_module.setup()
loader = data_module.val_dataloader()
sample = loader.dataset[0][0]
assert isinstance(sample, torch.Tensor)
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
tests/test_network_module.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from visionpod.core.module import Decoder, Encoder, PodModule
def test_module_not_abstract():
_ = PodModule()
def test_module_forward():
input_sample = torch.randn((1, 784))
model = PodModule()
preds, label = model.forward(input_sample)
assert preds.shape == input_sample.shape
def test_module_training_step():
input_sample = torch.randn((1, 784)), 1
model = PodModule()
loss = model.training_step(input_sample)
assert isinstance(loss, torch.Tensor)
def test_optimizer():
model = PodModule()
optimizer = model.configure_optimizers()
optimizer_base_class = optimizer.__class__.__base__.__name__
assert optimizer_base_class == "Optimizer"
def test_encoder_not_abstract():
_ = Encoder()
def test_encoder_forward():
input_sample = torch.randn((1, 784))
model = Encoder()
output = model.forward(input_sample)
assert output.shape == torch.Size([1, 3])
def test_decoder_not_abstract():
_ = Decoder()
def test_decoder_forward():
input_sample = torch.randn((1, 3))
model = Decoder()
output = model.forward(input_sample)
assert output.shape == torch.Size([1, 784])
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/__init__.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/cli/__init__.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/cli/console.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import click
from visionpod.cli.utils import common_destructive_flow, make_bug_trainer, teardown
from visionpod.components.hpo import sweep
from visionpod.core.module import PodModule
from visionpod.core.trainer import PodTrainer
from visionpod.fabric.bugreport import bugreport
from visionpod.pipeline.datamodule import PodDataModule
FILEPATH = Path(__file__)
PKGPATH = FILEPATH.parents[1]
PROJECTPATH = FILEPATH.parents[2]
@click.group()
def main() -> None:
pass
@main.command("teardown")
def _teardown() -> None:
common_destructive_flow([teardown], command_name="teardown")
@main.command("bug-report")
def bug_report() -> None:
bugreport.main()
print("\n")
make_bug_trainer()
trainer = os.path.join(PKGPATH, "core", "bug_trainer.py")
run_command = " ".join(["python", trainer, " 2> boring_trainer_error.md"])
os.system(run_command)
os.remove(trainer)
@main.group("trainer")
def trainer() -> None:
pass
# TODO add help description
@trainer.command("help")
def help() -> None:
trainer = os.path.join(PKGPATH, "core", "trainer.py")
os.system(f"python {trainer} --help")
@trainer.command("fast-dev-run")
def fast_dev_run() -> None:
model = PodModule()
dm = PodDataModule()
trainer = PodTrainer(fast_dev_run=True)
trainer.fit(model=model, datamodule=dm)
@trainer.command("sweep-and-train")
@click.option("--em", default="wandb", type=click.Choice(["wandb", "optuna"]))
@click.option("--project-name", default="visionpod")
@click.option("--trial-count", default=10)
@click.option("--persist_model", is_flag=True)
@click.option("--persist_predictions", is_flag=True)
@click.option("--persist_splits", is_flag=True)
def sweep_and_train(em, project_name, trial_count, persist_model, persist_predictions, persist_splits) -> None:
project_name = "-".join([project_name, em])
trainer = sweep.TrainFlow(experiment_manager=em, project_name=project_name, trial_count=trial_count)
trainer.run(persist_model=persist_model, persist_predictions=persist_predictions, persist_splits=persist_splits)
@trainer.command("train-only")
@click.option("--em", default="wandb", type=click.Choice(["wandb", "optuna"]))
@click.option("--project-name", default="visionpod")
@click.option("--trial-count", default=10)
@click.option("--persist_model", is_flag=True)
@click.option("--persist_predictions", is_flag=True)
@click.option("--persist_splits", is_flag=True)
def train_only(em, project_name, trial_count, persist_model, persist_predictions, persist_splits) -> None:
pass
@trainer.command("sweep-only")
@click.option("--project-name", default="visionpod")
def sweep_only(project_name) -> None:
pass
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/cli/utils.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from pathlib import Path
from typing import Union
import click
from rich import print as rprint
from rich.console import Console
from rich.table import Table
FILEPATH = Path(__file__)
PROJECTPATH = FILEPATH.parents[2]
PKGPATH = FILEPATH.parents[1]
def _preserve_dir(main_source_dir: str, sub_source_dir: str, destination: str) -> None:
destinationpath = os.path.join(PROJECTPATH, destination)
if not os.path.isdir(destinationpath):
os.mkdir(destinationpath)
src = os.path.join(PROJECTPATH, main_source_dir, sub_source_dir)
dest = os.path.join(PROJECTPATH, destinationpath, main_source_dir, sub_source_dir)
shutil.copytree(src, dest)
def preserve_examples() -> None:
_preserve_dir(PKGPATH.name, "core", "examples")
_preserve_dir(PKGPATH.name, "pipeline", "examples")
def _clean_and_build_package(module_to_copy: Union[str, Path]) -> None:
src = os.path.join(FILEPATH.parent, "init", module_to_copy)
dest = os.path.join(PROJECTPATH, PKGPATH, module_to_copy)
shutil.rmtree(dest)
shutil.copytree(src, dest)
def make_new_package() -> None:
_clean_and_build_package("core")
_clean_and_build_package("pipeline")
def build() -> None:
preserve_examples()
make_new_package()
def teardown() -> None:
do_not_delete = "README.md"
target_dirs = [
os.path.join(PROJECTPATH, "models", "checkpoints"),
os.path.join(PROJECTPATH, "models", "onnx"),
os.path.join(PROJECTPATH, "logs", "optuna"),
os.path.join(PROJECTPATH, "logs", "tensorboard"),
os.path.join(PROJECTPATH, "logs", "torch_profiler"),
os.path.join(PROJECTPATH, "logs", "wandb_logs"),
os.path.join(PROJECTPATH, "data", "cache"),
os.path.join(PROJECTPATH, "data", "predictions"),
os.path.join(PROJECTPATH, "data", "training_split"),
os.path.join(PROJECTPATH, "docs"),
]
for dir in target_dirs:
for target in os.listdir(dir):
targetpath = os.path.join(PROJECTPATH, dir, target)
if not os.path.isdir(targetpath):
if target != do_not_delete:
os.remove(targetpath)
else:
dirpath = os.path.join(PROJECTPATH, dir, target)
shutil.rmtree(dirpath)
def make_bug_trainer():
source = os.path.join(PROJECTPATH, "vision_pod", "cli", "bugreport", "trainer.py")
destination = os.path.join(PROJECTPATH, "vision_pod", "core", "bug_trainer.py")
shutil.copyfile(source, destination)
def show_purge_table(command_name) -> None:
# TITLE
table = Table(title="Directories To Be Purged")
# COLUMNS
table.add_column("Directory", justify="right", style="cyan", no_wrap=True)
table.add_column("Contents", style="magenta")
# ROWS
trash = ["data", "logs", "models"]
if command_name == "init":
trash.append(os.path.join(PKGPATH, "core"))
for dirname in trash:
dirpath = os.path.join(os.getcwd(), dirname)
contents = ", ".join([f for f in os.listdir(dirpath) if f != "README.md"])
table.add_row(dirname, contents)
# SHOW
console = Console()
console.print(table)
def show_destructive_behavior_warning(command_name) -> None:
"""
uses rich console markup
notes: https://rich.readthedocs.io/en/stable/markup.html
"""
print()
rprint(":warning: [bold red]Alert![/bold red] This action has destructive behavior! :warning: ")
print()
rprint("The following directories will be [bold red]purged[/bold red]")
print()
show_purge_table(command_name)
print()
def common_destructive_flow(commands: list, command_name: str) -> None:
show_destructive_behavior_warning(command_name)
if click.confirm("Do you want to continue"):
for command in commands:
command()
print()
rprint(f"[bold green]{command_name.title()} complete[bold green]")
print()
else:
print()
rprint("[bold green]No Action Taken[/bold green]")
print()
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/components/__init__.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/components/hpo/__init__.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/components/hpo/sweep.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
from typing import Any, Dict, List, Optional
import optuna
import wandb
from lightning import LightningFlow
from lightning.pytorch.callbacks import EarlyStopping
from lightning.pytorch.loggers import WandbLogger
from optuna.trial import FrozenTrial, Trial, TrialState
from rich.console import Console
from rich.table import Table
from torch import optim
from visionpod import conf
from visionpod.core.module import PodModule
from visionpod.core.trainer import PodTrainer
from visionpod.pipeline.datamodule import PodDataModule
class ObjectiveWork:
def __init__(
self,
sweep_config: Dict[str, Any],
project_name: str,
wandb_save_dir: str,
experiment_manager: str = "wandb",
):
self.experiment_manager = experiment_manager
self.project_name = project_name
self.wandb_save_dir = wandb_save_dir
self.sweep_config = sweep_config
if self.experiment_manager == "wandb":
self.sweep_id = wandb.sweep(sweep=self.sweep_config, project=project_name)
if self.experiment_manager == "optuna":
self.sweep_id = wandb.util.generate_id()
self.sweep_name = "-".join(["Sweep", self.sweep_id])
self.sweep_config.update({"name": self.sweep_name})
self.datamodule = PodDataModule()
self.trial_number = 1
@property
def wandb_settings(self) -> Dict[str, Any]:
return self.trainer.logger.experiment.settings
@property
def sweep_url(self) -> str:
return "/".join([self.entity, self.project_name, "sweeps", self.sweep_id])
@property
def entity(self) -> str:
return self.trainer.logger.experiment.entity
@property
def trials(self) -> List[FrozenTrial]:
return self._optuna_study.trials
@property
def pruned_trial(self) -> List[FrozenTrial]:
return self._optuna_study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
@property
def complete_trials(self) -> List[FrozenTrial]:
return self._optuna_study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])
@property
def best_trial(self) -> FrozenTrial:
return self._optuna_study.best_trial
@property
def artifact_path(self) -> str:
"""helps to sync wandb and optuna directory names for logs"""
log_dir = self.wandb_settings.log_user or self.wandb_settings.log_internal
if log_dir:
log_dir = os.path.dirname(log_dir.replace(os.getcwd(), "."))
return str(log_dir).split(os.sep)[-2]
def _set_artifact_dir(self) -> None:
"""sets optuna log file
Note:
borrowed from Optuna
see https://github.com/optuna/optuna/blob/fd841edc732124961113d1915ee8b7f750a0f04c/optuna/cli.py#L1026
"""
root_logger = logging.getLogger("optuna")
root_logger.setLevel(logging.DEBUG)
artifact_dir = os.path.join(conf.OPTUNAPATH, self.artifact_path)
if not os.path.isdir(artifact_dir):
os.mkdir(artifact_dir)
file_handler = logging.FileHandler(filename=os.path.join(conf.OPTUNAPATH, artifact_dir, "optuna.log"))
file_handler.setFormatter(optuna.logging.create_default_formatter())
root_logger.addHandler(file_handler)
def persist_model(self) -> None:
input_sample = self.trainer.datamodule.train_data.dataset[0][0]
self.trainer.model.to_onnx(conf.MODELPATH, input_sample=input_sample, export_params=True)
def persist_predictions(self) -> None:
self.trainer.persist_predictions()
def persist_splits(self) -> None:
self.trainer.datamodule.persist_splits()
def _optuna_objective(self, trial: Trial) -> float:
"""
Note:
see:
- https://github.com/optuna/optuna-examples/blob/main/pytorch/pytorch_lightning_simple.py
- https://github.com/nzw0301/optuna-wandb/blob/main/part-1/wandb_optuna.py
- https://medium.com/optuna/optuna-meets-weights-and-biases-58fc6bab893
- PyTorch with Optuna (by PyTorch) https://youtu.be/P6NwZVl8ttc
"""
lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
optimizer_name = trial.suggest_categorical("optimizer", ["Adam", "RMSprop", "SGD"])
optimizer = getattr(optim, optimizer_name)
dropout = trial.suggest_float("dropout", 0.2, 0.5)
model = PodModule(dropout=dropout, optimizer=optimizer, lr=lr)
config = dict(trial.params)
config["trial.number"] = trial.number
trainer_init_kwargs = {
"max_epochs": 10,
"callbacks": [
EarlyStopping(monitor="training_loss", mode="min"),
],
}
if hasattr(self, "trainer"):
# stops previous wandb run so that a new run can be initialized on new trial
# also helps to avoid hanging process in wandb sdk
# i.e. if this is called after self.trainer.fit,
# a key error is encountered in wandb.sdk on the final trial
# and wandb does not finish, and does not return control to TrialFlow
self.trainer.logger.experiment.finish()
self.trainer = PodTrainer(
logger=WandbLogger(
project=self.project_name,
name="-".join(["sweep", self.sweep_id, "trial", str(trial.number)]),
group=self.sweep_config["name"],
save_dir=self.wandb_save_dir,
config=config,
),
**trainer_init_kwargs,
)
# set optuna logs dir
self._set_artifact_dir()
# logs hyperparameters to logs/wandb_logs/wandb/{run_name}/files/config.yaml
hyperparameters = dict(optimizer=optimizer_name, lr=lr, dropout=dropout)
self.trainer.logger.log_hyperparams(hyperparameters)
self.trainer.fit(model=model, datamodule=self.datamodule)
return self.trainer.callback_metrics["val_acc"].item()
def _wandb_objective(self) -> float:
logger = WandbLogger(
project=self.project_name,
name="-".join(["sweep", self.sweep_id, "trial", str(self.trial_number)]),
group=self.sweep_config["name"],
save_dir=self.wandb_save_dir,
)
lr = wandb.config.lr
optimizer_name = wandb.config.optimizer
optimizer = getattr(optim, optimizer_name)
dropout = wandb.config.dropout
model = PodModule(dropout=dropout, optimizer=optimizer, lr=lr)
trainer_init_kwargs = {
"max_epochs": 10,
"callbacks": [
EarlyStopping(monitor="training_loss", mode="min"),
],
}
self.trainer = PodTrainer(
logger=logger,
**trainer_init_kwargs,
)
# logs hyperparameters to logs/wandb_logs/wandb/{run_name}/files/config.yaml
hyperparameters = dict(optimizer=optimizer_name, lr=lr, dropout=dropout)
self.trainer.logger.log_hyperparams(hyperparameters)
self.trainer.fit(model=model, datamodule=self.datamodule)
self.trial_number += 1
return self.trainer.callback_metrics["val_acc"].item()
def run(
self,
trial_count: int = 10,
optuna_study_name: Optional[str] = None,
) -> float:
if self.experiment_manager == "optuna":
self._study = optuna.create_study(direction="maximize", study_name=optuna_study_name)
self._study.optimize(self._optuna_objective, n_trials=trial_count, timeout=600)
if self.experiment_manager == "wandb":
wandb.agent(self.sweep_id, function=self._wandb_objective, count=trial_count)
def stop(self) -> None:
if self.experiment_manager == "wandb":
os.system(f"wandb sweep --stop {self.entity}/{self.project_name}/{self.sweep_id}")
if self.experiment_manager == "optuna":
self.trainer.logger.experiment.finish()
class SweepFlow:
def __init__(
self,
project_name: Optional[str] = None,
trial_count: int = 10,
wandb_dir: Optional[str] = conf.WANDBPATH,
experiment_manager: str = "wandb",
) -> None:
"""
Notes:
see: https://community.wandb.ai/t/run-best-model-off-sweep/2423
"""
# settings
self.experiment_manager = experiment_manager
self.project_name = project_name
self.wandb_dir = wandb_dir
self.trial_count = trial_count
# _ helps to avoid LightningFlow from checking JSON serialization if converting to Lightning App
self._sweep_config = dict(
method="random",
metric={"goal": "maximize", "name": "val_acc"},
parameters={
"lr": {"min": 0.0001, "max": 0.1},
"optimizer": {"distribution": "categorical", "values": ["Adam", "RMSprop", "SGD"]},
"dropout": {"min": 0.2, "max": 0.5},
},
)
self._objective_work = ObjectiveWork(
self._sweep_config, self.project_name, self.wandb_dir, experiment_manager=self.experiment_manager
)
self._wandb_api = wandb.Api()
@property
def best_params(self):
if self.experiment_manager == "wandb":
return self._wandb_api.sweep(self._objective_work.sweep_url).best_run().config
if self.experiment_manager == "optuna":
return self._objective_work._study.best_params
@staticmethod
def _display_report(trial_metric_names: List[str], trial_info: List[str]) -> None:
table = Table(title="Study Statistics")
for col in trial_metric_names:
table.add_column(col, header_style="cyan")
table.add_row(*trial_info)
console = Console()
console.print(table, new_line_start=True)
def run(
self,
experiment_manager: str,
persist_model: bool = False,
persist_predictions: bool = False,
persist_splits: bool = False,
display_report: bool = False,
) -> None:
# this is blocking
self._objective_work.run(trial_count=self.trial_count)
# will only run after objective is complete
self._objective_work.stop()
if display_report:
self._display_report(
trial_metric_names=list(self.best_params.keys()),
trial_info=list(self.best_params.values()),
)
if persist_model:
self._objective_work.persist_model()
if persist_predictions:
self._objective_work.persist_predictions()
if persist_splits:
self._objective_work.persist_splits()
if issubclass(SweepFlow, LightningFlow):
sys.exit()
class TrainWork:
def persist_model(self) -> None:
input_sample = self.trainer.datamodule.train_data.dataset[0][0]
self.trainer.model.to_onnx(conf.MODELPATH, input_sample=input_sample, export_params=True)
def persist_predictions(self) -> None:
self.trainer.persist_predictions()
def persist_splits(self) -> None:
self.trainer.datamodule.persist_splits()
def run(
self,
lr: float,
dropout: float,
optimizer: str,
project_name: str,
training_run_name: str,
wandb_dir: Optional[str] = conf.WANDBPATH,
) -> None:
self.model = PodModule(lr=lr, dropout=dropout, optimizer=getattr(optim, optimizer))
self.datamodule = PodDataModule()
logger = WandbLogger(
project=project_name,
name=training_run_name,
group="Training Runs",
save_dir=wandb_dir,
)
trainer_init_kwargs = {
"max_epochs": 100,
"callbacks": [
EarlyStopping(monitor="training_loss", mode="min"),
],
}
self.trainer = PodTrainer(logger=logger, **trainer_init_kwargs)
self.trainer.fit(model=self.model, datamodule=self.datamodule)
class TrainFlow:
    """Orchestrates a hyperparameter sweep followed by one final training run."""

    def __init__(
        self,
        experiment_manager: str = "wandb",
        project_name: Optional[str] = None,
        trial_count: int = 10,
    ) -> None:
        self.experiment_manager = experiment_manager
        self.project_name = project_name
        self._sweep_flow = SweepFlow(project_name=project_name, trial_count=trial_count)
        self._train_work = TrainWork()

    @property
    def best_params(self) -> Dict[str, Any]:
        # Best hyperparameters found by the sweep.
        return self._sweep_flow.best_params

    @property
    def lr(self) -> float:
        return self.best_params["lr"]

    @property
    def dropout(self) -> float:
        return self.best_params["dropout"]

    @property
    def optimizer(self) -> str:
        return self.best_params["optimizer"]

    @property
    def sweep_group(self) -> str:
        return self._sweep_flow._sweep_config["name"]

    @property
    def run_name(self) -> str:
        # Derive the training run name from the sweep name, e.g. "Sweep-x" -> "train-x".
        return self.sweep_group.replace("Sweep", "train")

    def run(
        self,
        persist_model: bool = False,
        persist_predictions: bool = False,
        persist_splits: bool = False,
    ) -> None:
        """Runs the sweep, trains with the best params, then persists artifacts as requested."""
        self._sweep_flow.run(experiment_manager=self.experiment_manager, display_report=False)
        self._train_work.run(
            lr=self.lr,
            dropout=self.dropout,
            optimizer=self.optimizer,
            project_name=self.project_name,
            training_run_name=self.run_name,
        )
        if persist_model:
            self._train_work.persist_model()
        if persist_predictions:
            self._train_work.persist_predictions()
        if persist_splits:
            self._train_work.persist_splits()
# NOTE(review): same pattern as the SweepFlow guard earlier in this module —
# exits when TrainFlow is a LightningFlow subclass. Confirm this is intended
# behavior for the non-app execution path.
if issubclass(TrainFlow, LightningFlow):
    sys.exit()
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/conf.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
# Seed applied across the project for reproducibility.
GLOBALSEED = 42

# Resolve project-relative paths from this file's location.
filepath = Path(__file__)
PROJECTPATH = filepath.parents[1]

# All research artifacts live under <project>/research.
_researchpath = os.path.join(PROJECTPATH, "research")
_logspath = os.path.join(_researchpath, "logs")

# Profiler / experiment-logger output locations.
TORCHPROFILERPATH = os.path.join(_logspath, "torch_profiler")
SIMPLEPROFILERPATH = os.path.join(_logspath, "simple_profiler")
TENSORBOARDPATH = os.path.join(_logspath, "tensorboard")
WANDBPATH = os.path.join(_logspath, "wandb")
OPTUNAPATH = os.path.join(_logspath, "optuna")

# Model artifacts.
CHKPTSPATH = os.path.join(_researchpath, "models", "checkpoints")
MODELPATH = os.path.join(_researchpath, "models", "onnx", "model.onnx")

# Data artifacts.
DATASETPATH = os.path.join(_researchpath, "data")
PREDSPATH = os.path.join(DATASETPATH, "predictions", "predictions.pt")
SPLITSPATH = os.path.join(DATASETPATH, "training_split")
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/core/__init__.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/core/module.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lightning as L
import torch
import torch.nn.functional as F
from torch import nn, optim
from torchmetrics.functional import accuracy
class VisionNet(nn.Module):
    """Small CNN for 32x32 RGB images producing 10 class logits.

    Architecture follows the classic PyTorch CIFAR-10 tutorial: two
    conv+ReLU+max-pool stages feeding three fully connected layers.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: 3->6 and 6->16 channels, 5x5 kernels, 2x2 pooling.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: 16*5*5 flattened features -> 10 logits.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Two conv -> ReLU -> pool stages.
        features = self.pool(F.relu(self.conv1(x)))
        features = self.pool(F.relu(self.conv2(features)))
        # Flatten everything but the batch dimension, then classify.
        flat = torch.flatten(features, 1)
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)
class PodModule(L.LightningModule):
    """A LightningModule wrapping VisionNet for 10-class image classification.

    # Arguments
        optimizer: a torch.optim optimizer class, or its attribute name in
            ``torch.optim`` (e.g. ``"Adam"``; lookup is case-sensitive).
        lr: learning rate passed to the optimizer.
        accuracy_task: task argument for ``torchmetrics.functional.accuracy``.
        num_classes: number of output classes.

    NOTE(review): flows/train.py constructs PodModule with a ``dropout``
    kwarg that this signature does not accept — reconcile the two files.
    """

    def __init__(
        self,
        optimizer: str = "Adam",
        lr: float = 1e-3,
        accuracy_task: str = "multiclass",
        num_classes: int = 10,
    ):
        super().__init__()
        self.vision_net = VisionNet()
        # Accept either an optimizer class or its torch.optim attribute name.
        # (Fix: the previous default "adam" always raised AttributeError —
        # torch.optim exposes "Adam". Accepting a class also supports callers
        # that resolve the class themselves.)
        self.optimizer = getattr(optim, optimizer) if isinstance(optimizer, str) else optimizer
        self.lr = lr
        self.accuracy_task = accuracy_task
        self.num_classes = num_classes
        self.save_hyperparameters()

    def forward(self, x):
        """Returns raw class logits for a batch of images."""
        return self.vision_net(x)

    def training_step(self, batch):
        return self._common_step(batch, "training")

    def test_step(self, batch, *args):
        self._common_step(batch, "test")

    def validation_step(self, batch, *args):
        self._common_step(batch, "val")

    def _common_step(self, batch, stage):
        """Shared forward/loss/metric logic for training, val, and test steps."""
        x, y = batch
        # (Fix: removed a leftover debug print of the batch shape.)
        y_hat = self.vision_net(x)
        loss = F.cross_entropy(y_hat, y)
        if stage in ["val", "test"]:
            acc = accuracy(y_hat, y, task=self.accuracy_task, num_classes=self.num_classes)
            self.log(f"{stage}_acc", acc)
        # Loss is logged for every stage; EarlyStopping monitors "training_loss".
        self.log(f"{stage}_loss", loss)
        return loss

    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        x, y = batch
        return self(x)

    def configure_optimizers(self):
        return self.optimizer(self.parameters(), lr=self.lr)
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/core/trainer.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import lightning as L
import torch
from lightning.pytorch import seed_everything
from lightning.pytorch.callbacks import ModelCheckpoint
from lightning.pytorch.loggers import Logger, TensorBoardLogger
from lightning.pytorch.profilers import Profiler, PyTorchProfiler
from visionpod import conf
class PodTrainer(L.Trainer):
    """Project Trainer with default TensorBoard logging, profiling, and checkpointing.

    # Arguments
        logger: a Lightning logger; defaults to a TensorBoardLogger under
            conf.TENSORBOARDPATH.
        profiler: a Lightning profiler; defaults to a PyTorchProfiler under
            conf.TORCHPROFILERPATH.
        callbacks: extra callbacks; a ModelCheckpoint writing to conf.CHKPTSPATH
            is always appended.
        plugins: extra plugins.
        set_seed: when True, seeds all RNGs with conf.GLOBALSEED first.
        trainer_init_kwargs: forwarded verbatim to lightning.Trainer.
    """

    def __init__(
        self,
        logger: Optional[Logger] = None,
        profiler: Optional[Profiler] = None,
        callbacks: Optional[List] = None,
        plugins: Optional[List] = None,
        set_seed: bool = True,
        **trainer_init_kwargs: Any,
    ) -> None:
        # Fix: the previous defaults were mutable lists ([]), which are shared
        # across instances in Python, and an explicit callbacks=None (allowed
        # by the Optional annotation) crashed the `callbacks + [...]` below.
        callbacks = list(callbacks) if callbacks else []
        plugins = list(plugins) if plugins else []
        # SET SEED
        if set_seed:
            seed_everything(conf.GLOBALSEED, workers=True)
        super().__init__(
            logger=logger or TensorBoardLogger(conf.TENSORBOARDPATH, name="logs"),
            profiler=profiler or PyTorchProfiler(dirpath=conf.TORCHPROFILERPATH, filename="profiler"),
            callbacks=callbacks + [ModelCheckpoint(dirpath=conf.CHKPTSPATH, filename="model")],
            plugins=plugins,
            **trainer_init_kwargs,
        )

    def persist_predictions(self, predictions_dir: Optional[Union[str, Path]] = conf.PREDSPATH) -> None:
        """Evaluates the best checkpoint on the test set, then saves val-set predictions.

        # Arguments
            predictions_dir: destination for ``torch.save`` — despite the
                name, this is a file path, not a directory.
        """
        self.test(ckpt_path="best", datamodule=self.datamodule)
        predictions = self.predict(self.model, self.datamodule.val_dataloader())
        torch.save(predictions, predictions_dir)
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/fabric/bugreport/bugreport.py | Python | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Diagnose your system and show basic information.
This server mainly to get detail info for better bug reporting.
"""
import os
import platform
import sys
import numpy
import torch
import tqdm
sys.path += [os.path.abspath(".."), os.path.abspath("")]
try:
import pytorch_lightning # noqa: E402
except ModuleNotFoundError:
pass
try:
import lightning
except ModuleNotFoundError:
pass
try:
import lightning_app
except ModuleNotFoundError:
pass
# Formatting constants for nice_print: per-nesting-level indent string and
# the padded width of the key column.
LEVEL_OFFSET = "\t"
KEY_PADDING = 20
def info_system():
    """Collects basic OS and interpreter details for the bug report."""
    details = dict(
        OS=platform.system(),
        architecture=platform.architecture(),
        version=platform.version(),
        processor=platform.processor(),
        python=platform.python_version(),
    )
    return details
def info_cuda():
    """Collects CUDA availability, CUDA version, and visible GPU names."""
    gpu_names = [torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())]
    return {
        "GPU": gpu_names,
        # 'nvidia_driver': get_nvidia_driver_version(run_lambda),
        "available": torch.cuda.is_available(),
        "version": torch.version.cuda,
    }
def info_packages():
    """Collects versions of key packages; optional packages report None when absent."""

    def _version_if_loaded(module_name):
        # The sys.modules lookup short-circuits so an uninstalled optional
        # package is reported as None instead of raising.
        module = sys.modules.get(module_name)
        return module.__version__ if module else None

    return {
        "numpy": numpy.__version__,
        "pyTorch_version": torch.__version__,
        "pyTorch_debug": torch.version.debug,
        "pytorch-lightning": _version_if_loaded("pytorch_lightning"),
        "lightning": _version_if_loaded("lightning"),
        "lightning_app": _version_if_loaded("lightning_app"),
        "tqdm": tqdm.__version__,
    }
def nice_print(details, level=0):
    """Renders a (possibly nested) details dict as indented report lines.

    Keys are sorted; nested dicts recurse one indent level deeper; sequences
    become bulleted sub-lines; scalars are padded into two aligned columns.
    """
    indent = level * LEVEL_OFFSET
    rendered = []
    for name in sorted(details):
        value = details[name]
        label = f"* {name}:" if level == 0 else f"- {name}:"
        if isinstance(value, dict):
            rendered.append(indent + label)
            rendered.extend(nice_print(value, level + 1))
        elif isinstance(value, (set, list, tuple)):
            rendered.append(indent + label)
            child_indent = (level + 1) * LEVEL_OFFSET
            rendered.extend(child_indent + "- " + item for item in value)
        else:
            # Old-style %-format builds the pattern (e.g. "{:20s} {}") that
            # pads the key column to KEY_PADDING characters.
            row = ("{:%is} {}" % KEY_PADDING).format(label, value)
            rendered.append(indent + row)
    return rendered
def main():
    """Prints the full diagnostic report and mirrors it to bug_report.md."""
    details = {"System": info_system(), "CUDA": info_cuda(), "Packages": info_packages()}
    lines = nice_print(details)
    text = os.linesep.join(lines)
    print(text)
    # Fix: the original called file.close() inside the `with` block (redundant
    # — the context manager closes the file) and used writelines() on a str,
    # which iterates it character by character; write() is the single-string
    # API. The bytes written are unchanged.
    with open("bug_report.md", "w") as file:
        file.write(text)


if __name__ == "__main__":
    main()
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/fabric/bugreport/trainer.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import hydra
import lightning as L
from lightning.pytorch.demos.boring_classes import BoringModel
from omegaconf.dictconfig import DictConfig
# SET PATHS
# `filepath` anchors the hydra config lookup (config_path below uses its parent).
# NOTE(review): PROJECTPATH is the cwd at import time, not necessarily the repo
# root — and hydra may change the working directory per run; confirm.
filepath = Path(__file__)
PROJECTPATH = os.getcwd()
@hydra.main(
    config_path=str(filepath.parent),
    config_name="trainer",
    version_base=hydra.__version__,
)
def main(cfg: DictConfig) -> None:
    """Smoke-tests a Trainer built entirely from the hydra `trainer` config.

    Intended for bug reports: fits a BoringModel with every Trainer flag
    driven by config so a failing configuration can be reproduced exactly.

    NOTE(review): several kwargs below (`gpus`, `auto_select_gpus`,
    `tpu_cores`, `ipus`, `track_grad_norm`, `weights_save_path`,
    `resume_from_checkpoint`, `auto_lr_find`, `replace_sampler_ddp`,
    `auto_scale_batch_size`, `amp_backend`, `amp_level`,
    `move_metrics_to_cpu`, `multiple_trainloader_mode`, `num_processes`)
    were removed in Lightning 2.x — confirm the pinned lightning version
    still accepts them.
    """
    model = BoringModel()
    trainer = L.Trainer(
        max_epochs=cfg.trainer.max_epochs,
        limit_train_batches=cfg.trainer.limit_train_batches,
        limit_predict_batches=cfg.trainer.limit_predict_batches,
        limit_test_batches=cfg.trainer.limit_test_batches,
        limit_val_batches=cfg.trainer.limit_val_batches,
        accelerator=cfg.trainer.accelerator,
        devices=cfg.trainer.devices,
        deterministic=cfg.trainer.deterministic,
        strategy=cfg.trainer.strategy,
        precision=cfg.trainer.precision,
        enable_model_summary=cfg.trainer.enable_model_summary,
        enable_checkpointing=cfg.trainer.enable_checkpointing,
        enable_progress_bar=cfg.trainer.enable_progress_bar,
        # logger=logger,
        # profiler=profiler,
        # callbacks=callbacks,
        # plugins=plugins,
        default_root_dir=cfg.trainer.default_root_dir,
        gradient_clip_val=cfg.trainer.gradient_clip_val,
        gradient_clip_algorithm=cfg.trainer.gradient_clip_algorithm,
        num_nodes=cfg.trainer.num_nodes,
        num_processes=cfg.trainer.num_processes,
        gpus=cfg.trainer.gpus,
        auto_select_gpus=cfg.trainer.auto_select_gpus,
        tpu_cores=cfg.trainer.tpu_cores,
        ipus=cfg.trainer.ipus,
        overfit_batches=cfg.trainer.overfit_batches,
        track_grad_norm=cfg.trainer.track_grad_norm,
        check_val_every_n_epoch=cfg.trainer.check_val_every_n_epoch,
        fast_dev_run=cfg.trainer.fast_dev_run,
        accumulate_grad_batches=cfg.trainer.accumulate_grad_batches,
        min_epochs=cfg.trainer.min_epochs,
        max_steps=cfg.trainer.max_steps,
        min_steps=cfg.trainer.min_steps,
        max_time=cfg.trainer.max_time,
        val_check_interval=cfg.trainer.val_check_interval,
        log_every_n_steps=cfg.trainer.log_every_n_steps,
        sync_batchnorm=cfg.trainer.sync_batchnorm,
        weights_save_path=cfg.trainer.weights_save_path,
        num_sanity_val_steps=cfg.trainer.num_sanity_val_steps,
        resume_from_checkpoint=cfg.trainer.resume_from_checkpoint,
        benchmark=cfg.trainer.benchmark,
        reload_dataloaders_every_n_epochs=cfg.trainer.reload_dataloaders_every_n_epochs,
        auto_lr_find=cfg.trainer.auto_lr_find,
        replace_sampler_ddp=cfg.trainer.replace_sampler_ddp,
        detect_anomaly=cfg.trainer.detect_anomaly,
        auto_scale_batch_size=cfg.trainer.auto_scale_batch_size,
        amp_backend=cfg.trainer.amp_backend,
        amp_level=cfg.trainer.amp_level,
        move_metrics_to_cpu=cfg.trainer.move_metrics_to_cpu,
        multiple_trainloader_mode=cfg.trainer.multiple_trainloader_mode,
    )
    trainer.fit(model=model)


if __name__ == "__main__":
    main()
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/pipeline/__init__.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/pipeline/datamodule.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
from pathlib import Path
from typing import Any, Callable, Optional, Union
import torch
from lightning.pytorch import LightningDataModule
from lightning.pytorch.loggers import Logger
from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from visionpod import conf
from visionpod.pipeline.dataset import PodDataset
filepath = Path(__file__)  # this module's location (currently unused below)
# NOTE(review): cwd at import time, not necessarily the repo root — confirm.
PROJECTPATH = os.getcwd()
NUMWORKERS = int(multiprocessing.cpu_count() // 2)  # DataLoader workers: half the CPUs
class PodDataModule(LightningDataModule):
    """LightningDataModule around PodDataset with an 80/20 train/val split.

    # Arguments
        dataset: dataset class to instantiate (defaults to PodDataset).
        data_dir: download/cache directory for the dataset.
        split: currently unused flag, kept for backward compatibility.
        train_size: fraction of the full training set used for training.
        num_workers: DataLoader worker count.
        transforms: transform pipeline applied to every image.
        batch_size: DataLoader batch size.
    """

    def __init__(
        self,
        dataset: Any = PodDataset,
        data_dir: str = os.path.join(conf.DATASETPATH, "cache"),
        split: bool = True,
        train_size: float = 0.8,
        num_workers: int = NUMWORKERS,
        transforms: Callable = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]
        ),
        batch_size: int = 64,
    ):
        super().__init__()
        self.data_dir = data_dir
        self.dataset = dataset
        self.split = split
        self.train_size = train_size
        self.num_workers = num_workers
        self.transforms = transforms
        self.batch_size = batch_size

    def prepare_data(self, logger: Optional[Logger] = None, log_preprocessing: bool = False) -> None:
        # Download only; per Lightning convention no state is assigned here.
        # (logger / log_preprocessing are currently unused.)
        self.dataset(self.data_dir, download=True)

    def setup(self, stage: Union[str, None] = None) -> None:
        """Creates train/val splits for "fit" and the test set for "test"."""
        if stage == "fit" or stage is None:
            full_dataset = self.dataset(self.data_dir, train=True, transform=self.transforms)
            train_size = int(len(full_dataset) * self.train_size)
            val_size = len(full_dataset) - train_size
            self.train_data, self.val_data = random_split(full_dataset, lengths=[train_size, val_size])
        if stage == "test" or stage is None:
            self.test_data = self.dataset(self.data_dir, train=False, transform=self.transforms)

    def persist_splits(self):
        """Saves all splits for reproducibility.

        Fix: ensures the destination directory exists before torch.save,
        which otherwise raises FileNotFoundError on a fresh checkout.
        """
        os.makedirs(conf.SPLITSPATH, exist_ok=True)
        torch.save(self.train_data, os.path.join(conf.SPLITSPATH, "train.pt"))
        torch.save(self.val_data, os.path.join(conf.SPLITSPATH, "val.pt"))
        if not hasattr(self, "test_data"):
            # Lazily build the test split if setup("test") was never called.
            self.test_data = self.dataset(self.data_dir, train=False, transform=self.transforms)
        torch.save(self.test_data, os.path.join(conf.SPLITSPATH, "test.pt"))

    def train_dataloader(self) -> TRAIN_DATALOADERS:
        return DataLoader(self.train_data, num_workers=self.num_workers, batch_size=self.batch_size)

    def test_dataloader(self) -> EVAL_DATALOADERS:
        return DataLoader(self.test_data, num_workers=self.num_workers, batch_size=self.batch_size)

    def val_dataloader(self) -> EVAL_DATALOADERS:
        return DataLoader(self.val_data, num_workers=self.num_workers, batch_size=self.batch_size)
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/pipeline/dataset.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from torchvision.datasets import CIFAR10 as Dataset
class PodDataset(Dataset):
    """Project dataset entry point; a passthrough subclass of torchvision's CIFAR10.

    Swap the base class (or replace this passthrough) to use a custom torch
    Dataset. A minimal custom Dataset only needs ``__len__`` and
    ``__getitem__`` — e.g. one backed by feature/label CSV files read with
    pandas and returned as float32 tensors.
    """

    def __init__(self, *args: Any, **kwargs: Any):
        super().__init__(*args, **kwargs)
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
visionpod/pipeline/preprocess.py | Python | # Copyright Justin R. Goheen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# use this to create a pre-processing script if needed
| yurijmikhalevich/lightning-pod-vision | 0 | An End to End ML Product Example | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 | |
benchmarks/benchmark-cifar.py | Python | import os
from typing import Iterable, List, Tuple, cast
from PIL import Image
import numpy as np
from tqdm import tqdm
from benchmarks.config import DATASET_DIR, BATCH_SIZE
from rclip import model
from torchvision.datasets import CIFAR100
def main():
    """Zero-shot CIFAR-100 classification benchmark for the rclip CLIP model.

    Scores test images against text embeddings of the class names and
    reports top-1 and top-5 accuracy.
    """
    cifar100 = CIFAR100(root=os.path.join(DATASET_DIR, "cifar100"), download=True, train=False)
    model_instance = model.Model()
    # One text feature vector per CIFAR-100 class name.
    class_description_vectors = model_instance.compute_text_features(cifar100.classes)

    processed = 0
    top1_match = 0
    top5_match = 0
    batch = []

    def process_batch():
        # Encodes the accumulated (image, class) pairs, scores them against
        # all class vectors, and updates the running top-1/top-5 counters.
        nonlocal processed, top1_match, top5_match, batch
        images, target_classes = zip(*batch)
        batch = []
        image_features = model_instance.compute_image_features(cast(List[Image.Image], images))
        similarities = image_features @ class_description_vectors.T
        # argsort is ascending, so the best-scoring class is the last column.
        ordered_predicted_classes = np.argsort(similarities, axis=1)
        target_classes_np = np.array(target_classes)
        top1_match += np.sum(target_classes_np == ordered_predicted_classes[:, -1])
        top5_match += np.sum(np.any(target_classes_np.reshape(-1, 1) == ordered_predicted_classes[:, -5:], axis=1))
        processed += len(images)

    for item in tqdm(cast(Iterable[Tuple[Image.Image, int]], cifar100)):
        batch.append(item)
        if len(batch) < BATCH_SIZE:
            continue
        process_batch()
    if len(batch) > 0:
        # Flush the final partial batch.
        process_batch()

    print(f"Processed: {processed}")
    print(f"Top-1 accuracy: {top1_match / processed}")
    print(f"Top-5 accuracy: {top5_match / processed}")


if __name__ == "__main__":
    main()
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
benchmarks/benchmark-imagenet.py | Python | import os
import tempfile
import numpy as np
from tqdm import tqdm
from benchmarks.config import BATCH_SIZE, DATASET_DIR
from benchmarks.datasets.imagenet_1k.classes import IMAGENET2012_CLASSES # type: ignore
from rclip import model, db
from rclip.main import RClip
# To run this benchmark, clone imagenet-1k dataset from hf to `DATASET_DIR/imagenet_1k`
# https://huggingface.co/datasets/imagenet-1k/tree/main
# Then, untar train_images_X.tar.gz archives under `imagenet_1k/data/`
# TODO(yurij): make this script do that
# You may also need to increase the ulimit to avoid "Too many open files" error:
# `ulimit -n 1024`
def main(tmp_datadir: str):
    """Zero-shot ImageNet-1k benchmark using a freshly built rclip index.

    Indexes the dataset directory with rclip, then scores every stored image
    vector against "photo of <class description>" text embeddings, reporting
    top-1/top-5 accuracy. `tmp_datadir` holds the throwaway rclip database.
    """
    TEST_IMAGE_PREFIX = os.path.join(DATASET_DIR, "imagenet_1k", "data")
    model_instance = model.Model()
    database = db.DB(os.path.join(tmp_datadir, "db.sqlite3"))
    rclip = RClip(model_instance, database, BATCH_SIZE, None)
    # Computes and stores feature vectors for every image under the prefix.
    rclip.ensure_index(TEST_IMAGE_PREFIX)
    ids_list, descriptions = zip(*IMAGENET2012_CLASSES.items())
    class_description_vectors = model_instance.compute_text_features(
        [f"photo of {description}" for description in descriptions]
    )
    ids = np.array(ids_list)

    def get_image_class(filepath: str):
        # Filenames look like "<synset-id>_<n>.<ext>"; the synset id is the label.
        return filepath.split("/")[-1].split("_")[0]

    processed = 0
    top1_match = 0
    top5_match = 0
    batch = []

    def process_batch():
        # Scores a batch of stored vectors against all class vectors and
        # updates the running top-1/top-5 counters.
        nonlocal processed, top1_match, top5_match, batch
        image_features = np.stack([np.frombuffer(image["vector"], np.float32) for image in batch])
        similarities = image_features @ class_description_vectors.T
        # argsort is ascending, so the best-scoring class is the last column.
        ordered_predicted_classes = np.argsort(similarities, axis=1)
        target_classes = np.array([get_image_class(image["filepath"]) for image in batch])
        top1_match += np.sum(target_classes == ids[ordered_predicted_classes[:, -1]])
        top5_match += np.sum(np.any(target_classes.reshape(-1, 1) == ids[ordered_predicted_classes[:, -5:]], axis=1))
        processed += len(batch)
        batch = []

    for image in tqdm(database.get_image_vectors_by_dir_path(TEST_IMAGE_PREFIX)):
        batch.append(image)
        if len(batch) < BATCH_SIZE:
            continue
        process_batch()
    if len(batch) > 0:
        # Flush the final partial batch.
        process_batch()

    print(f"Processed: {processed}")
    print(f"Top-1 accuracy: {top1_match / processed}")
    print(f"Top-5 accuracy: {top5_match / processed}")


if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp_dir:
        print(f"Using temporary directory: {tmp_dir}")
        main(tmp_dir)
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
benchmarks/config.py | Python | import os
# Benchmark settings, overridable via environment variables.
# Where benchmark datasets are stored; defaults to ./datasets next to this file.
DATASET_DIR = os.environ.get("BENCHMARK_DATASET_DIR", os.path.join(os.path.dirname(__file__), "datasets"))
# Images per inference batch.
BATCH_SIZE = int(os.environ.get("BENCHMARK_BATCH_SIZE", 256))
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
benchmarks/similarity_search.py | Python | import os
import tempfile
import numpy as np
from tqdm import tqdm
from benchmarks.config import BATCH_SIZE, DATASET_DIR
from benchmarks.datasets.imagenet_1k.classes import IMAGENET2012_CLASSES # type: ignore
from rclip import model, db
from rclip.main import RClip
# To run this benchmark, clone imagenet-1k dataset from hf to `DATASET_DIR/imagenet_1k`
# https://huggingface.co/datasets/imagenet-1k/tree/main
# Then, untar train_images_X.tar.gz archives under `imagenet_1k/data/`
# TODO(yurij): make this script do that
# You may also need to increase the ulimit to avoid "Too many open files" error:
# `ulimit -n 1024`
def main(tmp_datadir: str):
    """Image-to-image retrieval benchmark on ImageNet-1k.

    For a sample of images per class, searches the index using the image
    itself as the query and records what fraction of the top-100 results
    share the query's class. `tmp_datadir` holds the throwaway rclip database.
    """
    TEST_IMAGE_PREFIX = os.path.join(DATASET_DIR, "imagenet_1k", "data")
    # "mps" selects the Apple-silicon backend — this script assumes a macOS host.
    model_instance = model.Model("mps")
    database = db.DB(os.path.join(tmp_datadir, "db.sqlite3"))
    rclip = RClip(model_instance, database, BATCH_SIZE, None)
    rclip.ensure_index(TEST_IMAGE_PREFIX)

    def get_images_for_class(class_id: str, limit: int = 750):
        # Reaches into the private DB connection to sample random rows of one class.
        return database._con.execute(  # type: ignore
            """
      SELECT filepath, vector FROM images WHERE filepath LIKE ? AND deleted IS NULL ORDER BY RANDOM() LIMIT ?
      """,
            (TEST_IMAGE_PREFIX + f"{os.path.sep}%{os.path.sep}{class_id}_%", limit),
        )

    def get_image_class(filepath: str):
        # Filenames look like "<synset-id>_<n>.<ext>"; the synset id is the label.
        return filepath.split("/")[-1].split("_")[0]

    accuracies = []
    for class_id in tqdm(IMAGENET2012_CLASSES.keys()):
        images = get_images_for_class(class_id, limit=10)
        for image in images:
            results = rclip.search(image["filepath"], TEST_IMAGE_PREFIX, top_k=100)
            top100_classes = [get_image_class(result.filepath) for result in results]
            accuracies.append(np.mean(np.array(top100_classes) == class_id))
    print(f"Accuracy: {np.mean(accuracies)}")  # type: ignore


if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp_dir:
        print(f"Using temporary directory: {tmp_dir}")
        main(tmp_dir)
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
rclip/const.py | Python | import sys
# Platform flags derived from sys.platform.
IS_MACOS = sys.platform == "darwin"
IS_LINUX = sys.platform.startswith("linux")
IS_WINDOWS = sys.platform in ("win32", "cygwin")

# Extensions rclip always indexes.
IMAGE_EXT = ["jpg", "jpeg", "png", "webp", "heic"]
# RAW extensions, indexed only when no processed image sits alongside them.
IMAGE_RAW_EXT = ["arw", "cr2"]
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
rclip/db.py | Python | import os.path
import pathlib
import sqlite3
from typing import Any, Optional, TypedDict, Union
class ImageOmittable(TypedDict, total=False):
    # Optional flags that may accompany an upsert.
    deleted: bool


class NewImage(ImageOmittable):
    # Row shape required when inserting or updating an image.
    filepath: str
    modified_at: float
    size: int
    vector: bytes


class Image(NewImage):
    # Full row shape as read back from the database.
    id: int


class DB:
    """SQLite-backed index of image feature vectors, keyed by filepath."""

    VERSION = 2

    def __init__(self, filename: Union[str, pathlib.Path]):
        self._con = sqlite3.connect(filename)
        # Rows behave like dicts (access columns by name).
        self._con.row_factory = sqlite3.Row
        self.ensure_tables()
        self.ensure_version()

    def _maybe_commit(self, commit: bool) -> None:
        # Shared helper so write methods can batch work and commit later.
        if commit:
            self._con.commit()

    def close(self):
        """Commits pending writes and closes the connection."""
        self._con.commit()
        self._con.close()

    def ensure_tables(self):
        """Creates the images table, its partial index, and the version table if absent."""
        self._con.execute("""
      CREATE TABLE IF NOT EXISTS images (
        id INTEGER PRIMARY KEY,
        deleted BOOLEAN,
        filepath TEXT NOT NULL UNIQUE,
        modified_at DATETIME NOT NULL,
        size INTEGER NOT NULL,
        vector BLOB NOT NULL
      )
    """)
        # Query for images
        self._con.execute("CREATE UNIQUE INDEX IF NOT EXISTS existing_images ON images(filepath) WHERE deleted IS NULL")
        self._con.execute("CREATE TABLE IF NOT EXISTS db_version (version INTEGER)")
        self._con.commit()

    def ensure_version(self):
        """Migrates an older index up to VERSION, or fails fast on a newer one."""
        version_row = self._con.execute("SELECT version FROM db_version").fetchone()
        current = version_row["version"] if version_row else 1
        if current == self.VERSION:
            return
        if current > self.VERSION:
            raise Exception(
                "found index version newer than this version of rclip can support;"
                " please, update rclip: https://github.com/yurijmikhalevich/rclip/blob/main/README.md#installation",
            )
        if current < 2:
            # v1 -> v2: add a column to track images that are mid-indexing.
            self._con.execute("ALTER TABLE images ADD COLUMN indexing BOOLEAN")
            current = 2
        if current < self.VERSION:
            raise Exception("migration to a newer index version isn't implemented")
        if version_row:
            self._con.execute("UPDATE db_version SET version=?", (self.VERSION,))
        else:
            self._con.execute("INSERT INTO db_version(version) VALUES (?)", (self.VERSION,))
        self._con.commit()

    def commit(self):
        self._con.commit()

    def upsert_image(self, image: NewImage, commit: bool = True):
        """Inserts the image row, or refreshes it when the filepath already exists."""
        params = {"deleted": None, "indexing": None, **image}
        self._con.execute(
            """
      INSERT INTO images(deleted, indexing, filepath, modified_at, size, vector)
      VALUES (:deleted, :indexing, :filepath, :modified_at, :size, :vector)
      ON CONFLICT(filepath) DO UPDATE SET
        deleted=:deleted, indexing=:indexing, modified_at=:modified_at, size=:size, vector=:vector
    """,
            params,
        )
        self._maybe_commit(commit)

    def remove_indexing_flag_from_all_images(self, commit: bool = True):
        self._con.execute("UPDATE images SET indexing = NULL")
        self._maybe_commit(commit)

    def flag_images_in_a_dir_as_indexing(self, path: str, commit: bool = True):
        self._con.execute("UPDATE images SET indexing = 1 WHERE filepath LIKE ?", (path + f"{os.path.sep}%",))
        self._maybe_commit(commit)

    def flag_indexing_images_in_a_dir_as_deleted(self, path: str):
        # Anything still flagged as indexing after a scan was not seen on disk.
        self._con.execute(
            "UPDATE images SET deleted = 1, indexing = NULL WHERE filepath LIKE ? AND indexing = 1",
            (path + f"{os.path.sep}%",),
        )
        self._con.commit()

    def remove_indexing_flag(self, filepath: str, commit: bool = True):
        self._con.execute("UPDATE images SET indexing = NULL WHERE filepath = ?", (filepath,))
        self._maybe_commit(commit)

    def get_image(self, **kwargs: Any) -> Optional[Image]:
        """Fetches one row matching every given column=value filter, or None."""
        where = " AND ".join(f"{column}=:{column}" for column in kwargs)
        return self._con.execute(f"SELECT * FROM images WHERE {where} LIMIT 1", kwargs).fetchone()

    def get_image_vectors_by_dir_path(self, path: str) -> sqlite3.Cursor:
        """Streams (filepath, vector) rows for live images under the given directory."""
        return self._con.execute(
            "SELECT filepath, vector FROM images WHERE filepath LIKE ? AND deleted IS NULL", (path + f"{os.path.sep}%",)
        )
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
rclip/fs.py | Python | import os
from typing import Callable, Pattern
COUNT_FILES_UPDATE_EVERY = 10_000
def count_files(
directory: str, exclude_dir_re: Pattern[str], file_re: Pattern[str], on_change: Callable[[int], None]
) -> None:
prev_update_count = 0
count = 0
for _ in walk(directory, exclude_dir_re, file_re):
count += 1
if count - prev_update_count >= COUNT_FILES_UPDATE_EVERY:
on_change(count)
prev_update_count = count
on_change(count)
def walk(
  directory: str,
  exclude_dir_re: Pattern[str],
  file_re: Pattern[str],
):
  """Walks through a directory recursively and yields files that match the given regex"""
  # explicit stack instead of recursion; order of results is filesystem-dependent
  pending = [directory]
  while pending:
    current_dir = pending.pop()
    with os.scandir(current_dir) as entries:
      for entry in entries:
        if entry.is_dir():
          if not exclude_dir_re.match(entry.path):
            pending.append(entry.path)
        elif entry.is_file() and file_re.match(entry.name):
          yield entry
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
rclip/main.py | Python | import itertools
import os
import re
import sys
import threading
from typing import Iterable, List, NamedTuple, Optional, Tuple, TypedDict, cast
import numpy as np
from tqdm import tqdm
import PIL
from PIL import Image, ImageFile
from pillow_heif import register_heif_opener
from rclip import db, fs, model
from rclip.const import IMAGE_EXT, IMAGE_RAW_EXT
from rclip.utils.preview import preview
from rclip.utils.snap import check_snap_permissions, is_snap, get_snap_permission_error
from rclip.utils import helpers
# allow PIL to open images whose data is truncated instead of raising mid-index
ImageFile.LOAD_TRUNCATED_IMAGES = True
# adds HEIC/HEIF decoding support to PIL
register_heif_opener()


class ImageMeta(TypedDict):
  # filesystem metadata used to detect whether an image changed since it was indexed
  modified_at: float
  size: int


# (filepath, fs metadata, computed feature vector) — shape of one indexed item
PathMetaVector = Tuple[str, ImageMeta, model.FeatureVector]


def get_image_meta(entry: os.DirEntry[str]) -> ImageMeta:
  """Extract the change-detection metadata (mtime + size) from a directory entry."""
  stat = entry.stat()
  return ImageMeta(modified_at=stat.st_mtime, size=stat.st_size)
def is_image_meta_equal(image: db.Image, meta: ImageMeta) -> bool:
  """True when every field present in `meta` matches the stored image row."""
  return all(image[field] == meta[field] for field in meta)
class RClip:
  """Ties together the CLIP model and the SQLite index: keeps the index in sync
  with a directory tree and answers similarity-search queries against it."""

  # directory names skipped during indexing unless the user passes --exclude-dir
  EXCLUDE_DIRS_DEFAULT = ["@eaDir", "node_modules", ".git"]
  # commit the DB periodically so an interrupted indexing run keeps its progress
  DB_IMAGES_BEFORE_COMMIT = 50_000

  class SearchResult(NamedTuple):
    filepath: str
    score: float

  def __init__(
    self,
    model_instance: model.Model,
    database: db.DB,
    indexing_batch_size: int,
    exclude_dirs: Optional[List[str]],
    enable_raw_support: bool = False,
  ):
    """
    :param model_instance: CLIP wrapper used for feature extraction
    :param database: image-index storage
    :param indexing_batch_size: images per feature-extraction batch
    :param exclude_dirs: dir names to skip; None -> EXCLUDE_DIRS_DEFAULT
    :param enable_raw_support: also index RAW (ARW/CR2) files
    """
    self._model = model_instance
    self._db = database
    self._indexing_batch_size = indexing_batch_size
    self._enable_raw_support = enable_raw_support
    # match files by extension; RAW extensions only when RAW support is enabled
    supported_image_ext = IMAGE_EXT + (IMAGE_RAW_EXT if enable_raw_support else [])
    self._image_regex = re.compile(f"^.+\\.({'|'.join(supported_image_ext)})$", re.I)
    excluded_dirs = "|".join(re.escape(dir) for dir in exclude_dirs or self.EXCLUDE_DIRS_DEFAULT)
    # matches any path containing one of the excluded names as a path component
    self._exclude_dir_regex = re.compile(f"^.+\\{os.path.sep}({excluded_dirs})(\\{os.path.sep}.+)?$")

  def _index_files(self, filepaths: List[str], metas: List[ImageMeta]):
    """Compute CLIP features for a batch of files and upsert them (no commit)."""
    images: List[Image.Image] = []
    filtered_paths: List[str] = []
    for path in filepaths:
      try:
        image = helpers.read_image(path)
        images.append(image)
        filtered_paths.append(path)
      except PIL.UnidentifiedImageError:
        # not actually an image despite the extension — skip silently
        pass
      except Exception as ex:
        print(f"error loading image {path}:", ex, file=sys.stderr)
    try:
      features = self._model.compute_image_features(images)
    except Exception as ex:
      # drop the whole batch rather than crash the indexing run
      print("error computing features:", ex, file=sys.stderr)
      return
    for path, meta, vector in cast(Iterable[PathMetaVector], zip(filtered_paths, metas, features)):
      self._db.upsert_image(
        db.NewImage(filepath=path, modified_at=meta["modified_at"], size=meta["size"], vector=vector.tobytes()),
        commit=False,
      )

  def _does_processed_image_exist_for_raw(self, raw_path: str) -> bool:
    """Check if there is a processed image alongside the raw one; doesn't support mixed-case extensions,
    e.g. it won't detect the .JpG image, but will detect .jpg or .JPG"""
    image_path = os.path.splitext(raw_path)[0]
    for ext in IMAGE_EXT:
      if os.path.isfile(image_path + "." + ext):
        return True
      if os.path.isfile(image_path + "." + ext.upper()):
        return True
    return False

  def ensure_index(self, directory: str):
    """Bring the DB index in sync with `directory`.

    Flags every known image under `directory` as `indexing`, walks the tree
    re-indexing new/changed files (clearing the flag for unchanged ones), and
    finally marks images still flagged — i.e. no longer on disk — as deleted.
    A background thread counts files so tqdm can show a total.
    """
    print(
      "checking images in the current directory for changes;"
      ' use "--no-indexing" to skip this if no images were added, changed, or removed',
      file=sys.stderr,
    )
    self._db.remove_indexing_flag_from_all_images(commit=False)
    self._db.flag_images_in_a_dir_as_indexing(directory, commit=True)
    with tqdm(total=None, unit="images") as pbar:

      def update_total_images(count: int):
        pbar.total = count
        pbar.refresh()

      counter_thread = threading.Thread(
        target=fs.count_files,
        args=(directory, self._exclude_dir_regex, self._image_regex, update_total_images),
      )
      counter_thread.start()

      images_processed = 0
      batch: List[str] = []
      metas: List[ImageMeta] = []
      for entry in fs.walk(directory, self._exclude_dir_regex, self._image_regex):
        filepath = entry.path
        if self._enable_raw_support:
          file_ext = helpers.get_file_extension(filepath)
          # skip a RAW file when a processed (e.g. JPEG) sibling exists
          if file_ext in IMAGE_RAW_EXT and self._does_processed_image_exist_for_raw(filepath):
            images_processed += 1
            pbar.update()
            continue
        try:
          meta = get_image_meta(entry)
        except Exception as ex:
          print(f"error getting fs metadata for {filepath}:", ex, file=sys.stderr)
          continue
        # periodic commit preserves progress if the run is interrupted
        if not images_processed % self.DB_IMAGES_BEFORE_COMMIT:
          self._db.commit()
        images_processed += 1
        pbar.update()
        image = self._db.get_image(filepath=filepath)
        if image and is_image_meta_equal(image, meta):
          # unchanged since last index — keep the stored vector
          self._db.remove_indexing_flag(filepath, commit=False)
          continue
        batch.append(filepath)
        metas.append(meta)
        if len(batch) >= self._indexing_batch_size:
          self._index_files(batch, metas)
          batch = []
          metas = []
      if len(batch) != 0:
        self._index_files(batch, metas)
      self._db.commit()
      counter_thread.join()
    # whatever is still flagged `indexing` was not seen on disk
    self._db.flag_indexing_images_in_a_dir_as_deleted(directory)
    print("", file=sys.stderr)

  def search(
    self,
    query: str,
    directory: str,
    top_k: int = 10,
    positive_queries: List[str] = [],
    negative_queries: List[str] = [],
  ) -> List[SearchResult]:
    """Return the `top_k` indexed images under `directory` most similar to the
    combined query; images used as query inputs are excluded from the results."""
    filepaths, features = self._get_features(directory)
    positive_queries = [query] + positive_queries
    sorted_similarities = self._model.compute_similarities_to_text(features, positive_queries, negative_queries)
    # exclude images that were part of the query from the results
    exclude_files = [
      os.path.abspath(query) for query in positive_queries + negative_queries if helpers.is_file_path(query)
    ]
    filtered_similarities = filter(
      lambda similarity: (
        not self._exclude_dir_regex.match(filepaths[similarity[1]]) and filepaths[similarity[1]] not in exclude_files
      ),
      sorted_similarities,
    )
    top_k_similarities = itertools.islice(filtered_similarities, top_k)
    return [RClip.SearchResult(filepath=filepaths[th[1]], score=th[0]) for th in top_k_similarities]

  def _get_features(self, directory: str) -> Tuple[List[str], model.FeatureVector]:
    """Load all indexed (filepath, feature-vector) pairs for `directory` into memory."""
    filepaths: List[str] = []
    features: List[model.FeatureVector] = []
    for image in self._db.get_image_vectors_by_dir_path(directory):
      filepaths.append(image["filepath"])
      features.append(np.frombuffer(image["vector"], np.float32))
    if not filepaths:
      return [], np.ndarray(shape=(0, model.Model.VECTOR_SIZE))
    return filepaths, np.stack(features)
def init_rclip(
  working_directory: str,
  indexing_batch_size: int,
  device: str = "cpu",
  exclude_dir: Optional[List[str]] = None,
  no_indexing: bool = False,
  enable_raw_support: bool = False,
):
  """Construct the model, the DB, and the RClip engine; unless `no_indexing`,
  index `working_directory`. Returns (rclip, model_instance, database)."""
  datadir = helpers.get_app_datadir()
  db_path = datadir / "db.sqlite3"
  database = db.DB(db_path)
  model_instance = model.Model(device=device or "cpu")
  rclip = RClip(
    model_instance=model_instance,
    database=database,
    indexing_batch_size=indexing_batch_size,
    exclude_dirs=exclude_dir,
    enable_raw_support=enable_raw_support,
  )
  if not no_indexing:
    try:
      rclip.ensure_index(working_directory)
    except PermissionError as e:
      # inside a snap, a PermissionError on a symlink usually means the sandbox
      # blocks the link target; print targeted guidance instead of a traceback
      if is_snap() and e.filename is not None and os.path.islink(e.filename):
        symlink_path = e.filename
        realpath = os.path.realpath(e.filename)
        print(f"\n{get_snap_permission_error(realpath, symlink_path, is_current_directory=False)}\n")
        sys.exit(1)
      raise
  return rclip, model_instance, database
def print_results(result: List[RClip.SearchResult], args: helpers.argparse.Namespace):
  """Render search results to stdout, honoring --filepath-only and --preview."""
  # if we are not outputting to console on windows, ensure unicode encoding is correct
  if os.name == "nt" and not sys.stdout.isatty():
    sys.stdout.reconfigure(encoding="utf-8-sig")

  if args.filepath_only:
    for item in result:
      print(item.filepath)
    return

  print("score\tfilepath")
  for item in result:
    print(f'{item.score:.3f}\t"{item.filepath}"')
    if args.preview:
      preview(item.filepath, args.preview_height)
def main():
  """CLI entry point: parse args, build/refresh the index, run the search, print results."""
  arg_parser = helpers.init_arg_parser()
  args = arg_parser.parse_args()

  current_directory = os.getcwd()
  if is_snap():
    check_snap_permissions(current_directory, is_current_directory=True)

  # name the local `database` (not `db`) so it doesn't shadow the imported `rclip.db` module
  rclip, _, database = init_rclip(
    current_directory,
    args.indexing_batch_size,
    vars(args).get("device", "cpu"),  # --device only exists on macOS builds with MPS
    args.exclude_dir,
    args.no_indexing,
    args.experimental_raw_support,
  )
  # the previous `except Exception as e: raise e` was a no-op re-raise; plain
  # try/finally guarantees the DB is closed without touching error propagation
  try:
    result = rclip.search(args.query, current_directory, args.top, args.add, args.subtract)
    print_results(result, args)
  finally:
    database.close()
if __name__ == "__main__":
main()
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
rclip/model.py | Python | import re
from typing import List, Tuple, Optional, cast
import sys
import numpy as np
import numpy.typing as npt
from PIL import Image, UnidentifiedImageError
from rclip.utils import helpers
from importlib.metadata import version
import open_clip
# parses an optional "<multiplier>:" prefix on a query, e.g. "2:cat" or "0.5:./img.jpg"
QUERY_WITH_MULTIPLIER_RE = re.compile(r"^(?P<multiplier>(\d+(\.\d+)?|\.\d+|\d+\.)):(?P<query>.+)$")
QueryWithMultiplier = Tuple[float, str]
FeatureVector = npt.NDArray[np.float32]
# model/checkpoint combos for which a cached text-only copy can be saved and reused
TEXT_ONLY_SUPPORTED_MODELS = [
  {
    "model_name": "ViT-B-32-quickgelu",
    "checkpoint_name": "openai",
  }
]
def get_open_clip_version():
  # installed open_clip_torch version; used to invalidate the cached text-only model on upgrades
  return version("open_clip_torch")
class Model:
  """Lazy wrapper around an open_clip CLIP model.

  Loads the full model only when image features are needed. Text-only queries
  can be served by a cached copy of the model with the vision tower removed
  (much faster to load); that copy is stored in the app data dir and is
  invalidated whenever open_clip is upgraded.
  """

  VECTOR_SIZE = 512
  _model_name = "ViT-B-32-quickgelu"
  _checkpoint_name = "openai"

  def __init__(self, device: str = "cpu"):
    self._device = device
    # lazily populated caches; see the properties below
    self._model_var = None
    self._model_text_var = None
    self._preprocess_var = None
    self._tokenizer_var = None
    # on-disk cache of the text-only model + the open_clip version it was built with
    self._text_model_path = helpers.get_app_datadir() / f"{self._model_name}_{self._checkpoint_name}_text.pth"
    self._text_model_version_path = (
      helpers.get_app_datadir() / f"{self._model_name}_{self._checkpoint_name}_text.version"
    )

  @property
  def _tokenizer(self):
    # tokenizer is cheap relative to the model, but still cached after first use
    if not self._tokenizer_var:
      self._tokenizer_var = open_clip.get_tokenizer(self._model_name)
    return self._tokenizer_var

  def _load_model(self):
    """Load the full CLIP model and refresh the on-disk text-only copy if stale."""
    self._model_var, _, self._preprocess_var = open_clip.create_model_and_transforms(
      self._model_name,
      pretrained=self._checkpoint_name,
      device=self._device,
    )
    self._model_text_var = None
    if {
      "model_name": self._model_name,
      "checkpoint_name": self._checkpoint_name,
    } in TEXT_ONLY_SUPPORTED_MODELS and self._should_update_text_model():
      import torch

      model_text = self._get_text_model(cast(open_clip.CLIP, self._model_var))
      torch.save(model_text, self._text_model_path)
      with self._text_model_version_path.open("w") as f:
        f.write(get_open_clip_version())

  @staticmethod
  def _get_text_model(model: open_clip.CLIP):
    # deep copy with the vision tower dropped: much faster to load for text-only use
    import copy

    model_text = copy.deepcopy(model)
    model_text.visual = None  # type: ignore
    return model_text

  def _should_update_text_model(self):
    """True when the cached text-only model is missing or built by a different open_clip."""
    if not self._text_model_path.exists():
      return True
    if not self._text_model_version_path.exists():
      return True
    with self._text_model_version_path.open("r") as f:
      text_model_version = f.read().strip()
    # to be safe, update the text model on open_clip update (which could update the base model)
    return get_open_clip_version() != text_model_version

  @property
  def _model(self):
    if not self._model_var:
      self._load_model()
    return cast(open_clip.CLIP, self._model_var)

  @property
  def _model_text(self):
    """Model used for text encoding: prefer an already-loaded full model, then the
    cached text-only copy, and load the full model only as a last resort."""
    if self._model_var:
      return self._model_var
    if self._model_text_var:
      return self._model_text_var
    if self._text_model_path.exists() and not self._should_update_text_model():
      import torch

      self._model_text_var = torch.load(self._text_model_path, weights_only=False)
      return self._model_text_var
    if not self._model_var:
      self._load_model()
    return cast(open_clip.CLIP, self._model_var)

  @property
  def _preprocess(self):
    from torchvision.transforms import Compose

    if not self._preprocess_var:
      self._load_model()
    return cast(Compose, self._preprocess_var)

  def compute_image_features(self, images: List[Image.Image]) -> npt.NDArray[np.float32]:
    """L2-normalized CLIP embeddings for a list of PIL images."""
    import torch

    images_preprocessed = torch.stack(cast(list[torch.Tensor], [self._preprocess(thumb) for thumb in images])).to(
      self._device
    )
    with torch.no_grad():
      image_features = self._model.encode_image(images_preprocessed)
      image_features /= image_features.norm(dim=-1, keepdim=True)
    return image_features.cpu().numpy()

  def compute_text_features(self, text: List[str]) -> npt.NDArray[np.float32]:
    """L2-normalized CLIP embeddings for a list of text strings."""
    import torch

    with torch.no_grad():
      text_features = self._model_text.encode_text(self._tokenizer(text).to(self._device))
      text_features /= text_features.norm(dim=-1, keepdim=True)
    return text_features.cpu().numpy()

  @staticmethod
  def _extract_query_multiplier(query: str) -> QueryWithMultiplier:
    """Split an optional "<multiplier>:" prefix off a query; defaults to 1.0."""
    match = QUERY_WITH_MULTIPLIER_RE.match(query)
    if not match:
      return 1.0, query
    multiplier = float(match.group("multiplier"))
    query = match.group("query")
    return multiplier, query

  @staticmethod
  def _group_queries_by_type(
    queries: List[str],
  ) -> Tuple[List[QueryWithMultiplier], List[QueryWithMultiplier], List[QueryWithMultiplier]]:
    """Partition queries into (phrases, local files, URLs), each with its multiplier."""
    phrase_queries: List[Tuple[float, str]] = []
    local_file_queries: List[Tuple[float, str]] = []
    url_queries: List[Tuple[float, str]] = []
    for query in queries:
      multiplier, query = Model._extract_query_multiplier(query)
      if helpers.is_http_url(query):
        url_queries.append((multiplier, query))
      elif helpers.is_file_path(query):
        local_file_queries.append((multiplier, query))
      else:
        phrase_queries.append((multiplier, query))
    return phrase_queries, local_file_queries, url_queries

  def compute_features_for_queries(self, queries: List[str]) -> FeatureVector:
    """Combine all queries into one feature vector (multiplier-weighted sum),
    loading only the model components the query mix actually needs."""
    text_features: Optional[FeatureVector] = None
    image_features: Optional[FeatureVector] = None
    phrases, files, urls = self._group_queries_by_type(queries)
    # process images first to avoid loading BOTH full and text-only models
    # if we need to process images, we will load the full model, and the text processing logic will use it, too
    # if we don't need to process images, we will skip loading the full model, and the text processing
    # logic will load the text-only model
    if files or urls:
      file_multipliers, file_paths = cast(Tuple[Tuple[float], Tuple[str]], zip(*(files))) if files else ((), ())
      url_multipliers, url_paths = cast(Tuple[Tuple[float], Tuple[str]], zip(*(urls))) if urls else ((), ())
      try:
        images = [helpers.download_image(q) for q in url_paths] + [helpers.read_image(q) for q in file_paths]
      except FileNotFoundError as e:
        print(f'File "{e.filename}" not found. Check if you have typos in the filename.')
        sys.exit(1)
      except UnidentifiedImageError as e:
        print(f'File "{e.filename}" is not an image. You can only use image files or text as queries.')
        sys.exit(1)
      image_multipliers = np.array(url_multipliers + file_multipliers)
      image_features = np.add.reduce(self.compute_image_features(images) * image_multipliers.reshape(-1, 1))
    if phrases:
      phrase_multipliers, phrase_queries = cast(Tuple[Tuple[float], Tuple[str]], zip(*phrases))
      phrase_multipliers_np = np.array(phrase_multipliers).reshape(-1, 1)
      text_features = np.add.reduce(self.compute_text_features([*phrase_queries]) * phrase_multipliers_np)
    if text_features is not None and image_features is not None:
      return text_features + image_features
    elif text_features is not None:
      return text_features
    elif image_features is not None:
      return image_features
    else:
      # no queries at all: a zero vector ranks every item equally
      return np.zeros(Model.VECTOR_SIZE, dtype=np.float32)

  def compute_similarities_to_text(
    self, item_features: FeatureVector, positive_queries: List[str], negative_queries: List[str]
  ) -> List[Tuple[float, int]]:
    """Return (score, item_index) pairs sorted by descending similarity of
    `item_features` rows to (positive - negative) combined query features."""
    positive_features = self.compute_features_for_queries(positive_queries)
    negative_features = self.compute_features_for_queries(negative_queries)
    features = positive_features - negative_features
    similarities = features @ item_features.T
    sorted_similarities = sorted(zip(similarities, range(item_features.shape[0])), key=lambda x: x[0], reverse=True)
    return sorted_similarities
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
rclip/utils/helpers.py | Python | import argparse
import os
import pathlib
import textwrap
from typing import IO, cast
from PIL import Image, UnidentifiedImageError
import re
import numpy as np
import rawpy
import requests
import sys
from importlib.metadata import version
from rclip.const import IMAGE_RAW_EXT, IS_LINUX, IS_MACOS, IS_WINDOWS
# hard limits applied to images downloaded as search queries
MAX_DOWNLOAD_SIZE_BYTES = 50_000_000
DOWNLOAD_TIMEOUT_SECONDS = 60
# matches Windows drive-absolute paths such as "C:\photos\cat.jpg"
WIN_ABSOLUTE_FILE_PATH_REGEX = re.compile(r"^[a-z]:\\", re.I)
# fallback/maximum width for wrapping CLI help text
DEFAULT_TERMINAL_TEXT_WIDTH = 100
def __get_system_datadir() -> pathlib.Path:
  """Return the per-OS parent directory for persistent application data.

  - linux: ~/.local/share
  - macOS: ~/Library/Application Support
  - windows: C:/Users/<USER>/AppData/Roaming
  """
  home = pathlib.Path.home()
  if IS_WINDOWS:
    return home / "AppData/Roaming"
  if IS_LINUX:
    return home / ".local/share"
  if IS_MACOS:
    return home / "Library/Application Support"
  raise NotImplementedError(f'"{sys.platform}" is not supported')
def get_app_datadir() -> pathlib.Path:
  """rclip's data directory: $RCLIP_DATADIR if set, otherwise the system
  datadir + "/rclip"; created on demand."""
  override = os.getenv("RCLIP_DATADIR")
  datadir = pathlib.Path(override) if override else __get_system_datadir() / "rclip"
  os.makedirs(datadir, exist_ok=True)
  return datadir
def positive_int_arg_type(arg: str) -> int:
  """argparse `type=` helper: parse a strictly positive integer.

  Raises ArgumentTypeError for values < 1 (ValueError for non-integers,
  which argparse also reports as invalid).
  """
  value = int(arg)
  if value >= 1:
    return value
  raise argparse.ArgumentTypeError("should be >0")
def get_terminal_text_width() -> int:
  """Width used for wrapping help text: terminal width minus 2, capped at
  DEFAULT_TERMINAL_TEXT_WIDTH; falls back to the default when the terminal
  size is unavailable (OSError) or implausibly small (< 20)."""
  try:
    columns = os.get_terminal_size().columns
  except OSError:
    return DEFAULT_TERMINAL_TEXT_WIDTH
  width = min(DEFAULT_TERMINAL_TEXT_WIDTH, columns - 2)
  return width if width >= 20 else DEFAULT_TERMINAL_TEXT_WIDTH
class HelpFormatter(argparse.RawDescriptionHelpFormatter):
  """RawDescriptionHelpFormatter that wraps help text to the detected terminal width."""

  def __init__(self, prog: str, indent_increment: int = 2, max_help_position: int = 24) -> None:
    width = get_terminal_text_width()
    super().__init__(prog, indent_increment, max_help_position, width=width)
def init_arg_parser() -> argparse.ArgumentParser:
  """Build the rclip CLI argument parser: the positional query, query
  composition (--add/--subtract with multiplier support), output options, and
  indexing options. `--device` is only exposed on macOS machines with a
  working MPS backend."""
  text_width = get_terminal_text_width()
  parser = argparse.ArgumentParser(
    formatter_class=HelpFormatter,
    # "+" as a prefix char lets "+<query>" act as a short alias for --add
    prefix_chars="-+",
    description="rclip is an AI-powered command-line photo search tool",
    epilog="hints:\n"
    + textwrap.fill(
      '- relative file path should be prefixed with ./, e.g. "./cat.jpg", not "cat.jpg"',
      initial_indent=" ",
      subsequent_indent=" ",
      width=text_width,
    )
    + "\n"
    + textwrap.fill(
      '- any query can be prefixed with a multiplier, e.g. "2:cat", "0.5:./cat-sleeps-on-a-chair.jpg";'
      " adding a multiplier is especially useful when combining image and text queries because"
      " image queries are usually weighted more than text ones",
      initial_indent=" ",
      subsequent_indent=" ",
      width=text_width,
    )
    + "\n\n"
    "get help:\n"
    " https://github.com/yurijmikhalevich/rclip/discussions/new/choose\n\n",
  )
  version_str = f"rclip {version('rclip')}"
  parser.add_argument("--version", "-v", action="version", version=version_str, help=f'prints "{version_str}"')
  parser.add_argument("query", help="a text query or a path/URL to an image file")
  parser.add_argument(
    "--add",
    "-a",
    "+",
    metavar="QUERY",
    action="append",
    default=[],
    help='a text query or a path/URL to an image file to add to the "original" query, can be used multiple times',
  )
  parser.add_argument(
    "--subtract",
    "--sub",
    "-s",
    "-",
    metavar="QUERY",
    action="append",
    default=[],
    help='a text query or a path/URL to an image file to subtract from the "original" query,'
    " can be used multiple times",
  )
  parser.add_argument(
    "--top", "-t", type=positive_int_arg_type, default=10, help="number of top results to display; default: 10"
  )
  # --preview and --filepath-only are mutually exclusive output modes
  display_mode_group = parser.add_mutually_exclusive_group()
  display_mode_group.add_argument(
    "--preview",
    "-p",
    action="store_true",
    default=False,
    help="preview results in the terminal (supported in iTerm2, Konsole 22.04+, wezterm, Mintty, mlterm)",
  )
  display_mode_group.add_argument(
    "--filepath-only",
    "-f",
    action="store_true",
    default=False,
    help="outputs only filepaths",
  )
  parser.add_argument(
    "--preview-height",
    "-H",
    metavar="PREVIEW_HEIGHT_PX",
    action="store",
    type=int,
    default=400,
    help="preview height in pixels; default: 400",
  )
  parser.add_argument(
    "--no-indexing",
    "--skip-index",
    "--skip-indexing",
    "-n",
    action="store_true",
    default=False,
    help="allows to skip updating the index if no images were added, changed, or removed",
  )
  parser.add_argument(
    "--indexing-batch-size",
    "-b",
    type=positive_int_arg_type,
    default=8,
    help="the size of the image batch used when updating the search index;"
    " larger values may improve the indexing speed a bit on some hardware but will increase RAM usage; default: 8",
  )
  parser.add_argument(
    "--exclude-dir",
    action="append",
    help="dir to exclude from search, can be used multiple times;"
    ' adding this argument overrides the default of ("@eaDir", "node_modules", ".git");'
    " WARNING: the default will be removed in v2",
  )
  parser.add_argument(
    "--experimental-raw-support",
    action="store_true",
    default=False,
    help="enables support for RAW images (only ARW and CR2 are supported)",
  )
  if IS_MACOS:
    # expose --device only when the MPS backend actually works on this machine
    if is_mps_available():
      parser.add_argument(
        "--device", "-d", default="mps", choices=["cpu", "mps"], help="device to run on; default: mps"
      )
  return parser
def is_mps_available() -> bool:
  """True when PyTorch's Apple-Silicon MPS backend is actually usable here."""
  if not IS_MACOS:
    return False
  import torch.backends.mps

  if not torch.backends.mps.is_available():
    return False
  try:
    import torch

    # on some systems, specifically in GHA
    # torch.backends.mps.is_available() returns True, but using the mps backend fails
    torch.ones(1, device="mps")
    return True
  except RuntimeError:
    return False
# See: https://meta.wikimedia.org/wiki/User-Agent_policy
def download_image(url: str) -> Image.Image:
  """Download an image used as a search query.

  Sends a HEAD request first and refuses files whose declared Content-Length
  exceeds MAX_DOWNLOAD_SIZE_BYTES (servers omitting the header are not
  size-checked). Raises ValueError for oversized files.
  """
  headers = {"User-agent": "rclip - (https://github.com/yurijmikhalevich/rclip)"}
  # use the shared timeout constant (was a hard-coded 60 inconsistent with the GET below)
  check_size = requests.request("HEAD", url, headers=headers, timeout=DOWNLOAD_TIMEOUT_SECONDS)
  if length := check_size.headers.get("Content-Length"):
    if int(length) > MAX_DOWNLOAD_SIZE_BYTES:
      raise ValueError(f"Avoiding download of large ({length} byte) file.")
  img = Image.open(
    cast(IO[bytes], requests.get(url, headers=headers, stream=True, timeout=DOWNLOAD_TIMEOUT_SECONDS).raw)
  )
  return img
def get_file_extension(path: str) -> str:
  """Lower-cased extension of `path` without the leading dot ('' when none)."""
  _, ext = os.path.splitext(path)
  return ext[1:].lower()
def read_raw_image_file(path: str):
  """Decode a RAW camera file (e.g. ARW/CR2) into a PIL image.

  Uses rawpy to demosaic/postprocess. The raw file handle is now closed
  deterministically via the context manager (the original leaked it).
  """
  with rawpy.imread(path) as raw:
    rgb = raw.postprocess()
  return Image.fromarray(np.array(rgb))
def read_image(query: str) -> Image.Image:
  """Open an image from a local path or a file:// URL; RAW formats go through rawpy.

  On UnidentifiedImageError, fills in the missing filename so callers can
  report which file failed.
  """
  path = query.removeprefix("file://")
  try:
    if get_file_extension(path) in IMAGE_RAW_EXT:
      image = read_raw_image_file(path)
    else:
      image = Image.open(path)
  except UnidentifiedImageError as e:
    # by default the filename on the UnidentifiedImageError is None
    e.filename = path
    raise e
  return image
def is_http_url(path: str) -> bool:
  """True for http:// and https:// URLs."""
  return path.startswith(("https://", "http://"))
def is_file_path(path: str) -> bool:
  """Heuristic: does `path` look like a local file reference — absolute,
  file:// URL, explicit ./ relative, or a Windows drive path?"""
  if path.startswith(("/", "file://", "./")):
    return True
  # Windows drive-absolute paths such as "C:\photos\cat.jpg"
  return re.match(r"^[a-z]:\\", path, re.I) is not None
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
rclip/utils/preview.py | Python | import base64
from io import BytesIO
import os
from PIL import Image
from rclip.utils.helpers import read_image
def _get_start_sequence():
  """OSC prefix for the inline-image escape; screen/tmux need DCS passthrough wrapping."""
  term = os.getenv("TERM") or ""
  if term.startswith(("screen", "tmux")):
    return "\033Ptmux;\033\033]"
  return "\033]"
def _get_end_sequence():
  """Terminator matching _get_start_sequence, incl. the tmux/screen passthrough suffix."""
  term = os.getenv("TERM") or ""
  if term.startswith(("screen", "tmux")):
    return "\a\033\\"
  return "\a"
def preview(filepath: str, img_height_px: int):
  """Print the image at `filepath` inline in the terminal (iTerm2 OSC 1337
  image protocol), downscaled to at most `img_height_px` pixels tall."""
  with read_image(filepath) as img:
    if img.height > img_height_px:
      # scale down, preserving the aspect ratio
      height_px = img_height_px
      width_px = int(img_height_px * img.width / img.height)
    else:
      width_px, height_px = img.width, img.height
    resized = img.resize((width_px, height_px), Image.LANCZOS)  # type: ignore
    encoded = BytesIO()
    resized.convert("RGB").save(encoded, format="JPEG")
    img_bytes = encoded.getvalue()
    img_str = base64.b64encode(img_bytes).decode("utf-8")
    print(
      f"{_get_start_sequence()}1337;"
      f"File=inline=1;size={len(img_bytes)};preserveAspectRatio=1;"
      f"width={width_px}px;height={height_px}px:{img_str}{_get_end_sequence()}",
    )
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
rclip/utils/snap.py | Python | import os
import sys
def is_snap():
  """True when running inside the snap package (snapd sets the SNAP env var)."""
  return os.environ.get("SNAP", "") != ""
def get_snap_permission_error(
  directory: str,
  symlink_path: str | None,
  is_current_directory: bool = False,
) -> str:
  """Build a user-facing message explaining why the snap cannot access
  `directory` and which `snap connect` command (if any) fixes it.

  :param directory: resolved (real) path that triggered the PermissionError
  :param symlink_path: original symlink the user referenced, if any
  :param is_current_directory: phrase the message as "the current directory"
  """
  homedir = os.getenv("SNAP_REAL_HOME")
  if not homedir:
    # snapd is expected to set SNAP_REAL_HOME inside a snap; missing it is a packaging issue
    return (
      "SNAP_REAL_HOME environment variable is not set."
      " Please, report the issue to the rclip project on"
      " GitHub https://github.com/yurijmikhalevich/rclip/issues."
    )
  directory_str = "the current directory" if is_current_directory else directory
  # mention the symlink when the failure came through one, so the advice
  # targets the real location
  if symlink_path and symlink_path != directory:
    path_info = f"symlink {symlink_path} which points to {directory_str}"
  else:
    path_info = directory_str
  if directory == homedir or directory.startswith(homedir + os.sep):
    # home is reachable via the `home` snap interface
    return (
      f"rclip doesn't have access to {path_info}."
      " You can resolve this issue by running:"
      "\n\n\tsudo snap connect rclip:home\n\n"
      "This command will grant rclip the necessary access to the home directory."
      " Afterward, you can try again."
    )
  if directory == "/media" or directory.startswith("/media" + os.sep):
    # /media is reachable via the `removable-media` snap interface
    return (
      f"rclip doesn't have access to {path_info}."
      " You can resolve this issue by running:"
      "\n\n\tsudo snap connect rclip:removable-media\n\n"
      'This command will grant rclip the necessary access to the "/media" directory.'
      " Afterward, you can try again."
    )
  # anywhere else is not reachable from the snap sandbox at all
  return (
    'rclip installed with snap cannot access files outside of the home or "/media" directories.'
    ' If you want to use rclip outside of home or "/media",'
    " file an issue in the rclip project on GitHub https://github.com/yurijmikhalevich/rclip/issues,"
    " describe your use case, and consider alternative rclip installation"
    " options https://github.com/yurijmikhalevich/rclip#linux."
  )
def check_snap_permissions(directory: str, is_current_directory: bool = False):
  """Exit with a helpful message when the snap sandbox blocks access to `directory`.

  Probes the directory with os.scandir; on PermissionError prints snap-specific
  guidance (resolving symlinks so the advice targets the real location) and exits.
  """
  try:
    # close the scandir iterator deterministically — the original left it open,
    # which triggers a ResourceWarning
    with os.scandir(directory) as entries:
      any(entries)
  except PermissionError:
    symlink_path = None
    realpath = directory
    if os.path.islink(directory):
      symlink_path = directory
      realpath = os.path.realpath(directory)
    print(get_snap_permission_error(realpath, symlink_path, is_current_directory))
    sys.exit(1)
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
release-utils/appimage/appimage_after_bundle.sh | Shell | #!/usr/bin/env bash
set -e
PYTHONHOME=$APPDIR/usr \
PYTHONPATH=$APPDIR/usr/lib/python3/dist-packages:$APPDIR/usr/lib/python3.10 \
LD_LIBRARY_PATH=$APPDIR/usr/lib/x86_64-linux-gnu \
which python3.10 &&
python3.10 -m pip install poetry==1.8.4 &&
python3.10 -m pip install --upgrade --isolated --no-input --ignore-installed --prefix="$APPDIR/usr" certifi setuptools wheel &&
python3.10 -m poetry build &&
python3.10 -m poetry export --output requirements.txt &&
python3.10 -m pip install --extra-index-url https://download.pytorch.org/whl/cpu --upgrade --isolated --no-input --ignore-installed --prefix="$APPDIR/usr" -r requirements.txt &&
python3.10 -m pip install --no-dependencies --isolated --no-input --prefix="$APPDIR/usr" dist/*.whl
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
release-utils/homebrew/generate_formula.py | Python | import hashlib
from time import sleep
import jinja2
import poet
import requests
import sys
env = jinja2.Environment(trim_blocks=True)
TEMPLATE = env.from_string("""class Rclip < Formula
include Language::Python::Virtualenv
desc "AI-Powered Command-Line Photo Search Tool"
homepage "https://github.com/yurijmikhalevich/rclip"
url "{{ package.url }}"
sha256 "{{ package.checksum }}"
license "MIT"
if OS.linux?
depends_on "patchelf" => :build # for rawpy
end
depends_on "rust" => :build # for safetensors
depends_on "certifi"
depends_on "libheif"
depends_on "libyaml"
depends_on "numpy"
depends_on "pillow"
depends_on "python@3.12"
depends_on "pytorch-python312@2.5.1"
depends_on "sentencepiece"
depends_on "torchvision-python312@0.20.1"
{{ resources }}
if OS.mac?
if Hardware::CPU.arm?
resource "rawpy" do
url "https://files.pythonhosted.org/packages/87/75/610a34caf048aa87248f8393e70073610146f379fdda8194a988ba286d5b/rawpy-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", using: :nounzip
sha256 "1097b10eed4027e5b50006548190602e1adba9c824526b45f7a37781cfa01818"
end
elsif Hardware::CPU.intel?
resource "rawpy" do
url "https://files.pythonhosted.org/packages/27/1c/59024e87c20b325e10b43e3b709929681a0ed23bda3885c7825927244fcc/rawpy-0.24.0-cp312-cp312-macosx_10_9_x86_64.whl", using: :nounzip
sha256 "ed639b0dc91c3e85d6c39303a1523b7e1edc4f4b0381c376ed0ff99febb306e4"
end
else
raise "Unknown CPU architecture, only amd64 and arm64 are supported"
end
elsif OS.linux?
if Hardware::CPU.arm?
resource "rawpy" do
url "https://files.pythonhosted.org/packages/9c/c4/576853c0eea14d62a2776f683dae23c994572dfc2dcb47fd1a1473b7b18a/rawpy-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", using: :nounzip
sha256 "17a970fd8cdece57929d6e99ce64503f21b51c00ab132bad53065bd523154892"
end
elsif Hardware::CPU.intel?
resource "rawpy" do
url "https://files.pythonhosted.org/packages/fe/35/5d6765359ce6e06fe0aee5a3e4e731cfe08c056df093d97c292bdc02132a/rawpy-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", using: :nounzip
sha256 "a12fc4e6c5879b88c6937abb9f3f6670dd34d126b4a770ad4566e9f747e306fb"
end
else
raise "Unknown CPU architecture, only amd64 and arm64 are supported"
end
end
def install
# Fix for ZIP timestamp issue with files having dates before 1980
ENV["SOURCE_DATE_EPOCH"] = "315532800" # 1980-01-01
virtualenv_install_with_resources without: "rawpy"
resource("rawpy").stage do
wheel = Dir["*.whl"].first
valid_wheel = wheel.sub(/^.*--/, "")
File.rename(wheel, valid_wheel)
system "python3.12", "-m", "pip", "--python=#{libexec}/bin/python", "install", "--no-deps", valid_wheel
end
if OS.linux?
rawpy_so = Dir[libexec/"lib/python3.12/site-packages/rawpy/_rawpy*.so"].first
raise "rawpy shared object not found" unless rawpy_so
system "patchelf", "--set-rpath", "$ORIGIN/../rawpy.libs", rawpy_so
libraw_so = Dir[libexec/"lib/python3.12/site-packages/rawpy.libs/libraw*.so.*"].first
raise "libraw shared object not found" unless libraw_so
system "patchelf", "--set-rpath", "$ORIGIN", libraw_so
end
# link dependent virtualenvs to this one
site_packages = Language::Python.site_packages("python3.12")
paths = %w[pytorch-python312@2.5.1 torchvision-python312@0.20.1].map do |package_name|
package = Formula[package_name].opt_libexec
package/site_packages
end
(libexec/site_packages/"homebrew-deps.pth").write paths.join("\\n")
end
test do
output = shell_output("#{bin}/rclip cat")
assert_match("score\\tfilepath", output)
end
end
""") # noqa
# These deps are being installed from brew
DEPS_TO_IGNORE = ["numpy", "pillow", "certifi", "rawpy", "torch", "torchvision"]
RESOURCE_URL_OVERRIDES = {
# open-clip-torch publishes an incomplete tarball to pypi, so we will fetch one from GitHub
"open-clip-torch": env.from_string(
"https://github.com/mlfoundations/open_clip/archive/refs/tags/v{{ version }}.tar.gz"
),
}
def main():
  """Render the Homebrew formula for the requested rclip version to stdout."""
  if len(sys.argv) != 2:
    print("Usage: generate_formula.py <version>")
    sys.exit(1)
  target_version = sys.argv[1]
  deps = get_deps_for_requested_rclip_version_or_die(target_version)
  # drop deps that brew itself provides
  for dep in DEPS_TO_IGNORE:
    deps.pop(dep, None)
  # swap in alternative tarball URLs (and recompute checksums) where PyPI is unusable
  for dep, url in RESOURCE_URL_OVERRIDES.items():
    new_url = url.render(version=deps[dep]["version"])
    deps[dep]["url"] = new_url
    deps[dep]["checksum"] = compute_checksum(new_url)
  # brew resource names are lowercase
  for _, dep in deps.items():
    dep["name"] = dep["name"].lower()
  rclip_metadata = deps.pop("rclip")
  resources = "\n\n".join([poet.RESOURCE_TEMPLATE.render(resource=dep) for dep in deps.values()])
  print(TEMPLATE.render(package=rclip_metadata, resources=resources))
def compute_checksum(url: str):
    """Download the file at `url` and return its SHA-256 hex digest.

    Raises `requests.HTTPError` for non-2xx responses so we never silently
    checksum an error page, and uses a timeout so a stalled download cannot
    hang the release script forever.
    """
    response = requests.get(url, timeout=60)
    response.raise_for_status()
    return hashlib.sha256(response.content).hexdigest()
def get_deps_for_requested_rclip_version_or_die(target_version: str):
    """Resolve rclip's dependency graph from PyPI, retrying until PyPI serves
    the requested version; exits with status 1 after exhausting the retries.
    """
    deps = poet.make_graph("rclip")
    rclip_metadata = deps["rclip"]
    target_tarball = f"rclip-{target_version}.tar.gz"
    # it takes a few seconds for a published wheel appear in PyPI
    retries_left = 5
    while not rclip_metadata["url"].endswith(target_tarball):
        if retries_left == 0:
            print(f"Version mismatch: {rclip_metadata['version']} != {target_version}. Exiting.", file=sys.stderr)
            sys.exit(1)
        retries_left -= 1
        print(
            f"Version mismatch: {rclip_metadata['url'].split('/')[-1]} != {target_tarball}. Retrying in 10 seconds.",
            file=sys.stderr,
        )
        # it takes a few seconds for a published wheel appear in PyPI
        sleep(10)
        # Re-resolve the whole graph; PyPI may have caught up by now.
        deps = poet.make_graph("rclip")
        rclip_metadata = deps["rclip"]
    return deps
if __name__ == "__main__":
main()
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
release-utils/homebrew/release.sh | Shell | #!/bin/bash
set -e
# the script requires gh cli and git to be installed and configured
# and push permissions to https://github.com/yurijmikhalevich/homebrew-tap

ORIG_PWD=$(pwd)
VERSION=$(poetry version -s)

TMP_DIR=$(mktemp -d -t release-rclip-brew-XXXXXXXXXX)
# Quote every expansion of TMP_DIR / ORIG_PWD / VERSION so the script
# survives paths containing spaces and never word-splits unexpectedly.
cd "$TMP_DIR"
echo "Working in $TMP_DIR"

# Clean up the temporary checkout on exit, whether we succeed or get interrupted.
function handle_exit() {
  cd "$ORIG_PWD"
  rm -rf "$TMP_DIR"
  echo "Removed $TMP_DIR"
}
trap handle_exit 0 SIGHUP SIGINT SIGQUIT SIGABRT SIGTERM

# In CI we authenticate over HTTPS with GITHUB_TOKEN; locally we rely on SSH keys.
if [[ "$GITHUB_ACTIONS" ]]; then
  git clone "https://$GITHUB_TOKEN@github.com/yurijmikhalevich/homebrew-tap.git" homebrew-tap
else
  git clone git@github.com:yurijmikhalevich/homebrew-tap.git homebrew-tap
fi
cd homebrew-tap

PR_BRANCH="release-rclip-$VERSION"
PR_TITLE="rclip $VERSION"

git checkout -b "$PR_BRANCH"
python "$ORIG_PWD/release-utils/homebrew/generate_formula.py" "$VERSION" > Formula/rclip.rb
git commit -am "$PR_TITLE"
git push origin "$PR_BRANCH"
gh pr create --title "$PR_TITLE" --body "Automated commit updating **rclip** formula to $VERSION" --base main --head "$PR_BRANCH"

# it takes a few seconds for GHA to start checks on the PR
sleep 20
gh pr checks "$PR_BRANCH" --watch --fail-fast

# NOTE(review): labelling with pr-pull presumably triggers the tap's merge
# automation — confirm against the homebrew-tap workflows.
gh pr edit "$PR_BRANCH" --add-label pr-pull

# it takes a few seconds for GHA to start checks on the PR
sleep 20
gh pr checks "$PR_BRANCH" --watch --fail-fast

# assert that PR_STATE was closed as it should
PR_STATE=$(gh pr view "$PR_BRANCH" --json state -q .state)
if [ "$PR_STATE" != "CLOSED" ]; then
  echo "PR \"$PR_TITLE\" is not closed"
  exit 1
fi

echo "Released rclip $VERSION"
release-utils/windows/build-msi.ps1 | PowerShell | param (
[string]$Version
)
# Fail early when the caller forgot to pass -Version.
if (-not $Version) {
    throw "Version is not specified"
}

Set-StrictMode -Version Latest
$ErrorActionPreference = "Stop"

$currentDir = Get-Location
$templatePath = Join-Path -Path $currentDir -ChildPath "release-utils\windows\template.aip"

# Drive Advanced Installer through its COM automation object.
$advinst = New-Object -ComObject AdvancedInstaller
$project = $advinst.LoadProject($templatePath)
$project.ProductDetails.Version = $Version
# Fixed upgrade code so every release is recognized as an upgrade of the same product.
$project.ProductDetails.UpgradeCode.UpgradeCode = "{7C6C2996-8E43-4D30-8D67-1A347DCFEEBF}"
$project.InstallParameters.PackageType = "32bit"

# Bundle the built application directory into the installer's app dir.
$buildPath = Join-Path -Path $currentDir -ChildPath "dist\rclip"
$project.FilesComponent.AddFolderContentS("appdir", $buildPath)

# Append the install dir to the user's PATH; removed again on uninstall.
$pathEnvVar = $project.Environment.NewVariable("PATH", "[APPDIR]")
$pathEnvVar.InstallOperationType = "CreateOrUpdate"
$pathEnvVar.RemoveOnUninstall = $true
$pathEnvVar.IsSystemVariable = $false
$pathEnvVar.UpdateOperationType = "Append"
$pathEnvVar.Separator = ";"

# Save the generated project and build the MSI into build-msi/.
$msiBuildPath = Join-Path -Path $currentDir -ChildPath "build-msi"
if (-not (Test-Path -Path $msiBuildPath -PathType Container)) {
    New-Item -Path $msiBuildPath -ItemType Directory
}
$projectFile = Join-Path -Path $msiBuildPath -ChildPath "rclip.aip"
$project.SaveAs($projectFile)
$project.Build()
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
tests/e2e/test_rclip.py | Python | from pathlib import Path
import os
import subprocess
import sys
import tempfile
import pytest
def set_argv(*args: str):
    """Replace everything after the script name in sys.argv with `args`.

    Mutates the existing list in place, so any code holding a reference
    to sys.argv observes the change too.
    """
    sys.argv[1:] = list(args)
# Fixtures resolving the test-image directories that live next to this file;
# each directory exercises a different indexing/search scenario.

@pytest.fixture
def test_images_dir():
    return Path(__file__).parent / "images"


@pytest.fixture
def test_empty_dir():
    return Path(__file__).parent / "empty_directory"


@pytest.fixture
def test_dir_with_nested_directories():
    return Path(__file__).parent / "images nested directories"


@pytest.fixture
def test_dir_with_raw_images():
    return Path(__file__).parent / "images raw"


@pytest.fixture
def test_dir_with_unicode_filenames():
    return Path(__file__).parent / "images unicode"
def _assert_output_snapshot(
    images_dir: Path, request: pytest.FixtureRequest, capfd: pytest.CaptureFixture[str], encoding: str | None = None
):
    """Compare captured stdout against a per-test snapshot file.

    Output is normalized (image-dir prefix replaced with a placeholder,
    path separators unified, CRLF -> LF) so one snapshot works across OSes.
    If the snapshot file does not exist yet, it is created from the current
    output before the comparison runs (so a first run always passes).
    """
    out, _ = capfd.readouterr()
    # One snapshot file per test, named after the pytest node.
    snapshot_path = Path(__file__).parent / "output_snapshots" / f"{request.node.name}.txt"
    snapshot = (
        out.replace(str(images_dir) + os.path.sep, "<test_images_dir>")
        .replace("./", "<test_images_dir>")
        .replace("." + os.path.sep, "<test_images_dir>")
        .replace(os.path.sep, "/")
        .replace("\r\n", "\n")
        # Stripping the BOM marker we are adding on Windows systems when the output is being piped to a file.
        # Otherwise, the output won't be encoded correctly.
    ).lstrip("\ufeff")
    if not snapshot_path.exists():
        # First run for this test: record the current output as the snapshot.
        snapshot_path.write_text(snapshot)
    assert snapshot == snapshot_path.read_text(encoding=encoding)
# Yield-style fixtures that run the snapshot comparison *after* the test body
# finishes, each bound to the matching image-directory fixture.

@pytest.fixture
def assert_output_snapshot(test_images_dir: Path, request: pytest.FixtureRequest, capfd: pytest.CaptureFixture[str]):
    yield
    _assert_output_snapshot(test_images_dir, request, capfd)


@pytest.fixture
def assert_output_snapshot_nested_directories(
    test_dir_with_nested_directories: Path,
    request: pytest.FixtureRequest,
    capfd: pytest.CaptureFixture[str],
):
    yield
    _assert_output_snapshot(test_dir_with_nested_directories, request, capfd)


@pytest.fixture
def assert_output_snapshot_raw_images(
    test_dir_with_raw_images: Path,
    request: pytest.FixtureRequest,
    capfd: pytest.CaptureFixture[str],
):
    yield
    _assert_output_snapshot(test_dir_with_raw_images, request, capfd)


@pytest.fixture
def assert_output_snapshot_unicode_filepaths(
    test_dir_with_unicode_filenames: Path, request: pytest.FixtureRequest, capfd: pytest.CaptureFixture[str]
):
    yield
    # Snapshots for unicode filepaths are read back as utf-8-sig.
    _assert_output_snapshot(test_dir_with_unicode_filenames, request, capfd, "utf-8-sig")
def execute_query(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch, *args: str):
    """Run rclip with `args` against `test_images_dir` using a throwaway data dir.

    When RCLIP_TEST_RUN_SYSTEM_RCLIP is set, the system-installed `rclip`
    binary is exercised in a subprocess (presumably to validate packaged
    builds — confirm against CI config); otherwise rclip's `main()` runs
    in-process so capfd can capture its output.
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        run_system_rclip = os.getenv("RCLIP_TEST_RUN_SYSTEM_RCLIP")
        if run_system_rclip:
            completed_run = subprocess.run(
                ["rclip", *args],
                cwd=test_images_dir,
                # Clear the flag for the child so nested invocations run in-process.
                env={**os.environ, "RCLIP_DATADIR": tmpdirname, "RCLIP_TEST_RUN_SYSTEM_RCLIP": ""},
            )
            if completed_run.returncode != 0:
                # Mirror the in-process behaviour, where main() raises SystemExit.
                raise SystemExit(completed_run.returncode)
        else:
            from rclip.main import main

            monkeypatch.setenv("RCLIP_DATADIR", tmpdirname)
            monkeypatch.chdir(test_images_dir)
            set_argv(*args)
            main()
# ---- End-to-end search tests ----------------------------------------------
# Each test runs a full index-and-search cycle via execute_query(); the
# snapshot fixture (applied through usefixtures) then diffs stdout against
# output_snapshots/<test name>.txt after the test body returns.

@pytest.mark.usefixtures("assert_output_snapshot")
def test_search(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_images_dir, monkeypatch, "kitty")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_search_webp(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    # this test result snapshot should contain a webp image
    execute_query(test_images_dir, monkeypatch, "tree")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_search_png(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    # this test result snapshot should contain a png image
    execute_query(test_images_dir, monkeypatch, "boats on a lake")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_search_heic(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    # this test result snapshot should contain a heic image
    execute_query(test_images_dir, monkeypatch, "bee")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_repeated_searches_should_be_the_same(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_images_dir, monkeypatch, "boats on a lake")
    execute_query(test_images_dir, monkeypatch, "boats on a lake")
    execute_query(test_images_dir, monkeypatch, "boats on a lake")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_search_by_image(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_images_dir, monkeypatch, str(test_images_dir / "cat.jpg"))


@pytest.mark.usefixtures("assert_output_snapshot")
def test_search_by_image_from_url(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(
        test_images_dir,
        monkeypatch,
        "https://raw.githubusercontent.com/yurijmikhalevich/rclip/5630d6279ee94f0cad823777433d7fbeb921d19e/tests/e2e/images/cat.jpg",  # noqa
    )


@pytest.mark.usefixtures("assert_output_snapshot")
def test_search_by_non_existing_file(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    with pytest.raises(SystemExit):
        execute_query(test_images_dir, monkeypatch, "./non-existing-file.jpg")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_search_by_not_an_image(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    with pytest.raises(SystemExit):
        execute_query(test_images_dir, monkeypatch, str(test_images_dir / "not-an-image.txt"))


# Query algebra: "+"/"--add"/"-a" and "-"/"--subtract"/"-s" combine queries,
# optionally weighted with a "<multiplier>:" prefix; image paths/URLs can
# participate in the algebra alongside text queries.

@pytest.mark.usefixtures("assert_output_snapshot")
def test_add_queries(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_images_dir, monkeypatch, "kitty", "--add", "puppy", "-a", "roof", "+", "fence")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_subtract_queries(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_images_dir, monkeypatch, "kitty", "--subtract", "puppy", "-s", "roof", "-", "fence")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_add_and_subtract_queries(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_images_dir, monkeypatch, "kitty", "+", "roof", "-", "fence")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_query_multipliers(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_images_dir, monkeypatch, "kitty", "+", "2:night", "-", "0.5:fence")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_combine_text_query_with_image_query(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_images_dir, monkeypatch, str(test_images_dir / "cat.jpg"), "-", "3:cat", "+", "2:bee")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_combine_image_query_with_text_query(test_images_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_images_dir, monkeypatch, "kitty", "-", str(test_images_dir / "cat.jpg"), "+", "1.5:bee")


@pytest.mark.usefixtures("assert_output_snapshot")
def test_search_empty_dir(test_empty_dir: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_empty_dir, monkeypatch, "kitty")


@pytest.mark.usefixtures("assert_output_snapshot_nested_directories")
def test_search_dir_with_multiple_nested_directories(
    test_dir_with_nested_directories: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    execute_query(test_dir_with_nested_directories, monkeypatch, "kitty")


@pytest.mark.usefixtures("assert_output_snapshot_nested_directories")
def test_search_dir_with_deeply_nested_directories(
    test_dir_with_nested_directories: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    # output should contain a nested path to the bee image
    execute_query(test_dir_with_nested_directories, monkeypatch, "bee")


@pytest.mark.usefixtures("assert_output_snapshot_nested_directories")
def test_handles_addition_and_deletion_of_images(
    test_dir_with_nested_directories: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    execute_query(test_dir_with_nested_directories, monkeypatch, "bee")
    bee_image_path = test_dir_with_nested_directories / "misc" / "bees" / "bee.jpg"
    assert bee_image_path.exists()
    bee_image_path_copy = bee_image_path.with_name("bee_copy.jpg")
    try:
        # copy bee image
        bee_image_path_copy.write_bytes(bee_image_path.read_bytes())
        # should include bee image copy in the output snapshot
        execute_query(test_dir_with_nested_directories, monkeypatch, "bee")
        # delete bee image copy
        bee_image_path_copy.unlink()
        # should not include bee image copy in the output snapshot
        execute_query(test_dir_with_nested_directories, monkeypatch, "bee")
    finally:
        # Ensure a failed assertion cannot leave the copy behind for other tests.
        bee_image_path_copy.unlink(missing_ok=True)


# RAW support is opt-in behind --experimental-raw-support.

@pytest.mark.usefixtures("assert_output_snapshot_raw_images")
def test_ignores_raw_files_if_raw_support_is_disabled(
    test_dir_with_raw_images: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    # output should not contain any raw images
    execute_query(test_dir_with_raw_images, monkeypatch, "boat on a lake")


@pytest.mark.usefixtures("assert_output_snapshot_raw_images")
def test_ignores_raw_if_there_is_a_png_named_the_same_way_in_the_same_dir(
    test_dir_with_raw_images: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    # output should not contain "boat on a lake.ARW" image
    execute_query(test_dir_with_raw_images, monkeypatch, "--experimental-raw-support", "boat on a lake")


@pytest.mark.usefixtures("assert_output_snapshot_raw_images")
def test_can_read_arw_images(
    test_dir_with_raw_images: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    # DSC08882.ARW should be at the top of the results
    execute_query(test_dir_with_raw_images, monkeypatch, "--experimental-raw-support", "green ears of rye")


@pytest.mark.usefixtures("assert_output_snapshot_raw_images")
def test_can_read_cr2_images(
    test_dir_with_raw_images: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    # RAW_CANON_400D_ARGB.CR2 should be at the top of the results
    execute_query(test_dir_with_raw_images, monkeypatch, "--experimental-raw-support", "dragon in a cave")


@pytest.mark.usefixtures("assert_output_snapshot_unicode_filepaths")
def test_unicode_filepaths(test_dir_with_unicode_filenames: Path, monkeypatch: pytest.MonkeyPatch):
    execute_query(test_dir_with_unicode_filenames, monkeypatch, "鳥")
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
tests/unit/test_model.py | Python | import tempfile
from typing import List, cast
import open_clip
import pytest
import torch
from rclip.model import Model
def test_extract_query_multiplier():
    # A "<float>:" prefix scales the query weight; a missing or malformed
    # prefix leaves the query string untouched with a multiplier of 1.0.
    assert Model._extract_query_multiplier("1.5:cat") == (1.5, "cat")  # type: ignore
    assert Model._extract_query_multiplier("cat") == (1.0, "cat")  # type: ignore
    assert Model._extract_query_multiplier("1:cat") == (1.0, "cat")  # type: ignore
    assert Model._extract_query_multiplier("0.5:cat") == (0.5, "cat")  # type: ignore
    assert Model._extract_query_multiplier(".5:cat") == (0.5, "cat")  # type: ignore
    assert Model._extract_query_multiplier("1.:cat") == (1.0, "cat")  # type: ignore
    # Invalid number forms are kept verbatim as part of the query text.
    assert Model._extract_query_multiplier("1..:cat") == (1.0, "1..:cat")  # type: ignore
    assert Model._extract_query_multiplier("..:cat") == (1.0, "..:cat")  # type: ignore
    assert Model._extract_query_multiplier("whatever:cat") == (1.0, "whatever:cat")  # type: ignore
    assert (
        Model._extract_query_multiplier("1.5:complex and long query")  # type: ignore
        == (1.5, "complex and long query")
    )
def test_text_model_produces_the_same_vector_as_the_main_model(monkeypatch: pytest.MonkeyPatch):
    # The text-only model (visual tower stripped, see `text_model.visual is None`
    # below) must encode text identically to the full CLIP model.
    with tempfile.TemporaryDirectory() as tmpdirname:
        monkeypatch.setenv("RCLIP_DATADIR", tmpdirname)
        model = Model()
        assert model._model_var is None  # type: ignore
        assert model._model_text_var is None  # type: ignore
        model._load_model()  # type: ignore
        assert model._model_var is not None  # type: ignore
        assert model._model_var.transformer is not None  # type: ignore
        assert model._model_var.visual is not None  # type: ignore
        assert model._model_text_var is None  # type: ignore
        text_model = model._get_text_model(model._model_var)  # type: ignore
        assert text_model.transformer is not None  # type: ignore
        assert text_model.visual is None  # type: ignore
        full_model = model._model_var  # type: ignore
        assert full_model.visual is not None  # type: ignore

        def encode_text(clip_model: open_clip.CLIP, text: List[str]):
            return clip_model.encode_text(model._tokenizer(text).to(model._device))  # type: ignore

        assert torch.equal(encode_text(full_model, ["cat"]), encode_text(text_model, ["cat"]))
        assert torch.equal(encode_text(full_model, ["cat", "dog"]), encode_text(text_model, ["cat", "dog"]))
        assert torch.equal(encode_text(full_model, ["cat", "dog", "bird"]), encode_text(text_model, ["cat", "dog", "bird"]))


def test_loads_text_model_when_text_processing_only_requested_and_checkpoint_exists(monkeypatch: pytest.MonkeyPatch):
    # After the first Model() accesses `_model`, a second Model() asking only
    # for `_model_text` ends up with a text-only model (no visual tower) —
    # presumably loaded from a checkpoint cached in RCLIP_DATADIR; confirm
    # in Model's implementation.
    with tempfile.TemporaryDirectory() as tmpdirname:
        monkeypatch.setenv("RCLIP_DATADIR", tmpdirname)
        model1 = Model()
        assert model1._model_var is None  # type: ignore
        assert model1._model_text_var is None  # type: ignore
        full_model = cast(open_clip.CLIP, model1._model)  # type: ignore
        assert model1._model_var is not None  # type: ignore
        assert model1._model_var.transformer is not None  # type: ignore
        assert model1._model_var.visual is not None  # type: ignore
        assert model1._model_text_var is None  # type: ignore
        model2 = Model()
        assert model2._model_var is None  # type: ignore
        assert model2._model_text_var is None  # type: ignore
        text_model = cast(open_clip.CLIP, model2._model_text)  # type: ignore
        assert model2._model_var is None  # type: ignore
        assert model2._model_text_var is not None  # type: ignore
        assert model2._model_text_var.transformer is not None  # type: ignore
        assert model2._model_text_var.visual is None  # type: ignore
        assert model2._model_text_var == text_model  # type: ignore

        def encode_text(clip_model: open_clip.CLIP, text: List[str]):
            return clip_model.encode_text(model1._tokenizer(text).to(model1._device))  # type: ignore

        assert torch.equal(encode_text(full_model, ["cat"]), encode_text(text_model, ["cat"]))
        assert torch.equal(encode_text(full_model, ["cat", "dog"]), encode_text(text_model, ["cat", "dog"]))
        assert torch.equal(encode_text(full_model, ["cat", "dog", "bird"]), encode_text(text_model, ["cat", "dog", "bird"]))


def test_loads_full_model_when_text_processing_only_requested_and_checkpoint_doesnt_exist(
    monkeypatch: pytest.MonkeyPatch,
):
    # With a fresh data dir, requesting the text model falls back to loading
    # the full model (both towers present, `_model_text_var` stays unset).
    with tempfile.TemporaryDirectory() as tmpdirname:
        monkeypatch.setenv("RCLIP_DATADIR", tmpdirname)
        model = Model()
        assert model._model_var is None  # type: ignore
        assert model._model_text_var is None  # type: ignore
        _ = model._model_text  # type: ignore
        assert model._model_var is not None  # type: ignore
        assert model._model_var.transformer is not None  # type: ignore
        assert model._model_var.visual is not None  # type: ignore
        assert model._model_text_var is None  # type: ignore
| yurijmikhalevich/rclip | 900 | AI-Powered Command-Line Photo Search Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
benchmarks/objectnet.py | Python | # This is an image tagging benchmark run against the ObjectNet dataset https://objectnet.dev/
# The ObjectNet dataset is used because it was collected to intentionally show objects from
# new viewpoints on new backgrounds. And the results obtained on the ObjectNet dataset are
# more representative of the performance you can expect in the real world.
#
# You may need to increase the ulimit to avoid "Too many open files" error:
# `ulimit -n 1024`
#
# You may also presize the images to speed up the benchmark via (requires ImageMagick):
# `find . -name "*.png" -exec mogrify -resize 224x224\^ {} \;`
import os
from rclip.main import init_rclip
from tqdm import tqdm
import numpy as np
import tempfile
import json
# Benchmark knobs, overridable via environment variables.
DATASET_DIR = os.getenv('DATASET_DIR', os.path.join(os.path.dirname(__file__), 'datasets', 'objectnet-1.0'))
BATCH_SIZE = int(os.getenv('BATCH_SIZE', 256))
DEVICE = os.getenv('DEVICE', 'cpu')
def main(tmp_datadir: str):
    """Run the ObjectNet top-1/top-5 tagging benchmark against rclip's index."""
    if not os.path.isdir(DATASET_DIR):
        raise FileNotFoundError(f'ObjectNet dataset not found at {DATASET_DIR}')

    print(f'Using dataset: {DATASET_DIR}')
    print(f'Batch size: {BATCH_SIZE}')
    print(f'Device: {DEVICE}')

    # Index into a throwaway data dir so the benchmark never touches the user's DB.
    os.environ['RCLIP_DATADIR'] = tmp_datadir
    _, model, rclipDB = init_rclip(DATASET_DIR, BATCH_SIZE, DEVICE)

    # load objectnet directory-to-tag map from folder_to_objectnet_label.json
    with open(os.path.join(DATASET_DIR, 'mappings', 'folder_to_objectnet_label.json')) as f:
        directory_to_tag = json.load(f)

    def get_tag_from_image_path(image_path: str) -> str:
        # Ground-truth label comes from the image's parent directory name.
        return directory_to_tag[os.path.basename(os.path.dirname(image_path))]

    tag_list = list(directory_to_tag.values())
    tags = np.array(tag_list)

    # generate features for the tags
    # tag_features = model.compute_text_features(tag_list)
    tag_features = model.compute_text_features([f'photo of {tag}' for tag in tags])

    top1_match = 0
    top5_match = 0
    processed = 0
    batch = []

    def process_batch():
        # Score one batch of image vectors against all tags and update the
        # running top-1/top-5 hit counters.
        nonlocal processed, top1_match, top5_match, batch
        image_features = np.stack([np.frombuffer(image['vector'], np.float32) for image in batch])
        similarities = image_features @ tag_features.T
        # argsort is ascending, so the best matches are at the end of each row.
        ordered_similarities = np.argsort(similarities, axis=1)
        target_classes = np.array([get_tag_from_image_path(image['filepath']) for image in batch])
        top1_match += np.sum(target_classes == tags[ordered_similarities[:, -1]])
        top5_match += np.sum(np.any(target_classes.reshape(-1, 1) == tags[ordered_similarities[:, -5:]], axis=1))
        processed += len(batch)
        batch = []

    for image in tqdm(rclipDB.get_image_vectors_by_dir_path(DATASET_DIR), unit='images'):
        batch.append(image)
        if len(batch) < BATCH_SIZE:
            continue
        process_batch()
    # Don't drop the final partial batch.
    if len(batch) > 0:
        process_batch()

    print(f'Processed: {processed}')
    print(f'Top-1 accuracy: {top1_match / processed}')
    print(f'Top-5 accuracy: {top5_match / processed}')


if __name__ == '__main__':
    with tempfile.TemporaryDirectory() as tmp_dir:
        print(f'Using temporary directory: {tmp_dir}')
        main(tmp_dir)
| yurijmikhalevich/rtag | 12 | AI-Powered Command-Line Photo Tagging Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
rtag/main.py | Python | import os
from rtag import utils
from rclip.main import init_rclip
from iptcinfo3 import IPTCInfo
import numpy as np
import textwrap
import sys
from tqdm import tqdm
def get_imagenet_tags_filepath():
    """Return the path to the ImageNet labels file bundled with the package."""
    package_dir = os.path.dirname(__file__)
    return os.path.join(package_dir, 'data', 'imagenet-labels.txt')
def load_tags_from_file(path: str):
    """Read newline-separated tags from `path`, one tag per list entry.

    Decodes the file as UTF-8 explicitly (instead of the platform's locale
    encoding) so tag lists with non-ASCII labels load identically on every OS.
    A trailing newline does not produce an empty entry (splitlines()).
    """
    with open(path, 'r', encoding='utf-8') as f:
        return f.read().splitlines()
def main():
    """Tag indexed images under the current directory using CLIP similarity scores."""
    arg_parser = utils.init_arg_parser()
    args = arg_parser.parse_args()

    # Writing IPTC keywords modifies image files in place, so ask for explicit
    # confirmation unless --dry-run or --yes was given.
    if not args.dry_run and not args.yes:
        print(
            textwrap.fill(
                'NOTICE: rtag is under active development. Expect bugs and changes.'
                ' It is recommended for you to have a backup of your images before running rtag.',
                width=utils.get_terminal_text_width(),
            ) +
            '\n\n' +
            textwrap.fill(
                'rtag is about to write new tags to the metadata of the images in the curent directory.'
                ' This operation:',
                width=utils.get_terminal_text_width(),
            ) +
            '\n'
            ' - is irreversible\n'
            ' - will modify the image files\n\n'
            'Continue? [y/n]',
        )
        if input().lower() != 'y':
            sys.exit(10)

    current_directory = os.getcwd()
    # Reuse rclip's indexing pipeline; yields the CLIP model and the vector DB.
    _, model, rclipDB = init_rclip(
        current_directory,
        args.indexing_batch_size,
        # --device only exists in the parser on macOS with MPS, hence the .get().
        vars(args).get('device', 'cpu'),
        args.exclude_dir,
        args.no_indexing,
    )

    # load tags
    tags_filepath = args.tags_filepath or get_imagenet_tags_filepath()
    tags = load_tags_from_file(tags_filepath)

    # generate features for the tags
    tag_features = model.compute_text_features(tags)

    # loop over the images
    print('tagging images')
    for image in tqdm(rclipDB.get_image_vectors_by_dir_path(current_directory), unit='images'):
        image_path = image['filepath']
        image_features = np.frombuffer(image['vector'], np.float32)
        # Dot-product similarity of the image vector against every tag vector.
        similarities = image_features @ tag_features.T
        if args.dry_run:
            print(f'\n{image_path}')
        new_tags = []
        for tag, similarity in zip(tags, similarities):
            if similarity > args.threshold:
                if args.dry_run:
                    print(f'- {tag}: {similarity:.3f}')
                new_tags.append(tag)
        if not new_tags or args.dry_run:
            continue
        # force=True lets IPTCInfo handle images without an existing IPTC block.
        image_metadata = IPTCInfo(image_path, force=True)
        if args.mode == 'append':
            existing_tags = image_metadata['keywords']
            # De-duplicate while merging with any pre-existing keywords.
            image_metadata['keywords'] = [*set(existing_tags + new_tags)]
        elif args.mode == 'overwrite':
            image_metadata['keywords'] = new_tags
        else:
            raise ValueError(f'Invalid mode: {args.mode}')
        image_metadata.save()


if __name__ == '__main__':
    main()
| yurijmikhalevich/rtag | 12 | AI-Powered Command-Line Photo Tagging Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
rtag/utils.py | Python | import argparse
import os
from importlib.metadata import version
from rclip.const import IS_MACOS
from rclip.utils.helpers import is_mps_available
def positive_int_arg_type(arg: str) -> int:
    """argparse `type=` callable that accepts only positive integers."""
    value = int(arg)
    if value >= 1:
        return value
    raise argparse.ArgumentTypeError('should be >0')
def get_terminal_text_width() -> int:
    """Width to wrap help text to: terminal width minus a margin, capped at 100."""
    try:
        columns = os.get_terminal_size().columns
    except OSError:
        # Not attached to a terminal (e.g. output is piped); use the cap.
        return 100
    return min(100, columns - 2)
class HelpFormatter(argparse.RawDescriptionHelpFormatter):
    # Same as RawDescriptionHelpFormatter, but wraps help output to the
    # terminal-aware width computed by get_terminal_text_width().
    def __init__(self, prog: str, indent_increment: int = 2, max_help_position: int = 24) -> None:
        text_width = get_terminal_text_width()
        super().__init__(prog, indent_increment, max_help_position, width=text_width)
def init_arg_parser() -> argparse.ArgumentParser:
    """Build the rtag command-line argument parser."""
    parser = argparse.ArgumentParser(
        formatter_class=HelpFormatter,
        # '+' is accepted as an option prefix character too.
        prefix_chars='-+',
        description='rtag is an AI-powered command-line photo tagging tool',
        epilog='if you like rtag, checkout its sister project rclip - an AI-powered command-line photo search tool:\n' +
        ' https://github.com/yurijmikhalevich/rclip\n\n'
        'get help:\n'
        ' https://github.com/yurijmikhalevich/rtag/discussions/new/choose\n\n',
    )
    version_str = f'rtag {version("rtag")}'
    parser.add_argument('--version', '-v', action='version', version=version_str, help=f'prints "{version_str}"')
    parser.add_argument(
        '--dry-run',
        action='store_true',
        default=False,
        help='do not write any changes into the images; print them to the console instead',
    )
    parser.add_argument('--tags-filepath', help='path to the file with the newline-separated tags; default: imagenet-1k tags')
    parser.add_argument('--yes', '-y', action='store_true', default=False, help='do not ask for confirmation')
    parser.add_argument('--mode', '-m', choices=['append', 'overwrite'], default='append', help='default: append')
    parser.add_argument(
        '--threshold',
        '-t',
        type=float,
        default=0.25,
        help='tag confidence threshold; all tags with the confidence lower than this value will be ignored; default: 0.25',
    )
    parser.add_argument(
        '--no-indexing', '--skip-index', '--skip-indexing', '-n',
        action='store_true',
        default=False,
        help='allows to skip updating the index if no images were added, changed, or removed'
    )
    parser.add_argument(
        '--indexing-batch-size', '-b', type=positive_int_arg_type, default=8,
        help='the size of the image batch used when updating the search index;'
        ' larger values may improve the indexing speed a bit on some hardware but will increase RAM usage; default: 8',
    )
    parser.add_argument(
        '--exclude-dir',
        action='append',
        help='dir to exclude from search, can be used multiple times;'
        ' adding this argument overrides the default of ("@eaDir", "node_modules", ".git");'
        ' WARNING: the default will be removed in v2'
    )
    # --device is only offered on macOS machines with MPS support; elsewhere
    # callers fall back to 'cpu' (see main's vars(args).get('device', 'cpu')).
    if IS_MACOS:
        if is_mps_available():
            parser.add_argument('--device', '-d', default='mps', choices=['cpu', 'mps'],
                                help='device to run on; default: mps')
    return parser
| yurijmikhalevich/rtag | 12 | AI-Powered Command-Line Photo Tagging Tool | Python | yurijmikhalevich | Yurij Mikhalevich | Makes magic at QAWolf 🐺 |
jupyter_sshd_proxy/__init__.py | Python | import os
import shutil
import shlex
import subprocess
from typing import Any, Dict
# Per-user host key kept in the user's home directory.
HOSTKEY_PATH = os.path.expanduser('~/.ssh/jupyter_sshd_hostkey')
# Space-separated list of paths, matching sshd's AuthorizedKeysFile option format.
AUTHORIZED_KEYS_PATH = os.environ.get('JUPYTER_SSHD_PROXY_AUTHORIZED_KEYS_PATH', '.ssh/authorized_keys .ssh/authorized_keys2')
SSHD_LOG_LEVEL = os.environ.get('JUPYTER_SSHD_PROXY_LOG_LEVEL', 'INFO')
def setup_sshd() -> Dict[str, Any]:
    """jupyter-server-proxy entrypoint: build the launch config for a per-user sshd.

    Generates a per-user host key on first use and returns the command that
    jupyter-server-proxy should launch, proxied as a raw socket.

    Raises RuntimeError when no `sshd` executable is on PATH, instead of
    silently producing a command list starting with None.
    """
    if not os.path.exists(HOSTKEY_PATH):
        # Create a per-user hostkey if it does not exist
        os.makedirs(os.path.dirname(HOSTKEY_PATH), mode=0o700, exist_ok=True)
        subprocess.check_call(['ssh-keygen', '-f', HOSTKEY_PATH, '-q', '-N', ''])

    sshd_path = shutil.which('sshd')
    if sshd_path is None:
        raise RuntimeError('sshd executable not found on PATH; is openssh-server installed?')

    cmd = [
        sshd_path, '-h', HOSTKEY_PATH, '-D', '-e',
        # Intentionally have sshd ignore global config
        '-f', 'none',
        # jupyter-server-proxy substitutes {port} with the port it picked.
        '-o', 'ListenAddress 127.0.0.1:{port}',
        # Last login info is from /var/log/lastlog, which is transient in containerized systems
        '-o', 'PrintLastLog no',
        '-o', f'AuthorizedKeysFile {AUTHORIZED_KEYS_PATH}',
        '-o', f'LogLevel {SSHD_LOG_LEVEL}',
        # Default to enabling sftp
        '-o', 'Subsystem sftp internal-sftp'
    ]
    return {
        "command": cmd,
        "raw_socket_proxy": True,
        "timeout": 60,
        "launcher_entry": {"enabled": False},
    }
| yuvipanda/jupyter-sshd-proxy | 15 | Run sshd under Jupyter | Python | yuvipanda | Yuvi | 2i2c-org |
setup.py | Python | import setuptools
from os import path
# Resolve paths relative to this setup.py so builds work from any cwd.
HERE = path.abspath(path.dirname(__file__))

# Reuse the README as the PyPI long description.
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setuptools.setup(
    name="jupyter-sshd-proxy",
    version='0.3.0',
    url="https://github.com/yuvipanda/jupyter-sshd-proxy",
    author="Yuvi Panda",
    description="Run sshd under jupyter",
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=setuptools.find_packages(),
    classifiers=['Framework :: Jupyter'],
    install_requires=[
        'jupyter-server-proxy>=4.3.0'
    ],
    # Register the sshd launcher with jupyter-server-proxy's plugin system.
    entry_points={
        'jupyter_serverproxy_servers': [
            'sshd = jupyter_sshd_proxy:setup_sshd',
        ]
    }
)
| yuvipanda/jupyter-sshd-proxy | 15 | Run sshd under Jupyter | Python | yuvipanda | Yuvi | 2i2c-org |
tests/test_ssh.py | Python | import tempfile
import pexpect
import os
import shlex
import pytest
import subprocess
import secrets
import getpass
import time
import socket
from urllib.request import urlopen, Request
from urllib.error import URLError
@pytest.fixture
def random_port():
    """Get a single random port."""
    # You aren't supposed to do this but who is gonna stop me?
    # Bind to port 0 so the OS picks a free port, then release it immediately.
    with socket.socket() as sock:
        sock.bind(('', 0))
        _, port = sock.getsockname()
    return port
@pytest.fixture
def jupyter_server(random_port):
    """Start a jupyter server for the ssh tests.

    Yields (port, token, authorized_keys_path) once /api/status answers 200,
    and always tears the server process down afterwards — including when the
    startup polling or the test body raises (previously the kill/wait pair
    was skipped in those cases, leaking the subprocess).
    """
    token = secrets.token_hex(16)
    c = [
        'jupyter', 'server',
        f'--port={random_port}', f'--ServerApp.token={token}', '--ip=127.0.0.1',
        '--no-browser'
    ]
    env = os.environ.copy()

    # sshd requires that the path to the authorized keys (and every ancestor) is fully owned
    # by the user who is trying to log in (or root), and mode is not group or world writeable.
    # Since that's not necessarily true for `/tmp`, we can not put our keys there for tests.
    # Create them instead in cwd, which we assume matches this description instead. We
    # clean up after ourselves.
    dir_prefix = os.path.join(os.getcwd(), "tmp-")
    with tempfile.TemporaryDirectory(prefix=dir_prefix) as temp_dir:
        os.chmod(temp_dir, 0o700)
        authorized_keys_path = os.path.join(temp_dir, 'authorized_keys')
        subprocess.check_call(['ssh-keygen', '-f', authorized_keys_path, '-q', '-N', ''])
        env['JUPYTER_SSHD_PROXY_AUTHORIZED_KEYS_PATH'] = authorized_keys_path + '.pub'

        proc = subprocess.Popen(c, env=env)
        try:
            # Wait for server to be fully up before we yield
            req = Request(f"http://127.0.0.1:{random_port}/api/status", headers={"Authorization": f"token {token}"})
            while True:
                try:
                    resp = urlopen(req)
                    if resp.status == 200:
                        break
                except URLError as e:
                    if not isinstance(e.reason, ConnectionRefusedError):
                        raise
                print("Waiting for jupyter server to come up...")
                time.sleep(1)

            yield (random_port, token, authorized_keys_path)
        finally:
            # Always reap the server so failed tests don't leak processes.
            proc.kill()
            proc.wait()
def get_ssh_client_options(random_port, token, authorized_keys_path):
    """Build the ssh `-o` options that tunnel the connection through the
    jupyter-sshd-proxy websocket endpoint via websocat."""
    proxy_command = (
        f'ProxyCommand=websocat --binary -H="Authorization: token {token}"'
        f' asyncstdio: ws://%h:{random_port}/sshd/'
    )
    return [
        proxy_command,
        f'User={getpass.getuser()}',
        f'IdentityFile={authorized_keys_path}',
        'StrictHostKeyChecking=no'  # FIXME: Validate this correctly later
    ]
def test_ssh_command_execution(jupyter_server):
    # Non-interactive `ssh <host> <cmd>` through the websocket proxy.
    cmd = [
        'ssh',
    ] + [f"-o={o}" for o in get_ssh_client_options(*jupyter_server)] + ['127.0.0.1', 'hostname']
    proc = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    # Surface ssh's stderr in the pytest log when the assertion fails.
    print(proc.stderr)
    assert proc.stdout.decode().strip() == socket.gethostname()


def test_ssh_interactive(jupyter_server):
    # Explicitly call /bin/sh without any args, so we can run without any prompts
    cmd = [
        'ssh',
    ] + [f"-o={o}" for o in get_ssh_client_options(*jupyter_server)] + ['127.0.0.1', '/bin/sh']
    proc = pexpect.spawn(shlex.join(cmd), echo=False)
    proc.sendline('hostname')
    assert proc.readline().decode().strip() == socket.gethostname()
    proc.sendline("exit")
    proc.wait()
    assert proc.exitstatus == 0


# Test for both the sftp protocol (default on newer scp) ("-s"), and the older
# scp protocol ("-O").
@pytest.mark.parametrize("extra_scp_args", [["-s"], ["-O"]])
def test_scp(jupyter_server, extra_scp_args):
    # Copy a random payload to the server and verify it arrives intact.
    with tempfile.NamedTemporaryFile() as f, tempfile.TemporaryDirectory() as d:
        file_contents = secrets.token_hex()
        f.write(file_contents.encode())
        f.flush()
        target_path = os.path.join(d, "target")
        cmd = [
            'scp', '-v',
        ] + extra_scp_args + [f"-o={o}" for o in get_ssh_client_options(*jupyter_server)] + [
            f.name, f'127.0.0.1:{target_path}'
        ]
        proc = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        with open(target_path) as tpf:
            assert tpf.read() == file_contents
dokku.bash | Shell | #!/bin/bash
set -euo pipefail
# Dokku entrypoint, to make life easier
exec python3 -m jupyterbook_pub.app \
--JupyterBookPubApp.debug=true \
--JupyterBookPubApp.port=${PORT} \
--JupyterBookPubApp.built_sites_root=/opt/persistent/sites \
--JupyterBookPubApp.repo_checkout_root=/opt/persistent/repos
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
js/.sassrc.js | JavaScript | module.exports = {
silenceDeprecations: ['import', 'mixed-decls', 'color-functions', 'global-builtin']
}
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
js/src/App.tsx | TypeScript (TSX) | import './App.css';
import { LinkGenerator } from './LinkGenerator';
export function App() {
return (
<>
<div className='container'>
<div className='mx-auto col-8'>
<div className='text-center mt-4'>
<h1>JupyterBook.pub</h1>
<h5>Instantly build and share your JupyterBook repository wherever it is</h5>
<a href='https://github.com/yuvipanda/jupyterbook.pub/issues'>File Issues</a>
</div>
<LinkGenerator />
</div>
</div>
</>
);
}
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
js/src/LinkGenerator.tsx | TypeScript (TSX) | import { useEffect, useState } from "react";
import copy from "copy-to-clipboard";
import { Answer, resolve } from "./resolver";
import { useDebounce } from 'use-debounce';
function makeShareableLink(repoUrl: string) {
// FIXME: I am committing a cardinal sin here that makes it difficult to host this under subpaths
// but how do I get this information in here otherwise? I do not know. Forgive me for my sins
const baseUrl = window.location.origin;
return new URL("repo/" + encodeURIComponent(repoUrl) + "/", baseUrl);
}
function normalizeRepoUrl(repoUrl: string) {
// If it's a valid URL, just return it
try {
const parsedUrl = new URL(repoUrl);
return repoUrl;
} catch (error) {
if (error instanceof TypeError) {
// Invalid URL!
if (!repoUrl.startsWith("https://")) {
return "https://" + repoUrl;
} else {
return "";
}
}
throw error;
}
}
export function LinkGenerator() {
const [repoUrl, setRepoUrl] = useState<string>("");
const [shareUrl, setShareUrl] = useState<URL | null>(null);
const [resolvedRepo, setResolvedRepo] = useState<Answer | null>(null);
const [debouncedRepoUrl] = useDebounce(repoUrl, 1000);
useEffect(() => {
const validateUrl = async () => {
if (debouncedRepoUrl === "") {
setResolvedRepo(null);
return
}
const answer = await resolve(debouncedRepoUrl);
if (answer === null) {
setResolvedRepo(null);
} else {
setResolvedRepo(answer)
}
};
validateUrl();
}, [debouncedRepoUrl]);
return (
<div className="row bg-body-tertiary mt-4">
<form className="p-4">
<div className="m-3 col-12">
<div className="input-group">
<div className="form-floating">
<input type="input" className="form-control" id="repo-url" placeholder="Enter your repository URL here" onChange={e => {
const rawRepoUrl = e.target.value;
const normalizedRepoUrl = normalizeRepoUrl(rawRepoUrl);
setRepoUrl(normalizedRepoUrl);
if (normalizedRepoUrl === null) {
setShareUrl(null);
} else {
setShareUrl(makeShareableLink(normalizedRepoUrl));
}
}}></input>
<label htmlFor="repoUrl">
{(resolvedRepo === null || resolvedRepo.certainity === "DoesNotExist") ? <span>Enter your repository URL here</span> : <>
<span className="badge text-bg-secondary">{resolvedRepo.kind}</span>
{Object.entries(resolvedRepo.data).map(([key, value]) =>
<span className="mx-1" key={key}>
<code title={key}>{value}</code></span>
)}
</>
}
</label>
</div>
<button className="btn btn-primary" type="button" onClick={() => window.location.href = shareUrl.toString()} disabled={resolvedRepo === null || resolvedRepo.certainity === "DoesNotExist"}>Go!</button>
</div>
<small>Supports GitHub (Repos, <abbr title="Branches, Commits & Tags">Refs</abbr>, Action Artifacts & PRs), Public Google Drive Folders, <abbr title="On Zenodo & Dataverse">DOIs</abbr>, <a href="https://github.com/yuvipanda/repoproviders?tab=readme-ov-file#supported-repositories">and many others</a></small>
</div>
<div className="m-3 input-group">
<div className="form-floating">
<input type="input" className="form-control" id="share-url" placeholder="Shareable link to your rendered book" readOnly value={shareUrl ? shareUrl.toString() : ""}></input>
<label className="form-label">Share this link with anyone so they can see a rendered version of your JupyterBook</label>
</div>
<button className="btn btn-outline-secondary" type="button" onClick={() => copy(shareUrl?.toString())} disabled={shareUrl === null}><i className="bi bi-copy"></i></button>
</div>
</form>
</div>
)
}
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
js/src/index.html | HTML | <!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>JupyterBook.pub</title>
<link href="./index.scss" rel="stylesheet" />
</head>
<body>
<div id="root"></div>
</body>
<script src="./index.js" type="module"></script>
</html>
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
js/src/index.js | JavaScript | import { createRoot } from "react-dom/client";
import { App } from "./App";
const container = document.getElementById("root");
const root = createRoot(container)
root.render(<App />);
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
js/src/index.scss | SCSS | @import "bootstrap/scss/bootstrap";
// Update the import directory to point to it‘s location within node_modules
$bootstrap-icons-font-dir: "../node_modules/bootstrap-icons/font/fonts";
// Import the Sass files as usual
@import "bootstrap-icons/font/bootstrap-icons";
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
js/src/index.tsx | TypeScript (TSX) | import { createRoot } from 'react-dom/client';
import { StrictMode } from 'react';
import { App } from './App';
let container = document.getElementById("app")!;
let root = createRoot(container)
root.render(
<StrictMode>
<App />
</StrictMode>
);
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
js/src/resolver.ts | TypeScript | export interface Answer {
certainity: "Exists" | "MaybeExists" | "DoesNotExist"
kind: string
data: object
}
export async function resolve(question: string) : Promise<Answer | null> {
// FIXME: BaseURL support plz
const queryUrl = new URL("api/v1/resolve", window.location.origin);
queryUrl.searchParams.append("q", question);
const resp = await fetch(queryUrl);
if (!resp.ok) {
return null;
}
return (await resp.json()) as Answer;
}
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
src/jupyterbook_pub/__init__.py | Python | # Copyright (c) 2026 Yuvi
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Add a docstring here for the init module.
This might include a very brief description of the package,
its purpose, and any important notes.
"""
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
src/jupyterbook_pub/app.py | Python | from __future__ import annotations
import asyncio
import logging
import mimetypes
import os
import shutil
import socket
import sys
from pathlib import Path
from typing import Optional, override
import tornado
from cachetools import TTLCache
from jinja2 import Environment, FileSystemLoader
from repoproviders import fetch, resolve
from repoproviders.resolvers import to_json
from repoproviders.resolvers.base import DoesNotExist, Exists, MaybeExists, Repo
from ruamel.yaml import YAML
from tornado.web import HTTPError, RequestHandler, StaticFileHandler, url
from traitlets import Bool, Instance, Int, Integer, Unicode
from traitlets.config import Application
from .cache import make_checkout_cache_key, make_rendered_cache_key
# We don't have to roundtrip here, because nobody reads that YAML
yaml = YAML(typ="safe")
def random_port():
"""
Get a single random port likely to be available for listening in.
"""
sock = socket.socket()
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
def munge_jb_myst_yml(myst_yml_path: Path):
# If there's only one entry in toc, use article not book theme
with open(myst_yml_path, "r") as f:
data = yaml.load(f)
if len(data["project"]["toc"]) == 1:
data["site"]["template"] = "article-theme"
with open(myst_yml_path, "w") as f:
yaml.dump(data, f)
async def ensure_jb_root(repo_path: Path) -> Optional[Path]:
for dirname, _, filenames in repo_path.walk():
if "myst.yml" in filenames:
return dirname
# No `myst.yml` found. Let's make one
command = ["jupyter", "book", "init", "--write-toc"]
proc = await asyncio.create_subprocess_exec(
*command,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
cwd=repo_path,
)
stdout, stderr = [s.decode() for s in await proc.communicate()]
retcode = await proc.wait()
if retcode != 0:
print(stdout, file=sys.stderr)
print(stderr, file=sys.stderr)
else:
munge_jb_myst_yml(repo_path / "myst.yml")
return repo_path
async def render_if_needed(app: JupyterBookPubApp, repo: Repo, base_url: str):
repo_path = Path(app.repo_checkout_root) / make_checkout_cache_key(repo)
built_path = Path(app.built_sites_root) / make_rendered_cache_key(repo, base_url)
env = os.environ.copy()
env["BASE_URL"] = base_url
if not built_path.exists():
if not repo_path.exists():
yield f"Fetching {repo}...\n"
await fetch(repo, repo_path)
yield f"Fetched {repo}"
jb_root = await ensure_jb_root(repo_path)
if not jb_root:
# FIXME: Better errors plz
raise ValueError("No myst.yml found in repo")
# Explicitly pass in a random port, as otherwise jupyter-book will always
# try to listen on port 5000 and hang forever if it can't.
command = ["jupyter", "book", "build", "--html", "--port", str(random_port())]
proc = await asyncio.create_subprocess_exec(
*command,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
cwd=jb_root,
env=env,
)
stdout, stderr = [s.decode() for s in await proc.communicate()]
retcode = await proc.wait()
yield stdout
yield stderr
shutil.copytree(jb_root / "_build/html", built_path)
class BaseHandler(RequestHandler):
def initialize(self, app: JupyterBookPubApp):
self.app = app
self.log = app.log
class RepoHandler(BaseHandler):
def get_spec_from_request(self, prefix):
"""
Re-extract spec from request.path.
Get the original, raw spec, without tornado's unquoting.
This is needed because tornado converts 'foo%2Fbar/ref' to 'foo/bar/ref'.
"""
idx = self.request.path.index(prefix)
spec = self.request.path[idx + len(prefix) :]
return spec
async def get(self, repo_spec: str, path: str):
spec = self.get_spec_from_request("/repo/")
raw_repo_spec, _ = spec.split("/", 1)
last_answer = await self.app.resolve(repo_spec)
if last_answer is None:
raise tornado.web.HTTPError(404, f"{repo_spec} could not be resolved")
match last_answer:
case Exists(repo) | MaybeExists(repo):
# In the future, we can explicitly specify full URL here so we
# can support other kinds of domains too
base_url = f"/repo/{raw_repo_spec}"
built_path = Path(self.app.built_sites_root) / make_rendered_cache_key(
repo, base_url
)
if not built_path.exists():
async for line in render_if_needed(self.app, repo, base_url):
print(line)
# This is a *sure* path traversal attack
full_path = built_path / path
if full_path.is_dir():
full_path = full_path / "index.html"
mimetype, encoding = mimetypes.guess_type(full_path)
if encoding == "gzip":
mimetype = "application/gzip"
if mimetype:
self.set_header("Content-Type", mimetype)
with open(full_path, "rb") as f:
# hard code the chunk size for now
# 64 * 1024 is what tornado uses https://github.com/tornadoweb/tornado/blob/e14929c305019fd494c74934445f0b72af4f98ab/tornado/web.py#L3020
while True:
chunk = f.read(64 * 1024)
if not chunk:
break
self.write(chunk)
case DoesNotExist(repo):
raise tornado.web.HTTPError(404, f"{repo} could not be resolved")
class ResolveHandler(BaseHandler):
async def get(self):
question = self.get_query_argument("q")
if not question:
raise HTTPError(400, "No question provided")
answer = await self.app.resolve(question)
if answer is None:
raise HTTPError(404, "Could not resolve {question}")
self.set_header("Content-Type", "application/json")
self.write(to_json(answer))
class JupyterBookPubApp(Application):
debug = Bool(True, help="Turn on debug mode", config=True)
port = Int(
int(os.environ.get("PORT", "9200")), help="Port to listen on", config=True
)
repo_checkout_root = Unicode(
str(Path(__file__).parent.parent.parent / "repos"),
help="Path to check out repos to. Created if it doesn't exist",
config=True,
)
built_sites_root = Unicode(
str(Path(__file__).parent.parent.parent / "built_sites"),
help="Path to copy built files to. Created if it doesn't exist",
config=True,
)
resolver_cache_ttl_seconds = Integer(
10 * 60,
help="How long to cache successful resolver results (in seconds)",
config=True,
)
resolver_cache_max_size = Integer(
128, help="Max number of successful resolver results to cache", config=True
)
resolver_cache = Instance(klass=TTLCache)
async def resolve(self, question: str):
if question in self.resolver_cache:
last_answer = self.resolver_cache[question]
self.log.debug(f"Found {question} in cache")
else:
answers = await resolve(question, True)
if not answers:
return None
last_answer = answers[-1]
self.resolver_cache[question] = last_answer
self.log.info(f"Resolved {question} to {last_answer}")
return last_answer
@override
def initialize(self, argv=None) -> None:
super().initialize(argv)
if self.debug:
self.log_level = logging.DEBUG
tornado.options.options.logging = logging.getLevelName(self.log_level)
tornado.log.enable_pretty_logging()
self.log = tornado.log.app_log
self.templates_loader = Environment(
loader=FileSystemLoader(Path(__file__).parent / "templates")
)
os.makedirs(self.built_sites_root, exist_ok=True)
os.makedirs(self.repo_checkout_root, exist_ok=True)
self.resolver_cache = TTLCache(
maxsize=self.resolver_cache_max_size, ttl=10 * 60
)
async def start(self) -> None:
self.initialize()
self.web_app = tornado.web.Application(
[
url(
r"/api/v1/resolve",
ResolveHandler,
{"app": self},
name="resolve-api",
),
url(r"/repo/(.*?)/(.*)", RepoHandler, {"app": self}, name="repo"),
(
"/(.*)",
StaticFileHandler,
{
"path": str(Path(__file__).parent / "generated_static"),
"default_filename": "index.html",
},
),
],
debug=self.debug,
)
self.web_app.listen(self.port)
await asyncio.Event().wait()
if __name__ == "__main__":
app = JupyterBookPubApp()
asyncio.run(app.start())
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
src/jupyterbook_pub/cache.py | Python | """
Various caching utilities.
Since we serve a lot of static files, we will rely heavily on caching
to make sure we can serve a ton of users very cheaply.
"""
import hashlib
import json
from base64 import urlsafe_b64encode
from repoproviders.resolvers.base import MaybeExists, Repo
from repoproviders.resolvers.serialize import to_dict
def make_rendered_cache_key(repo: Repo, base_url: str) -> str:
answer = MaybeExists(repo)
key = {"answer": to_dict(answer), "base_url": base_url}
return urlsafe_b64encode(hashlib.sha256(json.dumps(key).encode()).digest()).decode()
def make_checkout_cache_key(repo: Repo) -> str:
answer = MaybeExists(repo)
return urlsafe_b64encode(
hashlib.sha256(json.dumps(to_dict(answer)).encode()).digest()
).decode()
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
src/jupyterbook_pub/templates/home.html | HTML | <!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>JupyterBook.pub</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.8/dist/css/bootstrap.min.css" rel="stylesheet"
integrity="sha384-sRIl4kxILFvY47J16cr9ZwB07vP4J8+LH7qKQnuqkuIAvNWLzeN8tE5YBujZqJLB" crossorigin="anonymous">
</head>
<body>
<div class="container">
<div class="form">
<h1>jupyterbook.pub</h1>
<h2>Instantly Render your JupyterBook Repos</h2>
<div class="input-group mb-3">
<input type="text" id="repoUrl" class="form-control" placeholder="Your Repo URL" aria-label="Your Repo URL"
>
<button class="btn btn-outline-secondary" type="button" id="go">Go</button>
</div>
<div class="mb-3">
<input type="url" class="form-control" disabled id="shareUrl" placeholder="Enter repo to get URL">
</div>
</div>
</div>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.8/dist/js/bootstrap.bundle.min.js"
integrity="sha384-FKyoEForCGlyvwx9Hj09JcYn3nv7wiPVlz7YYwJrWVcXK/BmnVDxM+D2scQbITxI"
crossorigin="anonymous"></script>
</body>
<script src="/static/index.js"></script>
</html>
| yuvipanda/jupyterbook.pub | 0 | Python | yuvipanda | Yuvi | 2i2c-org | |
src/repoproviders/__init__.py | Python | # Copyright (c) 2024 Yuvi Panda
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .fetchers.fetcher import fetch # noqa: F401
from .resolvers.resolver import resolve # noqa: F401
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/__main__.py | Python | import argparse
import asyncio
import logging
import sys
from pathlib import Path
from repoproviders.resolvers.base import DoesNotExist, Exists, MaybeExists
from .fetchers import fetch
from .resolvers import resolve
async def main():
argparser = argparse.ArgumentParser()
argparser.add_argument(
"--log-level",
help="Level of logs to print",
choices=["warning", "debug", "info"],
default="warning",
)
subparsers = argparser.add_subparsers(required=True, dest="command")
resolve_subparser = subparsers.add_parser("resolve")
resolve_subparser.add_argument("question", help="What should we try to resolve?")
resolve_subparser.add_argument(
"--no-recurse",
help="Do not recurse, return after first answer",
action="store_true",
)
fetch_subparser = subparsers.add_parser("fetch")
fetch_subparser.add_argument("question", help="What should we try to fetch?")
fetch_subparser.add_argument("output_dir", help="Where to output the fetched repo")
args = argparser.parse_args()
log = logging.getLogger()
log.setLevel(args.log_level.upper())
log.addHandler(logging.StreamHandler())
if args.command == "resolve":
answers = await resolve(args.question, not args.no_recurse, log)
if answers:
for a in answers:
print(a)
else:
print(f"Unable to resolve {args.question}")
elif args.command == "fetch":
output_dir = Path(args.output_dir)
# output_dir must not exist, or be an empty directory
if output_dir.exists():
if output_dir.is_file():
print(
f"{output_dir} should either not exist, or be an empty directory. Is a file",
file=sys.stderr,
)
sys.exit(1)
else:
if any(output_dir.iterdir()):
print(
f"{output_dir} should either not exist, or be an empty directory. Is a non-empty directory",
file=sys.stderr,
)
sys.exit(1)
else:
output_dir.mkdir(parents=True)
answers = await resolve(args.question, True, log)
if answers:
last_answer = answers[-1]
match last_answer:
case Exists(repo) | MaybeExists(repo):
await fetch(repo, Path(args.output_dir), log)
case DoesNotExist(kind, message):
print(
f"{args.question} detected to be of kind {kind.__name__} but does not exist: {message}",
file=sys.stderr,
)
sys.exit(1)
else:
print(f"Unable to resolve {args.question}")
def cli_entrypoint():
asyncio.run(main())
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/fetchers/__init__.py | Python | from .fetcher import fetch # noqa: F401
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/fetchers/base.py | Python | from logging import Logger
from pathlib import Path
from typing import Any, Protocol
class SupportsFetch(Protocol):
async def fetch(self, repo: Any, output_dir: Path, log: Logger):
pass
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/fetchers/ckan.py | Python | from logging import Logger
from pathlib import Path
import aiohttp
from yarl import URL
from ..resolvers.repos import CKANDataset
from ..utils import download_file
class CKANFetcher:
async def fetch(self, repo: CKANDataset, output_dir: Path, log: Logger):
api_url = (repo.installationUrl / "api/3/action/package_show").with_query(
id=repo.dataset_id
)
async with aiohttp.ClientSession() as session:
resp = await session.get(api_url)
# FIXME: Handle this is not found correctly
resp.raise_for_status()
data = await resp.json()
resources = data["result"]["resources"]
for r in resources:
file_download_url = URL(r["url"])
# There isn't a consistent naming situation for files. We try the last part of file url as filename
parts = [s for s in file_download_url.path.split("/") if s]
file_name = parts[-1]
# FIXME: Do we support folder structures?
file_path = file_name
await download_file(
session, file_download_url, output_dir / file_path, log
)
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/fetchers/dataverse.py | Python | import os
from logging import Logger
from pathlib import Path
import aiohttp
from ..resolvers.doi import DataverseDataset
from ..utils import download_file
class DataverseFetcher:
async def fetch(self, repo: DataverseDataset, output_dir: Path, log: Logger):
files_url = repo.installationUrl / "api/datasets/:persistentId"
files_url = files_url.with_query(persistentId=repo.persistentId)
async with aiohttp.ClientSession() as session:
resp = await session.get(files_url)
# FIXME: Do we assume this persistent_id has been *verified* to exist?
# What kind of guarantee can our resolver actually give us? hmm
resp.raise_for_status()
files = (await resp.json())["data"]["latestVersion"]["files"]
for f in files:
file_download_url = (
repo.installationUrl / f"api/access/datafile/{f['dataFile']['id']}"
)
file_download_url = file_download_url.with_query(format="original")
file_name = f["dataFile"].get("originalFileName", f["label"])
file_path = Path(os.path.join(f.get("directoryLabel", ""), file_name))
await download_file(
session, file_download_url, output_dir / file_path, log
)
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/fetchers/fetcher.py | Python | import inspect
import types
import typing
from logging import Logger, getLogger
from pathlib import Path
from typing import Optional
from repoproviders.fetchers.ckan import CKANFetcher
from repoproviders.fetchers.hydroshare import HydroshareFetcher
from repoproviders.fetchers.rclone import GoogleDriveFolderFetcher
from repoproviders.fetchers.zenodo import ZenodoFetcher
from repoproviders.resolvers.base import Repo
from .base import SupportsFetch
from .dataverse import DataverseFetcher
from .figshare import FigshareFetcher
from .git import GitHubActionArtifactFetcher, ImmutableGitFetcher
ALL_FETCHERS: list[SupportsFetch] = [
ImmutableGitFetcher(),
DataverseFetcher(),
GitHubActionArtifactFetcher(),
FigshareFetcher(),
ZenodoFetcher(),
GoogleDriveFolderFetcher(),
HydroshareFetcher(),
CKANFetcher(),
]
FETCHER_BY_TYPE: dict[type, SupportsFetch] = {}
for R in ALL_FETCHERS:
annotations = inspect.get_annotations(R.fetch)
supported_types = annotations["repo"]
if isinstance(supported_types, type):
# Only supports a single type
FETCHER_BY_TYPE[supported_types] = R
elif isinstance(supported_types, types.UnionType):
for t in typing.get_args(supported_types):
FETCHER_BY_TYPE[t] = R
async def fetch(question: Repo, output_dir: Path, log: Optional[Logger] = None):
if log is None:
# Use default named logger
log = getLogger("repoproviders")
fetcher = FETCHER_BY_TYPE[type(question)]
await fetcher.fetch(question, output_dir, log)
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/fetchers/figshare.py | Python | from logging import Logger
from pathlib import Path
from tempfile import NamedTemporaryFile
from zipfile import ZipFile
import aiohttp
from ..resolvers.doi import ImmutableFigshareDataset
from ..utils import FIGSHARE_PUBLIC_TOKEN, download_file
class FigshareFetcher:
async def fetch(
self, repo: ImmutableFigshareDataset, output_dir: Path, log: Logger
):
download_url = (
repo.installation.apiUrl
/ "articles"
/ str(repo.articleId)
/ "versions"
/ str(repo.version)
/ "download"
)
async with aiohttp.ClientSession(
headers={"Authorization": f"token {FIGSHARE_PUBLIC_TOKEN}"}
) as session:
with NamedTemporaryFile() as temp_file:
await download_file(session, download_url, Path(temp_file.name), log)
compressed_file = ZipFile(temp_file.name)
compressed_file.extractall(output_dir)
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/fetchers/git.py | Python | from logging import Logger
from pathlib import Path
from subprocess import CalledProcessError
from tempfile import NamedTemporaryFile
from zipfile import ZipFile
import aiohttp
from yarl import URL
from repoproviders.resolvers.repos import GitHubActionArtifact
from ..resolvers.git import ImmutableGit
from ..utils import GITHUB_PUBLIC_PAT, download_file, exec_process
class ImmutableGitFetcher:
async def fetch(self, repo: ImmutableGit, output_dir: Path, log: Logger):
# Assume output_dir is empty
command = [
"git",
"clone",
"--filter=tree:0",
"--recurse-submodules",
repo.repo,
str(output_dir),
]
retcode, stdout, stderr = await exec_process(command, log)
log.info(f"Cloned git repository {repo.repo} to {output_dir}")
if retcode != 0:
# FIXME: Raise a more helpful error?
raise CalledProcessError(retcode, command, stdout, stderr)
command = ["git", "checkout", repo.ref]
retcode, stdout, stderr = await exec_process(command, log, cwd=str(output_dir))
log.info(f"Checked out ref {repo.ref}")
if retcode != 0:
# FIXME: Raise a more helpful error?
raise CalledProcessError(retcode, command, stdout, stderr)
class GitHubActionArtifactFetcher:
async def fetch(self, repo: GitHubActionArtifact, output_dir: Path, log: Logger):
# Assume this standard archive URL holds
# If this starts to fail, we shall make an additional GitHub API request
# to this URL: https://docs.github.com/en/rest/actions/artifacts?apiVersion=2022-11-28#get-an-artifact
# FIXME: Support other installations
download_url = (
URL("https://api.github.com/repos/")
/ repo.account
/ repo.repo
/ "actions/artifacts"
/ str(repo.artifact_id)
/ "zip"
)
async with aiohttp.ClientSession(
headers={"Authorization": f"Bearer {GITHUB_PUBLIC_PAT}"}
) as session:
with NamedTemporaryFile() as temp_file:
await download_file(session, download_url, Path(temp_file.name), log)
compressed_file = ZipFile(temp_file.name)
compressed_file.extractall(output_dir)
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/fetchers/hydroshare.py | Python | from logging import Logger
from pathlib import Path
from tempfile import NamedTemporaryFile
from zipfile import ZipFile
import aiohttp
from yarl import URL
from ..resolvers.repos import HydroshareDataset
from ..utils import download_file
class HydroshareFetcher:
async def fetch(self, repo: HydroshareDataset, output_dir: Path, log: Logger):
# This sometimes takes a while, as the zip file is dynamically generated on first GET
# However, aiohttp seems to handle this behavior just fine no problem
download_url = (
URL("https://www.hydroshare.org/django_irods/download/bags/")
/ repo.resource_id
)
async with aiohttp.ClientSession() as session:
with NamedTemporaryFile() as temp_file:
await download_file(session, download_url, Path(temp_file.name), log)
compressed_file = ZipFile(temp_file.name)
# We want to only extract files from the data/contents
contents_path = f"{repo.resource_id}/data/contents/"
for file_info in compressed_file.infolist():
if file_info.filename.startswith(contents_path):
target_file_name = file_info.filename[len(contents_path) :]
file_info.filename = target_file_name
compressed_file.extract(file_info, output_dir)
log.debug(f"Extracted {target_file_name} to {output_dir}")
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/fetchers/rclone.py | Python | import json
import subprocess
from logging import Logger
from pathlib import Path
from shutil import which
from tempfile import NamedTemporaryFile
from repoproviders.resolvers.rclone import GoogleDriveFolder, ImmutableGoogleDriveFolder
from ..utils import GCP_PUBLIC_SERVICE_ACCOUNT_KEY
class GoogleDriveFolderFetcher:
    """
    Fetch the contents of a Google Drive folder into a local directory using rclone.
    """

    async def fetch(
        self,
        repo: ImmutableGoogleDriveFolder | GoogleDriveFolder,
        output_dir: Path,
        log: Logger,
    ):
        """
        Copy all files from the Drive folder identified by `repo.id` into
        `output_dir`. Requires the `rclone` binary to be on PATH.
        """
        if not which("rclone"):
            raise FileNotFoundError(
                "rclone must be installed to fetch folders from Google Drive"
            )

        # FIXME: We don't actually check the dirhash of the ImmutableGoogleDriveFolder, so it may have
        # mutated since we asked for things to be done to it. I don't exactly know what to do about that.
        with NamedTemporaryFile("w") as service_account_key:
            json.dump(GCP_PUBLIC_SERVICE_ACCOUNT_KEY, service_account_key)
            # flush so the rclone subprocess sees the complete key file on disk
            service_account_key.flush()

            # On-the-fly rclone remote for Google Drive, restricted to read-only scope
            connection_string = f":drive,scope=drive.readonly,service_account_file={service_account_key.name}:"

            rclone_cmd = [
                "rclone",
                "copy",
                connection_string,
                "--drive-root-folder-id",
                repo.id,
                str(output_dir),
            ]

            subprocess.check_call(rclone_cmd)
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/fetchers/zenodo.py | Python | import os
import shutil
from logging import Logger
from pathlib import Path
from tempfile import NamedTemporaryFile
from zipfile import ZipFile
import aiohttp
from ..resolvers.doi import ZenodoDataset
from ..utils import download_file
class ZenodoFetcher:
    """
    Fetch the files of a Zenodo (or other Invenio) record into a local directory.
    """

    async def fetch(self, repo: ZenodoDataset, output_dir: Path, log: Logger):
        """
        Download all files of record `repo.recordId` into `output_dir`.

        A record consisting of a single zip file is extracted in place; if the
        zip contains exactly one top-level directory, its contents are hoisted
        up into `output_dir`.
        """
        files_url = repo.installationUrl / "api/records" / repo.recordId / "files"

        async with aiohttp.ClientSession() as session:
            resp = await session.get(files_url)
            # FIXME: Do we assume this persistent_id has been *verified* to exist?
            # What kind of guarantee can our resolver actually give us? hmm
            resp.raise_for_status()
            data = await resp.json()

            # Handle case when we only have one entry, and it's a zip file
            if len(data["entries"]) == 1:
                # Only do this if mimetype is zip
                entry = data["entries"][0]
                if entry["mimetype"] == "application/zip":
                    download_url = entry["links"]["content"]
                    with NamedTemporaryFile() as temp_file:
                        await download_file(
                            session, download_url, Path(temp_file.name), log
                        )
                        compressed_file = ZipFile(temp_file.name)
                        compressed_file.extractall(output_dir)
                        log.debug(f"Extracted {temp_file.name} to {output_dir}")

                        # If there's only one subdirectory, move its contents to the output directory
                        subdirs = list(output_dir.iterdir())
                        if len(subdirs) == 1:
                            # Just recursively move everything inside the subdir, rather than copy them
                            # This is safer (and probably faster) than using shutil.copytree
                            for d in subdirs[0].iterdir():
                                shutil.move(d, output_dir)
                            os.rmdir(subdirs[0])

                    # We're all done, no more processing to do
                    return

            # For cases with more than one entry, or if the one entry isn't a zip file
            for entry in data["entries"]:
                file_download_url = entry["links"]["content"]
                # FIXME: Handle path traversal attacks here?
                file_name = entry["key"]
                # Zenodo doesn't support directory structures,
                # so we don't need to handle nesting https://support.zenodo.org/help/en-gb/1-upload-deposit/74-can-i-upload-folders-directories
                await download_file(
                    session, file_download_url, output_dir / file_name, log
                )
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/resolvers/__init__.py | Python | from .resolver import resolve # noqa: F401
from .serialize import to_json # noqa: F401
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/resolvers/base.py | Python | from dataclasses import Field, dataclass
from logging import Logger
from typing import Any, ClassVar, Protocol, runtime_checkable
@dataclass(frozen=True)
class DoesNotExist[T: Repo]:
    """
    Resolver recognizes this question, but while resolving determined it does not exist
    """

    # womp womp, we can't really retrieve the value of T at runtime so gotta still pass
    # that in.
    # FIXME: See if we can enforce somehow that kind is also a `Repo`
    kind: type
    # Human-readable explanation of why the repo was determined not to exist
    message: str
@dataclass(frozen=True)
class Exists[T: Repo]:
    """
    Answer asserting that `repo` has been verified (e.g. via an API call) to exist.
    """

    repo: T
@dataclass(frozen=True)
class MaybeExists[T: Repo]:
    """
    Answer for a `repo` that is well-formed but has not been verified to exist.
    """

    repo: T
@runtime_checkable
class Repo(Protocol):
    """
    Represents a Repository.

    Implementations are frozen dataclasses carrying an `immutable` marker.
    """

    # Set to true if the repo is identified to be immutable. This usually means one of:
    #
    # 1. The identifier used to refer to the repo is guaranteed to not change (like with zenodo record ids)
    # 2. A version identifier is included as part of the definition (like with Figshare)
    # 3. A content identifiable hash is included as part of the definition (like with ImmutableGit)
    immutable: bool

    # Must also be a dataclass
    __dataclass_fields__: ClassVar[dict[str, Field[Any]]]
class SupportsResolve(Protocol):
    """
    Protocol for resolvers: take a question of some type, return an answer,
    or None when the question is not recognized by this resolver.
    """

    async def resolve(
        self, question: Any, log: Logger
    ) -> Exists | DoesNotExist | MaybeExists | None:
        pass
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/resolvers/doi.py | Python | import os
from logging import Logger
import aiohttp
from yarl import URL
from ..utils import FIGSHARE_PUBLIC_TOKEN
from .base import DoesNotExist, Exists, MaybeExists
from .repos import (
DataverseDataset,
DataverseURL,
Doi,
FigshareDataset,
FigshareURL,
ImmutableFigshareDataset,
ZenodoDataset,
ZenodoURL,
)
class DoiResolver:
    """
    A *handle* resolver, called a Doi resolver because that's the most common handle
    """

    async def resolve(
        self, question: URL, log: Logger
    ) -> Exists[Doi] | DoesNotExist[Doi] | None:
        """
        Resolve a DOI / handle (doi:, hdl:, doi.org, hdl.handle.net URLs, or a
        bare `10.*` path) into the URL it points to.
        """
        # Check if this is a valid doi or handle
        if question.scheme in ("doi", "hdl"):
            doi = question.path
        elif question.scheme in ("http", "https") and question.host in (
            "doi.org",
            "www.doi.org",
            "hdl.handle.net",
        ):
            doi = question.path.lstrip("/")
        elif question.scheme == "" and question.path.startswith("10."):
            # Handles in general are defined as <naming-authority>/<handle> (https://datatracker.ietf.org/doc/html/rfc3650#section-3)
            # however, this is far too broad, as even a file path like `hello/world` will satisfy it. Eventually, could find a list
            # of registered handle prefixes to validate the `<naming-authority>` part. In the meantime, we only check for a `10.` prefix,
            # which is for the most popular kind of handle - a DOI.
            # This is only for convenience - in cases when the user pastes in a DOI but doesn't actually say doi:.
            doi = question.path
        else:
            # Not a DOI or handle
            return None

        # TODO: Make the handle resolver we use configurable
        api_url = f"https://doi.org/api/handles/{doi}"

        # FIXME: Reuse this session across resolutions
        async with aiohttp.ClientSession() as session:
            resp = await session.get(api_url)
            if resp.status == 404:
                # This is a validly *formatted* DOI, but it's not actually a registered DOI
                return DoesNotExist(Doi, f"{doi} is not a registered DOI or handle")
            elif resp.status == 200:
                data = await resp.json()
                # Pick the first URL we find from the doi response
                for v in data["values"]:
                    if v["type"] == "URL":
                        return Exists(Doi(URL(v["data"]["value"])))

                # No URLs found for this DOI, so we treat it as DoesNotExist
                return DoesNotExist(Doi, f"{doi} does not point to any URL")
            else:
                # Some other kind of failure, let's propagate our error up
                log.error(f"Error resolving doi {doi} through {api_url}")
                resp.raise_for_status()

        # This should not actually be reached, but the explicit return None makes mypy happy
        return None
class DataverseResolver:
    """
    Resolve URLs inside a Dataverse installation into the dataset they belong to.
    """

    async def get_dataset_id_from_file_id(
        self, installation_url: URL, file_id: str
    ) -> str | None:
        """
        Return the persistent_id (DOI) of a dataset that a given file_id (int or doi) belongs to

        Returns None if no file with that id exists.
        """
        if file_id.isdigit():
            # the file_id is an integer, rather than a persistent id (DOI)
            api_url = installation_url / "api/files" / file_id
            api_url = api_url.with_query(returnDatasetVersion="true")
        else:
            # the file_id is a doi itself
            api_url = installation_url / "api/files/:persistentId"
            api_url = api_url.with_query(
                returnDatasetVersion="true", persistentId=file_id
            )

        async with aiohttp.ClientSession() as session:
            resp = await session.get(api_url)
            if resp.status == 404:
                return None
            else:
                # Any other error should propagate
                resp.raise_for_status()

            data = (await resp.json())["data"]
            return data["datasetVersion"]["datasetPersistentId"]

    async def resolve(
        self, question: DataverseURL, log: Logger
    ) -> Exists[DataverseDataset] | DoesNotExist[DataverseDataset] | None:
        """
        Map dataset, file and citation URLs in a Dataverse installation to the
        DataverseDataset they belong to.
        """
        url = question.url
        path = url.path
        qs = url.query

        # https://dataverse.harvard.edu/citation?persistentId=doi:10.7910/DVN/TJCLKP
        if path.startswith("/citation"):
            persistent_id = qs["persistentId"]
            # We don't know if this is a dataset or file id yet
            verified_dataset = False
        # https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/TJCLKP
        elif path.startswith("/dataset.xhtml"):
            # https://dataverse.harvard.edu/api/access/datafile/3323458
            persistent_id = qs["persistentId"]
            # We haven't verified this dataset exists
            verified_dataset = False
        elif path.startswith("/api/access/datafile"):
            # What we have here is an entity id, which we can use to get a persistentId
            file_id = os.path.basename(path)
            pid_maybe = await self.get_dataset_id_from_file_id(
                question.installation, file_id
            )
            if pid_maybe is None:
                return DoesNotExist(
                    DataverseDataset,
                    f"No file with id {file_id} found in dataverse installation {question.installation}",
                )
            else:
                persistent_id = pid_maybe
                # We know persistent_id is a dataset, because we asked the API!
                verified_dataset = True
        elif path.startswith("/file.xhtml"):
            file_id = qs["persistentId"]
            pid_maybe = await self.get_dataset_id_from_file_id(
                question.installation, file_id
            )
            if pid_maybe is None:
                return DoesNotExist(
                    DataverseDataset,
                    f"No file with id {file_id} found in dataverse installation {question.installation}",
                )
            else:
                persistent_id = pid_maybe
                # We know persistent_id is a dataset, because we asked the API!
                verified_dataset = True
        else:
            # This URL is not actually a dataverse dataset URL
            return None

        if not verified_dataset:
            # citations can be either datasets or files - we don't know. The most common case is that it is
            # a dataset, so we check if it is first.
            api_url = question.installation / "api/datasets/:persistentId"
            api_url = api_url.with_query(persistentId=persistent_id)

            async with aiohttp.ClientSession() as session:
                resp = await session.get(api_url)
                if resp.status == 404:
                    # This persistent id is *not* a dataset. Maybe it's a file?
                    pid_maybe = await self.get_dataset_id_from_file_id(
                        question.installation, persistent_id
                    )
                    if pid_maybe is None:
                        # This is not a file either, so this citation doesn't exist
                        return DoesNotExist(
                            DataverseDataset,
                            f"{persistent_id} is neither a file nor a dataset in {question.installation}",
                        )
                    else:
                        persistent_id = pid_maybe
                elif resp.status == 200:
                    # This *is* a dataset, we just verified it with this query
                    verified_dataset = True
                else:
                    # Any other errors should propagate
                    resp.raise_for_status()
                    # Unreachable (raise_for_status raises for >= 400); keeps mypy happy
                    return None

        return Exists(DataverseDataset(question.installation, persistent_id))
class ZenodoResolver:
    """
    A resolver for datasets hosted on https://inveniosoftware.org/ (such as Zenodo)
    """

    async def resolve(
        self, question: ZenodoURL, log: Logger
    ) -> MaybeExists[ZenodoDataset] | DoesNotExist[ZenodoDataset] | None:
        """
        Resolve /record/, /records/ and /doi/ URLs on a Zenodo installation
        into a ZenodoDataset identified by its record id.
        """
        if not (
            # After the base URL, the URL structure should start with either record or records
            question.url.path[len(question.installation.path) :].startswith("record/")
            or question.url.path[len(question.installation.path) :].startswith(
                "records/"
            )
            or question.url.path[len(question.installation.path) :].startswith("doi/")
        ):
            return None

        # For URLs of form https://zenodo.org/doi/<doi>, the record_id can be resolved by making a
        # HEAD request and following it. This is absolutely *unideal* - you would really instead want
        # to make an API call. But I can't seem to find anything in the REST API that would let me give
        # it a DOI and return a record_id. And these dois can resolve to *different* records over time,
        # so let's actively resolve them here to match the ZenodoDataset's immutable property
        if question.url.path[len(question.installation.path) :].startswith("doi/"):
            url_parts = question.url.path.split("/")
            if len(url_parts) != 4:
                # Not a correctly formatted DOI URL
                return None
            async with aiohttp.ClientSession() as session:
                resp = await session.head(question.url)
                if resp.status == 404:
                    return DoesNotExist(
                        ZenodoDataset, f"{question.url} is not a valid Zenodo DOI URL"
                    )
                redirect_location = resp.headers["Location"]

                # Recurse with the redirect target, which should be a /records/ URL
                return await self.resolve(
                    ZenodoURL(question.installation, URL(redirect_location)), log
                )
        else:
            # URL is /record or /records
            # Record ID is the last part of the URL path
            return MaybeExists(ZenodoDataset(question.installation, question.url.name))
class FigshareResolver:
    async def resolve(
        self, question: FigshareURL, log: Logger
    ) -> MaybeExists[FigshareDataset] | None:
        """
        Parse a Figshare article URL into a FigshareDataset.

        Recognizes `articles/...` and `account/articles/...` paths; the last
        one or two numeric path segments are the article id and (optionally)
        the version.
        """
        # Path of the question URL, relative to the installation's base path
        relative_path = question.url.path[len(question.installation.url.path) :]

        # Only article URLs (public or account-scoped) are recognized
        if not relative_path.startswith(("articles/", "account/articles/")):
            return None

        # Figshare article IDs are integers, and so are version IDs.
        # Trailing <article-id>/<version> pins a version; a trailing
        # <article-id> alone leaves the version unspecified.
        segments = question.url.path.split("/")
        last, second_last = segments[-1], segments[-2]

        if last.isdigit() and second_last.isdigit():
            return MaybeExists(
                FigshareDataset(question.installation, int(second_last), int(last))
            )
        if last.isdigit():
            return MaybeExists(
                FigshareDataset(question.installation, int(last), None)
            )
        return None
class ImmutableFigshareResolver:
    """
    Pin a FigshareDataset to a specific version, yielding an ImmutableFigshareDataset.
    """

    async def resolve(
        self, question: FigshareDataset, log: Logger
    ) -> (
        Exists[ImmutableFigshareDataset]
        | MaybeExists[ImmutableFigshareDataset]
        | DoesNotExist[ImmutableFigshareDataset]
        | None
    ):
        """
        If `question` already carries a version, pass it through unverified;
        otherwise look up the article's versions via the Figshare API.
        """
        if question.version is not None:
            # Version already specified, just return
            return MaybeExists(
                ImmutableFigshareDataset(
                    question.installation, question.articleId, question.version
                )
            )

        api_url = (
            question.installation.apiUrl
            / "articles"
            / str(question.articleId)
            / "versions"
        )
        async with aiohttp.ClientSession(
            headers={"Authorization": f"token {FIGSHARE_PUBLIC_TOKEN}"}
        ) as session:
            resp = await session.get(api_url)
            if resp.status == 404:
                return DoesNotExist(
                    ImmutableFigshareDataset,
                    f"Article ID {question.articleId} not found on figshare installation {question.installation.url}",
                )
            elif resp.status == 200:
                data = await resp.json()
                # data is a list of versions; the last entry is assumed to be
                # the latest published version — TODO confirm API ordering
                return Exists(
                    ImmutableFigshareDataset(
                        question.installation, question.articleId, data[-1]["version"]
                    )
                )
            else:
                # All other status codes should raise an error
                resp.raise_for_status()

        # This should not actually be reached, but the explicit return None makes mypy happy
        return None
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/resolvers/feature_detect.py | Python | from json import JSONDecodeError
from logging import Logger
import aiohttp
from yarl import URL
from .base import Exists, MaybeExists
from .repos import CKANDataset, DataverseURL, Git, GitLabURL
class FeatureDetectResolver:
    """
    Use external network calls to detect what kind of URL this is.

    Resolver of last resort!
    """

    async def is_git_repo(
        self, session: aiohttp.ClientSession, url: URL, log: Logger
    ) -> Exists[Git] | None:
        """
        Return an Exists[Git] if url is a git repository that supports the
        smart HTTP git protocol, None otherwise.
        """
        # Read https://github.com/git/git/blob/master/Documentation/gitprotocol-http.txt
        # to understand the smart HTTP git protocol better.
        # Short version is that we look for a 200 response in $GIT_URL/info/refs?service=git-upload-pack
        # To determine if this is a git repo
        refs_url = url / "info/refs"
        refs_url = refs_url.with_query(service="git-upload-pack")

        resp = await session.get(refs_url)
        if resp.status == 200:
            log.debug(f"Found git repo at {url} via 200 OK response to {refs_url}")
            return Exists(Git(str(url), "HEAD"))

        # Not a smart git URL
        return None

    async def is_dataverse(
        self, session: aiohttp.ClientSession, url: URL, log: Logger
    ) -> MaybeExists[DataverseURL] | None:
        """
        Check if a given URL is under a dataverse install
        """
        # Make an API call to check if this is a dataverse instance
        # https://guides.dataverse.org/en/latest/api/native-api.html#show-dataverse-software-version-and-build-number
        # FIXME: This assumes that the dataverse instance is hosted at the root of the server,
        # without any other path prefix
        installation = url.with_path("/").with_fragment(None).with_query(None)
        api_url = installation.with_path("/api/info/version")
        resp = await session.get(api_url)
        if resp.status == 200:
            try:
                version_data = await resp.json()
                if version_data.get("status") == "OK" and "version" in version_data.get(
                    "data", {}
                ):
                    log.debug(
                        f"Detected dataverse installation at {installation} via 200 response to {api_url}"
                    )
                    return MaybeExists(DataverseURL(installation, url))
            except Exception:
                # A 200 response that isn't the JSON shape we expect just means
                # this isn't dataverse. Catch Exception rather than a bare
                # `except:`, which would also swallow asyncio.CancelledError and
                # KeyboardInterrupt (BaseException subclasses) and break task
                # cancellation.
                pass
        return None

    async def is_gitlab(
        self, session: aiohttp.ClientSession, question: URL, log: Logger
    ) -> MaybeExists[GitLabURL] | None:
        """
        Check if a given URL is under a GitLab installation.
        """
        # A lot of GitLab APIs seem to require auth to hit, including the version API
        # So instead, we hit the OpenID Connect Well Known Endpoint (https://docs.gitlab.com/ee/integration/openid_connect_provider.html#settings-discovery)
        # And check for GitLab specific supported claims.
        installation = question.with_path("/").with_query(None).with_fragment(None)
        openid_config_url = installation / ".well-known/openid-configuration"

        resp = await session.get(openid_config_url)
        if resp.status != 200:
            return None

        try:
            data = await resp.json()
        except JSONDecodeError:
            return None

        if "https://gitlab.org/claims/groups/owner" in data.get("claims_supported", {}):
            log.debug(
                f"Found GitLab installation at {installation} by looking for `claims_supported` in {openid_config_url}"
            )
            return MaybeExists(GitLabURL(installation, question))
        else:
            return None

    async def is_ckan(
        self, session: aiohttp.ClientSession, question: URL, log: Logger
    ) -> MaybeExists[CKANDataset] | None:
        """
        Check if a given URL is a dataset page on a CKAN installation.
        """
        # If there's no "/dataset/" it's not CKAN
        if "/dataset/" not in question.path:
            return None

        # Determine the API URL, so we can make a call to the status_show endpoint
        # CKAN can be under a url prefix so we should support that
        parts = question.path.split("/")
        dataset_identifier_index = parts.index("dataset")

        # Only one segment after 'dataset', which is the dataset id
        if len(parts) != dataset_identifier_index + 2:
            return None

        url_prefix = "/".join(parts[:dataset_identifier_index])

        base_url = question.with_path(url_prefix)
        dataset_id = parts[dataset_identifier_index + 1]

        if not dataset_id:
            # Empty dataset_id is no-go
            return None

        status_api_endpoint = base_url / "api/3/action/status_show"
        resp = await session.get(status_api_endpoint)

        # Using startswith as Content-Type can also have encoding specified
        if resp.status != 200 or not resp.headers.get("Content-Type", "").startswith(
            "application/json"
        ):
            return None

        data = await resp.json()
        ckan_version = data.get("result", {}).get("ckan_version")
        if not ckan_version:
            return None

        log.debug(
            f"Detected CKAN installation at {base_url} - found ckan_version={ckan_version} in {status_api_endpoint}"
        )
        return MaybeExists(CKANDataset(base_url, dataset_id))

    async def resolve(
        self, question: URL, log: Logger
    ) -> (
        Exists[Git]
        | MaybeExists[DataverseURL]
        | MaybeExists[GitLabURL]
        | MaybeExists[CKANDataset]
        | None
    ):
        """
        Try each feature detector in turn, returning the first answer found.
        """
        if question.scheme not in ("http", "https"):
            return None

        detectors = (self.is_dataverse, self.is_gitlab, self.is_git_repo, self.is_ckan)

        async with aiohttp.ClientSession() as session:
            for g in detectors:
                maybe_answer = await g(session, question, log)
                if maybe_answer is not None:
                    return maybe_answer

        return None
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/resolvers/git.py | Python | import re
from logging import Logger
from aiohttp import ClientSession
from yarl import URL
from repoproviders.utils import exec_process
from .base import DoesNotExist, Exists, MaybeExists
from .repos import (
GistURL,
Git,
GitHubActionArtifact,
GitHubPR,
GitHubURL,
GitLabURL,
ImmutableGit,
)
class GitHubResolver:
    async def resolve(
        self, question: GitHubURL, log: Logger
    ) -> (
        MaybeExists[Git]
        | MaybeExists[GitHubPR]
        | MaybeExists[GitHubActionArtifact]
        | None
    ):
        """
        Classify a GitHub URL as a repo, a ref within a repo, a pull request,
        or an Actions artifact.
        """
        url = question.url
        # Drop empty segments so leading / trailing slashes don't matter
        segments = [s for s in url.path.split("/") if s.strip() != ""]

        def clone_url() -> str:
            # Normalized clone URL for <owner>/<repo>
            return str(url.with_path(f"{segments[0]}/{segments[1]}"))

        if len(segments) == 2:
            # Bare <owner>/<repo> — use the default branch
            return MaybeExists(Git(repo=clone_url(), ref="HEAD"))

        if len(segments) >= 4 and segments[2] in ("tree", "blob"):
            # <owner>/<repo>/(tree|blob)/<ref>[/<path>] — any path is ignored,
            # we only care about the repo and ref
            return MaybeExists(Git(repo=clone_url(), ref=segments[3]))

        if len(segments) == 4 and segments[2] == "pull" and segments[3].isdigit():
            # Pull request URL; the PR's head branch is resolved later
            return MaybeExists(GitHubPR(question.installation, question.url))

        if len(segments) == 7 and segments[5] == "artifacts" and segments[6].isdigit():
            # An artifact uploaded by GitHub Actions. We don't know if it is
            # still unexpired.
            return MaybeExists(
                GitHubActionArtifact(
                    question.installation, segments[0], segments[1], int(segments[6])
                )
            )

        # Not a GitHub URL shape we support
        return None
class GitHubPRResolver:
    """
    Resolve a GitHub pull request URL into the repo + branch its head points to.
    """

    async def resolve(
        self, question: GitHubPR, log: Logger
    ) -> MaybeExists[Git] | DoesNotExist[GitHubPR] | None:
        # Path shape is <org>/<repo>/pull/<id>, as recognized by GitHubResolver
        parts = [p for p in question.url.path.split("/") if p.strip() != ""]
        org = parts[0]
        repo = parts[1]
        pull_req_id = int(parts[3])

        async with ClientSession() as session:
            # FIXME: Support enterprise github installations
            # FIXME: Add authentication support
            api_url = (
                URL("https://api.github.com/repos")
                / org
                / repo
                / "pulls"
                / str(pull_req_id)
            )
            resp = await session.get(api_url)
            data = await resp.json()
            if resp.status == 404:
                return DoesNotExist(
                    GitHubPR, f"PR {pull_req_id} does not exist at {question.url}"
                )
            # NOTE(review): non-200/404 responses (e.g. rate limiting) fall through
            # to the lookups below and would surface as KeyError — consider
            # resp.raise_for_status() here; confirm intended behavior.
            # The PR's head is the branch proposed for merging
            repo_url = data["head"]["repo"]["html_url"]
            ref = data["head"]["ref"]

            return MaybeExists(Git(repo_url, ref))
class GistResolver:
    async def resolve(self, question: GistURL, log: Logger) -> MaybeExists[Git] | None:
        """
        Treat a <user>/<gist-id> gist URL as a cloneable git repository.
        """
        # Split the path into non-empty segments, ignoring leading/trailing slashes
        segments = [s for s in question.url.path.split("/") if s.strip() != ""]
        if len(segments) != 2:
            # FIXME: Handle ways to specify individual refs in Gist.
            return None
        # <user>/<gist-id> — clone the gist at its default ref
        return MaybeExists[Git](repo=Git(str(question.url), "HEAD"))
class GitLabResolver:
    async def resolve(
        self, question: GitLabURL, log: Logger
    ) -> MaybeExists[Git] | None:
        """
        Classify a GitLab URL as a repo (optionally at a specific ref).

        Handles bare project URLs (<user|org>/<repo>, optionally with one
        namespace level) and `/-/tree/<ref>` / `/-/blob/<ref>` URLs.
        """
        url = question.url
        # Split the URL into parts, discarding empty parts to account for leading and trailing slashes
        parts = [p for p in url.path.split("/") if p.strip() != ""]
        if len(parts) in (2, 3):
            # Handle <user|org>/<repo> as well as <user|org>/<namespace>/<repo>
            return MaybeExists(
                # Clear out the URL to remove query params & fragments
                Git(repo=str(url.with_query(None).with_fragment(None)), ref="HEAD")
            )
        elif "-" in parts:
            dash_index = parts.index("-")
            if len(parts) == dash_index + 1:
                # The dash is the last part of the URL, which isn't something we recognize to be anything
                return None
            if parts[dash_index + 1] not in ("tree", "blob"):
                # GitLab has dashes in lots of URLs, we only care about tree and blob ones
                return None
            if len(parts) < dash_index + 3:
                # A tree/blob marker with no ref after it (e.g. ".../-/tree")
                # previously raised IndexError; treat it as unrecognized instead.
                return None
            return MaybeExists(
                Git(
                    # Everything before the dash is the project path; the
                    # segment after tree/blob is the ref
                    str(url.with_path("/".join(parts[0:dash_index]))),
                    parts[dash_index + 2],
                )
            )
        else:
            # This is not actually a valid GitLab URL we support
            return None
class ImmutableGitResolver:
    """
    Resolve a mutable Git ref (branch, tag, or HEAD) into an immutable commit sha.
    """

    async def resolve(
        self, question: Git, log: Logger
    ) -> (
        Exists[ImmutableGit]
        | MaybeExists[ImmutableGit]
        | DoesNotExist[ImmutableGit]
        | None
    ):
        """
        Use `git ls-remote` to resolve `question.ref` in `question.repo`.

        Raises RuntimeError for git failures other than "repository not found".
        """
        command = ["git", "ls-remote", "--", question.repo, question.ref]
        retcode, stdout, stderr = await exec_process(command)

        if retcode:
            # `git` may follow redirects here, so the repo we pass may not always be the repo we
            # get back. So we loosely check for a 'not found' message.
            if re.search(r"fatal: repository '(.+)' not found", stderr, re.MULTILINE):
                return DoesNotExist(
                    ImmutableGit, f"Could not access git repository at {question.repo}"
                )
            # If it's another error, let's raise it directly
            raise RuntimeError(
                f"Unable to run git ls-remote to resolve {question}: {stderr}"
            )

        if stdout == "":
            # The remote repo exists, and we can talk to it. But no *ref* with the given name
            # exists. We check if this looks like a commit sha, and if it is, assume it exists.
            # FIXME: Decide if we just do this check *before* trying ls-remotes? Faster, but it means
            # we can't guarantee that the repo itself exists
            # Use fullmatch (not match) so that refs which merely *start* with
            # 40 hex characters are not misclassified as commit shas.
            if re.fullmatch(r"[0-9a-f]{40}", question.ref):
                resolved_ref = question.ref
                return MaybeExists(ImmutableGit(question.repo, resolved_ref))
            else:
                return DoesNotExist(
                    ImmutableGit, f"No ref {question.ref} found in repo {question.repo}"
                )
        else:
            # ls-remote output lines are "<sha>\t<refname>" — take the sha
            resolved_ref = stdout.split("\t", 1)[0]
            return Exists(ImmutableGit(question.repo, resolved_ref))
class GitUrlResolver:
    """
    Resolves raw git URLs

    URL structure is inspired by what `pip` supports: https://pip.pypa.io/en/stable/topics/vcs-support/#git
    """

    async def resolve(self, question: URL, log: Logger) -> MaybeExists[Git] | None:
        """
        Turn a pip-style git URL (e.g. git+https://host/repo@ref) into a Git
        repo + ref pair.
        """
        # List of supported protocols is from https://pip.pypa.io/en/stable/topics/vcs-support/#git
        supported_schemes = (
            "git+https",
            "git+ssh",
            "git",
            "git+file",
            "git+http",
            "git+git",
        )
        if question.scheme not in supported_schemes:
            return None

        # Strip the "git+" prefix to recover the transport scheme git understands
        repo_url = question.with_scheme(question.scheme.replace("git+", ""))

        # An "@" in the path separates the repo path from an explicit ref
        repo_path, separator, explicit_ref = question.path.partition("@")
        if separator:
            ref = explicit_ref
            repo_url = repo_url.with_path(repo_path)
        else:
            ref = "HEAD"

        return MaybeExists(Git(str(repo_url), ref))
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/resolvers/rclone.py | Python | import json
from dataclasses import dataclass
from logging import Logger
from shutil import which
from tempfile import NamedTemporaryFile
from repoproviders.resolvers.base import DoesNotExist, Exists
from ..utils import GCP_PUBLIC_SERVICE_ACCOUNT_KEY, exec_process, make_dir_hash
@dataclass(frozen=True)
class GoogleDriveFolder:
    """
    A Google Drive folder, identified by its Drive folder id.
    """

    id: str

    # Folder contents can change at any time
    immutable = False
@dataclass(frozen=True)
class ImmutableGoogleDriveFolder:
    """
    A Google Drive folder pinned to a snapshot of its contents via a directory hash.
    """

    id: str
    # Hash over the folder's file listing (computed with make_dir_hash)
    dir_hash: str

    immutable = True
class GoogleDriveFolderResolver:
    """
    Resolve a GoogleDriveFolder into an ImmutableGoogleDriveFolder by hashing
    its current contents, as listed by rclone.
    """

    async def resolve(
        self, question: GoogleDriveFolder, log: Logger
    ) -> Exists[ImmutableGoogleDriveFolder] | DoesNotExist[GoogleDriveFolder] | None:
        if not which("rclone"):
            raise FileNotFoundError(
                "rclone must be installed to resolve folders from Google Drive"
            )

        with NamedTemporaryFile("w") as service_account_key:
            json.dump(GCP_PUBLIC_SERVICE_ACCOUNT_KEY, service_account_key)
            # flush so the rclone subprocess can read the complete key file
            service_account_key.flush()

            # On-the-fly rclone remote for Google Drive, read-only scope
            connection_string = f":drive,scope=drive.readonly,service_account_file={service_account_key.name}:"

            cmd = [
                "rclone",
                "lsjson",
                connection_string,
                "--recursive",
                "--hash",
                "--drive-root-folder-id",
                question.id,
            ]

            returncode, stdout, stderr = await exec_process(cmd)

            if returncode != 0:
                # Failure in one way or another. Let's just write out the failure message lines
                # that refer to lsjson, so we avoid messages about missing config files
                # Cut off first 20 chars, as it prints out the date
                stderr_lines = [l[20:] for l in stderr.splitlines()]
                message = " ".join(l for l in stderr_lines if "lsjson" in l)
                # FIXME: Does this leak sensitive info?
                return DoesNotExist(GoogleDriveFolder, message)

            data = json.loads(stdout)

            if len(data) == 0:
                # No items were returned. Let's treat this as a DoesNotExist, as this usually means we don't
                # have permissions, or the directory doesn't exist
                return DoesNotExist(
                    GoogleDriveFolder,
                    "The Google Drive Folder either does not exist, is empty or is not public",
                )

            hash_input = {}
            for item in data:
                # Use (in order of preference), sha256, sha1, md5 and modtime based on what is present
                hashes = item.get("Hashes", {})
                h = hashes.get(
                    "sha256", hashes.get("sha1", hashes.get("md5", item["ModTime"]))
                )
                hash_input[item["Path"]] = h

            dirhash = make_dir_hash(hash_input)

            return Exists(ImmutableGoogleDriveFolder(question.id, dirhash))
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/resolvers/repos.py | Python | from dataclasses import dataclass
from yarl import URL
@dataclass(frozen=True)
class Git:
    """
    A git repository at a given ref.
    """

    # Clone URL of the repository
    repo: str
    # Branch, tag, commit sha, or "HEAD"
    ref: str

    # A named ref (branch/tag) can move over time
    immutable = False
@dataclass(frozen=True)
class ImmutableGit:
    """
    Same as Git, but marked to be fully resolved. This implies:

    1. The repository exists, and can be contacted
    2. If ref was a branch or tag, it has been resolved into an immutable commit sha
    3. If ref *looks* like a sha, we assume it exists (without testing it)
    """

    repo: str
    # A commit sha once resolution has happened
    ref: str

    immutable = True
@dataclass(frozen=True)
class GitHubURL:
    """
    A GitHub URL of any sort.

    Not just a repository URL.
    """

    # Base URL of the GitHub installation
    installation: URL
    url: URL

    # URLs can point to whatever
    immutable = False
@dataclass(frozen=True)
class GitHubPR:
    """
    A GitHub Pull Request
    """

    # Base URL of the GitHub installation
    installation: URL
    url: URL

    # PRs can change to whatever
    immutable = False
@dataclass(frozen=True)
class GitHubActionArtifact:
    """
    A downloadable GitHub Action Artifact
    """

    installation: URL
    # Owning user or organization
    account: str
    repo: str
    # Numeric artifact id, as found in the artifact download URL
    artifact_id: int

    # Artifacts don't change after upload
    immutable = True
@dataclass(frozen=True)
class GistURL:
    """
    A Gist URL of any sort.

    Not just cloneable repo URL
    """

    installation: URL
    url: URL

    # URL can point to whatever
    immutable = False
@dataclass(frozen=True)
class GitLabURL:
    """
    A GitLab URL of any sort.

    Not just a repository URL.
    """

    # Base URL of the GitLab installation
    installation: URL
    url: URL

    # URLs can point to whatever
    immutable = False
@dataclass(frozen=True)
class Doi:
    """
    A DOI (or handle), represented by the URL it resolves to.
    """

    url: URL

    # This needs further investigation
    immutable = False
@dataclass(frozen=True)
class DataverseURL:
    """
    Any kind of URL in any dataverse installation

    Not just for datasets.
    """

    # Base URL of the dataverse installation
    installation: URL
    url: URL

    immutable = False
@dataclass(frozen=True)
class DataverseDataset:
    """
    A single dataset in a Dataverse installation.
    """

    installationUrl: URL
    # Persistent identifier (DOI) of the dataset
    persistentId: str

    # Dataverse Datasets also have versions, which are not represented here.
    immutable = False
@dataclass(frozen=True)
class ZenodoURL:
    """
    Any kind of URL in any Zenodo / Invenio installation.

    Not just for records.
    """

    # Base URL of the Zenodo / Invenio installation
    installation: URL
    url: URL

    immutable = False
@dataclass(frozen=True)
class ZenodoDataset:
    """
    A single record on a Zenodo / Invenio installation.
    """

    installationUrl: URL
    recordId: str

    # Zenodo records are immutable: https://help.zenodo.org/docs/deposit/about-records/#life-cycle
    # When a new version is published, it gets its own record id!
    immutable = True
@dataclass(frozen=True)
class FigshareInstallation:
    """
    A Figshare installation, with its web and API base URLs.
    """

    url: URL
    apiUrl: URL
@dataclass(frozen=True)
class FigshareURL:
    """
    Any kind of URL in any Figshare installation.

    Not just for articles / datasets.
    """

    installation: FigshareInstallation
    url: URL

    immutable = False
@dataclass(frozen=True)
class FigshareDataset:
    """
    A Figshare article, optionally pinned to a version.
    """

    installation: FigshareInstallation
    articleId: int
    # None when no version was specified in the source URL
    version: int | None

    # Figshare articles have versions, and here we don't know if this one does or not
    immutable = False
@dataclass(frozen=True)
class ImmutableFigshareDataset:
    """
    A Figshare article pinned to a specific version.
    """

    installation: FigshareInstallation
    articleId: int
    # version will always be present when immutable
    version: int

    # We *know* there's a version here
    immutable = True
@dataclass(frozen=True)
class HydroshareDataset:
    """
    A Hydroshare resource, identified by its resource id.
    """

    resource_id: str

    # Hydroshare Datasets are mutable
    immutable = False
@dataclass(frozen=True)
class CKANDataset:
    """
    A dataset on a CKAN installation.
    """

    installationUrl: URL
    dataset_id: str

    immutable = False
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/resolvers/resolver.py | Python | import inspect
import types
import typing
from logging import Logger, getLogger
from typing import Any, Optional
from yarl import URL
from repoproviders.resolvers.feature_detect import FeatureDetectResolver
from .base import DoesNotExist, Exists, MaybeExists, SupportsResolve
from .doi import (
DataverseResolver,
DoiResolver,
FigshareResolver,
ImmutableFigshareResolver,
ZenodoResolver,
)
from .git import (
GistResolver,
GitHubPRResolver,
GitHubResolver,
GitLabResolver,
GitUrlResolver,
ImmutableGitResolver,
)
from .rclone import GoogleDriveFolderResolver
from .wellknown import WellKnownProvidersResolver
# Every resolver we know about, instantiated once at import time.
# NOTE: Order matters — within a resolution round, the first resolver whose
# `resolve` returns a non-None answer wins (see resolve() in this module).
ALL_RESOLVERS: list[SupportsResolve] = [
    WellKnownProvidersResolver(),
    GitHubResolver(),
    GitHubPRResolver(),
    GistResolver(),
    GoogleDriveFolderResolver(),
    GitUrlResolver(),
    GitLabResolver(),
    DoiResolver(),
    ZenodoResolver(),
    FigshareResolver(),
    ImmutableFigshareResolver(),
    DataverseResolver(),
    ImmutableGitResolver(),
    FeatureDetectResolver(),
]
# Map from question type -> resolvers that can answer questions of that type.
# Built by introspecting the runtime annotation of each resolver's `question`
# parameter, so resolvers declare what they handle purely via type hints.
RESOLVER_BY_TYPE: dict[type, list[SupportsResolve]] = {}
for R in ALL_RESOLVERS:
    annotations = inspect.get_annotations(R.resolve)
    supported_types = annotations["question"]
    if isinstance(supported_types, type):
        # Only supports a single type
        RESOLVER_BY_TYPE.setdefault(supported_types, []).append(R)
    elif (
        isinstance(supported_types, types.UnionType)
        or typing.get_origin(supported_types) is typing.Union
    ):
        # `X | Y` annotations produce types.UnionType, but typing.Union[X, Y]
        # (and Optional[X]) produce a different runtime object — handle both so
        # a resolver using either spelling is registered rather than silently dropped.
        for t in typing.get_args(supported_types):
            RESOLVER_BY_TYPE.setdefault(t, []).append(R)
async def resolve(
    question: str | Any, recursive: bool, log: Optional[Logger] = None
) -> list[Exists | MaybeExists | DoesNotExist] | None:
    """
    Resolve `question` (a URL string, a URL, or an already-resolved repo object)
    into a list of answers.

    Each "round" asks every resolver registered for the current question's type,
    keeping the first non-None answer. With `recursive=True`, the answer's repo
    becomes the next round's question until no resolver applies, no answer is
    found, or a DoesNotExist is confirmed. The returned list is the chain of
    answers in resolution order (possibly empty).
    """
    if isinstance(question, str):
        question = URL(question)
    if log is None:
        # Use default named logger
        log = getLogger("repoproviders")
    answers: list[Exists | MaybeExists | DoesNotExist] = []
    resp = None
    while True:
        # Get a list of applicable resolvers we can try
        applicable_resolvers = RESOLVER_BY_TYPE.get(type(question), [])
        if not applicable_resolvers:
            # No applicable resolvers found for this question type, we are done
            break
        for r in applicable_resolvers:
            resp = await r.resolve(question, log)
            if resp is not None:
                # We found an answer!
                answers.append(resp)
                # Break out of the for after we find an answer in each round
                break
        if recursive:
            # If we want a recursive answer, we have to continue iterating
            match resp:
                case DoesNotExist():
                    # Some resolver detected this but we have confirmed it does not exist
                    break
                case Exists(repo):
                    # TODO: Should an "Exists" be further resolved, or does it always indicate an end?
                    question = repo
                case MaybeExists(repo):
                    question = repo
                case None:
                    # No answer was found this round so we are done
                    break
            # We *did* find an answer this round, so we should continue and see if we find more
            # Reset resp so a stale answer from this round can't leak into the next one.
            resp = None
        else:
            # We are not recursive, so we are done after 1 round regardless
            break
    return answers
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/resolvers/serialize.py | Python | import dataclasses
import json
from typing import Any
from yarl import URL
from repoproviders.resolvers.base import DoesNotExist, Exists, MaybeExists
from .base import Repo
class JSONEncoder(json.JSONEncoder):
    """
    json.JSONEncoder that additionally understands yarl URLs and class objects.

    URLs serialize to their string form; class objects serialize to their name.
    Everything else falls through to the standard encoder.
    """

    def default(self, o: URL | Any | type) -> str | Any:
        if isinstance(o, URL):
            return str(o)
        if isinstance(o, type):
            return o.__name__
        return super().default(o)
def to_json(answer: DoesNotExist[Repo] | Exists[Repo] | MaybeExists[Repo]):
    """
    Convert an answer into a canonical JSON representation
    """
    as_dict = to_dict(answer)
    return json.dumps(as_dict, cls=JSONEncoder)
def to_dict(
    answer: DoesNotExist[Repo] | Exists[Repo] | MaybeExists[Repo],
) -> dict[str, Any]:
    """
    Convert an answer into a canonical dict representation.

    NOTE: "certainity" (sic) is the established key spelling in this
    serialization format, so it is preserved as-is.
    """
    if isinstance(answer, DoesNotExist):
        # DoesNotExist carries no repo — serialize the answer itself, and
        # report the kind of repo it was looking for.
        return {
            "certainity": answer.__class__.__name__,
            "kind": answer.kind.__name__,
            "data": dataclasses.asdict(answer),
        }
    elif isinstance(answer, (Exists, MaybeExists)):
        repo = answer.repo
        return {
            "certainity": answer.__class__.__name__,
            "kind": repo.__class__.__name__,
            "data": dataclasses.asdict(repo),
        }
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/resolvers/wellknown.py | Python | import json
from logging import Logger
from pathlib import Path
from typing import Callable
from yarl import URL
from repoproviders.resolvers.rclone import GoogleDriveFolder
from .base import MaybeExists, Repo
from .repos import (
DataverseURL,
Doi,
FigshareInstallation,
FigshareURL,
GistURL,
GitHubURL,
GitLabURL,
HydroshareDataset,
ZenodoURL,
)
class WellKnownProvidersResolver:
def detect_github(self, question: URL, log: Logger) -> GitHubURL | None:
# git+<scheme> urls are handled by a different resolver
if question.scheme not in ("http", "https") or (
question.host != "github.com" and question.host != "www.github.com"
):
# TODO: Allow configuring for GitHub enterprise
return None
else:
return GitHubURL(URL("https://github.com"), question)
def detect_gist(self, question: URL, log: Logger) -> GistURL | None:
if question.host == "gist.github.com":
return GistURL(URL("https://gist.github.com"), question)
else:
# TODO: Allow configuring for GitHub enterprise
return None
def detect_gitlab(self, question: URL, log: Logger) -> GitLabURL | None:
# git+<scheme> urls are handled by a different resolver
if question.scheme not in ("http", "https") or (
question.host != "gitlab.com" and question.host != "www.gitlab.com"
):
# TODO: Allow configuring for GitHub enterprise
return None
else:
return GitLabURL(URL("https://gitlab.com"), question)
def detect_zenodo(self, question: URL, log: Logger) -> ZenodoURL | None:
KNOWN_INSTALLATIONS = [
URL("https://sandbox.zenodo.org/"),
URL("https://zenodo.org/"),
URL("https://data.caltech.edu/"),
]
# Intentionally don't check for record/ records/ doi/ etc. That should be all
# handled by the zenodo resolver
installation = next(
(
installation
for installation in KNOWN_INSTALLATIONS
# Intentionally don't check for scheme validity, to support interchangeable http and https URLs
if installation.host == question.host
# Check for base URL, to support installations on base URL other than /
and question.path.startswith(installation.path)
),
None,
)
if installation is None:
return None
return ZenodoURL(installation, question)
def detect_dataverse(self, question: URL, log: Logger) -> DataverseURL | None:
if not hasattr(self, "_dataverse_installation_urls"):
# Get a list of installation URLs for known dataverse installations
data_file = Path(__file__).parent / "dataverse.json"
with open(data_file) as fp:
installations = json.load(fp)["installations"]
# Parse all the URLs of installations once, so we can quickly use them for validating URLs passed in
# TODO: Use a better datastructure here (like a trie?)
# The structure says 'hostname' but it sometimes has URL prefixes! But no scheme ever
self._dataverse_installation_urls = [
URL(f"https://{i['hostname']}") for i in installations
]
# Check if URL is under one of the installation URLs we have.
installation = next(
(
installation
for installation in self._dataverse_installation_urls
# Intentionally don't check for scheme validity, to support interchangeable http and https URLs
if installation.host == question.host
and question.path.startswith(installation.path)
),
None,
)
if installation is None:
return None
else:
return DataverseURL(installation, question)
def detect_figshare(self, question: URL, log: Logger) -> FigshareURL | None:
KNOWN_INSTALLATIONS = [
FigshareInstallation(
URL("https://figshare.com/"), URL("https://api.figshare.com/v2/")
)
]
# Check if this is a figshare URL of any sort - *not* that its' a figshare 'article' URL
# That logic lives within the FigshareResolver itself, so it can be used for other FigshareURLs
# generated by guessers
installation = next(
(
installation
for installation in KNOWN_INSTALLATIONS
# Intentionally don't check for scheme validity, to support interchangeable http and https URLs
if installation.url.host == question.host
# Check for base URL, to support installations on base URL other than /
and question.path.startswith(installation.url.path)
),
None,
)
if installation is None:
return None
return FigshareURL(installation, question)
def detect_google_drive(
self, question: URL, log: Logger
) -> GoogleDriveFolder | None:
if question.host == "drive.google.com":
parts = question.path.split("/")
if parts[1] == "drive" and parts[2] == "folders":
return GoogleDriveFolder(parts[3])
return None
def detect_hydroshare(self, question: URL, log: Logger) -> HydroshareDataset | None:
if question.host == "www.hydroshare.org" or question.host == "hydroshare.org":
# Strip out leading and trailing / to make our work easier
parts = [p for p in question.path.split("/") if p.strip()]
if len(parts) == 2 and parts[0] == "resource":
return HydroshareDataset(parts[1])
return None
async def resolve(
self, question: URL | Doi, log: Logger
) -> MaybeExists[Repo] | None:
# These detectors are *intentionally* not async, as they should not be doing any
# network calls
detectors: list[Callable[[URL, Logger], Repo | None]] = [
self.detect_github,
self.detect_gist,
self.detect_google_drive,
self.detect_dataverse,
self.detect_zenodo,
self.detect_figshare,
self.detect_gitlab,
self.detect_hydroshare,
]
match question:
case URL():
url = question
case Doi(doi_url):
url = doi_url
for d in detectors:
maybe_answer = d(url, log)
if maybe_answer is not None:
return MaybeExists(maybe_answer)
return None
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/repoproviders/utils.py | Python | import asyncio
import binascii
import hashlib
import json
import time
from base64 import standard_b64decode, urlsafe_b64encode
from logging import Logger
from pathlib import Path
from typing import Optional
import aiohttp
from yarl import URL
# A public service account JSON key, used to make API requests for Google Drive.
# Some values are base 64 decoded so we don't get dinged by GitHub security scanning
# This service account has no rights, only used for identity
# NOTE(review): these are intentionally-published, zero-privilege credentials;
# if abused they should be rotated upstream.
GCP_PUBLIC_SERVICE_ACCOUNT_KEY = {
    "type": "service_account",
    "project_id": "repoproviders-iam",
    "private_key_id": standard_b64decode(
        "MDY5ODJhNmY4ZWM5MTM3MDU0YmU5Mjk5NzIwYTQ1OWFhYjgxMmY0Yg=="
    ).decode(),
    "private_key": standard_b64decode(
        "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRRE9ycUx2cEdUQ281TjgKaklzOGdvU3FTTjQ2Mm5nalhQTk5hMkcraFdiS3lKRVM3c3krMXhHcUNqRXM1WVVuR2k4am5WaHNKTDhXVUg2NAphdzJpczcwV0loK1c1OUs4NDhJbzdLQzc1VmRsMjBGZkgyVldEVU1uTGhWOUlHMmJWNHNpWGJyL1JJWlhMYlpCClNkN2lSZnZ5b1U4VjJ0eDI5Zm9menFVUEhrbGpEdnV0Nmc3dXZrUUN1bEJFV1hFQjdId2MwL2Y3RFAzb09ZQ3IKMkttUEsxNUJBa2dZNUova2Q5VW5EZDJMaGN6U3MwZVhUUWt2K2xWcEVGWlNaN3VLdVRsWm4rTmNDa0pDV1k2KwoybVJmRWJoTFRscVV4Zk5yRnExd1BSNDBhVXhNOCtWNWVWbTZWSDYwdldhOTJTc1lybnc1SUt0TVEvWE9hZ0pxCkZDQ0lRK25CQWdNQkFBRUNnZ0VBSWxzcFBvaW5MeXVkd3FCaVYxU2kxT09UL1ZoL2kvRlJpRmNIUW5KT2FmcjUKRXBxYmNrV3g3QUFSdUpMV2xXOW0wcGEvTWdyUnNOWGttcWRBOXdERnQ3NG9YaDlmQ29NWnJVL1lVQ09KYWFjTwpzTTg1T3hxdFJRQUdGbXlqaTZUN3ZkU3kxdWYvSk5LMmJ4Zm1jdHFMVFFPL2I3U1gzVFo2UTN0SU9NRWlGZE1GClJCMDNvTVhhcWxsL2dsbWFXais4YUVrSmQ0MmtEd0l3YzluNjMwYU5jRkx0MGZLdlIydHkvU2p3WHJvTlFJK1UKT3o5VE04ZkkzdTF1WUFKUEJJdDJDZS9kQTlObVdDMFRYYW1paEI4SU1SSXBWeGVONWFubVNrY3ZJZHIxUTh5MQpjTk5zcHZvQUJlN2ZRcktFRWNEVGJaVTg2TlJRNnVvcjRYV3pGVjVPb1FLQmdRRHRjbTV3OE8yMjRQbDVDaG9KClZ2YUVnUWdtSHBnc3EzWHhseU9sS2g0bGc4ck9Qd1lhOG93MmR3MzdDcXNRc08va0ZYQU5vWm5MUi93U21KNlcKS0d3MlFZWjlsaVhneERpQ1VudlFHQ0dPVUFIU2F1cUl6V2JmbWMvclRyMDQ4djl6M0JVYXh5WGRIWHJlV2szbgo3dVZRdzZ3MnltVjNhRTR4SnhnTjhKc2ZCUUtCZ1FEZTFOTyt2K0lNUWZGOHdtQXhlM0dDclJySVpvNzFJUHRuCjFoaGF5NUdOWE5CL3pKcVQ3MTJJeFo3WUgrTU4yUDJCelVKTTdtc2xXUmdXZXI2d01uSDhienlIcW9lQ0VwQkIKNDl6Y0RKK3lDaGhhbzcveU9YMjBkRTV0d3Z3NmU3TkdZeVBxM0VkVUw2ZU5HVXEzTWlGbnAzSUw3elNaeFIwZApYRk9lSndURWpRS0JnUUNTQTdWd2xHZko5d3pTWnVqZDUzRk95RDRubXRhL1dXejg5SkZCNXVXRThrZUxqdXdGCk5EUU81aVZkeEJDd0FlNXpGcy9DUWliZC85VTk1a1pYVm1JODl3eHFQQ1BzMVIxZTNyUXVvamc0V0hEV1lWTDYKYnowY3NXeFBhaXNvVXgzTnRIL3g2SmNiSXg3RWowbXJINWc2a3lsYXhCbWpWU3dJUTk4aDYxeW90UUtCZ0FKcgp2WUV0QkgrdGw3b0xRcEJIRHd5a1pNNFlqeVVLbnJDYUd0bWhySXNrbnY5RWNjbDVxRUo4SXlXbDh3bUxlZldYCkRVbFlyY0ZTSG5qZ0RJSk5pZjk4RmVSRGJnVnp2aTE1RkVVdnZleHBQNnA4YlBGc3ZuamZhcHEycTViWEVUT0sKa0RGVkExRmUweXN0UXlxS1dPS1BaeVhLQzRCQUsvak5yL3JmNGFWaEF2R0JBSnhwbDNVZnpaSFAxaVdHNGJUWApBY3A0WTR5cG1wME5aVWlrNHUycnFubTFmSDJZYmRYZGQvUlRWNlpYRmgrM0lpVmNkMFY2cDhyNnBqMUdkaHpHCnBLTEhoU1NTNi95ZzF6cnFhWWhQV0FWeVJVT1BvMEVOeGZIWmc4cHErcStDdDVHQmdQS1BNT3lzRmw2RzRzVDkKOFNpNVd3a1V2cXMwVyt3TWJ6QWp6bEFQCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K"
    ).decode(),
    "client_email": "repoproviders-no-rights-public@repoproviders-iam.iam.gserviceaccount.com",
    "client_id": "107622683369583114795",
    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
    "token_uri": "https://oauth2.googleapis.com/token",
    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
    "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/repoproviders-no-rights-public%40repoproviders-iam.iam.gserviceaccount.com",
    "universe_domain": "googleapis.com",
}
# Publicly visible GitHub PAT with just public repo permissions of account yuvipanda-repoproviders-public
# Primarily useful for downloading GitHubActionsArtifact resources, as you need a PAT to do so
# uuencoded — presumably to avoid tripping secret scanners, like the keys above
GITHUB_PUBLIC_PAT = binascii.a2b_uu(
    b"H9VAP7UE%9&%N;U5O34YF4712:%HQ9VQP,VUF,U),3D%J9S%Q8GHW:P \n"
).decode()
# Publicly visible figshare token, for account yuvipanda+repoproviders-public@gmail.com
# Used to have better luck with hitting the figshare API, which has a very protective & aggressive
# AWS Web Application Firewall in front
FIGSHARE_PUBLIC_TOKEN = standard_b64decode(
    "OWU5ZjdjMGIzMDA5NWRiZDJmMDEyNGJhODFiZDAyMjAwZTFjNDA2NTdmNTg0MmFlNDZhYjM1NTMxNzcyNTBkNzEwNWY3MDYxYTdhYWY5Njg3MjdjOGUyYTQ0ZjU4ODE5NWI1ZmEyNzVlNjcwYjcwNjYzYTY0YzVjZjZmMDdhZmQ="
).decode()
def make_dir_hash(data: dict[str, str]) -> str:
    """
    Convert a directory structure to a urlsafe base64 encoded sha256 hash.

    Input is a dict where key is full paths of files, and value is either a hash
    or a last modified timestamp. Keys are sorted before hashing so the result
    is independent of insertion order.
    """
    canonical = json.dumps(dict(sorted(data.items())))
    digest = hashlib.sha256(canonical.encode()).digest()
    return urlsafe_b64encode(digest).decode()
async def download_file(
    session: aiohttp.ClientSession, url: URL, output_path: Path, log: Logger
):
    """
    Stream the file at `url` into `output_path`, creating parent directories
    as needed. Handles servers that (invalidly) return a 200 response carrying
    a Location redirect header by following the redirect manually.
    """
    # Read in 4k chunks
    chunk_size = 4 * 1024
    started_at = time.monotonic()
    response = await session.get(url)
    if response.status == 200 and "Location" in response.headers:
        # Some providers (lookin at you, data.caltech.edu) send a Location header
        # *but with a 200 status code*. This is invalid and bogus, yet we have to
        # honor it. Sigh
        log.debug(
            "Got a Location header response but with 200 OK status code. Doing a manual redirect..."
        )
        return await download_file(
            session, URL(response.headers["Location"]), output_path, log
        )
    response.raise_for_status()
    if not output_path.parent.exists():
        # Leave the exist_ok=True here despite the check, to handle possible
        # race conditions in the future if we ever parallelize this
        log.debug(f"Created directory {output_path.parent}")
        output_path.parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, "wb") as f:
        async for chunk in response.content.iter_chunked(chunk_size):
            f.write(chunk)
    elapsed = time.monotonic() - started_at
    log.debug(f"Downloaded {url} to {output_path} in {elapsed:0.02f}s")
async def exec_process(
    cmd: list[str], log: Optional[Logger] = None, **kwargs
) -> tuple[int, str, str]:
    """
    Run `cmd` as a subprocess and return (returncode, stdout, stderr).

    Extra keyword arguments are forwarded to asyncio.create_subprocess_exec.
    """
    proc = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, **kwargs
    )
    raw_stdout, raw_stderr = await proc.communicate()
    stdout = raw_stdout.decode()
    stderr = raw_stderr.decode()
    returncode = await proc.wait()
    if log:
        # FIXME: Stream these directly out
        log.debug(stdout)
        log.debug(stderr)
    return returncode, stdout, stderr
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/conftest.py | Python | import logging
import pytest
@pytest.fixture()
def log() -> logging.Logger:
    """Fixture providing the root logger, for tests that need a Logger instance."""
    return logging.getLogger()
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/fetchers/test_ckan.py | Python | import hashlib
from logging import Logger
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from repoproviders.fetchers import fetch
from repoproviders.resolvers import resolve
from repoproviders.resolvers.base import DoesNotExist
# Network-dependent end-to-end test against a live CKAN-hosted dataset.
@pytest.mark.parametrize(
    ("questions", "md5tree"),
    [
        (
            (
                # Don't forget this happened
                "https://catalog.data.gov/dataset/cumulative-provisional-counts-of-deaths-by-sex-race-and-age",
            ),
            {
                "rows.rdf": "30c4b07b6fa28835db5daca0e2d6174e",
                "rows.csv": "48833a8e1e30ce06cc5a7562efa0b662",
                "rows.xml": "990d83bff832d6dd08e40037c97fca45",
                "rows.json": "5c76a18b6039ec5922e32b9bdc8bfde9",
            },
        )
    ],
)
async def test_fetch(questions: list[str], md5tree: dict[str, str], log: Logger):
    """Resolve each question URL recursively, fetch it, and verify file md5s."""
    for question in questions:
        with TemporaryDirectory() as d:
            output_dir = Path(d)
            answers = await resolve(question, True, log)
            assert answers is not None
            assert not isinstance(answers[-1], DoesNotExist)
            await fetch(answers[-1].repo, output_dir, log)
            # Verify md5 sum of the files we expect to find
            for subpath, expected_sha in md5tree.items():
                with open(output_dir / subpath, "rb") as f:
                    h = hashlib.md5()
                    h.update(f.read())
                    assert h.hexdigest() == expected_sha
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/fetchers/test_dataverse.py | Python | import hashlib
from logging import Logger
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from repoproviders.fetchers import fetch
from repoproviders.resolvers import resolve
from repoproviders.resolvers.base import Exists
# Network-dependent end-to-end test against live Dataverse datasets.
@pytest.mark.parametrize(
    ("questions", "md5tree"),
    [
        (
            (
                "doi:10.7910/DVN/TJCLKP",
                "https://dataverse.harvard.edu/citation?persistentId=doi:10.7910/DVN/TJCLKP",
            ),
            {
                "data/primary/primary-data.zip": "a8f6fc3fc58f503cd48e23fa8b088694",
                "data/2023-01-03.tsv": "6fd497bf13dab9a06fe737ebc22f1917",
                "code/language.py": "9d61582bcf497c83bbd1ed0eed3c772e",
            },
        ),
        (
            (
                "https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ",
                "https://dataverse.harvard.edu/citation?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ",
                "doi:10.7910/DVN/6ZXAGT/3YRRYJ",
            ),
            {
                "ArchaeoGLOBE-master/analysis/figures/1_response_distribution.png": "243c6a3dd66bc3c84102829b277ef333",
                "ArchaeoGLOBE-master/analysis/figures/2_trends_map_knowledge.png": "2ace6ae9d470dda6cf2f9f9a6588171a",
                "ArchaeoGLOBE-master/analysis/figures/3_trends_global.png": "63ccd0a7b2d20440cd8f418d4ee88c4d",
                "ArchaeoGLOBE-master/analysis/figures/4_consensus_transitions.png": "facfaedabeac77c4496d4b9e962a917f",
                "ArchaeoGLOBE-master/analysis/figures/5_ArchaeoGLOBE_HYDE_comparison.png": "8e002e4d50f179fc1808f562b1353588",
                "ArchaeoGLOBE-master/apt.txt": "b4224032da6c71d48f46c9b78fc6ed77",
                "ArchaeoGLOBE-master/analysis/archaeoglobe.pdf": "f575be4790efc963ef1bd40d097cc06d",
                "ArchaeoGLOBE-master/analysis/archaeoglobe.Rmd": "f37d5f7993fde9ebd64d16b20fc22905",
                "ArchaeoGLOBE-master/ArchaeoGLOBE.Rproj": "d0250e7918993bab1e707358fe5633e0",
                "ArchaeoGLOBE-master/CONDUCT.md": "f87ef290340322089c32b4e573d8f1e8",
                "ArchaeoGLOBE-master/.circleci/config.yml": "6eaa54073a682b3195d8fab3a9dd8344",
                "ArchaeoGLOBE-master/CONTRIBUTING.md": "b3a6abfc749dd155a3049f94a855bf9f",
                "ArchaeoGLOBE-master/DESCRIPTION": "745ef979494999e483987de72c0adfbd",
                "ArchaeoGLOBE-master/dockerfile": "aedce68e5a7d6e79cbb24c9cffeae593",
                "ArchaeoGLOBE-master/.binder/Dockerfile": "7564a41246ba99b60144afb1d3b6d7de",
                "ArchaeoGLOBE-master/.gitignore": "62c1482e4febbd35dc02fb7e2a31246b",
                "ArchaeoGLOBE-master/analysis/data/derived-data/hyde_crop_prop.RDS": "2aea7748b5586923b0de9d13af58e59d",
                "ArchaeoGLOBE-master/analysis/data/derived-data/kk_anthro_prop.RDS": "145a9e5dd2c95625626a720b52178b70",
                "ArchaeoGLOBE-master/LICENSE.md": "3aa9d41a92a57944bd4590e004898445",
                "ArchaeoGLOBE-master/analysis/data/derived-data/placeholder": "d41d8cd98f00b204e9800998ecf8427e",
                "ArchaeoGLOBE-master/.Rbuildignore": "df15e4fed49abd685b536fef4472b01f",
                "ArchaeoGLOBE-master/README.md": "0b0faabe580c4d76a0e0d64a4f54bca4",
                "ArchaeoGLOBE-master/analysis/data/derived-data/README.md": "547fd1a6e874f6178b1cf525b5b9ae72",
                "ArchaeoGLOBE-master/analysis/figures/S1_FHG_consensus.png": "d2584352e5442b33e4b23e361ca70fe1",
                "ArchaeoGLOBE-master/analysis/figures/S2_EXAG_consensus.png": "513eddfdad01fd01a20263a55ca6dbe3",
                "ArchaeoGLOBE-master/analysis/figures/S3_INAG_consensus.png": "b16ba0ecd21b326f873209a7e55a8deb",
                "ArchaeoGLOBE-master/analysis/figures/S4_PAS_consensus.png": "05695f9412337a00c1cb6d1757d0ec5c",
                "ArchaeoGLOBE-master/analysis/figures/S5_URBAN_consensus.png": "10119f7495d3b8e7ad7f8a0770574f15",
                "ArchaeoGLOBE-master/analysis/figures/S6_trends_map_landuse.png": "b1db7c97f39ccfc3a9e094c3e6307af0",
                "ArchaeoGLOBE-master/analysis/figures/S7_ArchaeoGLOBE_KK10_comparison.png": "30341748324f5f66acadb34c114c3e9d",
            },
        ),
    ],
)
async def test_fetch(questions: list[str], md5tree: dict[str, str], log: Logger):
    """Resolve each question URL recursively, fetch it, and verify file md5s."""
    for question in questions:
        with TemporaryDirectory() as d:
            output_dir = Path(d)
            answers = await resolve(question, True, log)
            assert answers is not None
            # Stricter than the sibling tests: the final answer must be a
            # confirmed Exists, not just MaybeExists.
            assert isinstance(answers[-1], Exists)
            await fetch(answers[-1].repo, output_dir, log)
            # Verify md5 sum of the files we expect to find
            # We are using md5 instead of something more secure because that is what
            # dataverse itself uses
            for subpath, expected_sha in md5tree.items():
                with open(output_dir / subpath, "rb") as f:
                    h = hashlib.md5()
                    h.update(f.read())
                    assert h.hexdigest() == expected_sha
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/fetchers/test_figshare.py | Python | import hashlib
from logging import Logger
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from repoproviders.fetchers import fetch
from repoproviders.resolvers import resolve
from repoproviders.resolvers.base import DoesNotExist
# Network-dependent end-to-end test against a live Figshare article.
@pytest.mark.parametrize(
    ("questions", "md5tree"),
    [
        (
            ("https://figshare.com/account/articles/31337494",),
            {
                "tests/test_utils.py": "8cc64b23ca72e6c2d2b8116a69a8764b",
                "tests/fetchers/test_google_drive.py": "26dde4ac8621070939cd5840ed3cf364",
                "tests/fetchers/test_github_actions_artifacts.py": "23d2577046eb52302466195b939ed72a",
                "tests/fetchers/test_zenodo.py": "83c16eedb777a1fe62b58f7b50e82105",
                "tests/fetchers/test_dataverse.py": "dcda177e04c9b36aa4c519dc2de5bd58",
                "tests/resolvers/test_google_drive.py": "bdb50593d6bc21036b9b744db4e9ae85",
                "tests/resolvers/test_resolve.py": "961ab5fb4fb9d8fbc81f9c5fc567ca2d",
                "tests/resolvers/test_figshare.py": "ad7c199437ef098904605466c1ab0fbb",
                "tests/resolvers/test_github.py": "f0e753e032a682504bd14b0fce4aef36",
                "tests/resolvers/test_wellknown.py": "9bc1d525dabefd123a34cb35c8e8f688",
                "tests/resolvers/test_feature_detect.py": "ce8cd6f5bb5378e4a283cedfad8ad341",
                "tests/resolvers/test_gist.py": "12e4f33eb78dca763d4940889e1fcc93",
                "tests/resolvers/test_doi.py": "5d7981b1f9f854557eaaefb90e8a7082",
                "tests/resolvers/test_giturl.py": "4080c8d6c3f0c015852340139ddc3636",
                "tests/resolvers/test_serialize.py": "b8c52284b9ede5b4adaed5416341002e",
                "tests/resolvers/test_zenodo.py": "78c6626331808a502d1115f1a0eac40c",
                "tests/resolvers/test_gitlab.py": "8570dc96679c953e5070132359965505",
                "tests/resolvers/test_immutablegit.py": "a37b556bca603e2fed5f06e41ecceef2",
                "tests/resolvers/test_dataverse.py": "119292866587531f2e3a3d0523491fd4",
            },
        )
    ],
)
async def test_fetch(questions: list[str], md5tree: dict[str, str], log: Logger):
    """Resolve each question URL recursively, fetch it, and verify file md5s."""
    for question in questions:
        with TemporaryDirectory() as d:
            output_dir = Path(d)
            answers = await resolve(question, True, log)
            assert answers is not None
            assert not isinstance(answers[-1], DoesNotExist)
            await fetch(answers[-1].repo, output_dir, log)
            # Verify md5 sum of the files we expect to find
            for subpath, expected_sha in md5tree.items():
                with open(output_dir / subpath, "rb") as f:
                    h = hashlib.md5()
                    h.update(f.read())
                    assert h.hexdigest() == expected_sha
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/fetchers/test_github_actions_artifacts.py | Python | import hashlib
from logging import Logger
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from repoproviders.fetchers import fetch
from repoproviders.resolvers import resolve
from repoproviders.resolvers.base import DoesNotExist
# Network-dependent end-to-end test against a live GitHub Actions artifact.
@pytest.mark.parametrize(
    ("questions", "md5tree"),
    [
        (
            # These expire after 90 days. We should set up our own github automation for this.
            # However, instead, I'll buy us 90 days by using a recent run.
            # We expect this to fail roughly 90 days from now.
            # When that happens, you can either:
            # 1. Fix this properly by rewriting the test here to dynamically look for artifacts
            # 2. Go to https://github.com/jupyterlab/jupyterlab/actions/workflows/galata.yml, pick a
            #    recent completed run, find the URL for `documentation-test-assets` and update the content
            # https://github.com/yuvipanda/repoproviders/issues/28 has more information
            (
                "https://github.com/jupyterlab/jupyterlab/actions/runs/21958742242/artifacts/5487665511",
            ),
            {
                "test-documentation-workspa-aae0b-bar-Workspaces-context-menu-documentation/video.webm": "e2f7acd5785ed28ec6177081e8a7f7e8",
                "test-documentation-workspa-aae0b-bar-Workspaces-context-menu-documentation/error-context.md": "de239c63a0b17f6f3309e5afd2c47428",
                "test-documentation-customi-38159-t-should-use-default-layout-documentation/video.webm": "4f190c86be532f2d044099011306a38e",
                "test-documentation-customi-38159-t-should-use-default-layout-documentation/default-terminal-position-single-actual.png": "5a0a2040f0fc5b1457986f3575117433",
                "test-documentation-customi-38159-t-should-use-default-layout-documentation/default-terminal-position-single-diff.png": "a042a221e77dcc4954a0fa4e912eee9b",
                "test-documentation-customi-38159-t-should-use-default-layout-documentation/error-context.md": "8e72e868618f8ac3052df7ccc1689e5a",
                "test-documentation-customi-38159-t-should-use-default-layout-documentation/default-terminal-position-single-expected.png": "0ef5ee2bada38346cc2df97b1e4d16f1",
            },
        ),
    ],
)
async def test_fetch(questions: list[str], md5tree: dict[str, str], log: Logger):
    """Resolve each question URL recursively, fetch it, and verify file md5s."""
    for question in questions:
        with TemporaryDirectory() as d:
            output_dir = Path(d)
            answers = await resolve(question, True, log)
            assert answers is not None
            assert not isinstance(answers[-1], DoesNotExist)
            await fetch(answers[-1].repo, output_dir, log)
            # Verify md5 sum of the files we expect to find
            for subpath, expected_sha in md5tree.items():
                with open(output_dir / subpath, "rb") as f:
                    h = hashlib.md5()
                    h.update(f.read())
                    assert h.hexdigest() == expected_sha
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/fetchers/test_google_drive.py | Python | import hashlib
from logging import Logger
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from repoproviders.fetchers import fetch
from repoproviders.resolvers import resolve
from repoproviders.resolvers.base import DoesNotExist
# Network-dependent end-to-end test against a live public Google Drive folder.
@pytest.mark.parametrize(
    ("questions", "md5tree"),
    [
        (
            # Fetch an immutable subdir set up in yuvipanda's drive and is public
            # We pick a small but still nested dir to make the test faster
            (
                "https://drive.google.com/drive/folders/12zoh_LubWyRMZG-KR9GGyukBsO8SCU9j",
            ),
            {
                "PULL_REQUEST_TEMPLATE.md": "0dc1f0612f21713189ed4ccc0d00197b",
                "SUPPORT.md": "14ddad47a068e1660d37fcef01026763",
                "workflows/release.yml": "0646032f1588e8b8621cd1c153bb6fbf",
                "workflows/test.yml": "d778015caa14985a6f61757de44bd5e0",
                "workflows/cron.yml": "3d055970af426e2065403d11dfa526b6",
                "workflows/main.yml": "70271a5c0e3dab1a25df2f6b3f8449eb",
                "ISSUE_TEMPLATE/02-question.yml": "7d7fceedd5b01853f575e0dff757f536",
                "ISSUE_TEMPLATE/03-feature-request.yml": "a1d4f142c8503aa7220220877416106a",
                "ISSUE_TEMPLATE/config.yml": "0311551ec2b71e6f8892ba688ea57dd7",
                "ISSUE_TEMPLATE/01-bug-report.yml": "c23bc48db00e2a5f48185f395614fbf7",
            },
        ),
    ],
)
async def test_fetch(questions: list[str], md5tree: dict[str, str], log: Logger):
    """Resolve each question URL recursively, fetch it, and verify file md5s."""
    for question in questions:
        with TemporaryDirectory() as d:
            output_dir = Path(d)
            answers = await resolve(question, True, log)
            assert answers is not None
            assert not isinstance(answers[-1], DoesNotExist)
            await fetch(answers[-1].repo, output_dir, log)
            # Verify md5 sum of the files we expect to find
            for subpath, expected_sha in md5tree.items():
                with open(output_dir / subpath, "rb") as f:
                    h = hashlib.md5()
                    h.update(f.read())
                    assert h.hexdigest() == expected_sha
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/fetchers/test_hydroshare.py | Python | import hashlib
from logging import Logger
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from repoproviders.fetchers import fetch
from repoproviders.resolvers import resolve
from repoproviders.resolvers.base import DoesNotExist
# Network-dependent end-to-end test against a live Hydroshare resource.
@pytest.mark.parametrize(
    ("questions", "md5tree"),
    [
        (
            ("https://www.hydroshare.org/resource/e42d440acb0b438793b3cdf3bcc09315/",),
            {
                "kf-plantsoiln-alldata.csv": "02c8c5ce2d4673a5fcfbc9e2252bcd2c",
                "kf-soil-n-methods.pdf": "1ba86c4b05b54afe245ec7d9de68e154",
                "kf-variablesreported.csv": "ca96c95e9e4e413ced09005d0ccbaf94",
                "kf-resinn-alldata.csv": "858d2c52ae5f8f0a8920b400217fd8dc",
                "kf-four-plots.gpx.txt": "b9a3e2cde40fb5b7cbf550cb502ff99f",
            },
        )
    ],
)
async def test_fetch(questions: list[str], md5tree: dict[str, str], log: Logger):
    """Resolve each question URL recursively, fetch it, and verify file md5s."""
    for question in questions:
        with TemporaryDirectory() as d:
            output_dir = Path(d)
            answers = await resolve(question, True, log)
            assert answers is not None
            assert not isinstance(answers[-1], DoesNotExist)
            await fetch(answers[-1].repo, output_dir, log)
            # Verify md5 sum of the files we expect to find
            for subpath, expected_sha in md5tree.items():
                with open(output_dir / subpath, "rb") as f:
                    h = hashlib.md5()
                    h.update(f.read())
                    assert h.hexdigest() == expected_sha
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/fetchers/test_zenodo.py | Python | import hashlib
from logging import Logger
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from repoproviders.fetchers import fetch
from repoproviders.resolvers import resolve
from repoproviders.resolvers.base import DoesNotExist
# Network-dependent end-to-end test against live Zenodo / Invenio records,
# covering zip-extraction and plain-file download paths.
@pytest.mark.parametrize(
    ("questions", "md5tree"),
    [
        (
            # Test fetching a record with a single zip file that we then extract,
            # Also accounting for data.caltech.edu sending back 200 response with Location headers
            ("https://data.caltech.edu/records/996aw-mf266",),
            {
                "CaltechDATA_Usage_Graphs.ipynb": "3bb3a97b879112b1ab70923636d63e87",
                "LICENSE": "be9d12a5904d8e4ef6df52c86d2db34a",
                "requirements.txt": "b0081730d38ec5f28f3fea4f843816ce",
                "README.md": "59a0e5157faef752532fe51a2d490c9c",
                ".gitignore": "2a9ac83919f923cc25ea380b19a2a7d9",
                "codemeta.json": "8aa017724932fbdff3d2240c932487b7",
            },
        ),
        (
            # Test fetching a record with a single zip file, without the 200 / Location header
            # issue
            ("https://sandbox.zenodo.org/records/432153",),
            {
                "LICENSE": "65d3616852dbf7b1a6d4b53b00626032",
                "README.md": "041917c62158f2dec74eb1ead07662f1",
                "rutgers.txt": "2f501f69915cbcf0fb185f9e8bdb1c96",
            },
        ),
        (
            # Test fetching a record with a single file that is *not* a zip file
            ("https://sandbox.zenodo.org/records/415845",),
            {"136_poster_-_Dalia_Al-Shahrabi.pdf": "663a007fead7da7f9f6f7ddae71db254"},
        ),
        (
            # Test fetching multiple files without a zip file
            ("https://sandbox.zenodo.org/records/98954",),
            {
                "sampleFile.json": "9c9cf2a3740a65cc3268e12567eed67b",
                "sampleFile.txt": "fedb53c2017c1aad5bf9293b7ce03a71",
            },
        ),
    ],
)
async def test_fetch(questions: list[str], md5tree: dict[str, str], log: Logger):
    """Resolve each question URL recursively, fetch it, and verify file md5s."""
    for question in questions:
        with TemporaryDirectory() as d:
            output_dir = Path(d)
            answers = await resolve(question, True, log)
            assert answers is not None
            assert not isinstance(answers[-1], DoesNotExist)
            await fetch(answers[-1].repo, output_dir, log)
            # Verify md5 sum of the files we expect to find
            for subpath, expected_sha in md5tree.items():
                with open(output_dir / subpath, "rb") as f:
                    h = hashlib.md5()
                    h.update(f.read())
                    assert h.hexdigest() == expected_sha
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_dataverse.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers.base import DoesNotExist, Exists
from repoproviders.resolvers.doi import DataverseResolver
from repoproviders.resolvers.repos import DataverseDataset, DataverseURL
@pytest.mark.parametrize(
    ("url", "expected"),
    (
        # A dataset citation returns the dataset correctly
        (
            DataverseURL(
                URL("https://dataverse.harvard.edu"),
                URL(
                    "https://dataverse.harvard.edu/citation?persistentId=doi:10.7910/DVN/TJCLKP"
                ),
            ),
            Exists(
                DataverseDataset(
                    URL("https://dataverse.harvard.edu"), "doi:10.7910/DVN/TJCLKP"
                )
            ),
        ),
        # A dataset landing page (dataset.xhtml) resolves the same way
        (
            DataverseURL(
                URL("https://dataverse.harvard.edu"),
                URL(
                    "https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/TJCLKP"
                ),
            ),
            Exists(
                DataverseDataset(
                    URL("https://dataverse.harvard.edu"), "doi:10.7910/DVN/TJCLKP"
                )
            ),
        ),
        # Asking for specific files should give us the whole dataset they are a part of
        (
            DataverseURL(
                URL("https://dataverse.harvard.edu"),
                URL("https://dataverse.harvard.edu/api/access/datafile/3323458"),
            ),
            Exists(
                DataverseDataset(
                    URL("https://dataverse.harvard.edu"), "doi:10.7910/DVN/3MJ7IR"
                )
            ),
        ),
        # A file-level persistentId (note the extra /3YRRYJ suffix) maps to its dataset
        (
            DataverseURL(
                URL("https://dataverse.harvard.edu"),
                URL(
                    "https://dataverse.harvard.edu/citation?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
                ),
            ),
            Exists(
                DataverseDataset(
                    URL("https://dataverse.harvard.edu"), "doi:10.7910/DVN/6ZXAGT"
                )
            ),
        ),
        (
            DataverseURL(
                URL("https://dataverse.harvard.edu"),
                URL(
                    "https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
                ),
            ),
            Exists(
                DataverseDataset(
                    URL("https://dataverse.harvard.edu"), "doi:10.7910/DVN/6ZXAGT"
                )
            ),
        ),
        # Asking for datasets that don't exist should return DoesNotExist
        (
            DataverseURL(
                URL("https://dataverse.harvard.edu"),
                URL(
                    "https://dataverse.harvard.edu/citation?persistentId=doi:10.7910/not-found"
                ),
            ),
            DoesNotExist(
                DataverseDataset,
                "doi:10.7910/not-found is neither a file nor a dataset in https://dataverse.harvard.edu",
            ),
        ),
        (
            DataverseURL(
                URL("https://dataverse.harvard.edu"),
                URL(
                    "https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/not-found"
                ),
            ),
            DoesNotExist(
                DataverseDataset,
                "doi:10.7910/not-found is neither a file nor a dataset in https://dataverse.harvard.edu",
            ),
        ),
        # A numeric datafile id that doesn't exist
        (
            DataverseURL(
                URL("https://dataverse.harvard.edu"),
                URL("https://dataverse.harvard.edu/api/access/datafile/0"),
            ),
            DoesNotExist(
                DataverseDataset,
                "No file with id 0 found in dataverse installation https://dataverse.harvard.edu",
            ),
        ),
        # A URL path the resolver doesn't recognize at all yields None
        (
            DataverseURL(
                URL("https://dataverse.harvard.edu"),
                URL("https://dataverse.harvard.edu/blaaaah"),
            ),
            None,
        ),
        (
            DataverseURL(
                URL("https://dataverse.harvard.edu"),
                URL(
                    "https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/not-found"
                ),
            ),
            DoesNotExist(
                DataverseDataset,
                "No file with id doi:10.7910/not-found found in dataverse installation https://dataverse.harvard.edu",
            ),
        ),
    ),
)
async def test_dataverse(url, expected, log):
    """DataverseResolver maps a DataverseURL to the DataverseDataset containing it,
    or reports DoesNotExist / None for unrecognized or missing resources.

    NOTE(review): these cases hit the live dataverse.harvard.edu API — network required.
    """
    dv = DataverseResolver()
    assert await dv.resolve(url, log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_doi.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers.base import Exists
from repoproviders.resolvers.doi import Doi, DoiResolver
@pytest.mark.parametrize(
    ("url", "expected"),
    (
        # A URL that is not a DOI or handle at all
        ("https://example.com/something", None),
        # doi schema'd URI
        (
            "doi:10.7910/DVN/6ZXAGT/3YRRYJ",
            Exists(
                Doi(
                    URL(
                        "https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
                    )
                )
            ),
        ),
        # handle schema'd URI
        (
            "hdl:11529/10016",
            Exists(
                Doi(
                    URL(
                        "https://data.cimmyt.org/dataset.xhtml?persistentId=hdl:11529/10016"
                    )
                )
            ),
        ),
        # For convenience, we do accept DOIs without a scheme
        (
            "10.7910/DVN/6ZXAGT/3YRRYJ",
            Exists(
                Doi(
                    URL(
                        "https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
                    )
                )
            ),
        ),
        # But not handles without a scheme
        ("11529/10016", None),
        # Three DOI resolution URLs
        (
            "https://doi.org/10.7910/DVN/6ZXAGT/3YRRYJ",
            Exists(
                Doi(
                    URL(
                        "https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
                    )
                )
            ),
        ),
        (
            "https://www.doi.org/10.7910/DVN/6ZXAGT/3YRRYJ",
            Exists(
                Doi(
                    URL(
                        "https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
                    )
                )
            ),
        ),
        (
            "https://hdl.handle.net/10.7910/DVN/6ZXAGT/3YRRYJ",
            Exists(
                Doi(
                    URL(
                        "https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
                    )
                )
            ),
        ),
    ),
)
async def test_doi(url, expected, log):
    """DoiResolver dereferences DOI/handle identifiers (and doi.org / handle.net
    resolution URLs) to the target URL they redirect to, wrapped in Exists(Doi(...)).

    NOTE(review): dereferencing goes through the live DOI/handle resolvers — network required.
    """
    doi = DoiResolver()
    assert await doi.resolve(URL(url), log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_feature_detect.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers.base import Exists, MaybeExists
from repoproviders.resolvers.feature_detect import FeatureDetectResolver
from repoproviders.resolvers.git import Git
from repoproviders.resolvers.repos import CKANDataset, DataverseURL
@pytest.mark.parametrize(
    ("url", "expected"),
    (
        # A URL with no detectable repository features
        ("https://example.com/something", None),
        # Try a raw git repo
        (
            "https://git.kernel.org/pub/scm/virt/kvm/kvm.git/",
            Exists(Git("https://git.kernel.org/pub/scm/virt/kvm/kvm.git/", "HEAD")),
        ),
        # A Codeberg repo
        (
            "https://codeberg.org/Codeberg/Documentation",
            Exists(Git("https://codeberg.org/Codeberg/Documentation", "HEAD")),
        ),
        # A dataverse URL from a dataverse installation that is *not* in our well known list
        (
            "https://demo.dataverse.org/dataset.xhtml?persistentId=doi:10.70122/FK2/MBQA9G",
            MaybeExists(
                DataverseURL(
                    URL("https://demo.dataverse.org"),
                    URL(
                        "https://demo.dataverse.org/dataset.xhtml?persistentId=doi:10.70122/FK2/MBQA9G"
                    ),
                )
            ),
        ),
        # A dataverse URL that does exist in our list, but good to make sure guess works with it still
        (
            "https://dataverse.harvard.edu/citation?persistentId=doi:10.7910/DVN/TJCLKP",
            MaybeExists(
                DataverseURL(
                    URL("https://dataverse.harvard.edu/"),
                    URL(
                        "https://dataverse.harvard.edu/citation?persistentId=doi:10.7910/DVN/TJCLKP"
                    ),
                )
            ),
        ),
        # A working CKAN instance
        (
            "https://catalog.data.gov/dataset/authorizations-from-10-01-2006-thru-12-31-2022",
            MaybeExists(
                CKANDataset(
                    URL("https://catalog.data.gov"),
                    "authorizations-from-10-01-2006-thru-12-31-2022",
                )
            ),
        ),
        # Looks like, but isn't actually a CKAN dataset
        ("https://catalog.data.gov/dataset/", None),
        # A CKAN instance with a base_url
        (
            "https://open.canada.ca/data/en/dataset/90fed587-1364-4f33-a9ee-208181dc0b97",
            MaybeExists(
                CKANDataset(
                    URL("https://open.canada.ca/data/en"),
                    "90fed587-1364-4f33-a9ee-208181dc0b97",
                )
            ),
        ),
    ),
)
async def test_doi(url, expected, log):
    """FeatureDetectResolver sniffs arbitrary URLs for known repository features
    (raw git, Dataverse, CKAN) without relying on a well-known-host list.

    NOTE(review): this function is named ``test_doi`` but exercises
    FeatureDetectResolver — looks like a copy/paste slip from test_doi.py;
    consider renaming to ``test_feature_detect``.

    NOTE(review): relies on live probing of the listed hosts — network required.
    """
    fd = FeatureDetectResolver()
    assert await fd.resolve(URL(url), log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_figshare.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers.base import DoesNotExist, Exists, MaybeExists
from repoproviders.resolvers.doi import FigshareResolver, ImmutableFigshareResolver
from repoproviders.resolvers.repos import (
FigshareDataset,
FigshareInstallation,
FigshareURL,
ImmutableFigshareDataset,
)
@pytest.mark.parametrize(
    ("url", "expected"),
    (
        # A non-dataset URL
        (
            FigshareURL(
                FigshareInstallation(
                    URL("https://figshare.com/"),
                    URL("https://api.figshare.com/v2/"),
                ),
                URL("https://figshare.com/browse"),
            ),
            None,
        ),
        # A non-dataset URL that looks suspiciously like a dataset URL
        (
            FigshareURL(
                FigshareInstallation(
                    URL("https://figshare.com/"),
                    URL("https://api.figshare.com/v2/"),
                ),
                URL(
                    "https://figshare.com/collections/Risk_reduction_in_SARS-CoV-2_infection_and_reinfection_conferred_by_humoral_antibody_levels_among_essential_workers_during_Omicron_predominance/7605487",
                ),
            ),
            None,
        ),
        # Some old school URLs
        # (article id only, no version)
        (
            FigshareURL(
                FigshareInstallation(
                    URL("https://figshare.com/"),
                    URL("https://api.figshare.com/v2/"),
                ),
                URL(
                    "https://figshare.com/articles/title/9782777",
                ),
            ),
            MaybeExists(
                FigshareDataset(
                    FigshareInstallation(
                        URL("https://figshare.com/"),
                        URL("https://api.figshare.com/v2/"),
                    ),
                    9782777,
                    None,
                )
            ),
        ),
        # (article id with explicit version 2)
        (
            FigshareURL(
                FigshareInstallation(
                    URL("https://figshare.com/"),
                    URL("https://api.figshare.com/v2/"),
                ),
                URL(
                    "https://figshare.com/articles/title/9782777/2",
                ),
            ),
            MaybeExists(
                FigshareDataset(
                    FigshareInstallation(
                        URL("https://figshare.com/"),
                        URL("https://api.figshare.com/v2/"),
                    ),
                    9782777,
                    2,
                )
            ),
        ),
        # New style URLs
        (
            FigshareURL(
                FigshareInstallation(
                    URL("https://figshare.com/"),
                    URL("https://api.figshare.com/v2/"),
                ),
                URL(
                    "https://figshare.com/articles/code/Binder-ready_openSenseMap_Analysis/9782777",
                ),
            ),
            MaybeExists(
                FigshareDataset(
                    FigshareInstallation(
                        URL("https://figshare.com/"),
                        URL("https://api.figshare.com/v2/"),
                    ),
                    9782777,
                    None,
                )
            ),
        ),
        (
            FigshareURL(
                FigshareInstallation(
                    URL("https://figshare.com/"),
                    URL("https://api.figshare.com/v2/"),
                ),
                URL(
                    "https://figshare.com/articles/code/Binder-ready_openSenseMap_Analysis/9782777/3",
                ),
            ),
            MaybeExists(
                FigshareDataset(
                    FigshareInstallation(
                        URL("https://figshare.com/"),
                        URL("https://api.figshare.com/v2/"),
                    ),
                    9782777,
                    3,
                )
            ),
        ),
    ),
)
async def test_figshare(url, expected, log):
    """FigshareResolver parses figshare article URLs (old and new styles) into
    FigshareDataset(article_id, version); non-article URLs resolve to None."""
    fs = FigshareResolver()
    assert await fs.resolve(url, log) == expected
@pytest.mark.parametrize(
    ("question", "expected"),
    (
        # No version pinned: resolver looks up the latest version (3) -> Exists
        (
            FigshareDataset(
                FigshareInstallation(
                    URL("https://figshare.com/"), URL("https://api.figshare.com/v2/")
                ),
                9782777,
                None,
            ),
            Exists(
                ImmutableFigshareDataset(
                    FigshareInstallation(
                        URL("https://figshare.com/"),
                        URL("https://api.figshare.com/v2/"),
                    ),
                    9782777,
                    3,
                )
            ),
        ),
        # Version already pinned: passed through without verification -> MaybeExists
        (
            FigshareDataset(
                FigshareInstallation(
                    URL("https://figshare.com/"), URL("https://api.figshare.com/v2/")
                ),
                9782777,
                2,
            ),
            MaybeExists(
                ImmutableFigshareDataset(
                    FigshareInstallation(
                        URL("https://figshare.com/"),
                        URL("https://api.figshare.com/v2/"),
                    ),
                    9782777,
                    2,
                )
            ),
        ),
        # Non existent things
        (
            FigshareDataset(
                FigshareInstallation(
                    URL("https://figshare.com/"), URL("https://api.figshare.com/v2/")
                ),
                97827778384384634634634863463434343,
                None,
            ),
            DoesNotExist(
                ImmutableFigshareDataset,
                "Article ID 97827778384384634634634863463434343 not found on figshare installation https://figshare.com/",
            ),
        ),
    ),
)
async def test_immutable_figshare(question, expected, log):
    """ImmutableFigshareResolver pins a FigshareDataset to a specific version,
    resolving the latest version via the API when none was given.

    NOTE(review): the unpinned case queries the live figshare API — network required.
    """
    ifs = ImmutableFigshareResolver()
    assert await ifs.resolve(question, log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_gist.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers.base import MaybeExists
from repoproviders.resolvers.git import GistResolver, Git
from repoproviders.resolvers.repos import GistURL
@pytest.mark.parametrize(
    ("url", "expected"),
    (
        # Gist URLs that aren't gists
        (
            GistURL(
                URL("https://gist.github.com"), URL("https://gist.github.com/yuvipanda")
            ),
            None,
        ),
        # We don't support revisions yet
        (
            GistURL(
                URL("https://gist.github.com"),
                URL("https://gist.github.com/JakeWharton/5423616/revisions"),
            ),
            None,
        ),
        # An actual gist
        (
            GistURL(
                URL("https://gist.github.com"),
                URL("https://gist.github.com/JakeWharton/5423616"),
            ),
            MaybeExists(
                repo=Git(
                    repo="https://gist.github.com/JakeWharton/5423616",
                    ref="HEAD",
                )
            ),
        ),
    ),
)
async def test_gist(url, expected, log):
    """GistResolver turns user/id gist URLs into Git repos (at HEAD);
    user pages and revision listings resolve to None."""
    # Named `resolver` rather than `gh` (a leftover from the GitHub tests):
    # this exercises GistResolver, not a GitHub resolver.
    resolver = GistResolver()
    assert await resolver.resolve(url, log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_github.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers.base import DoesNotExist, MaybeExists
from repoproviders.resolvers.git import Git, GitHubPRResolver, GitHubResolver
from repoproviders.resolvers.repos import GitHubActionArtifact, GitHubPR, GitHubURL
@pytest.mark.parametrize(
    ("url", "expected"),
    (
        # GitHub URLs that are not repos
        (
            GitHubURL(URL("https://github.com"), URL("https://github.com/pyOpenSci")),
            None,
        ),
        (
            GitHubURL(
                URL("https://github.com"),
                URL(
                    "https://github.com/yuvipanda/repoproviders/actions/runs/12552733471/job/34999118812"
                ),
            ),
            None,
        ),
        (
            GitHubURL(
                URL("https://github.com"),
                URL("https://github.com/yuvipanda/repoproviders/settings"),
            ),
            None,
        ),
        # PR URLs hand off to the GitHubPR resolver (see test_github_pr below)
        (
            GitHubURL(
                URL("https://github.com"),
                URL("https://github.com/jupyter/docker-stacks/pull/2194"),
            ),
            MaybeExists(
                GitHubPR(
                    URL("https://github.com"),
                    URL("https://github.com/jupyter/docker-stacks/pull/2194"),
                )
            ),
        ),
        # Simple github repo URL
        (
            GitHubURL(
                URL("https://github.com"),
                URL("https://github.com/pyOpenSci/pyos-package-template"),
            ),
            MaybeExists(
                Git("https://github.com/pyOpenSci/pyos-package-template", "HEAD")
            ),
        ),
        # Trailing slash normalized?
        (
            GitHubURL(
                URL("https://github.com"),
                URL("https://github.com/pyOpenSci/pyos-package-template/"),
            ),
            MaybeExists(
                Git("https://github.com/pyOpenSci/pyos-package-template", "HEAD")
            ),
        ),
        # blobs and tree
        # (tree URL with branch + subpath -> repo pinned to that branch)
        (
            GitHubURL(
                URL("https://github.com"),
                URL(
                    "https://github.com/pyOpenSci/pyos-package-template/tree/main/includes/licenses"
                ),
            ),
            MaybeExists(
                Git("https://github.com/pyOpenSci/pyos-package-template", "main")
            ),
        ),
        (
            GitHubURL(
                URL("https://github.com"),
                URL(
                    "https://github.com/pyOpenSci/pyos-package-template/tree/original-cookie/docs"
                ),
            ),
            MaybeExists(
                Git(
                    "https://github.com/pyOpenSci/pyos-package-template",
                    "original-cookie",
                )
            ),
        ),
        # (blob URL pinned to a full commit SHA)
        (
            GitHubURL(
                URL("https://github.com"),
                URL(
                    "https://github.com/pyOpenSci/pyos-package-template/blob/b912433bfae541972c83529359f4181ef0fe9b67/README.md"
                ),
            ),
            MaybeExists(
                Git(
                    "https://github.com/pyOpenSci/pyos-package-template",
                    ref="b912433bfae541972c83529359f4181ef0fe9b67",
                )
            ),
        ),
        # Resolution is purely syntactic: a nonexistent repo still yields MaybeExists
        (
            GitHubURL(
                URL("https://github.com"),
                URL("https://github.com/yuvipanda/does-not-exist-e43"),
            ),
            MaybeExists(
                Git(repo="https://github.com/yuvipanda/does-not-exist-e43", ref="HEAD")
            ),
        ),
        # Actions artifact download URL -> GitHubActionArtifact(owner, repo, artifact_id)
        (
            GitHubURL(
                URL("https://github.com"),
                URL(
                    "https://github.com/jupyterlab/jupyterlab/actions/runs/21701082973/artifacts/5385867847"
                ),
            ),
            MaybeExists(
                GitHubActionArtifact(
                    URL("https://github.com"), "jupyterlab", "jupyterlab", 5385867847
                )
            ),
        ),
    ),
)
async def test_github(url, expected, log):
    """GitHubResolver parses GitHub URLs into Git repos (with ref), PRs, or
    action artifacts — without contacting GitHub, hence MaybeExists."""
    gh = GitHubResolver()
    assert await gh.resolve(url, log) == expected
@pytest.mark.parametrize(
    ("url", "expected"),
    (
        # An existing PR resolves to the head repo + head branch of that PR
        (
            GitHubPR(
                URL("https://github.com"),
                URL("https://github.com/jupyter/docker-stacks/pull/2194"),
            ),
            MaybeExists(
                Git(
                    "https://github.com/mathbunnyru/docker-stacks",
                    "update_oracledb_version",
                )
            ),
        ),
        # A PR number that doesn't exist yields DoesNotExist
        (
            GitHubPR(
                URL("https://github.com"),
                URL("https://github.com/jupyter/docker-stacks/pull/219400000000"),
            ),
            DoesNotExist(
                GitHubPR,
                "PR 219400000000 does not exist at https://github.com/jupyter/docker-stacks/pull/219400000000",
            ),
        ),
    ),
)
async def test_github_pr(url, expected, log):
    """GitHubPRResolver looks up a PR and resolves it to the contributor's
    repo and branch, or DoesNotExist for an unknown PR number."""
    resolver = GitHubPRResolver()
    result = await resolver.resolve(url, log)
    assert result == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.