id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1656881 | <reponame>dued/dued
import os
from os import name
import sys
import imp
from sys import path
from . import Config
from .excepciones import ColeccionNoEcontrada
from .util import debug
class Cargador(object):
    """
    Abstract class defining how to find/import a `.Coleccion` based on
    session(s).
    .. versionadded:: 1.0
    """
    def __init__(self, config=None):
        """
        Set up a new loader with some `.Config` configuration.
        :param config:
            An explicit `.Config` to use; it is referenced for
            configuration options related to loading.
            Defaults to an anonymous ``Config()`` if none is provided.
        """
        if config is None:
            config = Config()
        self.config = config
    def buscar(self, nombre):
        """
        Implementation-specific lookup method that finds the collection
        ``nombre``.
        Must return a valid 4-tuple for use with `imp.load_module`,
        usually a name string followed by the contents of the 3-tuple
        returned by `imp.find_module` (``file``, ``pathname``,
        ``description``.)
        For a sample implementation, see `.CargaDesdeElSitemaDeArchivos`.
        .. versionadded:: 1.0
        """
        raise NotImplementedError
    def cargar(self, nombre=None):
        """
        Load and return the collection module identified by ``nombre``.
        This method requires a working implementation of `.buscar` in
        order to function.
        In addition to importing the named module, it adds the module's
        parent directory to the front of `sys.path` to provide normal
        Python import behavior (i.e. so the loaded module can itself
        load local modules or packages).
        :returns:
            A two-tuple of ``(module, directory)`` where ``module`` is
            the Python module object containing the collection, and
            ``directory`` is a string path to the directory in which the
            module was found.
        .. versionadded:: 1.0
        """
        if nombre is None:
            nombre = self.config.artefactos.nombre_de_coleccion
        # Find the named task module, per the implementation.
        # Raises an exception if not found.
        fd, ruta, desc = self.buscar(nombre)
        try:
            # Make sure the containing directory is on sys.path in case
            # the module being imported tries to load local names.
            padre = os.path.dirname(ruta)
            if padre not in sys.path:
                sys.path.insert(0, padre)
            # Actual import.
            # NOTE(review): `imp` is deprecated (removed in Python 3.12);
            # migrating to importlib is advisable — confirm target runtime.
            module = imp.load_module(nombre, fd, ruta, desc)
            # Return module + path.
            # TODO: is there any reason clients can't simply refer to
            # os.path.dirname(module.__file__)?
            return module, padre
        finally:
            # Make sure to clean up the open file object returned by
            # buscar(), if there was one (packages, as opposed to
            # modules, don't open any file).
            if fd:
                fd.close()
class CargaDesdeElSitemaDeArchivos(Cargador):
    """
    Loads Python files from the filesystem (e.g. ``artefactos.py``.)
    Searches recursively towards the filesystem root from a given
    starting point.
    .. versionadded:: 1.0
    """
    # TODO: could introduce a config obj here for passing down to Coleccion
    # TODO: otherwise Cargador has to know about specific bits to pass on,
    # such as auto-dashes, and has to grow one of those for every bit
    # Coleccion needs to know about
    def __init__(self, inicio=None, **kwargs):
        """
        :param inicio: directory to start the upward search from;
            defaults to the configured ``artefactos.dir_raiz``.
        """
        super(CargaDesdeElSitemaDeArchivos, self).__init__(**kwargs)
        if inicio is None:
            inicio = self.config.artefactos.dir_raiz
        self._start = inicio
    @property
    def iniciar(self):
        # Lazily determine the default CWD if the configured value is falsy
        return self._start or os.getcwd()
    def buscar(self, nombre):
        # Accumulate all parent directories, walking up to the root.
        iniciar = self.iniciar
        debug("CargaDesdeElSitemaDeArchivos busca iniciando en {!r}".format(iniciar))
        padres = [os.path.abspath(iniciar)]
        padres.append(os.path.dirname(padres[-1]))
        # dirname() of the filesystem root returns the root itself, which
        # terminates the walk.
        while padres[-1] != padres[-2]:
            padres.append(os.path.dirname(padres[-1]))
        # Make sure we don't end up with a duplicated root entry.
        if padres[-1] == padres[-2]:
            padres = padres[:-1]
        # Use find_module with our list of parents. ImportError from
        # find_module means "could not find", not "found but could not
        # import", so we convert it into a more obvious exception class.
        try:
            tup = imp.find_module(nombre, padres)
            debug("Modulo encontrado: {!r}".format(tup[1]))
            return tup
        except ImportError:
            msj = "ImportError cargando {!r}, levantando ColeccionNoEcontrada"
            debug(msj.format(nombre))
            raise ColeccionNoEcontrada(nombre=nombre, inicio=iniciar)
| StarcoderdataPython |
9759113 | <filename>methods/call.py<gh_stars>0
# __call__ tutorial
class Pay:
    """Track cumulative hours and pay for an hourly worker.

    Demonstrates ``__call__``: calling an instance records a number of
    hours worked and returns the pay earned for those hours.
    """

    def __init__(self, hourly_wage):
        """Create a tracker for a worker paid *hourly_wage* per hour."""
        self.hourly_wage = hourly_wage
        # Accumulators are instance attributes (the original used class
        # attributes, which would be shared state if mutated non-augmented).
        self.total_hours = 0
        self.total_pay = 0

    def __call__(self, hours_worked):
        """Record *hours_worked*, update the totals, and return the pay earned."""
        earned = hours_worked * self.hourly_wage
        self.total_hours += hours_worked
        self.total_pay += earned
        return earned
# Demo: each call on the instance logs 8 hours and the totals accumulate.
pay = Pay(15)
print('Week 1:', pay.total_hours, pay.total_pay)
pay(8)
print('Week 2:', pay.total_hours, pay.total_pay)
pay(8)
print('Week 3:', pay.total_hours, pay.total_pay)
class Manager:
    # Deliberately a CLASS attribute: every Manager shares this single Pay
    # tracker, which the demo below relies on.
    pay = Pay(15)
    def __init__(self, name):
        self.name = name.lower()
# Demo: calls go through the shared class-level Pay instance.
michael = Manager('michael')
print('michael', michael.pay.total_hours, michael.pay.total_pay)
michael.pay(8)
print('michael', michael.pay.total_hours, michael.pay.total_pay)
michael.pay(8)
print('michael', michael.pay.total_hours, michael.pay.total_pay)
4857564 | import os
from argparse import Namespace
from tqdm import tqdm
import time
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
import sys
sys.path.append(".")
sys.path.append("..")
from configs import data_configs
from datasets.inference_dataset import InferenceDataset
from editing.latent_editor import LatentEditor
from models.e4e import e4e
from options.test_options import TestOptions
from utils.common import tensor2im
from utils.inference_utils import get_average_image, run_on_batch
def run():
    """
    This script can be used to perform inversion and editing. Please note that this script supports editing using
    only the ReStyle-e4e model and currently supports editing using three edit directions found using InterFaceGAN
    (age, smile, and pose) on the faces domain.
    For performing the edits please provide the arguments `--edit_directions` and `--factor_ranges`. For example,
    setting these values to be `--edit_directions=age,smile,pose` and `--factor_ranges=5,5,5` will use a lambda range
    between -5 and 5 for each of the attributes. These should be comma-separated lists of the same length. You may
    get better results by playing around with the factor ranges for each edit.
    """
    test_opts = TestOptions().parse()
    # Per-edit grids go under editing_results/; side-by-side strips under editing_coupled/.
    out_path_results = os.path.join(test_opts.exp_dir, 'editing_results')
    out_path_coupled = os.path.join(test_opts.exp_dir, 'editing_coupled')
    os.makedirs(out_path_results, exist_ok=True)
    os.makedirs(out_path_coupled, exist_ok=True)
    # update test options with options used during training
    ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
    opts = ckpt['opts']
    opts.update(vars(test_opts))
    opts = Namespace(**opts)
    net = e4e(opts)
    net.eval()
    net.cuda()
    print('Loading dataset for {}'.format(opts.dataset_type))
    if opts.dataset_type != "ffhq_encode":
        raise ValueError("Editing script only supports edits on the faces domain!")
    dataset_args = data_configs.DATASETS[opts.dataset_type]
    transforms_dict = dataset_args['transforms'](opts).get_transforms()
    dataset = InferenceDataset(root=opts.data_path,
                               transform=transforms_dict['transform_inference'],
                               opts=opts)
    dataloader = DataLoader(dataset,
                            batch_size=opts.test_batch_size,
                            shuffle=False,
                            num_workers=int(opts.test_workers),
                            drop_last=False)
    if opts.n_images is None:
        opts.n_images = len(dataset)
    latent_editor = LatentEditor(net.decoder)
    # Parse the comma-separated CLI strings into parallel lists; they must
    # pair up one factor range per edit direction.
    opts.edit_directions = opts.edit_directions.split(',')
    opts.factor_ranges = [int(factor) for factor in opts.factor_ranges.split(',')]
    if len(opts.edit_directions) != len(opts.factor_ranges):
        raise ValueError("Invalid edit directions and factor ranges. Please provide a single factor range for each"
                         f"edit direction. Given: {opts.edit_directions} and {opts.factor_ranges}")
    avg_image = get_average_image(net, opts)
    global_i = 0
    global_time = []
    for input_batch in tqdm(dataloader):
        if global_i >= opts.n_images:
            break
        with torch.no_grad():
            input_cuda = input_batch.cuda().float()
            tic = time.time()
            result_batch = edit_batch(input_cuda, net, avg_image, latent_editor, opts)
            toc = time.time()
            global_time.append(toc - tic)
        resize_amount = (256, 256) if opts.resize_outputs else (opts.output_size, opts.output_size)
        for i in range(input_batch.shape[0]):
            im_path = dataset.paths[global_i]
            results = result_batch[i]
            inversion = results.pop('inversion')
            input_im = tensor2im(input_batch[i])
            all_edit_results = []
            # Build one horizontal strip per edit: input | inversion | edits.
            for edit_name, edit_res in results.items():
                res = np.array(input_im.resize(resize_amount))  # set the input image
                res = np.concatenate([res, np.array(inversion.resize(resize_amount))], axis=1)  # set the inversion
                for result in edit_res:
                    res = np.concatenate([res, np.array(result.resize(resize_amount))], axis=1)
                res_im = Image.fromarray(res)
                all_edit_results.append(res_im)
                edit_save_dir = os.path.join(out_path_results, edit_name)
                os.makedirs(edit_save_dir, exist_ok=True)
                res_im.save(os.path.join(edit_save_dir, os.path.basename(im_path)))
            # save final concatenated result if all factor ranges are equal
            # (unequal ranges produce strips of different widths, which
            # cannot be stacked vertically).
            if opts.factor_ranges.count(opts.factor_ranges[0]) == len(opts.factor_ranges):
                coupled_res = np.concatenate(all_edit_results, axis=0)
                im_save_path = os.path.join(out_path_coupled, os.path.basename(im_path))
                Image.fromarray(coupled_res).save(im_save_path)
            global_i += 1
    # Write mean/std per-batch runtime alongside the results.
    stats_path = os.path.join(opts.exp_dir, 'stats.txt')
    result_str = 'Runtime {:.4f}+-{:.4f}'.format(np.mean(global_time), np.std(global_time))
    print(result_str)
    with open(stats_path, 'w') as f:
        f.write(result_str)
def edit_batch(inputs, net, avg_image, latent_editor, opts):
    """Invert a batch and apply every configured InterFaceGAN edit to it.

    Returns a dict mapping sample index to a dict holding the sample's
    'inversion' image plus one entry of edit results per edit direction.
    """
    y_hat, latents = get_inversions_on_batch(inputs, net, avg_image, opts)
    # Seed each sample's result dict with its inversion image.
    results = {}
    for sample_idx in range(len(inputs)):
        results[sample_idx] = {'inversion': tensor2im(y_hat[sample_idx])}
    # Apply each edit direction over its symmetric factor range.
    for direction, magnitude in zip(opts.edit_directions, opts.factor_ranges):
        per_sample = latent_editor.apply_interfacegan(
            latents=latents,
            direction=direction,
            factor_range=(-magnitude, magnitude))
        for sample_idx, sample_res in per_sample.items():
            results[sample_idx][direction] = sample_res
    return results
def get_inversions_on_batch(inputs, net, avg_image, opts):
    """Run ReStyle inversion on a batch and keep only the final iterate.

    Returns ``(images, latents)``: a list with the last inversion image
    per sample, and the matching final latent codes stacked on CUDA.
    """
    result_batch, result_latents = run_on_batch(inputs, net, opts, avg_image)
    images, final_latents = [], []
    # Each per-sample entry holds one result per refinement step; the
    # last step is the inversion we edit.
    for idx in range(len(result_batch)):
        images.append(result_batch[idx][-1])
        final_latents.append(torch.from_numpy(result_latents[idx][-1]).cuda())
    return images, torch.stack(final_latents)
# Script entry point: perform inversion + editing when executed directly.
if __name__ == '__main__':
    run()
1919553 | from shallowflow.api.source import AbstractSimpleSource
from shallowflow.api.config import Option
from shallowflow.api.vars import VariableName
from shallowflow.api.compatibility import Unknown
class GetVariable(AbstractSimpleSource):
    """
    Outputs the value of the specified variable.
    """

    def description(self):
        """
        Returns a description for the actor.

        :return: the actor description
        :rtype: str
        """
        return "Outputs the value of the specified variable."

    def _define_options(self):
        """
        For configuring the options.
        """
        super()._define_options()
        option = Option(
            name="var_name",
            value_type=VariableName,
            def_value=VariableName("var"),
            help="The name of the variable")
        self._option_manager.add(option)

    def generates(self):
        """
        Returns the types that get generated.

        :return: the list of types
        :rtype: list
        """
        return [Unknown]

    def setup(self):
        """
        Prepares the actor for use.

        :return: None if successful, otherwise error message
        :rtype: str
        """
        result = super().setup()
        if result is not None:
            return result
        # An empty variable name can never resolve, so fail fast here.
        if len(self.get("var_name")) == 0:
            return "No variable name provided!"
        return None

    def _do_execute(self):
        """
        Performs the actual execution.

        :return: None if successful, otherwise error message
        :rtype: str
        """
        name = self.get("var_name")
        if not self.variables.has(name):
            return "Variable not available: %s" % name
        self._output.append(self.variables.get(name))
        return None
| StarcoderdataPython |
1832122 | import numpy as np
def coco_contour_to_cv2(contour, dtype):
    """Convert a COCO polygon to an OpenCV contour array.

    COCO stores a polygon as a flat list ``[x_1, y_1, x_2, y_2, ...]``;
    OpenCV expects an array shaped ``(n_points, 1, 2)``, i.e.
    ``[[[x_1, y_1]], [[x_2, y_2]], ...]``.

    Args:
        contour: flat sequence of alternating x/y coordinates.
        dtype: dtype of the output array (cv2 is a little bit weird
            about dtypes, so callers pass it explicitly).

    Returns:
        numpy.ndarray of shape ``(len(contour) // 2, 1, 2)``.
    """
    flat = np.asarray(contour, dtype=dtype)
    # Drop a trailing unpaired coordinate (matches the original's
    # int(len/2) truncation), then reshape directly instead of building
    # the pairs by index arithmetic.
    return flat[: flat.size // 2 * 2].reshape(-1, 1, 2)
| StarcoderdataPython |
1720505 | <reponame>glennmatthews/aagen<gh_stars>0
# JSON encoding/decoding for AAGen
from json import JSONEncoder
from .map import SortedSet, DungeonMap, Region, Connection, Decoration
from .direction import Direction
import aagen.geometry
class MapEncoder(JSONEncoder):
    def default(self, obj):
        """Convert AAGen object instances to JSON-encodable data.

        Each AAGen type is tagged with a ``__type__`` key so that
        map_from_dict() can reconstruct it on load. Geometry objects
        (anything with a ``geom_type`` attribute) are serialized via
        aagen.geometry.to_string, and Direction by its name.
        """
        # Order matters: check the most specific AAGen types before the
        # generic geometry fallback.
        if isinstance(obj, SortedSet):
            return list(obj)
        elif isinstance(obj, DungeonMap):
            return {
                '__type__': 'DungeonMap',
                'regions': obj.regions,
                'connections': obj.connections,
                'decorations': obj.decorations
            }
        elif isinstance(obj, Region):
            return {
                '__type__': 'Region',
                'kind': obj.kind,
                'polygon': obj.polygon
            }
        elif isinstance(obj, Connection):
            return {
                '__type__': 'Connection',
                'kind': obj.kind,
                'line': obj.line,
                'direction': obj.direction
            }
        elif isinstance(obj, Decoration):
            return {
                '__type__': 'Decoration',
                'kind': obj.kind,
                'polygon': obj.polygon,
                'orientation': obj.orientation
            }
        elif isinstance(obj, Direction):
            return obj.name
        elif hasattr(obj, 'geom_type'):
            # Shapely-style geometry objects expose geom_type.
            return aagen.geometry.to_string(obj)
        # Default case: let JSONEncoder raise its usual TypeError.
        return JSONEncoder.default(self, obj)
def map_from_dict(obj):
    """Convert a dict (as read by JSON) into the corresponding
    AAGen object instance.

    Dicts without a ``__type__`` marker — and dicts with an
    unrecognized marker — are returned unchanged.
    """
    if '__type__' not in obj:
        return obj
    obj_type = obj['__type__']
    if obj_type == 'Region':
        return Region(obj['kind'], aagen.geometry.from_string(obj['polygon']))
    elif obj_type == 'Connection':
        # Backward compatibility: older files stored the direction under
        # 'grow_direction' instead of 'direction'.
        direction_key = 'grow_direction' if 'grow_direction' in obj else 'direction'
        return Connection(obj['kind'],
                          aagen.geometry.from_string(obj['line']),
                          dir=Direction.named(obj[direction_key]))
    elif obj_type == 'Decoration':
        return Decoration(obj['kind'],
                          aagen.geometry.from_string(obj['polygon']),
                          Direction.named(obj['orientation']))
    elif obj_type == 'DungeonMap':
        dungeon_map = DungeonMap()
        for reg in obj['regions']:
            dungeon_map.add_region(reg)
        for conn in obj['connections']:
            dungeon_map.add_connection(conn)
        for dec in obj['decorations']:
            dungeon_map.add_decoration(dec)
        return dungeon_map
    return obj
| StarcoderdataPython |
1942673 | <gh_stars>1-10
"""
Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...)
unless there exists a 'from future_builtins import zip' statement in the
top-level namespace.
We avoid the transformation if the zip() call is directly contained in
iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name, Call, in_special_context
class FixZip(fixer_base.ConditionalFix):
    """Wrap ``zip(...)`` calls in ``list(...)`` for Python 3 semantics,
    unless the module imports zip from future_builtins."""
    # lib2to3 pattern matching a bare zip(...) call.
    PATTERN = """
    power< 'zip' args=trailer< '(' [any] ')' >
    >
    """
    # ConditionalFix: skip the whole file when this import is present.
    skip_on = "future_builtins.zip"
    def transform(self, node, results):
        if self.should_skip(node):
            return
        # Contexts that consume an iterator directly (iter(), list(),
        # for-loops, ...) need no wrapping.
        if in_special_context(node):
            return None
        new = node.clone()
        # Clear the prefix before nesting; the outer call keeps the
        # original whitespace/comments.
        new.prefix = ""
        new = Call(Name("list"), [new])
        new.prefix = node.prefix
        return new
| StarcoderdataPython |
11250701 | <filename>strings/anagram.py
def valid_anagram(val1: str, val2: str) -> bool:
    """Return True if *val2* is an anagram of *val1*.

    Two strings are anagrams when they contain exactly the same
    characters with the same multiplicities. The comparison is
    case-sensitive, matching the original hand-rolled implementation.
    """
    from collections import Counter

    # Cheap early exit on mismatched lengths; Counter equality then
    # compares the full character multisets in O(n).
    if len(val1) != len(val2):
        return False
    return Counter(val1) == Counter(val2)
| StarcoderdataPython |
12807859 | <reponame>rom1504/embedding-reader
"""
This is an example on how to use embedding reader to do an inference over a set of billion
of clip vit-l/14 embeddings to predict whether the corresponding images are safe or not
"""
from embedding_reader import EmbeddingReader
import fire
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import numpy as np
import fsspec
import math
import pandas as pd
import gc
def load_safety_model():
    """load the safety model

    Downloads the CLIP-based NSFW classifier into
    ``~/.cache/clip_retrieval`` on first use, loads it with autokeras
    custom objects, and runs one throwaway prediction to warm it up.
    Returns the loaded Keras model.
    """
    import autokeras as ak  # pylint: disable=import-outside-toplevel
    from tensorflow.keras.models import load_model  # pylint: disable=import-outside-toplevel
    from os.path import expanduser  # pylint: disable=import-outside-toplevel

    home = expanduser("~")
    cache_folder = home + "/.cache/clip_retrieval"
    model_dir = cache_folder + "/clip_autokeras_binary_nsfw"
    # Fetch and unpack the model archive only if it isn't cached yet.
    if not os.path.exists(model_dir):
        os.makedirs(cache_folder, exist_ok=True)
        from urllib.request import urlretrieve  # pylint: disable=import-outside-toplevel

        path_to_zip_file = cache_folder + "/clip_autokeras_binary_nsfw.zip"
        url_model = (
            "https://raw.githubusercontent.com/LAION-AI/CLIP-based-NSFW-Detector/main/clip_autokeras_binary_nsfw.zip"
        )
        urlretrieve(url_model, path_to_zip_file)
        import zipfile  # pylint: disable=import-outside-toplevel

        with zipfile.ZipFile(path_to_zip_file, "r") as zip_ref:
            zip_ref.extractall(cache_folder)
    loaded_model = load_model(model_dir, custom_objects=ak.CUSTOM_OBJECTS)
    # Warm-up call: 1000 random 768-d vectors (CLIP ViT-L/14 embedding size).
    loaded_model.predict(np.random.rand(10**3, 768).astype("float32"), batch_size=10**3)
    return loaded_model
import mmh3
def compute_hash(url, text):
    """Return a 64-bit murmur hash of url + text (None treated as "")."""
    combined = ("" if url is None else url) + ("" if text is None else text)
    # mmh3.hash64 yields a pair of signed 64-bit ints; keep the first.
    return mmh3.hash64(combined.encode("utf-8"))[0]
def main(
    embedding_folder="https://mystic.the-eye.eu/public/AI/cah/laion5b/embeddings/laion1B-nolang/img_emb/",
    metadata_folder="https://mystic.the-eye.eu/public/AI/cah/laion5b/embeddings/laion1B-nolang/laion1B-nolang-metadata/",
    output_folder="output",
    batch_size=10**5,
    end=None,
):
    """Run the NSFW classifier over CLIP embeddings and write parquet shards.

    Args:
        embedding_folder: location of the .npy embedding shards.
        metadata_folder: location of the parquet metadata (url, caption).
        output_folder: where one parquet file per batch is written.
        batch_size: number of embeddings scored per batch.
        end: optional index at which to stop reading (None = all).
    """
    reader = EmbeddingReader(
        embedding_folder, metadata_folder=metadata_folder, file_format="parquet_npy", meta_columns=["url", "caption"]
    )
    fs, relative_output_path = fsspec.core.url_to_fs(output_folder)
    fs.mkdirs(relative_output_path, exist_ok=True)
    model = load_safety_model()
    total = reader.count
    # Fix: the original used math.ceil(total // batch_size), but // already
    # floors, so a final partial batch was not counted. True division keeps
    # the ceiling meaningful.
    batch_count = math.ceil(total / batch_size)
    # Zero-pad output file names so they sort lexicographically.
    padding = int(math.log10(batch_count)) + 1
    import tensorflow as tf  # pylint: disable=import-outside-toplevel  # noqa: F401  (kept from original; presumably forces TF init — confirm)
    for i, (embeddings, ids) in enumerate(reader(batch_size=batch_size, start=0, end=end)):
        predictions = model.predict_on_batch(embeddings)
        batch = np.hstack(predictions)
        padded_id = str(i).zfill(padding)
        output_file_path = os.path.join(relative_output_path, padded_id + ".parquet")
        df = pd.DataFrame(batch, columns=["prediction"])
        # Stable join key derived from (url, caption) for downstream matching.
        df["hash"] = [compute_hash(x, y) for x, y in zip(ids["url"], ids["caption"])]
        df["url"] = ids["url"]
        with fs.open(output_file_path, "wb") as f:
            df.to_parquet(f)
| StarcoderdataPython |
9615062 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@name - regularflow
@contains - API to use regularflow. You can create and delete Agents that communicate beetwen each other to regulate circulation by a training
@author - <NAME>
"""
from .apiFlow import newAgent
from .apiFlow import cycleManager
from .apiFlow import startAgent
from .apiFlow import startDemo
| StarcoderdataPython |
6614713 | <reponame>coreybobco/setlistspy-api<filename>setlistspy/app/models.py
import os
from django.db import models
from setlistspy.app.base_model import BaseSetSpyModel
from playhouse.postgres_ext import *
# def get_db():
# return PostgresqlExtDatabase(
# os.getenv('POSTGRES_HOST'),
# user=os.getenv('POSTGRES_USER'),
# password=os.getenv('<PASSWORD>PASSWORD'),
# host="localhost",
# port=os.getenv('POSTGRES_PORT'),
# register_hstore=False
# )
class DJ(BaseSetSpyModel):
    """A DJ record, re-crawled periodically (see last_check_time)."""
    name = models.CharField(max_length=255)
    # Unique URL acts as the natural key for a DJ.
    url = models.CharField(max_length=255, unique=True)
    # 32 hex chars — presumably an MD5 digest of the fetched XML, used to
    # detect unchanged pages; TODO confirm against the crawler.
    xml_md5 = models.CharField(max_length=32, default='')
    last_check_time = models.DateTimeField(null=True, blank=True)
    class Meta:
        indexes = [
            models.Index(fields=['name']),
            models.Index(fields=['last_check_time']),
            models.Index(fields=['name', 'last_check_time'])
        ]
    def __str__(self):
        return f'{self.name}'
class Setlist(BaseSetSpyModel):
    """One DJ set, identified by its MixesDB page id."""
    dj = models.ForeignKey(DJ, on_delete=models.PROTECT, related_name='setlists')
    title = models.CharField(max_length=255)
    mixesdb_id = models.IntegerField()
    mixesdb_mod_time = models.DateTimeField()
    # NOTE(review): a SHA-1 hex digest is 40 chars; max_length=31 looks too
    # small — confirm what is actually stored here.
    xml_sha1 = models.CharField(max_length=31, null=True)
    # NOTE(review): NullBooleanField is deprecated in modern Django in
    # favor of BooleanField(null=True) — confirm the Django version.
    b2b = models.NullBooleanField('Other DJs on deck', null=True)
    class Meta:
        indexes = [
            models.Index(fields=['dj']),
            models.Index(fields=['mixesdb_mod_time']),
            models.Index(fields=['dj', 'mixesdb_mod_time'])
        ]
        # A DJ cannot have two setlists with the same MixesDB id.
        unique_together = (
            ('dj', 'mixesdb_id'),
        )
    def __str__(self):
        return f'{self.title}'
class Artist(BaseSetSpyModel):
    """A track artist; the unique name is the natural key."""
    name = models.CharField(max_length=255, unique=True)
    def __str__(self):
        return f'{self.name}'
class Label(BaseSetSpyModel):
    """A record label, optionally linked to its Discogs entry."""
    name = models.CharField(max_length=255, unique=True)
    discogs_id = models.IntegerField(null=True)
    def __str__(self):
        return f'{self.name}'
class Track(BaseSetSpyModel):
    """A track, unique per (artist, title), linked to setlists via TrackPlay."""
    artist = models.ForeignKey(Artist, on_delete=models.PROTECT, related_name="tracks")
    title = models.CharField(max_length=255)
    # Through-model carries play order and label per appearance.
    setlists = models.ManyToManyField(Setlist, through="TrackPlay", related_name="tracks")
    def __str__(self):
        return f'{self.artist.name} - {self.title}'
    class Meta:
        indexes = [
            models.Index(fields=['artist']),
            models.Index(fields=['title']),
            models.Index(fields=['artist', 'title']),
        ]
        unique_together = (
            ('artist', 'title'),
        )
class TrackPlay(BaseSetSpyModel):
    """Through-model: one play of a track at a given position in a setlist."""
    track = models.ForeignKey(Track, related_name='plays', on_delete=models.PROTECT)
    setlist = models.ForeignKey(Setlist, related_name='track_plays', on_delete=models.PROTECT)
    # 1-based position of the track within the set (unique per setlist).
    set_order = models.IntegerField()
    label = models.ForeignKey(Label, null=True, related_name='track_plays', on_delete=models.PROTECT)
    class Meta:
        indexes = [
            models.Index(fields=['track']),
            models.Index(fields=['setlist']),
            models.Index(fields=['track', 'setlist']),
        ]
        unique_together = (
            ('setlist', 'set_order'),
        )
    def __str__(self):
        return f'{self.setlist.title} - {self.set_order}. {self.track.artist.name} - {self.track.title}'
159287 | <reponame>SupremaLex/UO-test-funcs
from .test_function import *
from .support_funcs import *
from .exceptions import *
# Each builder below assembles a CUTE test problem: a symbolic per-term
# expression `sm(i)` (optionally a constant part `f`), a starting point
# `x0`, and range/limit settings, all passed to create_test_function.
def FLETCBV3(n):
    """FLETCBV3 (CUTE): boundary-value-style sum with tiny scaling p=1e-8."""
    name = "FLETCBV3 function (CUTE)"
    # NOTE(review): this print looks like leftover debugging — confirm.
    print(name)
    p, h = 1e-8, 1 / (n + 1)
    # Last member of the second series is moved into the constant part f,
    # leaving one series running from 1 to n-1.
    f = lambda: 0.5*p*(xi(1) + xi(n))**2 - ((h**2 + 2) * xi(n) - sp.cos(xi(n))) * p / h**2
    sm_1 = lambda i: 0.5*p*(xi(i) - xi(i+1))
    sm_2 = lambda i: -((h**2 + 2) * xi(i) - sp.cos(xi(i))) * p / h**2
    sm = lambda i: sm_1(i) + sm_2(i)
    x0 = (np.arange(1, n + 1) * h).reshape((n, 1))
    return create_test_function(name, n, sm, x0, first=f, range_func=default_range_3)
def FLETCHCR(n):
    """FLETCHCR (CUTE): chained Rosenbrock-like terms."""
    name = "FLETCHCR function (CUTE)"
    sm = lambda i: 100 * (xi(i+1) - xi(i) + 1 - xi(i)**2)**2
    x0 = np.zeros((n, 1))
    return create_test_function(name, n, sm, x0, range_func=default_range_3)
def BDQRTIC(n):
    """BDQRTIC (CUTE): banded quartic, terms couple x_i..x_{i+3} and x_n."""
    name = "BDQRTIC function (CUTE)"
    sm_1 = lambda i: (-4*xi(i) + 3)**2
    sm_2 = lambda i: (xi(i) + 2*xi(i+1)**2 + 3*xi(i+2)**2 + 4*xi(i+3) + 5*xi(n)**2) ** 2
    sm = lambda i: sm_1(i) + sm_2(i)
    x0 = np.ones((n, 1))
    return create_test_function(name, n, sm, x0, range_func=default_range, limits=(1, n - 3))
def TRIDIA(n):
    """TRIDIA (CUTE): tridiagonal quadratic with weights i."""
    name = "TRIDIA function (CUTE)"
    alpha, beta, gamma, sigma = 2, 1, 1, 1
    f = lambda: gamma * (sigma * xi(1) - 1) ** 2
    sm = lambda i: i * (alpha * xi(i) - beta * xi(i-1)) ** 2
    x0 = np.ones((n, 1))
    return create_test_function(name, n, sm, x0, first=f,
                                range_func=default_range, limits=(2, n + 1))
def ARGLINB(n):
    """ARGLINB (CUTE): linear least-squares with rank-one style terms."""
    name = "ARGLINB function (CUTE)"
    sm = lambda i: sum([i*j*xi(j) - 1 for j in range(1, n + 1)]) ** 2
    x0 = np.ones((n, 1))
    return create_test_function(name, n, sm, x0, range_func=default_range_1)
def ARWHEAD(n):
    """ARWHEAD (CUTE): arrowhead structure coupling each x_i with x_n."""
    name = "ARWHEAD function (CUTE)"
    sm = lambda i: (-4*xi(i) + 3)**2 + (xi(i)**2 + xi(n)**2) ** 2
    x0 = np.ones((n, 1))
    return create_test_function(name, n, sm, x0, range_func=default_range_3)
def NONDIA(n):
    """NONDIA (CUTE): nondiagonal Rosenbrock variant."""
    name = "NONDIA function (CUTE)"
    f = lambda: (xi(1) - 1) ** 2
    sm = lambda i: 100 * (xi(i) - xi(i-1)**2)**2
    x0 = np.ones((n, 1)) * -1.0
    return create_test_function(name, n, sm, x0,
                                first=f, range_func=default_range, limits=(2, n + 1))
def NONDQUAR(n):
    """NONDQUAR (CUTE): nondiagonal quartic."""
    name = "NONDQUAR function (CUTE)"
    f = lambda: (xi(1) - xi(2)) ** 2
    sm_1 = lambda i: (xi(i) + xi(i+1) + xi(n)) ** 4
    sm_2 = lambda: (xi(n-1) + xi(n)) ** 2
    sm = lambda i: sm_1(i) + sm_2()
    x0 = construct_x0([[1.0], [-1.0]], n)
    return create_test_function(name, n, sm, x0,
                                first=f, range_func=default_range, limits=(2, n - 1))
def DQDRTIC(n):
    """DQDRTIC (CUTE): diagonal quadratic, needs n >= 3."""
    name = "DQDRTIC function (CUTE)"
    c, d = 100, 100
    sm = lambda i: xi(i) + c*xi(i+1)**2 + d*xi(i+2)**2
    x0 = np.ones((n, 1)) * 3.0
    return create_test_function(name, n, sm, x0,
                                range_func=default_range, limits=(2, n - 1), min_dimesion=3)
def EG2(n):
    """EG2 (CUTE): sum of sines coupling x_1, x_i and x_n."""
    name = "EG2 function (CUTE)"
    sm = lambda i: sp.sin(xi(1) + xi(i)**2 - 1) + 0.5 * sp.sin(xi(n)**2)
    x0 = np.ones((n, 1))
    return create_test_function(name, n, sm, x0, range_func=default_range_3)
def CURLY20(n):
    """CURLY20 (CUTE): f(x) = sum_i q_i**4 - 20*q_i**2 - 0.1*q_i, where
    q_i = x_i + ... + x_{i+k} (truncated at n) with bandwidth k = 20.
    """
    name = "CURLY20 function (CUTE)"
    def q(i, k=20):
        # Banded partial sum starting at index i (the original summed
        # from 1, which does not match the published CURLY definition).
        upper = i + k if i <= n - k else n
        return sum(xi(j) for j in range(i, upper + 1))
    # First term is quartic in the published definition; the previous
    # q(i)**2 - 20*q(i)**2 collapsed to -19*q(i)**2 and was a typo.
    sm = lambda i: q(i)**4 - 20*q(i)**2 - 0.1*q(i)
    x0 = np.ones((n, 1)) * 0.001 / (n + 1)
    return create_test_function(name, n, sm, x0, range_func=default_range_1)
def LIARWHD_1(n):
    """LIARWHD variant 1 (CUTE): quartic terms coupling x_i with x_1."""
    name = "LIARWHD1 function (CUTE)"
    sm = lambda i: 4*(-xi(1) + xi(i)**2)**2 + (xi(i) - 1)**2
    x0 = np.ones((n, 1)) * 4.0
    return create_test_function(name, n, sm, x0, range_func=default_range_1)
def POWER(n):
    """POWER (CUTE): sum of (i*x_i)^2."""
    name = "POWER function (CUTE)"
    sm = lambda i: (i * xi(i)) ** 2
    x0 = np.ones((n, 1))
    return create_test_function(name, n, sm, x0, range_func=default_range_1)
def ENGVAL1(n):
    """ENGVAL1 (CUTE): quartic plus linear penalty per consecutive pair."""
    name = "ENGVAL1 function (CUTE)"
    sm = lambda i: (xi(i)**2 + xi(i+1)**2)**2 + (-4*xi(i) + 3)
    x0 = np.ones((n, 1)) * 2.0
    return create_test_function(name, n, sm, x0, range_func=default_range_3)
def CRAGGLVY(n):
    """CRAGGLVY (CUTE): even n >= 4; terms over index pairs (2i-1, 2i)."""
    name = "CRAGGLVY function (CUTE)"
    if n % 2 or n < 4:
        raise DimensionError(name, 2, n, 4)
    sm_1 = lambda i: (sp.exp(xi(2*i-1) - xi(2*i)))**4 + 100*(xi(2*i) - xi(2*i-1))**6
    sm_2 = lambda i: (sp.tan(xi(2*i+1)+xi(2*i+2)) + xi(2*i+1) - xi(2*i+2))**4
    sm_3 = lambda i: xi(2*i-1)**8 + (xi(2*i+2)-1)**2
    sm = lambda i: sm_1(i) + sm_2(i) + sm_3(i)
    x0 = np.ones((n, 1)) * 2.0
    # First component starts at 1, rest at 2.
    x0[0][0] = 1.0
    return create_test_function(name, n, sm, x0, range_func=default_range, limits=(1, n // 2))
def EDENSCH(n):
    """EDENSCH (CUTE): constant 16 plus quartic/quadratic chain terms."""
    name = "EDENSCH function (CUTE)"
    f = lambda: 16
    sm = lambda i: (xi(i) - 2)**4 + (xi(i)*xi(i+1) - 2*xi(i+1))**2 +(xi(i+1) + 1)**2
    x0 = np.zeros((n, 1))
    return create_test_function(name, n, sm, x0, first=f, range_func=default_range_3)
def INDEF(n):
    """INDEF (CUTE): indefinite function mixing linear and cosine terms."""
    name = "INDEF function (CUTE)"
    f = lambda: xi(1) + xi(n)
    sm = lambda i: xi(i) + 0.5 * sp.cos(2*xi(i) - xi(n) - xi(1))
    x0 = (np.arange(1, n + 1) * 1 / (n + 1)).reshape((n, 1))
    return create_test_function(name, n, sm, x0, first=f, range_func=default_range, limits=(2, n))
def CUBE(n):
    """CUBE (CUTE): cubic chained Rosenbrock variant."""
    name = "CUBE function (CUTE)"
    f = lambda: (xi(1) - 1)**2
    sm = lambda i: 100*(xi(i) - xi(i-1)**3)**2
    x0 = construct_x0([[-1.2], [1.0]], n)
    return create_test_function(name, n, sm, x0,
                                first=f, range_func=default_range, limits=(2, n + 1))
def EXPLIN1(n):
    """EXPLIN1 (CUTE): exponential pair terms minus a linear part."""
    name = "EXPLIN1 function (CUTE)"
    f = lambda: -10 * n * xi(n)
    sm = lambda i: sp.exp(0.1*xi(i)*xi(i+1)) - 10*(i*xi(i))
    x0 = np.zeros((n, 1))
    return create_test_function(name, n, sm, x0, first=f, range_func=default_range_3)
def EXPLIN2(n):
    """EXPLIN2 (CUTE): like EXPLIN1 but with n-scaled exponent."""
    name = "EXPLIN2 function (CUTE)"
    f = lambda: -10 * n * xi(n)
    sm = lambda i: sp.exp(i*xi(i)*xi(i+1) / (10*n)) - 10*(i*xi(i))
    x0 = np.zeros((n, 1))
    return create_test_function(name, n, sm, x0, first=f, range_func=default_range_3)
def ARGLINC(n):
    """ARGLINC (CUTE): linear least-squares with constant part 2."""
    name = "ARGLINC function (CUTE)"
    f = lambda: 2
    sm = lambda i: sum([j*xi(j)*(i-1) - 1 for j in range(2, n)])**2
    x0 = np.ones((n, 1))
    return create_test_function(name, n, sm, x0,
                                first=f, range_func=default_range, limits=(2, n))
def BDEXP(n):
    """BDEXP (CUTE): banded exponential terms over triples."""
    name = "BDEXP function (CUTE)"
    sm = lambda i: (xi(i) + xi(i+1)) * sp.exp(-xi(i+2)*(xi(i) + xi(i+1)))
    x0 = np.ones((n, 1))
    return create_test_function(name, n, sm, x0, range_func=default_range, limits=(1, n - 1))
def HARKERP2(n):
    """HARKERP2 (CUTE): squared tail sums minus quadratic terms."""
    name = "HARKERP2 function (CUTE)"
    # Inner comprehension variable shadows the lambda parameter j; the
    # range bound is evaluated with the parameter before the loop starts.
    f1 = lambda j: sum([xi(j) for j in range(j, n + 1)]) ** 2
    f2 = lambda: sum([f1(j) for j in range(2, n + 1)])
    f = lambda: f1(1) + 2 * f2()
    sm = lambda i: -(xi(i) + 0.5*xi(i)**2)**2
    x0 = np.arange(1, n + 1).reshape((n, 1))
    return create_test_function(name, n, sm, x0, first=f, range_func=default_range_1)
def GENHUMPS(n):
    """GENHUMPS (CUTE): oscillatory sine-product humps plus quadratic."""
    name = "GENHUMPS function (CUTE)"
    sm_1 = lambda i: (sp.sin(2*xi(i))**2) * sp.sin(2*xi(i+1))**2
    sm_2 = lambda i: 0.05*(xi(i)**2 + xi(i+1)**2)
    sm = lambda i: sm_1(i) + sm_2(i)
    x0 = np.ones((n, 1)) * -506.2
    # First component differs from the rest.
    x0[0][0] = 506.0
    return create_test_function(name, n, sm, x0, range_func=default_range_3)
def MCCORMCK(n):
    """MCCORMCK (CUTE): generalized McCormick function."""
    name = "MCCORMCK function (CUTE)"
    sm_1 = lambda i: -1.5*xi(i) + 2.5*xi(i+1) + 1
    sm_2 = lambda i: (xi(i) - xi(i+1))**2 + sp.sin(xi(i) + xi(i+1))
    sm = lambda i: sm_1(i) + sm_2(i)
    x0 = np.ones((n, 1))
    return create_test_function(name, n, sm, x0, range_func=default_range_3)
def NONSCOMP(n):
    """NONSCOMP (CUTE): nonscaled chained quadratic."""
    name = "NONSCOMP function (CUTE)"
    f = lambda: (xi(1) - 1) ** 2
    sm = lambda i: 4 * (xi(i) - xi(i-1)**2)**2
    x0 = np.ones((n, 1)) * 3.0
    return create_test_function(name, n, sm, x0,
                                first=f, range_func=default_range, limits=(2, n + 1))
def VARDIM(n):
    """VARDIM (CUTE): variably dimensioned function."""
    name = "VARDIM function (CUTE)"
    sm_f = lambda i: i* xi(i) - n * (n + 1) / 2
    ff = lambda: sum([sm_f(j) for j in range(1, n + 1)])
    # Constant part: squared plus fourth power of the aggregated residual.
    f = lambda: ff() ** 2 + ff() ** 4
    sm = lambda i: (xi(i) - 1) ** 2
    x0 = (np.ones(n) - np.arange(1, n + 1) / n).reshape((n, 1))
    return create_test_function(name, n, sm, x0, first=f, range_func=default_range_1)
def QUARTC(n):
    """QUARTC (CUTE): simple separable quartic."""
    name = "QUARTC function (CUTE)"
    sm = lambda i: (xi(i) - 1) ** 4
    x0 = np.ones((n, 1)) * 2.0
    return create_test_function(name, n, sm, x0, range_func=default_range_1)
def SINQUAD(n):
    """SINQUAD (CUTE): sine-quadratic coupling of x_1, x_i, x_n."""
    name = "SINQUAD function (CUTE)"
    f = lambda: (xi(1) - 1) ** 4
    sm_1 = lambda i: (sp.sin(xi(i) - xi(n)) - xi(1) + xi(i)**2)**2
    sm_2 = lambda: (xi(n)**2 - xi(1)**2) ** 2
    sm = lambda i: sm_1(i) + sm_2()
    x0 = np.ones((n, 1)) * 0.1
    return create_test_function(name, n, sm, x0,
                                first=f, range_func=default_range, limits=(2, n))
def ext_DENSCHNB(n):
name = "Extended DENSCHNB function (CUTE)"
if n % 2:
raise DimensionError(name, 2, n)
sm_1 = lambda i: (xi(2*i-1) - 2)**2 +(xi(2*i-1) - 2) * xi(2*i) ** 2
sm_2 = lambda i: (xi(2*i) + 1) ** 2
sm = lambda i: sm_1(i) + sm_2(i)
x0 = np.ones((n, 1))
return create_test_function(name, n, sm, x0)
def ext_DENSCHNF(n):
name = "Extended DENSCHNF function (CUTE)"
if n % 2:
raise DimensionError(name, 2, n)
sm_1 = lambda i: (2*(xi(2*i-1) + xi(2*i))**2 + (xi(2*i-1) - xi(2*i))**2 - 8) ** 2
sm_2 = lambda i: (5*xi(2*i-1)**2 + (xi(2*i) - 3)**2 - 9) ** 2
sm = lambda i: sm_1(i) + sm_2(i)
x0 = construct_x0([[0.0], [2.0]], n)
return create_test_function(name, n, sm, x0)
def LIARWHD_2(n):
name = "LIARWHD2 function (CUTE)"
sm = lambda i: 4*(xi(i)**2 - xi(i)) ** 2 + (xi(i) - 1) ** 2
x0 = np.ones((n, 1)) * 4.0
return create_test_function(name, n, sm, x0, range_func=default_range_1)
def DIXON3DQ(n):
name = "DIXON3DQ function (CUTE)"
f = lambda: (xi(1) - 1) ** 2
sm = lambda i: (xi(i) - xi(i+1)) ** 2 + (xi(n) - 1) ** 2
x0 = np.ones((n, 1)) * -1.0
return create_test_function(name, n, sm, x0, first=f, range_func=default_range_3)
def COSINE(n):
name = "COSINE function (CUTE)"
sm = lambda i: sp.cos(-0.5*xi(i+1) + xi(i)**2)
x0 = np.ones((n, 1))
return create_test_function(name, n, sm, x0, range_func=default_range_3)
def SINE(n):
    """SINE test problem (CUTE)."""
    name = "SINE function (CUTE)"
    term = lambda i: sp.sin(xi(i) ** 2 - 0.5 * xi(i + 1))
    start = np.ones((n, 1))
    return create_test_function(name, n, term, start, range_func=default_range_3)
def BIGGSB1(n):
    """BIGGSB1 test problem (CUTE)."""
    name = "BIGGSB1 function (CUTE)"
    first_term = lambda: (xi(1) - 1) ** 2
    term = lambda i: (xi(i + 1) - xi(i)) ** 2 + (1 - xi(n)) ** 2
    start = np.zeros((n, 1))
    return create_test_function(name, n, term, start, first=first_term, range_func=default_range_3)
def SINCOS(n):
    """SINCOS test problem (CUTE); requires even n."""
    name = "SINCOS function (CUTE)"
    if n % 2:
        raise DimensionError(name, 2, n)
    poly = lambda i: (xi(2*i - 1) ** 2 + xi(2*i) ** 2 + xi(2*i - 1) * xi(2*i)) ** 2
    trig = lambda i: sp.sin(xi(2*i - 1)) ** 2 + sp.cos(xi(2*i)) ** 2
    term = lambda i: poly(i) + trig(i)
    start = construct_x0([[3.0], [0.1]], n)
    return create_test_function(name, n, term, start)
def HIMMELBG(n):
    """HIMMELBG test problem (CUTE); requires even n."""
    name = "HIMMELBG function (CUTE)"
    if n % 2:
        raise DimensionError(name, 2, n)
    term = lambda i: (2 * xi(2*i - 1) ** 2 + 3 * xi(2*i) ** 2) * sp.exp(-xi(2*i - 1) - xi(2*i))
    start = np.full((n, 1), 1.5)
    return create_test_function(name, n, term, start)
def HIMMELH(n):
    """HIMMELH test problem (CUTE); requires even n."""
    name = "HIMMELH function (CUTE)"
    if n % 2:
        raise DimensionError(name, 2, n)
    term = lambda i: xi(2*i - 1) ** 2 + xi(2*i) ** 2 - 3 * xi(2*i - 1) - 2 * xi(2*i) + 2
    start = np.full((n, 1), 1.5)
    return create_test_function(name, n, term, start)
9745443 | # The local version of the Process object
from pathlib import Path
from typing import Dict, List, Union
from openghg.store import ObsSurface
from openghg.types import DataTypes
__all__ = ["process_files"]
def process_files(
    files: Union[str, List],
    data_type: str,
    site: str,
    network: str,
    inlet: Union[str, None] = None,
    instrument: Union[str, None] = None,
    overwrite: bool = False,
) -> Dict:
    """Process the passed file(s)

    Args:
        files: Path of files to be processed. For GCWERKS data this must
            be a list of (data file, precision file) tuples.
        data_type: Type of data to be processed (CRDS, GC etc)
        site: Site code or name
        network: Network name
        inlet: Inlet height/name
        instrument: Instrument name
        overwrite: Should this data overwrite data stored for these
            datasources for existing dateranges
    Returns:
        dict: UUIDs of Datasources storing data of processed files keyed by filename
    """
    # Normalise the data type name; raises KeyError for unknown types.
    # (Annotations fixed: `inlet: str = None` was an implicit Optional,
    # disallowed by PEP 484 — spelled as Union[str, None] now.)
    data_type = DataTypes[data_type.upper()].name

    if not isinstance(files, list):
        files = [files]

    obs = ObsSurface.load()

    results = {}

    # Ensure we have Paths
    # TODO: Delete this, as we already have the same warning in read_file?
    if data_type == "GCWERKS":
        if not all(isinstance(item, tuple) for item in files):
            raise TypeError(
                "If data type is GC, a list of tuples for data and precision filenames must be passed"
            )
        files = [(Path(f), Path(p)) for f, p in files]
    else:
        files = [Path(f) for f in files]

    r = obs.read_file(
        filepath=files,
        data_type=data_type,
        site=site,
        network=network,
        instrument=instrument,
        inlet=inlet,
        overwrite=overwrite,
    )
    results.update(r)

    return results
| StarcoderdataPython |
9639772 | #-*- coding: utf-8 -*-
#!/usr/bin/python3
"""
Copyright (c) 2020 LG Electronics Inc.
SPDX-License-Identifier: MIT
"""
import os
import json
import logging
from distutils.spawn import find_executable
import re
from ..context import WrapperContext
LOGGER = logging.getLogger('SAGE')
WRAPPER_MAP = {}
def load_tools():
    """Import the bundled tool wrapper modules.

    Each module presumably registers its wrapper class via
    ``register_wrapper`` as an import side effect — confirm in the
    individual modules.
    """
    # TODO: load tools from plugin path
    from . import cppcheck
    from . import cpplint
    from . import clang_tidy
    from . import metrixpp
    from . import duplo
def get_tool_wrapper(toolname):
    """Return the wrapper class registered under *toolname* (raises KeyError if absent)."""
    return WRAPPER_MAP[toolname]
def register_wrapper(name, clazz):
    """Register *clazz* as the wrapper implementation for tool *name*."""
    # Mutating the module-level dict needs no ``global`` declaration.
    WRAPPER_MAP[name] = clazz
def get_tool_list():
    """Return a view over all registered tool names."""
    return WRAPPER_MAP.keys()
class ToolWrapper():
    """Base class for external static-analysis tool wrappers."""

    def __init__(self, executable_name, cmd_line_option):
        self.executable_name = executable_name
        self.cmd_line_option = cmd_line_option

    def get_tool_path(self, ctx):
        """Locate the tool executable, honouring an explicit search path on *ctx*."""
        # NOTE(review): distutils is deprecated (removed in Python 3.12);
        # shutil.which is the usual replacement — confirm before migrating.
        search_path = ctx.tool_path if ctx.tool_path else None
        return find_executable(self.executable_name, search_path)

    def get_tool_option(self, ctx):
        """Return extra command-line options, or an empty string when unset."""
        return self.cmd_line_option if self.cmd_line_option else ""

    def run(self, ctx):
        """Execute the tool against the given context; subclasses override this."""
        pass
    run.__annotations__ = {'ctx': WrapperContext}
| StarcoderdataPython |
4993082 | import DBCon
import DataReduction
import sklearn.preprocessing
import numpy as np
import matplotlib.pyplot as plt
import Constants
import ClusterUtil
import pandas as pd
import Cluster
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import silhouette_score
from sklearn.decomposition import FactorAnalysis
from DataReduction import ClusterCharacter
from pprint import pprint
from datetime import datetime
'''
ChosenAlgorithm = 3
0 = 'K-means'
1 = 'WARD'
2 = 'Spectral'
3 = 'DBSCAN'
4 = 'BANG'
5 = 'SOM'
6 = 'Fuzzy C-Means']
'''
'''
ClusterUtil.getDescription(tupleCharactersAtributes,'Total')
for i in range(7):
print("================================================================Applying Clustering: ",Constants.CLUSTERS[ChosenAlgorithm],"\n")
beginTime = time.time()
#ClusterUtil.plotKnnDistance(arrayNormalizedCharacters)
(clustersResult,labels) = Cluster.getCluster(arrayNormalizedCharacters,scaler,Constants.CLUSTERS[i])
timeElapsed = time.time() - beginTime
print("\nTempo de Pesquisa: ", "{:.5f}".format(timeElapsed)," Seconds")
print("\n================================================================Clustering Applied\n")
#Analisar caracteristicas de cada Cluster, media, variancia, extremos, etc
ClusterUtil.getDescriptions(clustersResult,Constants.CLUSTERS[i])
print("================================================================Statistical Analysis Completed\n")
#Avaliar qualidade do Cluster
ClusterUtil.printEvaluations(arrayNormalizedCharacters,labels,Constants.CLUSTERS[i])
print("================================================================Evaluation Completed\n")
'''
def prepareClusterization(month, year, nullCheck, preProcessing):
    """Load, reduce and normalize character data for clustering.

    Args:
        month, year: period selectors, written into ``Constants``.
        nullCheck: forwarded to ``DBCon.getCharacters``.
        preProcessing: when True, also runs the exploratory steps
            (Hopkins statistic, heat map, elbow plot); when False a
            statistical description of the reduced data is printed instead.
    Returns:
        (arrayNormalizedCharacters, scaler) tuple.
    """
    def _print_time(label):
        # Shared timestamp logging; output identical to the repeated
        # now/strftime/print triplets this helper replaces.
        print(label, datetime.now().strftime("%H:%M:%S"))

    Constants.MONTH = month
    Constants.YEAR = year
    if preProcessing:
        print("\n\n================================================================Análise para Mes: {} Ano: {}\n".format(Constants.MONTH, Constants.YEAR))
        _print_time("Hora de Inicio = ")
    characters = DBCon.getCharacters(nullCheck)
    clusterCharacters = DataReduction.getClusterCharacters(characters)
    #print('Colunas: physicalMeeleExp,physicalRangeExp,tankExp,magicRangeExp,healerExp,arcanistExp,craftExp,gatherExp,minimalCPTime,hildibrand, PreHw, Marriage\n')
    tupleCharactersAtributes = ClusterUtil.getTupleCharactersAtributes(clusterCharacters, not preProcessing)
    _print_time("Hora da Finalização da Redução de Dimensão = ")
    print("\n================================================================Redução Completa\n")
    if preProcessing:
        # Evaluate cluster tendency
        print("Estatística de Hopkins: ", ClusterUtil.hopkins(tupleCharactersAtributes), "\n")
        _print_time("Hora da Finalização da Análise da Estatistica de Hopkins = ")
        print("\n================================================================Avaliação de Tendência para Clusterização Completa\n")
    # Normalize the data
    (normalizedCharacters, scaler) = ClusterUtil.getNormalizedAndScaler(tupleCharactersAtributes)
    arrayNormalizedCharacters = np.array(normalizedCharacters)
    _print_time("Hora da Finalização da Normalização = ")
    print("\n================================================================Normalização Completa\n")
    if preProcessing:
        # Pearson correlation heat map and elbow plot
        ClusterUtil.plotHeatMap(arrayNormalizedCharacters, str(Constants.MONTH) + str(Constants.YEAR))
        print("\n================================================================Mapa de Calor Criado\n")
        ClusterUtil.plotElbow(arrayNormalizedCharacters, str(Constants.MONTH) + str(Constants.YEAR))
        print("\n================================================================Gráfico do Método do Cotovelo Criado\n")
        _print_time("Hora de Fim = ")
        print("\n")
    else:
        ClusterUtil.getDescription(tupleCharactersAtributes, 'Total', month, year)
    return (arrayNormalizedCharacters, scaler)
def clusterize(month, year):
    """Run every clustering algorithm in ``Constants.CLUSTERS`` for the period."""
    def _print_time(label):
        # Shared timestamp logging; identical output to the repeated
        # now/strftime/print triplets this helper replaces.
        print(label, datetime.now().strftime("%H:%M:%S"))

    Constants.MONTH = month
    Constants.YEAR = year
    print("\n\n================================================================Clusterização para Mes: {} Ano: {}\n".format(Constants.MONTH, Constants.YEAR))
    (arrayNormalizedCharacters, scaler) = prepareClusterization(month, year, False, False)
    for i in range(7):
        print("================================================================Aplicando Algoritmo de Clusterização: ", Constants.CLUSTERS[i], "\n")
        (clustersResult, labels) = Cluster.getCluster(arrayNormalizedCharacters, scaler, Constants.CLUSTERS[i])
        _print_time("Hora de Finalização da Clusterização = ")
        print("\n================================================================Clusterização Aplicada\n")
        # Per-cluster descriptive statistics (mean, variance, extremes, ...)
        ClusterUtil.getDescriptions(clustersResult, Constants.CLUSTERS[i], Constants.MONTH, Constants.YEAR)
        print("\n================================================================Análise Estatística Aplicada\n")
        # Cluster quality metrics
        ClusterUtil.printEvaluations(arrayNormalizedCharacters, labels, Constants.CLUSTERS[i])
        print("\n================================================================Avaliação Completa\n")
        _print_time("Hora de Fim = ")
        print("\n")
'''
prepareClusterization('6','18',True,True)
prepareClusterization('6','19',True,True)
prepareClusterization('10','17',True,True)
prepareClusterization('10','18',True,True)
prepareClusterization('2','18',True,True)
prepareClusterization('2','19',True,True)
'''
# Batch analysis: run the full clustering pipeline for each (month, year) period.
clusterize('6','18')
clusterize('6','19')
clusterize('10','17')
clusterize('10','18')
clusterize('2','18')
clusterize('2','19') | StarcoderdataPython |
269466 | <gh_stars>1-10
#!/usr/bin/env python
#coding:utf-8
import sys
from PyQt4 import QtGui
from PyQt4 import QtCore
class Button(QtGui.QWidget):
    """Small window containing a single Quit button."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        # Window position and size
        self.setGeometry(500, 200, 300, 400)
        # Button labelled "Quit", wired to terminate the application
        quit_btn = QtGui.QPushButton("Quit", self)
        quit_btn.setGeometry(190, 190, 96, 32)
        quit_btn.clicked.connect(self.do_quit)

    def do_quit(self):
        """Terminate the Qt event loop."""
        QtGui.qApp.quit()
def main():
    """Create the Qt application, show the button window and run the event loop."""
    app = QtGui.QApplication(sys.argv)
    window = Button()
    window.show()
    sys.exit(app.exec_())
# Start the GUI only when executed directly, not on import.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1671422 | <filename>data/codeup/template/problem.py
"""
Problem URL: {Please provide the problem url}
"""
from io import TextIOWrapper
from typing import Tuple
def read_input(file: TextIOWrapper) -> Tuple[int, int]:
    """Read the two integer problem arguments, one per line, from *file*."""
    # Shadow the builtin so solution code can call input() as usual
    input = lambda: file.readline().rstrip()
    first = int(input())
    second = int(input())
    return first, second
def solution(arg1: int, arg2: int) -> int:
    """Skeleton solution: replace the placeholder with the real algorithm."""
    answer = 0  # placeholder result
    return answer
def print_output(output: int) -> str:
    """Convert the solution's result to its printable string form."""
    return str(output)
| StarcoderdataPython |
3304435 | <gh_stars>10-100
from numpy import linspace
# version
VERSION = '0.3.0'
# constants
# Ideal gas constant R in kcal/(mol*K)
IDEAL_GAS_CONSTANT_KCAL = 1.987204118E-3
TEMPERATURE_CELSIUS = 25.
# calculated constants
ZERO_KELVIN = 273.15  # 0 degrees Celsius expressed in kelvin
TEMPERATURE_KELVIN = ZERO_KELVIN + TEMPERATURE_CELSIUS
# Thermal energy R*T (kcal/mol) at the working temperature
RT = IDEAL_GAS_CONSTANT_KCAL * TEMPERATURE_KELVIN
# error
# Large penalty value; presumably returned by fitting objectives for
# invalid parameter sets — confirm at call sites.
FITTING_PENALTY = 1e10
# display properties
FONT_SIZE = 18
MARKER_SIZE = 8
LABEL_SIZE = 18
LINE_WIDTH = 3
# error options
CONFIDENCE_INTERVAL = 95  # percent
# default range to simulate fit function
XSIM = linspace(0.,10.,100)
if __name__ == "__main__":
    pass
| StarcoderdataPython |
4936431 | <filename>kernel/scrum.py
import requests
from config.settings import BACKLOG_AUTH
__author__ = '<NAME>'
class UnknownEnabler(Exception):
    """Raised when a requested enabler name is not recognised."""
class InvalidConection(Exception):
    """Raised when the backlog server returns a non-OK HTTP response."""
class ScrumServer:
    """Thin HTTP client for the FIWARE backlog API."""

    def __init__(self, url):
        # Fall back to the public backlog host when no URL is given
        url = url or 'backlog.fiware.org'
        self.url = 'http://{}'.format(url)
        # self.url = 'http://127.0.0.1:5000'

    def _get_json(self, path):
        """GET *path* (relative to the server root) and return the decoded JSON.

        Raises:
            InvalidConection: when the response status is not OK.
        """
        # Shared request/validate/decode logic, previously duplicated in
        # each of the three public methods below.
        answer = requests.get(self.url + path, auth=BACKLOG_AUTH)
        if not answer.ok:
            raise InvalidConection
        return answer.json()

    def getbacklog(self, enablername):
        """Return the backlog entries for *enablername*."""
        return self._get_json('/api/backlog/enabler/' + enablername)

    def gethelpdesk(self, enablername):
        """Return the helpdesk entries for *enablername*."""
        return self._get_json('/api/helpdesk/enabler/' + enablername)

    def getworkingmode(self):
        """Return the enabler working-mode listing."""
        return self._get_json('/api/enabler/working_mode')
| StarcoderdataPython |
5025926 | '''
_ _ _ _ _ _ _ _ _ _ _
/ \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \
( P | O | L | Y | G | O | N | S | O | U | P )
\_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/
Plotter-friendly graphics utilities
© <NAME> (@colormotor) 2021 - ...
plut - visualization utils (matplotlib-based)
'''
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import Path, PathPatch
from matplotlib.colors import ListedColormap
import matplotlib
import numpy as np
import matplotlib as mpl
import polygonsoup.geom as geom
# Poor-man's namespace: the lambda is used purely as an attribute container
# for module-wide configuration (cfg.dpi, cfg.default_style, cfg.plotter).
cfg = lambda: None
cfg.dpi = 100
# Make it a bit prettier
cfg.default_style = 'seaborn-darkgrid'
cfg.plotter = None  # active plotter backend; assigned by figure()
class NoPlotter:
    '''Default dummy plotter
    Use plotters.AxiDrawClient or plotters.AxIPlotter to plot something
    '''
    def __init__(self):
        pass
    def _set_bounds(self, w, h):
        # No-op: accepts the page size (inches, as passed by figure()) and ignores it
        pass
    def _stroke(self, P):
        # No-op: discards the polyline
        pass
    def _plot(self, title='', padding=0, box=None):
        # No-op: nothing to send out
        pass
# Page dimensions in inches, landscape orientation: (width, height);
# consumed by figure() via fig.set_size_inches.
paper_sizes = {
    'A4': (11.7, 8.3),
    'A3': (16.5, 11.7),
    'A5': (8.3, 5.8)
}
def set_theme(style=cfg.default_style, fontsize=6):
    """Apply the module's matplotlib style sheet and rcParams.

    Args:
        style: name of a matplotlib style sheet; a falsy value skips the
            style-sheet step so only the rcParams below are applied.
        fontsize: base font size from which title/label/tick sizes derive.
    """
    if style:
        # https://towardsdatascience.com/a-new-plot-theme-for-matplotlib-gadfly-2cffc745ff84
        # Place `gadfly.mplstyle` in `~/.matplotlib/stylelib`
        try:
            plt.rcParams.update(plt.rcParamsDefault)
            plt.style.use(style)
            #print('Setting style ' + style)
        except OSError:
            print('Style ' + style + ' not found')
        #plt.style.use('seaborn') #fivethirtyeight') #seaborn-poster') #seaborn-poster')
        #return
    params = {
        # Illustrator compatible fonts
        # http://jonathansoma.com/lede/data-studio/matplotlib/exporting-from-matplotlib-to-open-in-adobe-illustrator/
        'pdf.fonttype': 42,
        'ps.fonttype': 42,
        'axes.titlesize': fontsize+1,
        'font.family': 'Times New Roman',
        'text.color': 'k',
        'lines.markersize': 2,
        'lines.linewidth': 0.75,
        'lines.markeredgewidth':0.25,
        'axes.labelsize': fontsize,
        'axes.facecolor': (1.,1.,1.,1),
        'xtick.major.pad':0.0,
        'ytick.major.pad':0.0,
        'figure.facecolor': (1.,1.,1.,1),
        'savefig.facecolor': (1.,1.,1.,1),
        'legend.fontsize': 4,
        'xtick.labelsize': fontsize*0.9,
        'ytick.labelsize': fontsize*0.9,
        'xtick.color': '333333',
        'ytick.color': '333333',
        'axes.edgecolor' : '666666',
        'axes.grid': True,
        'grid.color': 'cccccc',
        'grid.alpha': 1.,#'dfdfdf',
        'grid.linestyle': ':',
        'grid.linewidth' : 0.25,
        'figure.figsize': [3, 3],
    }
    mpl.rcParams.update(params)
# Apply the theme once at import time.
set_theme()
def stroke(S, clr='k', closed=False, **kwargs):
    """Draw a polyline (or compound list of polylines).

    Args:
        S: sequence of 2d points, or a compound shape (list of such sequences).
        clr: matplotlib color for the line.
        closed: if True, the path is closed by repeating its first point.
        **kwargs: forwarded to ``plt.plot``.
    """
    if type(S)==list and not S:
        # print('Empty shape')
        return
    if geom.is_compound(S):
        for P in S:
            stroke(P, clr=clr, closed=closed, **kwargs)
        return
    # Build the (optionally closed) point list once and send it to the
    # active plotter backend, mirroring stroke_rect/fill_stroke. The
    # previous "# Send out" block built this list but never forwarded it,
    # and the later vstack of a coordinate row meant `closed` had no
    # effect on the plotted line.
    pts = [p for p in S]
    if closed:
        pts = pts + [pts[0]]
    cfg.plotter._stroke(pts)
    A = np.array(pts).T
    plt.plot(A[0], A[1], color=mpl.colors.to_rgb(clr), **kwargs)
def fill(S, clr, **kwargs):
    """Fill a (possibly compound) polygon with a single matplotlib PathPatch."""
    if type(S)==list and not S:
        # print('Empty shape')
        return
    if not geom.is_compound(S):
        S = [S]
    if not S:
        # print('Empty shape')
        return
    verts = []
    codes = []
    for P in S:
        # Each sub-path: move to the first vertex, line through the rest, close.
        verts.extend(list(P) + [P[0]])
        codes.extend([Path.MOVETO] + [Path.LINETO] * (len(P) - 1) + [Path.CLOSEPOLY])
    patch = PathPatch(Path(verts, codes), color=clr, fill=True, linewidth=0, **kwargs)
    plt.gca().add_patch(patch)
def fill_stroke(S, clr, strokeclr, **kwargs):
    """Fill a (possibly compound) polygon and stroke its outline.

    Each sub-path is also forwarded to the active plotter backend.
    """
    if not S:
        # print('Empty shape')
        return
    if not geom.is_compound(S):
        S = [S]
    path = []
    cmds = []
    for P in S:
        # Send out. Fixed: the plotter interface method is `_stroke`
        # (as defined on NoPlotter and used by stroke_rect); the previous
        # `cfg.plotter.stroke(P)` raised AttributeError on the default
        # NoPlotter backend.
        cfg.plotter._stroke(P)
        path += [p for p in P] + [P[0]]
        cmds += [Path.MOVETO] + [Path.LINETO for p in P[:-1]] + [Path.CLOSEPOLY]
    plt.gca().add_patch(PathPatch(Path(path, cmds), facecolor=clr, edgecolor=strokeclr, fill=True, **kwargs))
def stroke_rect(rect, clr='k', plot=True, **kwargs):
    """Outline a rectangle given as (min corner, max corner); optionally send it to the plotter."""
    x, y = rect[0]
    w, h = rect[1] - rect[0]
    if plot:
        # Forward the closed outline to the plotter backend
        cfg.plotter._stroke(geom.rect_corners(rect, close=True))
    outline = patches.Rectangle((x, y), w, h, fill=False, edgecolor=clr, **kwargs)
    plt.gca().add_patch(outline)
def fill_rect(rect, clr, **kwargs):
    """Fill a rectangle given as (min corner, max corner)."""
    x, y = rect[0]
    w, h = rect[1] - rect[0]
    filled = patches.Rectangle((x, y), w, h, fill=True, facecolor=clr, **kwargs)
    plt.gca().add_patch(filled)
def fill_circle(pos, radius, clr, **kwargs):
    """Draw a filled circle at *pos* with the given radius."""
    disc = patches.Circle(pos, radius, fill=True, facecolor=clr, **kwargs)
    plt.gca().add_patch(disc)
def stroke_circle(pos, radius, clr, **kwargs):
    """Draw a circle outline at *pos* with the given radius."""
    ring = patches.Circle(pos, radius, fill=False, edgecolor=clr, **kwargs)
    plt.gca().add_patch(ring)
def draw_markers(P, color, marker='o', **kwargs):
    """Plot the rows of P (n x 2) as unconnected markers."""
    common = dict(linestyle='None', marker=marker, **kwargs)
    if type(color) == str:
        # Strings go positionally so matplotlib format strings keep working
        plt.plot(P[:,0], P[:,1], color, **common)
    else:
        plt.plot(P[:,0], P[:,1], color=color, **common)
def draw_line(a, b, clr, **kwargs):
    """Draw a straight segment from *a* to *b* with round caps."""
    seg = np.vstack([a, b])
    plt.plot(seg[:, 0], seg[:, 1], color=clr, solid_capstyle='round', dash_capstyle='round', **kwargs)
def det22(mat):
    """Determinant of a 2x2 matrix indexable as mat[i, j]."""
    a, b = mat[0, 0], mat[0, 1]
    c, d = mat[1, 0], mat[1, 1]
    return a * d - b * c
def draw_arrow(a, b, clr, alpha=1., head_width=0.5, head_length=None, overhang=0.3, zorder=None, **kwargs):
    """Draw an arrow from *a* to *b*, scaling the head with the line width.

    NOTE(review): the *alpha* parameter is never used in the body —
    confirm whether it should be forwarded to draw_line/plt.arrow.
    """
    if head_length is None:
        head_length = head_width
    # Line width may arrive under either matplotlib alias
    linewidth = 1.0
    if 'lw' in kwargs:
        linewidth = kwargs['lw']
    if 'linewidth' in kwargs:
        linewidth = kwargs['linewidth']
    # Uglyness, still does not work
    # Estimate a data-space scale from the inverse data transform so the
    # head size tracks the line width on screen.
    axis = plt.gca()
    trans = axis.transData.inverted()
    scale = np.sqrt(det22(trans.get_matrix()))*axis.figure.dpi*100
    head_width = (linewidth*head_width)*scale
    head_length = (linewidth*head_length)*scale
    a, b = np.array(a), np.array(b)
    d = b - a
    # Shorten the shaft so it does not poke through the arrow head
    draw_line(a, b - geom.normalize(d)*head_length*0.5, clr, linewidth=linewidth)
    plt.arrow(a[0], a[1], d[0], d[1], lw=0.5, overhang=overhang,
              head_width=head_width, head_length=head_length, length_includes_head=True,
              fc=clr, ec='none', zorder=zorder)
def set_axis_limits(box, pad=0, invert=True, ax=None, y_limits_only=False):
    """Set axis limits from a (min corner, max corner) box.

    NOTE(review): this definition is shadowed by the identically named
    function defined immediately below and is therefore dead code.
    """
    # UNUSED
    if ax is None:
        ax = plt.gca()
    xlim = [box[0][0]-pad, box[1][0]+pad]
    ylim = [box[0][1]-pad, box[1][1]+pad]
    ax.set_ylim(ylim)
    ax.set_ybound(ylim)
    if not y_limits_only:
        ax.set_xlim(xlim)
        ax.set_xbound(xlim)
    # Hack to get matplotlib to actually respect limits?
    stroke_rect([geom.vec(xlim[0], ylim[0]), geom.vec(xlim[1], ylim[1])], 'r', plot=False, alpha=0)
    # ax.set_clip_on(True)
    if invert:
        ax.invert_yaxis()
def set_axis_limits(P, pad=0, invert=True, ax=None, y_limits_only=False):
    """Set axis limits from either a 2-corner box or a 2 x n point array.

    Args:
        P: a (min corner, max corner) tuple/2-list, or point data whose
           rows are x and y coordinates (lists are hstack-ed first).
        pad: extra margin added on every side.
        invert: flip the y axis (screen-style coordinates).
    """
    if ax is None:
        ax = plt.gca()
    if type(P) == tuple or (type(P)==list and len(P)==2):
        box = P
        xlim = [box[0][0]-pad, box[1][0]+pad]
        ylim = [box[0][1]-pad, box[1][1]+pad]
    else:
        if type(P) == list:
            P = np.hstack(P)
        xlim = [np.min(P[0,:])-pad, np.max(P[0,:])+pad]
        ylim = [np.min(P[1,:])-pad, np.max(P[1,:])+pad]
    ax.set_ylim(ylim)
    ax.set_ybound(ylim)
    if not y_limits_only:
        ax.set_xlim(xlim)
        ax.set_xbound(xlim)
    # Hack to get matplotlib to actually respect limits?
    # NOTE(review): unlike the shadowed variant above, this call omits
    # plot=False, so the invisible helper rect is also sent to the plotter
    # backend — confirm whether that is intended.
    stroke_rect([geom.vec(xlim[0],ylim[0]), geom.vec(xlim[1], ylim[1])], 'r', alpha=0)
    # ax.set_clip_on(True)
    if invert:
        ax.invert_yaxis()
def show_drawing(drawing, size='A4', title='', padding=0, plotter=None):
    """Plot/draw an axi.Drawing object.

    Args:
        drawing: object exposing ``paths`` (iterables of 2d points).
        size: paper-size key or (width, height) in inches, passed to figure().
        plotter: plotter backend; None means a fresh NoPlotter (figure()
            performs that substitution).
    """
    # Default changed from a shared `NoPlotter()` instance (mutable default
    # argument) to None; figure() already maps None to a new NoPlotter.
    figure(size, plotter)
    for path in drawing.paths:
        P = [np.array(p) for p in path]
        stroke(P, 'k')
    show(title, padding)
def figure(size="A5", plotter=None):
    """Create a matplotlib figure sized like a paper format.

    Args:
        size: a key of ``paper_sizes`` or a (width, height) tuple in inches.
        plotter: plotter backend stored into ``cfg.plotter``; None means a
            fresh NoPlotter.
    Returns:
        The created matplotlib Figure.
    """
    if type(size)==str:
        w, h = paper_sizes[size]
    else:
        w, h = size
    fig = plt.figure(dpi=cfg.dpi)
    fig.set_size_inches(w, h)
    if plotter is None:
        # Fresh instance per call; the previous `plotter=NoPlotter()`
        # default was a single instance shared across all calls.
        plotter = NoPlotter()
    cfg.plotter = plotter
    plotter._set_bounds(w, h)
    return fig
def show(title='', padding=0, box=None, axis=False, ydown=True, file='', debug_box=False):
    """Finalize the current figure: title, axes setup, optional save, plotter output, display."""
    if title:
        plt.title(title)
    setup(ydown, axis, box, debug_box)
    if file:
        # Save the figure when an output path is given
        plt.savefig(file, transparent=True)
    # Hand the drawing over to the active plotter backend
    cfg.plotter._plot(title, padding, box=box)
    plt.show()
def setup(ydown=True, axis=False, box=None, debug_box=False):
    """Configure the current axes: aspect, axis visibility, y direction, limits."""
    ax = plt.gca()
    ax.axis('scaled')
    ax.axis('on' if axis else 'off')
    if ydown:
        ax.invert_yaxis()
    if box is not None:
        if debug_box:
            # Visualize the requested bounding box
            stroke_rect(box, 'r')
        # NOTE(review): set_axis_limits inverts the y axis again when
        # invert=ydown — confirm the double inversion is intended.
        set_axis_limits(box, invert=ydown, ax=ax, y_limits_only=False)
categorical_palettes = {
'Tabular':[
(0.12156862745098039, 0.4666666666666667, 0.7058823529411765),
(0.6823529411764706, 0.7803921568627451, 0.9098039215686274),
(1.0, 0.4980392156862745, 0.054901960784313725),
(1.0, 0.7333333333333333, 0.47058823529411764),
(0.17254901960784313, 0.6274509803921569, 0.17254901960784313),
(0.596078431372549, 0.8745098039215686, 0.5411764705882353),
(0.8392156862745098, 0.15294117647058825, 0.1568627450980392),
(1.0, 0.596078431372549, 0.5882352941176471),
(0.5803921568627451, 0.403921568627451, 0.7411764705882353),
(0.7725490196078432, 0.6901960784313725, 0.8352941176470589),
(0.5490196078431373, 0.33725490196078434, 0.29411764705882354),
(0.7686274509803922, 0.611764705882353, 0.5803921568627451),
(0.8901960784313725, 0.4666666666666667, 0.7607843137254902),
(0.9686274509803922, 0.7137254901960784, 0.8235294117647058),
(0.4980392156862745, 0.4980392156862745, 0.4980392156862745),
(0.7803921568627451, 0.7803921568627451, 0.7803921568627451),
(0.7372549019607844, 0.7411764705882353, 0.13333333333333333),
(0.8588235294117647, 0.8588235294117647, 0.5529411764705883),
(0.09019607843137255, 0.7450980392156863, 0.8117647058823529),
(0.6196078431372549, 0.8549019607843137, 0.8980392156862745)
],
'Dark2_8':[
(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
(0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
(0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
(0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
(0.4, 0.6509803921568628, 0.11764705882352941),
(0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
(0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
(0.4, 0.4, 0.4)
],
'Custom':[
[104.0/255, 175.0/255, 252.0/255],
[66.0/255, 47.0/255, 174.0/255],
[71.0/255, 240.0/255, 163.0/255],
[29.0/255, 104.0/255, 110.0/255],
[52.0/255, 218.0/255, 234.0/255],
[45.0/255, 93.0/255, 168.0/255],
[219.0/255, 119.0/255, 230.0/255],
[165.0/255, 46.0/255, 120.0/255],
[171.0/255, 213.0/255, 51.0/255],
[29.0/255, 109.0/255, 31.0/255],
[143.0/255, 199.0/255, 137.0/255],
[226.0/255, 50.0/255, 9.0/255],
[93.0/255, 242.0/255, 62.0/255],
[94.0/255, 64.0/255, 40.0/255],
[247.0/255, 147.0/255, 2.0/255],
[255.0/255, 0.0/255, 135.0/255],
[226.0/255, 150.0/255, 163.0/255],
[216.0/255, 197.0/255, 152.0/255],
[97.0/255, 8.0/255, 232.0/255],
[243.0/255, 212.0/255, 38.0/255]
],
'Paired_12':[ # Okeish
(0.6509803921568628, 0.807843137254902, 0.8901960784313725),
(0.12156862745098039, 0.47058823529411764, 0.7058823529411765),
(0.6980392156862745, 0.8745098039215686, 0.5411764705882353),
(0.2, 0.6274509803921569, 0.17254901960784313),
(0.984313725490196, 0.6039215686274509, 0.6),
(0.8901960784313725, 0.10196078431372549, 0.10980392156862745),
(0.9921568627450981, 0.7490196078431373, 0.43529411764705883),
(1.0, 0.4980392156862745, 0.0),
(0.792156862745098, 0.6980392156862745, 0.8392156862745098),
(0.41568627450980394, 0.23921568627450981, 0.6039215686274509),
(1.0, 1.0, 0.6),
(0.6941176470588235, 0.34901960784313724, 0.1568627450980392)
],
'Tableau_20':[
(0.12156862745098039, 0.4666666666666667, 0.7058823529411765),
(0.6823529411764706, 0.7803921568627451, 0.9098039215686274),
(1.0, 0.4980392156862745, 0.054901960784313725),
(1.0, 0.7333333333333333, 0.47058823529411764),
(0.17254901960784313, 0.6274509803921569, 0.17254901960784313),
(0.596078431372549, 0.8745098039215686, 0.5411764705882353),
(0.8392156862745098, 0.15294117647058825, 0.1568627450980392),
(1.0, 0.596078431372549, 0.5882352941176471),
(0.5803921568627451, 0.403921568627451, 0.7411764705882353),
(0.7725490196078432, 0.6901960784313725, 0.8352941176470589),
(0.5490196078431373, 0.33725490196078434, 0.29411764705882354),
(0.7686274509803922, 0.611764705882353, 0.5803921568627451),
(0.8901960784313725, 0.4666666666666667, 0.7607843137254902),
(0.9686274509803922, 0.7137254901960784, 0.8235294117647058),
(0.4980392156862745, 0.4980392156862745, 0.4980392156862745),
(0.7803921568627451, 0.7803921568627451, 0.7803921568627451),
(0.7372549019607844, 0.7411764705882353, 0.13333333333333333),
(0.8588235294117647, 0.8588235294117647, 0.5529411764705883),
(0.09019607843137255, 0.7450980392156863, 0.8117647058823529),
(0.6196078431372549, 0.8549019607843137, 0.8980392156862745)
],
'Bold_10':[
(0.4980392156862745, 0.23529411764705882, 0.5529411764705883),
(0.06666666666666667, 0.6470588235294118, 0.4745098039215686),
(0.2235294117647059, 0.4117647058823529, 0.6745098039215687),
(0.9490196078431372, 0.7176470588235294, 0.00392156862745098),
(0.9058823529411765, 0.24705882352941178, 0.4549019607843137),
(0.5019607843137255, 0.7294117647058823, 0.35294117647058826),
(0.9019607843137255, 0.5137254901960784, 0.06274509803921569),
(0.0, 0.5254901960784314, 0.5843137254901961),
(0.8117647058823529, 0.10980392156862745, 0.5647058823529412),
(0.9764705882352941, 0.4823529411764706, 0.4470588235294118)
],
'Prism_10':[
(0.37254901960784315, 0.27450980392156865, 0.5647058823529412),
(0.11372549019607843, 0.4117647058823529, 0.5882352941176471),
(0.2196078431372549, 0.6509803921568628, 0.6470588235294118),
(0.058823529411764705, 0.5215686274509804, 0.32941176470588235),
(0.45098039215686275, 0.6862745098039216, 0.2823529411764706),
(0.9294117647058824, 0.6784313725490196, 0.03137254901960784),
(0.8823529411764706, 0.48627450980392156, 0.0196078431372549),
(0.8, 0.3137254901960784, 0.24313725490196078),
(0.5803921568627451, 0.20392156862745098, 0.43137254901960786),
(0.43529411764705883, 0.25098039215686274, 0.4392156862745098)
],
'ColorBlind_10':[
(0.0, 0.4196078431372549, 0.6431372549019608),
(1.0, 0.5019607843137255, 0.054901960784313725),
(0.6705882352941176, 0.6705882352941176, 0.6705882352941176),
(0.34901960784313724, 0.34901960784313724, 0.34901960784313724),
(0.37254901960784315, 0.6196078431372549, 0.8196078431372549),
(0.7843137254901961, 0.3215686274509804, 0.0),
(0.5372549019607843, 0.5372549019607843, 0.5372549019607843),
(0.6352941176470588, 0.7843137254901961, 0.9254901960784314),
(1.0, 0.7372549019607844, 0.4745098039215686),
(0.8117647058823529, 0.8117647058823529, 0.8117647058823529)
],
'BlueRed_12':[
(0.17254901960784313, 0.4117647058823529, 0.6901960784313725),
(0.7098039215686275, 0.7843137254901961, 0.8862745098039215),
(0.9411764705882353, 0.15294117647058825, 0.12549019607843137),
(1.0, 0.7137254901960784, 0.6901960784313725),
(0.6745098039215687, 0.3803921568627451, 0.23529411764705882),
(0.9137254901960784, 0.7647058823529411, 0.6078431372549019),
(0.4196078431372549, 0.6392156862745098, 0.8392156862745098),
(0.7098039215686275, 0.8745098039215686, 0.9921568627450981),
(0.6745098039215687, 0.5294117647058824, 0.38823529411764707),
(0.8666666666666667, 0.788235294117647, 0.7058823529411765),
(0.7411764705882353, 0.0392156862745098, 0.21176470588235294),
(0.9568627450980393, 0.45098039215686275, 0.47843137254901963)
],
'plut_categorical_12':[
(1.0, 0.42745098039215684, 0.6823529411764706),
(0.8313725490196079, 0.792156862745098, 0.22745098039215686),
(0.0, 0.7450980392156863, 1.0),
(0.9215686274509803, 0.6745098039215687, 0.9803921568627451),
(0.6196078431372549, 0.6196078431372549, 0.6196078431372549),
(0.403921568627451, 0.8823529411764706, 0.7098039215686275),
(0.8392156862745098, 0.15294117647058825, 0.1568627450980392),
(0.6039215686274509, 0.8156862745098039, 1.0),
(0.8862745098039215, 0.5215686274509804, 0.26666666666666666),
(0.12156862745098039, 0.4666666666666667, 0.7058823529411765),
(1.0, 0.4980392156862745, 0.054901960784313725),
(0.36470588235294116, 0.6941176470588235, 0.35294117647058826)
]
}
default_palette_name = 'plut_categorical_12' #Tableau_20' #'Tabular' #'BlueRed_12' #'Tabular' #'BlueRed_12' #'Tabular' #'BlueRed_12'# 'Paired_12' #OK #'ColorBlind_10' #OK # 'Dark2_8'# OKeish 'Bold_10' #OK #
def get_default_colors():
    """Return the module's default categorical color list."""
    return categorical_palettes[default_palette_name]
def categorical(name=None):
    """Return the categorical palette registered under *name*.

    Args:
        name: a key of ``categorical_palettes``; None selects the module
            default palette.
    """
    if name is None:
        # Fixed: previously assigned from the undefined name `palette_name`,
        # raising NameError whenever called without an argument.
        name = default_palette_name
    return categorical_palettes[name]
def categorical_cmap(name=None):
    """Return a ListedColormap built from the categorical palette *name*."""
    return ListedColormap(categorical(name))
def default_color(i):
    """Return the i-th default categorical color (cycling) as an RGB tuple."""
    palette = get_default_colors()
    return mpl.colors.to_rgb(palette[i % len(palette)])
def default_color_alpha(i, alpha):
    """Return the i-th default categorical color as an [r, g, b, alpha] list."""
    return list(default_color(i)) + [alpha]
def cmap(v, colormap='turbo'): #'PuRd'):
    """Sample the named matplotlib colormap at scalar (or array) value v."""
    # NOTE(review): matplotlib.cm.get_cmap is deprecated in recent matplotlib
    # releases; consider matplotlib.colormaps[colormap] when upgrading.
    c = matplotlib.cm.get_cmap(colormap)
    return c(v)
| StarcoderdataPython |
9667786 | <gh_stars>0
# -*- encoding: utf-8 -*-
"""
Autorzy
"""
from datetime import date, datetime, timedelta
from autoslug import AutoSlugField
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.db import IntegrityError, models, transaction
from django.db.models import CASCADE, SET_NULL, Sum
from django.urls.base import reverse
from django.contrib.postgres.search import SearchVectorField as VectorField
from bpp.core import zbieraj_sloty
from bpp.models import (
LinkDoPBNMixin,
ModelZAdnotacjami,
ModelZNazwa,
NazwaISkrot,
const,
)
from bpp.models.abstract import ModelZPBN_ID
from bpp.util import FulltextSearchMixin
class Tytul(NazwaISkrot):
    """Academic title dictionary entry (name + abbreviation via NazwaISkrot)."""
    class Meta:
        verbose_name = "tytuł"
        verbose_name_plural = "tytuły"
        app_label = "bpp"
        ordering = ("skrot",)
class Plec(NazwaISkrot):
    """Gender dictionary entry (name + abbreviation via NazwaISkrot)."""
    class Meta:
        verbose_name = "płeć"
        verbose_name_plural = "płcie"
        app_label = "bpp"
def autor_split_string(text):
    """Split a raw 'Surname FirstNames' string into a two-element tuple.

    Raises ValueError when the input does not contain exactly a surname
    followed by at least one name.
    """
    cleaned = text.strip()
    # Normalize the handled whitespace characters to single spaces
    for ws in ("\t", "\n", "\r"):
        cleaned = cleaned.replace(ws, " ")
    while "  " in cleaned:
        cleaned = cleaned.replace("  ", " ")
    parts = [chunk.strip() for chunk in cleaned.split(" ", 1)]
    if len(parts) != 2 or not parts[0] or not parts[1]:
        raise ValueError(parts)
    return parts[0], parts[1]
class AutorManager(FulltextSearchMixin, models.Manager):
    """Manager for Autor adding full-text search and string-based creation."""

    def create_from_string(self, text):
        """Create an author record from a raw string.

        Used when we have a user-typed string (e.g. from AutorAutocomplete)
        and want to create an author with the surname and first names
        filled in correctly.
        """
        text = autor_split_string(text)
        return self.create(
            **dict(nazwisko=text[0].title(), imiona=text[1].title(), pokazuj=False)
        )
class Autor(LinkDoPBNMixin, ModelZAdnotacjami, ModelZPBN_ID):
    """Author (person) record, with PBN linkage, annotations and PBN ID."""

    # URL template used by LinkDoPBNMixin to build a link to this author's PBN page.
    url_do_pbn = const.LINK_PBN_DO_AUTORA

    # Core identity: given names and surname.
    imiona = models.CharField(max_length=512, db_index=True)
    nazwisko = models.CharField(max_length=256, db_index=True)
    tytul = models.ForeignKey(Tytul, CASCADE, blank=True, null=True)
    pseudonim = models.CharField(
        max_length=300,
        blank=True,
        null=True,
        help_text="""
    Jeżeli w bazie danych znajdują się autorzy o zbliżonych imionach, nazwiskach i tytułach naukowych,
    skorzystaj z tego pola aby ułatwić ich rozróżnienie. Pseudonim pokaże się w polach wyszukiwania
    oraz na podstronie autora, po nazwisku i tytule naukowym.""",
    )
    aktualny = models.BooleanField(
        "Aktualny?",
        default=False,
        help_text="""Jeżeli zaznaczone, pole to oznacza,
    że autor jest aktualnie - na dziś dzień - przypisany do jakiejś jednostki w bazie danych i jego przypisanie
    do tej jednostki nie zostało zakończone wraz z konkretną datą w
    przeszłości.""",
        db_index=True,
    )
    aktualna_jednostka = models.ForeignKey(
        "Jednostka", CASCADE, blank=True, null=True, related_name="aktualna_jednostka"
    )
    aktualna_funkcja = models.ForeignKey(
        "Funkcja_Autora",
        CASCADE,
        blank=True,
        null=True,
        related_name="aktualna_funkcja",
    )
    pokazuj = models.BooleanField(
        default=True, help_text="Pokazuj autora na stronach jednostek oraz w rankingu. "
    )
    email = models.EmailField("E-mail", max_length=128, blank=True, null=True)
    www = models.URLField("WWW", max_length=1024, blank=True, null=True)
    plec = models.ForeignKey(Plec, CASCADE, null=True, blank=True)
    urodzony = models.DateField(blank=True, null=True)
    zmarl = models.DateField(blank=True, null=True)
    poprzednie_nazwiska = models.CharField(
        max_length=1024,
        blank=True,
        null=True,
        help_text="""Jeżeli ten
    autor(-ka) posiada nazwisko panieńskie, pod którym ukazywały
    się publikacje lub zmieniał nazwisko z innych powodów, wpisz tutaj
    wszystkie poprzednie nazwiska, oddzielając je przecinkami.""",
        db_index=True,
    )
    pokazuj_poprzednie_nazwiska = models.BooleanField(
        default=True,
        help_text="Jeżeli odznaczone, poprzednie nazwiska nie będą się wyświetlać na podstronie autora "
        "dla użytkowników niezalogowanych. Użytkownicy zalogowani widzą je zawsze. Wyszukiwanie po poprzednich "
        "nazwiskach będzie nadal możliwe. ",
    )
    orcid = models.CharField(
        "Identyfikator ORCID",
        max_length=19,
        blank=True,
        null=True,
        unique=True,
        help_text="Open Researcher and Contributor ID, " "vide http://www.orcid.org",
        validators=[
            RegexValidator(
                regex=r"^\d\d\d\d-\d\d\d\d-\d\d\d\d-\d\d\d(\d|X)$",
                message="Identyfikator ORCID to 4 grupy po 4 cyfry w każdej, "
                "oddzielone myślnikami",
                code="orcid_invalid_format",
            ),
        ],
        db_index=True,
    )
    orcid_w_pbn = models.NullBooleanField(
        "ORCID jest w bazie PBN?",
        help_text="""Jeżeli ORCID jest w bazie PBN, to pole powinno być zaznaczone. Zaznaczenie następuje
    automatycznie, przez procedury integrujące bazę danych z PBNem w nocy. Można też zaznaczyć ręcznie.
    Pole wykorzystywane jest gdy autor nie ma odpowiednika w PBN (pole 'PBN UID' rekordu autora jest puste,
    zaś eksport danych powoduje komunikat zwrotny z PBN o nieistniejącym w ich bazie ORCID). W takich sytuacjach
    należy w polu wybrać "Nie". """,
    )
    expertus_id = models.CharField(
        "Identyfikator w bazie Expertus",
        max_length=10,
        null=True,
        blank=True,
        db_index=True,
        unique=True,
    )
    system_kadrowy_id = models.PositiveIntegerField(
        "Identyfikator w systemie kadrowym",
        help_text="""Identyfikator cyfrowy, używany do matchowania autora z danymi z systemu kadrowego Uczelni""",
        null=True,
        blank=True,
        db_index=True,
        unique=True,
    )
    pbn_uid = models.ForeignKey(
        "pbn_api.Scientist", null=True, blank=True, on_delete=SET_NULL
    )
    # Full-text search vector (PostgreSQL); maintained outside the ORM.
    search = VectorField()
    objects = AutorManager()
    slug = AutoSlugField(populate_from="get_full_name", unique=True, max_length=1024)
    # Pre-computed sort key, rebuilt in save().
    sort = models.TextField()
    jednostki = models.ManyToManyField("bpp.Jednostka", through="Autor_Jednostka")

    def get_absolute_url(self):
        """Return the canonical browse URL for this author."""
        return reverse("bpp:browse_autor", args=(self.slug,))

    class Meta:
        verbose_name = "autor"
        verbose_name_plural = "autorzy"
        ordering = ["sort"]
        app_label = "bpp"

    def __str__(self):
        """Render as "Nazwisko Imiona (previous names), title (pseudonym)"."""
        buf = "%s %s" % (self.nazwisko, self.imiona)
        if self.poprzednie_nazwiska and self.pokazuj_poprzednie_nazwiska:
            buf += " (%s)" % self.poprzednie_nazwiska
        if self.tytul is not None:
            buf += ", " + self.tytul.skrot
        if self.pseudonim is not None:
            buf += " (" + self.pseudonim + ")"
        return buf

    def dodaj_jednostke(self, jednostka, rok=None, funkcja=None):
        """Record that the author worked in ``jednostka`` for the whole year
        ``rok`` (defaults to last year), then defragment the time spans."""
        if rok is None:
            # Default to last year.
            rok = datetime.now().date().year - 1
        start_pracy = date(rok, 1, 1)
        koniec_pracy = date(rok, 12, 31)
        if Autor_Jednostka.objects.filter(
            autor=self,
            jednostka=jednostka,
            rozpoczal_prace__lte=start_pracy,
            zakonczyl_prace__gte=koniec_pracy,
        ):
            # This time span is already covered.
            return
        try:
            Autor_Jednostka.objects.create(
                autor=self,
                rozpoczal_prace=start_pracy,
                jednostka=jednostka,
                funkcja=funkcja,
                zakonczyl_prace=koniec_pracy,
            )
        except IntegrityError:
            # Duplicate (autor, jednostka, rozpoczal_prace) row — nothing to add.
            return
        self.defragmentuj_jednostke(jednostka)

    def defragmentuj_jednostke(self, jednostka):
        """Merge adjacent/overlapping work spans for this author and unit."""
        Autor_Jednostka.objects.defragmentuj(autor=self, jednostka=jednostka)

    def save(self, *args, **kw):
        """Rebuild the sort key and defragment work spans on every save."""
        # Sort key: lower-cased surname without any "von " prefix + given names.
        self.sort = (self.nazwisko.lower().replace("von ", "") + self.imiona).lower()
        ret = super(Autor, self).save(*args, **kw)
        for jednostka in self.jednostki.all():
            self.defragmentuj_jednostke(jednostka)
        return ret

    def afiliacja_na_rok(self, rok, wydzial, rozszerzona=False):
        """
        Was the author affiliated with the given faculty in the given year?
        :param rok: year to check
        :param wydzial: faculty (wydział) to check
        :return: True when the author was in that faculty in that year
        """
        start_pracy = date(rok, 1, 1)
        koniec_pracy = date(rok, 12, 31)
        if Autor_Jednostka.objects.filter(
            autor=self,
            rozpoczal_prace__lte=start_pracy,
            zakonczyl_prace__gte=koniec_pracy,
            jednostka__wydzial=wydzial,
        ):
            return True
        # Or maybe only the start-of-work date is filled in?  In that case we
        # conclude the author STILL works there (no end date), so:
        if Autor_Jednostka.objects.filter(
            autor=self,
            rozpoczal_prace__lte=start_pracy,
            zakonczyl_prace=None,
            jednostka__wydzial=wydzial,
        ):
            return True
        # If there is no record matching on dates, maybe there is a record
        # with ANY other match?
        # XXX per a phone call on 2013-03-25 at 11:55 we were told such
        # authors are NOT of interest to us, hence:
        if not rozszerzona:
            return
        # ... although checking faculty affiliation for some authors can be
        # useful, e.g. when importing first names and other data, so check it
        # when the extended (rozszerzona) affiliation was requested:
        if Autor_Jednostka.objects.filter(autor=self, jednostka__wydzial=wydzial):
            return True

    def get_full_name(self):
        """Return "Imiona Nazwisko (previous names)"."""
        buf = "%s %s" % (self.imiona, self.nazwisko)
        if self.poprzednie_nazwiska:
            buf += " (%s)" % self.poprzednie_nazwiska
        return buf

    def get_full_name_surname_first(self):
        """Return "Nazwisko (previous names) Imiona"."""
        buf = "%s" % self.nazwisko
        if self.poprzednie_nazwiska:
            buf += " (%s)" % self.poprzednie_nazwiska
        buf += " %s" % self.imiona
        return buf

    def prace_w_latach(self):
        """Return the distinct, ordered years in which this author has any works."""
        from bpp.models.cache import Rekord
        return (
            Rekord.objects.prace_autora(self)
            .values_list("rok", flat=True)
            .distinct()
            .order_by("rok")
        )

    def liczba_cytowan(self):
        """Return the total citation count of this author's works."""
        from bpp.models.cache import Rekord
        return (
            Rekord.objects.prace_autora(self)
            .distinct()
            .aggregate(s=Sum("liczba_cytowan"))["s"]
        )

    def liczba_cytowan_afiliowane(self):
        """Return the citation count of this author's works that declare an
        affiliation with one of the university's own units."""
        from bpp.models.cache import Rekord
        return (
            Rekord.objects.prace_autora_z_afiliowanych_jednostek(self)
            .distinct()
            .aggregate(s=Sum("liczba_cytowan"))["s"]
        )

    def zbieraj_sloty(
        self,
        zadany_slot,
        rok_min,
        rok_max,
        minimalny_pk=None,
        dyscyplina_id=None,
        jednostka_id=None,
        akcja=None,
    ):
        # Thin delegate to bpp.core.zbieraj_sloty, bound to this author.
        return zbieraj_sloty(
            autor_id=self.pk,
            zadany_slot=zadany_slot,
            rok_min=rok_min,
            rok_max=rok_max,
            minimalny_pk=minimalny_pk,
            dyscyplina_id=dyscyplina_id,
            jednostka_id=jednostka_id,
            akcja=akcja,
        )
class Funkcja_Autora(NazwaISkrot):
    """Author's function (role) within a unit."""
    pokazuj_za_nazwiskiem = models.BooleanField(
        default=False,
        help_text="""Zaznaczenie tego pola sprawi, że ta funkcja
    będzie wyświetlana na stronie autora, za nazwiskiem.""",
    )
    class Meta:
        verbose_name = "funkcja w jednostce"
        verbose_name_plural = "funkcje w jednostkach"
        ordering = ["nazwa"]
        app_label = "bpp"
class Grupa_Pracownicza(ModelZNazwa):
    """Dictionary of employee groups (grupa pracownicza)."""
    class Meta:
        verbose_name = "grupa pracownicza"
        verbose_name_plural = "grupy pracownicze"
        ordering = [
            "nazwa",
        ]
        app_label = "bpp"
class Wymiar_Etatu(ModelZNazwa):
    """Dictionary of employment time fractions (wymiar etatu)."""
    class Meta:
        verbose_name = "wymiar etatu"
        verbose_name_plural = "wymiary etatów"
        ordering = ["nazwa"]
        app_label = "bpp"
class Autor_Jednostka_Manager(models.Manager):
    def defragmentuj(self, autor, jednostka):
        """Merge redundant/adjacent Autor_Jednostka rows for one author+unit.

        Rows are scanned in ``rozpoczal_prace`` order; rows that add no
        information or that merely extend the previous span are deleted and
        the surviving "previous" row is widened in place.
        """
        poprzedni_rekord = None
        usun = []  # rows scheduled for deletion after the scan
        for rec in Autor_Jednostka.objects.filter(
            autor=autor, jednostka=jednostka
        ).order_by("rozpoczal_prace"):
            if poprzedni_rekord is None:
                poprzedni_rekord = rec
                continue
            if rec.rozpoczal_prace is None and rec.zakonczyl_prace is None:
                # Such a record adds nothing here AND it is not the
                # 'previous' record, so:
                usun.append(rec)
                continue
            # When importing XLS data on top of old-system data, both fields are None
            if (
                poprzedni_rekord.zakonczyl_prace is None
                and poprzedni_rekord.rozpoczal_prace is None
            ):
                usun.append(poprzedni_rekord)
                poprzedni_rekord = rec
                continue
            # New system - when importing XLS data into the new system there is a
            # situation where the author started work at some point BUT never ended it:
            if poprzedni_rekord.zakonczyl_prace is None:
                if (
                    rec.rozpoczal_prace is None
                    and poprzedni_rekord.rozpoczal_prace is not None
                ):
                    if rec.zakonczyl_prace == poprzedni_rekord.rozpoczal_prace:
                        # NOTE(review): this assigns rec.rozpoczal_prace (None here)
                        # into the surviving row's start date — confirm intended.
                        usun.append(rec)
                        poprzedni_rekord.rozpoczal_prace = rec.rozpoczal_prace
                        poprzedni_rekord.save()
                        continue
                if rec.rozpoczal_prace >= poprzedni_rekord.rozpoczal_prace:
                    usun.append(rec)
                    poprzedni_rekord.zakonczyl_prace = rec.zakonczyl_prace
                    poprzedni_rekord.save()
                    continue
            # Merge spans that are exactly contiguous (next starts the day
            # after the previous ended).
            # NOTE(review): if control reaches here while
            # poprzedni_rekord.zakonczyl_prace is None, the ``+ timedelta``
            # below would raise TypeError — confirm that branch is unreachable.
            if rec.rozpoczal_prace == poprzedni_rekord.zakonczyl_prace + timedelta(
                days=1
            ):
                usun.append(rec)
                poprzedni_rekord.zakonczyl_prace = rec.zakonczyl_prace
                poprzedni_rekord.save()
            else:
                poprzedni_rekord = rec
        for aj in usun:
            aj.delete()
class Autor_Jednostka(models.Model):
    """Link between an author and an organizational unit (jednostka)."""

    autor = models.ForeignKey(Autor, CASCADE)
    jednostka = models.ForeignKey("bpp.Jednostka", CASCADE)
    rozpoczal_prace = models.DateField(
        "Rozpoczął pracę", blank=True, null=True, db_index=True
    )
    zakonczyl_prace = models.DateField(
        "Zakończył pracę", null=True, blank=True, db_index=True
    )
    funkcja = models.ForeignKey(Funkcja_Autora, CASCADE, null=True, blank=True)
    podstawowe_miejsce_pracy = models.NullBooleanField()
    grupa_pracownicza = models.ForeignKey(
        Grupa_Pracownicza, SET_NULL, null=True, blank=True
    )
    wymiar_etatu = models.ForeignKey(Wymiar_Etatu, SET_NULL, null=True, blank=True)
    objects = Autor_Jednostka_Manager()

    def clean(self, exclude=None):
        """Validate that the work span is well-formed and ends in the past."""
        # NOTE(review): ``exclude`` is accepted but never used — confirm callers.
        if self.rozpoczal_prace is not None and self.zakonczyl_prace is not None:
            if self.rozpoczal_prace >= self.zakonczyl_prace:
                raise ValidationError(
                    "Początek pracy późniejszy lub równy, jak zakończenie"
                )
        if self.zakonczyl_prace is not None:
            # End date must lie strictly in the past.
            if self.zakonczyl_prace >= datetime.now().date():
                raise ValidationError(
                    "Czas zakończenia pracy w jednostce nie może być taki sam"
                    " lub większy, jak obecna data"
                )

    def __str__(self):
        buf = "%s ↔ %s" % (self.autor, self.jednostka.skrot)
        if self.funkcja:
            buf = "%s ↔ %s, %s" % (self.autor, self.funkcja.nazwa, self.jednostka.skrot)
        return buf

    @transaction.atomic
    def ustaw_podstawowe_miejsce_pracy(self):
        """Make this row the primary workplace and clear the flag on all others."""
        Autor_Jednostka.objects.filter(
            autor=self.autor, podstawowe_miejsce_pracy=True
        ).exclude(pk=self.pk).update(podstawowe_miejsce_pracy=False)
        self.podstawowe_miejsce_pracy = True
        self.save()

    class Meta:
        verbose_name = "powiązanie autor-jednostka"
        verbose_name_plural = "powiązania autor-jednostka"
        ordering = ["autor__nazwisko", "rozpoczal_prace", "jednostka__nazwa"]
        unique_together = [("autor", "jednostka", "rozpoczal_prace")]
        app_label = "bpp"
| StarcoderdataPython |
34154 | <filename>pyxb/binding/content.py
# Copyright 2009, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Helper classes that maintain the content model of XMLSchema in the binding
classes.
L{AttributeUse} and L{ElementUse} record information associated with a binding
class, for example the types of values, the original XML QName or NCName, and
the Python field in which the values are stored. They also provide the
low-level interface to set and get the corresponding values in a binding
instance.
@todo: Document new content model
L{Wildcard} holds content-related information used in the content model.
"""
import pyxb
import pyxb.namespace
import basis
import xml.dom
class ContentState_mixin (pyxb.cscRoot):
    """Declares methods used by classes that hold state while validating a
    content model component.

    This is an abstract interface: concrete validators must override
    L{accepts} and L{notifyFailure}.  L{_verifyComplete} is a no-op hook
    that aggregate states override."""

    def accepts (self, particle_state, instance, value, element_use):
        """Determine whether the provided value can be added to the instance
        without violating state validation.
        This method must not throw any non-catastrophic exceptions; general
        failures should be transformed to a C{False} return value.
        @param particle_state: The L{ParticleState} instance serving as the
        parent to this state.  The implementation must inform that state when
        the proposed value completes the content model.
        @param instance: An instance of a subclass of
        {basis.complexTypeDefinition}, into which the provided value will be
        stored if it is consistent with the current model state.
        @param value: The value that is being validated against the state.
        @param element_use: An optional L{ElementUse} instance that specifies
        the element to which the value corresponds.  This will be available
        when the value is extracted by parsing a document, but will be absent
        if the value was passed as a constructor positional parameter.
        @return: C{True} if the value was successfully matched against the
        state.  C{False} if the value did not match against the state."""
        # Abstract method: NotImplementedError (a subclass of Exception)
        # replaces the original bare Exception, so existing handlers still work.
        raise NotImplementedError('ContentState_mixin.accepts not implemented in %s' % (type(self),))

    def notifyFailure (self, sub_state, particle_ok):
        """Invoked by a sub-state to indicate that validation cannot proceed
        in the current state.
        Normally this is used when an intermediate content model must reset
        itself to permit alternative models to be evaluated.
        @param sub_state: the state that was unable to accept a value
        @param particle_ok: C{True} if the particle that rejected the value is
        in an accepting terminal state
        """
        # Abstract method: must be overridden by concrete state classes.
        raise NotImplementedError('ContentState_mixin.notifyFailure not implemented in %s' % (type(self),))

    def _verifyComplete (self, parent_particle_state):
        """Determine whether the deep state is complete without further elements.
        No-op for non-aggregate state.  For aggregate state, all contained
        particles should be checked to see whether the overall model can be
        satisfied if no additional elements are provided.
        This method does not have a meaningful return value; violations of the
        content model should produce the corresponding exception (generally,
        L{MissingContentError}).
        @param parent_particle_state: the L{ParticleState} for which this state
        is the term.
        """
        pass
class ContentModel_mixin (pyxb.cscRoot):
    """Declares methods used by classes representing content model components."""

    def newState (self, parent_particle_state):
        """Return a L{ContentState_mixin} instance that will validate the
        state of this model component.
        @param parent_particle_state: The L{ParticleState} instance for which
        this instance is a term.  C{None} for the top content model of a
        complex data type.
        """
        # Abstract method: NotImplementedError (a subclass of Exception)
        # replaces the original bare Exception, so existing handlers still work.
        raise NotImplementedError('ContentModel_mixin.newState not implemented in %s' % (type(self),))

    def _validateCloneSymbolSet (self, symbol_set_im):
        """Create a mutable copy of the symbol set.
        The top-level map is copied, as are the lists of values.  The values
        within the lists are unchanged, as validation does not affect them."""
        rv = symbol_set_im.copy()
        # Replacing values for existing keys does not change the dict size,
        # so mutating while iterating items() is safe here.
        for (k, v) in rv.items():
            rv[k] = v[:]
        return rv

    def _validateCloneOutputSequence (self, output_sequence_im):
        """Return a shallow copy of the output sequence."""
        return output_sequence_im[:]

    def _validateReplaceResults (self, symbol_set_out, symbol_set_new, output_sequence_out, output_sequence_new):
        """In-place update of symbol set and output sequence structures.
        Use this to copy from temporary mutable structures updated by local
        validation into the structures that were passed in once the validation
        has succeeded."""
        symbol_set_out.clear()
        symbol_set_out.update(symbol_set_new)
        output_sequence_out[:] = output_sequence_new

    def _validate (self, symbol_set, output_sequence):
        """Determine whether an output sequence created from the symbols can
        be made consistent with the model.
        The symbol set represents letters in an alphabet; the output sequence
        orders those letters in a way that satisfies the regular expression
        expressed in the model.  Both are changed as a result of a successful
        validation; both remain unchanged if the validation failed.  In
        recursing, implementers may assume that C{output_sequence} is
        monotonic: its length remains unchanged after an invocation iff the
        symbol set also remains unchanged.  The L{_validateCloneSymbolSet},
        L{_validateCloneOutputSequence}, and L{_validateReplaceResults}
        methods are available to help preserve this behavior.
        @param symbol_set: A map from L{ElementUse} instances to a list of
        values.  The order of the values corresponds to the order in which
        they should appear.  A key of C{None} identifies values that are
        stored as wildcard elements.  Values are removed from the lists as
        they are used; when the last value of a list is removed, its key is
        removed from the map.  Thus an empty dictionary is the indicator that
        no more symbols are available.
        @param output_sequence: A mutable list to which should be appended
        tuples C{( eu, val )} where C{eu} is an L{ElementUse} from the set of
        symbol keys, and C{val} is a value from the corresponding list.  A
        document generated by producing the elements in the given order is
        expected to validate.
        @return: C{True} iff the model validates.  C{symbol_set} and
        C{output_path} must be unmodified if returns C{False}.
        """
        # Fixed: the original message misattributed this method to
        # ContentState_mixin; it belongs to ContentModel_mixin.
        raise NotImplementedError('ContentModel_mixin._validate not implemented in %s' % (type(self),))
class AttributeUse (pyxb.cscRoot):
    """A helper class that encapsulates everything we need to know
    about the way an attribute is used within a binding class.
    Attributes are stored internally as pairs C{(provided, value)}, where
    C{provided} is a boolean indicating whether a value for the attribute was
    provided externally, and C{value} is an instance of the attribute
    datatype.  The C{provided} flag is used to determine whether an XML
    attribute should be added to a created DOM node when generating the XML
    corresponding to a binding instance.
    """
    __name = None           # ExpandedName of the attribute
    __id = None             # Identifier used for this attribute within the owning class
    __key = None            # Private attribute used in instances to hold the attribute value
    __dataType = None       # PST datatype
    __unicodeDefault = None # Default value as a unicode string, or None
    __defaultValue = None   # Default value as an instance of datatype, or None
    __fixed = False         # If True, value cannot be changed
    __required = False      # If True, attribute must appear
    __prohibited = False    # If True, attribute must not appear

    def __init__ (self, name, id, key, data_type, unicode_default=None, fixed=False, required=False, prohibited=False):
        """Create an AttributeUse instance.
        @param name: The name by which the attribute is referenced in the XML
        @type name: L{pyxb.namespace.ExpandedName}
        @param id: The Python identifier for the attribute within the
        containing L{pyxb.basis.binding.complexTypeDefinition}.  This is a
        public identifier, derived from the local part of the attribute name
        and modified to be unique, and is usually used as the name of the
        attribute's inspector method.
        @type id: C{str}
        @param key: The string used to store the attribute
        value in the dictionary of the containing
        L{pyxb.basis.binding.complexTypeDefinition}.  This is mangled so
        that it is unique among and is treated as a Python private member.
        @type key: C{str}
        @param data_type: The class reference to the subclass of
        L{pyxb.binding.basis.simpleTypeDefinition} of which the attribute
        values must be instances.
        @type data_type: C{type}
        @keyword unicode_default: The default value of the attribute as
        specified in the schema, or None if there is no default attribute
        value.  The default value (of the keyword) is C{None}.
        @type unicode_default: C{unicode}
        @keyword fixed: If C{True}, indicates that the attribute, if present,
        must have the value that was given via C{unicode_default}.  The
        default value is C{False}.
        @type fixed: C{bool}
        @keyword required: If C{True}, indicates that the attribute must appear
        in the DOM node used to create an instance of the corresponding
        L{pyxb.binding.basis.complexTypeDefinition}.  The default value is
        C{False}.  No more that one of L{required} and L{prohibited} should be
        assigned C{True}.
        @type required: C{bool}
        @keyword prohibited: If C{True}, indicates that the attribute must
        B{not} appear in the DOM node used to create an instance of the
        corresponding L{pyxb.binding.basis.complexTypeDefinition}.  The
        default value is C{False}.  No more that one of L{required} and
        L{prohibited} should be assigned C{True}.
        @type prohibited: C{bool}
        @raise pyxb.BadTypeValueError: the L{unicode_default} cannot be used
        to initialize an instance of L{data_type}
        """
        self.__name = name
        self.__id = id
        self.__key = key
        self.__dataType = data_type
        self.__unicodeDefault = unicode_default
        if self.__unicodeDefault is not None:
            # May raise BadTypeValueError if the schema default is not a
            # valid literal for the datatype.
            self.__defaultValue = self.__dataType.Factory(self.__unicodeDefault)
        self.__fixed = fixed
        self.__required = required
        self.__prohibited = prohibited

    def name (self):
        """The expanded name of the element.
        @rtype: L{pyxb.namespace.ExpandedName}
        """
        return self.__name

    def defaultValue (self):
        """The default value of the attribute."""
        return self.__defaultValue

    def fixed (self):
        """C{True} iff the value of the attribute cannot be changed."""
        return self.__fixed

    def required (self):
        """Return True iff the attribute must be assigned a value."""
        return self.__required

    def prohibited (self):
        """Return True iff the attribute must not be assigned a value."""
        return self.__prohibited

    def provided (self, ctd_instance):
        """Return True iff the given instance has been explicitly given a
        value for the attribute.
        This is used for things like only generating an XML attribute
        assignment when a value was originally given (even if that value
        happens to be the default).
        """
        return self.__getProvided(ctd_instance)

    def id (self):
        """Tag used within Python code for the attribute.
        This is not used directly in the default code generation template."""
        return self.__id

    def key (self):
        """String used as key within object dictionary when storing attribute value."""
        return self.__key

    def dataType (self):
        """The subclass of L{pyxb.binding.basis.simpleTypeDefinition} of which any attribute value must be an instance."""
        return self.__dataType

    def __getValue (self, ctd_instance):
        """Retrieve the value information for this attribute in a binding instance.
        @param ctd_instance: The instance object from which the attribute is to be retrieved.
        @type ctd_instance: subclass of L{pyxb.binding.basis.complexTypeDefinition}
        @return: C{(provided, value)} where C{provided} is a C{bool} and
        C{value} is C{None} or an instance of the attribute's datatype.
        """
        # Absent attribute: not provided, no value.
        return getattr(ctd_instance, self.__key, (False, None))

    def __getProvided (self, ctd_instance):
        return self.__getValue(ctd_instance)[0]

    def value (self, ctd_instance):
        """Get the value of the attribute from the instance."""
        return self.__getValue(ctd_instance)[1]

    def __setValue (self, ctd_instance, new_value, provided):
        return setattr(ctd_instance, self.__key, (provided, new_value))

    def reset (self, ctd_instance):
        """Set the value of the attribute in the given instance to be its
        default value, and mark that it has not been provided."""
        self.__setValue(ctd_instance, self.__defaultValue, False)

    def addDOMAttribute (self, dom_support, ctd_instance, element):
        """If this attribute as been set, add the corresponding attribute to the DOM element."""
        ( provided, value ) = self.__getValue(ctd_instance)
        if provided:
            assert value is not None
            dom_support.addAttribute(element, self.__name, value.xsdLiteral())
        return self

    def validate (self, ctd_instance):
        """Check this attribute's stored value against the use constraints.

        @raise pyxb.ProhibitedAttributeError: a prohibited attribute has a value
        @raise pyxb.MissingAttributeError: a required attribute has no value
        @raise pyxb.BindingValidationError: the value is not of the declared type
        """
        (provided, value) = self.__getValue(ctd_instance)
        if value is not None:
            if self.__prohibited:
                raise pyxb.ProhibitedAttributeError('Value given for prohibited attribute %s' % (self.__name,))
            if self.__required and not provided:
                # Only a fixed attribute can carry a value that was never
                # explicitly provided.
                assert self.__fixed
                raise pyxb.MissingAttributeError('Fixed required attribute %s was never set' % (self.__name,))
            if not self.__dataType._IsValidValue(value):
                raise pyxb.BindingValidationError('Attribute %s value type %s not %s' % (self.__name, type(value), self.__dataType))
            self.__dataType.XsdConstraintsOK(value)
        else:
            if self.__required:
                raise pyxb.MissingAttributeError('Required attribute %s does not have a value' % (self.__name,))
        return True

    def set (self, ctd_instance, new_value):
        """Set the value of the attribute.
        This validates the value against the data type, creating a new instance if necessary.
        @param ctd_instance: The binding instance for which the attribute
        value is to be set
        @type ctd_instance: subclass of L{pyxb.binding.basis.complexTypeDefinition}
        @param new_value: The value for the attribute
        @type new_value: An C{xml.dom.Node} instance, or any value that is
        permitted as the input parameter to the C{Factory} method of the
        attribute's datatype.
        """
        provided = True
        if isinstance(new_value, xml.dom.Node):
            # Extract the attribute's text from the DOM node, falling back to
            # the schema default when the attribute is absent.
            unicode_value = self.__name.getAttribute(new_value)
            if unicode_value is None:
                if self.__required:
                    raise pyxb.MissingAttributeError('Required attribute %s from %s not found' % (self.__name, ctd_instance._ExpandedName or type(ctd_instance)))
                provided = False
                unicode_value = self.__unicodeDefault
            if unicode_value is None:
                # Must be optional and absent
                provided = False
                new_value = None
            else:
                new_value = unicode_value
        else:
            assert new_value is not None
        if self.__prohibited:
            raise pyxb.ProhibitedAttributeError('Value given for prohibited attribute %s' % (self.__name,))
        if (new_value is not None) and (not isinstance(new_value, self.__dataType)):
            # Coerce plain values (including extracted text) into the datatype.
            new_value = self.__dataType.Factory(new_value)
        if self.__fixed and (new_value != self.__defaultValue):
            raise pyxb.AttributeChangeError('Attempt to change value of fixed attribute %s' % (self.__name,))
        self.__setValue(ctd_instance, new_value, provided)
        return new_value

    def _description (self, name_only=False, user_documentation=True):
        """Return a human-readable description of this attribute use."""
        if name_only:
            return str(self.__name)
        assert issubclass(self.__dataType, basis._TypeBinding_mixin)
        desc = [ str(self.__id), ': ', str(self.__name), ' (', self.__dataType._description(name_only=True, user_documentation=False), '), ' ]
        if self.__required:
            desc.append('required')
        elif self.__prohibited:
            desc.append('prohibited')
        else:
            desc.append('optional')
        if self.__defaultValue is not None:
            desc.append(', ')
            if self.__fixed:
                desc.append('fixed')
            else:
                desc.append('default')
            desc.extend(['=', self.__unicodeDefault ])
        return ''.join(desc)
class ElementUse (ContentState_mixin, ContentModel_mixin):
"""Aggregate the information relevant to an element of a complex type.
This includes the L{original tag name<name>}, the spelling of L{the
corresponding object in Python <id>}, an L{indicator<isPlural>} of whether
multiple instances might be associated with the field, and other relevant
information..
"""
    def name (self):
        """The expanded name of the element.
        @rtype: L{pyxb.namespace.ExpandedName}
        """
        return self.__name
    # Class-level default; the per-instance value is assigned in __init__.
    __name = None
    def id (self):
        """The string name of the binding class field used to hold the element
        values.
        This is the user-visible name, and excepting disambiguation will be
        equal to the local name of the element."""
        return self.__id
    # Class-level default; set per instance in __init__.
    __id = None
    # The dictionary key used to identify the value of the element.  The value
    # is the same as that used for private member variables in the binding
    # class within which the element declaration occurred.
    __key = None
    def elementBinding (self):
        """The L{basis.element} instance identifying the information
        associated with the element declaration.
        """
        return self.__elementBinding

    def _setElementBinding (self, element_binding):
        # Set the element binding for this use.  Only visible at all because
        # we have to define the uses before the element instances have been
        # created.
        self.__elementBinding = element_binding
        return self
    __elementBinding = None
    def isPlural (self):
        """True iff the content model indicates that more than one element
        can legitimately belong to this use.
        This includes elements in particles with maxOccurs greater than one,
        and when multiple elements with the same NCName are declared in the
        same type.
        """
        return self.__isPlural
    # Class-level default; set per instance in __init__.
    __isPlural = False
    def __init__ (self, name, id, key, is_plural, element_binding=None):
        """Create an ElementUse instance.
        @param name: The name by which the element is referenced in the XML
        @type name: L{pyxb.namespace.ExpandedName}
        @param id: The Python name for the element within the containing
        L{pyxb.basis.binding.complexTypeDefinition}.  This is a public
        identifier, albeit modified to be unique, and is usually used as the
        name of the element's inspector method or property.
        @type id: C{str}
        @param key: The string used to store the element
        value in the dictionary of the containing
        L{pyxb.basis.binding.complexTypeDefinition}.  This is mangled so
        that it is unique among and is treated as a Python private member.
        @type key: C{str}
        @param is_plural: If C{True}, documents for the corresponding type may
        have multiple instances of this element.  As a consequence, the value
        of the element will be a list.  If C{False}, the value will be C{None}
        if the element is absent, and a reference to an instance of the type
        identified by L{pyxb.binding.basis.element.typeDefinition} if present.
        @type is_plural: C{bool}
        @param element_binding: Reference to the class that serves as the
        binding for the element.
        """
        self.__name = name
        self.__id = id
        self.__key = key
        self.__isPlural = is_plural
        self.__elementBinding = element_binding
def defaultValue (self):
"""Return the default value for this element.
@todo: Right now, this returns C{None} for non-plural and an empty
list for plural elements. Need to support schema-specified default
values for simple-type content.
"""
if self.isPlural():
return []
return None
def value (self, ctd_instance):
"""Return the value for this use within the given instance."""
return getattr(ctd_instance, self.__key, self.defaultValue())
    def reset (self, ctd_instance):
        """Set the value for this use in the given element to its default.

        @return: this L{ElementUse} instance (for chaining)."""
        setattr(ctd_instance, self.__key, self.defaultValue())
        return self
    def set (self, ctd_instance, value):
        """Set the value of this element in the given instance.

        Passing C{None} resets the use to its default value.  When global
        validation is enabled the value is first coerced through the element
        binding's C{compatibleValue}; the value is also recorded in the
        instance's content via C{_addContent}.
        """
        if value is None:
            return self.reset(ctd_instance)
        assert self.__elementBinding is not None
        if basis._TypeBinding_mixin._PerformValidation:
            value = self.__elementBinding.compatibleValue(value, is_plural=self.isPlural())
        setattr(ctd_instance, self.__key, value)
        ctd_instance._addContent(value, self.__elementBinding)
        return self
def setOrAppend (self, ctd_instance, value):
"""Invoke either L{set} or L{append}, depending on whether the element
use is plural."""
if self.isPlural():
return self.append(ctd_instance, value)
return self.set(ctd_instance, value)
    def append (self, ctd_instance, value):
        """Add the given value as another instance of this element within the binding instance.
        @raise pyxb.StructuralBadDocumentError: invoked on an element use that is not plural
        """
        if not self.isPlural():
            raise pyxb.StructuralBadDocumentError('Cannot append to element with non-plural multiplicity')
        # NOTE(review): this assumes the instance already stores a list for
        # this use (normally ensured by reset()); if value() returned the
        # freshly-built default, the appended-to list is never stored back on
        # the instance — confirm against the instance lifecycle.
        values = self.value(ctd_instance)
        if basis._TypeBinding_mixin._PerformValidation:
            value = self.__elementBinding.compatibleValue(value)
        values.append(value)
        ctd_instance._addContent(value, self.__elementBinding)
        return values
    def toDOM (self, dom_support, parent, value):
        """Convert the given value to DOM as an instance of this element.
        @param dom_support: Helper for managing DOM properties
        @type dom_support: L{pyxb.utils.domutils.BindingDOMSupport}
        @param parent: The DOM node within which this element should be generated.
        @type parent: C{xml.dom.Element}
        @param value: The content for this element.  May be text (if the
        element allows mixed content), or an instance of
        L{basis._TypeBinding_mixin}.
        @raise pyxb.DOMGenerationError: the resolved element binding is abstract
        @raise pyxb.LogicError: C{value} is neither a binding instance nor text
        """
        if isinstance(value, basis._TypeBinding_mixin):
            # Resolve substitution groups: if the value substitutes for this
            # element, emit it under its own element binding instead.
            element_binding = self.__elementBinding
            if value._substitutesFor(element_binding):
                element_binding = value._element()
            assert element_binding is not None
            if element_binding.abstract():
                raise pyxb.DOMGenerationError('Element %s is abstract but content %s not associated with substitution group member' % (self.name(), value))
            element = dom_support.createChildElement(element_binding.name(), parent)
            elt_type = element_binding.typeDefinition()
            val_type = type(value)
            if isinstance(value, basis.complexTypeDefinition):
                assert isinstance(value, elt_type)
            else:
                # A union member value is attributed to the union type itself.
                if isinstance(value, basis.STD_union) and isinstance(value, elt_type._MemberTypes):
                    val_type = elt_type
            # Emit an xsi:type attribute when the DOM support or the element
            # type requires it (e.g. the value's type differs from the
            # declared type).
            if dom_support.requireXSIType() or elt_type._RequireXSIType(val_type):
                val_type_qname = value._ExpandedName.localName()
                tns_prefix = dom_support.namespacePrefix(value._ExpandedName.namespace())
                if tns_prefix is not None:
                    val_type_qname = '%s:%s' % (tns_prefix, val_type_qname)
                dom_support.addAttribute(element, pyxb.namespace.XMLSchema_instance.createExpandedName('type'), val_type_qname)
            value._toDOM_csc(dom_support, element)
        elif isinstance(value, (str, unicode)):
            # Plain text content (mixed content case): wrap it in a text node.
            element = dom_support.createChildElement(self.name(), parent)
            element.appendChild(dom_support.document().createTextNode(value))
        else:
            raise pyxb.LogicError('toDOM with unrecognized value type %s: %s' % (type(value), value))
def _description (self, name_only=False, user_documentation=True):
if name_only:
return str(self.__name)
desc = [ str(self.__id), ': ']
if self.isPlural():
desc.append('MULTIPLE ')
desc.append(self.elementBinding()._description(user_documentation=user_documentation))
return ''.join(desc)
    def newState (self, parent_particle_state):
        """Implement parent class method."""
        # An element use carries no per-parse mutable state, so it acts as
        # its own state object.
        return self
def accepts (self, particle_state, instance, value, element_use):
rv = self._accepts(instance, value, element_use)
if rv:
particle_state.incrementCount()
return rv
    def _accepts (self, instance, value, element_use):
        """Decide whether this use can store C{value} on C{instance}.

        @return: C{True} iff the value was stored.
        """
        # Exact match on the identified element use: store and succeed.
        if element_use == self:
            self.setOrAppend(instance, value)
            return True
        if element_use is not None:
            # If there's a known element, and it's not this one, the content
            # does not match.  This assumes we handled xsi:type and
            # substitution groups earlier, which may be true.
            return False
        if isinstance(value, xml.dom.Node):
            # If we haven't been able to identify an element for this before,
            # then we don't recognize it, and will have to treat it as a
            # wildcard.
            return False
        # No element identified: attempt a compatible-value conversion
        # through this use's element binding (Python 2 except syntax).
        try:
            self.setOrAppend(instance, self.__elementBinding.compatibleValue(value, _convert_string_values=False))
            return True
        except pyxb.BadTypeValueError, e:
            pass
        #print '%s %s %s in %s' % (instance, value, element_use, self)
        return False
def _validate (self, symbol_set, output_sequence):
values = symbol_set.get(self)
#print 'values %s' % (values,)
if values is None:
return False
used = values.pop(0)
output_sequence.append( (self, used) )
if 0 == len(values):
del symbol_set[self]
return True
    def __str__ (self):
        # Terse diagnostic form: element name plus object identity.
        return 'EU.%s@%x' % (self.__name, id(self))
class Wildcard (ContentState_mixin, ContentModel_mixin):
    """Placeholder for wildcard objects."""
    NC_any = '##any' #<<< The namespace constraint "##any"
    NC_not = '##other' #<<< A flag indicating constraint "##other"
    NC_targetNamespace = '##targetNamespace'
    NC_local = '##local'
    # Namespace constraint value; see namespaceConstraint() for valid forms.
    __namespaceConstraint = None
    def namespaceConstraint (self):
        """A constraint on the namespace for the wildcard.
        Valid values are:
         - L{Wildcard.NC_any}
         - A tuple ( L{Wildcard.NC_not}, a L{namespace<pyxb.namespace.Namespace>} instance )
         - set(of L{namespace<pyxb.namespace.Namespace>} instances)
        Namespaces are represented by their URIs.  Absence is
        represented by None, both in the "not" pair and in the set.
        """
        return self.__namespaceConstraint
    PC_skip = 'skip' #<<< No constraint is applied
    PC_lax = 'lax' #<<< Validate against available uniquely determined declaration
    PC_strict = 'strict' #<<< Validate against declaration or xsi:type which must be available
    # One of PC_*
    __processContents = None
    def processContents (self): return self.__processContents
    def __normalizeNamespace (self, nsv):
        """Convert a namespace URI string to a Namespace instance; C{None}
        passes through unchanged (Python 2 C{basestring})."""
        if nsv is None:
            return None
        if isinstance(nsv, basestring):
            nsv = pyxb.namespace.NamespaceForURI(nsv, create_if_missing=True)
        assert isinstance(nsv, pyxb.namespace.Namespace), 'unexpected non-namespace %s' % (nsv,)
        return nsv
    def __init__ (self, *args, **kw):
        # Namespace constraint and process contents are required parameters.
        nsc = kw['namespace_constraint']
        # Normalize URI strings inside the constraint into Namespace objects.
        if isinstance(nsc, tuple):
            nsc = (nsc[0], self.__normalizeNamespace(nsc[1]))
        elif isinstance(nsc, set):
            nsc = set([ self.__normalizeNamespace(_uri) for _uri in nsc ])
        self.__namespaceConstraint = nsc
        self.__processContents = kw['process_contents']
    def matches (self, instance, value):
        """Return True iff the value is a valid match against this wildcard.
        Validation per U{Wildcard allows Namespace Name<http://www.w3.org/TR/xmlschema-1/#cvc-wildcard-namespace>}.
        """
        # Determine the namespace of the candidate value: DOM nodes carry it
        # directly; binding instances get it from their element or type name.
        ns = None
        if isinstance(value, xml.dom.Node):
            if value.namespaceURI is not None:
                ns = pyxb.namespace.NamespaceForURI(value.namespaceURI)
        elif isinstance(value, basis._TypeBinding_mixin):
            elt = value._element()
            if elt is not None:
                ns = elt.name().namespace()
            else:
                ns = value._ExpandedName.namespace()
        else:
            raise pyxb.LogicError('Need namespace from value')
        # An absent namespace is treated as None for constraint checks.
        if isinstance(ns, pyxb.namespace.Namespace) and ns.isAbsentNamespace():
            ns = None
        if self.NC_any == self.__namespaceConstraint:
            return True
        if isinstance(self.__namespaceConstraint, tuple):
            # "##other": match anything except the constrained namespace
            # (and except the absent namespace).
            (_, constrained_ns) = self.__namespaceConstraint
            assert self.NC_not == _
            if ns is None:
                return False
            if constrained_ns == ns:
                return False
            return True
        # Otherwise the constraint is a set of allowed namespaces.
        return ns in self.__namespaceConstraint
    def newState (self, parent_particle_state):
        # Wildcards keep no per-parse state; serve as our own state object.
        return self
    def accepts (self, particle_state, instance, value, element_use):
        """Accept any value matching the namespace constraint, recording it
        as wildcard content on the instance (Python 2 print statement)."""
        if isinstance(value, xml.dom.Node):
            value_desc = 'value in %s' % (value.nodeName,)
        else:
            value_desc = 'value of type %s' % (type(value),)
        if not self.matches(instance, value):
            return False
        if not isinstance(value, basis._TypeBinding_mixin):
            print 'NOTE: Created unbound wildcard element from %s' % (value_desc,)
        assert isinstance(instance.wildcardElements(), list), 'Uninitialized wildcard list in %s' % (instance._ExpandedName,)
        instance._appendWildcardElement(value)
        particle_state.incrementCount()
        return True
    def _validate (self, symbol_set, output_sequence):
        # @todo check node against namespace constraint and process contents
        #print 'WARNING: Accepting node as wildcard match without validating.'
        # Wildcard values are stored in the symbol set under the key None.
        wc_values = symbol_set.get(None)
        if wc_values is None:
            return False
        used = wc_values.pop(0)
        output_sequence.append( (None, used) )
        if 0 == len(wc_values):
            del symbol_set[None]
        return True
class SequenceState (ContentState_mixin):
    """Content state for a sequence model group: particles must be satisfied
    one after another, in declaration order."""
    __failed = False
    __satisfied = False
    def __init__ (self, group, parent_particle_state):
        super(SequenceState, self).__init__(group)
        self.__sequence = group
        self.__parentParticleState = parent_particle_state
        self.__particles = group.particles()
        self.__index = -1
        self.__satisfied = False
        self.__failed = False
        # Bootstrap: notifyFailure advances __index to 0 and creates the
        # ParticleState for the first particle (or marks us satisfied for an
        # empty sequence).
        self.notifyFailure(None, False)
        #print 'SS.CTOR %s: %d elts' % (self, len(self.__particles))
    def accepts (self, particle_state, instance, value, element_use):
        """Offer the value to the current particle; on underflow advance to
        the next particle (via notifyFailure from step) and retry."""
        assert self.__parentParticleState == particle_state
        assert not self.__failed
        #print 'SS.ACC %s: %s %s %s' % (self, instance, value, element_use)
        while self.__particleState is not None:
            (consume, underflow_exc) = self.__particleState.step(instance, value, element_use)
            if consume:
                return True
            if underflow_exc is not None:
                self.__failed = True
                raise underflow_exc
        return False
    def _verifyComplete (self, parent_particle_state):
        # Drive each remaining particle through completion; notifyFailure
        # advances __particleState until the sequence is exhausted.
        while self.__particleState is not None:
            self.__particleState.verifyComplete()
    def notifyFailure (self, sub_state, particle_ok):
        """Advance to the next particle in the sequence; when none remain,
        record whether the sequence as a whole was satisfied."""
        self.__index += 1
        self.__particleState = None
        if self.__index < len(self.__particles):
            self.__particleState = ParticleState(self.__particles[self.__index], self)
        else:
            self.__satisfied = particle_ok
            if particle_ok:
                self.__parentParticleState.incrementCount()
        #print 'SS.NF %s: %d %s %s' % (self, self.__index, particle_ok, self.__particleState)
class ChoiceState (ContentState_mixin):
    """Content state for a choice model group: exactly one alternative
    particle is selected by the first value it accepts."""
    def __init__ (self, group, parent_particle_state):
        self.__parentParticleState = parent_particle_state
        super(ChoiceState, self).__init__(group)
        self.__choices = [ ParticleState(_p, self) for _p in group.particles() ]
        self.__activeChoice = None
        #print 'CS.CTOR %s: %d choices' % (self, len(self.__choices))
    def accepts (self, particle_state, instance, value, element_use):
        #print 'CS.ACC %s %s: %s %s %s' % (self, self.__activeChoice, instance, value, element_use)
        if self.__activeChoice is None:
            # No alternative committed yet: the first candidate that consumes
            # the value becomes the active choice; the rest are discarded.
            for choice in self.__choices:
                #print 'CS.ACC %s candidate %s' % (self, choice)
                try:
                    (consume, underflow_exc) = choice.step(instance, value, element_use)
                except Exception, e:
                    consume = False
                    underflow_exc = e
                #print 'CS.ACC %s: candidate %s : %s' % (self, choice, consume)
                if consume:
                    self.__activeChoice = choice
                    self.__choices = None
                    return True
            return False
        # An alternative is committed; feed it directly.
        (consume, underflow_exc) = self.__activeChoice.step(instance, value, element_use)
        #print 'CS.ACC %s : active choice %s %s %s' % (self, self.__activeChoice, consume, underflow_exc)
        if consume:
            return True
        if underflow_exc is not None:
            # NOTE(review): __failed is assigned here but never read in this
            # class — confirm whether it is vestigial.
            self.__failed = True
            raise underflow_exc
        return False
    def _verifyComplete (self, parent_particle_state):
        rv = True
        #print 'CS.VC %s: %s' % (self, self.__activeChoice)
        if self.__activeChoice is None:
            # Use self.__activeChoice as the iteration value so that it's
            # non-None when notifyFailure is invoked.
            for self.__activeChoice in self.__choices:
                try:
                    #print 'CS.VC: try %s' % (self.__activeChoice,)
                    self.__activeChoice.verifyComplete()
                    return
                except Exception, e:
                    pass
            #print 'Missing components %s' % ("\n".join([ "\n  ".join([str(_p2.term()) for _p2 in _p.particle().term().particles()]) for _p in self.__choices ]),)
            raise pyxb.MissingContentError('choice')
        self.__activeChoice.verifyComplete()
    def notifyFailure (self, sub_state, particle_ok):
        #print 'CS.NF %s %s' % (self, particle_ok)
        # Only a committed, satisfied alternative counts as one occurrence of
        # the choice for the parent particle.
        if particle_ok and (self.__activeChoice is not None):
            self.__parentParticleState.incrementCount()
        pass
class AllState (ContentState_mixin):
    """Content state for an all model group: each particle must be satisfied,
    in any order; particles are removed from the candidate set as they are
    activated."""
    __activeChoice = None
    __needRetry = False
    def __init__ (self, group, parent_particle_state):
        self.__parentParticleState = parent_particle_state
        super(AllState, self).__init__(group)
        self.__choices = set([ ParticleState(_p, self) for _p in group.particles() ])
        #print 'AS.CTOR %s: %d choices' % (self, len(self.__choices))
    def accepts (self, particle_state, instance, value, element_use):
        #print 'AS.ACC %s %s: %s %s %s' % (self, self.__activeChoice, instance, value, element_use)
        # notifyFailure sets __needRetry, letting the loop re-scan the
        # remaining candidates after the active particle completes.
        self.__needRetry = True
        while self.__needRetry:
            self.__needRetry = False
            if self.__activeChoice is None:
                for choice in self.__choices:
                    #print 'AS.ACC %s candidate %s' % (self, choice)
                    try:
                        (consume, underflow_exc) = choice.step(instance, value, element_use)
                    except Exception, e:
                        consume = False
                        underflow_exc = e
                    #print 'AS.ACC %s: candidate %s : %s' % (self, choice, consume)
                    if consume:
                        # Commit to this particle and remove it from the
                        # outstanding set.
                        self.__activeChoice = choice
                        self.__choices.discard(self.__activeChoice)
                        return True
                return False
            (consume, underflow_exc) = self.__activeChoice.step(instance, value, element_use)
            #print 'AS.ACC %s : active choice %s %s %s' % (self, self.__activeChoice, consume, underflow_exc)
            if consume:
                return True
            if underflow_exc is not None:
                # NOTE(review): __failed is assigned here but never read in
                # this class — confirm whether it is vestigial.
                self.__failed = True
                raise underflow_exc
        return False
    def _verifyComplete (self, parent_particle_state):
        #print 'AS.VC %s: %s, %d left' % (self, self.__activeChoice, len(self.__choices))
        # Finish the in-progress particle, then every remaining candidate.
        if self.__activeChoice is not None:
            self.__activeChoice.verifyComplete()
        while self.__choices:
            self.__activeChoice = self.__choices.pop()
            self.__activeChoice.verifyComplete()
    def notifyFailure (self, sub_state, particle_ok):
        #print 'AS.NF %s %s' % (self, particle_ok)
        self.__needRetry = True
        self.__activeChoice = None
        # The all group counts as one occurrence only when every particle has
        # been consumed and the last one completed successfully.
        if particle_ok and (0 == len(self.__choices)):
            self.__parentParticleState.incrementCount()
class ParticleState (pyxb.cscRoot):
    """Tracks the number of occurrences of one particle's term while content
    is matched, delegating per-term work to the term's state object."""
    def __init__ (self, particle, parent_state=None):
        self.__particle = particle
        self.__parentState = parent_state
        # Start at -1 so the initial incrementCount() leaves the count at 0
        # and creates the first term state.
        self.__count = -1
        #print 'PS.CTOR %s: particle %s' % (self, particle)
        self.incrementCount()
    def particle (self): return self.__particle
    def incrementCount (self):
        """Record one completed occurrence of the term and create a fresh
        term state for the next occurrence."""
        #print 'PS.IC %s' % (self,)
        self.__count += 1
        self.__termState = self.__particle.term().newState(self)
        self.__tryAccept = True
    def verifyComplete (self):
        # @TODO@ Set a flag so we can make verifyComplete safe to call
        # multiple times?
        #print 'PS.VC %s entry' % (self,)
        # Give the term state a chance to finish (e.g. drain nested groups)
        # before deciding whether the occurrence constraints are met.
        if not self.__particle.satisfiesOccurrences(self.__count):
            self.__termState._verifyComplete(self)
        if not self.__particle.satisfiesOccurrences(self.__count):
            print 'PS.VC %s incomplete' % (self,)
            raise pyxb.MissingContentError('incomplete')
        if self.__parentState is not None:
            self.__parentState.notifyFailure(self, True)
    def step (self, instance, value, element_use):
        """Attempt to apply the value as a new instance of the particle's term.
        The L{ContentState_mixin} created for the particle's term is consulted
        to determine whether the instance can accept the given value.  If so,
        the particle's maximum occurrence limit is checked; if not, and the
        particle has a parent state, it is informed of the failure.
        @param instance: An instance of a subclass of
        {basis.complexTypeDefinition}, into which the provided value will be
        stored if it is consistent with the current model state.
        @param value: The value that is being validated against the state.
        @param element_use: An optional L{ElementUse} instance that specifies
        the element to which the value corresponds.  This will be available
        when the value is extracted by parsing a document, but will be absent
        if the value was passed as a constructor positional parameter.
        @return: C{( consumed, underflow_exc )} A tuple where the first element
        is C{True} iff the provided value was accepted in the current state.
        When this first element is C{False}, the second element will be
        C{None} if the particle's occurrence requirements have been met, and
        is an instance of C{MissingElementError} if the observed number of
        terms is less than the minimum occurrence count.  Depending on
        context, the caller may raise this exception, or may try an
        alternative content model.
        @raise pyxb.UnexpectedElementError: if the value satisfies the particle,
        but having done so exceeded the allowable number of instances of the
        term.
        """
        #print 'PS.STEP %s: %s %s %s' % (self, instance, value, element_use)
        # Only try if we're not already at the upper limit on occurrences
        consumed = False
        underflow_exc = None
        # We can try the value against the term if we aren't at the maximum
        # count for the term.  Also, if we fail to consume, but as a side
        # effect of the test the term may have reset itself, we can try again.
        self.__tryAccept = True
        while self.__tryAccept and (self.__count != self.__particle.maxOccurs()):
            self.__tryAccept = False
            consumed = self.__termState.accepts(self, instance, value, element_use)
            #print 'PS.STEP %s: ta %s %s' % (self, self.__tryAccept, consumed)
            self.__tryAccept = self.__tryAccept and (not consumed)
        #print 'PS.STEP %s: %s' % (self, consumed)
        if consumed:
            if not self.__particle.meetsMaximum(self.__count):
                raise pyxb.UnexpectedElementError('too many')
        else:
            if self.__parentState is not None:
                self.__parentState.notifyFailure(self, self.__particle.satisfiesOccurrences(self.__count))
            if not self.__particle.meetsMinimum(self.__count):
                # @TODO@ Use better exception; changing this will require
                # changing some unit tests.
                #underflow_exc = pyxb.MissingElementError('too few')
                underflow_exc = pyxb.UnrecognizedContentError('too few')
        return (consumed, underflow_exc)
    def __str__ (self):
        particle = self.__particle
        return 'ParticleState(%d:%d,%s:%s)@%x' % (self.__count, particle.minOccurs(), particle.maxOccurs(), particle.term(), id(self))
class ParticleModel (ContentModel_mixin):
    """Content model dealing with particles: terms with occurrence restrictions"""
    def minOccurs (self): return self.__minOccurs
    def maxOccurs (self): return self.__maxOccurs
    def term (self): return self.__term
    def meetsMaximum (self, count):
        """@return: C{True} iff there is no maximum on term occurrence, or the
        provided count does not exceed that maximum"""
        # A maxOccurs of None denotes "unbounded".
        return (self.__maxOccurs is None) or (count <= self.__maxOccurs)
    def meetsMinimum (self, count):
        """@return: C{True} iff the provided count meets the minimum number of
        occurrences"""
        return count >= self.__minOccurs
    def satisfiesOccurrences (self, count):
        """@return: C{True} iff the provided count satisfies the occurrence
        requirements"""
        return self.meetsMinimum(count) and self.meetsMaximum(count)
    def __init__ (self, term, min_occurs=1, max_occurs=1):
        self.__term = term
        self.__minOccurs = min_occurs
        self.__maxOccurs = max_occurs
    def newState (self):
        return ParticleState(self)
    def validate (self, symbol_set):
        """Determine whether the particle requirements are satisfiable by the
        given symbol set.

        The symbol set represents letters in an alphabet.  If those letters
        can be combined in a way that satisfies the regular expression
        expressed in the model, a satisfying sequence is returned and the
        symbol set is reduced by the letters used to form the sequence.  If
        the content model cannot be satisfied, C{None} is returned and the
        symbol set remains unchanged.
        @param symbol_set: A map from L{ElementUse} instances to a list of
        values.  The order of the values corresponds to the order in which
        they should appear.  A key of C{None} identifies values that are
        stored as wildcard elements.  Values are removed from the lists as
        they are used; when the last value of a list is removed, its key is
        removed from the map.  Thus an empty dictionary is the indicator that
        no more symbols are available.
        @return: returns C{None}, or a list of tuples C{( eu, val )} where
        C{eu} is an L{ElementUse} from the set of symbol keys, and C{val} is a
        value from the corresponding list.
        """
        output_sequence = []
        #print 'Start: %d %s %s : %s' % (self.__minOccurs, self.__maxOccurs, self.__term, symbol_set)
        result = self._validate(symbol_set, output_sequence)
        #print 'End: %s %s %s' % (result, symbol_set, output_sequence)
        if result:
            return (symbol_set, output_sequence)
        return None
    def _validate (self, symbol_set, output_sequence):
        # Work on mutable clones; commit to the caller's structures only when
        # the occurrence requirements end up satisfied.
        symbol_set_mut = self._validateCloneSymbolSet(symbol_set)
        output_sequence_mut = self._validateCloneOutputSequence(output_sequence)
        count = 0
        #print 'VAL start %s: %d %s' % (self.__term, self.__minOccurs, self.__maxOccurs)
        last_size = len(output_sequence_mut)
        while (count != self.__maxOccurs) and self.__term._validate(symbol_set_mut, output_sequence_mut):
            #print 'VAL %s old cnt %d, left %s' % (self.__term, count, symbol_set_mut)
            this_size = len(output_sequence_mut)
            if this_size == last_size:
                # Validated without consuming anything.  Assume we can
                # continue to do so, jump to the minimum, and exit.
                if count < self.__minOccurs:
                    count = self.__minOccurs
                break
            count += 1
            last_size = this_size
        result = self.satisfiesOccurrences(count)
        if (result):
            self._validateReplaceResults(symbol_set, symbol_set_mut, output_sequence, output_sequence_mut)
        #print 'VAL end PRT %s res %s: %s %s %s' % (self.__term, result, self.__minOccurs, count, self.__maxOccurs)
        return result
class _Group (ContentModel_mixin):
    """Base class for content information pertaining to a U{model
    group<http://www.w3.org/TR/xmlschema-1/#Model_Groups>}.

    There is a specific subclass for each group compositor.
    """

    _StateClass = None
    """A reference to a L{ContentState_mixin} class that maintains state when
    validating an instance of this group."""

    def particles (self): return self.__particles

    def __init__ (self, *particles):
        self.__particles = particles

    def newState (self, parent_particle_state):
        return self._StateClass(self, parent_particle_state)

    # All and Sequence share the same validation code, so it's up here.
    def _validate (self, symbol_set, output_sequence):
        # Work against mutable clones so a partial match leaves the caller's
        # structures untouched; commit only when every particle validates.
        mutable_symbols = self._validateCloneSymbolSet(symbol_set)
        mutable_output = self._validateCloneOutputSequence(output_sequence)
        for particle in self.particles():
            if not particle._validate(mutable_symbols, mutable_output):
                return False
        self._validateReplaceResults(symbol_set, mutable_symbols, output_sequence, mutable_output)
        return True
class GroupChoice (_Group):
    _StateClass = ChoiceState

    # Choice requires a different validation algorithm
    def _validate (self, symbol_set, output_sequence):
        # Try each alternative in turn.  The mutable clones are rebuilt only
        # when the previous alternative actually consumed something; an
        # alternative that consumed nothing leaves them reusable as-is.
        reset_mutables = True
        for p in self.particles():
            if reset_mutables:
                symbol_set_mut = self._validateCloneSymbolSet(symbol_set)
                output_sequence_mut = self._validateCloneOutputSequence(output_sequence)
            if p._validate(symbol_set_mut, output_sequence_mut):
                # First satisfying alternative wins: commit and stop.
                self._validateReplaceResults(symbol_set, symbol_set_mut, output_sequence, output_sequence_mut)
                return True
            reset_mutables = len(output_sequence) != len(output_sequence_mut)
        return False
class GroupAll (_Group):
    # Model group for the "all" compositor; state handling in AllState.
    _StateClass = AllState
class GroupSequence (_Group):
    # Model group for the "sequence" compositor; state handling in SequenceState.
    _StateClass = SequenceState
## Local Variables:
## fill-column:78
## End:
| StarcoderdataPython |
8060179 | <gh_stars>0
""" a module to convert between the old (Python script) segment format,
and the new (JSON) one
"""
from typing import Dict, Tuple # noqa: F401
import os
import ast
import json
def assess_syntax(path):
    """Parse a segment-defining Python file and return JSON-ready data.

    The file is expected to contain an optional leading module docstring and
    a top-level assignment of a ``tpl_dict`` or ``tplx_dict`` literal whose
    keys and values are all string constants.

    :param path: path to the Python source file
    :return: dict with ``identifier`` (file basename without extension),
        ``description`` (the module docstring), ``segments`` (the dict
        contents) and the ``$schema`` reference
    :raises ValueError: the dict assignment or its keys/values have the
        wrong node types
    :raises IOError: no ``tpl(x)_dict`` assignment was found

    Note: uses ``ast.Constant`` rather than the deprecated ``ast.Str``
    (removed in Python 3.12), while preserving behavior on older versions.
    """
    with open(path) as file_obj:
        content = file_obj.read()
    syntax_tree = ast.parse(content)

    docstring = ""  # docstring = ast.get_docstring(syntaxTree)
    dct = None
    dtype = None
    for i, child in enumerate(ast.iter_child_nodes(syntax_tree)):
        if (i == 0
                and isinstance(child, ast.Expr)
                and isinstance(child.value, ast.Constant)
                and isinstance(child.value.value, str)):
            # A leading string expression is the module docstring.
            docstring = child.value.value
        elif isinstance(child, ast.Assign):
            targets = child.targets
            if len(targets) > 1:
                continue
            target = child.targets[0]
            # Skip non-name targets (e.g. attribute/subscript assignments),
            # which would otherwise raise AttributeError on .id below.
            if not isinstance(target, ast.Name):
                continue
            dtype = target.id
            if dtype not in ["tpl_dict", "tplx_dict"]:
                continue
            if not isinstance(child.value, ast.Dict):
                raise ValueError(
                    "expected {} to be of type Dict: {}".format(
                        dtype, child.value))
            dct = child.value
            break

    if dct is None:
        raise IOError("could not find tpl(x)_dict")

    output = {}
    for key, value in zip(dct.keys, dct.values):
        if not (isinstance(key, ast.Constant) and isinstance(key.value, str)):
            raise ValueError(
                "expected {} key to be of type Str: {}".format(
                    dtype, key))
        if not (isinstance(value, ast.Constant) and isinstance(value.value, str)):
            raise ValueError(
                "expected {} value be of type Str: {}".format(
                    dtype, value))
        output[key.value] = value.value

    return {
        "identifier": os.path.splitext(os.path.basename(path))[0],
        "description": docstring,
        "segments": output,
        "$schema": "../../schema/segment.schema.json"
    }
def py_to_json(path, outpath=None):
    """Convert a segment .py file into JSON text, optionally writing it out.

    :param path: path to the source Python segment file
    :param outpath: when given, the JSON is also written to this file
    :return: the JSON document as a string
    """
    data = assess_syntax(path)
    text = json.dumps(data, indent=2)
    if outpath:
        with open(outpath, "w") as file_obj:
            file_obj.write(text)
    return text
if __name__ == "__main__":
    # Batch-convert every segment .py file under this directory tree into a
    # prefixed .tpl.json file next to the source.
    import glob
    _dir_path = os.path.dirname(os.path.realpath(__file__))
    _ext = ".tpl.json"
    # NOTE(review): glob is called without recursive=True, so "**" matches a
    # single directory level only — confirm this is intended.
    for _path in glob.glob(os.path.join(_dir_path, "**", "*.py")):
        _name = os.path.splitext(os.path.basename(_path))[0]
        _folder = os.path.basename(os.path.dirname(_path))
        # Segments under the "ipypublish" folder get the "ipy-" prefix; all
        # others are treated as standard ("std-") segments.
        if _folder == "ipypublish":
            _prefix = "ipy-"
        else:
            _prefix = "std-"
        _outpath = os.path.join(os.path.dirname(_path), _prefix+_name+_ext)
        py_to_json(_path, _outpath)
| StarcoderdataPython |
1892771 | <gh_stars>0
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import r2_score
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
# X = pd.read_csv('ratios.csv')
# X = X.loc[:, ['Positive', 'Negative', 'Count']]
#
#
# Y = np.loadtxt('price_diff.csv', dtype='float', delimiter=',')
# Y = np.reshape(Y, (415, 1))
#
# sampl = 100
# X_train = X[:-sampl]
# y_train = Y[:-sampl]
#
# X_test = X[-sampl:]
# y_test = Y[-sampl:]
#
# model = LinearRegression()
# model = model.fit(X_train, y_train)
# y_train_pred = model.predict(X_train)
# y_test_pred = model.predict(X_test)
#
# y_train_pred = np.reshape(y_train_pred, (len(y_train_pred),))
# y_test_pred = np.reshape(y_test_pred, (len(y_test_pred),))
#
#
# print(r2_score(y_train, y_train_pred))
#
# print(r2_score(y_test, y_test_pred))
# #model = LinearRegression()
# model = GradientBoostingRegressor()
# model = model.fit(X, Y)
# Y_pred = model.predict(X)
#
# print(r2_score(Y, Y_pred))
# Load strategy trade data and sentiment ratios, then train a classifier to
# predict trade direction from features plus sentiment columns.
data = pd.read_csv('BTCUSDARMeanReversion.csv')
sentiment = pd.read_csv('ratios.csv')
sentiment = sentiment.loc[:, ['Positive', 'Negative', 'Count']]
# Binary target: 1 when Position < 1, else 0.
Y_num = data['Position']
Y = np.where(Y_num < 1, 1, 0)
print(Y)
# Feature matrix: drop target and bookkeeping columns, append sentiment.
X = data.drop(['Position', 'OpenTime', 'CloseTime', 'ClosePrice', 'PnL'], axis=1)
X['Positive'] = sentiment['Positive']
X['Negative'] = sentiment['Negative']
X['Count'] = sentiment['Count']
print(X)
# Chronological holdout: last 30 rows for testing.
sampl = 30
X_train = X[:-sampl]
y_train = Y[:-sampl]
X_test = X[-sampl:]
y_test = Y[-sampl:]
#model = DecisionTreeClassifier()
model = GradientBoostingClassifier()
model = model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
# Flatten predictions to 1-D for the metric functions.
y_train_pred = np.reshape(y_train_pred, (len(y_train_pred),))
y_test_pred = np.reshape(y_test_pred, (len(y_test_pred),))
# print(r2_score(y_train, y_train_pred))
#
# print(r2_score(y_test, y_test_pred))
# Report confusion matrix, precision and recall on both splits.
print("TRAIN")
print(confusion_matrix(y_train, y_train_pred))
print(precision_score(y_train, y_train_pred))
print(recall_score(y_train, y_train_pred))
print("TEST")
print(confusion_matrix(y_test, y_test_pred))
print(precision_score(y_test, y_test_pred))
print(recall_score(y_test, y_test_pred))
print(y_test, y_test_pred, Y_num[-sampl:])
# Persist full-sample predictions alongside the original data.
data['Prediction'] = model.predict(X)
data.to_csv('prediction_2.csv')
| StarcoderdataPython |
3576991 | <reponame>code-review-doctor/pythondotorg
# Generated by Django 2.2.24 on 2021-12-23 13:09
from django.db import migrations
from django.utils.text import slugify
def populate_packages_slugs(apps, schema_editor):
    """Backfill empty ``SponsorshipPackage.slug`` values from the package name."""
    SponsorshipPackage = apps.get_model("sponsors", "SponsorshipPackage")
    for package in SponsorshipPackage.objects.filter(slug=""):
        package.slug = slugify(package.name)
        package.save()
class Migration(migrations.Migration):
    # Data migration: fills in slugs for packages created before the slug
    # field (added in 0064) existed.

    dependencies = [
        ('sponsors', '0064_sponsorshippackage_slug'),
    ]
    operations = [
        # Reverse operation is a no-op: existing slugs are left untouched
        # when the migration is unapplied.
        migrations.RunPython(populate_packages_slugs, migrations.RunPython.noop)
    ]
| StarcoderdataPython |
6500890 | #!/usr/bin/env python
import unittest
import numpy as np
from arte.types.slopes import Slopes
class SlopesTest(unittest.TestCase):
    """Exercise numpy round-tripping and equality of `.Slopes` objects."""

    def setUp(self):
        self._n_slopes = 1600
        self._mapx = np.ma.masked_array(
            np.arange(self._n_slopes, dtype=np.float32))
        self._mapy = np.ma.masked_array(
            np.arange(self._n_slopes, dtype=np.float32)) * 10
        self._slopes = Slopes(self._mapx, self._mapy)

    def testNumpy(self):
        """fromNumpyArray/toNumpyArray must round-trip the x and y maps."""
        slopes = Slopes.fromNumpyArray(self._mapx, self._mapy)
        back_x, back_y = slopes.toNumpyArray()
        self.assertTrue(np.array_equal(self._mapx, back_x))
        self.assertTrue(np.array_equal(self._mapy, back_y))

    def testComparison(self):
        """Slopes built from equal but distinct arrays must compare equal."""
        first = Slopes.fromNumpyArray(self._mapx, self._mapy)
        map_x_copy = self._mapx.copy()
        map_y_copy = self._mapy.copy()
        self.assertTrue(map_x_copy is not self._mapx)
        self.assertTrue(map_y_copy is not self._mapy)
        second = Slopes.fromNumpyArray(map_x_copy, map_y_copy)
        self.assertEqual(first, second)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
6401337 | from django import forms
from django.contrib.auth.models import User
from .models import Participar
class ParticiparForm(forms.ModelForm):
    """Model form exposing only the ``asistir`` field of `Participar`."""

    class Meta:
        model = Participar
        fields = ['asistir']
| StarcoderdataPython |
8111771 | # Copyright 2004 <NAME>.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from pyplusplus.decl_wrappers import default_call_policies
from pyplusplus.decl_wrappers import return_arg
from pyplusplus.decl_wrappers import return_self
from pyplusplus.decl_wrappers import return_internal_reference
from pyplusplus.decl_wrappers import with_custodian_and_ward
from pyplusplus.decl_wrappers import with_custodian_and_ward_postcall
from pyplusplus.decl_wrappers import return_value_policy_t
from pyplusplus.decl_wrappers import copy_const_reference
from pyplusplus.decl_wrappers import copy_non_const_reference
from pyplusplus.decl_wrappers import manage_new_object
from pyplusplus.decl_wrappers import reference_existing_object
from pyplusplus.decl_wrappers import return_by_value
from pyplusplus.decl_wrappers import return_opaque_pointer
from pyplusplus.decl_wrappers import return_value_policy
from pyplusplus.decl_wrappers import return_pointee_value
from pyplusplus.decl_wrappers import custom_call_policies
from pyplusplus.decl_wrappers import convert_array_to_tuple
from pyplusplus.decl_wrappers import memory_managers
from pyplusplus.decl_wrappers import return_range
from pyplusplus.decl_wrappers import return_range_t
| StarcoderdataPython |
276409 | import base64
import hashlib
import bencodepy
def create_magnet_uri(data: bytes):
    """Build a magnet URI from the raw bytes of a .torrent file.

    The info dictionary is re-bencoded and SHA-1 hashed to produce the
    base32 ``btih`` hash; ``tr`` (tracker), ``dn`` (display name) and
    ``xl`` (exact length) parameters are appended when present.

    :param data: bencoded .torrent file contents
    :return: the magnet URI string
    :raises KeyError: the torrent metadata has no ``info`` dictionary
    """
    # noinspection PyTypeChecker
    metadata: dict = bencodepy.decode(data)
    # KeyError here if b'info' is absent; the original code re-checked
    # b'info' membership later, but that check could never be False once
    # this lookup succeeded, so the redundant guard has been removed.
    info = metadata[b'info']
    hashcontents = bencodepy.encode(info)
    digest = hashlib.sha1(hashcontents).digest()
    b32hash = base64.b32encode(digest).decode()
    magnet_uri = 'magnet:?' + 'xt=urn:btih:' + b32hash
    if b"announce" in metadata:
        magnet_uri += ('&tr=' + metadata[b'announce'].decode())
    if b"name" in info:
        magnet_uri += ('&dn=' + info[b'name'].decode())
    if b"length" in info:
        magnet_uri += ('&xl=' + str(info[b'length']))
    return magnet_uri
| StarcoderdataPython |
1832107 | <gh_stars>0
def solution(s, n):
    """Caesar-shift ``s`` by ``n`` positions.

    Lowercase and uppercase letters wrap around within their own alphabet;
    spaces are preserved; any other character is shifted by ``n`` code
    points without wrapping.
    """
    shifted = []
    for ch in s:
        if ch == ' ':
            shifted.append(ch)
            continue
        code = ord(ch) + n
        if 'a' <= ch <= 'z' and code > ord('z'):
            code -= 26
        elif 'A' <= ch <= 'Z' and code > ord('Z'):
            code -= 26
        shifted.append(chr(code))
    return ''.join(shifted)
print(solution('z b', 1)) | StarcoderdataPython |
4859204 | import sublime
import webbrowser
from .common import log, hh_syntax
from .view import find_help_view, update_help_view, focus_on
from .help_index import _load_help_index, _scan_help_packages
from .help import _post_process_links, _resource_for_help
from .help import _load_help_file, _display_help_file, _reload_help_file
from .help import HistoryData, _update_help_history
###----------------------------------------------------------------------------
def load_help_index(index_resource):
    """
    Given an index resource that points to a hyperhelp.json file, load the help
    index and return back a normalized version. Returns None on error.

    :param index_resource: resource path of the package's hyperhelp.json file
    """
    # Thin public wrapper around the internal loader.
    return _load_help_index(index_resource)
# NOTE(review): this function is re-defined verbatim further down in this
# module; this first definition is shadowed and never used — consider
# removing one of the two.
def load_help_file(pkg_info, help_file):
    """
    Load the contents of a help file contained in the provided help package.
    The help file should be relative to the document root of the package.

    Returns None if the help file cannot be loaded.
    """
    return _load_help_file(pkg_info, help_file)
def help_index_list(reload=False, package=None):
    """
    Obtain or reload the help index information for all packages.

    The indexes are demand-loaded on first access and cached as an attribute
    of this function.  With ``reload`` set, all package indexes (or just the
    one named by ``package``) are refreshed — except on the very first call,
    where the fresh scan already covers everything.
    """
    first_scan = not hasattr(help_index_list, "index")
    if first_scan:
        help_index_list.index = _scan_help_packages()

    if reload and not first_scan:
        help_index_list.index = reload_help_index(help_index_list.index, package)

    return help_index_list.index
def reload_help_index(help_list, package):
    """
    Reload help index information and return the updated help list.

    Given a package name, only that package's index is reloaded in place in
    the provided help list; asking for a package that was never loaded is a
    no-op (beyond a log message). With no package name the provided list is
    ignored entirely and a fresh scan of every package is returned instead.
    """
    if package is None:
        log("Recanning all help index files")
        return _scan_help_packages()

    pkg_info = help_list.get(package, None)
    if pkg_info is None:
        log("Package '%s' was not previously loaded; cannot reload", package)
        return help_list

    log("Reloading help index for package '%s'", package)
    result = _load_help_index(pkg_info.index_file)
    if result is not None:
        help_list[result.package] = result

    return help_list
def help_file_resource(pkg_info, help_file):
    """
    Map a help file (given relative to the document root of the provided
    help package) to the package resource name that contains it.
    """
    return _resource_for_help(pkg_info, help_file)
# NOTE(review): duplicate definition -- load_help_file is already defined
# earlier in this module with an identical body; this redefinition silently
# replaces the first one. One of the two should be removed.
def load_help_file(pkg_info, help_file):
    """
    Load the contents of a help file contained in the provided help package.
    The help file should be relative to the document root of the package.
    Returns None if the help file cannot be loaded.
    """
    return _load_help_file(pkg_info, help_file)
def display_help_file(pkg_info, help_file):
    """
    Show the given help file (relative to the document root of its package)
    in the help view of the current window, creating that view first when
    needed. Nothing happens if the help view is already displaying this
    file.

    Returns the help view on success, or None when the help file could not
    be found or loaded.
    """
    return _display_help_file(pkg_info, help_file)
def reload_help_file(help_list, help_view):
    """
    Re-display the help file currently shown in the given view so that any
    changes made since it was rendered are picked up. The package and file
    information is taken from the supplied help list.

    Returns True when the reload succeeded, False otherwise.
    """
    return _reload_help_file(help_list, help_view)
def lookup_help_topic(pkg_info, topic):
    """
    Resolve a topic name within a help package and return its topic
    structure, or None when the topic is unknown.

    pkg_info may be either a package info structure or the name of a
    package (in which case it is looked up in the help index first). The
    topic is normalized here (case folded, spaces converted to tabs) and
    any alias is followed before the final lookup.
    """
    pkg = pkg_info
    if isinstance(pkg, str):
        pkg = help_index_list().get(pkg, None)
    if pkg is None:
        return None

    key = topic.casefold().replace(" ", "\t")
    alias = pkg.help_aliases.get(key, None)
    return pkg.help_topics.get(alias or key, None)
def show_help_topic(package, topic, history):
    """
    Attempt to display the help for the provided topic in the given package
    (both strings) as appropriate. This will transparently create a new help
    view, open the underlying package file or open the URL as needed.
    If history is True, the history for the help view is updated after a
    successful help navigation to a help file; otherwise the history is left
    untouched. history is implicitly True when this has to create a help view
    for the first time so that history is properly initialized.
    The return value is None on error or a string that represents the kind of
    topic that was navigated to ("file", "pkg_file" or "url")
    """
    pkg_info = help_index_list().get(package, None)
    if pkg_info is None:
        return None
    topic_data = lookup_help_topic(pkg_info, topic)
    if topic_data is None:
        log("Unknown help topic '%s'", topic, status=True)
        return None
    help_file = topic_data["file"]
    # URL topics are handed off to the external web browser.
    if help_file in pkg_info.urls:
        webbrowser.open_new_tab(help_file)
        return "url"
    # Package-file topics open the raw file in a regular edit view rather
    # than the help view.
    if help_file in pkg_info.package_files:
        help_file = help_file.replace("Packages/", "${packages}/")
        window = sublime.active_window()
        window.run_command("open_file", {"file": help_file})
        return "pkg_file"
    # Update the current history entry if there is a help view.
    if history:
        _update_help_history(find_help_view())
    # Remember whether a help view existed BEFORE displaying, since creating
    # one initializes its history for us (see below).
    existing_view = True if find_help_view() is not None else False
    help_view = display_help_file(pkg_info, help_file)
    if help_view is None:
        log("Unable to load help file '%s'", help_file, status=True)
        return None
    found = False
    # Scan the view's navigation anchors for the requested topic and focus
    # it when present; anchors are (topic, region) pairs.
    anchors = help_view.settings().get("_hh_nav", [])
    for anchor in anchors:
        if topic_data["topic"] == anchor[0].casefold():
            focus_on(help_view, anchor[1], at_center=True)
            found = True
    # Update history to track the new file, but only if the help view already
    # existed; otherwise its creation set up the default history already.
    if history and existing_view:
        _update_help_history(help_view, append=True)
    if not found:
        log("Unable to find topic '%s' in help file '%s'", topic, help_file,
            status=True)
    return "file"
def navigate_help_history(help_view, prev):
    """
    Navigate through the help history for the provided help view, either going
    forward or backward as appropriate. This will update the current history
    entry before displaying the historic topic.
    If no help view is provided, the current help view is used instead, if any.
    Returns a boolean to tell you if the history position changed or not.
    """
    help_view = help_view or find_help_view()
    if help_view is None:
        return False
    # History state is persisted in the view's settings as a position index
    # plus a list of serialized HistoryData tuples.
    hist_pos = help_view.settings().get("_hh_hist_pos")
    hist_info = help_view.settings().get("_hh_hist")
    # Refuse to step past either end of the history list.
    if (prev and hist_pos == 0) or (not prev and hist_pos == len(hist_info) - 1):
        log("Cannot navigate %s through history; already at the end",
            "backwards" if prev else "forwards", status=True)
        return False
    hist_pos = (hist_pos - 1) if prev else (hist_pos + 1)
    entry = HistoryData._make(hist_info[hist_pos])
    # Update the current history entry's viewport and caret location
    _update_help_history(help_view)
    # Navigate to the destination file in the history; need to manually set
    # the cursor position
    if show_help_topic(entry.package, entry.file, history=False) is not None:
        help_view.sel().clear()
        help_view.sel().add(sublime.Region(entry.caret[0], entry.caret[1]))
        help_view.set_viewport_position(entry.viewport, False)
        help_view.settings().set("_hh_hist_pos", hist_pos)
        return True
    # The navigation failed (e.g. the file no longer exists); the position
    # is left unchanged.
    return False
###----------------------------------------------------------------------------
| StarcoderdataPython |
3302546 | #!/bin/env python
#
# Features.py: classes for handling feature data
# Copyright (C) University of Manchester 2011-2019 <NAME>, <NAME>
# & <NAME>
#
"""
Features.py
Classes for handling feature data.
"""
import logging
import io
from .distances import closestDistanceToRegion
from .utils import make_errline
class FeatureSet(object):
    """Class for storing a set of features
    RNA-seq features consists of genes/transcripts/isomers, which
    are stored individually in Feature objects. This class is a
    container for a collection of Feature objects and provides
    methods to operate on the collection, by creating subsets by
    filtering, and sorting the features based on various criteria.
    """
    def __init__(self,features_file=None,features_list=None):
        """Create a new FeatureSet instance
        Raises an exception if there are errors in the input file data
        (non-numeric fields for start/end positions, end positions
        occurring before start positions, or illegal strand values).
        Arguments:
          features_file (str): (optional) the name of an input
            file to read the feature data from
          features_list (list): (optional) list of Feature objects
            to populate the FeatureSet with
        """
        self.features = []
        self.source_file = None
        # features_file takes precedence over features_list when both
        # are supplied
        if features_file:
            self.loadFeaturesFromFile(features_file)
        elif features_list:
            for feature in features_list:
                self.addFeature(feature)
    def loadFeaturesFromFile(self,features_file):
        """Read features from a file and populate the object
        Arguments:
          features_file: the name of the input file to read features from.
        """
        # Local flags etc
        line_index = 0
        critical_error = False
        # Read in data from file
        with io.open(features_file,'rt') as fp:
            for line in fp:
                # Increment index
                line_index += 1
                # Skip lines starting with #
                if line.startswith('#'):
                    logging.debug("Feature file: skipped line: %s" %
                                  line.strip())
                    continue
                # Lines are tab-delimited and have at least 5 columns:
                # ID  chr  start  end  strand
                items = line.strip().split('\t')
                if len(items) < 5:
                    logging.warning("Feature file: skipped line: %s" %
                                    line.strip())
                    logging.warning("Insufficient number of fields (%d)" %
                                    len(items))
                    continue
                # Check line is valid i.e. start and stop should be
                # numbers, strand should be + or -
                problem_fields = []
                if not items[2].isdigit():
                    problem_fields.append(2)
                if not items[3].isdigit():
                    problem_fields.append(3)
                if not (items[4] == '+' or items[4] == '-'):
                    problem_fields.append(4)
                if problem_fields:
                    # If this is the first line then assume it's a header
                    # and ignore
                    if line_index == 1:
                        logging.warning("%s: first line ignored as header: "
                                        "%s" % (features_file,line.strip()))
                    else:
                        # Indicate problem field(s)
                        logging.error("%s: critical error line %d: bad "
                                      "values:" % (features_file,line_index))
                        logging.error("%s" % line.strip())
                        logging.error("%s" % make_errline(line.strip(),
                                                          problem_fields))
                        # This is a critical error: update flag
                        critical_error = True
                    # Continue to next line
                    continue
                elif int(items[2]) >= int(items[3]):
                    # Start position is same or higher than end
                    logging.error("%s: critical error line %d: 'end' comes "
                                  "before 'start':" % (features_file,
                                                       line_index))
                    logging.error("%s" % line.strip())
                    logging.error("%s" % make_errline(line.strip(),(2,3)))
                    # This is a critical error: update flag but continue
                    # reading
                    critical_error = True
                    continue
                # Store in a new Feature object
                feature = Feature(items[0],
                                  items[1],
                                  items[2],
                                  items[3],
                                  items[4],
                                  source_file=features_file)
                # Additional flag
                if len(items) >= 6:
                    # Is column 6 a flag? Only 0/1 are accepted; anything
                    # else is stored as None ("no valid flag")
                    try:
                        flag_value = int(items[5])
                        if flag_value != 0 and flag_value != 1:
                            flag_value = None
                    except ValueError:
                        flag_value = None
                    # Store value
                    feature.flag = flag_value
                # Store data
                self.features.append(feature)
        # Deal with postponed critical errors: errors are collected while
        # reading so that ALL bad lines are reported before raising
        if critical_error:
            raise Exception("Critical error(s) in '%s'" % features_file)
        # Store the source file
        self.source_file = features_file
        # Return a reference to this object
        return self
    def addFeature(self,feature):
        """Append a feature to the FeatureSet object
        Arguments:
          feature: a Feature instance.
        """
        self.features.append(feature)
    def filterByChr(self,matchChr):
        """Return a subset of features filtered by specified chromosome name
        Returns a new FeatureSet object containing only the data from
        the current object which matches the specified criteria.
        """
        # Make a new (empty) FeatureSet object
        feature_subset = FeatureSet()
        # Populate with only the matching features
        for feature in self.features:
            if feature.chrom == matchChr:
                feature_subset.addFeature(feature)
        return feature_subset
    def filterByStrand(self,matchStrand):
        """Return a subset of features filtered by specified strand
        Returns a new FeatureSet object containing only the data from
        the current object which matches the specified criteria.
        """
        # Make a new (empty) FeatureSet object
        feature_subset = FeatureSet()
        # Populate with only the matching features
        for feature in self.features:
            if feature.strand == matchStrand:
                feature_subset.addFeature(feature)
        return feature_subset
    def filterByFlag(self,matchFlag):
        """Return a subset of features filtered by flag value
        Returns a new FeatureSet object containing only the features from
        the current object which matches the specified criteria.
        Note that if there is no flag (the "isFlagged()" function returns
        False) then an empty set will be returned.
        """
        # Make a new (empty) FeatureSet object
        feature_subset = FeatureSet()
        # Populate with only the matching features
        for feature in self.features:
            if feature.flag == matchFlag:
                feature_subset.addFeature(feature)
        return feature_subset
    def filterByTSS(self,limit1,limit2,exclude_limits=False):
        """Return a subset of features filtered by TSS position
        Returns a new FeatureSet object containing only the features
        from the current object where the TSS positions fall within a
        region defined by upper and lower limits.
        limits can be supplied in either order (i.e. highest/lowest
        or lowest/highest).
        If exclude_limits is False (the default) then TSS positions
        that fall exactly on one of the boundaries are counted as
        being within the region; if it is True then these TSS
        positions will not be considered to lie inside the region.
        """
        # Sort out upper and lower limits
        if limit1 > limit2:
            upper,lower = limit1,limit2
        else:
            upper,lower = limit2,limit1
        # Make a new (empty) FeatureSet object
        feature_subset = FeatureSet()
        # Populate with only the matching features
        for feature in self.features:
            TSS = feature.getTSS()
            if exclude_limits:
                # Strict inequality: boundary TSS positions excluded
                if lower < TSS and TSS < upper:
                    feature_subset.addFeature(feature)
            else:
                # Inclusive: boundary TSS positions count as inside
                if lower <= TSS and TSS <= upper:
                    feature_subset.addFeature(feature)
        return feature_subset
    def sortByDistanceFrom(self,position):
        """Sort the features into order based on distance from a position
        Sorts the features into order of absolute distance of
        their TSS to the specified position (closest first).
        Note that this operates on the current object.
        """
        self.features = sorted(self.features,
                               key=lambda record:
                               abs(record.getTSS()-position))
        return self
    def sortByClosestEdgeTo(self,position1,position2=None):
        """Sort the features into order based on closest edge (TSS or TES)
        Sorts the features into order of smallest absolute distance
        to the specified position (closest first), considering both TSS
        and TES, using the getClosestEdgeDistanceTo method of the
        Feature class.
        Note that this operates on the current object.
        """
        self.features = sorted(self.features,
                               key=lambda record:
                               record.getClosestEdgeDistanceTo(position1,
                                                               position2))
        return self
    def sortByClosestTSSTo(self,position1,position2=None):
        """Sort the features into order based on distance from the TSS
        Sorts the features into order of smallest absolute distance
        to the specified position (closest first) to the TSS position,
        using the getClosestTSSDistanceTo method of the Feature class.
        Note that this operates on the current object.
        """
        self.features = sorted(self.features,
                               key=lambda record:
                               record.getClosestTSSDistanceTo(position1,
                                                              position2))
        return self
    def isFlagged(self):
        """Check whether feature data includes flags
        Checks whether all the Feature records also have a valid flag
        associated with them - if yes then returns True (indicating the
        dataset as a whole is flagged), otherwise returns False.
        """
        # Check all data and look for any None flags
        for feature in self.features:
            if feature.flag is None:
                return False
        # All flags valid
        return True
    def __iter__(self):
        return iter(self.features)
    def __getitem__(self,key):
        # Support both slicing (returns a new FeatureSet) and integer
        # indexing (returns a single Feature); a slice object exposes
        # start/stop/step attributes whereas an integer does not
        try:
            start = key.start
            stop = key.stop
            step = key.step
            slice_ = FeatureSet()
            for feature in self.features[start:stop:step]:
                slice_.addFeature(feature)
            return slice_
        except AttributeError:
            return self.features[key]
    def __len__(self):
        return len(self.features)
    def __eq__(self,other):
        # Sets are equal if they hold equal features in the same order
        if len(self) != len(other):
            return False
        for f1,f2 in zip(self,other):
            if f1 != f2:
                return False
        return True
    def __ne__(self,other):
        if len(self) != len(other):
            return True
        for f1,f2 in zip(self,other):
            if f1 != f2:
                return True
        return False
class Feature(object):
    """Class for storing a 'feature' (gene/transcript/isomer)

    Access the data for the feature using the object's properties:

      id
      chrom
      start
      end
      strand
      tss
      tes

    A feature can also have the following optional data
    associated with it:

    - A source file name, which is set via the 'source_file'
      keyword and accessed via the 'source_file' property.
      It will be None if no filename has been specified.

    There are also convenience methods (getTSS, getTES, getPromoterRegion)
    and methods for calculating various distances.
    """
    def __init__(self,feature_id,chrom,start,end,strand,source_file=None):
        """Create a new Feature instance

        Arguments:
          feature_id: identifier for the feature
          chrom: chromosome name
          start: start position (coerced to int)
          end: end position (coerced to int)
          strand: '+' or '-' (anything else raises ValueError)
          source_file: (optional) name of the file the feature came from
        """
        self.id = feature_id
        self.chrom = chrom
        self.start = int(start)
        self.end = int(end)
        self.strand = strand
        self.flag = None
        self.source_file = source_file
        # Derive the TSS and TES from the strand direction
        if self.strand == '+':
            self.tss = self.start
            self.tes = self.end
        elif self.strand == '-':
            self.tss = self.end
            self.tes = self.start
        else:
            # ValueError is more specific than the generic Exception that
            # was raised previously, and is still caught by any caller
            # trapping Exception
            raise ValueError("Bad strand: '%s'" % self.strand)
    def __repr__(self):
        items = [self.id,
                 self.chrom,
                 str(self.start),
                 str(self.end),
                 self.strand]
        # PEP 8: identity comparison with None, not equality
        if self.flag is not None:
            items.append(str(self.flag))
        return '\t'.join(items)
    def __eq__(self,other):
        # NOTE(review): equality deliberately ignores 'chrom' and 'flag';
        # confirm this is intentional before extending the comparison
        return \
            (self.id == other.id) and \
            (self.strand == other.strand) and \
            (self.start == other.start) and \
            (self.end == other.end)
    def __ne__(self,other):
        # Defined as the strict negation of __eq__ to keep the two
        # consistent by construction
        return not self.__eq__(other)
    def getTSS(self):
        """Return the TSS coordinate

        TSS (transcription start site) is the start position for a +ve
        strand, or end for a -ve strand.
        This is a wrapper for accessing the 'tss' property.
        """
        return self.tss
    def getTES(self):
        """Return the TES coordinate

        TES (transcription end site) is the end position for a +ve
        strand, or start for a -ve strand.
        This is a wrapper for accessing the 'tes' property.
        """
        return self.tes
    def containsPosition(self,coordinate):
        """Check whether a coordinate is within the gene coordinates

        Returns True if coordinate lies within start and end (inclusive),
        False otherwise.
        """
        return (self.start <= coordinate and coordinate <= self.end)
    def getClosestTSSDistanceTo(self,position1,position2=None,
                                zero_inside_region=False):
        """Return distance from TSS to a coordinate or region

        For a single specified position, return the absolute distance
        between the position and the TSS.
        If a second position is given (specifying a region) then return
        smallest absolute distance of (TSS,position1) and (TSS,position2).
        By default there is no special treatment when the TSS lies inside
        the region specified by two positions; to return zero distance in
        these cases, set the 'zero_inside_region' argument to True.
        """
        return closestDistanceToRegion(self.getTSS(),
                                       position1,position2,
                                       zero_inside_region)
    def getClosestTESDistanceTo(self,position1,position2=None,
                                zero_inside_region=False):
        """Return distance from TES to a coordinate or region

        For a single specified position, return the absolute distance
        between the position and the TES.
        If a second position is given (specifying a region) then return
        smallest absolute distance of (TES,position1) and (TES,position2).
        By default there is no special treatment when the TES lies inside
        the region specified by two positions; to return zero distance in
        these cases, set the 'zero_inside_region' argument to True.
        """
        return closestDistanceToRegion(self.getTES(),
                                       position1,position2,
                                       zero_inside_region)
    def getClosestEdgeDistanceTo(self,position1,position2=None,
                                 zero_inside_region=False):
        """Return closest edge distance to a coordinate or region

        For a single specified position, the closest edge is whichever
        of the TSS or TES is nearest (smallest absolute distance) from
        that position i.e. the smallest distance of (TSS,position) and
        (TES,position).
        If a second position is given (specifying a region) then
        the closest edge is whichever of the TSS/TES is closest to
        either position1 or position2 i.e. the smallest distance of
        (TSS,position1), (TES,position1), (TSS,position2) and
        (TES,position2).
        By default there is no special treatment when either the TSS
        or TES lie inside the region specified by two positions; to
        set this to zero, set the 'zero_inside_region' argument to
        True.
        """
        return min(self.getClosestTSSDistanceTo(position1,
                                                position2,
                                                zero_inside_region),
                   self.getClosestTESDistanceTo(position1,
                                                position2,
                                                zero_inside_region))
    def getPromoterRegion(self,to_TSS,from_TSS):
        """Return the coordinates of the promoter region

        The promoter region is a region of coordinates around the
        TSS of a gene, defined by the supplied distances 'to_TSS'
        (the distance downstream from the TSS) and 'from_TSS' (the
        distance upstream from the TSS).
        Returns a tuple containing the start and end coordinates
        defining the promoter region.
        """
        if self.strand == '+':
            return (self.getTSS() - to_TSS,
                    self.getTSS() + from_TSS)
        else:
            return (self.getTSS() + to_TSS,
                    self.getTSS() - from_TSS)
| StarcoderdataPython |
112722 | import numpy as np
import torch.optim as optim
import networks.networks as net
from networks.gtsrb import *
from networks.svhn import *
import torchvision as tv
from torchvision import transforms
from torch.utils.data import DataLoader
from data.idadataloader import DoubleDataset
from config import get_transform
from data.mnist_m import MNISTM
import argparse
parser = argparse.ArgumentParser(description='Sanity Checks Only')
parser.add_argument('setting', default="SO", help='Setting to run (see config.py)')
args = parser.parse_args()
# Dataset root; alternative source/target path pairs for other experiments
# are kept below for reference.
root = '/home/fcdl/dataset/'
#target_path = root + "GTSRB/Final_Training/Images"
#source_path = root + "synthetic_data"
#test_path = root + "GTSRB/Final_Test"
#target_path = root + 'sketchy/photo_train'
#source_path = root + 'sketchy/sketch'
#test_path = root + 'sketchy/photo_test'
EPOCHS = 40
# NOTE(review): NUM_CLASSES is defined but the network below is built with a
# hard-coded class count (SVHN_net(10)); confirm which is authoritative.
NUM_CLASSES = 10
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Weight applied to the domain-confusion loss term in train_epoch.
const = 1
def train_epoch_single(network, train_loader, optimizer):
    """Train ``network`` for one epoch on a single domain (no adaptation).

    Used for the source-only ('SO') and target-only ('TO') baselines: plain
    cross-entropy classification with no domain-adversarial term.
    Uses the module-level ``device`` global for tensor placement.

    Returns:
        (mean batch loss, accuracy percentage) for the epoch.
    """
    src_criterion = nn.CrossEntropyLoss()
    network.train()
    train_loss = 0
    train_correct = 0
    train_total = 0
    batch_idx = 0
    for batch in train_loader:
        optimizer.zero_grad()
        inputs, targets = batch
        inputs = inputs.to(device)
        targets = targets.to(device)
        logits, feat = network.forward(inputs)  # feature vector only
        prediction = network.predict(logits)  # class scores
        loss_bx = src_criterion(prediction, targets)  # CE loss
        loss_bx.backward()
        optimizer.step()
        # get predictions
        _, predicted = prediction.max(1)
        tr_tot = targets.size(0)
        tr_crc = predicted.eq(targets).sum().item()
        # compute statistics
        train_loss += loss_bx.item()
        train_total += tr_tot
        train_correct += tr_crc
        batch_idx += 1
        # Periodic progress report (running accuracy over the epoch so far)
        if batch_idx % 200 == 0:
            print(f"{batch_idx:3d} | Source Loss: {loss_bx:.6f} "
                  f"Source Acc : {100.0 * train_correct / train_total:.2f}")
    train_acc = 100. * train_correct / train_total
    return train_loss/batch_idx, train_acc
def train_epoch(network, train_loader, optimizer):
    """Train one epoch of DANN-style domain-adversarial adaptation.

    Each element of ``train_loader`` yields a (source_batch, target_batch)
    pair. Classification (cross-entropy) loss is computed on the source
    batch only; a binary domain-discriminator loss (source=0, target=1) is
    computed on both batches and added with weight ``const``. ``lam`` is
    the gradient-reversal schedule from the DANN paper, ramping 0 -> 1
    over training.

    NOTE(review): relies on the module-level globals ``start_steps`` and
    ``total_steps`` being set by the __main__ block before each call, plus
    ``device`` and ``const`` — confirm callers always set them.

    Returns:
        (mean batch loss, target-accuracy percentage) for the epoch.
    """
    src_criterion = nn.CrossEntropyLoss()
    dom_criterion = nn.BCEWithLogitsLoss()
    network.train()
    train_loss = 0
    train_correct = 0
    train_total = 0
    train_total_src = 0
    train_correct_src = 0
    batch_idx = 0
    # scheduler.step()
    for source_batch, target_batch in train_loader:
        # Gradient-reversal coefficient schedule (DANN eq. for lambda)
        p = float(batch_idx + start_steps) / total_steps
        lam = 2. / (1. + np.exp(-10 * p)) - 1
        optimizer.zero_grad()
        inputs, targets = source_batch
        inputs = inputs.to(device)
        targets = targets.to(device)  # ground truth class scores
        domains = torch.zeros(inputs.shape[0], 1).to(device)  # source is index 0
        logits, feat = network.forward(inputs)  # feature vector only
        prediction = network.predict(logits)  # class scores
        s_prediction = network.discriminate_domain(feat, lam)  # domain score
        loss_bx_src = src_criterion(prediction, targets)  # CE loss
        loss_bx_dom_s = dom_criterion(s_prediction, domains)
        _, predicted = prediction.max(1)
        tr_tot = targets.size(0)  # only on target
        tr_crc = predicted.eq(targets).sum().item()  # only on target
        train_total_src += tr_tot
        train_correct_src += tr_crc
        # train the target
        inputs, targets = target_batch
        inputs, targets = inputs.to(device), targets.to(device)  # class gt
        domains = torch.ones(inputs.shape[0], 1).to(device)  # target is index 1
        logits, feat = network.forward(inputs)  # feature vector only
        prediction = network.predict(logits)  # class scores
        d_prediction = network.discriminate_domain(feat, lam)  # domain score
        # Target classification loss is computed for monitoring but NOT
        # added to the optimized loss below (unsupervised adaptation)
        loss_bx_tar = src_criterion(prediction, targets)
        loss_bx_dom_t = dom_criterion(d_prediction, domains)
        # sum the losses and do backward propagation
        loss_dom = (loss_bx_dom_s + loss_bx_dom_t)
        #loss_bx = loss_bx_src + loss_bx_tar + const * lam * loss_dom # using target labels
        loss_bx = loss_bx_src + const * loss_dom # don't use target labels
        loss_bx.backward()
        optimizer.step()
        _, predicted = prediction.max(1)
        tr_tot = targets.size(0)  # only on target
        tr_crc = predicted.eq(targets).sum().item()  # only on target
        # compute statistics
        train_loss += loss_bx.item()
        train_total += tr_tot
        train_correct += tr_crc
        batch_idx += 1
        if batch_idx % 200 == 0:
            print(f"Batch {batch_idx} / {len(train_loader)}\n\t"
                  f"Lambda {lam:.4f} "
                  f"Domain Loss: {loss_dom:.6f}\n\t"
                  f"Source Loss: {loss_bx_src:.6f} "
                  f"Source Acc : {100.0 * train_correct_src / train_total_src:.2f} "
                  f"SrcDom Acc : {1 - torch.sigmoid(s_prediction.detach()).mean().cpu().item():.3f}\n\t"
                  f"Target Loss: {loss_bx_tar:.6f} "
                  f"Target Acc : {100.0 * train_correct / train_total:.2f} "
                  f"TarDom Acc : {torch.sigmoid(d_prediction.detach()).cpu().mean().item():.3f}"
                  )
    train_acc = 100. * train_correct / train_total
    return train_loss/batch_idx, train_acc
def valid(network, valid_loader):
    """Evaluate classification and domain-discriminator accuracy.

    Runs the network on ``valid_loader`` under ``torch.no_grad()`` and
    accumulates classification loss/accuracy plus the mean sigmoid output
    of the domain discriminator (lambda=0, i.e. no gradient reversal).
    For target-domain validation data a perfect discriminator scores 100;
    50 indicates domain confusion (the adaptation goal).

    Returns:
        (mean loss, accuracy percentage, domain accuracy percentage).
    """
    criterion = nn.CrossEntropyLoss()
    # make validation
    network.eval()
    test_loss = 0
    test_correct = 0
    test_total = 0
    domain_acc = 0
    with torch.no_grad():
        for inputs, targets in valid_loader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            outputs, feats = network.forward(inputs)
            predictions = network.predict(outputs)  # class score
            domains = network.discriminate_domain(feats, 0)  # domain score (correct if 1., 0.5 is wanted)
            loss_bx = criterion(predictions, targets)
            test_loss += loss_bx.item()
            _, predicted = predictions.max(1)
            test_total += targets.size(0)
            test_correct += predicted.eq(targets).sum().item()
            domain_acc += torch.sigmoid(domains.cpu().detach()).sum().item()
    # normalize and print stats
    test_acc = 100. * test_correct / test_total
    domain_acc = 100. * domain_acc / test_total
    test_loss /= len(valid_loader)
    return test_loss, test_acc, domain_acc
if __name__ == '__main__':
    # define transform
    transform, augmentation = get_transform('svhn')
    augmentation = transforms.Compose([augmentation, transform])
    print(transform, augmentation)
    # define dataset: SVHN (source) -> MNIST (target); MNIST is expanded to
    # 3 channels to match. Alternative dataset pairs kept for reference.
    #target = tv.datasets.ImageFolder(target_path, transform=augmentation)
    #source = tv.datasets.ImageFolder(source_path, transform=augmentation)
    #test = tv.datasets.ImageFolder(test_path, transform=transform)
    source = tv.datasets.SVHN(root, transform=augmentation)
    target = tv.datasets.MNIST(root, transform=tv.transforms.Compose([tv.transforms.Grayscale(3), transform]))
    test = tv.datasets.MNIST(root, train=False, transform=tv.transforms.Compose([tv.transforms.Grayscale(3), transform]))
    #source = tv.datasets.MNIST(root, transform=tv.transforms.Compose([tv.transforms.Grayscale(3), transform]))
    #target = MNISTM(root, transform=transform)
    #test = MNISTM(root, train=False, transform=transform)
    train = DoubleDataset(source, target)
    # define dataloader
    train_loader = DataLoader(train, 128, True, num_workers=8)
    source_loader = DataLoader(source, 128, True, num_workers=8)
    target_loader = DataLoader(target, 128, True, num_workers=8)
    test_loader = DataLoader(test, 128, False, num_workers=8)
    # get network
    # NOTE(review): this rebinds 'net', shadowing the 'networks.networks as
    # net' module import — the commented lines like net.cifar_resnet_revgrad
    # only work BEFORE this assignment.
    #net = net.cifar_resnet_revgrad(None, NUM_CLASSES).to(device)
    #net = GTSRB_net(43).to(device)
    net = SVHN_net(10).to(device)
    #net = net.wide_resnet_revgrad(None, 125).to(device)
    #net = net.resnet50(True, 125).to(device)
    #net = LeNet().to(device)
    #optimizer = optim.SGD(net.parameters(), lr=0.1)
    #scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [int(0.7*EPOCHS), int(0.9*EPOCHS)], gamma=0.1)
    # total_steps / start_steps are module globals read by train_epoch for
    # the lambda schedule.
    total_steps = EPOCHS * len(train_loader)
    print("Do a validation before starting to check it is ok...")
    val_loss, val_acc, dom_acc = valid(net, valid_loader=test_loader)
    print(f"Epoch {-1:03d} : Test Loss {val_loss:.6f}, Test Acc {val_acc:.2f}, Domain Acc {dom_acc:.2f}")
    print("Result should be random guessing, i.e. 10% accuracy")
    # define training steps
    for epoch in range(EPOCHS):
        # steps
        start_steps = epoch * len(train_loader)
        # train epoch: learning rate follows the DANN annealing schedule
        learning_rate = 0.01 / ((1 + 10 * (epoch)/EPOCHS)**0.75)
        optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
        # scheduler.step()
        print(f"Learning rate: {learning_rate}")
        if args.setting == 'SO':
            train_loss, train_acc = train_epoch_single(net, train_loader=source_loader, optimizer=optimizer)
        elif args.setting == 'TO':
            train_loss, train_acc = train_epoch_single(net, train_loader=target_loader, optimizer=optimizer)
        else:
            train_loss, train_acc = train_epoch(net, train_loader=train_loader, optimizer=optimizer)
        # valid!
        val_loss, val_acc, dom_acc = valid(net, valid_loader=test_loader)
        print(f"\nEpoch {epoch+1:03d} : Test Loss {val_loss:.6f}, Test Acc {val_acc:.2f}, Domain Acc {dom_acc:.2f}\n")
        # Early stop once the training loss has effectively converged
        if train_loss < 1e-4:
            break
    print(".... END")
| StarcoderdataPython |
3300727 | <gh_stars>0
class StatusModel:
    """Flag holder describing the outcome and direction of an operation.

    Derives a set of boolean flags from whether the operation was processed
    and from its movement kind ('in', 'out', 'info' or 'revert'). When the
    operation failed, the message is also exposed as ``error_type``.
    """

    # Class-level defaults so every attribute exists even before __init__
    # runs; 'error_type' was previously missing from this list.
    is_error = None
    is_processed = None
    is_income = None
    is_info = None
    is_outcome = None
    is_reverted = None
    error_type = None
    message = None

    def __init__(self, processed: bool, move: str, message: str = None):
        """Populate the flags.

        Arguments:
            processed: True when the operation completed successfully.
            move: movement kind: 'in', 'out', 'info' or 'revert'.
            message: optional human-readable message; doubles as the
                error type when processing failed.
        """
        self.is_error = not processed
        self.is_processed = processed
        # A 'revert' counts as an incoming movement as well as a reversal.
        self.is_income = move in ('in', 'revert')
        self.is_info = move == 'info'
        self.is_outcome = move == 'out'
        self.is_reverted = move == 'revert'
        self.error_type = message if self.is_error else None
        self.message = message

    def __str__(self):
        return ('<StatusModel is_error={} is_processed={} is_income={} is_info={} '
                'is_outcome={} is_reverted={} error_type={} message={}>').format(
                    self.is_error,
                    self.is_processed,
                    self.is_income,
                    self.is_info,
                    self.is_outcome,
                    self.is_reverted,
                    self.error_type,
                    self.message)
| StarcoderdataPython |
74442 | <reponame>j-varun/enas
import sys
import os
import time
import numpy as np
import tensorflow as tf
from enas.controller import Controller
from enas.utils import get_train_ops
from enas.common_ops import stack_lstm
from tensorflow.python.training import moving_averages
class ConvController(Controller):
  def __init__(self,
               num_branches=6,
               num_layers=4,
               num_blocks_per_branch=8,
               lstm_size=32,
               lstm_num_layers=2,
               lstm_keep_prob=1.0,
               tanh_constant=None,
               temperature=None,
               lr_init=1e-3,
               lr_dec_start=0,
               lr_dec_every=100,
               lr_dec_rate=0.9,
               l2_reg=0,
               clip_mode=None,
               grad_bound=None,
               use_critic=False,
               bl_dec=0.999,
               optim_algo="adam",
               sync_replicas=False,
               num_aggregate=None,
               num_replicas=None,
               name="controller"):
    """Store the controller hyper-parameters and build its graph.

    num_layers/num_branches define the architecture search space sampled
    per child network; lstm_* configure the recurrent sampler; tanh_constant
    and temperature optionally soften/sharpen the sampling logits; the lr_*,
    l2_reg, clip_mode and grad_bound options configure the policy-gradient
    optimizer; use_critic/bl_dec select the baseline used for REINFORCE.
    Immediately creates the trainable parameters and the sampler ops.
    """
    print "-" * 80
    print "Building ConvController"
    self.num_branches = num_branches
    self.num_layers = num_layers
    self.num_blocks_per_branch = num_blocks_per_branch
    self.lstm_size = lstm_size
    self.lstm_num_layers = lstm_num_layers
    self.lstm_keep_prob = lstm_keep_prob
    self.tanh_constant = tanh_constant
    self.temperature = temperature
    self.lr_init = lr_init
    self.lr_dec_start = lr_dec_start
    self.lr_dec_every = lr_dec_every
    self.lr_dec_rate = lr_dec_rate
    self.l2_reg = l2_reg
    self.clip_mode = clip_mode
    self.grad_bound = grad_bound
    self.use_critic = use_critic
    self.bl_dec = bl_dec
    self.optim_algo = optim_algo
    self.sync_replicas = sync_replicas
    self.num_aggregate = num_aggregate
    self.num_replicas = num_replicas
    self.name = name
    # Build parameters first, then the sampling ops that use them.
    self._create_params()
    self._build_sampler()
  def _create_params(self):
    """Create the trainable variables for the controller.

    Builds (under this controller's variable scope):
      - w_lstm: one weight matrix per LSTM layer of the sampler;
      - g_emb: the learned "go" embedding fed as the first sampler input;
      - w_emb: embeddings for each sampled block decision;
      - w_soft: the softmax projection from LSTM output to block logits;
      - w_critic: projection used for the value-function baseline.
    """
    with tf.variable_scope(self.name):
      with tf.variable_scope("lstm"):
        self.w_lstm = []
        for layer_id in xrange(self.lstm_num_layers):
          with tf.variable_scope("layer_{}".format(layer_id)):
            # Standard LSTM parameterization: [input; hidden] -> 4 gates
            w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size])
            self.w_lstm.append(w)
      # Number of non-empty block subsets per branch (2^k - 1)
      self.num_configs = (2 ** self.num_blocks_per_branch) - 1
      with tf.variable_scope("embedding"):
        self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size])
        self.w_emb = tf.get_variable("w", [self.num_blocks_per_branch,
                                           self.lstm_size])
      with tf.variable_scope("softmax"):
        self.w_soft = tf.get_variable("w", [self.lstm_size,
                                            self.num_blocks_per_branch])
      with tf.variable_scope("critic"):
        self.w_critic = tf.get_variable("w", [self.lstm_size, 1])
def _build_sampler(self):
"""Build the sampler ops and the log_prob ops."""
arc_seq = []
sample_log_probs = []
all_h = []
# sampler ops
inputs = self.g_emb
prev_c = [tf.zeros([1, self.lstm_size], dtype=tf.float32)
for _ in xrange(self.lstm_num_layers)]
prev_h = [tf.zeros([1, self.lstm_size], dtype=tf.float32)
for _ in xrange(self.lstm_num_layers)]
for layer_id in xrange(self.num_layers):
for branch_id in xrange(self.num_branches):
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
all_h.append(tf.stop_gradient(next_h[-1]))
logits = tf.matmul(next_h[-1], self.w_soft)
if self.temperature is not None:
logits /= self.temperature
if self.tanh_constant is not None:
logits = self.tanh_constant * tf.tanh(logits)
config_id = tf.multinomial(logits, 1)
config_id = tf.to_int32(config_id)
config_id = tf.reshape(config_id, [1])
arc_seq.append(config_id)
log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=config_id)
sample_log_probs.append(log_prob)
inputs = tf.nn.embedding_lookup(self.w_emb, config_id)
arc_seq = tf.concat(arc_seq, axis=0)
self.sample_arc = arc_seq
self.sample_log_probs = tf.concat(sample_log_probs, axis=0)
self.ppl = tf.exp(tf.reduce_sum(self.sample_log_probs) /
tf.to_float(self.num_layers * self.num_branches))
self.all_h = all_h
def build_trainer(self, child_model):
# actor
child_model.build_valid_rl()
self.valid_acc = (tf.to_float(child_model.valid_shuffle_acc) /
tf.to_float(child_model.batch_size))
self.reward = self.valid_acc
if self.use_critic:
# critic
all_h = tf.concat(self.all_h, axis=0)
value_function = tf.matmul(all_h, self.w_critic)
advantage = value_function - self.reward
critic_loss = tf.reduce_sum(advantage ** 2)
self.baseline = tf.reduce_mean(value_function)
self.loss = -tf.reduce_mean(self.sample_log_probs * advantage)
critic_train_step = tf.Variable(
0, dtype=tf.int32, trainable=False, name="critic_train_step")
critic_train_op, _, _, _ = get_train_ops(
critic_loss,
[self.w_critic],
critic_train_step,
clip_mode=None,
lr_init=1e-3,
lr_dec_start=0,
lr_dec_every=int(1e9),
optim_algo="adam",
sync_replicas=False)
else:
# or baseline
self.sample_log_probs = tf.reduce_sum(self.sample_log_probs)
self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)
baseline_update = tf.assign_sub(
self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))
with tf.control_dependencies([baseline_update]):
self.reward = tf.identity(self.reward)
self.loss = self.sample_log_probs * (self.reward - self.baseline)
self.train_step = tf.Variable(
0, dtype=tf.int32, trainable=False, name="train_step")
tf_variables = [var for var in tf.trainable_variables()
if var.name.startswith(self.name)
and "w_critic" not in var.name]
print "-" * 80
for var in tf_variables:
print var
self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(
self.loss,
tf_variables,
self.train_step,
clip_mode=self.clip_mode,
grad_bound=self.grad_bound,
l2_reg=self.l2_reg,
lr_init=self.lr_init,
lr_dec_start=self.lr_dec_start,
lr_dec_every=self.lr_dec_every,
lr_dec_rate=self.lr_dec_rate,
optim_algo=self.optim_algo,
sync_replicas=self.sync_replicas,
num_aggregate=self.num_aggregate,
num_replicas=self.num_replicas)
if self.use_critic:
self.train_op = tf.group(self.train_op, critic_train_op)
# | StarcoderdataPython |
from __future__ import annotations
import typing
from .. import spec
from .. import exceptions
from .. import crud_utils
from . import table_utils
def row_to_dict(row: spec.SARow) -> dict[str, typing.Any]:
    """Convert a SQLAlchemy result row into a plain ``dict``."""
    as_mapping = row._asdict()
    return as_mapping
def replace_row_foreign_keys(
    *,
    row: spec.Row,
    conn: spec.SAConnection,
    table: spec.TableRef,
    foreign_name: str | None = None,
    foreign_names: typing.Mapping[str, str] | None = None,
    insert_missing_rows: bool = False,
) -> dict[str, typing.Any]:
    """Return a copy of ``row`` with foreign-key string values replaced by ids.

    For every column of ``table`` that has exactly one foreign key and whose
    value in ``row`` is a string, the referenced table is searched for the row
    whose lookup column equals that string, and the value is replaced by the
    id of that row.

    :param row: mapping of column name to value
    :param conn: database connection used for lookups (and optional inserts)
    :param table: table object, or table name to resolve from the database
    :param foreign_name: lookup column name used for every foreign table
    :param foreign_names: per-column lookup column names (alternative to
        ``foreign_name``)
    :param insert_missing_rows: insert a new foreign row instead of raising
        when no match exists
    :raises ValueError: if neither ``foreign_name`` nor ``foreign_names`` is
        given when a foreign-key column is encountered
    :raises exceptions.DoesNotExistException: if a referenced row does not
        exist and ``insert_missing_rows`` is false
    """
    if isinstance(table, str):
        table = table_utils.create_table_object_from_db(
            table_name=table, conn=conn
        )
    new_row = {}
    for column_name, column_value in row.items():
        column = table.c[column_name]
        if len(column.foreign_keys) == 1 and isinstance(column_value, str):
            if foreign_name is not None:
                name = foreign_name
            elif foreign_names is not None:
                name = foreign_names[column_name]
            else:
                # ValueError is more precise than a bare Exception and stays
                # backward compatible for callers that catch Exception.
                raise ValueError('must specify foreign_name or foreign_names')
            foreign_table = next(iter(column.foreign_keys)).column.table
            result = crud_utils.select(
                conn=conn,
                table=foreign_table,
                where_equals={name: column_value},
                include_id=True,
                row_count='at_most_one',
                return_count='one',
            )
            if result is None:
                if insert_missing_rows:
                    foreign_id = crud_utils.insert_row(
                        row={name: column_value},
                        table=foreign_table,
                        conn=conn,
                    )
                else:
                    raise exceptions.DoesNotExistException('row not found')
            else:
                # first key of the single-row result is the row id
                # (include_id=True, at most one row requested above)
                foreign_id = next(iter(result))
            new_row[column_name] = foreign_id
        else:
            new_row[column_name] = column_value
    return new_row
# | StarcoderdataPython |
# This file is automatically generated by the rmf-codegen project.
#
# The Python code generator is maintained by Lab Digital. If you want to
# contribute to this project then please do not edit this file directly
# but send a pull request to the Lab Digital fork of rmf-codegen at
# https://github.com/labd/rmf-codegen
import datetime
import enum
import typing
from ._abstract import _BaseType
from .common import BaseResource, Reference, ReferenceTypeId, ResourceIdentifier
if typing.TYPE_CHECKING:
from .common import (
CreatedBy,
LastModifiedBy,
LocalizedString,
Money,
QueryPrice,
Reference,
ReferenceTypeId,
TypedMoney,
)
__all__ = [
"ProductDiscount",
"ProductDiscountChangeIsActiveAction",
"ProductDiscountChangeNameAction",
"ProductDiscountChangePredicateAction",
"ProductDiscountChangeSortOrderAction",
"ProductDiscountChangeValueAction",
"ProductDiscountDraft",
"ProductDiscountMatchQuery",
"ProductDiscountPagedQueryResponse",
"ProductDiscountReference",
"ProductDiscountResourceIdentifier",
"ProductDiscountSetDescriptionAction",
"ProductDiscountSetKeyAction",
"ProductDiscountSetValidFromAction",
"ProductDiscountSetValidFromAndUntilAction",
"ProductDiscountSetValidUntilAction",
"ProductDiscountUpdate",
"ProductDiscountUpdateAction",
"ProductDiscountValue",
"ProductDiscountValueAbsolute",
"ProductDiscountValueAbsoluteDraft",
"ProductDiscountValueDraft",
"ProductDiscountValueExternal",
"ProductDiscountValueExternalDraft",
"ProductDiscountValueRelative",
"ProductDiscountValueRelativeDraft",
]
class ProductDiscount(BaseResource):
    """Read model of a commercetools ProductDiscount resource."""

    #: Present on resources updated after 1/02/2019 except for events not tracked.
    last_modified_by: typing.Optional["LastModifiedBy"]
    #: Present on resources created after 1/02/2019 except for events not tracked.
    created_by: typing.Optional["CreatedBy"]
    name: "LocalizedString"
    #: User-specific unique identifier for a product discount.
    #: Must be unique across a project.
    key: typing.Optional[str]
    description: typing.Optional["LocalizedString"]
    value: "ProductDiscountValue"
    #: A valid ProductDiscount Predicate.
    predicate: str
    #: The string contains a number between 0 and 1.
    #: A discount with greater sortOrder is prioritized higher than a discount with lower sortOrder.
    #: A sortOrder must be unambiguous.
    sort_order: str
    #: Only active discount will be applied to product prices.
    is_active: bool
    #: The platform will generate this array from the predicate.
    #: It contains the references of all the resources that are addressed in the predicate.
    references: typing.List["Reference"]
    #: The time from which the discount should be effective.
    #: Please take Eventual Consistency into account for calculated product discount values.
    valid_from: typing.Optional[datetime.datetime]
    #: The time from which the discount should be ineffective.
    #: Please take Eventual Consistency into account for calculated undiscounted values.
    valid_until: typing.Optional[datetime.datetime]

    def __init__(
        self,
        *,
        id: str,
        version: int,
        created_at: datetime.datetime,
        last_modified_at: datetime.datetime,
        last_modified_by: typing.Optional["LastModifiedBy"] = None,
        created_by: typing.Optional["CreatedBy"] = None,
        name: "LocalizedString",
        key: typing.Optional[str] = None,
        description: typing.Optional["LocalizedString"] = None,
        value: "ProductDiscountValue",
        predicate: str,
        sort_order: str,
        is_active: bool,
        references: typing.List["Reference"],
        valid_from: typing.Optional[datetime.datetime] = None,
        valid_until: typing.Optional[datetime.datetime] = None
    ):
        self.last_modified_by = last_modified_by
        self.created_by = created_by
        self.name = name
        self.key = key
        self.description = description
        self.value = value
        self.predicate = predicate
        self.sort_order = sort_order
        self.is_active = is_active
        self.references = references
        self.valid_from = valid_from
        self.valid_until = valid_until
        super().__init__(
            id=id,
            version=version,
            created_at=created_at,
            last_modified_at=last_modified_at,
        )

    @classmethod
    def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "ProductDiscount":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountSchema

        return ProductDiscountSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountSchema

        return ProductDiscountSchema().dump(self)
class ProductDiscountDraft(_BaseType):
    """Payload used to create a new ProductDiscount."""

    name: "LocalizedString"
    #: User-specific unique identifier for a product discount.
    #: Must be unique across a project.
    #: The field can be reset using the Set Key UpdateAction
    key: typing.Optional[str]
    description: typing.Optional["LocalizedString"]
    value: "ProductDiscountValueDraft"
    #: A valid ProductDiscount Predicate.
    predicate: str
    #: The string must contain a decimal number between 0 and 1.
    #: A discount with greater sortOrder is prioritized higher than a discount with lower sortOrder.
    sort_order: str
    #: If set to `true` the discount will be applied to product prices.
    is_active: bool
    #: The time from which the discount should be effective.
    #: Please take Eventual Consistency into account for calculated product discount values.
    valid_from: typing.Optional[datetime.datetime]
    #: The time from which the discount should be effective.
    #: Please take Eventual Consistency into account for calculated undiscounted values.
    valid_until: typing.Optional[datetime.datetime]

    def __init__(
        self,
        *,
        name: "LocalizedString",
        key: typing.Optional[str] = None,
        description: typing.Optional["LocalizedString"] = None,
        value: "ProductDiscountValueDraft",
        predicate: str,
        sort_order: str,
        is_active: bool,
        valid_from: typing.Optional[datetime.datetime] = None,
        valid_until: typing.Optional[datetime.datetime] = None
    ):
        self.name = name
        self.key = key
        self.description = description
        self.value = value
        self.predicate = predicate
        self.sort_order = sort_order
        self.is_active = is_active
        self.valid_from = valid_from
        self.valid_until = valid_until
        super().__init__()

    @classmethod
    def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "ProductDiscountDraft":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountDraftSchema

        return ProductDiscountDraftSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountDraftSchema

        return ProductDiscountDraftSchema().dump(self)
class ProductDiscountMatchQuery(_BaseType):
    """Query body for finding the discount matching a given product price."""

    product_id: str
    variant_id: int
    staged: bool
    price: "QueryPrice"

    def __init__(
        self, *, product_id: str, variant_id: int, staged: bool, price: "QueryPrice"
    ):
        self.product_id = product_id
        self.variant_id = variant_id
        self.staged = staged
        self.price = price
        super().__init__()

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountMatchQuery":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountMatchQuerySchema

        return ProductDiscountMatchQuerySchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountMatchQuerySchema

        return ProductDiscountMatchQuerySchema().dump(self)
class ProductDiscountPagedQueryResponse(_BaseType):
    """Paged result set of ProductDiscount queries."""

    limit: int
    count: int
    total: typing.Optional[int]
    offset: int
    results: typing.List["ProductDiscount"]

    def __init__(
        self,
        *,
        limit: int,
        count: int,
        total: typing.Optional[int] = None,
        offset: int,
        results: typing.List["ProductDiscount"]
    ):
        self.limit = limit
        self.count = count
        self.total = total
        self.offset = offset
        self.results = results
        super().__init__()

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountPagedQueryResponse":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountPagedQueryResponseSchema

        return ProductDiscountPagedQueryResponseSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountPagedQueryResponseSchema

        return ProductDiscountPagedQueryResponseSchema().dump(self)
class ProductDiscountReference(Reference):
    """Reference to a ProductDiscount, optionally with the expanded object."""

    obj: typing.Optional["ProductDiscount"]

    def __init__(self, *, id: str, obj: typing.Optional["ProductDiscount"] = None):
        self.obj = obj
        super().__init__(id=id, type_id=ReferenceTypeId.PRODUCT_DISCOUNT)

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountReference":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountReferenceSchema

        return ProductDiscountReferenceSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountReferenceSchema

        return ProductDiscountReferenceSchema().dump(self)
class ProductDiscountResourceIdentifier(ResourceIdentifier):
    """Identifier of a ProductDiscount by id or key."""

    def __init__(
        self, *, id: typing.Optional[str] = None, key: typing.Optional[str] = None
    ):
        super().__init__(id=id, key=key, type_id=ReferenceTypeId.PRODUCT_DISCOUNT)

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountResourceIdentifier":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountResourceIdentifierSchema

        return ProductDiscountResourceIdentifierSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountResourceIdentifierSchema

        return ProductDiscountResourceIdentifierSchema().dump(self)
class ProductDiscountUpdate(_BaseType):
    """Versioned batch of update actions applied to a ProductDiscount."""

    version: int
    actions: typing.List["ProductDiscountUpdateAction"]

    def __init__(
        self, *, version: int, actions: typing.List["ProductDiscountUpdateAction"]
    ):
        self.version = version
        self.actions = actions
        super().__init__()

    @classmethod
    def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "ProductDiscountUpdate":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountUpdateSchema

        return ProductDiscountUpdateSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountUpdateSchema

        return ProductDiscountUpdateSchema().dump(self)
class ProductDiscountUpdateAction(_BaseType):
    """Base class for ProductDiscount update actions.

    ``deserialize`` dispatches on the ``action`` discriminator to the
    concrete subclass schema.
    """

    action: str

    def __init__(self, *, action: str):
        self.action = action
        super().__init__()

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountUpdateAction":
        # NOTE(review): an unrecognised "action" value falls through every
        # branch and implicitly returns None, despite the declared return
        # type — TODO confirm the codegen intends silent None here.
        if data["action"] == "changeIsActive":
            from ._schemas.product_discount import (
                ProductDiscountChangeIsActiveActionSchema,
            )

            return ProductDiscountChangeIsActiveActionSchema().load(data)
        if data["action"] == "changeName":
            from ._schemas.product_discount import ProductDiscountChangeNameActionSchema

            return ProductDiscountChangeNameActionSchema().load(data)
        if data["action"] == "changePredicate":
            from ._schemas.product_discount import (
                ProductDiscountChangePredicateActionSchema,
            )

            return ProductDiscountChangePredicateActionSchema().load(data)
        if data["action"] == "changeSortOrder":
            from ._schemas.product_discount import (
                ProductDiscountChangeSortOrderActionSchema,
            )

            return ProductDiscountChangeSortOrderActionSchema().load(data)
        if data["action"] == "changeValue":
            from ._schemas.product_discount import (
                ProductDiscountChangeValueActionSchema,
            )

            return ProductDiscountChangeValueActionSchema().load(data)
        if data["action"] == "setDescription":
            from ._schemas.product_discount import (
                ProductDiscountSetDescriptionActionSchema,
            )

            return ProductDiscountSetDescriptionActionSchema().load(data)
        if data["action"] == "setKey":
            from ._schemas.product_discount import ProductDiscountSetKeyActionSchema

            return ProductDiscountSetKeyActionSchema().load(data)
        if data["action"] == "setValidFrom":
            from ._schemas.product_discount import (
                ProductDiscountSetValidFromActionSchema,
            )

            return ProductDiscountSetValidFromActionSchema().load(data)
        if data["action"] == "setValidFromAndUntil":
            from ._schemas.product_discount import (
                ProductDiscountSetValidFromAndUntilActionSchema,
            )

            return ProductDiscountSetValidFromAndUntilActionSchema().load(data)
        if data["action"] == "setValidUntil":
            from ._schemas.product_discount import (
                ProductDiscountSetValidUntilActionSchema,
            )

            return ProductDiscountSetValidUntilActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountUpdateActionSchema

        return ProductDiscountUpdateActionSchema().dump(self)
class ProductDiscountValue(_BaseType):
    """Base class for discount values; ``deserialize`` dispatches on ``type``."""

    type: str

    def __init__(self, *, type: str):
        self.type = type
        super().__init__()

    @classmethod
    def deserialize(cls, data: typing.Dict[str, typing.Any]) -> "ProductDiscountValue":
        # NOTE(review): an unrecognised "type" value falls through and
        # implicitly returns None — TODO confirm this is intended.
        if data["type"] == "absolute":
            from ._schemas.product_discount import ProductDiscountValueAbsoluteSchema

            return ProductDiscountValueAbsoluteSchema().load(data)
        if data["type"] == "external":
            from ._schemas.product_discount import ProductDiscountValueExternalSchema

            return ProductDiscountValueExternalSchema().load(data)
        if data["type"] == "relative":
            from ._schemas.product_discount import ProductDiscountValueRelativeSchema

            return ProductDiscountValueRelativeSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountValueSchema

        return ProductDiscountValueSchema().dump(self)
class ProductDiscountValueAbsolute(ProductDiscountValue):
    """Absolute discount: fixed money amounts (one per currency)."""

    money: typing.List["TypedMoney"]

    def __init__(self, *, money: typing.List["TypedMoney"]):
        self.money = money
        super().__init__(type="absolute")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountValueAbsolute":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountValueAbsoluteSchema

        return ProductDiscountValueAbsoluteSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountValueAbsoluteSchema

        return ProductDiscountValueAbsoluteSchema().dump(self)
class ProductDiscountValueDraft(_BaseType):
    """Base class for discount value drafts; ``deserialize`` dispatches on ``type``."""

    type: str

    def __init__(self, *, type: str):
        self.type = type
        super().__init__()

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountValueDraft":
        # NOTE(review): an unrecognised "type" value falls through and
        # implicitly returns None — TODO confirm this is intended.
        if data["type"] == "absolute":
            from ._schemas.product_discount import (
                ProductDiscountValueAbsoluteDraftSchema,
            )

            return ProductDiscountValueAbsoluteDraftSchema().load(data)
        if data["type"] == "external":
            from ._schemas.product_discount import (
                ProductDiscountValueExternalDraftSchema,
            )

            return ProductDiscountValueExternalDraftSchema().load(data)
        if data["type"] == "relative":
            from ._schemas.product_discount import (
                ProductDiscountValueRelativeDraftSchema,
            )

            return ProductDiscountValueRelativeDraftSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountValueDraftSchema

        return ProductDiscountValueDraftSchema().dump(self)
class ProductDiscountValueAbsoluteDraft(ProductDiscountValueDraft):
    """Draft for an absolute discount value."""

    money: typing.List["Money"]

    def __init__(self, *, money: typing.List["Money"]):
        self.money = money
        super().__init__(type="absolute")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountValueAbsoluteDraft":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountValueAbsoluteDraftSchema

        return ProductDiscountValueAbsoluteDraftSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountValueAbsoluteDraftSchema

        return ProductDiscountValueAbsoluteDraftSchema().dump(self)
class ProductDiscountValueExternal(ProductDiscountValue):
    """Externally-managed discount value (no amount stored here)."""

    def __init__(self):
        super().__init__(type="external")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountValueExternal":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountValueExternalSchema

        return ProductDiscountValueExternalSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountValueExternalSchema

        return ProductDiscountValueExternalSchema().dump(self)
class ProductDiscountValueExternalDraft(ProductDiscountValueDraft):
    """Draft for an externally-managed discount value."""

    def __init__(self):
        super().__init__(type="external")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountValueExternalDraft":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountValueExternalDraftSchema

        return ProductDiscountValueExternalDraftSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountValueExternalDraftSchema

        return ProductDiscountValueExternalDraftSchema().dump(self)
class ProductDiscountValueRelative(ProductDiscountValue):
    """Relative discount expressed in permyriad (1/10000)."""

    permyriad: int

    def __init__(self, *, permyriad: int):
        self.permyriad = permyriad
        super().__init__(type="relative")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountValueRelative":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountValueRelativeSchema

        return ProductDiscountValueRelativeSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountValueRelativeSchema

        return ProductDiscountValueRelativeSchema().dump(self)
class ProductDiscountValueRelativeDraft(ProductDiscountValueDraft):
    """Draft for a relative (permyriad) discount value."""

    permyriad: int

    def __init__(self, *, permyriad: int):
        self.permyriad = permyriad
        super().__init__(type="relative")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountValueRelativeDraft":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountValueRelativeDraftSchema

        return ProductDiscountValueRelativeDraftSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountValueRelativeDraftSchema

        return ProductDiscountValueRelativeDraftSchema().dump(self)
class ProductDiscountChangeIsActiveAction(ProductDiscountUpdateAction):
    """Update action toggling whether the discount is active."""

    is_active: bool

    def __init__(self, *, is_active: bool):
        self.is_active = is_active
        super().__init__(action="changeIsActive")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountChangeIsActiveAction":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountChangeIsActiveActionSchema

        return ProductDiscountChangeIsActiveActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountChangeIsActiveActionSchema

        return ProductDiscountChangeIsActiveActionSchema().dump(self)
class ProductDiscountChangeNameAction(ProductDiscountUpdateAction):
    """Update action changing the discount's localized name."""

    name: "LocalizedString"

    def __init__(self, *, name: "LocalizedString"):
        self.name = name
        super().__init__(action="changeName")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountChangeNameAction":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountChangeNameActionSchema

        return ProductDiscountChangeNameActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountChangeNameActionSchema

        return ProductDiscountChangeNameActionSchema().dump(self)
class ProductDiscountChangePredicateAction(ProductDiscountUpdateAction):
    """Update action changing the discount's matching predicate."""

    #: A valid ProductDiscount Predicate.
    predicate: str

    def __init__(self, *, predicate: str):
        self.predicate = predicate
        super().__init__(action="changePredicate")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountChangePredicateAction":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import (
            ProductDiscountChangePredicateActionSchema,
        )

        return ProductDiscountChangePredicateActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import (
            ProductDiscountChangePredicateActionSchema,
        )

        return ProductDiscountChangePredicateActionSchema().dump(self)
class ProductDiscountChangeSortOrderAction(ProductDiscountUpdateAction):
    """Update action changing the discount's sort order."""

    #: The string must contain a number between 0 and 1.
    #: A discount with greater sortOrder is prioritized higher than a discount with lower sortOrder.
    sort_order: str

    def __init__(self, *, sort_order: str):
        self.sort_order = sort_order
        super().__init__(action="changeSortOrder")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountChangeSortOrderAction":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import (
            ProductDiscountChangeSortOrderActionSchema,
        )

        return ProductDiscountChangeSortOrderActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import (
            ProductDiscountChangeSortOrderActionSchema,
        )

        return ProductDiscountChangeSortOrderActionSchema().dump(self)
class ProductDiscountChangeValueAction(ProductDiscountUpdateAction):
    """Update action changing the discount's value."""

    value: "ProductDiscountValueDraft"

    def __init__(self, *, value: "ProductDiscountValueDraft"):
        self.value = value
        super().__init__(action="changeValue")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountChangeValueAction":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountChangeValueActionSchema

        return ProductDiscountChangeValueActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountChangeValueActionSchema

        return ProductDiscountChangeValueActionSchema().dump(self)
class ProductDiscountSetDescriptionAction(ProductDiscountUpdateAction):
    """Update action setting (or clearing, with None) the description."""

    description: typing.Optional["LocalizedString"]

    def __init__(self, *, description: typing.Optional["LocalizedString"] = None):
        self.description = description
        super().__init__(action="setDescription")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountSetDescriptionAction":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountSetDescriptionActionSchema

        return ProductDiscountSetDescriptionActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountSetDescriptionActionSchema

        return ProductDiscountSetDescriptionActionSchema().dump(self)
class ProductDiscountSetKeyAction(ProductDiscountUpdateAction):
    """Update action setting (or removing, with None) the key."""

    #: The key to set.
    #: If you provide a `null` value or do not set this field at all, the existing `key` field is removed.
    key: typing.Optional[str]

    def __init__(self, *, key: typing.Optional[str] = None):
        self.key = key
        super().__init__(action="setKey")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountSetKeyAction":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountSetKeyActionSchema

        return ProductDiscountSetKeyActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountSetKeyActionSchema

        return ProductDiscountSetKeyActionSchema().dump(self)
class ProductDiscountSetValidFromAction(ProductDiscountUpdateAction):
    """Update action setting the discount's start time."""

    #: The time from which the discount should be effective.
    #: Please take Eventual Consistency into account for calculated product discount values.
    valid_from: typing.Optional[datetime.datetime]

    def __init__(self, *, valid_from: typing.Optional[datetime.datetime] = None):
        self.valid_from = valid_from
        super().__init__(action="setValidFrom")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountSetValidFromAction":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountSetValidFromActionSchema

        return ProductDiscountSetValidFromActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountSetValidFromActionSchema

        return ProductDiscountSetValidFromActionSchema().dump(self)
class ProductDiscountSetValidFromAndUntilAction(ProductDiscountUpdateAction):
    """Update action setting both the start and end time of the discount."""

    valid_from: typing.Optional[datetime.datetime]
    #: The timeframe for which the discount should be effective.
    #: Please take Eventual Consistency into account for calculated undiscounted values.
    valid_until: typing.Optional[datetime.datetime]

    def __init__(
        self,
        *,
        valid_from: typing.Optional[datetime.datetime] = None,
        valid_until: typing.Optional[datetime.datetime] = None
    ):
        self.valid_from = valid_from
        self.valid_until = valid_until
        super().__init__(action="setValidFromAndUntil")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountSetValidFromAndUntilAction":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import (
            ProductDiscountSetValidFromAndUntilActionSchema,
        )

        return ProductDiscountSetValidFromAndUntilActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import (
            ProductDiscountSetValidFromAndUntilActionSchema,
        )

        return ProductDiscountSetValidFromAndUntilActionSchema().dump(self)
class ProductDiscountSetValidUntilAction(ProductDiscountUpdateAction):
    """Update action setting the discount's end time."""

    #: The time from which the discount should be ineffective.
    #: Please take Eventual Consistency into account for calculated undiscounted values.
    valid_until: typing.Optional[datetime.datetime]

    def __init__(self, *, valid_until: typing.Optional[datetime.datetime] = None):
        self.valid_until = valid_until
        super().__init__(action="setValidUntil")

    @classmethod
    def deserialize(
        cls, data: typing.Dict[str, typing.Any]
    ) -> "ProductDiscountSetValidUntilAction":
        """Build an instance from its API (JSON-like) representation."""
        from ._schemas.product_discount import ProductDiscountSetValidUntilActionSchema

        return ProductDiscountSetValidUntilActionSchema().load(data)

    def serialize(self) -> typing.Dict[str, typing.Any]:
        """Return the API (JSON-like) representation of this instance."""
        from ._schemas.product_discount import ProductDiscountSetValidUntilActionSchema

        return ProductDiscountSetValidUntilActionSchema().dump(self)
# | StarcoderdataPython |
4896705 | """The various views and routes for MapRoulette"""
from flask import render_template, redirect, session
from maproulette import app
from maproulette.helpers import signed_in
from maproulette.models import Challenge, Task, db
@app.route('/')
def index():
    """Render the MapRoulette landing page."""
    page = render_template('index.html')
    return page
@app.route('/logout')
def logout():
    """Destroy the session (when signed in, or in debug mode) and go home."""
    may_destroy = signed_in() or app.debug
    if may_destroy:
        session.destroy()
    return redirect('/')
| StarcoderdataPython |
# <gh_stars>10-100
# -*- coding: utf-8 -*-
from unittest import TestCase
from sqlalchemy.future import create_engine
from sqlalchemy.orm import sessionmaker
from eventsourcing_sqlalchemy.datastore import SQLAlchemyDatastore
class TestDatastore(TestCase):
    """Construction behaviour of SQLAlchemyDatastore."""

    def test_should_be_created_with_url(self) -> None:
        store = SQLAlchemyDatastore(url="sqlite:///:memory:")
        self.assertIsInstance(store, SQLAlchemyDatastore)

    def test_should_be_created_with_session_cls(self) -> None:
        engine = create_engine(url="sqlite:///:memory:")
        factory = sessionmaker(bind=engine)
        store = SQLAlchemyDatastore(session_cls=factory)
        self.assertIsInstance(store, SQLAlchemyDatastore)

    def test_should_raise_exception_without_url_or_session_cls(self) -> None:
        # Neither url nor session_cls given: construction must fail.
        with self.assertRaises(EnvironmentError):
            SQLAlchemyDatastore()
# | StarcoderdataPython |
# esi.py
import requests
import threading
import uuid
import webbrowser
from .server import StoppableHTTPServer, AuthHandler
from shortcircuit.model.logger import Logger
class ESI:
    """EVE Online ESI client using the OAuth *implicit* flow.

    A throwaway local HTTP server receives the login callback; the access
    token lives only in memory, so the user must log in again whenever it
    expires.  (A proper proxy auth server using the authorization-code flow
    would avoid the re-login -- as the original author noted.)
    """

    ENDPOINT_ESI_VERIFY = 'https://esi.evetech.net/verify'
    ENDPOINT_ESI_LOCATION_FORMAT = 'https://esi.evetech.net/latest/characters/{}/location/'
    ENDPOINT_ESI_UNIVERSE_NAMES = 'https://esi.evetech.net/latest/universe/names/'
    ENDPOINT_ESI_UI_WAYPOINT = 'https://esi.evetech.net/latest/ui/autopilot/waypoint/'
    ENDPOINT_EVE_AUTH_FORMAT = 'https://login.eveonline.com/oauth/authorize?response_type=token&redirect_uri={}&client_id={}&scope={}&state={}'
    CLIENT_CALLBACK = 'http://127.0.0.1:7444/callback/'
    CLIENT_ID = 'd802bba44b7c4f6cbfa2944b0e5ea83f'
    CLIENT_SCOPES = [
        'esi-location.read_location.v1',
        'esi-ui.write_waypoint.v1',
    ]

    def __init__(self, login_callback, logout_callback):
        """
        :param login_callback: called with the character name (or None)
            once the OAuth callback has been processed.
        :param logout_callback: called when the token expires or on logout.
        """
        self.login_callback = login_callback
        self.logout_callback = logout_callback
        self.httpd = None       # local callback HTTP server, None when stopped
        self.state = None       # OAuth CSRF state nonce
        self.token = None       # current access token, None when logged out
        self.char_id = None
        self.char_name = None
        self.sso_timer = None   # timer that auto-logs-out on token expiry

    def start_server(self):
        """Start (or reuse) the local callback server and open the EVE
        login page in the default browser.

        :returns: the result of webbrowser.open().
        """
        if not self.httpd:
            # Server not running - restart it
            Logger.debug('Starting server')
            self.httpd = StoppableHTTPServer(
                server_address=('127.0.0.1', 7444),
                request_handler_class=AuthHandler,
                timeout_callback=self.timeout_server
            )
            server_thread = threading.Thread(target=self.httpd.serve, args=(self.handle_login, ))
            # .daemon attribute instead of Thread.setDaemon(), which is
            # deprecated since Python 3.10.
            server_thread.daemon = True
            server_thread.start()
            self.state = str(uuid.uuid4())
        else:
            # Server already running - reset its timeout counter
            self.httpd.tries = 0
        scopes = ' '.join(ESI.CLIENT_SCOPES)
        endpoint_auth = ESI.ENDPOINT_EVE_AUTH_FORMAT.format(ESI.CLIENT_CALLBACK, ESI.CLIENT_ID, scopes, self.state)
        return webbrowser.open(endpoint_auth)

    def timeout_server(self):
        """Callback from the HTTP server when it gives up waiting."""
        self.httpd = None

    def stop_server(self):
        """Shut the local callback server down, if it is running."""
        Logger.debug('Stopping server')
        if self.httpd:
            self.httpd.stop()
            self.httpd = None

    def handle_login(self, message):
        """Process the parsed OAuth callback query parameters.

        Verifies the CSRF state, stores the token, schedules an automatic
        logout for when the token expires, and resolves the character
        identity via the ESI verify endpoint.  Always notifies
        ``login_callback`` (with None on failure) and stops the server.
        """
        if not message:
            return
        if 'state' in message:
            if message['state'][0] != self.state:
                Logger.warning('OAUTH state mismatch')
                return
        if 'access_token' in message:
            self.token = message['access_token'][0]
            # Log out automatically when the token expires.
            self.sso_timer = threading.Timer(int(message['expires_in'][0]), self._logout)
            # .daemon attribute instead of the deprecated setDaemon().
            self.sso_timer.daemon = True
            self.sso_timer.start()
            r = requests.get(ESI.ENDPOINT_ESI_VERIFY, headers=self._get_headers())
            if r.status_code == requests.codes.ok:
                data = r.json()
                self.char_id = data['CharacterID']
                self.char_name = data['CharacterName']
            else:
                # Verification failed: drop the credentials again.
                self.token = None
                self.sso_timer = None
                self.char_id = None
                self.char_name = None
        self.login_callback(self.char_name)
        self.stop_server()

    def _get_headers(self):
        """HTTP headers for authenticated ESI requests."""
        return {
            'User-Agent': 'Short Circuit (minimally maintained by @Second_Fry), <EMAIL>',
            'Authorization': 'Bearer {}'.format(self.token),
        }

    def get_char_location(self):
        """Return the name of the solar system the character is in.

        :returns: system name, or None when not logged in or on any
            request failure.
        """
        if not self.token:
            return None
        current_location_name = None
        current_location_id = None
        r = requests.get(ESI.ENDPOINT_ESI_LOCATION_FORMAT.format(self.char_id), headers=self._get_headers())
        if r.status_code == requests.codes.ok:
            current_location_id = r.json()['solar_system_id']
            r = requests.post(ESI.ENDPOINT_ESI_UNIVERSE_NAMES, json=[str(current_location_id)])
            if r.status_code == requests.codes.ok:
                current_location_name = r.json()[0]['name']
        return current_location_name

    def set_char_destination(self, sys_id):
        """Set the in-game autopilot destination to ``sys_id``.

        :returns: True on success (HTTP 204), False otherwise.
        """
        if not self.token:
            return False
        success = False
        r = requests.post('{}?add_to_beginning=false&clear_other_waypoints=true&destination_id={}'.format(ESI.ENDPOINT_ESI_UI_WAYPOINT, sys_id), headers=self._get_headers())
        if r.status_code == 204:
            success = True
        return success

    def logout(self):
        """Explicit logout: cancel the expiry timer and clear credentials."""
        if self.sso_timer:
            self.sso_timer.cancel()
        self._logout()

    def _logout(self):
        """Forget token and identity, then notify the owner."""
        self.token = None
        self.char_id = None
        self.char_name = None
        self.logout_callback()
def login_cb(char_name):
    """Announce a successful ESI login on stdout."""
    greeting = 'Welcome, {}'.format(char_name)
    print(greeting)
def logout_cb():
    """Announce on stdout that the ESI session has expired."""
    message = 'Session expired'
    print(message)
def main():
    """Manual smoke test: start the OAuth login flow and drop into a REPL."""
    import code
    # NOTE(review): these three locals are unused by the flow itself; they
    # are only exposed to the interactive shell below via locals().
    implicit = True
    client_id = ''
    client_secret = ''
    esi = ESI(login_cb, logout_cb)
    print(esi.start_server())
    # Expose module globals plus the locals above to an interactive console
    # so the ESI object can be inspected by hand.
    gvars = globals().copy()
    gvars.update(locals())
    shell = code.InteractiveConsole(gvars)
    shell.interact()
if __name__ == '__main__':
main()
| StarcoderdataPython |
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
# Read the whole input string and scan it right-to-left so that a 'T' acts
# as an opener and any 'S' to its left can pair with it (equivalent to
# repeatedly deleting "ST" substrings from the original string).
x = read().rstrip().decode()[::-1]
p = 0  # number of 'ST' pairs removed
m = 0  # unmatched 'T's seen so far while scanning from the right
for xx in x:
    if xx == 'S' and m != 0:
        m -= 1
        p += 1
    if xx == 'T':
        m += 1
# Each removed pair deletes two characters of the original string.
print(len(x) - 2 * p)
| StarcoderdataPython |
336640 | <filename>ts/model_service/__init__.py<gh_stars>1-10
"""
Model services code
"""
from . import model_service
| StarcoderdataPython |
11277755 | <gh_stars>10-100
from alphavantage._version import version_info, __version__
| StarcoderdataPython |
12810154 | <reponame>Lukeeeeee/FISDNN<filename>src/model/inputs/inputs.py
import tensorflow as tf
class Inputs(object):
    """Builds a dict of float32 tf.placeholder tensors from a shape config.

    Each config entry maps a name to either a list (per-sample shape) or an
    int (per-sample width); a leading batch dimension of ``None`` is added.
    """

    def __init__(self, config):
        """Create one placeholder per config entry.

        :param config: mapping of name -> list of dims or int width.
        :raises TypeError: for any value that is neither list nor int.
        """
        self.input_dict = {}
        for key, value in config.items():
            # Exact type checks are deliberate: they reject bool (a subclass
            # of int) and list subclasses, matching the original contract.
            if type(value) is list:
                self.input_dict[key] = tf.placeholder(tf.float32, shape=[None] + value)
            elif type(value) is int:
                self.input_dict[key] = tf.placeholder(tf.float32, shape=[None, value])
            else:
                raise TypeError('does not support %s to init a input tensor' % str(type(value)))
        # Tuple of the placeholder tensors, in config insertion order.
        self.tensor_tuple = tuple(value for _, value in self.input_dict.items())

    def generate_inputs_tuple(self, data_dict):
        """Return values from ``data_dict`` ordered to match ``tensor_tuple``."""
        return tuple(data_dict[str(key)] for key, _ in self.input_dict.items())

    def __call__(self, name=None):
        """Return the placeholder named ``name``, or the whole dict."""
        if name is not None:
            return self.input_dict[name]
        return self.input_dict
if __name__ == '__main__':
    # Manual smoke test: build placeholders for three named inputs.
    config = {
        'IMAGE': [1, 2, 3],
        'SPEED': [3],
        'POS': [1]
    }
    a = Inputs(config)
    print(a.tensor_tuple)
    print(a.generate_inputs_tuple(data_dict={'IMAGE': 0,
                                             'SPEED': 1,
                                             'POS': 2}))
    print(a('IMAGE'))
    # NOTE(review): 'a' is rebound from the Inputs instance to a tf.Session
    # that is never used or closed -- looks like leftover scratch code.
    a = tf.Session()
| StarcoderdataPython |
3407998 | <reponame>aashiq075/PepeBot
"""Type `.df` or `.df <1-9>` reply to a photo or sticker
"""
from telethon.errors.rpcerrorlist import YouBlockedUserError
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern='df(:? |$)(.*)?'))
async def _(event):
    """Reply `.df` (optionally `.df <1-9>`) to an image or sticker to have
    @image_deepfrybot deep-fry it, then post the bot's result to the chat.
    """
    await event.edit("`Destroying Image...`")
    level = event.pattern_match.group(2)
    if event.fwd_from:
        return
    if not event.reply_to_msg_id:
        await event.edit("`Reaply to an Image Nigga`")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.media:
        await event.edit("`I am not going to Destroy a Media :)`")
        return
    chat = "@image_deepfrybot"
    await event.edit("```Final Nuking...```")
    # r / msg_reply stay None when no explicit /deepfry command is sent;
    # the original code raised NameError on 'r' in that case.
    msg_reply = None
    r = None
    async with event.client.conversation(chat) as conv:
        try:
            msg = await conv.send_message(reply_message)
            if level:
                m = f"/deepfry {level}"
                msg_reply = await conv.send_message(
                    m,
                    reply_to=msg.id)
                r = await conv.get_response()
                response = await conv.get_response()
            elif reply_message.gif:
                m = "/deepfry"
                msg_reply = await conv.send_message(
                    m,
                    reply_to=msg.id)
                r = await conv.get_response()
                response = await conv.get_response()
            else:
                response = await conv.get_response()
            """ Don't spam notification """
            await event.client.send_read_acknowledge(conv.chat_id)
        except YouBlockedUserError:
            await event.reply("`Please unblock` @image_deepfrybot`...`")
            return
    if response.text.startswith("Couldn't"):
        await event.edit("`Send Image Plox`")
        # Delete only the messages that were actually sent in this branch.
        delete_ids = [m.id for m in (msg, response, r, msg_reply) if m is not None]
        await event.client.delete_messages(conv.chat_id, delete_ids)
        return
    else:
        await event.client.send_file(event.chat_id, response)
| StarcoderdataPython |
11249996 | <reponame>loafbaker/-django_ecommerce1-
"""
Django settings for django_ecommerce1 project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Placeholder (redacted); the bare `<KEY>'` left here previously was a
# syntax error.
SECRET_KEY = '<KEY>'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
DEFAULT_FROM_EMAIL = '<NAME> <<EMAIL>>'

# Email utility (SMTP).
EMAIL_HOST = 'smtp.live.com'  # 'smtp.sendgrid.net'
EMAIL_HOST_USER = '<EMAIL>'  # change to your own email address
EMAIL_HOST_PASSWORD = '<PASSWORD>'  # change to your own password
# BUG FIX: this setting was misspelled EMAIL_POT, which Django silently
# ignores (the default port 587 was being used instead of 25).
EMAIL_PORT = 25  # default: 587
EMAIL_USE_TLS = True

# Site settings
if DEBUG:
    SITE_URL = 'http://127.0.0.1:8000'
else:
    SITE_URL = 'http://cfestore.com'

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'south',
    'products',
    'carts',
    'orders',
    'accounts',
    'marketing',
    'localflavor',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'marketing.middleware.DisplayMarketing',
)

ROOT_URLCONF = 'django_ecommerce1.urls'
WSGI_APPLICATION = 'django_ecommerce1.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'django_ecommerce1.sqlite'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Marketing banner display window offsets (see marketing.middleware).
MARKETING_HOURS_OFFSET = 3
MARKETING_MINUTES_OFFSET = 0
MARKETING_SECONDS_OFFSET = 0

DEFAULT_TAX_RATE = 0.08

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    "django.core.context_processors.static",
    "django.core.context_processors.tz",
    "django.contrib.messages.context_processors.messages",
)

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static', 'static_files'),
)
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)

# Stripe API keys (redacted placeholders).
STRIPE_SECRET_KEY = '<KEY>'
STRIPE_PUBLISHABLE_KEY = '<KEY>'
| StarcoderdataPython |
4876733 | import tempfile
import os
import subprocess
import shutil
import sys
import numpy
from ase import Atoms, Atom
def feff_edge_number(edge):
    """Map an absorption-edge label to FEFF's numeric HOLE code.

    FEFF numbers the core holes 1=K, 2=L1, 3=L2, 4=L3.  Both the Arabic
    ('l1') and Roman ('li') spellings are accepted, case-insensitively.

    :raises KeyError: for an unrecognized edge label.
    """
    edge_map = {}
    edge_map['k'] = 1
    # BUG FIX: the L1 edge is hole 2 in FEFF's numbering; it was previously
    # mapped to 1, colliding with the K edge.
    edge_map['l1'] = edge_map['li'] = 2
    edge_map['l2'] = edge_map['lii'] = 3
    edge_map['l3'] = edge_map['liii'] = 4
    return edge_map[edge.lower()]
def load_chi_dat(filename):
    """Parse a FEFF chi.dat file.

    Skips the header until the "k chi mag" column-title line, then reads
    the first two columns of every subsequent non-empty row.

    :param filename: path to a chi.dat file.
    :returns: (k, chi) as numpy arrays.
    """
    k = []
    chi = []
    chi_section = False
    # 'with' guarantees the file handle is closed (previously leaked).
    with open(filename) as f:
        for line in f:
            line = line.strip()
            if len(line) == 0:
                continue
            fields = line.split()
            # Guard len(fields) >= 3 so short header lines cannot raise
            # IndexError before the data section starts.
            if len(fields) >= 3 and fields[0] == "k" and fields[1] == "chi" and fields[2] == "mag":
                chi_section = True
                continue
            if chi_section:
                k.append(float(fields[0]))
                chi.append(float(fields[1]))
    return numpy.array(k), numpy.array(chi)
def load_feff_dat(filename):
    """Parse a FEFF single-path output file (feffNNNN.dat).

    Walks three sections in order: the path header (after "genfmt", giving
    the effective path length reff), the scattering-atom list, and the
    per-k data columns.

    :param filename: path to a feffNNNN.dat file.
    :returns: dict with the scattering atoms (non-periodic ase Atoms),
        ``reff`` (None if no path header was found), and numpy arrays for
        each data column.
    """
    xk = []
    cdelta = []
    afeff = []
    phfeff = []
    redfac = []
    xlam = []
    rep = []
    atoms = Atoms()
    atoms.set_pbc((False, False, False))
    path_section = False
    atoms_section = False
    data_section = False
    # Robustness: defined even when the file has no "genfmt" header line
    # (the original raised NameError at return in that case).
    reff = None
    # 'with' guarantees the file handle is closed (previously leaked).
    with open(filename) as f:
        for line in f:
            line = line.strip()
            fields = line.split()
            if "genfmt" in line:
                path_section = True
                continue
            if fields[0] == "x" and fields[1] == "y" and fields[2] == "z":
                atoms_section = True
                path_section = False
                continue
            if fields[0] == "k" and fields[1] == "real[2*phc]":
                data_section = True
                atoms_section = False
                continue
            if path_section:
                if "---------------" in line:
                    continue
                reff = float(fields[2])
                path_section = False
            if atoms_section:
                x = float(fields[0])
                y = float(fields[1])
                z = float(fields[2])
                pot = int(fields[3])
                atomic_number = int(fields[4])
                atoms.append(Atom(symbol=atomic_number, position=(x, y, z),
                                  tag=pot))
            if data_section:
                fields = [float(f) for f in fields]
                xk.append(fields[0])
                cdelta.append(fields[1])
                afeff.append(fields[2])
                phfeff.append(fields[3])
                redfac.append(fields[4])
                xlam.append(fields[5])
                rep.append(fields[6])
    xk = numpy.array(xk)
    cdelta = numpy.array(cdelta)
    afeff = numpy.array(afeff)
    phfeff = numpy.array(phfeff)
    redfac = numpy.array(redfac)
    xlam = numpy.array(xlam)
    rep = numpy.array(rep)
    return {
        "atoms": atoms,
        "reff": reff,
        "xk": xk,
        "cdelta": cdelta,
        "afeff": afeff,
        "phfeff": phfeff,
        "redfac": redfac,
        "xlam": xlam,
        "rep": rep,
    }
def write_feff(filename, atoms, absorber, feff_options=None):
    """Write a feff.inp input file for the given atoms.

    :param filename: output path.
    :param atoms: ase-like Atoms collection (needs get_atomic_numbers()
        and per-atom .number/.x/.y/.z attributes).
    :param absorber: index of the absorbing atom (assigned potential 0).
    :param feff_options: optional mapping of FEFF card name -> value,
        written verbatim near the top of the file.
    """
    # None sentinel instead of a mutable default argument.
    if feff_options is None:
        feff_options = {}
    # 'with' guarantees the file handle is flushed and closed.
    with open(filename, "w") as f:
        f.write("TITLE %s\n" % str(atoms))
        # .items() instead of the Python-2-only .iteritems().
        for key, value in feff_options.items():
            f.write("%s %s\n" % (key, value))
        f.write("\nPOTENTIALS\n")
        absorber_z = atoms[absorber].number
        f.write("%i %i\n" % (0, absorber_z))
        unique_z = list(set(atoms.get_atomic_numbers()))
        pot_map = {}
        i = 1
        for z in unique_z:
            nz = len([a for a in atoms if a.number == z])
            # The absorber already has its own potential (0); skip its
            # element entirely when it is the only atom of that element.
            if z == absorber_z and nz - 1 == 0:
                continue
            f.write("%i %i\n" % (i, z))
            pot_map[z] = i
            i += 1
        f.write("\nATOMS\n")
        for i, atom in enumerate(atoms):
            if i == absorber:
                pot = 0
            else:
                pot = pot_map[atom.number]
            f.write("%f %f %f %i\n" % (atom.x, atom.y, atom.z, pot))
def pbc(r, box, ibox = None):
    """
    Wrap a vector back into the periodic cell (minimum-image convention).

    Parameters:

    r: the vector the boundary conditions are applied to
    box: the box that defines the boundary conditions
    ibox: the inverse of the box. This will be calculated if not provided.
    """
    inverse = numpy.linalg.inv(box) if ibox is None else ibox
    fractional = numpy.dot(r, inverse)
    # Map every fractional coordinate into the interval [-0.5, 0.5).
    wrapped = (fractional % 1.0 + 1.5) % 1.0 - 0.5
    return numpy.dot(wrapped, box)
def absorber_sphere(atoms, absorber, radius):
    """Extract the absorber plus all atoms within ``radius`` of it.

    The absorber is moved to the origin and neighbor positions are taken
    relative to it under periodic boundary conditions.

    :param atoms: periodic ase Atoms object.
    :param absorber: index of the absorbing atom.
    :param radius: cutoff distance.
    :returns: a new non-periodic Atoms object with the absorber first.
    """
    box = atoms.get_cell()
    ibox = numpy.linalg.inv(box)
    pos = atoms.get_positions()
    elements = atoms.get_chemical_symbols()
    atoms_sphere = [Atom(elements[absorber], (0., 0., 0.))]
    # range() instead of the Python-2-only xrange(); behavior is identical.
    for i in range(len(atoms)):
        if i == absorber:
            continue
        r = pbc(pos[i] - pos[absorber], box, ibox)
        d = numpy.linalg.norm(r)
        if d <= radius:
            atoms_sphere.append(Atom(elements[i], r))
    return Atoms(atoms_sphere)
def run_feff(atoms, absorber, feff_options={}, tmp_dir=None, get_path=False):
    """Run the external ``feff`` program for ``atoms`` in a temp directory.

    Returns ``(k, chi)`` arrays -- plus the first-path data dict when
    ``get_path`` is true -- or None entries when FEFF finds no paths.
    NOTE(review): this function uses Python-2 print statements; it predates
    Python 3.
    """
    tmp_dir_path = tempfile.mkdtemp(prefix="tmp_feff_", dir=tmp_dir)
    feff_inp_path = os.path.join(tmp_dir_path, "feff.inp")
    if any(atoms.get_pbc()):
        #pick out a sphere around the absorber atom, important for PBC to work with feff
        #atom index 0 is now the absorber
        atoms = absorber_sphere(atoms, absorber, radius=float(feff_options['RMAX'])+0.01)
        absorber = 0
    write_feff(feff_inp_path, atoms, absorber, feff_options)
    try:
        p = subprocess.Popen(["feff"], cwd=tmp_dir_path,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        from sys import exit, stderr
        stderr.write('unable to locate feff executable in PATH\n')
        exit(1)
    # NOTE(review): wait() before communicate() with PIPEs can deadlock if
    # feff fills an OS pipe buffer -- TODO confirm and consider communicate()
    # alone.
    retval = p.wait()
    #function to deal with errors
    def feff_error():
        # Preserve the failing feff.inp next to the CWD for post-mortem.
        print 'Problem with feff calculation in %s' % tmp_dir_path
        tmp_f = tempfile.NamedTemporaryFile(dir='.', prefix='feff.inp.')
        print 'feff.inp saved to:', tmp_f.name
        tmp_f.close()
        write_feff(tmp_f.name, atoms, absorber, feff_options)
        shutil.rmtree(tmp_dir_path)
    if retval != 0:
        feff_error()
        return
    stdout, stderr = p.communicate()
    stderr = stderr.strip()
    if stderr == "hash error":
        # Nudge the absorber slightly and retry.
        # NOTE(review): the retry drops get_path, so a 3-tuple caller gets a
        # 2-tuple back on this path -- TODO confirm intended.
        atoms[absorber].set_position(atoms[absorber].get_position()+0.001)
        sys.stderr.write("%s\n"%stderr)
        return run_feff(atoms, absorber, feff_options, tmp_dir)
    #check to see if we found any paths
    for line in stdout.split('\n'):
        line = line.strip()
        if line.startswith('Paths found'):
            npaths = int(line.split()[2])
            if npaths == 0:
                shutil.rmtree(tmp_dir_path)
                if get_path:
                    return None, None, None
                else:
                    return None, None
    try:
        k, chi = load_chi_dat(os.path.join(tmp_dir_path, "chi.dat"))
    except IOError:
        feff_error()
        raise
    if get_path:
        path = load_feff_dat(os.path.join(tmp_dir_path, "feff0001.dat"))
    shutil.rmtree(tmp_dir_path)
    if get_path:
        return k, chi, path
    else:
        return k, chi
| StarcoderdataPython |
3238476 | <gh_stars>0
from pyspark.sql import Row
import networkx as nx
from splink_graph.cluster_metrics import (
cluster_main_stats,
cluster_basic_stats,
cluster_eb_modularity,
cluster_lpg_modularity,
cluster_avg_edge_betweenness,
cluster_connectivity_stats,
number_of_bridges,
cluster_graph_hash,
)
import pytest
import pandas as pd
import pyspark.sql.functions as f
def test_cluster_basic_stats(spark):
    """Node/edge counts and density for two 3-node, 2-edge clusters."""
    # Create an Edge DataFrame with "src" and "dst" columns
    data_list = [
        {"src": "a", "dst": "b", "weight": 0.4, "cluster_id": 1},
        {"src": "b", "dst": "c", "weight": 0.56, "cluster_id": 1},
        {"src": "d", "dst": "e", "weight": 0.2, "cluster_id": 2},
        {"src": "f", "dst": "e", "weight": 0.8, "cluster_id": 2},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_basic_stats(
        e_df,
        src="src",
        dst="dst",
        cluster_id_colname="cluster_id",
        weight_colname="weight",
    ).toPandas()
    assert (df_result["nodecount"] == 3).all()
    assert (df_result["edgecount"] == 2).all()
    assert df_result["density"].values == pytest.approx(0.666667, 0.01)
def test_cluster_main_stats(spark):
    """Diameter and transitivity for two 3-node path clusters."""
    # Create an Edge DataFrame with "src" and "dst" columns
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.4, "cluster_id": 1},
        {"src": "b", "dst": "c", "distance": 0.56, "cluster_id": 1},
        {"src": "d", "dst": "e", "distance": 0.2, "cluster_id": 2},
        {"src": "f", "dst": "e", "distance": 0.8, "cluster_id": 2},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    e_df = e_df.withColumn("weight", 1.0 - f.col("distance"))
    df_result = cluster_main_stats(e_df).toPandas()
    assert df_result["diameter"][0] == 2
    assert df_result["diameter"][1] == 2
    assert df_result["transitivity"][0] == pytest.approx(0, 0.01)
    assert df_result["transitivity"][1] == pytest.approx(0, 0.01)
def test_cluster_main_stats_customcolname(spark):
    """Same as test_cluster_main_stats but with custom src/dst column names."""
    # Create an Edge DataFrame with "id_l" and "id_r" columns
    data_list = [
        {"id_l": "a", "id_r": "b", "distance": 0.4, "cluster_id": 1},
        {"id_l": "b", "id_r": "c", "distance": 0.56, "cluster_id": 1},
        {"id_l": "d", "id_r": "e", "distance": 0.2, "cluster_id": 2},
        {"id_l": "f", "id_r": "e", "distance": 0.8, "cluster_id": 2},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    e_df = e_df.withColumn("weight", 1.0 - f.col("distance"))
    df_result = cluster_main_stats(e_df, src="id_l", dst="id_r").toPandas()
    assert df_result["diameter"][0] == 2
    assert df_result["diameter"][1] == 2
def test_cluster_main_stats_customcolname2(spark):
    """Custom src/dst plus a custom cluster-id column name."""
    # Create an Edge DataFrame with "id_l" and "id_r" columns
    data_list = [
        {"id_l": "a", "id_r": "b", "distance": 0.4, "estimated_id": 1},
        {"id_l": "b", "id_r": "c", "distance": 0.56, "estimated_id": 1},
        {"id_l": "d", "id_r": "e", "distance": 0.2, "estimated_id": 2},
        {"id_l": "f", "id_r": "e", "distance": 0.8, "estimated_id": 2},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    e_df = e_df.withColumn("weight", 1.0 - f.col("distance"))
    df_result = cluster_main_stats(
        e_df, src="id_l", dst="id_r", cluster_id_colname="estimated_id"
    ).toPandas()
    assert df_result["diameter"][0] == 2
    assert df_result["diameter"][1] == 2
def test_cluster_eb_modularity_neg(spark):
    """Edge-betweenness modularity is negative for a single-edge cluster."""
    # Create an Edge DataFrame with on "src" and "dst" column. so 2 nodes one edge
    # when cut the nodes are singletons. modularity should be negative
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.4, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_eb_modularity(
        e_df,
        src="src",
        dst="dst",
        distance_colname="distance",
        cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["cluster_eb_modularity"][0] < 0
def test_cluster_eb_modularity_0_dist(spark):
    """A zero-distance edge should make edge-betweenness modularity raise."""
    # NOTE(review): everything below, including the final assert, sits inside
    # the pytest.raises block; if no exception is raised, pytest.raises itself
    # fails with DID-NOT-RAISE and the assert never runs.
    with pytest.raises(Exception):
        # Create an Edge DataFrame with on "src" and "dst" column. so 7 nodes connected one after the other
        # modularity should be relatively large here (>0.30) .all edges are bridges
        data_list = [
            {"src": "a", "dst": "b", "distance": 1.0, "cluster_id": 1},
            {"src": "b", "dst": "c", "distance": 0.00, "cluster_id": 1},
            {"src": "c", "dst": "d", "distance": 1.0, "cluster_id": 1},
            {"src": "d", "dst": "e", "distance": 1.0, "cluster_id": 1},
            {"src": "e", "dst": "f", "distance": 1.0, "cluster_id": 1},
            {"src": "f", "dst": "g", "distance": 1.0, "cluster_id": 1},
        ]
        e_df = spark.createDataFrame(Row(**x) for x in data_list)
        df_result = cluster_eb_modularity(
            e_df,
            src="src",
            dst="dst",
            distance_colname="distance",
            cluster_id_colname="cluster_id",
        ).toPandas()
        assert df_result["cluster_eb_modularity"][0] > 0.30
def test_cluster_eb_modularity_pos_large(spark):
    """Modularity is large (> 0.30) for a 7-node path where every edge is a bridge."""
    # Create an Edge DataFrame with on "src" and "dst" column. so 7 nodes connected one after the other
    # modularity should be relatively large here (>0.30) .all edges are bridges
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.8, "cluster_id": 1},
        {"src": "b", "dst": "c", "distance": 0.86, "cluster_id": 1},
        {"src": "c", "dst": "d", "distance": 0.8, "cluster_id": 1},
        {"src": "d", "dst": "e", "distance": 0.8, "cluster_id": 1},
        {"src": "e", "dst": "f", "distance": 0.8, "cluster_id": 1},
        {"src": "f", "dst": "g", "distance": 0.8, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_eb_modularity(
        e_df,
        src="src",
        dst="dst",
        distance_colname="distance",
        cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["cluster_eb_modularity"][0] > 0.30
def test_cluster_eb_modularity_pos_small(spark):
    """Modularity is near zero for a densely connected 6-node cluster."""
    # Create an Edge DataFrame with on "src" and "dst" column. so 6 nodes each connected to all others
    # modularity should be quite small here
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "c", "distance": 0.96, "cluster_id": 1},
        {"src": "c", "dst": "d", "distance": 0.92, "cluster_id": 1},
        {"src": "a", "dst": "d", "distance": 0.98, "cluster_id": 1},
        {"src": "a", "dst": "c", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "d", "distance": 0.92, "cluster_id": 1},
        {"src": "d", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "d", "dst": "f", "distance": 0.94, "cluster_id": 1},
        {"src": "e", "dst": "f", "distance": 0.92, "cluster_id": 1},
        {"src": "a", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "a", "dst": "f", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "f", "distance": 0.92, "cluster_id": 1},
        {"src": "c", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "c", "dst": "f", "distance": 0.94, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_eb_modularity(
        e_df,
        src="src",
        dst="dst",
        distance_colname="distance",
        cluster_id_colname="cluster_id",
    ).toPandas()
    assert abs(df_result["cluster_eb_modularity"][0] - 0.01) < 0.1
def test_cluster_avg_cluster_eb(spark):
    """Average edge betweenness is high (> 0.4) on a 7-node path graph."""
    # Create an Edge DataFrame with on "src" and "dst" column.
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.8, "cluster_id": 1},
        {"src": "b", "dst": "c", "distance": 0.86, "cluster_id": 1},
        {"src": "c", "dst": "d", "distance": 0.8, "cluster_id": 1},
        {"src": "d", "dst": "e", "distance": 0.8, "cluster_id": 1},
        {"src": "e", "dst": "f", "distance": 0.8, "cluster_id": 1},
        {"src": "f", "dst": "g", "distance": 0.8, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_avg_edge_betweenness(
        e_df,
        src="src",
        dst="dst",
        distance_colname="distance",
        cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["avg_cluster_eb"][0] > 0.4
def test_cluster_avg_cluster_eb_completegraph(spark):
    """Average edge betweenness is low (~0.076) on a dense 6-node graph."""
    # Create an Edge DataFrame with on "src" and "dst" column.
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "c", "distance": 0.96, "cluster_id": 1},
        {"src": "c", "dst": "d", "distance": 0.92, "cluster_id": 1},
        {"src": "a", "dst": "d", "distance": 0.98, "cluster_id": 1},
        {"src": "a", "dst": "c", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "d", "distance": 0.92, "cluster_id": 1},
        {"src": "d", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "d", "dst": "f", "distance": 0.94, "cluster_id": 1},
        {"src": "e", "dst": "f", "distance": 0.92, "cluster_id": 1},
        {"src": "a", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "a", "dst": "f", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "f", "distance": 0.92, "cluster_id": 1},
        {"src": "c", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "c", "dst": "f", "distance": 0.94, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_avg_edge_betweenness(
        e_df,
        src="src",
        dst="dst",
        distance_colname="distance",
        cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["avg_cluster_eb"][0] == pytest.approx(0.076)
def test_cluster_avg_cluster_eb_0_156_othergraph(spark):
    """Average edge betweenness is ~0.156 on a sparser 6-node graph."""
    # Create an Edge DataFrame with on "src" and "dst" column.
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "c", "distance": 0.96, "cluster_id": 1},
        {"src": "c", "dst": "d", "distance": 0.92, "cluster_id": 1},
        {"src": "b", "dst": "d", "distance": 0.92, "cluster_id": 1},
        {"src": "d", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "e", "dst": "f", "distance": 0.92, "cluster_id": 1},
        {"src": "a", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "a", "dst": "f", "distance": 0.94, "cluster_id": 1},
        {"src": "c", "dst": "f", "distance": 0.94, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_avg_edge_betweenness(
        e_df,
        src="src",
        dst="dst",
        distance_colname="distance",
        cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["avg_cluster_eb"][0] == pytest.approx(0.156)
def test_cluster_avg_cluster_eb_one_edge(spark):
    """Average edge betweenness is exactly 1.0 for a single-edge cluster."""
    # Create an Edge DataFrame with on "src" and "dst" column.
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.8, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_avg_edge_betweenness(
        e_df,
        src="src",
        dst="dst",
        distance_colname="distance",
        cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["avg_cluster_eb"][0] == pytest.approx(1.00)
def test_cluster_lpg_modularity_zero(spark):
    """Label-propagation modularity is 0 for a single-edge cluster."""
    # Create an Edge DataFrame with on "src" and "dst" column. so 2 nodes one edge
    # with lpg both belong to same cluster. modularity should be 0
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.4, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_lpg_modularity(
        e_df,
        src="src",
        dst="dst",
        distance_colname="distance",
        cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["cluster_lpg_modularity"][0] == pytest.approx(0.0)
def test_cluster_lpg_modularity_pos_large(spark):
    """Label-propagation modularity is large (> 0.30) on a 7-node path graph."""
    # Create an Edge DataFrame with on "src" and "dst" column. so 7 nodes connected one after the other
    # modularity should be relatively large here (>0.30) .all edges are bridges
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.8, "cluster_id": 1},
        {"src": "b", "dst": "c", "distance": 0.86, "cluster_id": 1},
        {"src": "c", "dst": "d", "distance": 0.8, "cluster_id": 1},
        {"src": "d", "dst": "e", "distance": 0.8, "cluster_id": 1},
        {"src": "e", "dst": "f", "distance": 0.8, "cluster_id": 1},
        {"src": "f", "dst": "g", "distance": 0.8, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_lpg_modularity(
        e_df,
        src="src",
        dst="dst",
        distance_colname="distance",
        cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["cluster_lpg_modularity"][0] > 0.30
def test_cluster_lpg_modularity_pos_small(spark):
    """Label-propagation modularity is ~0 on a dense 6-node cluster."""
    # Create an Edge DataFrame with on "src" and "dst" column. so 6 nodes each connected to all others
    # modularity should be quite small here
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "c", "distance": 0.96, "cluster_id": 1},
        {"src": "c", "dst": "d", "distance": 0.92, "cluster_id": 1},
        {"src": "a", "dst": "d", "distance": 0.98, "cluster_id": 1},
        {"src": "a", "dst": "c", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "d", "distance": 0.92, "cluster_id": 1},
        {"src": "d", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "d", "dst": "f", "distance": 0.94, "cluster_id": 1},
        {"src": "e", "dst": "f", "distance": 0.92, "cluster_id": 1},
        {"src": "a", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "a", "dst": "f", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "f", "distance": 0.92, "cluster_id": 1},
        {"src": "c", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "c", "dst": "f", "distance": 0.94, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_lpg_modularity(
        e_df,
        src="src",
        dst="dst",
        distance_colname="distance",
        cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["cluster_lpg_modularity"][0] == pytest.approx(0.0, 0.001)
def test_cluster_connectivity_stats_completegraph(spark):
    """Node and edge connectivity are both 4 on a dense 6-node graph."""
    # Create an Edge DataFrame with on "src" and "dst" column. so 6 nodes each connected to all others
    # modularity should be quite small here
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "c", "distance": 0.96, "cluster_id": 1},
        {"src": "c", "dst": "d", "distance": 0.92, "cluster_id": 1},
        {"src": "a", "dst": "d", "distance": 0.98, "cluster_id": 1},
        {"src": "a", "dst": "c", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "d", "distance": 0.92, "cluster_id": 1},
        {"src": "d", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "d", "dst": "f", "distance": 0.94, "cluster_id": 1},
        {"src": "e", "dst": "f", "distance": 0.92, "cluster_id": 1},
        {"src": "a", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "a", "dst": "f", "distance": 0.94, "cluster_id": 1},
        {"src": "b", "dst": "f", "distance": 0.92, "cluster_id": 1},
        {"src": "c", "dst": "e", "distance": 0.98, "cluster_id": 1},
        {"src": "c", "dst": "f", "distance": 0.94, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_connectivity_stats(
        e_df, src="src", dst="dst", cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["node_conn"][0] == 4
    assert df_result["edge_conn"][0] == 4
def test_cluster_connectivity_stats_linegraph(spark):
    """Node and edge connectivity are both 1 on a 7-node path graph."""
    # Create an Edge DataFrame with on "src" and "dst" column. so 6 nodes each connected to all others
    data_list = [
        {"src": "a", "dst": "b", "distance": 0.8, "cluster_id": 1},
        {"src": "b", "dst": "c", "distance": 0.86, "cluster_id": 1},
        {"src": "c", "dst": "d", "distance": 0.8, "cluster_id": 1},
        {"src": "d", "dst": "e", "distance": 0.8, "cluster_id": 1},
        {"src": "e", "dst": "f", "distance": 0.8, "cluster_id": 1},
        {"src": "f", "dst": "g", "distance": 0.8, "cluster_id": 1},
    ]
    e_df = spark.createDataFrame(Row(**x) for x in data_list)
    df_result = cluster_connectivity_stats(
        e_df, src="src", dst="dst", cluster_id_colname="cluster_id",
    ).toPandas()
    assert df_result["node_conn"][0] == 1
    assert df_result["edge_conn"][0] == 1
def test_number_of_bridges(spark):
    """Bridge counts per cluster with a custom cluster-id column name."""
    # Create an Edge DataFrame with "src" and "dst" columns
    e2_df = spark.createDataFrame(
        [
            ("a", "b", 0.4, 1),
            ("b", "c", 0.56, 1),
            ("d", "e", 0.84, 2),
            ("e", "f", 0.65, 2),
            ("f", "d", 0.67, 2),
            ("f", "g", 0.34, 2),
            ("g", "h", 0.99, 2),
            ("h", "i", 0.5, 2),
            ("h", "j", 0.8, 2),
        ],
        ["src", "dst", "weight", "clus_id"],
    )
    e2_df = e2_df.withColumn("distance", 1.0 - f.col("weight"))
    out = number_of_bridges(e2_df, cluster_id_colname="clus_id").toPandas()
    filter_cluster_1 = out["cluster_id"] == 1
    filter_cluster_2 = out["cluster_id"] == 2
    num_bridges_cluster_1 = out[filter_cluster_1].iloc[0]["number_of_bridges"]
    assert num_bridges_cluster_1 == 2
    num_bridges_cluster_2 = out[filter_cluster_2].iloc[0]["number_of_bridges"]
    assert num_bridges_cluster_2 == 4
def test_four_bridges(spark):
    """A barbell graph (two K5s joined by a 3-node path) has 4 bridges."""
    g = nx.barbell_graph(5, 3)
    fourbridges = pd.DataFrame(list(g.edges), columns=["src", "dst"])
    fourbridges["weight"] = 1.0
    fourbridges["cluster_id"] = 1
    # Create an Edge DataFrame with "src" and "dst" columns
    e2_df = spark.createDataFrame(fourbridges, ["src", "dst", "weight", "cluster_id"],)
    e2_df = e2_df.withColumn("distance", 1.0 - f.col("weight"))
    result = number_of_bridges(e2_df, cluster_id_colname="cluster_id").toPandas()
    assert result["number_of_bridges"][0] == 4
def test_0_bridges(spark):
    """A complete graph has no bridges."""
    g = nx.complete_graph(9)
    zerobridges = pd.DataFrame(list(g.edges), columns=["src", "dst"])
    zerobridges["weight"] = 1.0
    zerobridges["cluster_id"] = 1
    # Create an Edge DataFrame with "src" and "dst" columns
    e2_df = spark.createDataFrame(zerobridges, ["src", "dst", "weight", "cluster_id"],)
    e2_df = e2_df.withColumn("distance", 1.0 - f.col("weight"))
    result = number_of_bridges(e2_df, cluster_id_colname="cluster_id").toPandas()
    assert result["number_of_bridges"][0] == 0
def test_cluster_graph_hash(spark):
    """cluster_graph_hash yields a stable hash for a known edge list."""
    rows = [
        Row(src="a", dst="b", weight=0.4, cluster_id=1),
        Row(src="b", dst="c", weight=0.56, cluster_id=1),
        Row(src="d", dst="e", weight=0.2, cluster_id=2),
        Row(src="f", dst="e", weight=0.8, cluster_id=2),
    ]
    edges = spark.createDataFrame(rows)
    hashed = cluster_graph_hash(
        edges, src="src", dst="dst", cluster_id_colname="cluster_id"
    ).toPandas()
    assert hashed["graphhash"][0] == "0f43d8cdd43b0b78727b192b6d6d0d0e"
| StarcoderdataPython |
216676 | <reponame>kmoskovtsev/HOOMD-Blue-fork
from hoomd import *
from hoomd import deprecated
from hoomd import hpmc
import unittest
import math
# this script needs to be run on two ranks
# initialize with one rank per partition
context.initialize()
class muvt_updater_test(unittest.TestCase):
    """Smoke tests for the hpmc muVT updater.

    Each test attaches a different HPMC shape integrator to a dilute random
    system, enables grand-canonical transfers of type 'A' at a fixed
    fugacity, and runs 100 steps to exercise insertion/removal moves.
    """
    def setUp(self):
        # Very dilute system (phi_p=0.001, large min_dist) so that particle
        # insertions are rarely rejected for overlap.
        self.system = deprecated.init.create_random(N=1000,phi_p=0.001,min_dist=4.0,seed=12345)
    def tearDown(self):
        # Drop references before re-initializing the context so the next
        # test starts from a clean simulation state.
        del self.muvt
        del self.mc
        del self.system
        context.initialize()
    def test_spheres(self):
        self.mc = hpmc.integrate.sphere(seed=123)
        self.mc.set_params(d=0.1)
        self.mc.shape_param.set('A', diameter=1.0)
        self.muvt=hpmc.update.muvt(mc=self.mc,seed=456,transfer_types=['A'])
        self.muvt.set_fugacity('A', 100)
        run(100)
    def test_convex_polyhedron(self):
        self.mc = hpmc.integrate.convex_polyhedron(seed=10,max_verts=8);
        # 4 x 2 x 2 box expressed as its eight corner vertices.
        self.mc.shape_param.set("A", vertices=[(-2,-1,-1),
                                               (-2,1,-1),
                                               (-2,-1,1),
                                               (-2,1,1),
                                               (2,-1,-1),
                                               (2,1,-1),
                                               (2,-1,1),
                                               (2,1,1)]);
        self.muvt=hpmc.update.muvt(mc=self.mc,seed=456,transfer_types=['A'])
        self.muvt.set_fugacity('A', 100)
        run(100)
    def test_sphere_union(self):
        self.mc = hpmc.integrate.sphere_union(seed=10);
        # Dumbbell: two unit spheres offset +/-0.25 along x.
        self.mc.shape_param.set("A", diameters=[1.0, 1.0], centers=[(-0.25, 0, 0), (0.25, 0, 0)]);
        self.muvt=hpmc.update.muvt(mc=self.mc,seed=456,transfer_types=['A'])
        self.muvt.set_fugacity('A', 100)
        run(100)
    def test_polyhedron(self):
        self.mc = hpmc.integrate.polyhedron(seed=10);
        # Unit cube given explicitly as vertices plus quad faces.
        self.mc.shape_param.set('A', vertices=[(-0.5, -0.5, -0.5), (-0.5, -0.5, 0.5), (-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), \
                                               (0.5, -0.5, -0.5), (0.5, -0.5, 0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5)],\
                                 faces = [(7, 3, 1, 5), (7, 5, 4, 6), (7, 6, 2, 3), (3, 2, 0, 1), (0, 2, 6, 4), (1, 0, 4, 5)]);
        self.muvt=hpmc.update.muvt(mc=self.mc,seed=456,transfer_types=['A'])
        self.muvt.set_fugacity('A', 100)
        run(100)
    def test_faceted_sphere(self):
        self.mc = hpmc.integrate.faceted_sphere(seed=10);
        # Sphere of diameter 2 cut by six axis-aligned planes at offset -1.
        self.mc.shape_param.set("A", normals=[(-1,0,0),
                                              (1,0,0),
                                              (0,1,0,),
                                              (0,-1,0),
                                              (0,0,1),
                                              (0,0,-1)],
                                    offsets=[-1]*6,
                                    vertices=[(-1,-1,-1),(-1,-1,1),(-1,1,-1),(-1,1,1),(1,-1,-1),(1,-1,1),(1,1,-1),(1,1,1)],
                                    diameter=2,
                                    origin=(0,0,0));
        self.muvt=hpmc.update.muvt(mc=self.mc,seed=456,transfer_types=['A'])
        self.muvt.set_fugacity('A', 100)
        run(100)
    def test_spheropolyhedron(self):
        self.mc = hpmc.integrate.convex_spheropolyhedron(seed=10);
        self.mc.shape_param.set("A", vertices=[(-2,-1,-1),
                                               (-2,1,-1),
                                               (-2,-1,1),
                                               (-2,1,1),
                                               (2,-1,-1),
                                               (2,1,-1),
                                               (2,-1,1),
                                               (2,1,1)]);
        self.muvt=hpmc.update.muvt(mc=self.mc,seed=456,transfer_types=['A'])
        self.muvt.set_fugacity('A', 100)
        run(100)
    def test_ellipsoid(self):
        self.mc = hpmc.integrate.ellipsoid(seed=10);
        self.mc.shape_param.set('A', a=0.5, b=0.25, c=0.125);
        self.muvt=hpmc.update.muvt(mc=self.mc,seed=456,transfer_types=['A'])
        self.muvt.set_fugacity('A', 100)
        run(100)
# Pass an explicit argv so unittest does not parse the real command line
# (this script is launched under MPI, per the comment at the top).
if __name__ == '__main__':
    unittest.main(argv = ['test.py', '-v'])
| StarcoderdataPython |
class color():
    """An RGB triple stored as floats, with element-wise arithmetic.

    Display truncates each channel to an integer via %d formatting.
    """

    def __init__(self, r, g, b):
        self.r = float(r)
        self.g = float(g)
        self.b = float(b)

    def _format(self):
        # %d truncates the float channels to integers for display.
        return "rgb(%d, %d, %d)" % (self.r, self.g, self.b)

    def __str__(self):
        return self._format()

    def __repr__(self):
        return self._format()

    def __add__(self, other):
        return color(self.r + other.r, self.g + other.g, self.b + other.b)

    def __sub__(self, other):
        return color(self.r - other.r, self.g - other.g, self.b - other.b)

    def __mul__(self, other):
        # Multiply channel-wise by another color, or scale by a number.
        if isinstance(other, color):
            return color(self.r * other.r, self.g * other.g, self.b * other.b)
        factor = float(other)
        return color(self.r * factor, self.g * factor, self.b * factor)

    def __truediv__(self, other):
        # Divide channel-wise by another color, or by a scalar.
        if isinstance(other, color):
            return color(self.r / other.r, self.g / other.g, self.b / other.b)
        divisor = float(other)
        return color(self.r / divisor, self.g / divisor, self.b / divisor)
def lerp(v0, v1, t):
    """Linearly interpolate from v0 toward v1 by fraction t."""
    delta = v1 - v0
    return v0 + delta * t
def generate_color_map(colors):
    """Build CSS fill rules for 12 months by interpolating between colors.

    The palette wraps around (the first color is repeated at the end) and is
    resampled onto 13 evenly spaced stops; stop i styles CSS class
    ``month_{i+1}``.

    Args:
        colors: sequence of ``color`` instances (at least one). The caller's
            list is NOT modified.

    Returns:
        A string of ``.month_N {fill: rgb(...);}`` rules, one per line.
    """
    # Close the cycle on a copy: the original implementation appended to the
    # caller's list in place, which was a surprising side effect.
    palette = list(colors) + [colors[0]]
    result = [None] * 13
    result[0] = palette[0]
    result[12] = palette[0]
    last_idx = 0
    # range() instead of the Python-2-only xrange(); behaves identically
    # here and keeps the function Python-3 compatible.
    for i in range(1, len(palette)):
        # Map palette stop i onto the 0..12 month scale.
        idx = int(float(i) / (len(palette) - 1) * 12)
        for j in range(last_idx + 1, idx + 1):
            result[j] = lerp(palette[i], palette[i - 1], (1.0 - (j - last_idx) / float(idx - last_idx)))
        last_idx = idx
    out = ""
    for i in range(12):
        out += ".month_%d {fill: %s;}\n" % (i + 1, result[i])
    return out
| StarcoderdataPython |
391503 | <reponame>sgg10/CIFO
"""User models admin."""
# Django
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
# Models
from cifo.users.models import User
@admin.register(User)
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom User model."""

    # Columns shown on the user change-list page.
    list_display = (
        'identification',
        'email',
        'name',
        'operatorId',
        'operatorName',
        'is_verified'
    )
    # Fields covered by the admin search box.
    search_fields = ('email', 'name', 'identification', 'operatorName')
    # Sidebar filters. NOTE(review): 'created'/'modified' presumably come
    # from a timestamped base model — confirm against the User definition.
    list_filter = ('created', 'modified', 'is_verified')
1854041 | <gh_stars>10-100
import os
import pytest
def pytest_addoption(parser):
    """Register the ``-E`` environment filter and the ``--with-tox`` flag.

    ``--with-tox`` enables the environment-marker skipping implemented in
    ``pytest_runtest_setup``; without it, all tests run normally.
    """
    parser.addoption(
        "-E",
        action="store",
        metavar="NAME",
        help="only run tests matching the environment NAME.",
    )
    parser.addoption(
        "--with-tox",
        default=False,
        action="store_true",
        # Previously undocumented; a help string makes --help self-explanatory.
        help="enable skipping of env-marked tests based on available environments.",
    )
    # (The original ended with a bare `return`, which was dead noise.)
def pytest_configure(config):
    """Register the custom ``env(name)`` marker with pytest."""
    # register an additional marker
    config.addinivalue_line(
        "markers", "env(name): mark test to run only on named environment"
    )
def pytest_runtest_setup(item: pytest.Function):
    """Skip ``env``-marked tests whose environment is not available.

    Only active when the session was started with ``--with-tox``; otherwise
    this hook is a no-op and every test proceeds normally.
    """
    config = item.config
    with_tox = config.getoption("--with-tox")
    if not with_tox:
        return
    # Names passed to @pytest.mark.env("...") on this test item.
    envnames = [mark.args[0] for mark in item.iter_markers(name="env")]
    optional = os.environ.get("OPTIONAL_PACKAGES_AVAILABLE")
    if envnames:
        if not optional:
            # No environments advertised at all -> skip any env-marked test.
            # NOTE(review): pytest.skip() raises, so the `return` below is
            # unreachable; kept as-is for byte-compatibility.
            pytest.skip("test requires env in {!r}".format(envnames))
            return
    for env in envnames:
        # `optional` is the raw environment-variable string, so this is a
        # substring match rather than membership in a parsed list —
        # presumably intentional; verify the variable's expected format.
        if env not in optional:
            pytest.skip("test requires env in {!r}".format(envnames))
            return
| StarcoderdataPython |
6598514 | <filename>Python-code-snippets-001-100/054-Awesome GUI date picker.py
'''
54-Awesome GUI Date Picker
Modified (shambleized) from example code at:
https://github.com/j4321/tkcalendar#documentation
You may need to "pip install tkcalendar"
<NAME>, feb 16th 2019.
https://stevepython.wordpress.com
'''
import tkinter as tk
from tkinter import ttk
from tkcalendar import Calendar
# Single hidden Tk root window; the calendar itself opens in a Toplevel child.
ROOT = tk.Tk()
ROOT.withdraw()# hide naff extra window
ROOT.title('Please choose a date')
def pick_date_dialog():
    """Open a calendar dialog; clicking OK prints the selected date."""
    dialog = tk.Toplevel(ROOT)
    # The calendar defaults to today's date.
    cal = Calendar(
        dialog,
        font="Arial 10",
        background='darkblue',
        foreground='white',
        selectmode='day',
    )
    cal.grid()

    def print_sel():
        print(cal.get_date())

    ttk.Button(dialog, text="OK", command=print_sel).grid()
# Show the picker immediately, then hand control to the Tk event loop.
pick_date_dialog()
ROOT.mainloop()
| StarcoderdataPython |
250246 | <reponame>MaxGSEO/Entity-Swissknife
import requests
import textrazor
from google.cloud import language_v1
class TextRazorAnalyzer:
    """Entity/topic analysis backed by the TextRazor API."""

    def __init__(self, api_key):
        """Configure a TextRazor client with entity and topic extractors.

        Args:
            api_key (str): The API key for TextRazor.
        """
        textrazor.api_key = api_key
        client = textrazor.TextRazor(extractors=["entities", "topics"])
        client.set_classifiers(["textrazor_mediatopics"])
        client.set_cleanup_return_cleaned(True)
        self.client = client

    def analyze(self, text, is_url):
        """Analyze plain text or the contents of a URL.

        Args:
            text (str): The text to analyze (or a URL when ``is_url``).
            is_url (bool): Whether ``text`` is a URL.

        Returns:
            The TextRazor response object.
        """
        run_analysis = self.client.analyze_url if is_url else self.client.analyze
        return run_analysis(text)
class GoogleNLPAnalyzer:
    """Entity analysis backed by the Google Cloud Natural Language API."""

    def __init__(self, key):
        """Create the NLP client from service-account credentials.

        Args:
            key: Parsed service-account info accepted by
                ``LanguageServiceClient.from_service_account_info``.
        """
        self.client = language_v1.LanguageServiceClient.from_service_account_info(key)

    def analyze(self, text, is_url):
        """Run entity analysis on plain text or on a fetched URL.

        Args:
            text (str): The text to analyze, or a URL when ``is_url`` is true.
            is_url (bool): Whether ``text`` is a URL.

        Returns:
            The entity-analysis response, or None when a URL cannot be
            fetched.
        """
        # The two branches previously duplicated the Document/analyze calls;
        # only the content source and document type actually differ.
        if is_url:
            content = self.load_text_from_url(text)
            if not content:
                return None
            doc_type = language_v1.Document.Type.HTML
        else:
            content = text
            doc_type = language_v1.Document.Type.PLAIN_TEXT
        document = language_v1.Document(content=content, type_=doc_type)
        return self.client.analyze_entities(document=document)

    def load_text_from_url(self, url):
        """Fetch a URL and return its body text.

        Args:
            url (str): The URL to load text from.

        Returns:
            The response body as a string, or None on error, empty body, or
            non-200 status.
        """
        timeout = 20
        try:
            headers = {'User-Agent': 'My User Agent 1.0'}
            response = requests.get(url, headers=headers, timeout=timeout)
            text = response.text
            if response.status_code == 200 and text:
                return text
            return None
        except Exception as e:
            # Best-effort fetch: report and return None rather than letting a
            # single bad URL abort the whole analysis run.
            print(e)
            print('Problem with url: {0}.'.format(url))
            return None
| StarcoderdataPython |
77824 | <reponame>heinervdm/MysticMine
#!/usr/bin/env python
import random
import pygame
from koon.geo import Vec2D
import koon.geo as geo
from koon.gfx import SpriteFilm, Font, LoopAnimationTimer, PingPongTimer, Timer
from koon.res import resman
import pickups
import event
import tiles
class PickupView:
    """Base class for pickup renderers.

    Subclasses are expected to set ``self.model`` to the pickup they draw.
    This base tracks the last computed screen position (``pos``) and, while
    the pickup is mid-jump, the position the jump started from (``jump_pos``).
    """
    def __init__( self ):
        self.pos = None
        self.jump_pos = None
    def get_z( self ):
        # Sort key for draw order: far behind everything while unplaced,
        # otherwise derived from the on-screen y (pos.y + 64).
        if self.pos is None:
            return -999
        else:
            return self.pos.y + 64
    z = property( get_z )
    def get_pos( self, frame ):
        """Return this pickup's screen position for the frame, or None.

        None is returned when the model has no container, or the container
        exposes no views. While the model is jumping (``jump_cnt`` set), the
        position is interpolated from where the jump began toward the current
        anchor, arcing up to 30px at the jump's midpoint. Note: this method
        mutates ``self.pos`` (read by the ``z`` property) on every call.
        """
        self.pos = None
        if self.model.container is None or not hasattr( self.model.container, "views" ): return None
        self.pos = self.model.container.views[0].get_pickup_pos( frame )
        if self.model.jump_cnt is not None:
            # Remember where the jump began so later frames interpolate from
            # a fixed origin.
            if self.jump_pos is None:
                self.jump_pos = self.pos
            x = geo.lin_ipol( self.model.jump_cnt, self.jump_pos.x, self.pos.x )
            y = geo.lin_ipol( self.model.jump_cnt, self.jump_pos.y, self.pos.y )
            # Triangle height profile: rises until jump_cnt reaches 0.5,
            # then falls symmetrically.
            height = self.model.jump_cnt
            if self.model.jump_cnt > 0.5:
                height = 1.0 - self.model.jump_cnt
            self.pos = Vec2D( x, y - 30 * height)
        else:
            self.jump_pos = None
        return self.pos
class TorchView (PickupView):
    """Draws the torch pickup, 20px above its anchor point."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.torch_sprite").clone()
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class KeyView (PickupView):
    """Draws the animated key pickup, 20px above its anchor point."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        # Clone so advancing this view's frame does not affect other users of
        # the shared sprite resource (consistent with the other animated views).
        self.sprite = resman.get("game.key_sprite").clone()
        self.animTimer = LoopAnimationTimer( 25, 0, 19 )
    def draw( self, frame ):
        if self.get_pos( frame ) is not None:
            # Bugfix: the animation timer was created but never applied in
            # draw(), leaving the key frozen on frame 0. Every other animated
            # view in this file advances sprite.nr like this.
            self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
            self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class MirrorView (PickupView):
    """Draws the animated mirror pickup, 10px above its anchor point."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.mirror_sprite").clone()
        self.animTimer = LoopAnimationTimer( 25, 0, 9 )
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 10) )
class OilerView (PickupView):
    """Draws the oiler pickup while it still sits on a tile (not carried)."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.oiler_sprite").clone()
    def draw( self, frame ):
        # get_pos is always called first: it refreshes self.pos for z-order.
        pos = self.get_pos( frame )
        if pos is None or self.model.goldcar is not None:
            return  # hidden once a goldcar picks it up
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class MultiplierView (PickupView):
    """Draws the score multiplier as a red "x2" label."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        # The font is fixed configuration; previously it was re-created on
        # every draw call, so construct it once here instead.
        self.font = Font(size = 28, color = (255,0,0))
    def draw( self, frame ):
        """Render "x2" at the pickup position (lowered 20px while carried)."""
        if self.get_pos( frame ) is None: return
        pos = self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET)
        if self.model.goldcar is not None:
            pos += Vec2D(0, 20)
        self.font.draw("x2", frame.surface, pos.get_tuple(), Font.CENTER, Font.MIDDLE)
class BalloonView (PickupView):
    """Draws the balloon pickup, 20px above its anchor point."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        # Shared (un-cloned) sprite: this view never mutates frame state.
        self.sprite = resman.get("game.balloon_sprite")
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class GhostView (PickupView):
    """Draws the ghost pickup while it still sits on a tile (not carried)."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.ghost_sprite").clone()
    def draw( self, frame ):
        # get_pos is always called first: it refreshes self.pos for z-order.
        pos = self.get_pos( frame )
        if pos is None or self.model.goldcar is not None:
            return  # hidden once a goldcar picks it up
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class CopperCoinView (PickupView):
    """Draws the spinning copper coin, starting at a random animation frame."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.copper_sprite").clone()
        frame_count = self.sprite.max_x
        self.animTimer = LoopAnimationTimer( 25, 0, frame_count )
        # Random start frame so coins do not all spin in lockstep.
        self.animTimer.set_frame( 0, random.randint(0, frame_count - 1) )
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class GoldBlockView (PickupView):
    """Draws the animated gold block at its anchor point."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.gold_sprite").clone()
        self.animTimer = LoopAnimationTimer( 25, 0, 15 )
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class RockBlockView (PickupView):
    """Draws the animated rock block, 10px below its anchor point."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.rock_sprite").clone()
        self.animTimer = LoopAnimationTimer( 25, 0, 15 )
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET + 10) )
class DiamondView (PickupView):
    """Draws the animated diamond at its anchor point."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.diamond_sprite").clone()
        self.animTimer = LoopAnimationTimer( 25, 0, 4 )
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class DynamiteView (PickupView):
    """Draws the burning dynamite: a flickering stick plus fuse sparkles."""
    class Sparkle:
        """A single short-lived fuse spark with simple gravity physics."""
        def __init__( self, pos ):
            self.pos = pos
            # Lifetime in game ticks; randint-like jitter of up to ~2 ticks.
            self.life = 10 + int(random.random() * 2)
            # Initial velocity: random sideways spread, always upward.
            self.move = Vec2D( random.uniform( -2.5, 2.5 ), random.uniform( -2.5, 0.0 ) )
            self.surf = resman.get("game.sparkle_surf")
            width, height = self.surf.get_size()
            self.center = Vec2D( width/2, height/2 )
        def game_tick( self ):
            self.life -= 1
            self.pos += self.move
            # Gravity: accelerate downward a little each tick.
            self.move.y += 0.1
        def is_dead( self ):
            return self.life <= 0
        def draw( self, frame ):
            pos = self.pos + self.center + Vec2D( frame.X_OFFSET, frame.Y_OFFSET )
            self.surf.draw( frame.surface, pos )
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.dynamite_sprite").clone()
        # Direction the flicker animation is currently stepping (+1/-1).
        self.sprite_delta = 1
        # Last model.life value for which a flicker frame was emitted.
        self.prev_life = 1.0
        w, h = self.sprite.get_size()
        # Fixed offset of the fuse's base relative to the sprite position.
        self.sparkle_offset = Vec2D( 7, -h + 24 )
        # Direction/length along which the spark point travels as the fuse
        # burns down (scaled by model.life in sparkle_tick).
        self.sparkle_line = Vec2D( 0, -22 )
        self.sparkles = []
        self.sparkle_timer = Timer( 25 )
    def draw( self, frame ):
        if self.get_pos(frame) is None: return
        # no time... must implement... bad code...
        # Near the end of the fuse (life below 18 DEC steps) ping-pong the
        # sprite between frames 0..3 each time model.life changes, and emit
        # a tick sound event.
        if self.model.life < pickups.Dynamite.DEC * 18 and\
           self.model.life != self.prev_life:
            self.prev_life = self.model.life
            self.sprite.nr += self.sprite_delta
            if self.sprite.nr < 0:
                self.sprite.nr = 0
                self.sprite_delta = 1
            elif self.sprite.nr >= 4:
                self.sprite.nr = 3
                self.sprite_delta = -1
            event.Event.dynamite_tick()
        # Advance sparkle simulation at the sparkle_timer's fixed rate,
        # catching up if several ticks elapsed since the last frame.
        while self.sparkle_timer.do_tick( frame.time_sec ):
            self.sparkle_tick( frame )
        self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D( frame.X_OFFSET, frame.Y_OFFSET ) )
        for sparkle in self.sparkles:
            sparkle.draw( frame )
    def sparkle_tick( self, frame ):
        """One fixed-rate step: spawn sparks (while fuse remains) and age all."""
        if self.model.life > pickups.Dynamite.DEC * 18:
            # Spark point slides along the fuse as life decreases.
            for i in range(3):
                pos = self.get_pos(frame) + self.sparkle_offset + self.sparkle_line * self.model.life
                self.sparkles.append( DynamiteView.Sparkle( pos ) )
        # Age every sparkle and keep only the live ones.
        new_sparkles = []
        for sparkle in self.sparkles:
            sparkle.game_tick()
            if not sparkle.is_dead():
                new_sparkles.append( sparkle )
        self.sparkles = new_sparkles
class LampView (PickupView):
    """Draws the (static, unanimated) lamp pickup at its anchor point."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.lamp_sprite").clone()
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class AxeView (PickupView):
    """Draws the axe pickup with a back-and-forth (ping-pong) animation."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.axe_sprite").clone()
        self.animTimer = PingPongTimer( 25, 0, 8 )
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class FlagView (PickupView):
    """Draws the owning goldcar's flag, waving 20px above its anchor."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        # Flag artwork is selected by the owning goldcar's number (1-based).
        self.sprite = resman.get("game.flag%d_sprite" % (model.goldcar.nr+1))
        self.animTimer = LoopAnimationTimer( 20, 0, 8 )
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class LeprechaunView (PickupView):
    """Draws the (static) leprechaun pickup, 20px above its anchor point."""
    def __init__( self, model ):
        PickupView.__init__( self )
        self.model = model
        self.sprite = resman.get("game.leprechaun_sprite").clone()
    def draw( self, frame ):
        pos = self.get_pos( frame )
        if pos is None:
            return
        self.sprite.draw( frame.surface, pos + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
| StarcoderdataPython |
4956915 | <gh_stars>0
import numpy as np
import pandas as pd
import timely_beliefs as tb
from process_analytics.utils import data_utils
from process_analytics.utils import forecast_utils
def naive_forecaster(
    current_bdf: tb.BeliefsDataFrame,
    current_time: pd.Timestamp,
    FREQ: str,
    MAX_FORECAST_HORIZON_HOURS: int,
) -> tb.BeliefsDataFrame:
    """Naive (persistence) forecaster: repeat the latest known measurement
    for every hour of the forecast horizon."""
    source = tb.BeliefSource("Naive forecaster")
    # Latest measurement known at current time.
    # (Equivalent to current_bdf.tail(1) when the index is sorted by
    # event_start — kept as an explicit cross-section for clarity.)
    latest_event_start = current_time - pd.Timedelta(FREQ)
    latest = current_bdf.xs(latest_event_start, level="event_start", drop_level=False)
    repeated_values = np.repeat(
        latest["event_value"].values, MAX_FORECAST_HORIZON_HOURS
    )
    return forecast_utils.forecasts_to_beliefs(
        forecasts=repeated_values,
        sensor=current_bdf.sensor,
        forecaster=source,
        current_time=current_time,
    )
| StarcoderdataPython |
6563366 | <gh_stars>1-10
#!/usr/bin/env python3 -B
import unittest
from cromulent import vocab
from tests import TestSalesPipelineOutput, classified_identifiers
# Enable cromulent's attribute-assignment checking for all tests below.
vocab.add_attribute_assignment_check()
class PIRModelingTest_AR89(TestSalesPipelineOutput):
    def test_modeling_ar89(self):
        '''
        AR-89: Naming of groups representing an uncertain member
        '''
        output = self.run_pipeline('ar89')
        key = (
            'tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:'
            'sales#PROV,B-183,1810-10-26,0050-SellerGroup'
        )
        group = output['model-groups'][key]
        # The seller group must carry exactly one name, spelling out both
        # uncertain candidates joined by "OR".
        self.assertIn('identified_by', group)
        names = group['identified_by']
        self.assertEqual(len(names), 1)
        self.assertEqual(names[0]['content'], 'Molo, Pierre-Louis de OR Blaere, <NAME>')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
224361 | import http
from openbrokerapi.service_broker import LastOperation, OperationState
from tests import BrokerTestCase
class LastBindingOperationTest(BrokerTestCase):
    """Exercises the service-binding last_operation endpoint."""

    def setUp(self):
        self.broker.service_id.return_value = 'service-guid-here'

    def _get_last_operation(self, query=None):
        """GET the binding last_operation endpoint, optionally with a query."""
        url = '/v2/service_instances/here-instance_id/service_bindings/binding_id/last_operation'
        if query:
            url = url + '?' + query
        return self.client.get(
            url,
            headers={
                'X-Broker-Api-Version': '2.13',
                'Authorization': self.auth_header
            })

    def test_last_operation_called_just_with_required_fields(self):
        self.broker.last_binding_operation.return_value = LastOperation(OperationState.IN_PROGRESS, 'Running...')
        self._get_last_operation()
        self.broker.last_binding_operation.assert_called_once_with('here-instance_id', 'binding_id', None, None, None)

    def test_last_operation_called_with_operation_data(self):
        self.broker.last_binding_operation.return_value = LastOperation(OperationState.IN_PROGRESS, 'Running...')
        self._get_last_operation('service_id=&plan_id=456&operation=service-guid-here%20operation-data')
        self.broker.last_binding_operation.assert_called_once_with('here-instance_id', 'binding_id',
                                                                   'service-guid-here operation-data', "", "456")

    def test_returns_200_with_given_state(self):
        self.broker.last_binding_operation.return_value = LastOperation(OperationState.IN_PROGRESS, 'Running...')
        response = self._get_last_operation('service_id=123&plan_id=456&operation=service-guid-here%20operation-data')
        self.broker.last_binding_operation.assert_called_once_with('here-instance_id', 'binding_id',
                                                                   'service-guid-here operation-data', "123", "456")
        self.assertEqual(response.status_code, http.HTTPStatus.OK)
        self.assertEqual(response.json, dict(
            state=OperationState.IN_PROGRESS.value,
            description='Running...'
        ))
| StarcoderdataPython |
27744 | from ..utils.util import ObnizUtil
class ObnizMeasure:
    """Wrapper around the obniz "measure" module (echo/pulse measurement)."""

    def __init__(self, obniz):
        self.obniz = obniz
        self._reset()

    def _reset(self):
        # Callbacks waiting for an echo measurement result, in request order.
        self.observers = []

    def echo(self, params):
        """Start an echo measurement; params mirror the obniz protocol keys."""
        err = ObnizUtil._required_keys(
            params, ["io_pulse", "pulse", "pulse_width", "io_echo", "measure_edges"]
        )
        if err:
            raise Exception(
                "Measure start param '" + err + "' required, but not found "
            )
        self.params = ObnizUtil._key_filter(
            params,
            [
                "io_pulse",
                "pulse",
                "pulse_width",
                "io_echo",
                "measure_edges",
                "timeout",
                "callback",
            ],
        )
        echo = {
            "io_pulse": self.params["io_pulse"],
            "pulse": self.params["pulse"],
            "pulse_width": self.params["pulse_width"],
            "io_echo": self.params["io_echo"],
            "measure_edges": self.params["measure_edges"],
        }
        # Forward timeout only when it is exactly an int (the exact-type
        # check also rejects bool, which is an int subclass).
        if type(self.params.get("timeout")) is int:
            echo["timeout"] = self.params["timeout"]
        self.obniz.send({"measure": {"echo": echo}})
        if "callback" in self.params:
            self.observers.append(self.params["callback"])

    def notified(self, obj):
        """Dispatch an incoming echo result to the oldest waiting callback."""
        if self.observers:
            callback = self.observers.pop(0)
            callback(obj["echo"])
| StarcoderdataPython |
4880237 | <gh_stars>1-10
from django import forms
class SamlRequestForm(forms.Form):
    """Minimal form carrying a hidden ``next`` value through a SAML request."""

    # Hidden field; NOTE(review): 'next' is the conventional Django redirect
    # parameter — confirm against the view that renders this form.
    next = forms.CharField(widget=forms.HiddenInput)
| StarcoderdataPython |
204777 | """
Created on Tue May 14, 2019
@author: <NAME>
"""
import tensorflow as tf
import gc
from parameters import *
from process_data_text import *
from process_data_audio import *
from process_data_multimodal import *
from model_text import *
from model_multimodal_attention import *
from evaluate_multimodal_attention import *
if __name__ == '__main__':
    # instantiating all of the data handlers
    text_data_handler = ProcessDataText(data_path)
    audio_data_handler = ProcessDataAudio(data_path)
    multi_data_handler = ProcessDataMultimodal(data_path, text_data_handler, audio_data_handler)
    # The audio handler is only needed to build the multimodal handler;
    # release it to reduce peak memory before the big splits below.
    del audio_data_handler
    gc.collect()
    # splitting the data into training, validation and test sets
    train_text_data, train_audio_data, train_labels, test_text_data, test_audio_data, test_labels, val_text_data, \
    val_audio_data, val_labels = multi_data_handler.split_train_test(prop_train=0.8, prop_test=0.05)
    # converting the labels to the one-hot format
    train_labels = text_data_handler.label_one_hot(label=train_labels, num_categories=num_categories)
    test_labels = text_data_handler.label_one_hot(label=test_labels, num_categories=num_categories)
    val_labels = text_data_handler.label_one_hot(label=val_labels, num_categories=num_categories)
    # creating the datasets (placeholders feed the training split at run time)
    text_placeholder = tf.placeholder(tf.int32, shape=[None, train_text_data.shape[1]], name='text_input_placeholder')
    audio_placeholder = tf.placeholder(tf.float32, shape=[None, train_audio_data.shape[1]],
                                       name='audio_input_placeholder')
    label_placeholder = tf.placeholder(tf.float32, shape=[None, num_categories], name='labels_placeholder')
    train_iterator, test_iterator, val_iterator, text_input, audio_input, label_batch, handle = \
        multi_data_handler.create_datasets(text_placeholder, audio_placeholder, label_placeholder,
                                           tf.cast(test_text_data, dtype=tf.int32),
                                           tf.cast(test_audio_data, dtype=tf.float32),
                                           tf.cast(test_labels, dtype=tf.float32),
                                           tf.cast(val_text_data, dtype=tf.int32),
                                           tf.cast(val_audio_data, dtype=tf.float32),
                                           tf.cast(val_labels, dtype=tf.float32), batch_size, num_epochs)
    del multi_data_handler
    gc.collect()
    # creating the multimodal model with attention
    multimodal_model = MultimodalAttentionModel(text_input, label_batch, batch_size, num_categories, learning_rate,
                                                text_data_handler.dict_size, hidden_dim_text, num_layers_text,
                                                dr_prob_text, multimodal_model_status, audio_input, num_filters_audio,
                                                filter_lengths_audio, n_pool_audio, train_audio_data.shape[1],
                                                dr_prob_audio, hidden_dim_audio, num_layers_audio)
    multimodal_model.build_graph()
    # evaluation object
    evaluator = EvaluateMultimodalAttention()
    # training the model
    with tf.Session() as sess:
        # initializing the global variables
        sess.run(tf.global_variables_initializer())
        # writing the graph (separate summary writers for train and val)
        writer_train = tf.summary.FileWriter('../graphs/graph_train', sess.graph)
        writer_val = tf.summary.FileWriter('../graphs/graph_val', sess.graph)
        # training loop
        print("Training...")
        # initializing iterator with the training data
        sess.run(train_iterator.initializer, feed_dict={text_placeholder: train_text_data,
                                                        audio_placeholder: train_audio_data,
                                                        label_placeholder: train_labels})
        # creating the training, testing and validation handles (to switch between datasets)
        train_handle = sess.run(train_iterator.string_handle())
        test_handle = sess.run(test_iterator.string_handle())
        val_handle = sess.run(val_iterator.string_handle())
        # loading pre-trained embedding vector to placeholder
        sess.run(multimodal_model.embedding_init, feed_dict={multimodal_model.embedding_GloVe:
                                                                 text_data_handler.get_glove()})
        batch_count = 1
        # keeping track of the best training and validation accuracies
        best_train_accuracy = 0
        best_val_accuracy = 0
        del text_data_handler
        gc.collect()
        # feeding the batches to the model until the input pipeline runs out
        # of epochs (signalled via tf.errors.OutOfRangeError below)
        while True:
            try:
                _, accuracy, loss, summary = sess.run(
                    [multimodal_model.optimizer, multimodal_model.accuracy, multimodal_model.loss,
                     multimodal_model.summary_op],
                    feed_dict={handle: train_handle})
                writer_train.add_summary(summary, global_step=multimodal_model.global_step.eval())
                print('Batch: ' + str(batch_count) + ' Loss: {:.4f}'.format(loss) +
                      ' Training accuracy: {:.4f}'.format(accuracy))
                # saving the best training accuracy so far
                if accuracy > best_train_accuracy:
                    best_train_accuracy = accuracy
                batch_count += 1
                # evaluating on the validation set every 150 batches
                if batch_count % 150 == 0:
                    # calculating the accuracy on the validation set
                    val_accuracy = evaluator.evaluate_multi_model_val(sess, multimodal_model, val_iterator, handle,
                                                                      val_handle, writer_val)
                    # saving the best validation accuracy so far
                    if val_accuracy > best_val_accuracy:
                        best_val_accuracy = val_accuracy
            except tf.errors.OutOfRangeError:
                print('End of training')
                print('Best training accuracy: {:.4f}'.format(best_train_accuracy))
                print('Best validation accuracy: {:.4f}'.format(best_val_accuracy))
                # evaluating on the test set
                test_accuracy = evaluator.evaluate_multi_model_test(sess, multimodal_model, test_iterator, handle,
                                                                    test_handle)
                break
        # saving the final multimodal model
        saver = tf.train.Saver()
        saver.save(sess, '../pretrained-models/pt_multimodal_model')
1961768 | <reponame>Ali-Nawed/RLProjects
from simulation_code.simulation_logic import *
if __name__ == '__main__':
runAndSaveSimulation()
| StarcoderdataPython |
12803207 | from forecaster.mediate.telegram import TelegramMediator
from forecaster.utils import get_conf
from raven import Client
def test_validate_tokens():
    """Check that the configured Telegram and Sentry tokens are accepted."""
    tokens = get_conf()['TOKENS']
    mediator = TelegramMediator(tokens['telegram'], None)
    mediator.bot.getMe()  # contacts Telegram to verify the bot token
    Client(dsn=tokens['sentry'])  # constructing the client validates the DSN
| StarcoderdataPython |
1798126 | # -*- coding: utf-8 -*-
from .xmlquery import XMLQuery
class Parser(object):
    """Base parser: subclasses override load()/run(); parse() chains them."""

    def __init__(self):
        pass

    def load(self, filename):
        """Hook: read the given file. No-op in the base class."""
        pass

    def run(self):
        """Hook: process loaded content. Returns None in the base class."""
        pass

    def parse(self, filename):
        """Convenience wrapper: load the file, then run and return the result."""
        self.load(filename)
        return self.run()
class XMLParser(Parser):
    """Parser backed by XMLQuery: opens an XML file and returns it as a dict."""

    def __init__(self):
        super(XMLParser, self).__init__()
        self._query = XMLQuery()

    def load(self, filename):
        """Open the XML document with the underlying XMLQuery."""
        self._query.open(filename)

    def run(self):
        """Return the previously loaded document as a dictionary."""
        return self._query.toDict()
154512 | <gh_stars>1-10
# coding: utf-8
# 学習したSSDモデルにinput_xを追加して、./model/以下に保存する
import os
import tensorflow as tf
slim = tf.contrib.slim
import sys
sys.path.append('/home/ubuntu/notebooks/github/SSD-Tensorflow/')
sys.path.append('../')
from nets import ssd_vgg_300
from preprocessing import ssd_vgg_preprocessing
from lib.ssd_params import *
# Checkpoint locations relative to this script.
MODEL_DIR=os.path.abspath(os.path.dirname(__file__))+"/../output"
OUTPUT_MODEL_DIR=os.path.abspath(os.path.dirname(__file__))+"/../model"
# TensorFlow session: grow memory when needed. TF, DO NOT USE ALL MY GPU MEMORY!!!
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
isess = tf.InteractiveSession(config=config)
# Input placeholder, named 'input_x' so the re-saved model exposes it.
net_shape = (300, 300)
data_format = 'NHWC'
img_input = tf.placeholder(tf.int32, shape=(None, None, 3),name='input_x')
# Evaluation pre-processing: resize to SSD net shape.
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
    img_input, None, None, net_shape, data_format, resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
image_4d = tf.expand_dims(image_pre, 0)
# Define the SSD model (reuse variables if the net was already built).
reuse = True if 'ssd_net' in locals() else None
ssd_net = ssd_vgg_300.SSDNet(params=ssd_params)
with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
    predictions, localizations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)
# Restore SSD model.
checkpoint = tf.train.get_checkpoint_state(MODEL_DIR)
if checkpoint:
    # Get the path of the most recently saved model from the checkpoint file.
    ckpt_filename = checkpoint.model_checkpoint_path
    print("load {0}".format(ckpt_filename))
else:
    # Fall back to a fixed checkpoint when no checkpoint state is found.
    #ckpt_filename = '../checkpoints/ssd_300_vgg.ckpt'
    #ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'
    ckpt_filename = os.path.abspath(os.path.dirname(__file__))+'/../output/model.ckpt-7352'
isess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)
def print_graph_operations(graph):
    """Print every operation in the graph together with its output tensors."""
    print("----- operations in graph -----")
    for op in graph.get_operations():
        print("{0} {1}".format(op.name, op.outputs))
def print_graph_nodes(graph_def):
    """Dump every node proto contained in a TF GraphDef."""
    print("----- nodes in graph_def -----")
    for node_def in graph_def.node:
        print(node_def)
# Dump the default graph so its operations can be inspected.
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
# print operations
print_graph_operations(graph)
# Re-save the restored weights under OUTPUT_MODEL_DIR for later freezing/serving.
saver.save(isess, OUTPUT_MODEL_DIR + '/model.ckpt')
| StarcoderdataPython |
11281927 | import pandas as pd
import numpy as np
import os
import csv
from tqdm import tqdm
import argparse
from glob import glob
import faiss
from multiprocessing import Pool, cpu_count
from math import ceil
def train_embedding_to_gpt2_data(
        data_path='qa_embeddings/bertffn_crossentropy.pkl',
        output_path='gpt2_train_data/bertffn_crossentropy.csv',
        number_samples=10,
        batch_size=512
):
    """Build a GPT-2 training CSV by pairing each QA row with its nearest neighbours.

    For every question embedding, the nearest answers and nearest questions are
    looked up with FAISS (inner product over L2-normalised vectors, i.e. cosine
    similarity), and one CSV row is written per QA pair: the pair itself
    followed by the retrieved neighbour question/answer texts.

    :param data_path: pickle file holding a DataFrame with columns
        'question', 'answer', 'Q_FFNN_embeds', 'A_FFNN_embeds'.
    :param output_path: CSV file to create (parent directories are created).
    :param number_samples: number of neighbour QA columns per row; half are
        retrieved from the answer index and half from the question index.
    :param batch_size: how many query embeddings to search per FAISS call.
    """
    qa = pd.read_pickle(data_path)
    question_bert = np.array(qa["Q_FFNN_embeds"].tolist()).astype('float32')
    answer_bert = np.array(qa["A_FFNN_embeds"].tolist()).astype('float32')
    # Inner product over L2-normalised vectors == cosine similarity.
    answer_index = faiss.IndexFlatIP(answer_bert.shape[-1])
    question_index = faiss.IndexFlatIP(question_bert.shape[-1])
    faiss.normalize_L2(question_bert)
    faiss.normalize_L2(answer_bert)
    answer_index.add(answer_bert)
    question_index.add(question_bert)

    def top_k_for_gpt2(start_ind, end_ind, topk):
        # Nearest answers and nearest questions for a batch of question queries.
        _, nearest_answers = answer_index.search(
            question_bert[start_ind:end_ind].astype('float32'), topk)
        _, nearest_questions = question_index.search(
            question_bert[start_ind:end_ind].astype('float32'), topk)
        return nearest_answers, nearest_questions

    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    # newline='' is the documented requirement for csv.writer file handles;
    # the with-block also guarantees the file is closed on error.
    with open(output_path, "w", newline='') as output:
        writer = csv.writer(output)
        header = ['question', 'answer']
        for ii in range(0, number_samples):
            header.append('question' + str(ii))
            header.append('answer' + str(ii))
        writer.writerow(header)
        steps = ceil(qa.shape[0] / batch_size)
        for k in tqdm(range(0, qa.shape[0], batch_size), total=steps):
            a_batch_index, q_batch_index = top_k_for_gpt2(
                k, k + batch_size, number_samples // 2)
            # BUG FIX: the original wrote qa.iloc[k] (the batch START index) for
            # every row in the batch, so all rows of a batch shared one QA pair.
            # Enumerate to address each query row of the batch individually.
            for offset, (a_index, q_index) in enumerate(zip(a_batch_index, q_batch_index)):
                row_pos = k + offset
                rowfill = [qa["question"].iloc[row_pos], qa["answer"].iloc[row_pos]]
                nearest_a = qa.iloc[list(a_index), :]
                nearest_q = qa.iloc[list(q_index), :]
                # Interleave question/answer texts: q0, a0, q1, a1, ...
                rowfill += [*sum(zip(list(nearest_a['question']), list(nearest_a['answer'])), ())]
                rowfill += [*sum(zip(list(nearest_q['question']), list(nearest_q['answer'])), ())]
                writer.writerow(rowfill)
# Run with default paths when executed as a script.
if __name__ == "__main__":
    train_embedding_to_gpt2_data()
| StarcoderdataPython |
3222759 | <filename>qualifier/qualifier.py<gh_stars>0
from typing import Any, List, Optional
def make_table(rows: List[List[Any]], labels: Optional[List[Any]] = None, centered: bool = False) -> str:
    """Render rows of cells as a table drawn with box-drawing characters.

    Column widths fit the widest cell (or label) in each column; every cell
    gets one space of margin on the left and at least one on the right.

    BUG FIX vs. the original implementation: line terminators were chosen by
    comparing each cell's VALUE to the row's last cell, so any duplicate of
    the last value broke the layout (e.g. rows=[["a", "a"]]). Cells are now
    addressed by position.

    :param rows: rectangular list of rows, each a list of cells (str() applied)
    :param labels: optional header row followed by a separator line.
        An empty list is treated like no labels (the original emitted a
        malformed "├┤" separator for this degenerate input).
    :param centered: center cell text instead of left-aligning it
    :return: the complete table as a single string (no trailing newline)
    """
    widths = _column_widths(rows, labels)
    top = "┌" + "┬".join("─" * (w + 2) for w in widths) + "┐"
    bottom = "└" + "┴".join("─" * (w + 2) for w in widths) + "┘"
    body = "".join(_format_row(row, widths, centered) for row in rows)
    if labels:
        header = _format_row(labels, widths, centered)
        separator = "├" + "┼".join("─" * (w + 2) for w in widths) + "┤"
        return top + "\n" + header + separator + "\n" + body + bottom
    return top + "\n" + body + bottom


def _column_widths(rows: List[List[Any]], labels: Optional[List[Any]]) -> List[int]:
    """Widest str() length per column, also considering labels when present."""
    widths = [max(len(str(row[col])) for row in rows) for col in range(len(rows[0]))]
    if labels:
        for col, label in enumerate(labels):
            widths[col] = max(widths[col], len(str(label)))
    return widths


def _format_row(cells: List[Any], widths: List[int], centered: bool) -> str:
    """Format one table line, e.g. '│ a │ bb │\\n'."""
    parts = []
    for cell, width in zip(cells, widths):
        text = str(cell)
        slack = width - len(text)
        if centered:
            # (slack + 1) // 2 reproduces the original's even/odd padding rules.
            left = (slack + 1) // 2
            parts.append("│ " + " " * left + text + " " * (slack + 1 - left))
        else:
            parts.append("│ " + text + " " * (slack + 1))
    return "".join(parts) + "│\n"
| StarcoderdataPython |
3465802 | <gh_stars>1-10
from django.contrib import admin
from .models import Squad_Article,Newsletter
# Register your models here.
class Squad_Article_Admin(admin.ModelAdmin):
    """Changelist configuration for Squad_Article in the Django admin."""
    # Columns shown on the Squad_Article changelist page.
    list_display=["author","role","server","min_rank","min_kd","publish",]
admin.site.register(Squad_Article,Squad_Article_Admin)
admin.site.register(Newsletter)  # Newsletter uses the default ModelAdmin
3438453 | <reponame>zimolzak/wav-in-python
import numpy as np
import matplotlib.pyplot as plt
import wave # so we can refer to its classes in type hint annotations
from scipy import signal
from typing import Generator
import collections
from printing import pretty_hex_string, ints2dots
def bytes2int_list(byte_list: bytes) -> Generator[int, None, None]:
    """Decode byte pairs as little-endian 16-bit samples with the sign bit flipped.

    Each pair (lo, hi) becomes hi*256 + lo; values at or above 2**15 are then
    shifted down by 2**15 and values below it shifted up, which maps signed
    samples onto a monotonically ordered 0..65535 range. A trailing odd byte
    is ignored.

    :param byte_list: bytes object, typically straight from readframes()
    :return: yields decoded integers 0 to 65535
    """
    midpoint = 2 ** 15
    for hi_pos in range(1, len(byte_list), 2):
        raw = 256 * byte_list[hi_pos] + byte_list[hi_pos - 1]
        yield raw - midpoint if raw >= midpoint else raw + midpoint
def run_length_to_bitstream(rl: np.ndarray, values: np.ndarray, v_high: int, v_low: int) -> np.ndarray:
    """Expand a run-length encoding, mapping v_high -> 1 and v_low -> 0.

    Values equal to neither v_high nor v_low pass through unchanged, e.g.
    rl=[1,2,1,1,1], values=[7,1,7,1,5] with v_high=7, v_low=1 yields
    [1, 0, 0, 1, 0, 5].

    :param rl: run lengths (integers; np.repeat requires ints)
    :param values: value of each run; same shape as rl
    :param v_high: signal level decoded as logic 1
    :param v_low: signal level decoded as logic 0
    :return: the re-expanded stream
    :raises ValueError: if rl and values differ in shape
    """
    rl = np.asarray(rl)  # tolerate plain Python lists
    values = np.asarray(values)
    if rl.shape != values.shape:
        raise ValueError("rl and values shapes unequal: %s %s" % (str(rl.shape), str(values.shape)))
    mapped = np.where(values == v_high, 1, np.where(values == v_low, 0, values))
    return np.repeat(mapped, rl)
def square_up(a: np.ndarray, v_high: int, v_low: int, tolerance: int = 1) -> np.ndarray:
    """Snap values within +/- tolerance of v_high (or v_low) exactly onto it.

    Turns a nearly square wave into a perfectly square one; values outside
    both tolerance bands are left untouched, e.g.
    [1 1 1 1 2 7 7 7 7 6 7 7 7 5] -> [1 1 1 1 1 7 7 7 7 7 7 7 7 5].

    :param a: array of values (usually a time series)
    :param v_high: high level to snap to
    :param v_low: low level to snap to
    :param tolerance: maximum allowed nudge distance
    :raises ValueError: if the two snapping intervals overlap
    :return: array of squared-up values
    """
    if min(v_high + tolerance, v_low + tolerance) >= max(v_high - tolerance, v_low - tolerance):
        raise ValueError("Nudging intervals overlap: %f and %f +/- %f" % (v_low, v_high, tolerance))
    near_high = np.abs(a - v_high) <= tolerance
    near_low = np.abs(a - v_low) <= tolerance
    snapped = np.where(near_high, v_high, a)
    return np.where(near_low, v_low, snapped)
def rle(a: np.ndarray) -> tuple:
    """Run-length encode an array.

    :param a: array (or list) of values, presumably with repeated runs
    :return: (run_lengths, run_values) arrays, or (None, None) for empty input
    """
    arr = np.asarray(a)
    n = len(arr)
    if n == 0:
        return None, None
    # Positions where the value changes; the last index always ends a run.
    boundaries = np.where(arr[1:] != arr[:-1])
    run_ends = np.append(boundaries, n - 1)
    run_lengths = np.diff(np.append(-1, run_ends))
    return run_lengths, arr[run_ends]
class WaveData:
    """Wrap a Wave_read object with awareness of baud and its sample values."""

    def __init__(self, wav_file: wave.Wave_read,
                 start_sample: int = 0, n_symbols_to_read: int = 750, baud: int = 50) -> None:
        """Decode a portion of an open WAV file to bytes and integer samples.
        Example:
        W = WaveData(fh)
        W.int_list -> [32547, 32606, 32964, 33108, ...]
        :param wav_file: Object opened by wave.open() but not yet read
        :param start_sample: Where in the file to start reading
        :param n_symbols_to_read: How many FSK symbols to read. `None` to read whole file.
        :param baud: Rate of FSK symbols per second
        """
        self.wav_file = wav_file
        self.baud = baud
        # Derived and calculated vars
        self.sample_rate = wav_file.getframerate()
        self.bytes_per_sample = wav_file.getsampwidth()
        self.samples_per_symbol = self.sample_rate / baud
        if n_symbols_to_read is not None:
            n_samples_to_read = int(self.samples_per_symbol * n_symbols_to_read)
        else:
            n_samples_to_read = wav_file.getnframes()  # None => read the whole file
        # Read from file
        wav_file.setpos(start_sample)
        self.wav_bytes = wav_file.readframes(n_samples_to_read)  # important op, maybe catch exceptions?
        # Usual results. readframes may return fewer frames than requested near EOF,
        # so the "actually read" counts are derived from the returned byte length.
        self.n_samples_actually_read = len(self.wav_bytes) / self.bytes_per_sample
        self.n_symbols_actually_read = self.n_samples_actually_read / self.sample_rate * baud
        # NOTE(review): bytes2int_list assumes 2 bytes/sample, little-endian; that
        # only holds when getsampwidth() == 2 -- confirm for all expected inputs.
        self.int_list = list(bytes2int_list(self.wav_bytes))

    def print_summary(self, n_samples_to_plot: int = 15) -> None:
        """Show reasonable data and metadata from a WAV file, in plain text.
        :param n_samples_to_plot: How many WAV samples to display (as numbers and a text graph)
        """
        char_per_byte = 2  # That means hex chars. 1 B = 2 hex digits '01' or '0F' etc.
        n_bytes_to_plot = n_samples_to_plot * self.bytes_per_sample
        # objects for printing
        pretty_hex_list = list(pretty_hex_string(self.wav_bytes.hex()))
        dot_list = list(ints2dots(self.int_list))
        print("\n\n# WAV file information\n")
        print("Params:\n", self.wav_file.getparams())
        print()
        print("File duration (s) =", self.wav_file.getnframes() / self.sample_rate)
        print("Samples / FSK symbol =", self.samples_per_symbol)
        print("Bytes in %f FSK symbols =" % self.n_symbols_actually_read, len(self.wav_bytes))
        print("Seconds read =", self.n_samples_actually_read / self.sample_rate)
        print()
        print("First %i bytes (%i samples):" % (n_bytes_to_plot, n_samples_to_plot))
        print(self.wav_bytes[:n_bytes_to_plot])
        print()
        print(''.join(pretty_hex_list[:n_bytes_to_plot * char_per_byte]))  # pretty hex list
        print()
        print(self.int_list[:n_samples_to_plot])  # int list
        print()
        print('\n'.join(dot_list[:n_samples_to_plot]))  # dot list
class Fourier:
    """Short-time Fourier transform of WAV audio; tracks the strongest frequency bin over time."""

    def __init__(self, wave_data: WaveData, seg_per_symbol: int = 3) -> None:
        """Represent results of short-time Fourier transform applied to WAV audio, including spectrogram of max
        intensity frequency over time. Converts high-resolution sample time series to medium-resolution frequency
        time-series.
        Example:
        F = Fourier(W)
        F.max_freq_indices -> [1 1 7 6 7 7 7 7 1 1]
        ...where "1" means 600 Hz, and "7" means 1500 Hz.
        :param wave_data: Object containing list of WAV numeric samples to be processed.
        :param seg_per_symbol: How many FT segments are calculated for each FSK symbol.
        """
        self.n_symbols_actually_read = wave_data.n_symbols_actually_read
        samples_per_symbol = wave_data.sample_rate / wave_data.baud
        self.f, self.t, self.Zxx = signal.stft(wave_data.int_list, fs=wave_data.sample_rate,
                                               nperseg=int(samples_per_symbol / seg_per_symbol))  # important
        # Zxx's first axis is freq, second is times
        # NOTE(review): here Zxx is complex, so argmax compares complex values;
        # after apply_passband() it is magnitude -- confirm this pre-passband
        # argmax is ever relied upon.
        self.max_freq_indices = self.Zxx.argmax(0)  # Main output: vector of which freq band is most intense, per time
        # fixme - it is possible I don't understand the "nperseg" parameter.

    def apply_passband(self, lo_freq: float = 400, hi_freq: float = 2000) -> None:
        """Retain only certain rows (frequencies) in the FT and other result matrices/vectors.
        Also replaces Zxx with its magnitude and recomputes max_freq_indices.
        :param lo_freq: Lower cutoff frequency (below this will be blocked)
        :param hi_freq: Higher cutoff frequency
        """
        # Boolean AND of the two cutoff comparisons via elementwise multiply.
        selected_indices = ((lo_freq < self.f) * (self.f < hi_freq))
        self.f = self.f[selected_indices]
        self.Zxx = np.abs(self.Zxx[selected_indices])
        self.max_freq_indices = self.Zxx.argmax(0)

    def print_summary(self):
        """Show data/metadata on STFT results."""
        print("\n\n# Fourier analysis of FSK\n")
        print("Zxx (FFT result) shape, frequencies * time points:", self.Zxx.shape)
        print("FFT frequencies in pass band:", self.f)
        print("\nFrequency bin values over time:")
        print(self.max_freq_indices)

    def save_plot(self, filename: str) -> None:
        """Render a spectrogram of the complete STFT of WAV data.
        :param filename: Name of the image file where the plot will be saved
        """
        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.stft.html
        z_max = np.max(self.Zxx)  # global max just used for plot scale
        plt.pcolormesh(self.t, self.f, self.Zxx, vmin=0, vmax=z_max, shading='gouraud')
        plt.title('STFT Magnitude')
        plt.ylabel('Frequency [Hz]')
        plt.xlabel('Time [sec]')
        plt.savefig(filename)
        # plt.show()
# By spec: FSK shift of 850 Hz. Mine by inspection is about 581 Hz and 1431 Hz
# one symbol is about 450 - 470 samples by inspection
# calculated at 441 samples/symbol
# 11.62 cycles in a low freq symbol, 28.62 in high freq.
class Bitstream:
    """Collapse a frequency-bin time series into one 0/1 value per FSK symbol."""

    def __init__(self, fourier: Fourier) -> None:
        """Convert the medium-resolution frequency time series to low resolution bitstream (FSK symbol time series).
        Often input in fourier.max_freq_indices is like this:
        array([0, 7, 7, 7, 7, 7, 6, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7, 6, 1, 1, 1, 1, 1])
        B = Bitstream(F)
        B.stream -> [1, 0, 1, 0]
        :param fourier: Object containing array of max intensity frequency over time.
        """
        # elements (segments) per symbol is a critical param.
        # In theory, could try to auto-set from histogram(rl).
        # Now we auto-set by knowing N symbols read.
        # Could also pass this in from knowledge of FFT setup (but it was 2x as much, overlap?).
        self.n_symbols_actually_read = fourier.n_symbols_actually_read
        self.max_freq_indices = fourier.max_freq_indices  # Need to save these to print later.
        self.calculated_seg_per_symbol = len(self.max_freq_indices) / self.n_symbols_actually_read
        # Infer that the 2 most prevalent frequencies are mark and space
        h = np.histogram(self.max_freq_indices, bins=np.arange(15))  # Integer bins. Can ignore h[1].
        least_to_most = h[0].argsort()
        common_val_1 = least_to_most[-1]
        common_val_2 = least_to_most[-2]
        self.low = min(common_val_1, common_val_2)
        self.high = max(common_val_1, common_val_2)
        if (self.high - self.low) <= 1:
            # NOTE(review): the %i placeholders are never filled in -- the message
            # prints literally; should be % (self.high, self.low).
            raise ValueError("high %i and low %i are very close: not likely to process well")
        # Compress multiple FT segments into 1 symbol, and map mark/space frequencies to 0/1.
        rl, values = rle(square_up(self.max_freq_indices, self.high, self.low))
        npi = np.vectorize(int)
        rounded = npi(np.around(rl / self.calculated_seg_per_symbol))  # important - shortens all run lengths
        self.stream = run_length_to_bitstream(rounded, values, self.high, self.low)

    def print_summary(self):
        """Show reasonable data/metadata about the bitstream."""
        print("\n\n# Bitstream\n")
        print("Using %i segments / %i symbols = %f seg/sym" %
              (len(self.max_freq_indices), self.n_symbols_actually_read, self.calculated_seg_per_symbol))
        print("Inferred %i is high and %i is low (+/- 1)." % (self.high, self.low))
        print(self.stream)
        print("%i bits" % len(self.stream))
        print()

    def print_shapes(self, array_widths: collections.abc.Iterable) -> None:
        """Print bitstream reshaped in multiple ways. To look for start/stop bits.
        :param array_widths: list, range, or other iterable of matrix widths you want to try
        """
        # fixme - make an 8N1 and 5N1 decoder on B.stream
        # fixme - make guesses about B.stream width
        for n_columns in array_widths:
            # 5N1 = 7
            # 8N1 = 10
            if n_columns == 7:
                print("5N1")
            if n_columns == 10:
                print("8N1")
            n = len(self.stream)
            # NOTE(review): when n is an exact multiple of n_columns this still
            # appends a full extra row of zeros (n_padding == n_columns) --
            # confirm that is intended.
            n_padding = n_columns - (n % n_columns)
            padding = [0] * n_padding
            bitstream_padded = np.append(self.stream, padding)
            n_rows = len(bitstream_padded) // n_columns
            print(np.reshape(bitstream_padded, (n_rows, n_columns)))
            print()
def whole_pipeline(infile: str = 'sample-data.wav', outfile: str = 'plot_default.png',
                   start_sample: int = 0, n_symbols_to_read: int = None,
                   baud: int = 50, seg_per_symbol: int = 3,
                   pass_lo: int = 400, pass_hi: int = 2000) -> np.ndarray:
    """Run the full chain: WAV read -> STFT -> passband -> bitstream, with sensible defaults.

    Useful for main.py or for testing.

    :param infile: Name of input WAV file
    :param outfile: Name of output image file. Set to `None` to suppress all print & file output.
    :param start_sample: WAV file position to start reading
    :param n_symbols_to_read: Amount of FSK symbols to read. `None` means read it all.
    :param baud: Symbols per second, to help calculate duration of an FT window (segment)
    :param seg_per_symbol: Number of FT segments to compute for each FSK symbol
    :param pass_lo: Spectrum below this frequency (Hz) is ignored as neither mark nor space.
    :param pass_hi: Spectrum above this frequency (Hz) is ignored as neither mark nor space.
    :return: the decoded bit stream
    """
    with wave.open(infile, 'r') as wav_file:
        wave_data = WaveData(wav_file, start_sample, n_symbols_to_read, baud)
        fourier = Fourier(wave_data, seg_per_symbol)
        fourier.apply_passband(pass_lo, pass_hi)
        bitstream = Bitstream(fourier)
        if outfile is not None:
            # Diagnostics: text summaries plus a spectrogram image.
            wave_data.print_summary(n_samples_to_plot=15)
            fourier.print_summary()
            fourier.save_plot(outfile)
            bitstream.print_summary()
            bitstream.print_shapes(range(5, 12))
    return bitstream.stream
| StarcoderdataPython |
1915892 | from model import Generator, Discriminator
from torch.autograd import Variable
from torchvision.utils import save_image
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import time
import datetime
from sys import exit
from vgg import VGG16FeatureExtractor
from loss import VGGLoss
def weights_init(m):
    """Module initialiser for .apply(): N(0, 0.02) weights on conv layers,
    N(1, 0.02) weights and zero bias on BatchNorm/InstanceNorm layers.
    All other module types are left untouched."""
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_type or 'InstanceNorm' in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
def r1_reg(d_out, x_in):
    """R1 regulariser: zero-centered gradient penalty of d_out w.r.t. real inputs.

    Returns 0.5 * E[ ||grad_x D(x)||^2 ] over the batch.
    """
    batch_size = x_in.size(0)
    (grads,) = torch.autograd.grad(
        outputs=d_out.sum(), inputs=x_in,
        create_graph=True, retain_graph=True, only_inputs=True
    )
    grads_sq = grads.pow(2)
    assert(grads_sq.size() == x_in.size())
    return 0.5 * grads_sq.view(batch_size, -1).sum(1).mean(0)
class Solver(object):
    """Trains a Generator/Discriminator pair with adversarial (BCE) loss, R1
    gradient penalty, L1 reconstruction and VGG perceptual/style losses."""

    def __init__(self, data_loader, config):
        """Initialize configurations."""
        self.data_loader = data_loader
        self.config = config
        self.build_model(config)

    def build_model(self, config):
        """Create a generator and a discriminator."""
        self.G = Generator(config.g_conv_dim, config.d_channel, config.channel_1x1) # 2 for mask vector.
        self.D = Discriminator(config.crop_size, config.d_conv_dim, config.d_repeat_num)
        self.G.apply(weights_init)
        self.D.apply(weights_init)
        self.G.cuda()
        self.D.cuda()
        # Optimizers are built before the DataParallel wrap; the parameter
        # tensors are shared, so the optimizers still update the wrapped models.
        self.g_optimizer = torch.optim.Adam(self.G.parameters(), config.g_lr, [config.beta1, config.beta2])
        self.d_optimizer = torch.optim.Adam(self.D.parameters(), config.d_lr, [config.beta1, config.beta2])
        self.G = nn.DataParallel(self.G)
        self.D = nn.DataParallel(self.D)
        self.VGGLoss = VGGLoss().eval()
        self.VGGLoss.cuda()
        self.VGGLoss = nn.DataParallel(self.VGGLoss)

    def adv_loss(self, logits, target):
        """Adversarial loss: BCE of raw logits against an all-`target` tensor.

        :param logits: raw discriminator outputs (any shape)
        :param target: 1 for "real", 0 for "fake"
        """
        assert target in [1, 0]
        targets = torch.full_like(logits, fill_value=target)
        loss = F.binary_cross_entropy_with_logits(logits, targets)
        return loss

    def restore_model(self, resume_iters):
        """Restore the trained generator and discriminator."""
        # NOTE(review): self.model_save_dir is never assigned anywhere in this
        # class (__init__/build_model only set data_loader/config/models), so
        # this raises AttributeError unless external code sets it -- confirm.
        print('Loading the trained models from step {}...'.format(resume_iters))
        G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))
        D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(resume_iters))
        self.G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))
        self.D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))

    def reset_grad(self):
        """Reset the gradient buffers."""
        self.g_optimizer.zero_grad()
        self.d_optimizer.zero_grad()

    def update_lr(self, g_lr, d_lr):
        """Decay learning rates of the generator and discriminator."""
        for param_group in self.g_optimizer.param_groups:
            param_group['lr'] = g_lr
        for param_group in self.d_optimizer.param_groups:
            param_group['lr'] = d_lr

    def denorm(self, x):
        """Convert the range from [-1, 1] to [0, 1]."""
        out = (x + 1) / 2
        return out.clamp_(0, 1)

    def gradient_penalty(self, y, x):
        """Compute gradient penalty: (L2_norm(dy/dx) - 1)**2."""
        # NOTE(review): not referenced by train(); d_loss_reg uses r1_reg instead.
        weight = torch.ones(y.size()).cuda()
        dydx = torch.autograd.grad(outputs=y,
                                   inputs=x,
                                   grad_outputs=weight,
                                   retain_graph=True,
                                   create_graph=True,
                                   only_inputs=True)[0]
        dydx = dydx.view(dydx.size(0), -1)
        dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
        return torch.mean((dydx_l2norm-1)**2)

    def train(self):
        """Main loop: alternate one D update and one G update per batch, with
        periodic logging, LR decay, sample images and per-epoch checkpoints."""
        data_loader = self.data_loader
        config = self.config
        # Learning rate cache for decaying.
        g_lr = config.g_lr
        d_lr = config.d_lr
        # Label for lsgan
        # NOTE(review): real_target/fake_target and criterion are only used by
        # the commented-out LSGAN lines below; the active loss is self.adv_loss.
        real_target = torch.full((config.batch_size,), 1.).cuda()
        fake_target = torch.full((config.batch_size,), 0.).cuda()
        criterion = nn.MSELoss().cuda()
        # Start training.
        print('Start training...')
        start_time = time.time()
        iteration = 0
        num_iters_decay = config.num_epoch_decay * len(data_loader)
        for epoch in range(config.num_epoch):
            for i, (I_ori, I_gt, I_r, I_s) in enumerate(data_loader):
                # NOTE(review): this accumulates the batch index (0+1+2+...)
                # rather than counting iterations; `iteration` is unused below.
                iteration += i
                I_ori = I_ori.cuda(non_blocking=True)
                I_gt = I_gt.cuda(non_blocking=True)
                I_r = I_r.cuda(non_blocking=True)
                I_s = I_s.cuda(non_blocking=True)
                # =================================================================================== #
                #                             2. Train the discriminator                              #
                # =================================================================================== #
                # Compute loss with real images.
                # requires_grad on the reals is needed so r1_reg can differentiate D(I_gt) w.r.t. I_gt.
                I_gt.requires_grad_(requires_grad=True)
                out = self.D(I_gt)
                # d_loss_real = criterion(out, real_target) * 0.5
                d_loss_real = self.adv_loss(out, 1)
                d_loss_reg = r1_reg(out, I_gt)
                # Compute loss with fake images.
                I_fake = self.G(I_r, I_s)
                out = self.D(I_fake.detach())  # detach: no generator gradients in the D step
                # d_loss_fake = criterion(out, fake_target) * 0.5
                d_loss_fake = self.adv_loss(out, 0)
                # Backward and optimize.
                d_loss = d_loss_real + d_loss_fake + d_loss_reg
                self.reset_grad()
                d_loss.backward()
                self.d_optimizer.step()
                # Logging.
                loss = {}
                loss['D/loss_real'] = d_loss_real.item()
                loss['D/loss_fake'] = d_loss_fake.item()
                loss['D/loss_reg'] = d_loss_reg.item()
                # =================================================================================== #
                #                               3. Train the generator                                #
                # =================================================================================== #
                I_gt.requires_grad_(requires_grad=False)
                # if (i+1) % config.n_critic == 0:
                I_fake, g_loss_tr = self.G(I_r, I_s, IsGTrain=True)
                out = self.D(I_fake)
                # g_loss_fake = criterion(out, real_target)
                g_loss_fake = self.adv_loss(out, 1)
                g_loss_rec = torch.mean(torch.abs(I_fake - I_gt)) # Eq.(6)
                g_loss_prec, g_loss_style = self.VGGLoss(I_gt, I_fake)
                g_loss_prec *= config.lambda_perc
                g_loss_style *= config.lambda_style
                # Backward and optimize.
                g_loss = g_loss_fake + config.lambda_rec * g_loss_rec + config.lambda_tr * g_loss_tr + g_loss_prec + g_loss_style
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()
                # Logging.
                loss['G/loss_fake'] = g_loss_fake.item()
                loss['G/loss_rec'] = g_loss_rec.item()
                loss['G/loss_tr'] = g_loss_tr.item()
                loss['G/loss_prec'] = g_loss_prec.item()
                loss['G/loss_style'] = g_loss_style.item()
                # =================================================================================== #
                #                                 4. Miscellaneous                                    #
                # =================================================================================== #
                # Print out training information.
                if (i+1) % config.log_step == 0:
                    et = time.time() - start_time
                    et = str(datetime.timedelta(seconds=et))[:-7]
                    log = "Elapsed [{}], Epoch [{}/{}], Iteration [{}/{}], g_lr {:.5f}, d_lr {:.5f}".format(
                        et, epoch, config.num_epoch, i+1, len(data_loader),
                        g_lr, d_lr)
                    for tag, value in loss.items():
                        log += ", {}: {:.4f}".format(tag, value)
                    print(log)
                # Decay learning rates.
                # Runs once per BATCH (we are inside the inner loop), matching
                # num_iters_decay = num_epoch_decay * len(data_loader).
                if (epoch+1) > config.num_epoch_decay:
                    g_lr -= (config.g_lr / float(num_iters_decay))
                    d_lr -= (config.d_lr / float(num_iters_decay))
                    self.update_lr(g_lr, d_lr)
                    # print ('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
            # Translate fixed images for debugging.
            if (epoch+1) % config.sample_epoch == 0:
                with torch.no_grad():
                    I_fake_ori = self.G(I_ori, I_s)
                    # NOTE(review): torch.zeros(...) is created on CPU while the
                    # model runs on CUDA -- confirm DataParallel moves it, else
                    # this line raises a device-mismatch error.
                    I_fake_zero = self.G(torch.zeros(I_ori.size()), I_s)
                    sample_path = os.path.join(config.sample_dir, '{}.jpg'.format(epoch))
                    I_concat = self.denorm(torch.cat([I_ori, I_gt, I_r, I_fake, I_fake_ori, I_fake_zero], dim=2))
                    I_concat = torch.cat([I_concat, I_s.repeat(1,3,1,1)], dim=2)
                    save_image(I_concat.data.cpu(), sample_path)
                    print('Saved real and fake images into {}...'.format(sample_path))
            G_path = os.path.join(config.model_save_dir, '{}-G.ckpt'.format(epoch+1))
            torch.save(self.G.state_dict(), G_path)
            print('Saved model checkpoints into {}...'.format(config.model_save_dir))

    def test(self):
        """Translate images using StarGAN trained on a single dataset."""
        # NOTE(review): this method references attributes that are never set in
        # this class (test_iters, dataset, celeba_loader, rafd_loader, device,
        # c_dim, selected_attrs, create_labels, result_dir). It appears to be
        # copied from the StarGAN reference solver and is dead code here --
        # confirm before use.
        # Load the trained generator.
        self.restore_model(self.test_iters)
        # Set data loader.
        if self.dataset == 'CelebA':
            data_loader = self.celeba_loader
        elif self.dataset == 'RaFD':
            data_loader = self.rafd_loader
        with torch.no_grad():
            for i, (x_real, c_org) in enumerate(data_loader):
                # Prepare input images and target domain labels.
                x_real = x_real.to(self.device)
                c_trg_list = self.create_labels(c_org, self.c_dim, self.dataset, self.selected_attrs)
                # Translate images.
                x_fake_list = [x_real]
                for c_trg in c_trg_list:
                    x_fake_list.append(self.G(x_real, c_trg))
                # Save the translated images.
                x_concat = torch.cat(x_fake_list, dim=3)
                result_path = os.path.join(self.result_dir, '{}-images.jpg'.format(i+1))
                save_image(self.denorm(x_concat.data.cpu()), result_path, nrow=1, padding=0)
                print('Saved real and fake images into {}...'.format(result_path))
| StarcoderdataPython |
1867017 | <reponame>YuTao0310/deepLearning
# coding: UTF-8
"""
@author: <NAME>
"""
from torch.autograd import Variable
from torch.autograd import grad
import torch.autograd as autograd
import torch.nn as nn
import torch
import numpy as np
def gradient_penalty(x, y, f):
    """WGAN-GP penalty: ((||grad_z f(z)||_2 - 1)^2).mean() at a random
    interpolation z between x and y (alpha drawn per sample)."""
    # One random mixing coefficient per sample, broadcast over remaining dims.
    shape = [x.size(0)] + [1] * (x.dim() - 1)
    alpha = torch.rand(shape).to(x.device)
    mixed = x + alpha * (y - x)
    mixed = Variable(mixed, requires_grad=True).to(x.device)
    critic_out = f(mixed)
    grads = grad(critic_out, mixed,
                 grad_outputs=torch.ones(critic_out.size()).to(mixed.device),
                 create_graph=True)[0].view(mixed.size(0), -1)
    return ((grads.norm(p=2, dim=1) - 1) ** 2).mean()
def R1Penalty(real_img, f):
    """R1 penalty (StyleGAN-style): squared norm of the discriminator gradient
    at the REAL images, with power-of-two loss scaling applied before the
    backward pass and undone on the resulting gradients."""
    reals = Variable(real_img, requires_grad=True).to(real_img.device)
    ln2 = torch.Tensor([np.float32(np.log(2.0))]).to(real_img.device)
    # scale: x -> x * 2**x, computed as x * exp(x * ln 2)
    scaled_logit = torch.sum(f(reals))
    scaled_logit = scaled_logit * torch.exp(scaled_logit * ln2)
    grads = grad(scaled_logit, reals,
                 grad_outputs=torch.ones(scaled_logit.size()).to(reals.device),
                 create_graph=True)[0].view(reals.size(0), -1)
    # undo scaling on the gradients: g -> g * 2**(-g)
    grads = grads * torch.exp(-grads * ln2)
    return torch.sum(grads * grads)
def R2Penalty(fake_img, f):
    """R2 penalty: same construction as R1Penalty but evaluated at FAKE images
    (squared gradient norm with power-of-two loss scaling applied/undone)."""
    fakes = Variable(fake_img, requires_grad=True).to(fake_img.device)
    ln2 = torch.Tensor([np.float32(np.log(2.0))]).to(fake_img.device)
    # scale: x -> x * 2**x, computed as x * exp(x * ln 2)
    scaled_logit = torch.sum(f(fakes))
    scaled_logit = scaled_logit * torch.exp(scaled_logit * ln2)
    grads = grad(scaled_logit, fakes,
                 grad_outputs=torch.ones(scaled_logit.size()).to(fakes.device),
                 create_graph=True)[0].view(fakes.size(0), -1)
    # undo scaling on the gradients: g -> g * 2**(-g)
    grads = grads * torch.exp(-grads * ln2)
    return torch.sum(grads * grads)
| StarcoderdataPython |
3586640 | import pytest
from subprocess import check_output
from unittest.mock import Mock
from ..client_config import ClientConfig
def test_mariadb_server_logs_error_when_serverbin_invalid(mariadb_server):
    """An invalid mysqld binary path must be reported through log.error."""
    mocklog = Mock()
    server_bin = "invalid_mysqld"
    cfg = ClientConfig(mocklog, name="nonexistentcfg.json")  # default config
    # Give the kernel a wrong mysqld binary
    cfg.default_config.update({"server_bin": server_bin})
    mariadb_server(mocklog, cfg)
    mocklog.error.assert_any_call(f"No MariaDB Server found at {server_bin};")
def test_mariadb_server_starts_stops_mysqld_correctly(mariadb_server):
    """Starting the fixture spawns a mysqld process; stop() terminates it."""
    mocklog = Mock()
    cfg = ClientConfig(mocklog, name="nonexistentcfg.json")  # default config
    server = mariadb_server(mocklog, cfg)
    mocklog.info.assert_any_call("Started MariaDB server successfully")
    assert server.is_up() == True
    # Throws CalledProcessError when return value of pidof is non-zero,
    # i.e. this line doubles as "a mysqld process exists" assertion.
    check_output(["pidof", "mysqld"])
    # It's fine to call this here, mariadb_server fixture won't do any harm
    # when it calls server.stop() too
    server.stop()
    mocklog.info.assert_any_call("Stopped MariaDB server successfully")
    # Throws TimeoutExpired if the server didn't die
    server.server.wait(timeout=3)
    assert server.is_up() == False
3405317 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from evenz.events import observable, event
# @observable (from evenz) wires the @event-decorated methods into
# subscribable events (+= / -= handler syntax used by the tests below).
@observable
class Dog(object):
    """
    This is a dog that can bark. We can also listen for a 'barked' event.
    """
    __test__ = False  # Don't test the class (keep pytest from collecting it).

    def __init__(self, name: str):
        self.name = name

    def bark(self, count: int):
        """
        Call this method to make the dog bark.

        :param count: How many times will the dog bark?
        """
        self.barked(count)

    @event
    def barked(self, count: int):
        """
        This event is raised when the dog barks.

        :param count: how many times did the dog bark?
        """
def test_init1sub1_raise_count():
    """One dog, one subscriber: only barks made while subscribed are counted."""
    barks = 5
    dog = Dog('Fido')
    counted = {'value': 0}

    def on_bark(sender, count: int):
        # Tally each individual bark.
        for _ in range(count):
            counted['value'] += 1

    dog.barked += on_bark   # subscribe
    dog.bark(barks)         # these barks are heard
    dog.barked -= on_bark   # unsubscribe
    dog.bark(barks)         # these barks go unheard
    assert counted['value'] == barks
def test_subUnsub_raise_count():
    """Subscribe/unsubscribe cycle: only barks during subscription register."""
    expected = 5
    fido = Dog('Fido')
    tally = {'value': 0}

    def listener(sender, count: int):
        # Accumulate the whole batch at once (same end state as a unit loop).
        tally['value'] += count

    fido.barked += listener
    fido.bark(expected)
    fido.barked -= listener
    fido.bark(expected)  # unheard: the handler has been removed
    assert expected == tally['value']
def test_init2sub1_raise_count():
    """Two dogs, one subscriber: only the observed dog's barks count."""
    barks = 5
    observed = Dog('Fido')
    unobserved = Dog('Rover')
    heard = {'value': 0}

    def on_bark(sender, count: int):
        heard['value'] += count

    observed.barked += on_bark   # listen to Fido only
    observed.bark(barks)         # counted
    unobserved.bark(barks)       # Rover has no listeners: not counted
    observed.barked -= on_bark
    observed.bark(barks)         # no longer listening: not counted
    assert heard['value'] == barks
def test_init2sub2_raise_count():
    """Two dogs, both observed: each dog's barks are counted exactly once."""
    barks = 5
    fido = Dog('Fido')
    rover = Dog('Rover')
    heard = {'value': 0}

    def on_bark(sender, count: int):
        heard['value'] += count

    fido.barked += on_bark
    rover.barked += on_bark
    fido.bark(barks)
    rover.bark(barks)
    fido.barked -= on_bark
    rover.barked -= on_bark
    fido.bark(barks)    # neither of these post-unsubscribe barks
    rover.bark(barks)   # is heard anymore
    assert barks * 2 == heard['value']
| StarcoderdataPython |
96287 | import os.path
from bt_utils.console import Console
from bt_utils.get_content import content_dir
SHL = Console("BundestagsBot Template Command") # Use SHL.output(text) for all console based output!
settings = {
'name': 'template', # name/invoke of your command
'mod_cmd': True, # if this cmd is only useable for users with the teamrole
'channels': ['team'], # allowed channels: [dm, bot, team, all]; use !dm to blacklist dm
'log': True, # if this cmd should be logged to the console, default: True
}
# global / changeable variables
PATH = os.path.join(content_dir, "template.json")
# client, message object
# params is a list of the message content splitted at spaces
async def main(client, message, params):
    """Entry point for the 'template' command.

    Currently a no-op placeholder; real command logic goes here.
    ``params`` is the message content split at spaces.
    """
    pass
| StarcoderdataPython |
1717970 |
from decimal import Decimal
class Product:
    """A product record with field validation and JSON (de)serialization."""

    def __init__(self, productId=None, code=None, name=None, price=None, in_stock=None):
        self.productId = productId  # positive int id, or None for unsaved products
        self.code = code
        self.name = name
        self.price = price          # decimal.Decimal
        self.in_stock = in_stock    # units currently in stock

    def printData(self):
        """Print the product's fields to stdout (debugging helper)."""
        print("Product:")
        print("productId: %s" % self.productId)
        print("name: %s" % self.name)
        print("code: %s" % self.code)
        print("price: %s" % self.price)
        print("in_stock: %s" % self.in_stock)

    def validate(self):
        """Raise Exception on the first invalid field; return None if all valid."""
        if self.productId is not None and self.productId <= 0:
            raise Exception("Validation error in productId")
        if self.code is None:
            raise Exception("Validation error in code")
        if self.name is None:
            raise Exception("Validation error in name")
        # isinstance (not an exact type check) also accepts Decimal subclasses.
        if not isinstance(self.price, Decimal):
            raise Exception("Validation error in price")
        if self.in_stock is None:
            raise Exception("Validation error in in_stock")

    @staticmethod
    def serialize(obj):
        """Validate ``obj`` and return a JSON-friendly dict (price as string)."""
        # isinstance accepts Product subclasses as well, unlike type(obj) != Product.
        if not isinstance(obj, Product):
            raise Exception("Error in serialization: Wrong type")
        obj.validate()
        return {
            'id': obj.productId,
            'code': obj.code,
            'name': obj.name,
            'price': str(obj.price),
            'inStock': obj.in_stock
        }

    @staticmethod
    def from_json(data):
        """Build and validate a Product from a JSON-style dict.

        Raises on missing/invalid fields (including a non-numeric price or
        stock count, via the Decimal/int conversions).
        """
        product = Product(
            productId=data.get('id'),
            code=data.get('code'),
            name=data.get('name'),
            price=Decimal(data.get('price')),
            in_stock=int(data.get('inStock')),
        )
        product.validate()
        return product
class Cart:
    """A user's shopping cart: maps productId -> quantity in the cart."""

    def __init__(self, userId=0):
        self.userId = userId
        self.products = {}  # productId -> count of that product

    def addProduct(self, productId):
        """Increment the quantity of ``productId``, starting from zero if absent."""
        current = self.products.get(productId)
        if current is None:
            current = 0
        self.products[productId] = current + 1

    def validate(self):
        """Raise Exception unless userId is a positive id and products is set."""
        if self.userId is None or self.userId <= 0:
            raise Exception("Validation error in userId")
        if self.products is None:
            raise Exception("Validation error in products")

    @staticmethod
    def serialize(obj):
        """Validate ``obj`` and return a JSON-friendly dict representation."""
        if type(obj) != Cart:
            raise Exception("Error in serialization: Wrong type")
        obj.validate()
        product_list = [
            {'productId': pid, 'inCart': qty}
            for pid, qty in obj.products.items()
        ]
        return {
            'userId': obj.userId,
            'products': product_list
        }

    @staticmethod
    def from_json(data):
        """Build and validate a Cart from a JSON-style dict."""
        cart = Cart()
        entries = data.get('products')
        if entries is not None:
            for entry in entries:
                cart.products[int(entry.get('productId'))] = int(entry.get('inCart'))
        if data.get('userId') is not None:
            cart.userId = int(data.get('userId'))
        cart.validate()
        return cart
| StarcoderdataPython |
12810381 | #
# Copyright 2016-2019 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import unittest
from gafferpy import gaffer as g
class GafferFunctionsTest(unittest.TestCase):
examples = [
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.Concat",
"separator" : "\u0020"
}
''',
g.Concat(
separator=" "
)
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.Divide"
}
''',
g.Divide()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.DivideBy",
"by" : 3
}
''',
g.DivideBy(
by=3
)
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.ExtractKeys"
}
''',
g.ExtractKeys()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.DictionaryLookup",
"dictionary": {
"One": 1,
"Two": 2,
"Three": 3
}
}
''',
g.DictionaryLookup(dictionary=dict(One=1, Two=2, Three=3))
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.ExtractValue",
"key" : "blueKey"
}
''',
g.ExtractValue(
key="blueKey"
)
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.ExtractValues"
}
''',
g.ExtractValues()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.Identity"
}
''',
g.Identity()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.IsEmpty"
}
''',
g.IsEmpty()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.Longest"
}
''',
g.Longest()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.IterableLongest"
}
''',
g.IterableLongest()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.IterableFlatten",
"operator": {
"class": "uk.gov.gchq.koryphe.impl.binaryoperator.Max"
}
}
''',
g.IterableFlatten(operator=g.bop.Max())
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.IterableConcat"
}
''',
g.IterableConcat()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.Multiply"
}
''',
g.Multiply()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.MultiplyBy",
"by" : 4
}
''',
g.MultiplyBy(
by=4
)
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.MultiplyLongBy",
"by" : 4
}
''',
g.MultiplyLongBy(
by=4
)
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.Size"
}
''',
g.Size()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.ToString"
}
''',
g.ToString()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.ToString",
"charset": "UTF-16"
}
''',
g.ToString(charset="UTF-16")
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.StringReplace",
"searchString": "replaceme",
"replacement": "withthis"
}
''',
g.StringReplace(search_string="replaceme", replacement="withthis")
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.StringRegexReplace",
"regex": "repl.*me",
"replacement": "withthis"
}
''',
g.StringRegexReplace(regex="repl.*me", replacement="withthis")
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.StringSplit",
"delimiter": " "
}
''',
g.StringSplit(delimiter=" ")
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.StringRegexSplit",
"regex": "[ \\t]*"
}
''',
g.StringRegexSplit(regex="[ \t]*")
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.StringJoin",
"delimiter": " "
}
''',
g.StringJoin(delimiter=" ")
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.StringTrim"
}
''',
g.StringTrim()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.StringAppend",
"suffix": "test"
}
''',
g.StringAppend(suffix="test")
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.StringPrepend",
"prefix": "test"
}
''',
g.StringPrepend(prefix="test")
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.StringTruncate",
"length": 20,
"ellipses": false
}
''',
g.StringTruncate(length=20, ellipses=False)
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.ReverseString"
}
''',
g.ReverseString()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.DefaultIfNull",
"defaultValue": "test"
}
''',
g.DefaultIfNull(default_value="test")
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.DefaultIfEmpty",
"defaultValue": "test"
}
''',
g.DefaultIfEmpty(default_value="test")
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.function.ToEntityId"
}
''',
g.ToEntityId()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.function.FromEntityId"
}
''',
g.FromEntityId()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.function.ToElementId"
}
''',
g.ToElementId()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.function.FromElementId"
}
''',
g.FromElementId()
],
[
'''
{
"class": "uk.gov.gchq.gaffer.types.function.ToTypeValue"
}
''',
g.ToTypeValue()
],
[
'''
{
"class": "uk.gov.gchq.gaffer.types.function.ToTypeSubTypeValue"
}
''',
g.ToTypeSubTypeValue()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.generator.MapGenerator",
"fields" : {
"GROUP" : "Group Label",
"VERTEX" : "Vertex Label",
"SOURCE" : "Source Label",
"count" : "Count Label"
},
"constants" : {
"A Constant" : "Some constant value"
}
}
''',
g.MapGenerator(
fields={
'VERTEX': 'Vertex Label',
'count': 'Count Label',
'GROUP': 'Group Label',
'SOURCE': 'Source Label'
},
constants={
'A Constant': 'Some constant value'
}
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.generator.CsvGenerator",
"fields" : {
"GROUP" : "Group Label",
"VERTEX" : "Vertex Label",
"SOURCE" : "Source Label",
"count" : "Count Label"
},
"constants" : {
"A Constant" : "Some constant value"
},
"quoted" : true,
"commaReplacement": "-"
}
''',
g.CsvGenerator(
fields={
'VERTEX': 'Vertex Label',
'count': 'Count Label',
'GROUP': 'Group Label',
'SOURCE': 'Source Label'
},
constants={
'A Constant': 'Some constant value'
},
quoted=True,
comma_replacement="-"
)
],
[
'''
{
"class": "uk.gov.gchq.gaffer.data.generator.JsonToElementGenerator"
}
''',
g.JsonToElementGenerator()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.types.function.FreqMapExtractor",
"key" : "key1"
}
''',
g.FreqMapExtractor(key="key1")
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.function.FunctionMap",
"function" : {
"class" : "uk.gov.gchq.koryphe.impl.function.MultiplyBy",
"by" : 10
}
}
''',
g.FunctionMap(
function=g.MultiplyBy(by=10)
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.graph.function.walk.ExtractWalkEdges"
}
''',
g.ExtractWalkEdges()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.graph.function.walk.ExtractWalkEdgesFromHop",
"hop" : 2
}
''',
g.ExtractWalkEdgesFromHop(
hop=2
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.graph.function.walk.ExtractWalkEntities"
}
''',
g.ExtractWalkEntities()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.graph.function.walk.ExtractWalkEntitiesFromHop",
"hop" : 1
}
''',
g.ExtractWalkEntitiesFromHop(
hop=1
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.graph.function.walk.ExtractWalkVertex"
}
''',
g.ExtractWalkVertex()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.Length",
"maxLength" : 100000
}
''',
g.Length(
max_length=100000
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.element.function.ExtractId",
"id" : "VERTEX"
}
''',
g.ExtractId(
id='VERTEX'
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.element.function.ExtractProperty",
"name" : "countByVehicleType"
}
''',
g.ExtractProperty(
name="countByVehicleType"
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.element.function.ExtractGroup"
}
''',
g.ExtractGroup()
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.CallMethod",
"method": "someMethod"
}
''',
g.CallMethod(method="someMethod")
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.element.function.UnwrapEntityId"
}
''',
g.UnwrapEntityId()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.SetValue",
"value": "value2"
}
''',
g.SetValue(value="value2")
],
[
'''
{
"class":"uk.gov.gchq.koryphe.impl.function.If",
"predicate":{"class":"uk.gov.gchq.koryphe.impl.predicate.IsA","type":"java.lang.Integer"},
"then":{"class":"uk.gov.gchq.koryphe.impl.function.SetValue","value":"value2"},
"otherwise":{"class":"uk.gov.gchq.koryphe.impl.function.SetValue","value":"value3"}
}
''',
g.func.If(
predicate=g.IsA(type="java.lang.Integer"),
then=g.SetValue(value="value2"),
otherwise=g.SetValue(value="value3")
)
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.ToArray"
}
''',
g.func.ToArray()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.ToList"
}
''',
g.func.ToList()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.ToSet"
}
''',
g.func.ToSet()
],
[
'''
{
"class": "uk.gov.gchq.gaffer.types.function.ToFreqMap"
}
''',
g.func.ToFreqMap()
],
[
'''
{
"class": "uk.gov.gchq.gaffer.types.function.FreqMapPredicator",
"predicate": {
"class": "uk.gov.gchq.koryphe.impl.predicate.IsA",
"type": "java.lang.String"
}
}
''',
g.FreqMapPredicator(
predicate=g.IsA(
type="java.lang.String"
)
)
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.IterableFilter",
"predicate": {
"class": "uk.gov.gchq.koryphe.impl.predicate.IsA",
"type": "java.lang.String"
}
}
''',
g.func.IterableFilter(
predicate=g.IsA(type="java.lang.String")
)
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.MapFilter"
}
''',
g.func.MapFilter()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.MapFilter",
"keyPredicate": {
"class":"uk.gov.gchq.koryphe.impl.predicate.StringContains",
"value":"someValue",
"ignoreCase":false
}
}
''',
g.func.MapFilter(
key_predicate=g.pred.StringContains(
value="someValue",
ignore_case=False
)
)
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.FirstValid",
"predicate": {
"class":"uk.gov.gchq.koryphe.impl.predicate.StringContains",
"value":"someValue",
"ignoreCase":false
}
}
''',
g.func.FirstValid(
predicate=g.pred.StringContains(
value="someValue",
ignore_case=False
)
)
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.FirstValid",
"predicate": {
"class":"uk.gov.gchq.koryphe.impl.predicate.StringContains",
"value":"someValue",
"ignoreCase":false
}
}
''',
g.func.FirstValid(
predicate={
"class": "uk.gov.gchq.koryphe.impl.predicate.StringContains",
"value": "someValue",
"ignoreCase": False
}
)
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.MapFilter",
"keyPredicate": {
"class":"uk.gov.gchq.koryphe.impl.predicate.StringContains",
"value":"someValue",
"ignoreCase":false
},
"valuePredicate": {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsMoreThan",
"orEqualTo" : false,
"value" : 0
}
}
''',
g.func.MapFilter(
key_predicate=g.pred.StringContains(
value="someValue",
ignore_case=False
),
value_predicate=g.pred.IsMoreThan(
value=0,
or_equal_to=False
)
)
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.MapFilter",
"keyValuePredicate": {
"class": "uk.gov.gchq.koryphe.impl.predicate.AreEqual"
}
}
''',
g.func.MapFilter(
key_value_predicate=g.pred.AreEqual()
)
],
[
'''
{
"class" : "uk.gov.gchq.koryphe.impl.function.CreateObject",
"objectClass" : "java.lang.Long"
}
''',
g.func.CreateObject(
object_class="java.lang.Long"
)
],
[
'''
{
"class": "uk.gov.gchq.gaffer.time.function.MaskTimestampSetByTimeRange",
"startTime": {
"java.lang.Long": 15300000000000
},
"endTime": {
"java.lang.Long": 15400000000000
}
}
''',
g.func.MaskTimestampSetByTimeRange(
start_time=g.long(15300000000000),
end_time=g.long(15400000000000)
)
],
[
'''
{
"class": "uk.gov.gchq.gaffer.time.function.MaskTimestampSetByTimeRange",
"startTime": {
"java.lang.Long": 15300000000000
},
"endTime": {
"java.lang.Long": 15400000000000
},
"timeUnit": "SECOND"
}
''',
g.func.MaskTimestampSetByTimeRange(
start_time=g.long(15300000000000),
end_time=g.long(15400000000000),
time_unit=g.TimeUnit.SECOND
)
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.Base64Decode"
}
''',
g.func.Base64Decode()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.CsvLinesToMaps",
"delimiter": "|",
"header": ["my", "csv", "file"],
"firstRow": 1,
"quoted": true,
"quoteChar": "'"
}
''',
g.func.CsvLinesToMaps(delimiter='|', header=["my", "csv", "file"], first_row=1, quoted=True,
quote_char='\'')
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.CsvToMaps",
"delimiter": "|",
"header": ["my", "csv", "file"],
"firstRow": 1,
"quoted": true,
"quoteChar": "'"
}
''',
g.func.CsvToMaps(delimiter='|', header=["my", "csv", "file"], first_row=1, quoted=True, quote_char='\'')
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.CurrentDate"
}
''',
g.func.CurrentDate()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.CurrentTime"
}
''',
g.func.CurrentTime()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.DeserialiseJson",
"outputClass": "uk.gov.gchq.gaffer.data.element.Edge"
}
''',
g.func.DeserialiseJson(output_class=g.Edge.CLASS)
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.DeserialiseJson"
}
''',
g.func.DeserialiseJson()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.DeserialiseXml"
}
''',
g.func.DeserialiseXml()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.Gunzip"
}
''',
g.func.Gunzip()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.Increment",
"increment": {
"java.lang.Long": 1000000
}
}
''',
g.Increment(increment=g.long(1000000))
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.MapToTuple"
}
''',
g.func.MapToTuple()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.ParseDate",
"timeZone": "BST",
"format": "DD-MM-YYYY"
}
''',
g.func.ParseDate(time_zone="BST", format="DD-MM-YYYY")
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.ParseTime",
"timeZone": "EST",
"format": "MM-DD-YYYY HH:mm:ss.SSS",
"timeUnit": "MICROSECOND"
}
''',
g.func.ParseTime(time_zone="EST", format="MM-DD-YYYY HH:mm:ss.SSS", time_unit=g.TimeUnit.MICROSECOND)
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.ToDateString",
"format": "YYYY-MMM-dd"
}
''',
g.func.ToDateString(format="YYYY-MMM-dd")
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.ToBytes",
"charset": "UTF-8"
}
''',
g.func.ToBytes(charset="UTF-8")
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.ApplyBiFunction",
"function": {
"class": "uk.gov.gchq.koryphe.impl.binaryoperator.Sum"
}
}
''',
g.func.ApplyBiFunction(function=g.gaffer_binaryoperators.Sum())
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.ApplyBiFunction",
"function": {
"class": "uk.gov.gchq.koryphe.impl.binaryoperator.Product"
}
}
''',
g.func.ApplyBiFunction(function={
"class": "uk.gov.gchq.koryphe.impl.binaryoperator.Product"
})
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.ToTuple"
}
''',
g.func.ToTuple()
],
[
'''
{
"class": "uk.gov.gchq.gaffer.data.element.function.ToPropertiesTuple"
}
''',
g.func.ToPropertiesTuple()
],
[
'''
{
"class": "uk.gov.gchq.gaffer.data.element.function.ToElementTuple"
}
''',
g.func.ToElementTuple()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.element.function.ReduceRelatedElements",
"vertexAggregator" : {
"class" : "uk.gov.gchq.koryphe.impl.binaryoperator.Max"
},
"visibilityAggregator" : {
"class" : "uk.gov.gchq.koryphe.impl.binaryoperator.CollectionConcat"
},
"visibilityProperty" : "visibility",
"relatedVertexGroups" : [ "relatesTo" ]
}
''',
g.func.ReduceRelatedElements(
vertex_aggregator=g.gaffer_binaryoperators.Max(),
visibility_aggregator=g.gaffer_binaryoperators.CollectionConcat(),
visibility_property="visibility",
related_vertex_groups=["relatesTo"]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.function.ToTrailingWildcardPair",
"endOfRange" : "~"
}
''',
g.func.ToTrailingWildcardPair(end_of_range="~")
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.element.function.TypeValueToTuple"
}
''',
g.func.TypeValueToTuple()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.data.element.function.TypeSubTypeValueToTuple"
}
''',
g.func.TypeSubTypeValueToTuple()
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.FunctionChain",
"functions": [
{
"class": "uk.gov.gchq.koryphe.impl.function.Base64Decode"
},
{
"class": "uk.gov.gchq.koryphe.impl.function.CsvLinesToMaps",
"delimiter": "|",
"quoted": true
}
]
}
''',
g.FunctionChain(functions=[
g.Base64Decode(),
g.CsvLinesToMaps(delimiter="|", quoted=True)
])
],
[
'''
{
"class":"uk.gov.gchq.koryphe.tuple.function.TupleAdaptedFunction",
"selection":[0],
"function": {
"class": "uk.gov.gchq.gaffer.operation.function.ToEntityId"
},
"projection": [1]
}
''',
g.TupleAdaptedFunction(selection=[0], function=g.ToEntityId(), projection=[1])
],
[
'''
{
"class":"uk.gov.gchq.koryphe.impl.function.FunctionChain",
"functions": [
{
"class":"uk.gov.gchq.koryphe.tuple.function.TupleAdaptedFunction",
"selection":[0],
"function": {
"class": "uk.gov.gchq.koryphe.impl.function.ToUpperCase"
},
"projection": [1]
},
{
"class":"uk.gov.gchq.koryphe.tuple.function.TupleAdaptedFunction",
"selection": [1],
"function": {
"class": "uk.gov.gchq.koryphe.impl.function.ToSet"
},
"projection": [2]
}
]
}
''',
g.FunctionChain(functions=[
g.TupleAdaptedFunction(selection=[0], function=g.ToUpperCase(), projection=[1]),
g.TupleAdaptedFunction(selection=[1], function=g.gaffer_functions.ToSet(), projection=[2])
])
],
[
'''
{
"class": "uk.gov.gchq.koryphe.tuple.function.TupleAdaptedFunctionComposite",
"functions": [
{
"selection": [ "something" ],
"function": {
"class":"uk.gov.gchq.koryphe.impl.function.ToUpperCase"
},
"projection": [1]
}
]
}
''',
g.TupleAdaptedFunctionComposite(
functions=[g.FunctionContext(selection=["something"],
function=g.ToUpperCase(),
projection=[1]
)
]
),
],
[
'''
{
"class": "uk.gov.gchq.koryphe.impl.function.FunctionChain",
"functions": [
{
"class": "uk.gov.gchq.koryphe.tuple.function.TupleAdaptedFunctionComposite",
"functions": [
{
"selection": [0],
"function": {
"class":"uk.gov.gchq.koryphe.impl.function.ToUpperCase"
},
"projection": [1]
}
]
},
{
"class": "uk.gov.gchq.koryphe.tuple.function.TupleAdaptedFunctionComposite",
"functions": [
{
"selection": [1],
"function": {
"class":"uk.gov.gchq.koryphe.impl.function.ToSet"
},
"projection": [2]
}
]
}
]
}
''',
g.FunctionChain(functions=[
g.TupleAdaptedFunctionComposite(
functions=[g.FunctionContext(selection=[0], function=g.ToUpperCase(), projection=[1])]),
g.TupleAdaptedFunctionComposite(
functions=[g.FunctionContext(selection=[1], function=g.gaffer_functions.ToSet(), projection=[2])])
])
]
]
def test_examples(self):
    """Check every (JSON string, gafferpy object) pair in ``examples`` agrees.

    Each entry pairs raw Gaffer JSON with the equivalent python-built object;
    the two must serialize to the same JSON, and the JSON must survive a
    validated round-trip through the JsonConverter.
    """
    for example in self.examples:
        self.assertEqual(
            json.loads(example[0]),
            example[1].to_json(),
            "json failed: \n" + example[0] + "\n"
            + g.JsonConverter.from_json(example[0]).to_code_string()
        )
        # Round-trip through the converter with validation enabled.
        g.JsonConverter.from_json(example[0], validate=True)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
328510 | from argparse import Namespace
from ml4ir.base.config.keys import TFRecordTypeKey
from ml4ir.applications.classification.config.keys import LossKey, MetricKey
from ml4ir.base.config.parse_args import RelevanceArgParser
from typing import List
class ClassificationArgParser(RelevanceArgParser):
    """
    States default arguments for classification model.
    """

    def set_default_args(self):
        """Apply base relevance defaults, then classification-specific overrides."""
        super().set_default_args()
        self.set_defaults(
            tfrecord_type=TFRecordTypeKey.EXAMPLE,
            loss_key=LossKey.CATEGORICAL_CROSS_ENTROPY,
            metrics_keys=[MetricKey.CATEGORICAL_ACCURACY,
                          MetricKey.TOP_5_CATEGORICAL_ACCURACY],
            monitor_metric=MetricKey.CATEGORICAL_ACCURACY,
            # 'max' because higher categorical accuracy is better.
            monitor_mode="max",
            group_metrics_min_queries=25,
            output_name="category_label",
        )
def get_args(args: List[str]) -> Namespace:
    """Parse command-line ``args`` with classification defaults applied."""
    return ClassificationArgParser().parse_args(args)
| StarcoderdataPython |
1894836 | <reponame>rparini/cxroots<filename>cxroots/Derivative.py
from __future__ import division
import math
import numpy as np
from numpy import pi
import numdifftools.fornberg as ndf
@np.vectorize
def CxDerivative(f, z0, n=1, contour=None, absIntegrationTol=1e-10, verbose=False):
    r"""
    Evaluate the nth derivative of an analytic function at z0 using
    Cauchy's integral formula for derivatives,

    .. math::
        f^{(n)}(z_0) = \frac{n!}{2\pi i} \oint_C \frac{f(z)}{(z-z_0)^{n+1}} dz

    Parameters
    ----------
    f : function
        Function of a single variable f(x).
    z0 : complex
        Point to evaluate the derivative at.
    n : int
        The order of the derivative to evaluate.
    contour : :class:`Contour <cxroots.Contour.Contour>`, optional
        The contour, C, in the complex plane which encloses the point z0.
        By default the contour is the circle |z-z_0|=1e-3.
    absIntegrationTol : float, optional
        The absolute tolerance required of the integration routine.
    verbose : bool, optional
        If True runtime information will be printed.  False by default.

    Returns
    -------
    complex
        The nth derivative of f evaluated at z0.
    """
    if contour is None:
        # Default to a small circle centred on the evaluation point.
        from .contours.Circle import Circle
        C = Circle(z0, 1e-3)
    else:
        C = contour

    def integrand(z):
        return f(z) / (z - z0) ** (n + 1)

    integral = C.integrate(integrand, absTol=absIntegrationTol, verbose=verbose)
    return integral * math.factorial(n) / (2j * pi)
def find_multiplicity(root, f, df=None, rootErrTol=1e-10, verbose=False):
    """
    Find the multiplicity of a given root of f by computing the
    derivatives of f, f^{(1)}, f^{(2)}, ... until
    |f^{(n)}(root)|>rootErrTol.  The multiplicity of the root is then
    equal to n.  The derivative is calculated with `numdifftools <http://numdifftools.readthedocs.io/en/latest/api/numdifftools.html#numdifftools.fornberg.derivative>`_
    which employs a method due to Fornberg.

    Parameters
    ----------
    root : complex
        A root of f, f(root)=0.
    f : function
        An analytic function of a single complex variable such that
        f(root)=0.
    df : function, optional
        The first derivative of f.  If not known then df=None.
    rootErrTol : float, optional
        It will be assumed that f(z)=0 if numerically |f(z)|<rootErrTol.
    verbose : bool, optional
        If True runtime information will be printed.  False by default.

    Returns
    -------
    multiplicity : int
        The multiplicity of the given root.
    """
    # Refuse to proceed if 'root' is not actually a root to within tolerance.
    if abs(f(root)) > rootErrTol:
        raise ValueError("""
            The provided 'root' is not a root of the given function f.
            Specifically, %f = abs(f(root)) > rootErrTol = %f
            """ % (abs(f(root)), rootErrTol))
    n = 1
    while True:
        if df is not None:
            if n == 1:
                # The first derivative was supplied directly by the caller.
                err = abs(df(root))
            else:
                # ndf.derivative returns an array [f, f', f'', ...], so index
                # n-1 of df's derivatives is the nth derivative of f.
                err = abs(ndf.derivative(df, root, n - 1)[n - 1])
        else:
            # Without df, take the nth derivative of f directly (index n).
            err = abs(ndf.derivative(f, root, n)[n])
        if verbose:
            print('n', n, '|df^(n)|', err)
        if err > rootErrTol:
            # First non-vanishing derivative found: multiplicity is n.
            break
        n += 1
    return n
| StarcoderdataPython |
11261037 | import os
import sys
import igraph as ig
def read_sif(path, directed=True):
    """Parse a SIF (Simple Interaction Format) file into an igraph Graph.

    Each line is expected to be tab-separated as
    ``<prefix>:<source>\\t<interaction>\\t<prefix>:<target>``.  Multiple
    interactions between the same ordered pair are merged into a single
    edge whose 'interaction' attribute is a comma-joined string.

    Parameters
    ----------
    path : str
        Path to the .sif file.
    directed : bool, optional
        Whether to build a directed graph (default True).

    Returns
    -------
    igraph.Graph
        Graph with vertex attribute 'name' and edge attribute 'interaction'.
    """
    nodes = set()
    interactions = dict()
    with open(path, 'r') as sif:
        for line in sif:
            items = [item.strip() for item in line.split('\t') if item]
            # Node fields look like "prefix:identifier"; keep the identifier.
            source, interaction, target = items[0].split(':')[1], items[1], items[2].split(':')[1]
            nodes.add(source)
            nodes.add(target)
            edge = (source, target)
            if edge not in interactions:
                interactions[edge] = {interaction}
            else:
                interactions[edge].add(interaction)
    # Collapse each edge's interaction set into a deterministic,
    # comma-separated string (sorted: set order is otherwise arbitrary).
    for edge in interactions:
        interactions[edge] = ','.join(sorted(interactions[edge]))
    nodes = list(nodes)
    # O(1) lookups via enumerate instead of repeated O(n) nodes.index calls.
    node2index = {node: index for index, node in enumerate(nodes)}
    edges = [(node2index[src], node2index[tgt]) for src, tgt in interactions]
    interaction_labels = [interactions[(nodes[s], nodes[t])] for s, t in edges]
    graph = ig.Graph(directed=directed)
    graph.add_vertices(len(nodes))
    graph.vs['name'] = nodes
    graph.add_edges(edges)
    graph.es['interaction'] = interaction_labels
    return graph
def to_graphml(sif):
    """Convert a .sif file into a .graphml file in the same directory."""
    graph = read_sif(sif, True)
    # Output name: input base name up to the first dot, with .graphml suffix.
    base = os.path.basename(sif).split('.')[0]
    graphml = os.path.join(os.path.dirname(sif), base + '.graphml')
    graph.write_graphml(graphml)
if __name__ == '__main__':
to_graphml(sys.argv[1])
| StarcoderdataPython |
6454529 | <gh_stars>0
# Enter your code here. Read input from STDIN. Print output to STDOUT
import statistics

# Pearson correlation coefficient of two n-point samples read from stdin.
n = int(input())
xs = list(map(float, input().split()))
ys = list(map(float, input().split()))

mean_x = statistics.mean(xs)
mean_y = statistics.mean(ys)

# Population covariance and standard deviations (divide by n, not n-1);
# only the first n values of each sample are used, as in the index loops.
cov = sum((a - mean_x) * (b - mean_y) for a, b in zip(xs[:n], ys[:n])) / n
std_x = (sum((a - mean_x) ** 2 for a in xs[:n]) / n) ** 0.5
std_y = (sum((b - mean_y) ** 2 for b in ys[:n]) / n) ** 0.5

r = cov / (std_x * std_y)
print("%.3f"%r) | StarcoderdataPython |
3309248 | """Find REAPER resource path without ``reapy`` dist API enabled."""
import os
import sys
import reapy
from .shared_library import is_windows, is_apple
if not reapy.is_inside_reaper():
# Third-party imports crash REAPER when run inside it.
import psutil
def get_candidate_directories(detect_portable_install=True):
    """Yield possible REAPER resource directories, most specific first.

    When ``detect_portable_install`` is True, the directory next to the
    running REAPER process is tried first; then the standard per-platform
    location.
    """
    if detect_portable_install:
        yield get_portable_resource_directory()
    if is_apple():
        yield os.path.expanduser('~/Library/Application Support/REAPER')
    elif is_windows():
        yield os.path.expandvars(r'$APPDATA\REAPER')
    else:
        # Neither macOS nor Windows: assume a Unix-like config layout.
        yield os.path.expanduser('~/.config/REAPER')
def get_portable_resource_directory():
    """Return the resource directory of the running (portable) REAPER install.

    On macOS the last four path components are stripped — presumably the
    .app bundle layout (…/REAPER.app/Contents/MacOS/REAPER); TODO confirm.
    Elsewhere the resource directory is simply the executable's directory.
    """
    process_path = get_reaper_process_path()
    if is_apple():
        return '/'.join(process_path.split('/')[:-4])
    return os.path.dirname(process_path)
def get_reaper_process_path():
    """Return path to currently running REAPER process.

    Returns
    -------
    str
        Path to executable file.

    Raises
    ------
    RuntimeError
        When zero or more than one REAPER instances are currently
        running.
    """
    # Match 'reaper', 'REAPER.exe', etc.: compare the extension-less,
    # lower-cased process name.
    processes = [
        p for p in psutil.process_iter(['name', 'exe'])
        if os.path.splitext(p.info['name'])[0].lower() == 'reaper'
    ]
    if not processes:
        raise RuntimeError('No REAPER instance is currently running.')
    elif len(processes) > 1:
        # Ambiguous: we cannot tell which instance the caller means.
        raise RuntimeError(
            'More than one REAPER instance is currently running.'
        )
    return processes[0].info['exe']
def get_resource_path(detect_portable_install=True):
    """Return the first candidate directory containing ``reaper.ini``.

    Falls through (returning None) when no candidate holds a reaper.ini.
    """
    for directory in get_candidate_directories(detect_portable_install):
        ini_path = os.path.join(directory, 'reaper.ini')
        if os.path.exists(ini_path):
            return directory
| StarcoderdataPython |
1887383 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
==================
prospect.utilities
==================
Utility functions for prospect.
"""
import os, glob
from pkg_resources import resource_string, resource_listdir
import numpy as np
import astropy.io.fits
from astropy.table import Table, vstack
import scipy.ndimage.filters
_desiutil_imported = True
try:
from desiutil.log import get_logger
except ImportError:
_desiutil_imported = False
_desispec_imported = True
try:
import desispec.spectra
import desispec.frame
from desispec.io.util import healpix_subdirectory
except ImportError:
_desispec_imported = False
_desitarget_imported = True
try:
from desitarget.targetmask import desi_mask, bgs_mask, mws_mask, scnd_mask
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitarget.sv1.sv1_targetmask import desi_mask as sv1_desi_mask
from desitarget.sv1.sv1_targetmask import bgs_mask as sv1_bgs_mask
from desitarget.sv1.sv1_targetmask import mws_mask as sv1_mws_mask
from desitarget.sv1.sv1_targetmask import scnd_mask as sv1_scnd_mask
from desitarget.sv2.sv2_targetmask import desi_mask as sv2_desi_mask
from desitarget.sv2.sv2_targetmask import bgs_mask as sv2_bgs_mask
from desitarget.sv2.sv2_targetmask import mws_mask as sv2_mws_mask
from desitarget.sv2.sv2_targetmask import scnd_mask as sv2_scnd_mask
from desitarget.sv3.sv3_targetmask import desi_mask as sv3_desi_mask
from desitarget.sv3.sv3_targetmask import bgs_mask as sv3_bgs_mask
from desitarget.sv3.sv3_targetmask import mws_mask as sv3_mws_mask
from desitarget.sv3.sv3_targetmask import scnd_mask as sv3_scnd_mask
supported_desitarget_masks = {
'DESI_TARGET': desi_mask,
'BGS_TARGET': bgs_mask,
'MWS_TARGET': mws_mask,
'SECONDARY_TARGET': scnd_mask,
'CMX_TARGET': cmx_mask,
'SV1_DESI_TARGET': sv1_desi_mask,
'SV1_BGS_TARGET': sv1_bgs_mask,
'SV1_MWS_TARGET': sv1_mws_mask,
'SV1_SCND_TARGET': sv1_scnd_mask,
'SV2_DESI_TARGET': sv2_desi_mask,
'SV2_BGS_TARGET': sv2_bgs_mask,
'SV2_MWS_TARGET': sv2_mws_mask,
'SV2_SCND_TARGET': sv2_scnd_mask,
'SV3_DESI_TARGET': sv3_desi_mask,
'SV3_BGS_TARGET': sv3_bgs_mask,
'SV3_MWS_TARGET': sv3_mws_mask,
'SV3_SCND_TARGET': sv3_scnd_mask,
}
except ImportError:
_desitarget_imported = False
supported_desitarget_masks = dict()
_redrock_imported = True
try:
import redrock.results
except ImportError:
_redrock_imported = False
#- Visual-inspection (VI) flag definitions used by the prospect viewer.
vi_flags = [
    # Definition of VI flags
    # shortlabels for "issue" flags must be a unique single-letter identifier
    {"label" : "4", "type" : "quality", "description" : "Confident classification: two or more secure features."},
    {"label" : "3", "type" : "quality", "description" : "Probable classification: at least one secure spectral feature + continuum or many weak spectral features."},
    {"label" : "2", "type" : "quality", "description" : "Possible classification: one strong spectral feature but unsure what it is."},
    {"label" : "1", "type" : "quality", "description" : "Unlikely classification: clear signal but features are unidentified."},
    {"label" : "0", "type" : "quality", "description" : "Nothing there, no signal."},
    {"label" : "Bad redshift fit", "shortlabel" : "R", "type" : "issue", "description" : "Mis-estimation of redshift by the pipeline fitter"},
    {"label" : "Bad spectype fit", "shortlabel" : "C", "type" : "issue", "description" : "Mis-identification of spectral type from the best-fit pipeline solution; e.g., star vs QSO..."},
    {"label" : "Bad spectrum", "shortlabel" : "S", "type" : "issue", "description" : "Bad spectrum; e.g. strong cosmic/skyline subtraction residuals."}
]
vi_file_fields = [
    # Contents of VI files: [
    #   field name (in VI file header),
    #   associated variable in viewer_cds.cds_metadata,
    #   dtype in VI file,
    #   default value ]
    # Ordered list
    ["TARGETID", "TARGETID", "i8", -1],
    ["EXPID", "EXPID", "i4", -1],
    ["NIGHT", "NIGHT", "i4", -1],
    ["TILEID", "TILEID", "i4", -1],
    ["Spec_version", "spec_version", "U16", "-1"],
    ["Redrock_version", "redrock_version", "U16", "-1"],
    ["Template_version", "template_version", "U16", "-1"],
    ["Redrock_spectype", "SPECTYPE", "U10", ""],
    ["Redrock_z", "Z", "U6", "-1"],
    ["Redrock_deltachi2", "DELTACHI2", "U10", "-1"],
    ["VI_scanner", "VI_scanner", "U10", " "],
    ["VI_quality", "VI_quality_flag", "U2", "-1"],
    ["VI_issue", "VI_issue_flag", "U3", ""],
    ["VI_z", "VI_z", "U6", ""],
    ["VI_spectype", "VI_spectype", "U10", ""],
    ["VI_comment", "VI_comment", "U100", ""]
]
vi_spectypes =[
    # List of spectral types to fill in VI categories
    # in principle, it should match somehow redrock spectypes...
    "STAR",
    "GALAXY",
    "QSO"
]
vi_std_comments = [
    # Standardized VI comments
    "Broad absorption line quasar (BAL)",
    "Damped Lyman-alpha system (DLA)",
    "Two objects in spectrum",
    "Blazar"
]
#- Lazily-populated cache used by get_resources(); keys are resource types.
_resource_cache = {'templates': None, 'js': None}
def get_resources(filetype):
    """Find all HTML template or JavaScript files in the package.

    The result is cached in the module-level ``_resource_cache`` so the
    package data is read from disk at most once per file type.

    Parameters
    ----------
    filetype : {'templates', 'js'}
        The type of file resource needed.

    Returns
    -------
    :class:`dict`
        A dictionary mapping filename to the contents of the file.

    Raises
    ------
    ValueError
        If `filetype` is unknown.
    """
    global _resource_cache
    if filetype not in _resource_cache:
        raise ValueError("Unknown filetype '{0}' for get_resources()!".format(filetype))
    if _resource_cache[filetype] is None:
        _resource_cache[filetype] = dict()
        for fname in resource_listdir('prospect', filetype):
            # Skip hidden files (e.g. editor backups).
            if fname.startswith("."):
                continue
            data = resource_string('prospect', filetype + '/' + fname)
            _resource_cache[filetype][fname] = data.decode('utf-8')
    return _resource_cache[filetype]
def match_catalog_to_spectra(zcat_in, spectra, return_index=False):
    """Create a subcatalog of ``zcat_in`` matched row-by-row to ``spectra``.

    Parameters
    ----------
    zcat_in : :class:`~astropy.table.Table`
        Input catalog; must have a TARGETID column.
    spectra : :class:`~desispec.spectra.Spectra`
        Spectra whose fibermap TARGETIDs drive the matching.
    return_index : :class:`bool`, optional
        If ``True``, also return the list of matching row indices in ``zcat_in``.

    Returns
    -------
    :class:`~astropy.table.Table`
        Rows of ``zcat_in`` reordered to match the spectra's TARGETIDs;
        a ``(subtable, index_list)`` tuple when ``return_index`` is ``True``.

    Raises
    ------
    RuntimeError
        If any TARGETID in ``spectra`` does not match exactly one row of ``zcat_in``.
    """
    if zcat_in is None:
        return None
    zcat_out = Table(dtype=zcat_in.dtype)
    index_list = list()
    for i_spec in range(spectra.num_spectra()):
        tid = spectra.fibermap['TARGETID'][i_spec]
        ww, = np.where(zcat_in['TARGETID'] == tid)
        if len(ww) < 1:
            raise RuntimeError("No entry in zcat_in for TARGETID "+str(tid))
        if len(ww) > 1:
            raise RuntimeError("Several entries in zcat_in for TARGETID "+str(tid))
        matched_row = ww[0]
        zcat_out.add_row(zcat_in[matched_row])
        index_list.append(matched_row)
    if return_index:
        return (zcat_out, index_list)
    return zcat_out
def match_rrdetails_to_spectra(redrockfile, spectra, Nfit=None):
    """Build a Table from a detailed Redrock output, matched to DESI spectra.

    Parameters
    ----------
    redrockfile : :class:`str`
        Filename for the detailed Redrock output file (.h5 file).
    spectra : :class:`~desispec.spectra.Spectra`
        Spectra whose fibermap TARGETIDs drive the matching.
    Nfit : :class:`int`, optional
        Number of best fits to keep per target; by default, the number of
        fits available for the first target in the Redrock file.

    Returns
    -------
    :class:`~astropy.table.Table`
        Columns: TARGETID, CHI2, DELTACHI2, COEFF, Z, ZERR, ZWARN,
        SPECTYPE, SUBTYPE; one row per spectrum, fits sorted by chi2.

    Raises
    ------
    RuntimeError
        If fewer than Nfit fits are found for some TARGETID.
    """
    dummy, rr_table = redrock.results.read_zscan(redrockfile)
    rr_targets = rr_table['targetid']
    if Nfit is None:
        # Count the fits stored for the first target and use that everywhere.
        ww, = np.where( (rr_targets == rr_targets[0]) )
        Nfit = len(ww)
    matched_redrock_cat = Table(
        dtype=[('TARGETID', '<i8'), ('CHI2', '<f8', (Nfit,)),
               ('DELTACHI2', '<f8', (Nfit,)), ('COEFF', '<f8', (Nfit,10,)),
               ('Z', '<f8', (Nfit,)), ('ZERR', '<f8', (Nfit,)),
               ('ZWARN', '<i8', (Nfit,)), ('SPECTYPE', '<U6', (Nfit,)), ('SUBTYPE', '<U2', (Nfit,))])
    fit_keys = ['chi2', 'deltachi2', 'coeff', 'z', 'zerr', 'zwarn', 'spectype', 'subtype']
    for i_spec in range(spectra.num_spectra()):
        tid = spectra.fibermap['TARGETID'][i_spec]
        sel, = np.where(rr_targets == tid)
        if len(sel) < Nfit:
            raise RuntimeError("Redrock table cannot match spectra with "+str(Nfit)+" best fits")
        # Sort fit results by chi2 (independently of spectype)
        order = np.argsort(rr_table[sel]['chi2'])[0:Nfit]
        best_fits = rr_table[sel][order]
        row = [tid]
        for key in fit_keys:
            row.append(best_fits[key])
        matched_redrock_cat.add_row(row)
    return matched_redrock_cat
def create_zcat_from_redrock_cat(redrock_cat, fit_num=0):
    """Extract a single-fit redshift catalog from a multi-fit Redrock catalog.

    Parameters
    ----------
    redrock_cat : :class:`~astropy.table.Table`
        Catalog with rows as defined in `match_rrdetails_to_spectra()`.
    fit_num : :class:`int`, optional
        Index of the fit to extract (default: 0, ie. Redrock's best fit).

    Returns
    -------
    :class:`~astropy.table.Table`
        Columns: TARGETID, CHI2, COEFF, Z, ZERR, ZWARN, SPECTYPE,
        SUBTYPE, DELTACHI2.

    Raises
    ------
    ValueError
        If ``fit_num`` exceeds the number of fits stored in ``redrock_cat``.
    """
    n_fits_available = redrock_cat['Z'].shape[1]
    if fit_num >= n_fits_available:
        raise ValueError("fit_num too large wrt redrock_cat")
    zcat_dtype = [('TARGETID', '<i8'), ('CHI2', '<f8'), ('COEFF', '<f8', (10,)),
                  ('Z', '<f8'), ('ZERR', '<f8'), ('ZWARN', '<i8'),
                  ('SPECTYPE', '<U6'), ('SUBTYPE', '<U2'), ('DELTACHI2', '<f8')]
    zcat_out = Table( data=np.zeros(len(redrock_cat), dtype=zcat_dtype) )
    zcat_out['TARGETID'] = redrock_cat['TARGETID']
    copied_keys = ('CHI2', 'DELTACHI2', 'COEFF', 'SPECTYPE', 'SUBTYPE', 'Z', 'ZERR', 'ZWARN')
    for key in copied_keys:
        zcat_out[key] = redrock_cat[key][:, fit_num]
    return zcat_out
def get_subset_label(subset, dirtree_type):
    """Return the file-name label matching a spectra subset.

    'cumulative' subsets get a 'thru' prefix and 'perexp' subsets an
    'exp' prefix; other supported directory-tree types use the subset
    name unchanged.

    Raises
    ------
    ValueError
        If ``dirtree_type`` is not a supported directory-tree type.
    """
    label_prefixes = {
        'cumulative': 'thru',
        'perexp': 'exp',
        'pernight': '',
        'exposures': '',
        'healpix': '',
    }
    if dirtree_type not in label_prefixes:
        raise ValueError("Unrecognized value for dirtree_type.")
    return label_prefixes[dirtree_type] + subset
def create_subsetdb(datadir, dirtree_type=None, spectra_type='coadd', tiles=None, nights=None, expids=None,
                    survey_program=None, petals=None, pixels=None, with_zcat=True):
    """Create a 'mini-db' of DESI spectra files, in a given directory tree.

    Supports tile-based and exposure-based directory trees for daily, andes, ... to everest.
    This routine does not open any file, it just checks they exist.

    Parameters
    ----------
    datadir : :class:`string`
        Root of the directory tree to scan.
    dirtree_type : :class:`string`
        The directory tree and file names must match the types listed in the notes below.
    spectra_type : :class:`string`, optional
        [c/s]frames are only supported when dirtree_type='exposures'
    petals : :class:`list`, optional
        Filter a set of petal numbers.
    tiles : :class:`list`, optional
        Filter a list of tiles.
    nights : :class:`list`, optional
        Filter a list of nights (only if dirtree_type='pernight' or 'exposures').
    expids : :class:`list`, optional
        Filter a list of exposures (only if dirtree_type='perexp' or 'exposures').
    survey_program : :class:`list`, optional
        Filter a [survey, program], only if dirtree_type='healpix'.
    pixels : :class:`list`, optional
        Filter a list of Healpix pixels (only if dirtree_type='healpix').
    with_zcat : :class:`bool`, optional
        If True, filter spectra for which a 'redrock' (or 'zbest') fits file exists at the same location.

    Returns
    -------
    :class:`dict`
        Content of the 'mini-db':
        - if dirtree_type='healpix': [ {'dataset':(survey, program), 'subset':'pixel', 'petals':[None]}]
        - if dirtree_type='exposures': [ {'dataset':night, 'subset':expid, 'petals':[list of petals]}]
        - if dirtree_type='perexp': [ {'dataset':tile, 'subset':expid, 'petals':[list of petals]}]
        - else: [ {'dataset':tile, 'subset':night, 'petals':[list of petals]}]

    Notes
    -----
    * `dirtree_type` must be one of the following:
        - ``dirtree_type='healpix'``: ``{datadir}/{survey}/{program}/{pixel//100}/{pixel}/{spectra_type}-{survey}-{program}-{pixel}.fits``
        - ``dirtree_type='pernight'``: ``{datadir}/{tileid}/{night}/{spectra_type}-{petal}-{tile}-{night}.fits``
        - ``dirtree_type='perexp'``: ``{datadir}/{tileid}/{expid}/{spectra_type}-{petal}-{tile}-exp{expid}.fits``
        - ``dirtree_type='cumulative'``: ``{datadir}/{tileid}/{night}/{spectra_type}-{petal}-{tile}-thru{night}.fits``
        - ``dirtree_type='exposures'``: ``{datadir}/{night}/{expid}/{spectra_type}-{band}{petal}-{expid}.fits``
        - Note that 'perexp' and 'exposures' are different.
        - To use blanc/cascades 'all' (resp 'deep') coadds, use dirtree_type='pernight' and nights=['all'] (resp ['deep']).
    """
    # TODO support (everest) healpix-based directory trees
    #- Argument sanity checks: subset filters only make sense for the matching tree layout.
    if ( (nights is not None and dirtree_type!='pernight' and dirtree_type!='exposures')
        or (expids is not None and dirtree_type!='perexp' and dirtree_type!='exposures') ):
        raise ValueError('Nights/expids option is incompatible with dirtree_type.')
    if (pixels is not None or survey_program is not None) and dirtree_type!='healpix':
        raise ValueError('Pixels/survey_program option is incompatible with dirtree_type.')
    if dirtree_type == 'exposures':
        #- Exposure trees hold per-camera frames, and no redshift catalog lives there.
        if spectra_type not in ['frame', 'cframe', 'sframe']:
            raise ValueError('Unsupported spectra_type: '+spectra_type)
        if with_zcat:
            raise ValueError('Cannot filter redrock/zbest files when dirtree_type=exposures')
    else:
        if spectra_type not in ['coadd', 'spectra']:
            raise ValueError('Unsupported spectra_type: '+spectra_type)
    if petals is None:
        petals = [str(i) for i in range(10)]  # default: all ten petal numbers
    #- 'datasets': first level in the explored directory tree
    if dirtree_type == 'healpix': #- in that case it's two levels survey/program
        if survey_program is not None:
            if len(survey_program)!=2:
                raise ValueError('Argument survey_program: wrong length.')
            datasets = [ (survey_program[0], survey_program[1]) ]
            if not os.path.isdir(os.path.join(datadir, survey_program[0], survey_program[1])):
                raise RuntimeError('survey_program not found in directory tree.')
        else:
            datasets = []
            for survey in os.listdir(datadir):
                for program in os.listdir(os.path.join(datadir, survey)):
                    datasets.append((survey, program))
    else:
        if dirtree_type == 'exposures':
            datasets = nights
        else:
            datasets = tiles
        if datasets is None:
            datasets = os.listdir(datadir)
        else :
            if not all(x in os.listdir(datadir) for x in datasets):
                raise RuntimeError('Some tile[s]/nights[s] were not found in directory tree.')
    subsetdb = list()
    for dataset in datasets:
        #- 'subsets': second level in the explored directory tree
        if dirtree_type == 'healpix': #- in that case it's two levels pixelgroup/pixel
            all_subsets = []
            for pixelgroup in os.listdir(os.path.join(datadir, dataset[0], dataset[1])):
                all_subsets.extend(os.listdir(os.path.join(datadir, dataset[0], dataset[1], pixelgroup)))
        else:
            all_subsets = os.listdir(os.path.join(datadir, dataset))
        if (nights is not None) and (dirtree_type!='exposures'):
            all_subsets = [ x for x in all_subsets if x in nights ]
        elif expids is not None:
            all_subsets = [ x for x in all_subsets if x in expids ]
        elif pixels is not None:
            all_subsets = [ x for x in all_subsets if x in pixels ]
        else:
            #- No subset selection, but we discard subdirectories with non-decimal names
            all_subsets = [ x for x in all_subsets if x.isdecimal() ]
        for subset in all_subsets:
            if dirtree_type == 'healpix':
                nside = 64 # dummy, currently
                subset_dir = os.path.join(datadir, dataset[0], dataset[1], healpix_subdirectory(nside, int(subset)))
                file_label = '-'.join([dataset[0], dataset[1], subset])
                spectra_fname = os.path.join(subset_dir, spectra_type+'-'+file_label+'.fits')
                redrock_fname = os.path.join(subset_dir, 'redrock-'+file_label+'.fits')
                zbest_fname = os.path.join(subset_dir, 'zbest-'+file_label+'.fits') # pre-everest nomenclature
                if os.path.isfile(spectra_fname) and ( (not with_zcat) or os.path.isfile(zbest_fname) or os.path.isfile(redrock_fname)):
                    subsetdb.append( {'dataset':dataset, 'subset':subset, 'petals':[None]} )
            else:
                existing_petals = []
                for petal in petals:
                    subset_label = get_subset_label(subset, dirtree_type)
                    if dirtree_type == 'exposures':
                        #- One frame file per camera: require all three bands (b, r, z) to exist.
                        spectra_fnames = [ spectra_type+'-'+band+petal+'-'+subset_label+'.fits' for band in ['b', 'r', 'z'] ]
                        if all([os.path.isfile(os.path.join(datadir, dataset, subset, x)) for x in spectra_fnames]):
                            existing_petals.append(petal)
                    else:
                        file_label = '-'.join([petal, dataset, subset_label])
                        spectra_fname = os.path.join(datadir, dataset, subset, spectra_type+'-'+file_label+'.fits')
                        redrock_fname = os.path.join(datadir, dataset, subset, 'redrock-'+file_label+'.fits')
                        zbest_fname = os.path.join(datadir, dataset, subset, 'zbest-'+file_label+'.fits') # pre-everest nomenclature
                        if os.path.isfile(spectra_fname) and ( (not with_zcat) or os.path.isfile(zbest_fname) or os.path.isfile(redrock_fname)):
                            existing_petals.append(petal)
                if len(existing_petals)>0:
                    subsetdb.append( {'dataset':dataset, 'subset':subset, 'petals':existing_petals} )
    return subsetdb
def create_targetdb(datadir, subsetdb, dirtree_type=None):
    """Create a "mini-db" of DESI targetids.

    Reads the `redrock` (or pre-everest `zbest`) fits files rather than
    the spectra themselves (faster).

    Parameters
    ----------
    datadir : :class:`string`
        Root of the directory tree containing the spectra files.
    subsetdb : :class:`list`
        List of spectra subsets, as produced by `create_subsetdb`.
        Format: [ {'dataset':dataset, 'subset':subset, 'petal':petal} ]
    dirtree_type : :class:`string`
        See documentation in `create_subsetdb`.
        dirtree_type='exposures' is not supported here (no redrock file
        available in that case). Tile-based directory trees for daily,
        andes, ... to everest are supported; healpix-based trees are
        supported for everest.

    Returns
    -------
    :class:`dict`
        Content of the "mini-db": { (dataset, subset, petal): [list of TARGETIDs] }
        where dataset is a tile, night, or a (survey, program) tuple;
        subset is a night, expid or pixel; and petal is None when
        dirtree_type=healpix.
    """
    if dirtree_type=='exposures':
        raise ValueError("dirtree_type='exposures' is not supported in `create_targetdb`")
    targetdb = dict()
    for entry in subsetdb:
        dataset = entry['dataset']
        subset = entry['subset']
        subset_label = get_subset_label(subset, dirtree_type)
        for petal in entry['petals']:
            if dirtree_type == 'healpix':
                nside = 64 # dummy, currently
                survey, program = dataset[0], dataset[1]
                subset_dir = os.path.join(datadir, survey, program,
                                          healpix_subdirectory(nside, int(subset)))
                file_label = '-'.join([survey, program, subset_label])
            else:
                subset_dir = os.path.join(datadir, dataset, subset)
                file_label = '-'.join([petal, dataset, subset_label])
            #- Prefer the everest-style 'redrock' file; fall back to 'zbest'.
            fname = os.path.join(subset_dir, 'redrock-'+file_label+'.fits')
            hduname = 'REDSHIFTS'
            if not os.path.isfile(fname): # pre-everest Redrock file nomenclature
                fname = os.path.join(subset_dir, 'zbest-'+file_label+'.fits')
                hduname = 'ZBEST'
            targetids = np.unique(Table.read(fname, hduname)['TARGETID'])
            targetdb[ (dataset, subset, petal) ] = np.array(targetids, dtype='int64')
    return targetdb
def load_spectra_zcat_from_targets(targetids, datadir, targetdb, dirtree_type=None, with_redrock_details=False, with_redrock_version=True):
    """Get spectra, redshift catalog and optional detailed Redrock catalog matched to a set of DESI TARGETIDs.

    This works using a "mini-db" of targetids, as returned by `create_targetdb()`.
    The outputs of this utility can be used directly by `viewer.plotspectra()`, to inspect a given list of targetids.
    Output spectra/catalog(s) are sorted according to the input target list.
    When several spectra are available for a given TARGETID, they are all included in the output, in random order.

    Parameters
    ----------
    targetids : :class:`list` or :class:`numpy.ndarray`
        List of TARGETIDs, must be int64.
    datadir : :class:`string`
        Root of the directory tree containing the spectra files.
    dirtree_type : :class:`string`
        The directory tree and file names must match the types listed in the notes below.
    targetdb : :class:`dict`
        Content of the "mini-db": { (dataset, subset, petal): [list of TARGETIDs] }, see `create_targetdb()`.
    with_redrock_details : :class:`bool`, optional
        If `True`, detailed Redrock output files (.h5 files) are also read
    with_redrock_version : :class:`bool`, optional
        If `True`, a column 'RRVER' is appended to the output redshift catalog, as given by HDU0 in `redrock`/`zbest` files.
        This is used by `viewer.plotspectra()` to track Redrock version in visual inspection files.

    Returns
    -------
    :func:`tuple`
        If with_redrock_details is `False` (default), returns (spectra, zcat), where spectra is `~desispec.spectra.Spectra`
        and zcat is `~astropy.table.Table`.
        If with_redrock_details is `True`, returns (spectra, zcat, redrockcat) where redrockcat is `~astropy.table.Table`.

    Notes
    -----
    * `dirtree_type` must be one of the following, for "coadd", "redrock"/"zbest" (.fits), and "rrdetails"/"redrock" (.h5) files:
        - ``dirtree_type='healpix'``: ``{datadir}/{survey}/{program}/{pixel//100}/{pixel}/redrock-{survey}-{program}-{pixel}.fits``
        - ``dirtree_type='pernight'``: ``{datadir}/{tileid}/{night}/redrock-{petal}-{tile}-{night}.fits``
        - ``dirtree_type='perexp'``: ``{datadir}/{tileid}/{expid}/redrock-{petal}-{tile}-exp{expid}.fits``
        - ``dirtree_type='cumulative'``: ``{datadir}/{tileid}/{night}/redrock-{petal}-{tile}-thru{night}.fits``
        - To use blanc/cascades 'all' (resp 'deep') coadds, use dirtree_type='pernight' and nights=['all'] (resp 'deep')
    """
    targetids = np.asarray(targetids)
    if targetids.dtype not in ['int64', 'i8', '>i8']:
        raise TypeError('TARGETIDs should be int64')
    spectra = None
    ztables, rrtables = [], []
    for dataset, subset, petal in targetdb.keys():
        #- Intersect the requested targets with those present in this subset.
        targets_subset = set(targetdb[dataset, subset, petal])
        targets_subset = targets_subset.intersection(set(targetids))
        # Load spectra for that tile-subset-petal only if one or more target(s) are in the list
        if len(targets_subset)>0 :
            subset_label = get_subset_label(subset, dirtree_type)
            if dirtree_type == 'healpix':
                nside = 64 # dummy, currently
                the_path = os.path.join(datadir, dataset[0], dataset[1], healpix_subdirectory(nside, int(subset)))
                file_label = '-'.join([dataset[0], dataset[1], subset_label])
            else:
                the_path = os.path.join(datadir, dataset, subset)
                file_label = '-'.join([petal, dataset, subset_label])
            the_spec = desispec.io.read_spectra(os.path.join(the_path, "coadd-"+file_label+".fits"))
            the_spec = the_spec.select(targets=sorted(targets_subset))
            #- Everest-style 'redrock' file if present, else pre-everest 'zbest'.
            if os.path.isfile(os.path.join(the_path, "redrock-"+file_label+".fits")):
                redrock_is_pre_everest = False
                the_zcat = Table.read(os.path.join(the_path, "redrock-"+file_label+".fits"), 'REDSHIFTS')
            else: # pre-everest Redrock file nomenclature
                redrock_is_pre_everest = True
                the_zcat = Table.read(os.path.join(the_path, "zbest-"+file_label+".fits"), 'ZBEST')
            if with_redrock_version:
                #- Redrock version is read from the primary HDU header keyword RRVER.
                if redrock_is_pre_everest:
                    hdulist = astropy.io.fits.open(os.path.join(the_path, "zbest-"+file_label+".fits"))
                else:
                    hdulist = astropy.io.fits.open(os.path.join(the_path, "redrock-"+file_label+".fits"))
                the_zcat['RRVER'] = hdulist[hdulist.index_of('PRIMARY')].header['RRVER']
            the_zcat = match_catalog_to_spectra(the_zcat, the_spec)
            ztables.append(the_zcat)
            if with_redrock_details:
                #- Detailed (.h5) fit results; file name also changed at everest.
                if redrock_is_pre_everest:
                    rrfile = os.path.join(the_path, "redrock-"+file_label+".h5")
                else:
                    rrfile = os.path.join(the_path, "rrdetails-"+file_label+".h5")
                the_rrcat = match_rrdetails_to_spectra(rrfile, the_spec, Nfit=None)
                rrtables.append(the_rrcat)
            if spectra is None:
                spectra = the_spec
            else:
                #- Still use update() instead of stack(), to handle case when fibermaps differ in different files.
                spectra.update(the_spec)
    #- Sort according to input target list. Check if all targets were found in spectra
    tids_spectra = spectra.fibermap['TARGETID']
    sorted_indices = []
    for target in targetids:
        w, = np.where(tids_spectra == target)
        sorted_indices.extend(w)
        if len(w)==0:
            print("Warning! TARGETID not found:", target)
    assert(len(tids_spectra)==len(sorted_indices)) # check, should always be true
    spectra = spectra[ sorted_indices ]
    zcat = vstack(ztables)
    zcat = zcat[ sorted_indices ]
    if with_redrock_details:
        rrcat = vstack(rrtables)
        rrcat = rrcat[ sorted_indices ]
        return (spectra, zcat, rrcat)
    else:
        return (spectra, zcat)
def frames2spectra(frames, nspec=None, startspec=None, with_scores=False, with_resolution_data=False):
    """Convert list of frames into DESI Spectra object

    Parameters
    ----------
    frames : :class:`list`
        A list of :class:`~desispec.frame.Frame`.
    nspec : :class:`int`, optional
        If set, only spectra in range [startspec:nspec+startspec] are kept
    startspec : :class:`int`, optional
        First spectrum index to keep when nspec is set (defaults to 0)
    with_scores : :class:`bool`, optional
        If `True`, include merged scores from input frames
    with_resolution_data : :class:`bool`, optional
        If `True`, include frames.resolution_data

    Returns
    -------
    :class:`~desispec.spectra.Spectra`
        Spectra object combining the per-band data of the input frames.
    """
    bands = list()
    wave = dict()
    flux = dict()
    ivar = dict()
    mask = dict()
    res = dict()
    for fr in frames:
        fibermap = fr.fibermap
        # Band key is the first character of CAMERA (e.g. 'b' from 'b0' --
        # presumably one frame per band; verify with callers).
        band = fr.meta['CAMERA'][0]
        bands.append(band)
        wave[band] = fr.wave
        flux[band] = fr.flux
        ivar[band] = fr.ivar
        mask[band] = fr.mask
        res[band] = fr.resolution_data
        if nspec is not None :
            if startspec is None : startspec = 0
            #- Keep only the requested slice of spectra in every array.
            flux[band] = flux[band][startspec:nspec+startspec]
            ivar[band] = ivar[band][startspec:nspec+startspec]
            mask[band] = mask[band][startspec:nspec+startspec]
            res[band] = res[band][startspec:nspec+startspec,:,:]
            fibermap = fr.fibermap[startspec:nspec+startspec]
    merged_scores = None
    if with_scores :
        #- Concatenate the scores columns of all frames into one FITS_rec.
        scores_columns = frames[0].scores.columns
        for i in range(1,len(frames)) :
            scores_columns += frames[i].scores.columns
        merged_scores = astropy.io.fits.FITS_rec.from_columns(scores_columns)
    if not with_resolution_data : res = None
    # NOTE(review): fibermap and meta are taken from the *last* frame in the
    # loop; this assumes all frames share the same fibermap -- confirm.
    spectra = desispec.spectra.Spectra(
        bands, wave, flux, ivar, mask, fibermap=fibermap, meta=fr.meta, scores=merged_scores, resolution_data=res
    )
    return spectra
def metadata_selection(spectra, mask=None, mask_type=None, gmag_range=None, rmag_range=None, chi2_range=None, snr_range=None, clean_fiberstatus=False, with_dirty_mask_merge=False, zcat=None, log=None):
    """Simple selection of DESI spectra based on various metadata.

    Filtering based on the logical AND of requested selection criteria.
    Note: use X_range=[min, None] to filter X > min, X_range=[None, max] to filter X < max.
    Note: the X_range list arguments are modified in place when a bound is None.

    Parameters
    ----------
    spectra : :class:`~desispec.spectra.Spectra`
        Input spectra to be filtered.
    mask : :class:`string`, optional
        DESI targeting mask to select, eg 'ELG'. Requires to set mask_type.
    mask_type : :class:`string`, optional
        DESI targeting mask category, currently supported: 'DESI_TARGET', 'BGS_TARGET',
        'MWS_TARGET', 'SECONDARY_TARGET', 'CMX_TARGET', 'SV[1/2/3]_DESI_TARGET', 'SV[1/2/3]_BGS_TARGET',
        'SV[1/2/3]_MWS_TARGET', 'SV[1/2/3]_SCND_TARGET'.
    with_dirty_mask_merge : :class:`bool`, optional
        Option for specific targeting mask selection in early CMX data, see code...
    gmag_range : :class:`list`
        g magnitude range to select, gmag_range = [gmag_min, gmag_max]
    rmag_range : :class:`list`
        r magnitude range to select, rmag_range = [rmag_min, rmag_max]
    snr_range : :class:`list`
        SNR range to select, snr_range = [snr_min, snr_max].
        This filter applies on all B, R and Z bands, from scores.MEDIAN_COADD_SNR_band, or
        scores.MEDIAN_CALIB_SNR_band if the former is not found.
    chi2_range : :class:`list`
        chi2 range to select, chi2_range = [chi2_min, chi2_max]. Requires to set zcat.
    clean_fiberstatus : :class:`bool`
        if True, remove spectra with FIBERSTATUS!=0 or COADD_FIBERSTATUS!=0
    zcat : :class:`~astropy.table.Table`
        catalog with chi2 information, must be matched to spectra (needed for chi2_range filter).
    log : optional log.

    Returns
    -------
    :class:`~desispec.spectra.Spectra`
        The subset of input spectra passing all filters; ``None`` when a
        requested filter selects no spectrum at all.
    """
    #- Boolean selection array, one entry per spectrum; start with all kept.
    keep = np.ones(len(spectra.fibermap), bool)
    #- SNR selection
    if (snr_range is not None) and (snr_range!=[None, None]):
        #- If a bound is set to None, replace by +-np.inf
        if snr_range[0]==None:
            snr_range[0] = -np.inf
        if snr_range[1]==None:
            snr_range[1] = np.inf
        if len(snr_range)!=2 or snr_range[1]<snr_range[0]:
            raise ValueError("Wrong input snr_range")
        if spectra.scores is None:
            raise RuntimeError('No scores in spectra: cannot select on SNR')
        #- Prefer coadd-based SNR scores, fall back to calib-based ones.
        snr_var = 'MEDIAN_COADD_SNR'
        if snr_var+'_B' not in spectra.scores.keys():
            snr_var = 'MEDIAN_CALIB_SNR'
        for band in ['B','R','Z'] :
            keep_snr = ( (spectra.scores[snr_var+'_'+band]>snr_range[0]) &
                         (spectra.scores[snr_var+'_'+band]<snr_range[1]) )
            if np.all(~keep_snr):
                if log is not None :
                    log.info(" * No spectra with MEDIAN_CALIB_SNR_"+band+" in requested range")
                return None
            else :
                keep = ( keep & keep_snr )
    #- Target mask selection
    if mask is not None :
        if not _desitarget_imported:
            raise RuntimeError('desitarget not imported: cannot select on targeting mask')
        if mask_type not in spectra.fibermap.keys():
            mask_candidates = [x for x in spectra.fibermap.keys() if '_TARGET' in x]
            raise ValueError(mask_type+" is not in spectra.fibermap.\n Hints of available masks: "+(' '.join(mask_candidates)))
        mask_used = supported_desitarget_masks[mask_type]
        if mask not in mask_used.names():
            raise ValueError("requested mask "+mask+" does not match mask_type "+mask_type)
        keep_mask = (spectra.fibermap[mask_type] & mask_used[mask]) > 0 # boolean array
        if mask_type == 'CMX_TARGET' and with_dirty_mask_merge:
            #- Self-explanatory... only for fast VI of minisv
            mask2 = None
            if mask in ['SV0_QSO', 'SV0_ELG', 'SV0_LRG']: mask2 = mask.replace('SV0','MINI_SV')
            if mask == 'SV0_BGS': mask2 = 'MINI_SV_BGS_BRIGHT'
            if mask in ['SV0_STD_FAINT', 'SV0_STD_BRIGHT']: mask2 = mask.replace('SV0_','')
            if mask2 is not None:
                keep_mask = ( (spectra.fibermap[mask_type] & mask_used[mask]) |
                              (spectra.fibermap[mask_type] & mask_used[mask2]) ) > 0
        if np.all(~keep_mask):
            if log is not None : log.info(" * No spectra with mask "+mask)
            return None
        else :
            keep = ( keep & keep_mask )
    #- Photometry selection
    if (gmag_range is not None) and (gmag_range!=[None, None]):
        if gmag_range[0]==None:
            gmag_range[0] = -np.inf
        if gmag_range[1]==None:
            gmag_range[1] = np.inf
        if len(gmag_range)!=2 or gmag_range[1]<gmag_range[0]:
            raise ValueError("Wrong input gmag_range")
        #- Magnitudes from fluxes (nanomaggie zero point 22.5), corrected for
        #- Milky Way transmission when that column is available.
        gmag = np.zeros(spectra.num_spectra())
        w, = np.where( (spectra.fibermap['FLUX_G']>0) )
        gmag[w] = -2.5*np.log10(spectra.fibermap['FLUX_G'][w])+22.5
        if 'MW_TRANSMISSION_G' in spectra.fibermap.keys():
            w, = np.where( (spectra.fibermap['FLUX_G']>0) & (spectra.fibermap['MW_TRANSMISSION_G']>0) )
            gmag[w] = -2.5*np.log10(spectra.fibermap['FLUX_G'][w]/spectra.fibermap['MW_TRANSMISSION_G'][w])+22.5
        keep_gmag = ( (gmag>gmag_range[0]) & (gmag<gmag_range[1]) )
        if np.all(~keep_gmag):
            if log is not None : log.info(" * No spectra with g_mag in requested range")
            return None
        else :
            keep = ( keep & keep_gmag )
    if (rmag_range is not None) and (rmag_range!=[None, None]):
        if rmag_range[0]==None:
            rmag_range[0] = -np.inf
        if rmag_range[1]==None:
            rmag_range[1] = np.inf
        if len(rmag_range)!=2 or rmag_range[1]<rmag_range[0]:
            raise ValueError("Wrong input rmag_range")
        rmag = np.zeros(spectra.num_spectra())
        w, = np.where( (spectra.fibermap['FLUX_R']>0) )
        rmag[w] = -2.5*np.log10(spectra.fibermap['FLUX_R'][w])+22.5
        if 'MW_TRANSMISSION_R' in spectra.fibermap.keys():
            w, = np.where( (spectra.fibermap['FLUX_R']>0) & (spectra.fibermap['MW_TRANSMISSION_R']>0) )
            rmag[w] = -2.5*np.log10(spectra.fibermap['FLUX_R'][w]/spectra.fibermap['MW_TRANSMISSION_R'][w])+22.5
        keep_rmag = ( (rmag>rmag_range[0]) & (rmag<rmag_range[1]) )
        if np.all(~keep_rmag):
            if log is not None : log.info(" * No spectra with r_mag in requested range")
            return None
        else :
            keep = ( keep & keep_rmag )
    #- Chi2 selection
    if (chi2_range is not None) and (chi2_range!=[None, None]):
        if chi2_range[0]==None:
            chi2_range[0] = -np.inf
        if chi2_range[1]==None:
            chi2_range[1] = np.inf
        if len(chi2_range)!=2 or chi2_range[1]<chi2_range[0]:
            raise ValueError("Wrong input chi2_range")
        if np.any(zcat['TARGETID'] != spectra.fibermap['TARGETID']) :
            raise RuntimeError('zcat and spectra do not match (different targetids)')
        keep_chi2 = ( (zcat['DELTACHI2']>chi2_range[0]) & (zcat['DELTACHI2']<chi2_range[1]) )
        if np.all(~keep_chi2):
            if log is not None : log.info(" * No target in this pixel with DeltaChi2 in requested range")
            return None
        else :
            keep = ( keep & keep_chi2 )
    #- Fiberstatus selection
    if clean_fiberstatus:
        if 'FIBERSTATUS' in spectra.fibermap.keys():
            keep = ( keep & (spectra.fibermap['FIBERSTATUS']==0) )
        elif 'COADD_FIBERSTATUS' in spectra.fibermap.keys():
            keep = ( keep & (spectra.fibermap['COADD_FIBERSTATUS']==0) )
    return spectra[keep]
def _coadd(wave, flux, ivar, rdat):
'''Return weighted coadd of spectra
Parameters
----------
wave : array-like
1D[nwave] array of wavelengths.
flux : array-like
2D[nspec, nwave] array of flux densities.
ivar : array-like
2D[nspec, nwave] array of inverse variances of `flux`.
rdat : array-like
3D[nspec, ndiag, nwave] sparse diagonals of resolution matrix.
Returns
-------
:class:`tuple`
The coadded spectrum (wave, outflux, outivar, outrdat).
'''
nspec, nwave = flux.shape
unweightedflux = np.zeros(nwave, dtype=flux.dtype)
weightedflux = np.zeros(nwave, dtype=flux.dtype)
weights = np.zeros(nwave, dtype=flux.dtype)
outrdat = np.zeros(rdat[0].shape, dtype=rdat.dtype)
for i in range(nspec):
unweightedflux += flux[i]
weightedflux += flux[i] * ivar[i]
weights += ivar[i]
outrdat += rdat[i] * ivar[i]
isbad = (weights == 0)
outflux = weightedflux / (weights + isbad)
outflux[isbad] = unweightedflux[isbad] / nspec
outrdat /= (weights + isbad)
outivar = weights
return wave, outflux, outivar, outrdat
def coadd_targets(spectra, targetids=None):
    '''
    Coadds individual exposures of the same targets; returns new Spectra object

    Parameters
    ----------
    spectra : :class:`desispec.spectra.Spectra`
    targetids : array-like, optional
        Subset of target IDs to keep.

    Returns
    -------
    :class:`desispec.spectra.Spectra`
        Where individual spectra of each target have been combined into a
        single spectrum per camera.

    Notes
    -----
    Coadds per camera but not across cameras.
    '''
    if targetids is None:
        targetids = spectra.target_ids()
    #- Create output arrays to fill.
    #- Bug fix: size the outputs by the number of *requested* targets.  The
    #- previous spectra.num_targets() sizing was wrong whenever `targetids`
    #- was a subset (trailing all-zero rows and a fibermap length mismatch)
    #- and raised IndexError if it listed more entries than unique targets.
    ntargets = len(targetids)
    wave = dict()
    flux = dict()
    ivar = dict()
    rdat = dict()
    if spectra.mask is None:
        mask = None
    else:
        mask = dict()
    for channel in spectra.bands:
        wave[channel] = spectra.wave[channel].copy()
        nwave = len(wave[channel])
        flux[channel] = np.zeros((ntargets, nwave))
        ivar[channel] = np.zeros((ntargets, nwave))
        ndiag = spectra.resolution_data[channel].shape[1]
        rdat[channel] = np.zeros((ntargets, ndiag, nwave))
        if mask is not None:
            mask[channel] = np.zeros((ntargets, nwave), dtype=spectra.mask[channel].dtype)
    #- Loop over targets, coadding all spectra for each target
    fibermap = Table(dtype=spectra.fibermap.dtype)
    for i, targetid in enumerate(targetids):
        ii = np.where(spectra.fibermap['TARGETID'] == targetid)[0]
        #- Keep the fibermap row of the first exposure for this target
        fibermap.add_row(spectra.fibermap[ii[0]])
        for channel in spectra.bands:
            if len(ii) > 1:
                outwave, outflux, outivar, outrdat = _coadd(
                    spectra.wave[channel],
                    spectra.flux[channel][ii],
                    spectra.ivar[channel][ii],
                    spectra.resolution_data[channel][ii]
                )
                if mask is not None:
                    #- OR together the mask bits of every exposure
                    outmask = spectra.mask[channel][ii[0]]
                    for j in range(1, len(ii)):
                        outmask |= spectra.mask[channel][ii[j]]
            else:
                #- Single exposure: pass the data through unchanged
                outwave, outflux, outivar, outrdat = (
                    spectra.wave[channel],
                    spectra.flux[channel][ii[0]],
                    spectra.ivar[channel][ii[0]],
                    spectra.resolution_data[channel][ii[0]]
                )
                if mask is not None:
                    outmask = spectra.mask[channel][ii[0]]
            flux[channel][i] = outflux
            ivar[channel][i] = outivar
            rdat[channel][i] = outrdat
            if mask is not None:
                mask[channel][i] = outmask
    return desispec.spectra.Spectra(spectra.bands, wave, flux, ivar,
            mask=mask, resolution_data=rdat, fibermap=fibermap,
            meta=spectra.meta)
| StarcoderdataPython |
# repo: Rohan-Raj-1729/myPackage
#Imports
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
#from my_package.model import ObjectDetectionModel
#from my_package.data import DataSet
#from my_package.analysis import show_boxes
from my_package.data.transforms import FlipImage, RescaleImage, BlurImage, CropImage, RotateImage
"""
def experiment(annotation_file, detector, transforms, outputs):
'''
Function to perform the desired experiments
Arguments:
annotation_file: Path to annotation file
detector: The object detector
transforms: List of transformation classes
outputs: path of the output folder to store the images
'''
#Create the instance of the dataset.
data = DataSet(annotation_file, transforms)
#Iterate over all data items.
ds_list = []
for i in range(len(data)):
ds_list.append(data[i]['gt_bboxes'])
#Get the predictions from the detector.
allPredictions = [detector(i['image']) for i in ds_list]
#Draw the boxes on the image and save them.
#Do the required analysis experiments.
all_images = []
my_image = Image.open('./data/imgs/2.jpg')
my_image.save("original_img.jpg", outputs)
all_images.append(my_image)
A = np.asarray(my_image)
flipped = flip.FlipImage('horizontal')
image = Image.fromarray(flipped(A), 'RGB')
image.save("flipped_img.jpg", outputs)
all_images.append(image)
blurred = blur.BlurImage(1.7)
image = Image.fromarray(blurred(A), 'RGB')
image.save("blurred_img.jpg", outputs)
all_images.append(image)
shapeA = A.shape()
resized2x = rescale.RescaleImage((shapeA[0]*2, shapeA[1]*2))
image = Image.fromarray(resized2x(A), 'RGB')
image.save("2x_img.jpg", outputs)
all_images.append(image)
resizedHalf = rescale.RescaleImage((shapeA[0]//2, shapeA[1]//2))
image = Image.fromarray(resizedHalf(A), 'RGB')
image.save("halfResized_img.jpg", outputs)
all_images.append(image)
rotated90CW = RotateImage(-90)
image = Image.fromarray(rotated90CW(A), 'RGB')
image.save("rotated90_img.jpg", outputs)
all_images.append(image)
rotated45ACW = RotateImage(45)
image = Image.fromarray(rotated45ACW(A), 'RGB')
image.save("rotated45_img.jpg", outputs)
all_images.append(image)
plt.figure(1)
for i in range(len(all_images)):
plt.subplot(7, i, 1)
plt.imshow(all_images[i])
"""
def main():
    """Demo driver: display the sample image, then display it rotated 90 degrees.

    The commented-out calls below are alternative transforms kept around for
    manual experimentation with the my_package transform classes.
    """
    # detector = ObjectDetectionModel()
    # experiment('./data/annotations.jsonl', detector, [FlipImage(), BlurImage()], "./Outputs")
    source = Image.open('./data/imgs/2.jpg')
    pixels = np.asarray(source)
    Image.fromarray(pixels, 'RGB').show()
    # cropped = CropImage([1000, 100])
    # Image.fromarray(cropped(pixels), 'RGB').show()
    rotate = RotateImage(90)
    Image.fromarray(rotate(pixels), 'RGB').show()
    # Other transforms to try: FlipImage('horizontal'), CropImage((300, 300,)),
    # BlurImage(1), RescaleImage(100), and
    # DataSet("./data/annotations.jsonl", [RotateImage(90), BlurImage(1), RescaleImage(100)])


if __name__ == '__main__':
    main()
pytest_plugins = ("pytester",)
def test_help_message(testdir):
    """The plugin's options must all appear in ``pytest --help`` output."""
    expected = [
        "stress:",
        "*--delay=DELAY*The amount of time to wait between each test loop.",
        "*--hours=HOURS*The number of hours to loop the tests for.",
        "*--minutes=MINUTES*The number of minutes to loop the tests for.",
        "*--seconds=SECONDS*The number of seconds to loop the tests for.",
    ]
    result = testdir.runpytest("--help")
    # fnmatch_lines asserts internally if any expected pattern is missing.
    result.stdout.fnmatch_lines(expected)
def test_ini_file(testdir):
    """``addopts`` values from the ini file are exposed through ``getini``."""
    ini_source = """
    [pytest]
    addopts = --delay=0 --hours=0 --minutes=0 --seconds=0
    """
    test_source = """
    import pytest
    @pytest.fixture
    def addopts(request):
        return request.config.getini('addopts')
    def test_ini(addopts):
        assert addopts[0] == "--delay=0"
        assert addopts[1] == "--hours=0"
        assert addopts[2] == "--minutes=0"
        assert addopts[3] == "--seconds=0"
    """
    testdir.makeini(ini_source)
    testdir.makepyfile(test_source)
    result = testdir.runpytest("-v")
    # fnmatch_lines asserts internally if the inner test did not pass.
    result.stdout.fnmatch_lines(["*::test_ini PASSED*", ])
    # A zero exit code means the whole generated test suite passed.
    assert result.ret == 0
| StarcoderdataPython |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import *
from .forms import *
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .forms import CustomUserCreationForm
# Create your views here.
@login_required(login_url='login')
def index(request):
    """Show the logged-in user's task list and create a new task on POST."""
    current_user = request.user
    user_tasks = Task.objects.filter(user=current_user)
    form = TaskForm()
    if request.method == 'POST':
        form = TaskForm(request.POST)
        if form.is_valid():
            # Attach the owner before persisting; the form has no user field.
            new_task = form.save(commit=False)
            new_task.user = current_user
            new_task.save()
            return redirect('/')
    # On GET (or invalid POST) render the list with the (possibly bound) form.
    return render(request, 'tasks/list.html', {'tasks': user_tasks, 'form': form})
@login_required(login_url='login')
def updateTask(request, pk):
    """Edit the existing task identified by primary key ``pk``."""
    task = Task.objects.get(id=pk)
    if request.method == 'POST':
        form = TaskForm(request.POST, instance=task)
        if form.is_valid():
            form.save()
            return redirect('/')
    else:
        # GET: pre-populate the form from the stored task.
        form = TaskForm(instance=task)
    return render(request, 'tasks/update_task.html', {'form': form})
@login_required(login_url='login')
def deleteTask(request, pk):
    """Confirm (GET) and perform (POST) deletion of a task."""
    item = Task.objects.get(id=pk)
    if request.method == 'POST':
        item.delete()
        return redirect('/')
    return render(request, 'tasks/delete.html', {'item': item})
def loginUser(request):
    """Render the login page; authenticate and sign the user in on POST."""
    page = 'login'
    if request.method == 'POST':
        submitted = {
            'username': request.POST['username'],
            'password': request.POST['password'],
        }
        user = authenticate(request, **submitted)
        if user is not None:
            login(request, user)
            return redirect('list')
    # Failed or first-time visit: show the login/register template.
    return render(request, 'tasks/login_register.html', {'page': page})
def logoutUser(request):
    """Log the current user out and send them back to the login page."""
    logout(request)
    return redirect('login')
def registerUser(request):
    """Render the signup page; create and sign in a new user on POST.

    On invalid form data the bound form (with errors) is re-rendered.
    """
    page = 'register'
    form = CustomUserCreationForm()
    if request.method == 'POST':
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            # Bug fix: authenticate with the raw password from the validated
            # form data ('password1' is the UserCreationForm field name).  The
            # original read a mangled request.POST key, which raised KeyError
            # on every successful registration.
            user = authenticate(request,
                                username=user.username,
                                password=form.cleaned_data['password1'])
            if user is not None:
                login(request, user)
                return redirect('list')
    context = {
        'form': form,
        'page': page,
    }
    return render(request, 'tasks/login_register.html', context)
| StarcoderdataPython |
# buddymove_holidayiq.py
from sqlalchemy import create_engine
import pandas as pd

# In-memory SQLite database used as the SQL target.
engine = create_engine('sqlite://', echo=False)

# Pull the review-count dataset straight from the course repository.
df = pd.read_csv('https://raw.githubusercontent.com/LambdaSchool/DS-Unit-3-Sprint-2-SQL-and-Databases/master/module1-introduction-to-sql/buddymove_holidayiq.csv')
print(df.shape)

# Mirror the dataframe into a SQL table, then read every row back out.
df.to_sql('buddymove_holidayiq', con=engine)
engine.execute("SELECT * FROM buddymove_holidayiq").fetchall()
"""
RPC Provider using Requests Library
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2015-2017 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long
import requests
from vmware.vapi.lib.log import get_vapi_logger
from vmware.vapi.protocol.client.http_lib import HTTPMethod, HTTPResponse
from vmware.vapi.protocol.client.rpc.provider import HTTPProvider
logger = get_vapi_logger(__name__)
class RequestsRpcProvider(HTTPProvider):
    """
    vAPI RPC provider using requests library
    """
    # Maps transport-neutral HTTPMethod constants to the name of the
    # corresponding requests.Session method.
    _http_method_map = {
        HTTPMethod.DELETE: 'delete',
        HTTPMethod.GET: 'get',
        HTTPMethod.HEAD: 'head',
        HTTPMethod.OPTIONS: 'options',
        HTTPMethod.PATCH: 'patch',
        HTTPMethod.POST: 'post',
        HTTPMethod.PUT: 'put'
    }

    def __init__(self, session, base_url, default_timeout, pool_size):
        """
        Initialize RequestsRpcProvider

        :type  session: :class:`requests.Session`
        :param session: Session object
        :type  base_url: :class:`str`
        :param base_url: HTTP(S) URL to be used
        :type  default_timeout: :class:`int`
        :param default_timeout: Request default timeout
        :type  pool_size: :class:`int`
        :param pool_size: Connection pool size to be used
        """
        HTTPProvider.__init__(self)
        self._session = session
        # Normalize: drop a single trailing '/' so url_path can be appended
        # verbatim in do_request().
        self._base_url = base_url[:-1] if base_url.endswith('/') else base_url
        self._pool_size = pool_size
        self._default_timeout = default_timeout
        if pool_size:
            # Mount adapters with explicitly sized connection pools for both
            # schemes so concurrent calls do not exhaust the default pool.
            http_adapter = requests.adapters.HTTPAdapter(
                pool_connections=pool_size,
                pool_maxsize=pool_size)
            https_adapter = requests.adapters.HTTPAdapter(
                pool_connections=pool_size,
                pool_maxsize=pool_size)
            self._session.mount('http://', http_adapter)
            self._session.mount('https://', https_adapter)

    def __del__(self):
        """ Requests rpc provider on delete """
        self.disconnect()

    def connect(self):
        """
        connect

        :rtype: :class:`vmware.vapi.protocol.client.rpc.provider.RpcProvider`
        :return: http rpc provider
        """
        # Connections are opened lazily by the underlying session, so there
        # is nothing to set up here.
        return self

    def disconnect(self):
        """ disconnect """
        # The session (and its pools) is owned by the caller; no teardown.
        pass

    def do_request(self, http_request):
        """
        Send an HTTP request

        :type  http_request: :class:`vmware.vapi.protocol.client.http_lib.HTTPRequest`
        :param http_request: The http request to be sent
        :rtype: :class:`vmware.vapi.protocol.client.http_lib.HTTPResponse`
        :return: The http response received
        """
        method_name = http_request.method
        if http_request.url_path:
            url = ''.join([self._base_url, http_request.url_path])
        else:
            url = self._base_url
        # A per-request timeout wins over the provider-wide default.
        timeout = http_request.timeout if http_request.timeout else self._default_timeout
        output = self._session.request(
            method=self._http_method_map[method_name], url=url,
            data=http_request.body, headers=http_request.headers,
            cookies=http_request.cookies, timeout=timeout)
        # Force utf-8 so .text decoding does not depend on charset guessing.
        output.encoding = 'utf-8'
        return HTTPResponse(output.status_code, output.headers, output.text,
                            output)
| StarcoderdataPython |
3461355 | <gh_stars>0
def main(file_to_use='/home/cs241/assign02/rates.csv'):
    """Average the commercial electricity rate over every data row of a CSV.

    The data file columns are:
    zip,eiaid,utility_name,state,service_type,ownership,comm_rate,ind_rate,res_rate
    (zip = [0], name = [2], state = [3], comm_rate = [6])

    Prints each utility name as it is read, then the average comm_rate and
    the header line.

    :param file_to_use: path to the rates CSV (defaults to the course data file)
    :returns: the average comm_rate as a float, or None if there are no data
        rows (the original crashed with ZeroDivisionError in that case)
    """
    cumulative_rate_sum = 0.0
    num_of_rates = 0
    with open(file_to_use) as open_file:
        first_line = open_file.readline().strip()  # header row
        # Bug fix: the original mixed next()/readline() with `for line in
        # open_file`, which silently skipped the first data row and then every
        # other row after it.  Iterating the file object alone visits each
        # remaining line exactly once.
        for line in open_file:
            data = line.strip().split(',')
            rate = float(data[6])
            ult_name = data[2]
            num_of_rates += 1
            cumulative_rate_sum += rate
            print(ult_name)
    average = cumulative_rate_sum / num_of_rates if num_of_rates else None
    print(average)
    print(first_line)
    return average


if __name__ == '__main__':
    main()
| StarcoderdataPython |
157546 | <gh_stars>100-1000
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
class Stage2(torch.nn.Module):
    """Pipeline stage applying three 3x3 conv + ReLU pairs (192->384->256->256).

    The attribute names (layer1..layer6) are part of the checkpoint
    state_dict layout and must not be changed.
    """

    def __init__(self):
        super(Stage2, self).__init__()
        self.layer1 = torch.nn.Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.layer2 = torch.nn.ReLU(inplace=True)
        self.layer3 = torch.nn.Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.layer4 = torch.nn.ReLU(inplace=True)
        self.layer5 = torch.nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.layer6 = torch.nn.ReLU(inplace=True)

    def forward(self, input0):
        # Clone the incoming activation first (the in-place ReLUs must not
        # mutate the buffer this stage received), then apply each layer in
        # declaration order.
        out = input0.clone()
        for layer in (self.layer1, self.layer2, self.layer3,
                      self.layer4, self.layer5, self.layer6):
            out = layer(out)
        return out
| StarcoderdataPython |
#! /usr/bin/python3
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
#
""".. _conf_00_lib_capture:
Configuration API for capturing audio and video
-----------------------------------------------
These capture objects are meant to be fed to the capture interface
declaration of a target in the server, for example, in any server
configuration file you could have added a target and then a capture
interface can be added with:
.. code-block:: python
ttbl.test_target.get('TARGETNAME').interface_add(
"capture",
ttbl.capture.interface(
screen = "hdmi0_screenshot",
screen_stream = "hdmi0_vstream",
audio_stream = "front_astream",
front_astream = capture_front_astream_vtop_0c76_161e,
hdmi0_screenshot = capture_screenshot_ffmpeg_v4l,
hdmi0_vstream = capture_vstream_ffmpeg_v4l,
hdmi0_astream = capture_astream_ffmpeg_v4l,
)
)
This assumes we have connected and configured:
- an HDMI grabber to the target's HDMI0 output (see :data:`setup
instructions <capture_screenshot_ffmpeg_v4l>`)
- an audio grabber to the front audio output (see :data:`setup
instructions <capture_front_astream_vtop_0c76_161e>`).
to create multiple capture capabilityies (video and sound streams, and
screenshots) with specific names for the ouputs and aliases)
Note the audio capturers are many times HW specific because they
expose different audio controls that have to be set or queried.
"""
import signal
import ttbl.capture
#: A capturer to take screenshots from a v4l device using ffmpeg
#:
#: Note the fields are target's tags and others specified in
#: :class:`ttbl.capture.generic_snapshot` and
#: :class:`ttbl.capture.generic_stream`.
#:
#: To use:
#:
#: - define a target
#:
#: - physically connect the capture interface to it and to the
#: server
#:
#: - Create a *udev* configuration so the capture device exposes
#: itself as */dev/video-TARGETNAME-INDEX*.
#:
#: This requires creating a *udev* configuration so that the v4l
#: device gets recognized and an alias created, which can be
#: accomplished by dropping a udev rule in */etc/udev/rules.d* such
#: as::
#:
#: SUBSYSTEM == "video4linux", ACTION == "add", \
#: KERNEL=="video*", \
#: ENV{ID_SERIAL_SHORT} == "SOMESERIALNUMBER", \
#: SYMLINK += "video-nuc-01A-$attr{index}"
#:
#: note some USB devices don't offer a serial number, then you
#: can use a device path, such as::
#:
#: ENV{ID_PATH} == "pci-0000:00:14.0-usb-0:2.1:1.0", \
#:
#: this is shall be a last resort, as then moving cables to
#: different USB ports will change the paths and you will have to
#: reconfigure.
#:
#: See :ref:`methods to find device information <find_usb_info>`
#:
#: - add the configuration snippet::
#:
#: ttbl.test_target.get(TARGETNAME).interface_add(
#: "capture",
#: ttbl.capture.interface(
#: screen = "hdmi0_screenshot",
#: screen_stream = "hdmi0_vstream",
#: hdmi0_screenshot = capture_screenshot_ffmpeg_v4l,
#: hdmi0_vstream = capture_vstream_ffmpeg_v4l,
#: ))
#:
#: Note in this case we have used an
#:
#: This has tested with with:
#:
#: - https://www.agptek.com/AGPTEK-USB-3-0-HDMI-HD-Video-Capture-1089-212-1.html
#:
#: Which shows in USB as::
#:
#: 3-2.2.4 1bcf:2c99 ef 3.10 5000MBit/s 512mA 4IFs (VXIS Inc ezcap U3 capture)
#: 3-2.2.4:1.2 (IF) 01:01:00 0EPs (Audio:Control Device) snd-usb-audio sound/card5
#: 3-2.2.4:1.0 (IF) 0e:01:00 1EP (Video:Video Control) uvcvideo video4linux/video5 video4linux/video4 input/input15
#: 3-2.2.4:1.3 (IF) 01:02:00 0EPs (Audio:Streaming) snd-usb-audio
#: 3-2.2.4:1.1 (IF) 0e:02:00 1EP (Video:Video Streaming) uvcvideo
#:
#: Note this also can be used to capture video of the HDMI stream
#: using capture_vstream_ffmpeg_v4l and audio played over HDMI via
#: an exposed ALSA interface (see capture_astream_ffmpeg_v4l below).
capture_screenshot_ffmpeg_v4l = ttbl.capture.generic_snapshot(
    # %(id)s expands to the target name at capture time, matching the
    # /dev/video-TARGETNAME-0 udev alias described above.
    "screenshot:/dev/video-%(id)s-0",
    "ffmpeg -i /dev/video-%(id)s-0"
    # -ss .50 to let the capturer warm up; 0 will come a
    # black frame always
    " -ss 0.5 -frames 1 -c:v png -f image2pipe "
    "-y %(output_file_name)s",
    mimetype = "image/png", extension = ".png"
)
#: A capturer to take screenshots from VNC
#:
#: Note the fields are target's tags and others specified in
#: :class:`ttbl.capture.generic_snapshot` and
#: :class:`ttbl.capture.generic_stream`.
#:
#: Deprecated in favour of :func:`mk_capture_screenshot_vnc`
# Deprecated: prefer mk_capture_screenshot_vnc(), which reads the newer
# vnc.NAME.* inventory keys instead of the legacy vnc-host/vnc-port tags.
capture_screenshot_vnc = ttbl.capture.generic_snapshot(
    # dont set the port for the name, otherwise the UPID keeps
    # changing
    "VNC %(id)s@%(vnc-host)s",
    # need to make sure vnc-host/port are defined in the target's tags
    # needs the .png, otherwise it balks at guessing extensions
    # don't do -q, otherwise when it fails, it fails silently; for
    # QEMU, it is *localhost*.
    "gvnccapture %(vnc-host)s:%(vnc-port)s %(output_file_name)s",
    mimetype = "image/png",
    extension = ".png"
)
def mk_capture_screenshot_vnc(name):
    """
    Build a VNC screenshot capturer for the VNC source declared in the
    target's inventory entry *vnc.NAME*.

    Note the fields are target's tags and others specified in
    :class:`ttbl.capture.generic_snapshot` and
    :class:`ttbl.capture.generic_stream`.

    To use, add in a :ref:`server configuration file
    <ttbd_configuration>` to any target that offers a VNC source:

    >>> target.interface_add("capture", ttbl.capture.interface(
    >>>     vnc0_screenshot = mk_capture_screenshot_vnc("vnc0"),
    >>>     screen = "vnc0_screenshot",
    >>> ))
    """
    assert isinstance(name, str)
    # The %(FIELD)s placeholders are expanded from the target's inventory at
    # capture time, so a capturer built for ABC reads vnc.ABC.host/port.
    # The port is deliberately left out of the display name so the UPID
    # stays stable.  gvnccapture needs the .png extension to pick the
    # format, and -q is avoided so failures are not silent; for QEMU the
    # host is *localhost*.
    capturer_name = f"VNC %(id)s@%(vnc.{name}.host)s"
    command = (
        f"gvnccapture %(vnc.{name}.host)s:%(vnc.{name}.port)s"
        " %(output_file_name)s"
    )
    return ttbl.capture.generic_snapshot(
        capturer_name,
        command,
        mimetype = "image/png",
        extension = ".png"
    )
#: Capture a screenshot off VNC port declared in inventory *vnc.vnc0*
#: (ready-made instance for the common single-display case)
capture_screenshot_vnc0 = mk_capture_screenshot_vnc("vnc0")
#: Capture video off a v4l device using ffmpeg
#:
#: See capture_screenshot_ffmpeg_v4l for setup instructions, as they
#: are common.
capture_vstream_ffmpeg_v4l = ttbl.capture.generic_stream(
    "video:/dev/video-%(id)s-0",
    "ffmpeg -y -nostdin -i /dev/video-%(id)s-0"
    " -flush_packets" # disable some buffering
    " -f avi -qscale:v 10 -y %(stream_filename)s",
    mimetype = "video/avi", extension = ".avi",
    # SIGINT (not a hard kill) so ffmpeg finalizes the AVI on stop
    wait_to_kill = 4, use_signal = signal.SIGINT # flushes ffmpeg
)
#: Capture audio off an Alsa device using ffmpeg
#:
#: See capture_screenshot_ffmpeg_v4l for setup instructions, as they
#: are similar.
#:
#: Note the udev setup instructions for Alsa devices are slightly
#: different; instead of *SYMLINKS* we have to set *ATTR{id}*::
#:
#: SUBSYSTEM == "sound", ACTION == "add", \
#: ENV{ID_PATH} == "pci-0000:00:14.0-usb-0:2.1:1.2", \
#: ATTR{id} = "TARGETNAME"
#:
#: Once this configuration is completed, udev is reloaded (*sudo
#: udevadm control --reload-rules*) and the
#: device is triggered (with *udevadm trigger /dev/snd/controlCX* or
#: the machine restarted), */proc/asound* should contain a symlink to
#: the actual card::
#:
#: $ ls /proc/asound/ -l
#: total 0
#: dr-xr-xr-x. 3 root root 0 Jun 21 21:52 card0
#: dr-xr-xr-x. 7 root root 0 Jun 21 21:52 card4
#: ..
#: lrwxrwxrwx. 1 root root 5 Jun 21 21:52 TARGETNAME -> card4
#: ...
#:
#: Device information for Alsa devices (Card 0, Card 1, etc...) can be
#: found with::
#:
#: $ udevadm info /dev/snd/controlC0
#: P: /devices/pci0000:00/0000:00:1f.3/sound/card0/controlC0
#: N: snd/controlC0
#: S: snd/by-path/pci-0000:00:1f.3
#: E: DEVLINKS=/dev/snd/by-path/pci-0000:00:1f.3
#: E: DEVNAME=/dev/snd/controlC0
#: E: DEVPATH=/devices/pci0000:00/0000:00:1f.3/sound/card0/controlC0
#: E: ID_PATH=pci-0000:00:1f.3
#: E: ID_PATH_TAG=pci-0000_00_1f_3
#: E: MAJOR=116
#: E: MINOR=11
#: E: SUBSYSTEM=sound
#: E: TAGS=:uaccess:
#: E: USEC_INITIALIZED=30391111
#:
#: As indicated in capture_screenshot_ffmpeg_v4l, using
#: *ENV{ID_SERIAL_SHORT}* is preferred if available.
capture_astream_ffmpeg_v4l = ttbl.capture.generic_stream(
    "audio:%(id)s",
    "ffmpeg -f alsa -i sysdefault:%(id)s"
    # NOTE(review): ffmpeg muxes into an AVI container here, but the capture
    # is advertised as audio/wav -- confirm which one is intended.
    " -f avi -qscale:v 10 -y %(output_file_name)s",
    mimetype = "audio/wav"
)
#:
#: Capture HDMI Audio from an AGPTEK USB 3.0 HDMI HD Video Capture
#:
#: - https://www.agptek.com/AGPTEK-USB-3-0-HDMI-HD-Video-Capture-1089-212-1.html
#:
#: We can't use a generic ALSA capturer because there seem to be
#: glitches in the device
#:
capture_agptek_hdmi_astream = ttbl.capture.generic_stream(
    "hdmi0-audio:%(id)s",
    "ffmpeg -f alsa -i sysdefault:%(id)s-hdmi"
    # NOTE(review): AVI container with an audio/wav mimetype -- confirm.
    " -f avi -qscale:v 10 -y %(output_file_name)s",
    mimetype = "audio/wav",
    pre_commands = [
        # somehow the adapter doesn't work right unless "reset" it
        # with the USB kernel interface.
        #
        # This gets the path in the
        # /sys sysfs filesystem of /dev/video-%(id)s-0 (with 'udevadm
        # info') that yields something like:
        #
        # $ udevadm info /dev/video-%(id)s-0 -q path
        # /devices/pci0000:00/0000:00:14.0/usb1/1-4/1-4.2/1-4.2:1.0/video4linux/video0
        #
        # three levels up (removing 1-4.2:1.0/video4linux/video0) gets
        # us to the top level USB device information node:
        #
        # /devices/pci0000:00/0000:00:14.0/usb1/1-4/1-4.2
        #
        # so in /sys/devices/pci0000:00/0000:00:14.0/usb1/1-4/1-4.2
        # there is a file called 'authorized' that will force the USB
        # device to be disconnected or connected to the
        # system. Writing 0 we soft-disconnect it, writing 1 we ask
        # for it to be connected.
        "echo 0 > /sys/$(udevadm info video-%(id)s-0 -q path)/../../../authorized",
        "sleep 0.5s",
        "echo 1 > /sys/$(udevadm info video-%(id)s-0 -q path)/../../../authorized",
        "sleep 1s",
        # vtop HW has "Digital In" for an input name
        # FIXME: we have issues with the spaces, somewhere it is being
        # split?
        "amixer -c %(id)s-hdmi sset 'Digital In' 75%%"
    ]
)
#: Capture audio with the USB capturer VTOP/JMTEK 0c76:161e
#:
#: https://www.amazon.com/Digital-Audio-Capture-Windows-10-11/dp/B019T9KS04
#:
#: This is for capturing audio on the audio grabber connected to the
#: main builtin sound output of the target (usually identified as
#: *front* by the Linux driver subsystem), which UDEV has configured
#: to be called TARGETNAME-front::
#:
#: SUBSYSTEM == "sound", ACTION == "add", \
#: ENV{ID_PATH} == "pci-0000:00:14.0-usb-0:2.3.1:1.0", \
#: ATTR{id} = "TARGETNAME-front"
#:
capture_front_astream_vtop_0c76_161e = ttbl.capture.generic_stream(
    "audio:%(id)s-front",
    "ffmpeg -f alsa -i sysdefault:%(id)s-front"
    " -f wav -qscale:v 10 -y %(output_file_name)s",
    mimetype = "audio/wav",
    # vtop HW has Mic for an input name
    # (%% escapes a literal '%' through the command formatter)
    pre_commands = [ "amixer -c %(id)s-front sset Mic 75%%" ]
)
| StarcoderdataPython |
from petroleum import PetroleumObject
from petroleum.json_encoder import ToJSONMixin
class ConditionalTask(PetroleumObject, ToJSONMixin):
    """A task paired with the condition under which it should be taken.

    :param task: the wrapped task object
    :param condition: the condition gating this task
    :param default: marks this branch as the fallback (defaults to False)
    """

    def __init__(self, task, condition, default=False):
        self.default = default
        self.condition = condition
        self.task = task
| StarcoderdataPython |
from __future__ import print_function
from invoke import task
from pprint import pformat
@task
def myfunc(ctx, *args, **kwargs):
    """
    Note there is a bug where we couldn't do
    def mine(ctx, mypositionalarg, *args, **kwargs):
    pass
    But something is better than nothing :) Search "TODO 531"
    to find the comment describing our options.
    Keyword optional args work but they can be filled by positional args
    (because they're not KEYWORD_ONLY!) so we don't recommend their use.
    """
    # Echo everything invoke passed through, for debugging task invocation.
    print("args: {}".format(args))
    print("kwargs: {}".format(pformat(kwargs)))
| StarcoderdataPython |
1830748 | <gh_stars>0
import enum
class Orientation(enum.Enum):
    """Supported map orientations."""
    orthogonal = "orthogonal"
    isometric = "isometric"
    staggered = "staggered"
    hexagonal = "hexagonal"
class RenderOrder(enum.Enum):
    """Order in which tiles are rendered (x/y direction combinations)."""
    right_down = "right-down"
    right_up = "right-up"
    left_down = "left-down"
    left_up = "left-up"
class StaggerAxis(enum.Enum):
    """Stagger axis identifier: 'x' or 'y'."""
    x = "x"
    y = "y"
class StaggerIndex(enum.Enum):
    """Stagger index parity: 'odd' or 'even'."""
    odd = "odd"
    even = "even"
class Compression(enum.Enum):
    """Tile-data compression schemes; the empty string means uncompressed."""
    zlib = "zlib"
    gzip = "gzip"
    zstd = "zstd"
    none = ""
class DrawOrder(enum.Enum):
    """Object draw-order options: 'topdown' or 'index'."""
    topdown = "topdown"
    index = "index"
class Encoding(enum.Enum):
    """Tile-data payload encodings: 'csv' or 'base64'."""
    csv = "csv"
    base64 = "base64"
class Type(enum.Enum):
    """Layer type discriminator."""
    tilelayer = "tilelayer"
    objectgroup = "objectgroup"
    imagelayer = "imagelayer"
    group = "group"
class Alignment(enum.Enum):
    """Nine-way alignment values plus 'unspecified'."""
    unspecified = "unspecified"
    topleft = "topleft"
    top = "top"
    topright = "topright"
    left = "left"
    center = "center"
    right = "right"
    bottomleft = "bottomleft"
    bottom = "bottom"
    bottomright = "bottomright"
| StarcoderdataPython |
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import torch
import logging
from pytorch_benchmark import benchmark
from opendr.perception.object_tracking_2d import ObjectTracking2DDeepSortLearner
from opendr.perception.object_tracking_2d.datasets.mot_dataset import MotDataset
from projects.perception.object_tracking_2d.demos.fair_mot_deep_sort.data_generators import (
disk_image_with_detections_generator,
)
logger = logging.getLogger("benchmark")
logging.basicConfig()
logger.setLevel("DEBUG")
def benchmark_fair_mot():
    """Benchmark ObjectTracking2DDeepSortLearner.infer on the nano MOT20 data.

    Downloads the sample dataset if needed, runs the pytorch_benchmark
    harness, and appends FPS/results to results_deep_sort.txt.
    """
    # NOTE(review): despite the name, this benchmarks the DeepSort tracker
    # (ObjectTracking2DDeepSortLearner), not FairMOT -- confirm the intended
    # name; it is kept as-is because __main__ below calls it.
    root_dir = "./projects/perception/object_tracking_2d/benchmark"
    temp_dir = root_dir + "/tmp"
    models_dir = root_dir + "/models"
    num_runs = 100
    models = [
        "deep_sort",
    ]
    # Fetch the nano MOT20 sample dataset once; reused on later runs.
    if not os.path.exists(temp_dir + "/nano_MOT20"):
        MotDataset.download_nano_mot20(
            os.path.join(temp_dir, "mot_dataset"), True
        ).path
    batch_size = 2
    # Cycling generator of (image, detections) samples read from disk.
    data_generator = disk_image_with_detections_generator(
        temp_dir,
        {
            "nano_mot20": os.path.join(
                ".",
                "src",
                "opendr",
                "perception",
                "object_tracking_2d",
                "datasets",
                "splits",
                "nano_mot20.train",
            )
        },
        cycle=True
    )
    sample = next(data_generator)
    samples = [next(data_generator) for _ in range(batch_size)]
    # Start from a clean results file.
    if os.path.exists(root_dir + "/results_deep_sort.txt"):
        os.remove(root_dir + "/results_deep_sort.txt")
    for model_name in models:
        print(
            f"==== Benchmarking ObjectTracking2DDeepSortLearner ({model_name}) ===="
        )
        learner = ObjectTracking2DDeepSortLearner(
            temp_path=temp_dir,
        )
        # Download pretrained weights on first use, then load them.
        if model_name is not None and not os.path.exists(
            models_dir + "/" + model_name
        ):
            learner.download(model_name, models_dir)
        learner.load(models_dir + "/" + model_name, verbose=True)

        def get_device_fn(*args):
            # Tell the benchmark harness which device the learner runs on.
            nonlocal learner
            return torch.device(learner.device)

        def transfer_to_device_fn(
            sample,
            device,
        ):
            # Samples are consumed as-is; the learner moves data itself.
            return sample

        print("== Benchmarking learner.infer ==")
        results1 = benchmark(
            model=learner.infer,
            sample=samples,
            sample_with_batch_size1=sample,
            num_runs=num_runs,
            get_device_fn=get_device_fn,
            transfer_to_device_fn=transfer_to_device_fn,
            batch_size=batch_size,
        )
        # FPS as measured inside the learner (excludes harness overhead).
        inner_fps = learner.infers_count / (learner.infers_time)
        print("Inner FPS =", inner_fps)
        print(yaml.dump({"learner.infer": results1}))
        with open(root_dir + "/results_deep_sort.txt", "a") as f:
            print(
                f"==== Benchmarking ObjectTracking2DDeepSortLearner ({model_name}) ====",
                file=f,
            )
            print("Inner FPS =", inner_fps, file=f)
            print(yaml.dump({"learner.infer": results1}), file=f)
            print("\n\n", file=f)
        # print("== Benchmarking model directly ==")
        # results2 = benchmark(learner.model, sample, num_runs=num_runs)
        # print(yaml.dump({"learner.model.forward": results2}))
    print("===END===")


if __name__ == "__main__":
    benchmark_fair_mot()
| StarcoderdataPython |
from os import path
from setuptools import setup

# Directory containing this setup.py; used to locate the README.
HERE = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

setup(
    name='storm-indicator-pyqt',
    version='1.2.1',
    description='PyQt based indicator for connecting to your SSH connections easily.',
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url='https://github.com/olegbuevich/storm-indicator',
    license='MIT',
    # NOTE(review): <NAME>/<EMAIL> look like anonymization placeholders --
    # fill in real author metadata before publishing.
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['storm_indicator_pyqt'],
    entry_points={
        'console_scripts': [
            'ssh-indicator-pyqt=storm_indicator_pyqt.__main__:main',
        ]
    },
    # Ship the tray icon asset inside the wheel.
    package_data={'storm_indicator_pyqt': ['icons/tray.svg']},
    install_requires=[
        "stormssh",
        "PyQt5",
        "envparse",
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Topic :: System :: Systems Administration"
    ]
)
| StarcoderdataPython |
4826781 | <gh_stars>1-10
import click
import os
import yaml
import logging
def execute():
    """CLI entry point: register all subcommands, then run the click group."""
    add_subcommands()
    # obj={} gives click an empty context object that entry_point fills in.
    entry_point(obj={})
@click.group()
@click.option('--log-level', default='info', help='Set the logging level. (default: info, options: debug|info|warning|error)')
@click.option('--config-file', default='config/default.yaml', help='Set the YAML config file. (default: config/default.yaml)')
@click.pass_context
def entry_point(ctx, log_level, config_file):
    """
    Hey, welcome! This project aims to provide a set of simple trading bots
    developed to autonomously trade inside general or cryptocurrency Exchanges,
    such as Binance.
    """
    # Configure logging first so config parsing can report problems, then
    # stash the parsed configuration on the click context for subcommands.
    setup_logging(log_level)
    ctx.obj['config'] = parse_config(config_file)
def add_subcommands():
    """Attach every click Command found in the trader modules to the group."""
    from . import serial_trader
    from . import parallel_trader

    modules = (
        serial_trader,
        parallel_trader,
    )
    for mod in modules:
        for attr_name in dir(mod):
            candidate = getattr(mod, attr_name)
            # Exact type check on purpose: only plain Commands are attached,
            # not Groups or other subclasses.
            if callable(candidate) and type(candidate) is click.core.Command:
                entry_point.add_command(candidate)
def setup_logging(log_level):
    """Configure the root logger from a textual level name.

    :param log_level: case-insensitive level name ("debug", "info",
        "warning", "error", ...), mapped via the ``logging`` module constants.
    :raises ValueError: if ``log_level`` is not a valid logging level name.
    """
    # Renamed from `format` to avoid shadowing the builtin of the same name.
    log_format = '%(levelname)s\t%(asctime)s %(message)s'
    numeric_level = getattr(logging, log_level.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % log_level)
    # NOTE: the `encoding` keyword requires Python >= 3.9.
    logging.basicConfig(encoding='utf-8', format=log_format, level=numeric_level)
def parse_config(filename, env_vars_prefix='bot'):
    """
    Load a yaml configuration file and resolve any environment variables.

    Every leaf value can be overridden by an environment variable named after
    its dotted path, e.g. ``bot.exchange.key`` -> ``BOT_EXCHANGE_KEY``.

    :param filename: path to the YAML configuration file.
    :param env_vars_prefix: prefix used when building env var names.
    :raises ValueError: if ``filename`` does not point to a regular file.
    :returns: the parsed configuration (dict, scalar, or None for empty files).
    """
    if not os.path.isfile(filename):
        raise ValueError('Invalid filename: %s' % filename)
    with open(filename) as data:
        config = yaml.load(data, yaml.loader.SafeLoader)

    def update_config_with_env_vars(config, config_name, env_var_separator='_'):
        # Recurse into mappings; scalar leaves are replaced by their
        # environment override when one is set (and non-empty).
        if config is None:
            # NOTE: null leaves are returned as-is and therefore cannot be
            # overridden from the environment (preserved original behavior).
            return config
        if isinstance(config, dict):
            for node in config:
                config[node] = update_config_with_env_vars(
                    config[node], '{}.{}'.format(config_name, node))
            return config
        env_var_name = config_name.upper().replace('.', env_var_separator)
        return os.environ.get(env_var_name) or config

    return update_config_with_env_vars(config, env_vars_prefix)
| StarcoderdataPython |
11296485 | """Check live state management command."""
from datetime import datetime, timedelta, timezone
import json
import re
from django.conf import settings
from django.core.management.base import BaseCommand
import boto3
from dateutil.parser import isoparse
from marsha.core.defaults import RUNNING, STOPPING
from marsha.core.models import Video
from marsha.core.utils.medialive_utils import stop_live_channel
# Credentials shared by every AWS client below. Note these clients are
# created at import time, so Django settings must be configured first.
aws_credentials = {
    "aws_access_key_id": settings.AWS_ACCESS_KEY_ID,
    "aws_secret_access_key": settings.AWS_SECRET_ACCESS_KEY,
    "region_name": settings.AWS_S3_REGION_NAME,
}
# Configure medialive client
medialive_client = boto3.client("medialive", **aws_credentials)
# Configure cloudwatch logs client
logs_client = boto3.client("logs", **aws_credentials)
def parse_iso_date(iso_date):
    """Parse an iso 8601 date and return a datetime object.

    Delegates to ``dateutil.parser.isoparse``; timezone information present
    in the input string is preserved in the returned datetime.
    """
    return isoparse(iso_date)
def generate_expired_date(minutes=25):
    """Return the aware UTC datetime ``minutes`` minutes in the past.

    :param minutes: age of the expiry threshold in minutes (default 25,
        matching the "no stream for 25 minutes" shutdown rule).
    """
    return datetime.now(tz=timezone.utc) - timedelta(minutes=minutes)
class Command(BaseCommand):
    """Check every live streaming running state on AWS."""
    # NOTE(review): the two fragments concatenate without a separating space,
    # so the rendered help reads "runningand" -- probably unintended.
    help = (
        "Check activity on AWS for every live streaming running"
        "and close them if there is not."
    )
    def handle(self, *args, **options):
        """Execute management command."""
        # CloudWatch log lines are tab-separated; the JSON payload follows
        # the literal prefix "Received event:".
        extract_message_pattern = (
            r"^(?P<ingestion_time>.*)\t"
            r"(?P<request_id>.*)\t"
            r"(?P<level>.*)\t"
            r"Received event:(?P<message>.*)$"
        )
        extract_message_regex = re.compile(extract_message_pattern)
        # Only videos currently flagged as live are candidates for shutdown.
        videos = Video.objects.filter(live_state=RUNNING)
        for video in videos:
            """
            For each running live video, we query cloudwatch on the current live
            to search messages having detail.alert_type set to `RTMP Has No Audio/Video`.
            This alert tell us there is no stream and the live can be stopped if the message is
            older than 25 minutes.
            """
            self.stdout.write(f"Checking video {video.id}")
            live_info = video.live_info
            # startTime is expressed in milliseconds since the epoch.
            logs = logs_client.filter_log_events(
                logGroupName=live_info["cloudwatch"]["logGroupName"],
                startTime=int(datetime.timestamp(video.created_on) * 1000),
                filterPattern=(
                    "{"
                    '($.detail-type = "MediaLive Channel Alert") && '
                    f"($.resources[0] = \"{live_info['medialive']['channel']['arn']}\") &&"
                    '($.detail.alert_type = "RTMP Has No Audio/Video")'
                    "}"
                ),
            )
            # One queue per MediaLive pipeline; each unresolved SET alert stays
            # in its pipeline's queue until a matching CLEARED event removes it.
            pipelines_queue = {"0": [], "1": []}
            for event in logs["events"]:
                """
                All events must be parsed to extract the JSON message. When an alert is added,
                the `alarm_state` property value is `SET` and when the alert is removed,
                the `alarm_state` property value is `CLEARED`.
                We have 2 pipelines, a live is over when the 2 pipeline have `SET` value
                in `alarm_state`.
                Alarm state act like a list with all the event history. It means a `CLEARED` event
                is related to a `SET` one. So we have to look over all events, put in a list all
                `SET` events and remove it if a `CLEARED` event is here. At the end if we have
                2 `SET` events, the live has no activity and we have to check the time of the
                last `SET` event. If this time is older than 25 minutes we stop the channel.
                """
                log = extract_message_regex.match(event["message"])
                message = json.loads(log.group("message"))
                if message["detail"]["alarm_state"] == "SET":
                    pipelines_queue[message["detail"]["pipeline"]].append(message)
                else:
                    # NOTE(review): assumes every non-SET event is a CLEARED
                    # matching an earlier SET inside the fetched window; a
                    # CLEARED with no prior SET would make .pop() raise
                    # IndexError -- confirm with real log data.
                    pipelines_queue[message["detail"]["pipeline"]].pop()
            if len(pipelines_queue["0"]) == 1 and len(pipelines_queue["1"]) == 1:
                """
                Both pipelines receive no stream, we have to check the more recent one
                and if the time is older than 25 minutes we stop the channel.
                """
                datetime_pipeline0 = parse_iso_date(pipelines_queue["0"][0]["time"])
                datetime_pipeline1 = parse_iso_date(pipelines_queue["1"][0]["time"])
                expired_date = generate_expired_date()
                # NOTE(review): `or` stops the live as soon as *either*
                # pipeline's alert is older than 25 minutes, even if the other
                # one is recent -- confirm this is intended rather than `and`.
                if (
                    datetime_pipeline0 < expired_date
                    or datetime_pipeline1 < expired_date
                ):
                    """Stop this channel."""
                    self.stdout.write(
                        f"Stopping channel with id {live_info['medialive']['channel']['id']}"
                    )
                    stop_live_channel(live_info["medialive"]["channel"]["id"])
                    video.live_state = STOPPING
                    video.save()
                    self.stdout.write("Channel stopped")
| StarcoderdataPython |
1673458 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import json
import gidgethub.sansio
import importlib_resources
import pytest
from __app__.github import labels, news
from . import samples
class FakeGH:
    """Minimal stub of a gidgethub GitHub API client for the tests.

    Records every POST as a ``(url, data)`` tuple in ``post_`` and replays
    the items supplied via ``getiter_`` from :meth:`getiter`.
    """

    def __init__(self, *, getiter_=None):
        # Fix: the original used a mutable default (`getiter_=[]`), which is
        # shared across all instances created without the keyword.
        self.post_ = []
        self.getiter_ = [] if getiter_ is None else getiter_

    async def post(self, url, url_vars=None, *, data):
        """Record a POST; `url_vars` are expanded into the URL template."""
        # Same mutable-default fix as above (`url_vars={}` originally).
        post_url = gidgethub.sansio.format_url(url, url_vars or {})
        self.post_.append((post_url, data))

    async def getiter(self, url):
        """Yield the pre-seeded items, ignoring the requested URL."""
        for item in self.getiter_:
            yield item
@pytest.mark.asyncio
async def test_status():
    """news.status() must POST exactly one well-formed status payload."""
    statuses_url = "https://api.github.com/repos/Microsoft/vscode-python/statuses/f1013549456d13eb15dab4fffaa6cfe172b4244e"
    gh = FakeGH()
    event = gidgethub.sansio.Event(
        {"pull_request": {"statuses_url": statuses_url}},
        event="pull_request",
        delivery_id="1",
    )
    description = "some description"
    await news.status(event, gh, news.Status.error, description)
    assert len(gh.post_) == 1
    posted = gh.post_[0]
    assert len(posted) == 2
    posted_url, payload = posted
    assert posted_url == statuses_url
    assert payload == {
        "state": "error",
        "target_url": "https://github.com/Microsoft/vscode-python/tree/master/news",
        "description": description,
        "context": "pvscbot/news",
    }
@pytest.mark.asyncio
async def test_check_for_skip_news_label(monkeypatch):
    """With the 'skip news' label present the check passes and posts a
    success status; with no labels it returns False and posts nothing."""
    status_args = None
    async def status(*args):
        # Capture the arguments news.status() was called with.
        nonlocal status_args
        status_args = args
    monkeypatch.setattr(news, "status", status)
    data = json.loads(
        importlib_resources.read_text(samples, "pull_request-labeled-skip_news.json")
    )
    event = gidgethub.sansio.Event(data, event="pull_request", delivery_id="1")
    assert await news.check_for_skip_news_label(event, object())
    assert news.Status.success in status_args
    # Strip the labels from the payload: the check must not fire at all.
    data["pull_request"]["labels"] = []
    status_args = None
    assert not await news.check_for_skip_news_label(event, object())
    assert status_args is None
@pytest.mark.asyncio
async def test_check_for_skip_news_removed(monkeypatch):
    """Removing the 'skip news' label re-triggers the news-file check;
    removing any other label is ignored."""
    check_args = None
    async def check_for_news_file(*args):
        # Capture whether (and how) the news-file check was invoked.
        nonlocal check_args
        check_args = args
    monkeypatch.setattr(news, "check_for_news_file", check_for_news_file)
    data = json.loads(
        importlib_resources.read_text(samples, "pull_request-unlabeled-skip_news.json")
    )
    event = gidgethub.sansio.Event(data, event="pull_request", delivery_id="1")
    await news.check_for_skip_news_label_removed(event, object())
    assert check_args is not None
    # A different removed label must not trigger the check.
    check_args = None
    data["label"]["name"] = "something other than 'skip news'"
    await news.check_for_skip_news_label_removed(event, object())
    assert check_args is None
@pytest.mark.asyncio
@pytest.mark.parametrize(
    # (changed-file path, expected check result, expected posted status)
    "path,expected,status_check",
    [
        ("news/3 Code Health/3684.md", True, news.Status.success),
        ("news/__pycache__/3684.md", False, news.Status.failure),
        ("news/3684.md", False, news.Status.failure),
        ("news/3 Code Health/3684.txt", False, news.Status.failure),
    ],
)
async def test_check_for_news_file(path, expected, status_check, monkeypatch):
    """Only a Markdown entry directly under a news section directory
    satisfies check_for_news_file()."""
    status_args = None
    async def status(*args):
        nonlocal status_args
        status_args = args
    monkeypatch.setattr(news, "status", status)
    event_data = json.loads(
        importlib_resources.read_text(samples, "pull_request-reopened-skip_news.json")
    )
    event = gidgethub.sansio.Event(event_data, event="pull_request", delivery_id="1")
    files_data = json.loads(
        importlib_resources.read_text(samples, "pull_request-files.json")
    )
    # Replace the sample's second changed file with the path under test.
    files_data[1]["filename"] = path
    gh = FakeGH(getiter_=files_data)
    assert await news.check_for_news_file(event, gh) == expected
    assert status_args[2] == status_check
@pytest.mark.asyncio
async def test_check_for_news(monkeypatch):
    """check_for_news() succeeds when either the 'skip news' label is set or
    a valid news file is part of the PR, and fails when neither holds."""
    status_args = None
    async def status(*args):
        nonlocal status_args
        status_args = args
    monkeypatch.setattr(news, "status", status)
    event_data = json.loads(
        importlib_resources.read_text(samples, "pull_request-reopened-skip_news.json")
    )
    event = gidgethub.sansio.Event(event_data, event="pull_request", delivery_id="1")
    files_data = json.loads(
        importlib_resources.read_text(samples, "pull_request-files.json")
    )
    original_file_path = files_data[1]["filename"]
    # Sanity check on the sample fixture's news-file entry.
    assert original_file_path == "news/3 Code Health/3684.md"
    # Case 1: label present, no news file -> success via the label.
    files_data[1]["filename"] = "README"
    gh = FakeGH(getiter_=files_data)
    assert await news.check_for_news(event, gh)
    assert status_args[2] == news.Status.success
    # Case 2: no label, valid news file -> success via the file.
    event_data["pull_request"]["labels"] = []
    files_data[1]["filename"] = original_file_path
    status_args = None
    assert await news.check_for_news(event, gh)
    assert status_args[2] == news.Status.success
    # Case 3: neither label nor news file -> failure.
    files_data[1]["filename"] = "README"
    status_args = None
    assert not await news.check_for_news(event, gh)
    assert status_args[2] == news.Status.failure
# Also tests that the status check is initially set to "pending".
@pytest.mark.asyncio
@pytest.mark.parametrize("action", ["opened", "reopened", "synchronize"])
async def test_PR_nonlabel_routing(action, monkeypatch):
    """Non-label PR events must be routed into the news check, which first
    reports a pending status."""
    status_args = None
    async def status(*args):
        nonlocal status_args
        status_args = args
    monkeypatch.setattr(news, "status", status)
    async def check_for_skip_news_label(*args, **kwargs):
        # Short-circuit the real check so only routing + pending status runs.
        return True
    monkeypatch.setattr(news, "check_for_skip_news_label", check_for_skip_news_label)
    event = gidgethub.sansio.Event(
        {"action": action}, event="pull_request", delivery_id="1"
    )
    await news.router.dispatch(event, object())
    assert status_args is not None  # We hit the route.
    assert status_args[2] == news.Status.pending
@pytest.mark.asyncio
async def test_PR_labeled_routing(monkeypatch):
    """A 'labeled' pull_request event must be routed through has_label()."""
    invoked = False

    def fake_has_label(*args, **kwargs):
        nonlocal invoked
        invoked = True
        return False

    monkeypatch.setattr(news, "has_label", fake_has_label)
    event = gidgethub.sansio.Event(
        {"action": "labeled"}, event="pull_request", delivery_id="1"
    )
    await news.router.dispatch(event, object())
    assert invoked
@pytest.mark.asyncio
async def test_PR_unlabeled_routing(monkeypatch):
    """An 'unlabeled' pull_request event must go through changed_label_matches()."""
    invoked = False

    def fake_changed_label_matches(*args, **kwargs):
        nonlocal invoked
        invoked = True
        return False

    monkeypatch.setattr(news, "changed_label_matches", fake_changed_label_matches)
    event = gidgethub.sansio.Event(
        {"action": "unlabeled"}, event="pull_request", delivery_id="1"
    )
    await news.router.dispatch(event, object())
    assert invoked
| StarcoderdataPython |
6404185 | import argparse
import h5py
import json
import os
import scipy.misc
import sys
import zipfile
import numpy as np
import cv2
from os.path import join
def get_pixels(mask):
    """Return the sum of all values in ``mask`` (the pixel count for a 0/1 mask)."""
    pixel_total = np.sum(mask)
    return pixel_total
def find_bbox(mask):
    """Return the bounding box (x0, y0, x1, y1) enclosing all external contours.

    :param mask: single-channel binary image (uint8), as cv2.findContours expects.
    :returns: min/max contour-point coordinates; note cv2 contour points are
        (x, y) ordered.
    NOTE(review): raises if the mask contains no contours (np.concatenate on
    an empty sequence) -- callers must pass a non-empty mask.
    """
    # Bug fix: the original called findContours on the module-level global
    # `im` instead of its own `mask` parameter, so the argument was ignored.
    contour, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    x0, y0 = np.min(np.concatenate(contour), axis=(0, 1))
    x1, y1 = np.max(np.concatenate(contour), axis=(0, 1))
    return x0, y0, x1, y1
def imdecode(im, flags=-1):
    """Decode an in-memory encoded image via OpenCV.

    Accepts either raw bytes or an open ``zipfile.ZipExtFile`` (which is read
    to bytes first). ``flags`` is forwarded to ``cv2.imdecode`` (-1 keeps the
    image as-is, 0 forces grayscale).
    """
    raw = im.read() if isinstance(im, zipfile.ZipExtFile) else im
    buf = np.asarray(bytearray(raw), dtype='uint8')
    return cv2.imdecode(buf, flags)
def convert_ytb_vos(data_dir, out_dir):
    """Convert YouTube-VOS `valid` split annotations (zip) into a COCO-like JSON.

    :param data_dir: path to the YouTube-VOS zip archive.
    :param out_dir: directory where ``instances_train.json`` is written.
    NOTE(review): ``Instance``, ``xyxy_to_xywh`` and ``polys_to_boxes`` are
    neither defined nor imported in this file -- as written this function
    raises NameError on first use; confirm the intended helper module.
    """
    json_name = 'instances_%s.json'
    num_obj = 0
    num_ann = 0
    print('Starting')
    ann_dict = {}
    ann_dir = 'valid/Annotations/'
    zf = zipfile.ZipFile(data_dir)
    with zf.open('valid/meta.json') as fp:
        json_ann = json.load(fp)
    for vid, video in enumerate(json_ann['videos']):
        v = json_ann['videos'][video]
        frames = []
        for obj in v['objects']:
            o = v['objects'][obj]
            frames.extend(o['frames'])
        # Only the first (earliest) annotated frame of each video is kept.
        frames = sorted(set(frames))[:1]
        annotations = []
        instanceIds = []
        for frame in frames:
            file_name = f'{video}/{frame}'
            fullname = f'{ann_dir}{file_name}.png'
            with zf.open(fullname) as fp:
                # Grayscale decode: each pixel value is an instance id (0 = background).
                img = imdecode(fp, flags=0)
            h, w = img.shape[:2]
            objects = dict()
            for instanceId in np.unique(img):
                if instanceId == 0:
                    continue
                instanceObj = Instance(img, instanceId)
                instanceObj_dict = instanceObj.toDict()
                mask = (img == instanceId).astype(np.uint8)
                contour, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
                # NOTE(review): debug print left in -- fires once per instance.
                print(len(contour[0]), type(contour[0]), contour[0].shape)
                polygons = [c.reshape(-1).tolist() for c in contour]
                # Polygons with <= 4 coordinates (<= 2 points) cannot enclose an area.
                instanceObj_dict['contours'] = [p for p in polygons if len(p) > 4]
                # Keep only instances with a valid contour and > 1000 pixels.
                if len(instanceObj_dict['contours']) and instanceObj_dict['pixelCount'] > 1000:
                    objects[instanceId] = instanceObj_dict
            for objId in objects:
                if len(objects[objId]) == 0:
                    continue
                obj = objects[objId]
                len_p = [len(p) for p in obj['contours']]
                if min(len_p) <= 4:
                    print('Warning: invalid contours.')
                    continue # skip non-instance categories
                ann = dict()
                ann['h'] = h
                ann['w'] = w
                ann['file_name'] = file_name
                ann['id'] = int(objId)
                ann['area'] = obj['pixelCount']
                ann['bbox'] = xyxy_to_xywh(polys_to_boxes([obj['contours']])).tolist()[0]
                annotations.append(ann)
                instanceIds.append(objId)
                num_ann += 1
        instanceIds = sorted(set(instanceIds))
        num_obj += len(instanceIds)
        # Group this video's annotations by (stringified) instance id.
        video_ann = {str(iId): [] for iId in instanceIds}
        for ann in annotations:
            video_ann[str(ann['id'])].append(ann)
        ann_dict[video] = video_ann
        if vid % 50 == 0 and vid != 0:
            print("process: %d video" % (vid+1))
    print("Num Videos: %d" % len(ann_dict))
    print("Num Objects: %d" % num_obj)
    print("Num Annotations: %d" % num_ann)
    # NOTE(review): despite the name, *all* videos end up in the "train" file;
    # no actual split is performed.
    items = list(ann_dict.items())
    train_dict = dict(items)
    with open(os.path.join(out_dir, json_name % 'train'), 'w') as outfile:
        json.dump(train_dict, outfile, indent=2)
if __name__ == '__main__':
    # CLI entry point: python preprocess.py <datadir> <outdir>
    # Bug fix: a leftover debug snippet (reading '00020.png', printing its
    # bbox and calling exit()) made the real conversion below unreachable;
    # it has been removed.
    if len(sys.argv) < 3:
        print('python preprocess.py <datadir> <outdir>')
        sys.exit(1)
    datadir, outdir = sys.argv[1], sys.argv[2]
    convert_ytb_vos(datadir, outdir)
| StarcoderdataPython |
5032153 | from lib import action
class ListBulbsAction(action.BaseAction):
    """StackStorm action that lists every Hue bulb known to the bridge."""

    def run(self):
        """Return a mapping of ``'l<light_id>'`` -> bulb name for every light.

        Reads the cached bridge state from ``self.hue.state['lights']``.
        """
        lights = self.hue.state['lights']
        # Bug fix: dict.iteritems() is Python 2 only (AttributeError on
        # Python 3); .items() behaves identically on both versions here.
        return {"l%s" % light_id: light['name']
                for light_id, light in lights.items()}
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.