id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1856724 | <filename>python/tests/tests_api_client/testSelection.py
import unittest
import sys
import os
sys.path.append(os.path.abspath('../..'))
import api_example
from api_client import Client, StagingClient
from requests import exceptions
import json
import time
SELECTION_ID = api_example.SELECTION_ID
class TestCase(unittest.TestCase):
    """Base case for the API-client tests: gives every test a fresh Client."""
    def setUp(self):
        # A new Client per test so no session/state leaks between cases.
        self.client = Client()
class GetSelectionValid(TestCase):
    """Fetching a known selection id should return a non-None payload."""
    def runTest(self):
        test_result = self.client.get_selection(SELECTION_ID)
        # assertIsNotNone gives a clearer failure message than assertNotEqual(..., None)
        self.assertIsNotNone(test_result)
class GetSelectionInvalid(TestCase):
    """Fetching a malformed selection id should raise an HTTP error."""
    def runTest(self):
        # Appending 'm' makes the id invalid; the API is expected to respond with an error status.
        self.assertRaises(exceptions.HTTPError, self.client.get_selection, SELECTION_ID + 'm')
class DeleteCreateSelectionValid(TestCase):
    """Round-trip: clone a selection, verify it exists, delete it, verify it is gone."""
    def runTest(self):
        selection_template = self.client.get_selection(SELECTION_ID)
        selection_template['Id'] = ''  # blank the id so the API allocates a new one
        selection = self.client.create_selection(selection_template)
        self.assertIsNotNone(selection)
        try:
            self.client.get_selection(selection['Id'])
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and masked real crashes.
        except Exception:
            self.fail("Selection creation failed unexpectedly...")
        self.client.delete_selection(selection['Id'])
        # After deletion the id must no longer resolve.
        self.assertRaises(exceptions.HTTPError, self.client.get_selection, selection['Id'])
# class CreateSelectionInvalid(TestCase):
class UpdateSelectionValid(TestCase):
    """Updating a selection's name should persist and be readable back."""
    def runTest(self):
        old_selection = self.client.get_selection(SELECTION_ID)
        old_name = old_selection['Name']
        # Append a character so the update is observable.
        old_selection['Name'] += 'u'
        self.client.update_selection(old_selection)
        new_selection = self.client.get_selection(old_selection['Id'])
        # NOTE(review): this test mutates the shared selection's name on every
        # run and never restores it — verify that is acceptable.
        self.assertEqual(new_selection['Name'], old_name + 'u')
# class UpdateSelectionInvalid(TestCase):
if __name__ == '__main__':
    # Allow running this module directly: discovers and runs the TestCase subclasses above.
    unittest.main()
| StarcoderdataPython |
12819429 | <reponame>MonolithAILtd/caching
"""this file defines the worker for managing local cache directories"""
import datetime
import os
import shutil
from typing import Optional
from uuid import UUID
from .errors import WorkerCacheError
from .register import Register
class Worker:
    """
    This is a class for managing a directory and meta data for temp files.

    Attributes:
        id (str): unique id for the worker
    """
    # Directory containing this module; default root under which caches are created.
    CLASS_BASE_DIR = os.path.dirname(os.path.realpath(__file__))

    def __init__(
        self,
        port: Optional[int],
        host: Optional[str],
        existing_cache: Optional[str] = None,
        local_cache: Optional[str] = None,
    ) -> None:
        """
        The constructor for the Worker class.

        :param port: (Optional[int]) port for the redis connection tracking caches
        :param host: (Optional[str]) host for the redis connection tracking caches
        :param existing_cache: (Optional[str]) path to existing cache
        :param local_cache: (Optional[str]) path to the local cache
        """
        self._locked: bool = False
        self._port: Optional[int] = port
        self._host: Optional[str] = host
        # pylint: disable=invalid-name
        self.id: str = str(UUID(bytes=os.urandom(16), version=4))
        self._existing_cache: Optional[str] = existing_cache
        self.class_base_dir: str = (
            self.CLASS_BASE_DIR if local_cache is None else local_cache
        )
        self._base_dir: str = str(self.class_base_dir) + "/cache/{}/".format(self.id)
        self._connect_directory()
        # Only register with redis when both connection parameters were supplied.
        if self._port is not None and host is not None:
            Register(host=self._host, port=self._port).register_cache(cache_path=self.base_dir)  # type: ignore

    @staticmethod
    def update_timestamp(cache_path: str) -> None:
        """
        Updates the cache timestamp.txt log with a new timestamp

        :param cache_path: (str) path to the cache being
        :return: None
        """
        timestamp = datetime.datetime.now()
        # Use a context manager so the handle is closed even if the write fails
        # (the original open/write/close leaked the handle on error).
        with open(cache_path + "timestamp.txt", "a") as file:
            file.write("\n{}".format(timestamp))

    def lock(self) -> None:
        """
        Sets self._lock to True preventing cleanup.

        :return: None
        """
        self._locked = True

    def unlock(self) -> None:
        """
        Sets the self._lock to False enabling cleanup.

        :return: None
        """
        self._locked = False

    def _connect_directory(self) -> None:
        """
        Checks existing path, creates new cache if existing path not supplied.

        :return: None
        """
        if self._existing_cache is not None:
            if not os.path.isdir(self._existing_cache):
                raise WorkerCacheError(
                    message="directory '{}' was supplied as an existing cache but does not exist".format(
                        self._existing_cache
                    )
                )
            self._base_dir = self._existing_cache
        else:
            self._generate_directory()

    def _generate_directory(self) -> None:
        """
        Generates cache directory with self.id (private).

        :return: None
        """
        # A pre-existing directory means the (random) id collided or cleanup failed.
        if os.path.isdir(self._base_dir):
            raise WorkerCacheError(
                message="directory {} already exists. Check __del__ and self.id methods".format(
                    self._base_dir
                )
            )
        os.makedirs(self._base_dir)
        self.update_timestamp(cache_path=self._base_dir)

    def _delete_directory(self) -> None:
        """
        Deletes cache directory (private).

        :return: None
        """
        # Without redis tracking, delete directly unless locked; with redis,
        # deregister first and delete only when no other worker references it.
        if self._port is None and self._locked is False:
            shutil.rmtree(self.base_dir)
        elif self._port is not None:
            count: int = Register(host=self._host, port=self._port).deregister_cache(  # type: ignore
                cache_path=self.base_dir, locked=self._locked  # type: ignore
            )  # type: ignore
            if count == 0 and self._locked is False:
                shutil.rmtree(self.base_dir)

    @property
    def base_dir(self) -> str:
        """
        Dynamic property that defines the base directory for the cache.

        :return: (str) the base directory of the cache
        """
        return self._base_dir

    def __del__(self):
        """
        Fires when self is deleted, deletes the directory.

        :return: None
        """
        self._delete_directory()
| StarcoderdataPython |
1870268 | <filename>tests/sync/conftest.py
import os
import pytest
import dotenv
from codingame import Client
dotenv.load_dotenv()
@pytest.fixture(name="client", scope="function")
def create_client() -> Client:
with Client() as client:
yield client
@pytest.fixture(name="auth_client")
def create_logged_in_client() -> Client:
with Client() as client:
client.login(
remember_me_cookie=os.environ.get("TEST_LOGIN_REMEMBER_ME_COOKIE"),
)
yield client
| StarcoderdataPython |
1896241 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2009 <NAME> All rights reserved.
#
"""
"""
#end_pymotw_header
import abc
from abc_base import PluginBase
import abc_subclass
import abc_register
# List every plugin class registered as a subclass of PluginBase.
# Fixed: the original used the Python 2 print *statement*, a SyntaxError under
# Python 3; the parenthesized call form works on both 2 and 3.
for sc in PluginBase.__subclasses__():
    print(sc.__name__)
| StarcoderdataPython |
9618160 | <reponame>lorycontixd/LiteBIRD-inclination-analysis
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from __future__ import print_function
import math
import models
import importlib
from tabulate import tabulate
from dataclasses import dataclass
import json
import time
import os,sys
from pathlib import Path
from shutil import copyfile
from typing import Dict, Any, List, Union
import subprocess as s
import markdown as md
from scipy.interpolate import UnivariateSpline
from typing import Callable
import timeit
import subprocess
import database
import settings
import logging
logging.basicConfig(
level = logging.INFO,
format = '[%(asctime)s] %(levelname)s: %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S'
)
from tqdm import tqdm
import litebird_sim as lbs
import healpy
import numpy as np
from scipy import optimize, interpolate, integrate
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import matplotlib.pyplot as plot
# Planet radii in meters.
PLANET_RADIUS = {
    "jupiter" : 7.1492e+07,
    "neptune" : 2.4622e+07,
    "uranus" : 2.5362e+07
}
# IMO path of the representative telescope channel for each frequency band.
TELESCOPES = {
    "low" : "LFT/L1-040",
    "mid" : "MFT/M2-166",
    "high" : "HFT/H3-402"
}
# Band center frequency in GHz for each band name.
FREQUENCIES = {
    "low" : 40,
    "mid" : 166,
    "high" : 402
}
@dataclass
class Parameters:
    """Run configuration parsed from the TOML parameter file (see load_parameters)."""
    planet_name: str                      # lowercase planet name, e.g. "jupiter"
    sed_file_name: str                    # CSV SED file; converted to Path in __post_init__
    planet_radius_m: float                # planet radius in meters
    scanning_simulation: Path             # directory holding the scanning-strategy map
    detector: lbs.DetectorInfo            # detector built by read_detector()
    num_of_mc_runs: int                   # number of Monte Carlo fit repetitions
    error_amplitude_map_file_name: str    # output FITS file name for the error map
    eccentricity: float                   # beam eccentricity in [0, 1)
    inclination: float                    # beam inclination angle, radians
    def __post_init__(self):
        # Normalize string paths from the TOML file to pathlib.Path objects.
        self.sed_file_name = Path(self.sed_file_name)
        self.scanning_simulation = Path(self.scanning_simulation)
def end_fig():
    """Close the current matplotlib figure to release its memory between plots."""
    plt.close()
def get_radius(planet):
    """Return the planet's radius in meters; raises KeyError for unknown names."""
    return PLANET_RADIUS[planet]
def get_telescope(frequency):
    """Return the IMO telescope path for a band name ("low"/"mid"/"high").

    Consistency fix: the original if/elif chain duplicated the TELESCOPES
    mapping with hard-coded strings. As before, any value other than
    "low"/"mid" falls through to the high-frequency telescope.
    """
    if frequency in ("low", "mid"):
        return TELESCOPES[frequency]
    return TELESCOPES["high"]
def check_planet(planet):
    """Normalize a planet name and return (name, radius_m).

    Raises ValueError when the name is not one of the supported planets.
    """
    planet = planet.lower()
    if planet not in ("jupiter", "neptune", "uranus"):
        raise ValueError(f"Invalid planet {planet}")
    return planet, get_radius(planet)
def check_frequency(freq):
    """Normalize a band name and return (band, telescope IMO path).

    Raises ValueError when the band is not "low", "mid" or "high".
    """
    freq = freq.lower()
    if freq not in ("low", "mid", "high"):
        raise ValueError(freq)
    return freq, get_telescope(freq)
def create_toml(settings,planet,frequency,inclination):
    """Write 'tempfile.toml' describing one simulation run.

    Validates planet/frequency first, then renders a TOML parameter file that
    compute() later feeds to litebird_sim.Simulation.
    """
    planet,radius = check_planet(planet)
    frequency,telescope = check_frequency(frequency)
    with open('tempfile.toml', 'w+') as file:
        content = f"""[simulation]
base_path = "./results/"
num_of_mc_runs = {settings.simulation_runs}
[planet]
planet_name = "{planet}"
sed_file_name = "sed_files/{planet}_hf3.csv"
planet_radius_m = {radius}
scanning_simulation = "./scanning_strategies/{planet}/"
[detector]
channel_obj = "/releases/v1.0/satellite/{telescope}/channel_info"
sampling_rate_hz = 1.0
eccentricity = {settings.simulation_eccentricity}
frequency = "{frequency}"
inclination = {inclination}
"""
        #print(content)
        file.write(content)
def parse_frequency(freq:str):
    """Currently a pass-through; kept as a hook for future normalization."""
    return freq
def check_eccentricity(e):
    """Validate that the eccentricity lies in [0, 1).

    Fixed: the original used `assert`, which is silently stripped under
    `python -O`; invalid values now raise ValueError unconditionally.
    """
    if not 0 <= e < 1:
        raise ValueError(f"Invalid value of e {e}")
def read_detector(parameters: Dict[str, Any], imo: lbs.Imo):
    """Build a detector object from the [detector] TOML section.

    Starts from a channel/detector IMO object (or a blank DetectorInfo) and
    overrides any attribute explicitly listed in the parameter dictionary.
    """
    if "channel_obj" in parameters:
        detobj = lbs.FreqChannelInfo.from_imo(
            imo, parameters["channel_obj"]
        ).get_boresight_detector()
    elif "detector_obj" in parameters:
        detobj = lbs.DetectorInfo.from_imo(imo, parameters["detector_obj"])
    else:
        detobj = lbs.DetectorInfo()
    # Fixed: the original tuple was missing a comma after "frequency", so
    # Python concatenated "frequency" "eccentricity" into the single dead key
    # "frequencyeccentricity" and neither parameter was ever copied.
    for param_name in (
        "name",
        "wafer",
        "pixel",
        "pixtype",
        "channel",
        "sampling_rate_hz",
        "fwhm_arcmin",
        "net_ukrts",
        "fknee_mhz",
        "fmin_hz",
        "alpha",
        "pol",
        "orient",
        "frequency",
        "eccentricity",
        "bandwidth_ghz",
        "bandcenter_ghz",
        "inclination",
    ):
        if param_name in parameters:
            setattr(detobj, param_name, parameters[param_name])
    return detobj
def load_parameters(sim: lbs.Simulation) -> Parameters:
    """Build a Parameters object from the simulation's parsed TOML sections.

    Note: inclination is converted here from degrees (TOML) to radians.
    """
    return Parameters(
        planet_name=sim.parameters["planet"]["planet_name"],
        sed_file_name=sim.parameters["planet"]["sed_file_name"],
        planet_radius_m=sim.parameters["planet"]["planet_radius_m"],
        scanning_simulation=sim.parameters["planet"]["scanning_simulation"],
        detector=read_detector(sim.parameters["detector"], sim.imo),
        eccentricity=sim.parameters["detector"]["eccentricity"],
        num_of_mc_runs=sim.parameters["simulation"].get("num_of_mc_runs", 20),
        error_amplitude_map_file_name=sim.parameters["simulation"].get(
            "error_amplitude_map_file_name", "error_map.fits"
        ),
        inclination=np.deg2rad(sim.parameters["detector"]["inclination"])
    )
#**************************************+ BEAM FUNCTIONS **********************************************
def beamfunc(pixel_theta, fwhm_arcmin, amplitude=1.0):
    """Symmetric Gaussian beam profile.

    Evaluates amplitude * exp(-ln2 * (theta / fwhm)^2) with the FWHM given in
    arcminutes and theta in radians; equals amplitude/2 at theta == fwhm.
    """
    fwhm_rad = np.deg2rad(fwhm_arcmin / 60.0)
    return amplitude * np.exp(-np.log(2) * (pixel_theta / fwhm_rad) ** 2)
def asymmetric_beam_good(mytuple, fwhm_arcmin, eccentricity, angle, amplitude=1.0):
    """Elliptical Gaussian beam rotated by `angle`, evaluated at (theta, phi).

    :param mytuple: (pixel_theta, pixel_phi) in radians; scalars or arrays
    :param fwhm_arcmin: major-axis FWHM in arcminutes
    :param eccentricity: shrinks the minor axis: a2 = a0 * (1 - eccentricity)
    :param angle: rotation of the ellipse in the tangent plane, radians
    :param amplitude: peak value of the beam
    :return: beam response, zeroed where theta > pi/2 (behind the focal plane)
    """
    pixel_theta, pixel_phi = mytuple
    sin_theta = np.sin(pixel_theta)
    x = sin_theta * np.cos(pixel_phi)
    y = sin_theta * np.sin(pixel_phi)
    # Rotate the tangent-plane coordinates into the ellipse frame.
    u = np.cos(angle) * x + np.sin(angle) * y
    v = -np.sin(angle) * x + np.cos(angle) * y
    a0 = fwhm_arcmin
    a2 = a0 * (1 - eccentricity)
    exponential = -4 * np.log(2) * ((u / np.deg2rad(a0 / 60.0)) ** 2 + (v / np.deg2rad(a2 / 60.0)) ** 2)
    result = amplitude * np.exp(exponential)
    # Fixed: the original scalar/array branching raised for multi-element
    # arrays ("truth value is ambiguous") and for scalar results (item
    # assignment on a float). np.where handles both cases uniformly.
    return np.where(np.asarray(pixel_theta) > np.pi / 2, 0.0, result)
#********************************************************************************************************
def calc_symm_beam_solid_angle(fwhm_arcmin):
    """Solid angle (sr) of the symmetric Gaussian beam with the given FWHM.

    Computes 2*pi * Int_0^pi exp(-ln2 (t/fwhm)^2) sin(t) dt, with the FWHM in
    arcminutes. Fixed: the original integrand was a leftover debug lambda
    (`lambda t: print(type(t))`) that returned None and made quad crash.
    """
    π = np.pi
    fwhm_rad = np.deg2rad(fwhm_arcmin / 60.0)
    integrand = lambda t: np.exp(-np.log(2) * (t / fwhm_rad) ** 2) * np.sin(t)
    return (
        2 * π * integrate.quad(integrand, 0, π)[0]
    )
def calc_beam_solid_angle(fwhm_arcmin,eccentricity,angle):
    """Numerically integrate the asymmetric beam times sin(theta) over the sphere.

    NOTE(review): dblquad here integrates theta over [0, pi] and phi over
    [0, pi] only — half of the azimuth range; confirm whether the beam's
    symmetry makes this intentional.
    """
    integrand: Callable[[np.array, np.array], np.array] = lambda phi,theta: (np.sin(theta) * asymmetric_beam_good((theta,phi), fwhm_arcmin,eccentricity,angle))
    return integrate.dblquad(
        integrand , 0, np.pi, lambda theta: 0, lambda theta: np.pi
    )[0]
def project_map_north_pole(pixels, width_deg, pixels_per_side=150):
    """Project a HEALPix map around the north pole onto a square (u, v) grid.

    u = sin(theta)cos(phi), v = sin(theta)sin(phi); the grid spans
    +/- sin(width_deg) on both axes and values are interpolated from `pixels`.
    Returns (u_grid, v_grid, projected_values).
    """
    max_sin = np.sin(np.deg2rad(width_deg))
    axis = np.linspace(-max_sin, +max_sin, pixels_per_side)
    u_grid, v_grid = np.meshgrid(axis, axis)
    # Invert the projection to recover (theta, phi) for each grid node.
    theta_grid = np.arcsin(np.sqrt(u_grid ** 2 + v_grid ** 2))
    phi_grid = np.arctan2(v_grid, u_grid)
    interpolated = healpy.get_interp_val(
        pixels, theta_grid.flatten(), phi_grid.flatten()
    )
    return u_grid, v_grid, interpolated.reshape(pixels_per_side, -1)
def create_uv_plot(
    fig, ax, pixels, width_deg, contour_lines=True, smooth=False, fwhm_arcmin=None
):
    """Draw a filled-contour (u, v) projection of `pixels` on the given axes.

    :param fig/ax: matplotlib figure and axes to draw into
    :param pixels: HEALPix map passed to project_map_north_pole
    :param width_deg: half-width of the projected region in degrees
    :param contour_lines: overlay labelled contour lines on the filled plot
    :param smooth: apply a small Gaussian filter before plotting
    :param fwhm_arcmin: when set, draw a white reference circle of that FWHM
    """
    from scipy.ndimage.filters import gaussian_filter
    u_grid, v_grid, grid = project_map_north_pole(pixels, width_deg)
    if smooth:
        grid = gaussian_filter(grid, 0.7)
    cs = ax.contourf(u_grid, v_grid, grid, cmap=plt.cm.bone)
    if fwhm_arcmin:
        from matplotlib.patches import Circle
        # Faint white circle marking the nominal beam FWHM for reference.
        ax.add_artist(
            Circle(
                (0, 0),
                np.sin(np.deg2rad(fwhm_arcmin / 60.0)),
                edgecolor="w",
                lw=5,
                facecolor="none",
                alpha=0.25,
            )
        )
    if contour_lines:
        cs2 = ax.contour(cs, levels=cs.levels[::2], colors="r")
        ax.clabel(cs2)
    ax.set_xlabel("U coordinate")
    ax.set_ylabel("V coordinate")
    ax.set_aspect("equal")
    cbar = fig.colorbar(cs)
    if contour_lines:
        cbar.add_lines(cs2)
def create_gamma_plots(gamma_map, gamma_error_map, fwhm_arcmin):
    """Build the three diagnostic (u, v) figures for the beam reconstruction.

    Returns (plot_size_deg, gamma figure, gamma-error figure,
    gamma-over-error figure); the plot width is twice the FWHM in degrees.
    """
    plot_size_deg = 2 * fwhm_arcmin / 60.0

    def make_figure(pixels, **uv_kwargs):
        # One fresh figure per map, rendered through create_uv_plot.
        figure, axes = plt.subplots()
        create_uv_plot(figure, axes, pixels, width_deg=plot_size_deg, **uv_kwargs)
        return figure

    gamma_fig = make_figure(gamma_map, fwhm_arcmin=None)
    gamma_error_fig = make_figure(
        gamma_error_map, contour_lines=False, fwhm_arcmin=fwhm_arcmin
    )
    gamma_over_error_fig = make_figure(
        gamma_map / gamma_error_map, smooth=True, fwhm_arcmin=fwhm_arcmin
    )
    return (plot_size_deg, gamma_fig, gamma_error_fig, gamma_over_error_fig)
def read_file(filename):
    """Read a comma-separated file, skipping the header line.

    Returns a list of (column 0, column 4) tuples. Note: column 4 keeps its
    trailing newline, exactly as the original `line.split(",")` produced.
    Fixed: the file handle was opened and never closed; a context manager now
    guarantees closure.
    """
    with open(filename, "r+") as fh:
        lines = fh.readlines()
    mylist = []
    for line in lines[1:]:
        elems = line.split(",")
        mylist.append((elems[0], elems[4]))
    return mylist
def write_to_file(filename, ecc_true, n_of_runs, ecc_estimate, fwhm, fwhm_error):
    """Append-open the results CSV and write the header line if the file is empty.

    The per-row writes are intentionally left commented out, matching the
    original (only the header is ever written). Fixed: the handle was opened
    and never closed; a context manager now guarantees closure.
    """
    with open(filename, "a+") as fh:
        #fh.write(f"{n_of_runs},{fwhm},{fwhm_error}")
        if os.stat(filename).st_size == 0:
            fh.write("eccenticity,n_runs,ecc_estimate,fwhm,fwhm_error")
        #print("eccenticity,n_runs,ecc_estimate,fwhm,fwhm_error")
        #print(f"{ecc_true},{n_of_runs},{ecc_estimate},{fwhm},{fwhm_error}")
        # fh.write(f"{ecc_true},{n_of_runs},{ecc_estimate},{fwhm},{fwhm_error}")
def compute(i,tot,planet,data_path: Path,logger_debug:bool, data_debug:bool):
    """Run one Monte Carlo beam-reconstruction simulation and return its results.

    :param i: 1-based index of this run (for logging only)
    :param tot: total number of runs (for logging only)
    :param planet: planet name (unused here beyond the signature; the planet is
        read from the TOML file — NOTE(review): confirm the parameter is needed)
    :param data_path: path of the TOML parameter file (e.g. "tempfile.toml")
    :param logger_debug: emit progress via logging when True
    :param data_debug: print intermediate map/term diagnostics when True
    :return: a models.Information with fit summaries, maps and plots, or None
        when every curve fit fails with RuntimeError
    """
    angle_data= []
    ampl_data = []
    sim = lbs.Simulation(
        parameter_file=str(data_path),
        name="In-flight estimation of the beam properties",
        base_path="./results/",
        description="""
This report contains the result of a simulation of the reconstruction
of in-flight beam parameters, assuming a scanning strategy and some
noise/optical properties of a detector.
""",
    )
    params = load_parameters(sim)
    det = read_detector(sim.parameters["detector"], sim.imo)
    _frequency = parse_frequency(sim.parameters["detector"]["frequency"])
    # TODO: This should be done by the framework
    copyfile(
        src=params.sed_file_name, dst=sim.base_path / params.sed_file_name.name,
    )
    print(" ")
    if logger_debug:
        logging.info(f"Starting simulation {i} of {tot}")
        logging.info(f"Planet: {params.planet_name.capitalize()}")
        logging.info(f"Frequency: {str(_frequency).capitalize()}")
        logging.info(f"Inclination angle: {params.inclination} [rad] --> {np.rad2deg(params.inclination)} [deg]")
    info = models.Information(
        planet = params.planet_name,
        frequency = _frequency,
        inclination = params.inclination
    )
    # Calculate the brightness temperature of the planet over the band
    print("sed file: ",params.sed_file_name)
    sed_data = np.loadtxt(params.sed_file_name, delimiter=",")
    # Interpolate the SED; out-of-band queries fall back to 1 (fill_value).
    sed_fn = interpolate.interp1d(sed_data[:, 0], sed_data[:, 1],bounds_error=False,fill_value=1)
    print("sed_fn: ",sed_fn)
    # Band-averaged brightness temperature over [center - bw/2, center + bw/2].
    planet_temperature_k = (
        integrate.quad(
            sed_fn,
            params.detector.bandcenter_ghz - params.detector.bandwidth_ghz / 2,
            params.detector.bandcenter_ghz + params.detector.bandwidth_ghz / 2,
        )[0]
        / params.detector.bandwidth_ghz
    )
    print("temp: ",planet_temperature_k)
    print("fwhm input: ",det.fwhm_arcmin)
    print("ecc input: ",params.eccentricity)
    beam_solid_angle = calc_beam_solid_angle(fwhm_arcmin=det.fwhm_arcmin,eccentricity=params.eccentricity,angle=params.inclination)
    sampling_time_s = 1.0 / params.detector.sampling_rate_hz
    # Load the precomputed scanning-strategy maps (hits, time, distance^2).
    input_map_file_name = params.scanning_simulation / "map.fits.gz"
    hit_map, time_map_s, dist_map_m2 = healpy.read_map(
        input_map_file_name, field=(0, 1, 2), verbose=False, dtype=np.float32
    )
    nside = healpy.npix2nside(len(dist_map_m2))
    pixel_theta, pixel_phi = healpy.pix2ang(nside, np.arange(len(hit_map)))
    # Noiseless "truth" beam evaluated on every pixel.
    gamma_map = asymmetric_beam_good(
        (pixel_theta, pixel_phi), params.detector.fwhm_arcmin, params.eccentricity, params.inclination, 1.0) # CHANGED
    # Fit only observed pixels within 3 FWHM of the beam axis.
    mask = (hit_map > 0.0) & (
        pixel_theta < np.deg2rad(3 * params.detector.fwhm_arcmin / 60.0)
    )
    assert hit_map[mask].size > 0, "no data available for the fit"
    # Per-pixel noise amplitude from the radiometer equation, scaled by the
    # planet-to-spacecraft distance squared.
    error_amplitude_map = (
        beam_solid_angle
        * (params.detector.net_ukrts * 1e-6)
        / (
            np.pi
            * (params.planet_radius_m ** 2)
            * planet_temperature_k
            * np.sqrt(sampling_time_s)
        )
    ) * dist_map_m2
    if data_debug:
        print("\nMap means:")
        print("Mean dist_map: ",np.mean(dist_map_m2))
        print("Mean error_map: ",np.mean(error_amplitude_map))
        print("\nPrinting sqrt terms:")
        print("Solid angle: ",beam_solid_angle)
        print("WN: ",params.detector.net_ukrts * 1e-6)
        print("Planet radius: ",params.planet_radius_m," ----> radius squared: ",params.planet_radius_m**2)
        print("Planet temperature: ",planet_temperature_k)
        print("Sqrt tau: ",np.sqrt(sampling_time_s))
        print("Bandcenter: ",params.detector.bandcenter_ghz,"\tBandwidth: ",params.detector.bandwidth_ghz)
        print(" ")
    #print(dist_map_m2)
    (
        plot_size_deg,
        gamma_fig,
        gamma_error_fig,
        gamma_over_error_fig,
    ) = create_gamma_plots(
        gamma_map, error_amplitude_map, fwhm_arcmin=params.detector.fwhm_arcmin,
    )
    #(gamma_fig, "gamma.svg"),
    #(gamma_error_fig, "gamma_error.svg"),
    #(gamma_over_error_fig, "gamma_over_error.svg"),
    destfile = sim.base_path / params.error_amplitude_map_file_name
    healpy.write_map(
        destfile,
        [gamma_map, error_amplitude_map],
        coord="DETECTOR",
        column_names=["GAMMA", "ERR"],
        column_units=["", ""],
        dtype=[np.float32, np.float32, np.float32],
        overwrite=True,
    )
    # Monte Carlo: refit the beam parameters on independently noised maps.
    fwhm_estimates_arcmin = np.empty(params.num_of_mc_runs)
    ampl_estimates = np.empty(len(fwhm_estimates_arcmin))
    eccentricity_estimates = np.empty(len(fwhm_estimates_arcmin))
    angle_estimates = np.empty(len(fwhm_estimates_arcmin))
    # NOTE(review): this loop variable shadows the function parameter `i`.
    for i in tqdm(range(len(fwhm_estimates_arcmin))):
        noise_gamma_map = gamma_map + error_amplitude_map * np.random.randn(
            len(dist_map_m2)
        )
        # Run the fit
        try:
            best_fit,pcov = optimize.curve_fit(
                asymmetric_beam_good,
                (pixel_theta[mask],pixel_phi[mask]),
                noise_gamma_map[mask],
                p0=[params.detector.fwhm_arcmin, params.eccentricity, params.inclination, 1.0],
                maxfev=1000000
            )
        except RuntimeError as e:
            # NOTE(review): a single failed fit aborts the whole run and
            # returns None (caller then crashes on attribute access) — verify.
            print(e)
            return
        fwhm_estimates_arcmin[i] = best_fit[0]
        eccentricity_estimates[i] = best_fit[1]
        angle_estimates[i] = best_fit[2]
        ampl_estimates[i] = best_fit[3]
    # Histogram each fitted parameter's distribution.
    fwhm_fig,fwhm_ax = models.fig_hist(fwhm_estimates_arcmin,"Counts","FWHM [arcmin]","FWHM distribution")
    fwhm_plot = models.Plot("FWHM",fwhm_fig)
    end_fig()
    ampl_fig,ampl_ax = models.fig_hist(ampl_estimates,"Counts","AMPL [arcmin]","Amplitude distribution (Gamma)")
    ampl_plot = models.Plot("AMPL",ampl_fig)
    end_fig()
    ecc_fig,ecc_ax = models.fig_hist(eccentricity_estimates,"Counts","Eccentricity","Eccentricity distribution")
    ecc_plot = models.Plot("ECC",ecc_fig)
    end_fig()
    angle_fig,angle_ax = models.fig_hist(angle_estimates,"Counts","Inclination Angle","Inclination distribution")
    ang_plot = models.Plot("ANGLE",angle_fig)
    end_fig()
    planet_ = models.Planet(params.planet_name.capitalize(),"%.2f" % planet_temperature_k,params.planet_radius_m)
    info.planet = planet_
    info.frequency = _frequency
    info.inclination = float(params.inclination)
    info.runs = len(fwhm_estimates_arcmin)
    info.fwhm = np.mean(fwhm_estimates_arcmin)
    info.fwhm_error = np.std(fwhm_estimates_arcmin)
    # NOTE(review): the angle/amplitude/eccentricity summaries are hard-coded
    # to 0 with the real statistics commented out — confirm this is intended.
    info.angle = 0#np.mean(angle_estimates)
    info.angle_error = 0#np.std(angle_estimates)
    info.ampl = 0#np.mean(ampl_estimates)
    info.ampl_error = 0#np.std(ampl_estimates)
    info.ecc = 0#np.mean(eccentricity_estimates)
    info.ecc_error = 0#np.std(eccentricity_estimates)
    info.maps = (gamma_map,error_amplitude_map)
    info.hitmap = hit_map
    info.plots = [fwhm_plot,ampl_plot,ecc_plot,ang_plot]
    print("FWHM error: ",np.std(fwhm_estimates_arcmin)," - ",info.fwhm_error)
    print("Angle error: ",np.std(angle_estimates)," - ",info.angle_error)
    return info
def remove_temptoml(logger_debug: bool):
    """Delete the temporary 'tempfile.toml' parameter file if it exists.

    Fixed: the original called subprocess.run(["rm","-rf",...], shell=True);
    with shell=True a *list* makes the extra items shell arguments, so the
    file was never actually removed on POSIX. os.remove is portable and direct.
    """
    if os.path.isfile('tempfile.toml'):
        os.remove('tempfile.toml')
        if logger_debug:
            print("[MAIN] tempfile.toml file has been deleted")
def clear_map_files():
    """Make clean_maps.sh executable and run it to delete generated map files.

    Fixed: the original passed a *list* together with shell=True, which on
    POSIX runs only the first item and drops the rest, so the chmod never
    applied; a shell command must be a single string.
    """
    subprocess.run("chmod +x clean_maps.sh", shell=True)
    subprocess.run(["./clean_maps.sh"])
def write_maps_to_file(i, planet, freq, angle, gamma_map=None, error_map=None, hit_map=None, logger_debug=True):
    """Saves maps to file:
    -- Gamma_map: map of the radiation pattern
    -- Error_map: map of the error of the radiation pattern
    -- Hit_map: per-pixel hit counts

    Each map is written one value per line to
    results/maps/<planet>/<kind>_<freq>_<angle>.dat (angle rounded to 2 dp).
    Fixed: output files were opened without context managers (leaked handles)
    and directories were created via a silent `subprocess mkdir`; os.makedirs
    now creates them (including parents) reliably.
    """
    planet = planet.lower()
    base_dir = "results/maps/"
    planets = ["jupiter", "neptune", "uranus"]
    if planet not in planets:
        raise ValueError(f"Invalid planet {planet} for write_maps.")
    for directory in (base_dir + p for p in planets):
        os.makedirs(directory, exist_ok=True)
    angle = float("{:.2f}".format(angle))

    def _dump(values, kind):
        # One value per line, stringified exactly as the original did.
        with open(f"{base_dir}{planet}/{kind}_{freq}_{angle}.dat", "w+") as fh:
            for value in values:
                fh.write(str(value) + "\n")

    if gamma_map is not None:
        _dump(gamma_map, "gammamap")
    if error_map is not None:
        _dump(error_map, "errormap")
    if hit_map is not None:
        _dump(hit_map, "hitmap")
    if logger_debug:
        print("Pixel maps written to file")
    return
def main(filename:str):
    """Top-level driver: sweep planets x frequencies x inclination angles.

    For every combination, writes a temporary TOML file, runs compute(),
    accumulates FWHM/angle errors, optionally persists maps and database rows,
    and appends per-run tables plus per-planet summary plots to the report.

    :param filename: path of the TOML settings file for settings.Settings
    """
    mysettings = settings.Settings(filename)
    sim2 = lbs.Simulation(
        base_path = "results/",
        name = mysettings.simulation_title,
        description= mysettings.simulation_description
    )
    if mysettings.settings_loggerdebug:
        print(mysettings)
    angle_data = models.Data(name="angles") #Store simulation results (or informations)
    fwhm_data = models.Data(name="fwhm")
    infos = []
    if mysettings.database_active:
        d = database.SimulationDatabase("db/"+mysettings.database_name)
        tbl = d.create_simulation()
    if mysettings.settings_clear_maps:
        clear_map_files()
    intro = """
# Base information
This report contains information about the dependency of the in-flight beam's inclination angle on planet and telescope frequency.
The document contains plots
"""
    sim2.append_to_report(intro)
    index = 0
    tot = len(mysettings.simulation_planets)*len(mysettings.simulation_frequencies)*len(mysettings.simulation_angles)
    for p in mysettings.simulation_planets:
        for f in mysettings.simulation_frequencies:
            for a in mysettings.simulation_angles:
                create_toml(mysettings,p,f,a) # Create a temporary TOML file with parameters
                info = compute(index+1,tot,p,"tempfile.toml",mysettings.settings_loggerdebug,mysettings.settings_datadebug) #extract data from the simulation by passing the temporary TOML file
                # Collect (inclination, error) pairs per planet/frequency for the summary plots.
                angle_data.append_data(p,f,(models.rad2arcmin(info.inclination),models.rad2arcmin(info.angle_error)))
                fwhm_data.append_data(p,f,(models.rad2arcmin(info.inclination),info.fwhm_error))
                if mysettings.settings_save_error_map:
                    write_maps_to_file(index,p,f,a,error_map = info.maps[1],logger_debug=mysettings.settings_loggerdebug)
                if mysettings.settings_save_hitmap:
                    write_maps_to_file(index,p,f,a,hit_map = info.hitmap,logger_debug=mysettings.settings_loggerdebug)
                if mysettings.database_active:
                    d.insert_run(info,table_name=tbl)
                # Jinja-style template: the {{...}} placeholders are filled from
                # the keyword arguments passed below.
                sim2.append_to_report("""
### Results of the Monte Carlo simulation: {{pname}} - {{freq}} - {{inc}}
Parameter | Value
---------- | -----------------
# of runs | {{ num_of_runs }}
FWHM | {{"%.3f"|format(fwhm_arcmin)}} ± {{"%.3f"|format(fwhm_err)}} arcmin
γ0 | {{"%.3f"|format(ampl)}} ± {{"%.3f"|format(ampl_err)}} arcmin
e | {{"%.3f"|format(ecc)}} ± {{"%.3f"|format(ecc_err)}}
theta | {{"%.3f"|format(angle)}} ± {{"%.3f"|format(angle_err)}}




""",
                    figures=[
                        (info.plots[0].figure, "fwhm_distribution.svg"),
                        (info.plots[1].figure, "ampl_distribution.svg"),
                        (info.plots[2].figure, "ecc_distribution.svg"),
                        (info.plots[3].figure,"angle_distribution.svg")
                    ],
                    pname = info.planet.name,
                    freq = info.frequency,
                    inc = info.inclination,
                    num_of_runs=info.runs,
                    fwhm_arcmin=info.fwhm,
                    fwhm_err=info.fwhm_error,
                    ampl=info.ampl,
                    ampl_err=info.ampl_error,
                    ecc=info.ecc,
                    ecc_err = info.ecc_error,
                    angle=info.angle,
                    angle_err=info.angle_error
                )
                index +=1
    #Information was stored in a Data object instead of being iterated directly to make it visually easier to understand
    #At this point we have a Data object with 3 different planets each with 3 different frequencies and each is a list of tuples (angle,gamma_error)
    for p in mysettings.simulation_planets:
        fig, (ax1, ax2) = plt.subplots(nrows = 2, ncols = 1, sharex = True,figsize=(5, 5))
        ax1.set_xlabel("Inclination angle [arcmin]")
        ax1.get_xaxis().set_visible(False)
        ax2.set_xlabel("Inclination angle [arcmin]")
        ax1.set_ylabel("FWHM Error [arcmin]", labelpad = -2)
        ax2.set_ylabel("Inclination angle Error [arcmin] ",labelpad = -2)
        for f in mysettings.simulation_frequencies:
            # Top panel: FWHM error vs inclination; bottom: angle error.
            ax1.scatter(
                [data[0] for data in fwhm_data.get_planet(p)[f]],
                [data[1] for data in fwhm_data.get_planet(p)[f]],
                s = 12
            )
            ax1.plot(
                [data[0] for data in fwhm_data.get_planet(p)[f]],
                [data[1] for data in fwhm_data.get_planet(p)[f]]
            )
            #s = UnivariateSpline(x, y, s=4)
            #sx1 = np.linspace( 0, len(fwhm_data.get_planet(p)[f]), 100)
            #sy1 = s(sx)
            #ßplt.plot(sx1, sy1)
            ax2.scatter(
                [data[0] for data in angle_data.get_planet(p)[f]],
                [data[1] for data in angle_data.get_planet(p)[f]],
                s = 12
            )
            ax2.plot(
                [data[0] for data in angle_data.get_planet(p)[f]],
                [data[1] for data in angle_data.get_planet(p)[f]],
                label = f"{f} frequency - {FREQUENCIES[f]}GHz"
            )
        plt.legend(prop={'size': 6})
        fig = plot.gcf()
        sim2.append_to_report(
            """
## {{planet}} plot

""",
            figures = [
                (fig,f'{p}.png')
            ],
            planet = p.capitalize()
        )
        plot.close()
    remove_temptoml(mysettings.settings_loggerdebug)
    # Write the accumulated report to disk.
    sim2.flush()
if __name__ == "__main__":
    # Validate CLI usage explicitly: `assert` is stripped under `python -O`,
    # which would have let a missing argument crash later with IndexError.
    if len(sys.argv) != 2:
        raise SystemExit("Program requires parameter <TOML parameter_file>")
    start = time.time()
    main(str(sys.argv[1]))
    end = time.time()
    print(f"Simulation executed in {end-start} seconds.")
1618327 | import numpy as np
from pylab import sqrt,linspace,array,argmax
from scipy.interpolate import RegularGridInterpolator, RectBivariateSpline
import math
from constants import *
'''
colorToProj: color -> projected
dataToProj: data -> projected
colorCoord: data, projected -> color
dataToColor: data -> color
colorToData: color -> data
dataCoord: color, projected -> data
'''
def colorToProj(x,y):
    # returns x,y in the global projected coordinates
    # no way to check if in color or map coord
    # Fixed: Python 2 print statement replaced with the call form, valid in
    # both Python 2 and 3.
    if y >= map['cmap_proj_y1'] and y <= map['cmap_proj_y0']:
        if x >= map['cmap_proj_x0'] and x <= map['cmap_proj_x1']:
            return x, y
        else:
            print('ERROR IN PROJCOORD')
            # NOTE(review): error path returns scalar -1 while success returns
            # a tuple — callers must handle both; verify.
            return -1
    else:
        return ((150*x) + float(map['cmap_proj_x0'])), ((-150*y) + float(map['cmap_proj_y0']))
def dataToProj(x,y):
    # Fixed: the original passed the returned tuple as a single positional
    # argument (dataToColor(colorToProj(x,y))), which always raised TypeError;
    # the tuple must be unpacked into the two coordinates.
    # NOTE(review): per the module docstring this maps data -> projected; the
    # composition order (colorToProj then dataToColor) looks inverted — verify.
    return dataToColor(*colorToProj(x, y))
def colorCoord(x,y):
    # must assume data in data coord or proj coord
    # first return data to color
    # else return proj to map
    # Fixed: Python 2 print statement replaced with the call form, valid in
    # both Python 2 and 3.
    if y >= map['y0'] and y <=map['y1']:
        if x >= map['x0'] and x <= map['x1']:
            return dataToColor(x,y)
        else:
            print('ERROR IN MAPCOORD')
            return -1
    else:
        return ((-float(map['cmap_proj_x0']) + x)/150.0), (-(-float(map['cmap_proj_y0']) + y)/150.0)
def dataToColor(x,y):
    # Rescale each axis by (colormap extent / data extent).
    # NOTE(review): the original comment said "colormap data point into data
    # point", but the scaling maps data -> colormap coordinates; verify.
    return x*(float(map['cmap_x1'])/float(map['x1'])), y*(float(map['cmap_y1'])/float(map['y1']))
def colorToData(x,y):
    # Inverse of dataToColor: rescale each axis by (data extent / colormap extent).
    return x*(float(map['x1'])/float(map['cmap_x1'])), y*(float(map['y1'])/float(map['cmap_y1']))
def dataCoord(x,y):
    #must assume data is in colormap or projected coord
    # Fixed: (1) Python 2 print statement replaced with the call form; (2) the
    # projected branch passed colorCoord's returned tuple as a single argument
    # to colorToData, which always raised TypeError — it is now unpacked.
    if y >= map['cmap_y0'] and y <=map['cmap_y1']:
        if x >= map['cmap_x0'] and x <= map['cmap_x1']:
            return colorToData(x,y)
    elif y >= map['cmap_proj_y0'] and y <=map['cmap_proj_y1']:
        if x >= map['cmap_proj_x0'] and x <= map['cmap_proj_x1']:
            return colorToData(*colorCoord(x, y))
    else:
        print('ERROR: dataCoord(x, y) error')
        return -1
def findSlopes(lines, vlist):
    """Append [slope, intercept] pairs for the segments joining vlist's vertices.

    The terminal vertex contributes one extra entry computed from the last
    segment's slope and its own coordinates, matching the original behavior
    (so len(lines) grows by len(vlist)).
    """
    for first, second in zip(vlist, vlist[1:]):
        slope = float(second[1] - first[1]) / float(second[0] - first[0])
        intercept = float(first[1]) - slope * (float(first[0]))
        lines.append([slope, intercept])
    # Extra entry anchored at the final vertex itself.
    last, prev = vlist[-1], vlist[-2]
    slope = float(last[1] - prev[1]) / float(last[0] - prev[0])
    intercept = float(last[1]) - slope * (float(last[0]))
    lines.append([slope, intercept])
    return lines
def circArr(x, y):
    """Return 50 (x, y) samples on the unit circle centered at (x, y)."""
    angles = linspace(0, 2 * np.pi, 50)
    xArr = x + np.cos(angles)
    yArr = y + np.sin(angles)
    return xArr, yArr
def curveDistance(x0, y0, cData):
    """Index of the curve sample nearest (x0, y0), or -1 if none snaps.

    A sample only "locks on" once its distance drops below the snap threshold
    (2 units); after that, any strictly closer sample wins. cData is a pair of
    parallel sequences [xs, ys].
    """
    snap_threshold = 2
    locked_on = False
    best_distance = 31
    best_index = -1
    for idx in range(len(cData[0])):
        distance = sqrt((x0 - cData[0][idx]) ** 2 + (y0 - cData[1][idx]) ** 2)
        if (not locked_on and distance < snap_threshold) or (
            locked_on and distance < best_distance
        ):
            locked_on = True
            best_distance = distance
            best_index = idx
    return best_index
| StarcoderdataPython |
3253248 | # Copyright (c) 2004 Divmod.
# See LICENSE for details.
from twisted.internet import defer
from nevow import context, inevow
from nevow import testutil
from nevow.flat import twist
from nevow.util import Deferred
from nevow import rend, loaders, tags
def deferit(data):
    # Data callback: hands back the first Deferred stored on the test case.
    return data.d
def deferdot(data):
    # Data callback: hands back the second Deferred stored on the test case.
    return data.d2
class RenderHelper(testutil.TestCase):
    """Base case: renders self.r against a fake request and returns the request."""
    def renderIt(self):
        fake_request = testutil.FakeRequest()
        page_ctx = context.PageContext(
            tag=self.r, parent=context.RequestContext(tag=fake_request)
        )
        self.r.renderHTTP(page_ctx)
        return fake_request
class LaterRenderTest(RenderHelper):
    """Rendering must pause at unfired Deferreds and resume in document order."""
    def setUp(self):
        # self.d feeds 'deferit' (nested four invisibles deep); self.d2 feeds 'deferdot'.
        self.d = Deferred()
        self.d2 = Deferred()
        self.r = rend.Page(
            docFactory=loaders.stan(
                tags.html(data=self)[
                    'Hello ', tags.invisible[tags.invisible[tags.invisible[tags.invisible[deferit]]]],
                    deferdot,
                ]
            )
        )
    def test_deferredSupport(self):
        # Unbuffered: output streams up to the first pending Deferred.
        req = self.renderIt()
        self.assertEqual(req.v, b'<html>Hello ')
        self.d.callback("world")
        self.assertEqual(req.v, b'<html>Hello world')
        self.d2.callback(".")
        self.assertEqual(req.v, b'<html>Hello world.</html>')
    def test_deferredSupport2(self):
        # Firing the later Deferred first must not emit anything out of order.
        req = self.renderIt()
        self.assertEqual(req.v, b'<html>Hello ')
        self.d2.callback(".")
        self.assertEqual(req.v, b'<html>Hello ')
        self.d.callback("world")
        self.assertEqual(req.v, b'<html>Hello world.</html>')
    def test_deferredSupport3(self):
        # Buffered pages emit nothing until the whole document can be flattened.
        self.r.buffered = True
        req = self.renderIt()
        self.assertEqual(req.v, b'')
        self.d.callback("world")
        self.assertEqual(req.v, b'')
        self.d2.callback(".")
        self.assertEqual(req.v, b'<html>Hello world.</html>')
    def test_renderNestedDeferredCallables(self):
        """
        Test flattening of a renderer which returns a Deferred which fires with
        a renderer which returns a Deferred.
        """
        def render_inner(ctx, data):
            return defer.succeed('')
        def render_outer(ctx, data):
            return defer.succeed(render_inner)
        ctx = context.WovenContext()
        ctx.remember(None, inevow.IData)
        out = []
        d = twist.deferflatten(render_outer, ctx, out.append)
        def flattened(ign):
            self.assertEqual(out, [''])
        d.addCallback(flattened)
        return d
    def test_renderNestedDeferredErrorHandling(self):
        """
        Test that flattening a renderer which returns a Deferred which fires
        with a renderer which raises an exception causes the outermost Deferred
        to errback.
        """
        class NestedException(Exception):
            pass
        def render_inner(ctx, data):
            raise NestedException()
        def render_outer(ctx, data):
            return defer.succeed(render_inner)
        ctx = context.WovenContext()
        ctx.remember(None, inevow.IData)
        out = []
        d = twist.deferflatten(render_outer, ctx, out.append)
        return self.assertFailure(d, NestedException)
class LaterDataTest(RenderHelper):
    """Deferred *data* directives (not renderers) must also pause flattening."""
    def data_later(self, context, data):
        # Page-level data; both bare `str` renderers below consume its value.
        return self.d
    def data_later2(self, context, data):
        # Data for the trailing span.
        return self.d2
    def setUp(self):
        self.d = Deferred()
        self.d2 = Deferred()
        # Note: ' and ' 'goodbye ' is one implicitly-concatenated string literal.
        self.r = rend.Page(docFactory=loaders.stan(
            tags.html(data=self.data_later)[
                'Hello ', str, ' and '
                'goodbye ',str,
                tags.span(data=self.data_later2, render=str)]))
    def test_deferredSupport(self):
        # Nothing renders until the page data fires; then both str renderers
        # observe the same "world" value.
        req = self.renderIt()
        self.assertEqual(req.v, b'')
        self.d.callback("world")
        self.assertEqual(req.v, b'<html>Hello world and goodbye world')
        self.d2.callback(".")
        self.assertEqual(req.v, b'<html>Hello world and goodbye world.</html>')
class SuperLaterDataTest(RenderHelper):
    """Tests for Deferred results used as slot fillers and as renderers."""
    def test_reusedDeferredSupport(self):
        """
        Two occurrences of a particular slot are each replaced with the
        result of the Deferred which is used to fill that slot.
        """
        doc = tags.html[
            tags.slot('foo'), tags.slot('foo')]
        doc.fillSlots('foo', defer.succeed(tags.span['Foo!!!']))
        self.r = rend.Page(docFactory=loaders.stan(doc))
        req = self.renderIt()
        self.assertEqual(req.v, b'<html><span>Foo!!!</span><span>Foo!!!</span></html>')
    def test_rendererCalledOnce(self):
        """
        Make sure that if a Deferred fires with a render function that the
        render function is called only once.
        """
        calls = []
        def recorder(ctx, data):
            # Track invocations; the rendered text is the call count.
            calls.append(None)
            return str(len(calls))
        doc = tags.html[tags.directive('renderer')]
        class RendererPage(rend.Page):
            docFactory = loaders.stan(doc)
            def render_renderer(self, ctx, data):
                return defer.succeed(recorder)
        self.r = RendererPage()
        req = self.renderIt()
        # '1' proves recorder ran exactly once.
        self.assertEqual(req.v, b'<html>1</html>')
| StarcoderdataPython |
3259011 | from __future__ import division
def diff_percentage(max_count, min_count, **kwargs):
    """Return the percentage difference of max_count relative to min_count.

    Both arguments are coerced with int(), so numeric strings are accepted.
    Returns 0 when the two counts are equal; raises ZeroDivisionError when
    min_count is 0 and the counts differ (unchanged from before).
    """
    difference_count = int(max_count) - int(min_count)
    if difference_count == 0:
        return 0
    # Multiply before truncating.  The previous code computed
    # int(diff / min) * 100, which truncated the ratio to a whole number
    # first and therefore reported 0% for any difference smaller than
    # min_count (e.g. 15 vs 10 yielded 0 instead of 50).
    variation = int(difference_count * 100 / int(min_count))
    return variation
| StarcoderdataPython |
6568893 | import os
import re
import argparse
import csv
import numpy as np
import pandas as pd
from AnalyzeMotionThroughTime import analyze_motion as analyze_motion_through_time
from AnalyzeMotionThroughTime import get_baseline
def parse_arguments(argv=None):
    """Parse command-line options for the cross-motion analysis.

    Args:
        argv: Optional list of argument strings; defaults to sys.argv[1:].
            Accepting an explicit list keeps the function unit-testable
            and is backward compatible with the original no-arg call.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser("Analyzes the predictions across all motions in a particular experiment.")
    parser.add_argument("--folder", type=str, help="The folder in which to look for data.")
    parser.add_argument("--model", type=str, help="The model used in predicting the annotations.")
    parser.add_argument("--reps", type=int, default=0, help="How many times a model had to be run repeatedly")
    parser.add_argument("--baseline", type=str, help="The baseline to compare against.")
    parser.add_argument("--prefix", type=str, default="", help="The prefix to filter for the test motions.")
    parser.add_argument("--output", type=str, help="The output filename")
    # The original choices list contained "traindev" twice; duplicates are
    # redundant for argparse's membership check and have been removed.
    parser.add_argument("--experiment", choices=["loo", "traindev"], type=str, default="loo", help="What kind of experiment was conducted.")
    return parser.parse_args(argv)
def get_motions(folder, prefix):
    """Return the sorted names of subdirectories of *folder*.

    When *prefix* is non-empty, only directories whose name starts with it
    are kept.
    """
    entries = [entry for entry in os.listdir(folder)
               if os.path.isdir(os.path.join(folder, entry))]
    if prefix:
        entries = [entry for entry in entries if entry.startswith(prefix)]
    return sorted(entries)
def analyze_across_motions(folder, model, reps, labels, prefix, baseline, experiment):
    """Aggregate per-motion prediction results over every test motion.

    For each motion found under *folder*, averages over *reps* repeated runs
    how many training documents were needed before the model's error rate
    dropped below the baseline, and collects the final F1 / error-rate
    figures into a pandas DataFrame (one row per motion).
    """
    motions = sorted(get_motions(folder, prefix))
    print(motions)
    full_results = list()
    for motion in motions:
        all_baseline_results = get_baseline(folder, motion, baseline, reps)
        baseline_results = all_baseline_results[4]
        baseline_f1 = all_baseline_results[3]
        motion_results = list()
        motion_performance = list()
        motion_errors = list()
        for i in range(reps):
            result_i = analyze_motion_through_time(folder, motion, model, labels, i, experiment)
            f1 = result_i[2]
            error_rate = (1 - result_i[0]) + (1 - result_i[1])  # (1 - p) + (1 - r)
            motion_performance.append(f1[-1])
            motion_errors.append(error_rate[-1])
            below_baseline = error_rate < baseline_results
            if below_baseline.any():
                i = len(below_baseline) - 1
                # Walk backwards to the first index that is NOT below the
                # baseline.  The bound must be tested *before* the array
                # access: the original `below_baseline[i] and i >= 0` kept
                # reading negative indices when the whole mask was True and
                # eventually raised IndexError.
                while i >= 0 and below_baseline[i]: i -= 1
                motion_results.append(i + 2)  # we iterated one step too far and index zero is after one training doc
            else:
                motion_results.append(len(f1) + 1)  # assume that it would have taken one more to beat the baseline
        motion_mean = np.mean(motion_results)
        motion_performance_mean = np.mean(motion_performance)
        motion_error_mean = np.mean(motion_errors)
        full_results.append([motion.replace("_", " "), motion_error_mean, motion_performance_mean, baseline_results, baseline_f1, motion_mean, len(f1) + 1 ])
    results = pd.DataFrame(full_results, columns=["motion", "Error-rate", "F1", "Baseline", "Baseline F1", "Min. training", "Docs"])
    # results = results.set_index("motion")
    return results
if __name__=="__main__":
    args = parse_arguments()
    # from pudb import set_trace; set_trace()
    # Only the "Evidence" label is analyzed here.
    results = analyze_across_motions(args.folder, args.model, args.reps, ["Evidence"], args.prefix, args.baseline, args.experiment)
    # results = results.sort_values("Docs")
    # Emit a LaTeX table of the selected columns with 3-decimal floats.
    results.to_latex(args.output, float_format=lambda x: '%.3f' % x, index=True, index_names=True, columns=["Docs", "Min. training", "Error-rate", "Baseline", "F1", "Baseline F1"])
    print(results)
| StarcoderdataPython |
8036653 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 10:17:32 2020
@author: briere
"""
import argparse
import logging
from GraphMetrics import *
from neo4j import GraphDatabase
descr = '''This script computes the optimal number of supports to cluster the IntegrationGraph.
Once filtered, the graph is clustered using either the Louvain community detection algorithm
or the Markov Clustering algorithm.
The resulting clustering is an integrated clustering and is stored in file.'''

# Command-line interface: Neo4j connection settings, graph naming and
# filtering options for the consensus-clustering pipeline.
parser = argparse.ArgumentParser(description=descr)
parser.add_argument("-id", "--neo_id",
                    help="Neo4j ID used to connect to the database")
parser.add_argument("-pwd", "--neo_pwd",
                    help="Neo4j password used to connect to the database")
parser.add_argument("-host", "--neo_localhost",
                    help="Neo4j graph localhost")
parser.add_argument("-subj", "--subject",
                    help="Analysis subject")
parser.add_argument("-cls_name", "--cluster_nodes_name",
                    help="Name of the Cluster nodes in the graph (defined in the metadata file)")
parser.add_argument("-obj_name", "--object_nodes_name",
                    help=" Name of the Object nodes in the graph\
                        (main nodes, defined in the metadata file)")
parser.add_argument("-out", "--log_out",
                    help="Log file")
parser.add_argument("-out_cls", "--clustering_out",
                    help="Output clustering file")
parser.add_argument("-rel", "--rel_name",
                    help="Name of the IntegrationEdges")
parser.add_argument("-dt", "--datatypes",
                    help="Datatypes to integrate (pipe | separated)")
parser.add_argument("-met", "--methods",
                    help="Clustering methods to integrate (pipe | separated)")
parser.add_argument("-min_clust", "--min_nodes_clust",
                    help="Minimum number of nodes for a cluster to be returned")
parser.add_argument("-min_nodes", "--min_nodes_analyse",
                    help="Minimum number of nodes for a clustering to be returned")
parser.add_argument("-nb_sup", "--user_nb_supports",
                    help="If 0, NeOmics computes the optimal number of supports to filter the graph \
                        else, the provided number of supports is used.")
parser.add_argument("-wa", "--writeAll",
                    help="If False, will only return objects that pass all filters")
parser.add_argument("-re", "--reassign",
                    help="If True, unclassified objects are reassigned to the consensus clusters")

args = parser.parse_args()

out = args.clustering_out
neo_id = args.neo_id
neo_pwd = args.neo_pwd
neo_localhost = args.neo_localhost
subject = args.subject
obj = args.object_nodes_name
clust = args.cluster_nodes_name
rel_name = args.rel_name
# Datatypes and methods arrive as single pipe-separated strings.
datatypes = list(args.datatypes.split('|'))
methods = list(args.methods.split('|'))
nb_nodes_clust = int(args.min_nodes_clust)
min_tot_nodes = int(args.min_nodes_analyse)
user_nb_supports = args.user_nb_supports
# Boolean flags arrive as strings; accept any capitalisation of "true".
writeAll = args.writeAll.upper() == "TRUE"
reassign_unclassified = args.reassign.upper() == "TRUE"
log_file = args.log_out

logging.basicConfig(filename=log_file, level=logging.INFO, format='%(message)s')
logging.info('Integrating clusterings from : %s' % (subject))
logging.info('On datatypes : %s' % (str(datatypes)))
logging.info('For methods : %s' % (str(methods)))
logging.info('Relationship type : %s' % (rel_name))
def print_and_log(string):
    """Record *string* in the configured log file and echo it on stdout."""
    logging.info(string)
    print(string)
def fuse_clusterings(subject, obj, clust, rel_name, nb_nodes_clust, min_tot_nodes,
                     methods, datatypes, driver, out, opt=True, user_nb_sup=None, writeAll=False, reassign_unclassified=False):
    '''
    Compute an integrated (consensus) clustering from the IntegrationGraph.

    Parameters
    ----------
    subject : STRING
        Subject of analysis (e.g: AML to run the analysis from AML cancer).
    obj : STRING
        Main nodes name (e.g: Patient)
    clust : STRING
        Cluster nodes name.
    rel_name : STRING
        Name of integration edges.
    nb_nodes_clust : INTEGER
        Minimum number of nodes in a cluster for it to be returned (min_size_clust).
    min_tot_nodes : INTEGER
        Minimum number of nodes to be partitioned in the consensus (min_size_consensus).
    methods : LIST OF STRINGS
        Input clusterings to consider (algorithm).
    datatypes : LIST OF STRINGS
        Input clusterings to consider (datatype).
    driver : NEO4J PYTHON DRIVER
        driver = GraphDatabase.driver(uri=neo_localhost, auth=(neo_id, neo_pwd)).
    out : STRING
        Output filename.
    opt : BOOL, optional
        Should the number of supports threshold be computed automatically ? The default is True.
        If set to False, user_nb_sup will be used to filter the graph
        and the min_tot_nodes parameter will be ignored.
    user_nb_sup : INT, optional
        If opt=False, which number of supports threshold should be used ? The default is None.

    Returns
    -------
    Consensus clustering (written to *out* and stored back into the graph).
    '''
    # Count how many distinct input clusterings (label combinations) exist;
    # this bounds the possible nb_supports values.
    with driver.session() as session:
        clust_node = "(c:" + clust + ":" + subject + ")"
        # Build a Cypher OR-condition over the requested datatype labels.
        datatypes_cond = "c:"
        for datatype in datatypes:
            if datatype == datatypes[0]:
                datatypes_cond = datatypes_cond + datatype
            else:
                datatypes_cond = datatypes_cond + " OR c:" + datatype
        # Same for the clustering-method labels.
        methods_cond = "c:"
        for method in methods:
            if method == methods[0]:
                methods_cond = methods_cond + method
            else:
                methods_cond = methods_cond + " OR c:" + method
        max_pos_nb_sup = session.run("MATCH " + clust_node + " WHERE (" + datatypes_cond + ") AND (" + methods_cond + ") RETURN count(distinct labels(c)) as max")
        max_pos_nb_sup = int([record['max'] for record in max_pos_nb_sup][0])
    clust_algos = ["Markov", "Louvain"]
    if opt == True:
        # Search every nb_supports threshold for both algorithms and keep
        # the (threshold, algorithm) pair with the best weighted modularity.
        MQ_louvain_markov = []
        nb_supp_louvain_markov = []
        for clust_algo in clust_algos:
            nb_supp = []
            MQ = []
            nb_nodes_kept = []
            nb_clusters = []
            for nb_supports in range(1, max_pos_nb_sup+1):
                nb_supp.append(nb_supports)
                mq, nb_nodes, nb_clust = wModularization(subject, obj, clust, rel_name, clust_algo, nb_nodes_clust, nb_supports, max_pos_nb_sup, driver)
                MQ.append(mq)
                nb_nodes_kept.append(nb_nodes)
                nb_clusters.append(nb_clust)
            MQ = np.asarray(MQ)
            nb_nodes_kept = np.asarray(nb_nodes_kept)
            nb_clusters = np.asarray(nb_clusters)
            nb_supp = np.asarray(nb_supp)
            nb_supports, max_MQ = find_opt_nb_supports(MQ, nb_supp, nb_nodes_kept, min_tot_nodes, 0.05)
            MQ_louvain_markov.append(max_MQ)
            nb_supp_louvain_markov.append(nb_supports)
        # If both algo yield to same nb_supports, keep algo yielding to maxMQ
        if nb_supp_louvain_markov[0] == nb_supp_louvain_markov[1]:
            nb_supports = nb_supp_louvain_markov[0]
            max_MQ = max(MQ_louvain_markov)
            max_MQ_index = np.where(MQ_louvain_markov == max_MQ)
            clust_algo = clust_algos[max_MQ_index[0][0]]
        # Else, chose between the 2 using find_opt_nb_suports
        else:
            # nb_supp_louvain_markov must be sorted by increasing nb_supports
            if nb_supp_louvain_markov[0] > nb_supp_louvain_markov[1]:
                nb_supp_louvain_markov = np.asarray([nb_supp_louvain_markov[1], nb_supp_louvain_markov[0]])
                MQ_louvain_markov = np.asanyarray([MQ_louvain_markov[1], MQ_louvain_markov[0]])
                clust_algos = ["Louvain", "Markov"]
            else:
                nb_supp_louvain_markov = np.asarray(nb_supp_louvain_markov)
                MQ_louvain_markov = np.asanyarray(MQ_louvain_markov)
            nb_supports, max_MQ = find_opt_nb_supports(MQ_louvain_markov, nb_supp_louvain_markov, np.asarray([min_tot_nodes, min_tot_nodes]), min_tot_nodes, 0.05)
            max_MQ_index = np.where(MQ_louvain_markov == max_MQ)
            clust_algo = clust_algos[max_MQ_index[0][0]]
        print_and_log("Optimal number of supports is %s" % nb_supports)
        print_and_log("The graph will be clustered with %s clustering using only IntegrationEdges with nb_supports >= %s" % (clust_algo, nb_supports))
    else:
        # User-pinned threshold: only choose the better of the two algorithms.
        nb_supports = user_nb_sup
        MQ_louvain_markov = []
        nb_nodes_kept = []
        nb_clusters = []
        nb_supp_louvain_markov = [user_nb_sup, user_nb_sup]
        for clust_algo in clust_algos:
            mq, nb_nodes, nb_clust = wModularization(subject, obj, clust, rel_name, clust_algo, nb_nodes_clust, nb_supports, max_pos_nb_sup, driver)
            MQ_louvain_markov.append(mq)
            nb_nodes_kept.append(nb_nodes)
            nb_clusters.append(nb_clust)
        MQ_louvain_markov = np.asarray(MQ_louvain_markov)
        nb_nodes_kept = np.asarray(nb_nodes_kept)
        nb_clusters = np.asarray(nb_clusters)
        nb_supp_louvain_markov = np.asarray(nb_supp_louvain_markov)
        max_MQ_index = np.where(MQ_louvain_markov == max(MQ_louvain_markov))
        # if both algo yield to same MQ, keep Markov
        if len(max_MQ_index[0]) > 1:
            clust_algo = 'Markov'
            max_MQ = MQ_louvain_markov[0]
        else:
            clust_algo = clust_algos[max_MQ_index[0][0]]
            max_MQ = max(MQ_louvain_markov)
    # Node/relationship naming differs per clustering algorithm.
    if clust_algo == "Louvain":
        node_clust_name = "LouvainCommunity"
        rel_clust_name = "FROM_COMMUNITY"
    elif clust_algo == "Markov":
        node_clust_name = "MarkovCluster"
        rel_clust_name = "FROM_MARKOVCLUST"
    if opt:
        communities = "(c:OptimalNbSupports:" + subject + ":" + node_clust_name + ":" + rel_name + ")"
    else:
        communities = "(c:UserNbSupports:" + subject + ":" + node_clust_name + ":" + rel_name + "{nb_supports:" + str(nb_supports) + "})"
    main_results, small_clusters, unclassified = get_main_results(subject, obj, clust, rel_name, clust_algo, nb_nodes_clust, nb_supports, driver)
    # Reassign unclassified nodes
    if small_clusters is None and unclassified is None:
        reassign_unclassified = False
        print_and_log('No node to reassign')
    if reassign_unclassified:
        if small_clusters is not None and unclassified is not None:
            all_unclassified = np.concatenate([small_clusters, unclassified])
        elif small_clusters is not None:
            all_unclassified = np.copy(small_clusters)
        elif unclassified is not None:
            all_unclassified = np.copy(unclassified)
        reassigned = reassign(subject, obj, rel_name, main_results, all_unclassified, driver)
    if main_results is not None:
        nb_nodes_kept = len(main_results)
        nb_clusters = len(np.unique(main_results[:, 1]))
        print_and_log('%s nodes classified in %s clusters after remooving too small clusters (min_accepted_nb_nodes_in_clusters set to %s in the configuration file)' % (nb_nodes_kept, nb_clusters, nb_nodes_clust))
        print_and_log("Weighted Modularization Quality for the clustering is %s" % str(max_MQ))
        if reassign_unclassified:
            print_and_log('%s unclassified nodes reassigned to consensus clusters' %str(len(reassigned)))
            print_and_log(str(reassigned))
            # Once reassigned there is nothing left to write separately.
            small_clusters = None
            unclassified = None
            writeAll = False
            main_results = np.concatenate([main_results, reassigned])
        print_and_log('Storing results into the graph')
        # Set nodes in small clusters as unclassified
        if small_clusters is not None:
            small_clusters[:, 1] = 'unclassified'
        results_to_neo4j(main_results, small_clusters, unclassified, subject, obj, rel_name, clust_algo, nb_supports, driver, opt)
        obj_nodes = "(o:"+ obj + ":" + subject + ")"
        check_graph_query = "MATCH " + obj_nodes + "-[r:" + rel_clust_name + "]-" + communities + " RETURN o, r, c"
        print_and_log('Check the graph with the following Cypher query : \n %s' % check_graph_query)
        # Write the tab-separated (object, cluster) assignment file.
        head = obj + "\t" + clust
        file = open(out, "w")
        if writeAll:
            if small_clusters is not None:
                if unclassified is not None:
                    np.savetxt(file, np.concatenate([main_results, small_clusters, unclassified]), fmt='%s', header=head, comments='', delimiter='\t')
                else:
                    np.savetxt(file, np.concatenate([main_results, small_clusters]), fmt='%s', header=head, comments='', delimiter='\t')
            else:
                if unclassified is not None:
                    np.savetxt(file, np.concatenate([main_results, unclassified]), fmt='%s', header=head, comments='', delimiter='\t')
                else:
                    np.savetxt(file, main_results, fmt='%s', header=head, comments='', delimiter='\t')
        else:
            np.savetxt(file, main_results, fmt='%s', header=head, comments='', delimiter='\t')
        file.close()
        print_and_log('Clustering results stored in file : %s' % out)
    else:
        exit("ERROR: Can not cluster the graph (only 1 cluster found). Please, use a lower number of supports.")
def results_to_neo4j(main_results, small_clusters, unclassified, subject, obj, rel_name, clust_algo, nb_supports, driver, opt=False):
    """Persist the consensus clustering back into the Neo4j graph.

    Creates one cluster node per community (labelled Optimal/UserNbSupports,
    plus SmallCommunity or Unclassified for the secondary partitions) and a
    relationship from every object node to its cluster node.
    """
    if clust_algo == "Louvain":
        node_clust_name = "LouvainCommunity"
        rel_clust_name = "FROM_COMMUNITY"
    elif clust_algo == "Markov":
        node_clust_name = "MarkovCluster"
        rel_clust_name = "FROM_MARKOVCLUST"
    # Main consensus clusters.
    for community in np.unique(main_results[:, 1]):
        community_id = rel_name + "_" + community
        if opt == True:
            community_node = "(c:OptimalNbSupports:" + subject + ":" + node_clust_name + ":" + rel_name + \
                " {id: '" + community_id + "'" + ", nb_supports: " + str(nb_supports) + ", clust:'" + community + "'})"
        else:
            community_node = "(c:UserNbSupports:" + subject + ":" + node_clust_name + ":" + rel_name + \
                " {id: '" + community_id + "'" + ", nb_supports: " + str(nb_supports) + ", clust:'" + community + "'})"
        create_community_node = "MERGE " + community_node
        with driver.session() as session:
            session.run(create_community_node)
            # Get all nodes belonging to same community
            nodes = [node for node in main_results[np.where(main_results == str(community))[0], :][:, 0]]
            for node in nodes:
                # Make relationship between community node and patients nodes
                make_rel = "MATCH (o:" + obj + ":" + subject + " {id: '" + str(node) + "'}) " + \
                    "MATCH " + community_node + " MERGE (o)-[r:" + rel_clust_name + "]-(c) RETURN o, r, c"
                session.run(make_rel)
    # Clusters that were below the minimum size threshold.
    if small_clusters is not None:
        for community in np.unique(small_clusters[:, 1]):
            community_id = rel_name + "_" + community
            if opt == True:
                community_node = "(c:OptimalNbSupports:SmallCommunity:" + subject + ":" + node_clust_name + ":" + rel_name + \
                    " {id: '" + community_id + "'" + ", nb_supports: " + str(nb_supports) + ", clust:'" + community + "'})"
            else:
                community_node = "(c:UserNbSupports:SmallCommunity:" + subject + ":" + node_clust_name + ":" + rel_name + \
                    " {id: '" + community_id + "'" + ", nb_supports: " + str(nb_supports) + ", clust:'" + community + "'})"
            create_community_node = "MERGE " + community_node
            with driver.session() as session:
                session.run(create_community_node)
                # Get all nodes belonging to same community
                nodes = [node for node in small_clusters[np.where(small_clusters == str(community))[0], :][:, 0]]
                for node in nodes:
                    # Make relationship between community node and patients nodes
                    make_rel = "MATCH (o:" + obj + ":" + subject + " {id: '" + str(node) + "'}) " + \
                        "MATCH " + community_node + " MERGE (o)-[r:" + rel_clust_name + "]-(c) RETURN o, r, c"
                    session.run(make_rel)
    # Nodes the clustering could not place at all.
    if unclassified is not None:
        for community in np.unique(unclassified[:, 1]):
            community_id = rel_name + "_" + community
            if opt == True:
                community_node = "(c:OptimalNbSupports:Unclassified:" + subject + ":" + node_clust_name + ":" + rel_name + \
                    " {id: '" + community_id + "'" + ", nb_supports: " + str(nb_supports) + ", clust:'" + community + "'})"
            else:
                community_node = "(c:UserNbSupports:Unclassified:" + subject + ":" + node_clust_name + ":" + rel_name + \
                    " {id: '" + community_id + "'" + ", nb_supports: " + str(nb_supports) + ", clust:'" + community + "'})"
            create_community_node = "MERGE " + community_node
            with driver.session() as session:
                session.run(create_community_node)
                # Get all nodes belonging to same community
                nodes = [node for node in unclassified[np.where(unclassified == str(community))[0], :][:, 0]]
                for node in nodes:
                    # Make relationship between community node and patients nodes
                    make_rel = "MATCH (o:" + obj + ":" + subject + " {id: '" + str(node) + "'}) " + \
                        "MATCH " + community_node + " MERGE (o)-[r:" + rel_clust_name + "]-(c) RETURN o, r, c"
                    session.run(make_rel)
def main():
    """Open a Neo4j driver, run the consensus clustering, and clean up."""
    driver = GraphDatabase.driver(uri=neo_localhost, auth=(neo_id, neo_pwd))
    # The sentinel string 'False' means the user did not pin a support
    # threshold, so let the pipeline search for the optimal one.
    optimise = user_nb_supports == 'False'
    threshold = None if optimise else int(user_nb_supports)
    fuse_clusterings(subject, obj, clust, rel_name, nb_nodes_clust,
                     min_tot_nodes, methods, datatypes, driver, out,
                     opt=optimise, user_nb_sup=threshold,
                     writeAll=writeAll, reassign_unclassified=reassign_unclassified)
    driver.close()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
340029 | from fastapi.testclient import TestClient
from app.core.config import settings
def test_hello_world(client: TestClient) -> None:
    """The /hello-world endpoint responds with the expected greeting."""
    response = client.get("/hello-world")
    body = response.json()
    assert body["msg"] == "Hello world!"
| StarcoderdataPython |
1737656 | <reponame>snoopyjc/pythonizer
def _caller(expr=None):
""" Implementation of caller function in perl"""
try:
fr = sys._getframe(2 if expr is None else (max(int(expr),0)+1))
package = 'main'
if hasattr(fr.f_builtins, '__PACKAGE__'):
package = fr.f_builtins.__PACKAGE__
if expr is None:
return [package, fr.f_code.co_filename, fr.f_lineno]
return [package, fr.f_code.co_filename, fr.f_lineno,
fr.f_code.co_name, fr.f_code.co_argcount, 1,
'', 0, 0, 0, 0]
except ValueError:
return None
| StarcoderdataPython |
3476187 | """
Author: <NAME>
Date: Today
This file is some simple calculations for Tom
"""
import numpy as np
import scipy.linalg as la
from hk_price_optimisticbeliefs import price_optimisticbeliefs
from hk_price_singlebeliefs import price_singlebeliefs
from hk_price_pessimisticbeliefs import price_pessimisticbeliefs
# ------------------------------------------------------------------- #
# Set Up Parameters
# ------------------------------------------------------------------- #
beta = .75  # discount factor
dividendreturn = np.array([[0], [1]])  # dividend paid in each of the two states
# Single-belief transition matrices of investor types a and b.
qa = np.array([[1./2, 1./2], [2./3, 1./3]])
qb = np.array([[2./3, 1./3], [1./4, 3./4]])
# Row-wise pessimistic / optimistic combinations of qa and qb.
qpess = np.array([[2./3, 1./3], [2./3, 1./3]])
qopt = np.array([[1./2, 1./2], [1./4, 3./4]])
qs_names = ["Qa", "Qb", "Qpess", "Qopt"]
the_qs = [qa, qb, qpess, qopt]
class PriceHolder(object):
    """
    This holds the results for Harrison Kreps. In particular, it
    accepts two matrices Qa and Qb and compares the single belief,
    optimistic belief, and pessimistic belief prices
    """
    def __init__(self, qa, qb, dividend_payoff, beta=.75):
        # Unpack the parameters
        self.qa, self.qb = qa, qb
        self.dividend_payoff = dividend_payoff
        # Fix: the beta argument was previously ignored (hard-coded .75),
        # so callers passing a different discount factor had it dropped.
        self.beta = beta
        self.max_iters = 10000
        self.tolerance = 1e-16

        # Create the Pessimistic and Optimistic Beliefs: row-wise the belief
        # assigning the lower (resp. higher) probability to state 1.
        self.qpess = np.empty((2, 2))
        self.qpess[0, :] = qa[0, :] if qa[0, 1] < qb[0, 1] else qb[0, :]
        self.qpess[1, :] = qa[1, :] if qa[1, 1] < qb[1, 1] else qb[1, :]
        self.qopt = np.empty((2, 2))
        self.qopt[0, :] = qa[0, :] if qa[0, 1] > qb[0, 1] else qb[0, :]
        self.qopt[1, :] = qa[1, :] if qa[1, 1] > qb[1, 1] else qb[1, :]

        # Price everything
        self.create_prices()

    def __repr__(self):
        """Human-readable dump of every computed price vector."""
        ret_str = "The Single Belief Price Vectors are:\n"+\
                  "P(Qa) = {}\nP(Qb) = {}\nP(Qopt) = {}\nP(Qpess) = {}\n\n"+\
                  "The Optimistic Belief Price Vector is:\n"+\
                  "P(Optimistic) = {}\n\n"+\
                  "Phat(a) = {}\n"+\
                  "Phat(b) = {}\n"+\
                  "The Pessimistic Belief Price Vector is:\n"+\
                  "P(Pessimistic) = {}"
        qaprice, qbprice, qpessprice, qoptprice = map(np.squeeze, [self.qaprice, self.qbprice, self.qpessprice, self.qoptprice])
        optimisticprice, pessimisticprice = map(np.squeeze, [self.optimisticprice, self.pessimisticprice])
        phata, phatb = map(np.squeeze, [self.phat_a, self.phat_b])
        return ret_str.format(qaprice, qbprice, qoptprice,
                              qpessprice, optimisticprice, phata, phatb,
                              pessimisticprice)

    def create_prices(self):
        """
        Computes prices under all belief systems
        """
        transitionmatrix = [self.qa, self.qb, self.qpess, self.qopt]
        # Single Belief Prices
        p_singlebelief = [price_singlebeliefs(q, self.dividend_payoff) for
                          q in transitionmatrix]
        # Compute Optimistic and Pessimistic beliefs.  Fix: these previously
        # read the module-level globals qa/qb instead of the matrices this
        # instance was constructed with.
        p_optimistic, phat_a, phat_b = price_optimisticbeliefs([self.qa, self.qb], self.dividend_payoff)
        p_pessimistic = price_pessimisticbeliefs([self.qa, self.qb], self.dividend_payoff)
        self.qaprice = p_singlebelief[0]
        self.qbprice = p_singlebelief[1]
        self.qpessprice = p_singlebelief[2]
        self.qoptprice = p_singlebelief[3]
        self.phat_a = phat_a
        self.phat_b = phat_b
        self.optimisticprice = p_optimistic
        self.pessimisticprice = p_pessimistic
        return p_singlebelief, p_optimistic, p_pessimistic
ph = PriceHolder(qa, qb, dividendreturn)
print(ph)

##### Problems start here
# Eigen-decompositions of the two single-belief transition matrices.
ea = la.eig(qa)
eb = la.eig(qb)
print("ea =")
print(ea)
print("eb=")
print(eb)
# High matrix powers converge to rows equal to the stationary distribution.
eaa = np.linalg.matrix_power(qa, 100)
print("100th power of qa")
print(eaa)
ebb = np.linalg.matrix_power(qb, 100)
print("100th power of qb")
print(ebb)

import quantecon as qe

qa = np.array([[1./2, 1./2], [2./3, 1./3]])
qb = np.array([[2./3, 1./3], [1./4, 3./4]])
# Stationary distributions via quantecon's MarkovChain.  The original code
# rebuilt mcB and recomputed ppb a second time with identical inputs; the
# redundant recomputation is removed (printed output is unchanged).
mcA = qe.MarkovChain(qa)
mcB = qe.MarkovChain(qb)
ppa = mcA.stationary_distributions
ppb = mcB.stationary_distributions
print("stationary distribution of P_a")
print(ppa)
print("stationary distribution of P_b")
print(ppb)
| StarcoderdataPython |
224517 | <filename>black_list/black_list/middlewares.py
# -*- coding: utf-8 -*-
class ProxyMiddleware(object):
    """Scrapy downloader middleware that routes every request through a
    single proxy endpoint taken from the project settings."""

    def __init__(self, proxy_url):
        self.proxy_url = proxy_url

    def process_request(self, request, spider):
        # Scrapy's HTTP downloader honours the 'proxy' key in request.meta.
        request.meta['proxy'] = self.proxy_url

    @classmethod
    def from_crawler(cls, crawler):
        # Standard Scrapy factory hook: read the proxy endpoint from settings.
        return cls(proxy_url=crawler.settings.get('PROXY_URL'))
| StarcoderdataPython |
6662608 | from scrapy import cmdline
name = '../spiders/itacasaCatalogSpider.py'
# Equivalent to running `scrapy runspider ../spiders/...` from a shell.
command = 'scrapy runspider {0}'.format(name)
cmdline.execute(command.split())
| StarcoderdataPython |
8122414 | <reponame>Cogito2012/DEAR
from ..registry import RECOGNIZERS
from .recognizer3d import Recognizer3D
@RECOGNIZERS.register_module()
class Recognizer3DRPL(Recognizer3D):
    """3D recognizer model framework.

    RPL variant of Recognizer3D: the classification head returns a dict and
    its negative prototype distance ('dist') is used in place of the usual
    class score at test time.
    """

    def forward_train(self, imgs, labels, **kwargs):
        """Defines the computation performed at every call when training."""
        # Fold the clip dimension into the batch axis.
        # assumes imgs is (batch, num_clips, ...) -- TODO confirm with caller.
        imgs = imgs.reshape((-1, ) + imgs.shape[2:])
        losses = dict()

        x = self.extract_feat(imgs)
        if hasattr(self, 'neck'):
            # The neck may contribute auxiliary losses during training.
            x, loss_aux = self.neck(x, labels.squeeze())
            losses.update(loss_aux)

        outputs = self.cls_head(x)
        gt_labels = labels.squeeze()
        loss_dict = self.cls_head.loss_cls(outputs, gt_labels, **kwargs)
        losses.update(loss_dict)

        return losses

    def _do_test(self, imgs):
        """Defines the computation performed at every call when evaluation,
        testing and gradcam."""
        num_segs = imgs.shape[1]
        imgs = imgs.reshape((-1, ) + imgs.shape[2:])

        x = self.extract_feat(imgs)
        if hasattr(self, 'neck'):
            x, _ = self.neck(x)

        outputs = self.cls_head(x)
        cls_score = outputs['dist']  # the negative distance is equivalent to the cls_score before softmax
        # Fuse the per-clip scores back into per-video predictions.
        cls_score = self.average_clip(cls_score, num_segs)

        return cls_score

    def forward_dummy(self, imgs):
        """Used for computing network FLOPs.

        See ``tools/analysis/get_flops.py``.

        Args:
            imgs (torch.Tensor): Input images.

        Returns:
            Tensor: Class score.
        """
        imgs = imgs.reshape((-1, ) + imgs.shape[2:])
        x = self.extract_feat(imgs)
        outputs = self.cls_head(x)
        outs = (outputs['dist'], )
        return outs
| StarcoderdataPython |
1706940 | <reponame>h-terao/hazuchi<gh_stars>0
from __future__ import annotations
import jax
import jax.numpy as jnp
from flax import struct
import chex
@struct.dataclass
class Observation:
    """An immutable class to summarize metrics.

    Attributes:
        values (dict of str: tuple[array, array]): Accumulated metrics to summarize.
            Each entry stores (weighted value sum, weight sum) so that
            summary() can report a weighted mean per key.
    """

    values: dict[str, tuple[chex.Array, chex.Array]] = struct.field(True, default_factory=dict)

    @classmethod
    def create(
        cls, metrics: dict[str, chex.Array] | None = None, weight: float = 1.0
    ) -> Observation:
        """Create a new observation.

        Args:
            metrics (dict of str: array): Metrics to summarize.
            weight (float, optional): Weight of metrics for accumulation.
                Maybe, batch size is usually passed.

        Returns:
            Observation: Returns a new observation initialized by the given metrics.
        """
        if metrics is None:
            return cls()
        else:
            # Pre-multiply by the weight so entries can simply be summed later.
            values = jax.tree_map(lambda v: (v * weight, weight), metrics)
            return cls(values)

    def add(self, metrics: dict[str, chex.Array], weight: float = 1.0) -> Observation:
        """Accumulate metrics."""
        return self + Observation.create(metrics, weight)

    def update(self, metrics: dict[str, chex.Array], weight: float = 1.0) -> Observation:
        """Overwrite metrics."""
        return self | Observation.create(metrics, weight)

    def summary(self, **kwargs) -> dict[str, chex.Array]:
        """Summarize metrics.

        Args:
            **kwargs: Values to overwrite a summary.
                Useful to add current steps, epochs, and elapsed time into the summary.
        """
        # Weighted mean per key: sum of weighted values over sum of weights.
        summary = {
            key: jnp.sum(val) / jnp.sum(weight) for key, (val, weight) in self.values.items()
        }
        return dict(summary, **kwargs)

    def scalar_summary(self, *, prefix: str | None = None, **kwargs) -> dict[str, float]:
        """Returns a summary."""
        if prefix is None:
            prefix = ""
        summary = {f"{prefix}{key}": float(val) for key, val in self.summary().items()}
        return dict(summary, **kwargs)

    def __add__(self, other: Observation) -> Observation:
        # Merge two observations: per-key sums of values and weights.
        updates = {}
        for key, (val, weight) in other.values.items():
            accum_val, accum_weight = self.values.get(key, (0, 0))
            accum_val += val
            accum_weight += weight
            updates[key] = (accum_val, accum_weight)
        values = dict(self.values, **updates)
        return Observation(values)

    def __iadd__(self, other: Observation) -> Observation:
        return self + other

    def __or__(self, other: Observation) -> Observation:
        # Entries of `other` overwrite entries of `self` on key collisions.
        values = dict(self.values, **other.values)
        return Observation(values)

    def __ior__(self, other: Observation) -> Observation:
        return self | other

    def __mul__(self, other: float) -> Observation:
        # Scales both value and weight, so summary() is left unchanged.
        return Observation(
            {key: (val * other, weight * other) for key, (val, weight) in self.values.items()}
        )

    def __truediv__(self, other: float) -> Observation:
        # Scales both value and weight, so summary() is left unchanged.
        return Observation(
            {key: (val / other, weight / other) for key, (val, weight) in self.values.items()}
        )
if __name__ == "__main__":
    # Smoke test / usage demonstration for the Observation dataclass.
    obs = Observation.create()
    obs += Observation.create({"x": 1, "y": 2, "z": 3}, weight=5)
    obs += Observation.create({"a": 1, "x": 1}, 2.0)
    print(obs.summary())
    print(obs.summary(epoch=10))
    print(obs.values)
    print((obs / 10).values)
    print(obs.scalar_summary(prefix="train/", epoch=100, iteration=1000, elapsed_time=0.1))

    @jax.jit
    def _test_jit(obs):
        # Confirms the dataclass traces correctly as a jit pytree argument.
        obs += Observation.create({"x": 1, "y": 2, "z": 3}, weight=5)
        obs |= Observation.create({"a": 1, "k": 1}, 2.0)
        return obs.summary(epoch=100)

    new_obs = _test_jit(obs)
    print(new_obs)
| StarcoderdataPython |
4949579 | import unicodedata
from .models import *
from django import forms
from django.contrib.auth import (
authenticate, get_user_model, password_validation,
)
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, identify_hasher,
)
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.text import capfirst
from django.utils.translation import gettext, gettext_lazy as _
class SubmissionForm(forms.ModelForm):
    """ModelForm exposing every field of the Submission model."""
    class Meta:
        model = Submission
        fields = '__all__'
        widgets = {
            # NOTE(review): Textarea attrs become HTML attributes, but
            # 'color' / 'padding-top' are CSS properties -- likely intended
            # as a style="" string. Confirm the rendered markup.
            'description': forms.Textarea(attrs={'color': "white", 'padding-top':'50px'}),
        }
class BountyForm(forms.ModelForm):
    """ModelForm for Bounty; 'hunter' is excluded and presumably assigned
    in the view -- TODO confirm against the view code."""
    class Meta:
        model = Bounty
        # fields = '__all__'
        exclude = ['hunter']
        widgets = {
            # NOTE(review): 'color' is rendered as an HTML attribute, not
            # CSS -- likely intended as an inline style. Confirm.
            'city': forms.TextInput(attrs={'color': "white"}),
        }
class UsernameField(forms.CharField):
    """CharField that NFKC-normalises its input and adds username-oriented
    widget hints for mobile browsers and password managers."""

    def to_python(self, value):
        # Collapse visually identical Unicode sequences to one canonical form.
        cleaned = super().to_python(value)
        return unicodedata.normalize('NFKC', cleaned)

    def widget_attrs(self, widget):
        attrs = super().widget_attrs(widget)
        attrs.update({
            'autocapitalize': 'none',
            'autocomplete': 'username',
        })
        return attrs
class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and
    password.
    """
    error_messages = {
        'password_mismatch': _('The two password fields didn’t match.'),
    }
    password1 = forms.CharField(
        label=_("Password"),
        strip=False,
        widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),
    )
    password2 = forms.CharField(
        label=_("Password confirmation"),
        widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),
        strip=False,
        help_text=_("Enter the same password as before, for verification."),
    )

    class Meta:
        model = User
        fields = ("username",)
        field_classes = {'username': UsernameField}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Focus the username field on page load, if the form renders it.
        if self._meta.model.USERNAME_FIELD in self.fields:
            self.fields[self._meta.model.USERNAME_FIELD].widget.attrs['autofocus'] = True

    def clean_password2(self):
        """Ensure both password entries match; raise a field error otherwise."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        return password2

    def _post_clean(self):
        super()._post_clean()
        # Validate the password after self.instance is updated with form data
        # by super().
        password = self.cleaned_data.get('password2')
        if password:
            try:
                password_validation.validate_password(password, self.instance)
            except ValidationError as error:
                self.add_error('password2', error)

    def save(self, commit=True):
        """Hash the password onto the user and optionally persist it."""
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            # Fix: removed a leftover debug print that fired on the *commit*
            # path with the misleading message "couldnt commit".
            user.save()
return user | StarcoderdataPython |
1702178 | <reponame>ronekko/differential_geometry
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 17 20:58:03 2021
@author: ryuhei
"""
import matplotlib.pyplot as plt # type: ignore
import numpy as np # type: ignore
import open3d as o3d # type: ignore
import torch
def spherical_to_cartesian(u, radius=1.0):
    """Convert spherical angles to 3-D Cartesian coordinates.

    u is an (N, 2) array of [polar angle theta, azimuth phi]; returns the
    corresponding (N, 3) array of points on a sphere of the given radius.
    """
    theta, phi = u.T[0], u.T[1]
    sin_theta = np.sin(theta)
    x = radius * sin_theta * np.cos(phi)
    y = radius * sin_theta * np.sin(phi)
    z = radius * np.cos(theta)
    return np.vstack((x, y, z)).T
def torus_to_cartesian(u, R=2.0, r=1.0):
    """Map torus angles (theta, phi) to 3-D Cartesian points on a torus.

    R is the major (center-of-tube) radius and r the tube radius.  Accepts
    array-likes as well as tensors; returns an (N, 3) torch tensor.
    """
    if not isinstance(u, torch.Tensor):
        u = torch.tensor(u)
    theta, phi = u.T[0], u.T[1]
    # Distance of the surface point from the torus axis.
    ring = R + r * torch.sin(theta)
    x = ring * torch.cos(phi)
    y = ring * torch.sin(phi)
    z = r * torch.cos(theta)
    return torch.vstack((x, y, z)).T
def generate_points_on_torus(N, R=2.0, r=1.0):
    """Sample ~N points uniformly (w.r.t. surface area) on a torus.

    Uses rejection sampling on the tube angle theta: points on the inner side
    of the tube cover less area, so proposals there are accepted with lower
    probability.  Returns an open3d PointCloud whose normals are overwritten
    with analytic tube normals.

    NOTE(review): the acceptance threshold is a piecewise-linear approximation
    of the true (R + r*sin(theta)) density -- confirm this is intentional.
    """
    acceptance_rate = R / (R + r)
    # Oversample so that roughly N proposals survive rejection (half per side).
    N_proposal = int(N / acceptance_rate) // 2
    theta_minus = np.random.uniform(-np.pi, 0, N_proposal)
    theta_plus = np.random.uniform(0, np.pi, N_proposal)
    # Accept each proposal with probability proportional to its local area.
    threashold_minus = theta_minus * (2 * r) / np.pi + R + r
    mask = np.random.uniform(0, R + r, N_proposal) < threashold_minus
    theta_minus = theta_minus[mask]
    threashold_plus = -theta_plus * (2 * r) / np.pi + R + r
    mask = np.random.uniform(0, R + r, N_proposal) < threashold_plus
    theta_plus = theta_plus[mask]
    # Shift so theta matches the convention used by torus_to_cartesian.
    theta = np.concatenate((theta_minus, theta_plus)) + np.pi / 2.0
    phi = np.random.uniform(-np.pi, np.pi, len(theta))
    u = np.vstack((theta, phi)).T.copy()
    x = torus_to_cartesian(u, R, r)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(x)
    pcd.estimate_normals()
    # Replace the estimated normals in place with analytic tube normals
    # (unit vectors from the tube center, reusing the sphere mapping).
    normals = spherical_to_cartesian(u)
    pcd_normals_array = np.asarray(pcd.normals)
    pcd_normals_array[:] = normals[:]
    return pcd
def create_torus(R=2.0, r=1.0, num_vertices=3000):
    """Build a torus TriangleMesh by meshing uniformly sampled surface points.

    Parameters
    ----------
    R : float, optional
        Major radius (torus center to tube center). The default is 2.0.
    r : float, optional
        Minor (tube) radius. The default is 1.0.
    num_vertices : int, optional
        Approximate number of vertices of the mesh. The default is 3000.

    Returns
    -------
    o3d.geometry.TriangleMesh
        Mesh reconstructed from the sampled point cloud.
    """
    # Sample random points uniformly over the torus surface.
    point_cloud = generate_points_on_torus(num_vertices, R, r)
    # Reconstruct a triangle mesh via ball pivoting over the samples.
    ball_radii = o3d.utility.DoubleVector([0.5])
    return o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
        point_cloud, ball_radii)
if __name__ == '__main__':
    # Visual comparison: our sampled-and-meshed torus (translated up) next to
    # open3d's built-in analytic torus mesh (translated down).
    R = 2.0
    r = 1.0
    N = 3000
    mesh = create_torus(R, r, N)
    mesh.paint_uniform_color([0.7, 0.5, 0.5])
    mesh2 = o3d.geometry.TriangleMesh.create_torus(R, r)
    mesh2.compute_vertex_normals()
    mesh2.paint_uniform_color([0.5, 0.5, 0.7])
    mesh.translate([0, 0, 2])
    mesh2.translate([0, 0, -2])
    o3d.visualization.draw([mesh, mesh2], show_ui=True)
    # o3d.visualization.draw_geometries([mesh, mesh2])
| StarcoderdataPython |
1763562 | <reponame>Gothingbop/JT_AWS
import subprocess
import sys

import pip
def InstallRequirements(requirements_path, target_path):
    """
    Installs all the packages for an AWS Lambda function.

    For each requirement, first tries to install a binary wheel built for the
    manylinux platform that AWS Lambda functions run on; if that fails, falls
    back to a plain install (which may build from source).

    Fix: pip.main() was removed in pip >= 10 -- pip must be invoked as a
    subprocess of the current interpreter instead.

    :param requirements_path: Path to the requirements file
    :param target_path: Path to the folder where the requirements are installed
    :return: None
    """
    with open(requirements_path) as req_file:
        requirements = req_file.readlines()
    for requirement in requirements:
        requirement = requirement.strip()
        # Skip blank lines and comment lines in the requirements file.
        if not requirement or requirement.startswith('#'):
            continue
        # Attempt 1: Lambda-compatible manylinux binary wheel only.
        result = subprocess.call([
            sys.executable, '-m', 'pip',
            'install',
            '-q',
            '--platform=manylinux1_x86_64',
            f'--target={target_path}',
            '--only-binary=:all:',
            '--upgrade',
            requirement,
        ])
        if result:
            # Attempt 2: plain install without the platform restriction.
            subprocess.call([
                sys.executable, '-m', 'pip',
                'install', '-q', f'--target={target_path}', '--upgrade',
                requirement,
            ])
| StarcoderdataPython |
1877886 | """
training_dataset.py
"""
# 3rd party imports
import os
import pickle
import numpy as np
class TrainingDataset:
    """Training dataset for machine learning.

    Bundles train/test feature tables, targets, borehole ids and depths,
    validates their mutual consistency at construction time, and builds
    leave-one-hole-out cross-validation folds over the training boreholes.
    """

    def __init__(self, train_geochem_index, x_train, y_train, hole_id_train, depth_train, train_holes,
                 test_geochem_index, x_test, y_test, hole_id_test, depth_test, test_holes):
        # Set parameters
        self.train_geochem_index = train_geochem_index
        self.x_train = x_train
        self.y_train = y_train
        self.hole_id_train = hole_id_train
        self.depth_train = depth_train
        self.train_holes = train_holes
        self.test_geochem_index = test_geochem_index
        self.x_test = x_test
        self.y_test = y_test
        self.hole_id_test = hole_id_test
        self.depth_test = depth_test
        self.test_holes = test_holes
        # Check train shapes and index alignment
        assert len(train_geochem_index) == x_train.shape[0]
        assert x_train.shape[0] == y_train.shape[0]
        assert hole_id_train.shape[0] == y_train.shape[0]
        assert all([train_hole in np.unique(hole_id_train.values) for train_hole in train_holes])
        assert np.all(train_geochem_index.index.values == x_train.index.values)
        assert np.all(train_geochem_index.index.values == y_train.index.values)
        assert np.all(train_geochem_index.index.values == hole_id_train.index.values)
        assert np.all(x_train.index.values == y_train.index.values)
        # Check test shapes and index alignment
        assert len(test_geochem_index) == x_test.shape[0]
        assert x_test.shape[0] == y_test.shape[0]
        assert hole_id_test.shape[0] == y_test.shape[0]
        # Fix: the original re-checked the *train* holes here (copy-paste);
        # the test holes must be validated against the test hole ids.
        assert all([test_hole in np.unique(hole_id_test.values) for test_hole in test_holes])
        assert np.all(test_geochem_index.index.values == x_test.index.values)
        assert np.all(test_geochem_index.index.values == y_test.index.values)
        assert np.all(test_geochem_index.index.values == hole_id_test.index.values)
        assert np.all(x_test.index.values == y_test.index.values)
        # Check train-test borehole separation.
        # Fix: 'any' only guaranteed one non-overlapping hole; full separation
        # requires that *no* training hole appears in the test hole list.
        assert all([train_hole not in test_holes for train_hole in train_holes])
        # Generate CV folds
        self.cv_folds = self._generate_cv_folds()

    def save(self, path):
        """Pickle data model to *path*."""
        with open(path, 'wb') as f:
            pickle.dump(self, f)

    def _generate_cv_folds(self):
        """Generate leave-one-hole-out CV folds, keyed by fold number."""
        cv_folds = dict()
        # Each training borehole takes one turn as the held-out fold.
        for index, test_hole in enumerate(self.train_holes):
            train_holes = [hole_id for hole_id in self.train_holes if hole_id != test_hole]
            cv_folds[index] = {
                'test_hole': test_hole,
                'train_holes': train_holes,
                'test_index': self.hole_id_train[self.hole_id_train == test_hole].index.tolist(),
                'train_index': self.hole_id_train[~self.hole_id_train.isin([test_hole])].index.tolist()
            }
        return cv_folds
| StarcoderdataPython |
87736 | # following PEP 386
__version__ = "1.0b1"

# Sanity-check that the installed dependency versions are new enough.
import django
import mptt

_REQUIRED = (
    (django.VERSION, (1, 3, 0),
     "At least Django 1.3.0 is required to run this application"),
    (mptt.VERSION, (0, 5, 1),
     "At least django-mptt 0.5.1 is required to run this application"),
)
for _found, _minimum, _message in _REQUIRED:
    if _found < _minimum:
        raise ImportError(_message)
| StarcoderdataPython |
def register_blueprint(app):
    """Attach the schedule module's blueprints (jobs, histories) to *app*."""
    from apps.schedule import job
    from apps.schedule import history
    # Mount each blueprint under its URL prefix.
    for module, prefix in ((job, '/schedule/jobs'),
                           (history, '/schedule/histories')):
        app.register_blueprint(module.blueprint, url_prefix=prefix)
| StarcoderdataPython |
4979514 | # -*- coding:utf-8 -*-
import codecs
import os
import jieba
stopwords_path = "./setting/stop_words.txt"
synonyms_path = "./setting/synonyms_words.txt"
user_dicts = "./setting/user_defined_dicts.txt"
def is_instr(str):
    """Return True if *str* contains any Chinese numeral character (一 .. 十).

    NOTE: the parameter name shadows the builtin ``str``; it is kept for
    backward compatibility with existing keyword callers.
    """
    # Idiom fix: replace the ten-clause 'or' chain with a single any() over
    # the numeral characters.
    return any(ch in str for ch in "一二三四五六七八九十")
# Tokenize and remove stop words and merge synonyms
def tokenize(input_dir, output_dir):
    """Segment every file under *input_dir* with jieba, replace synonyms with
    their canonical form, drop stop words and Chinese-numeral tokens, and
    write one space-separated token file per input file into *output_dir*.
    """
    # load user-defined dicts
    jieba.load_userdict(user_dicts)
    # load synonyms; each line is 'canonical-alt1-alt2-...'
    combine_dict = {}
    for line in codecs.open(synonyms_path, 'r', 'utf-8'):
        seperate_word = line.strip().split('-')
        num = len(seperate_word)
        for i in range(1, num):
            # Map every later word of the row onto the first (canonical) word.
            combine_dict[seperate_word[i]] = seperate_word[0]
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    # remove stop words and merge synonyms
    print("Tokenize and remove stop words...")
    for root, dirs, files in os.walk(input_dir):
        for file in files:
            doc_path = os.path.join(root, file)
            # NOTE(review): files from different subdirectories with the same
            # basename overwrite each other in output_dir -- confirm intended.
            out_file = output_dir+'/'+file
            # NOTE(review): these three handles are never closed; consider
            # 'with' blocks, and hoisting the stop-word file out of the loop.
            fin = codecs.open(doc_path, 'r', 'utf-8')
            fout = codecs.open(out_file, 'w', 'utf-8')
            f_stop = codecs.open(stopwords_path, 'r', 'utf-8')
            texts = fin.read()
            stop_text = f_stop.read()
            word_list = []
            # Segment the document into words.
            seg_list = jieba.cut(texts)
            seg_list = "/".join(seg_list)
            # Replace each word by its canonical synonym where one exists.
            seg_text = []
            for word in seg_list.split('/'):
                word = word.strip()
                if word in combine_dict.keys():
                    seg_text.append(combine_dict[word])
                else:
                    seg_text.append(word)
            # Drop stop words and tokens containing Chinese numerals.
            stop_seg_list = stop_text.split('\n')
            for word in seg_text:
                if (not (word in stop_seg_list)) and (not is_instr(word.strip())):
                    word_list.append(word)
            word_list = " ".join(word_list)
            fout.write(word_list)
    print("Saved files to [%s]\n" % output_dir)
if __name__ == '__main__':
    # Default corpus locations, relative to the project root.
    data_dir = "./data_cleaned/clean_text_out"
    out_dir = "./data_cleaned/tokenize_out"
    tokenize(data_dir, out_dir)
| StarcoderdataPython |
1733807 | """
Tests for magic imports.
"""
import os
import time
import pandas as pd
from six import string_types
from quilt.nodes import GroupNode, DataNode
from quilt.tools import command
from quilt.tools.const import PACKAGE_DIR_NAME
from quilt.tools.package import Package, PackageException
from quilt.tools.store import PackageStore
from .utils import patch, QuiltTestCase
class ImportTest(QuiltTestCase):
    """Tests for quilt's magic import machinery (quilt.data / quilt.team)."""

    def test_imports(self):
        """Build a package and verify node types, contents, and bad imports."""
        mydir = os.path.dirname(__file__)
        build_path = os.path.join(mydir, './build.yml')
        command.build('foo/package', build_path)

        # Good imports
        from quilt.data.foo import package
        from quilt.data.foo.package import dataframes
        from quilt.data.foo.package import README

        # Contents of the imports
        assert isinstance(package, GroupNode)
        assert isinstance(dataframes, GroupNode)
        assert isinstance(dataframes.csv, DataNode)
        assert isinstance(README, DataNode)

        assert package.dataframes == dataframes
        assert package.README == README

        assert set(dataframes._keys()) == {'csv', 'nulls'}
        assert set(dataframes._group_keys()) == set()
        assert set(dataframes._data_keys()) == {'csv', 'nulls'}

        assert isinstance(README(), string_types)
        assert isinstance(README._data(), string_types)
        assert isinstance(dataframes.csv(), pd.DataFrame)
        assert isinstance(dataframes.csv._data(), pd.DataFrame)

        str(package)
        str(dataframes)
        str(README)

        # Bad attributes of imported packages
        with self.assertRaises(AttributeError):
            package.foo

        with self.assertRaises(AttributeError):
            package.dataframes.foo

        with self.assertRaises(AttributeError):
            package.dataframes.csv.foo

        # Bad imports
        with self.assertRaises(ImportError):
            import quilt.data.foo.bad_package

        with self.assertRaises(ImportError):
            import quilt.data.bad_user.bad_package

        with self.assertRaises(ImportError):
            from quilt.data.foo.dataframes import blah

        with self.assertRaises(ImportError):
            from quilt.data.foo.baz import blah

    def test_team_imports(self):
        """Same as test_imports, but through the quilt.team namespace."""
        mydir = os.path.dirname(__file__)
        build_path = os.path.join(mydir, './build.yml')
        command.build('test:bar/package', build_path)

        # Good imports
        from quilt.team.test.bar import package
        from quilt.team.test.bar.package import dataframes
        from quilt.team.test.bar.package import README

        # Contents of the imports
        assert isinstance(package, GroupNode)
        assert isinstance(dataframes, GroupNode)
        assert isinstance(dataframes.csv, DataNode)
        assert isinstance(README, DataNode)

        assert package.dataframes == dataframes
        assert package.README == README

        assert set(dataframes._keys()) == {'csv', 'nulls'}
        assert set(dataframes._group_keys()) == set()
        assert set(dataframes._data_keys()) == {'csv', 'nulls'}

        assert isinstance(README(), string_types)
        assert isinstance(README._data(), string_types)
        assert isinstance(dataframes.csv(), pd.DataFrame)
        assert isinstance(dataframes.csv._data(), pd.DataFrame)

        str(package)
        str(dataframes)
        str(README)

        # Bad attributes of imported packages
        with self.assertRaises(AttributeError):
            package.foo

        with self.assertRaises(AttributeError):
            package.dataframes.foo

        with self.assertRaises(AttributeError):
            package.dataframes.csv.foo

        # Bad imports
        with self.assertRaises(ImportError):
            import quilt.team.test.bar.bad_package

        with self.assertRaises(ImportError):
            import quilt.team.test.bad_user.bad_package

        with self.assertRaises(ImportError):
            from quilt.team.test.bar.dataframes import blah

        with self.assertRaises(ImportError):
            from quilt.team.test.bar.baz import blah

    def test_import_group_as_data(self):
        """A group of compatible child dataframes can be read as one frame."""
        mydir = os.path.dirname(__file__)
        build_path = os.path.join(mydir, './build_group_data.yml')
        command.build('foo/grppkg', build_path)

        # Good imports
        from quilt.data.foo.grppkg import dataframes
        assert isinstance(dataframes, GroupNode)

        # Make sure child dataframes were concatenated in the correct order (alphabetically by node name).
        df = dataframes._data()
        assert df['x'].tolist() == [1, 2, 3, 4]
        assert df['y'].tolist() == [1, 4, 9, 16]

        # Incompatible Schema
        from quilt.data.foo.grppkg import incompatible
        with self.assertRaises(PackageException):
            incompatible._data()

    def test_multiple_package_dirs(self):
        """The default package dir takes priority over QUILT_PACKAGE_DIRS."""
        mydir = os.path.dirname(__file__)
        build_path = os.path.join(mydir, './build.yml')  # Contains 'dataframes'
        simple_build_path = os.path.join(mydir, './build_simple.yml')  # Empty
        new_build_dir = 'aaa/bbb/%s' % PACKAGE_DIR_NAME

        # Build two packages:
        # - First one exists in the default dir and the new dir; default should take priority.
        # - Second one only exists in the new dir.

        # First package.
        command.build('foo/multiple1', build_path)

        # First and second package in the new build dir.
        with patch.dict(os.environ, {'QUILT_PRIMARY_PACKAGE_DIR': new_build_dir}):
            command.build('foo/multiple1', simple_build_path)
            command.build('foo/multiple2', simple_build_path)

        # Cannot see the second package yet.
        with self.assertRaises(ImportError):
            from quilt.data.foo import multiple2

        # Now search the new build dir.
        dirs = 'foo/%s:%s' % (PACKAGE_DIR_NAME, new_build_dir)
        with patch.dict(os.environ, {'QUILT_PACKAGE_DIRS': dirs}):
            # Can import the second package now.
            from quilt.data.foo import multiple2

            # The first package contains data from the default dir.
            from quilt.data.foo import multiple1
            assert multiple1.dataframes

    def test_save(self):
        """Rebuilding from an in-memory package preserves and updates contents."""
        mydir = os.path.dirname(__file__)
        build_path = os.path.join(mydir, './build.yml')
        command.build('foo/package1', build_path)
        from quilt.data.foo import package1

        # Build an identical package
        command.build('foo/package2', package1)
        from quilt.data.foo import package2

        teststore = PackageStore(self._store_dir)

        # Fix: close the contents files deterministically instead of leaking
        # the handles from bare open(...).read() calls.
        with open(os.path.join(teststore.package_path(None, 'foo', 'package1'),
                               Package.CONTENTS_DIR,
                               package1._package.get_hash())) as fd1:
            contents1 = fd1.read()
        with open(os.path.join(teststore.package_path(None, 'foo', 'package2'),
                               Package.CONTENTS_DIR,
                               package2._package.get_hash())) as fd2:
            contents2 = fd2.read()
        assert contents1 == contents2

        # Rename an attribute
        package1.dataframes2 = package1.dataframes
        del package1.dataframes

        # Modify an existing dataframe
        csv = package1.dataframes2.csv._data()
        csv.at[0, 'Int0'] = 42

        # Add a new dataframe
        df = pd.DataFrame(dict(a=[1, 2, 3]))
        package1._set(['new', 'df'], df)
        assert package1.new.df._data() is df

        # Add a new file
        file_path = os.path.join(mydir, 'data/foo.csv')
        package1._set(['new', 'file'], 'data/foo.csv', build_dir=mydir)
        assert package1.new.file._data() == file_path

        # Add a new group
        package1._add_group('newgroup')
        assert isinstance(package1.newgroup, GroupNode)
        package1.newgroup._add_group('foo')
        assert isinstance(package1.newgroup.foo, GroupNode)

        # Overwrite a leaf node
        new_path = os.path.join(mydir, 'data/nuts.csv')
        package1._set(['newgroup', 'foo'], 'data/nuts.csv', build_dir=mydir)
        assert package1.newgroup.foo._data() == new_path

        # Overwrite the whole group
        package1._set(['newgroup'], 'data/nuts.csv', build_dir=mydir)
        assert package1.newgroup._data() == new_path

        # Built a new package and verify the new contents
        command.build('foo/package3', package1)
        from quilt.data.foo import package3

        assert hasattr(package3, 'dataframes2')
        assert not hasattr(package3, 'dataframes')

        new_csv = package3.dataframes2.csv._data()
        assert new_csv.xs(0)['Int0'] == 42

        new_df = package3.new.df._data()
        assert new_df.xs(2)['a'] == 3

        new_file = package3.new.file._data()
        assert isinstance(new_file, string_types)

    def test_set_non_node_attr(self):
        """Assigning a raw DataFrame as a node must raise AttributeError."""
        mydir = os.path.dirname(__file__)
        build_path = os.path.join(mydir, './build.yml')
        command.build('foo/package4', build_path)
        from quilt.data.foo import package4

        # Assign a DataFrame as a node
        # (should throw exception)
        df = pd.DataFrame(dict(a=[1, 2, 3]))
        with self.assertRaises(AttributeError):
            package4.newdf = df

    def test_load_update(self):
        """command.load returns a fresh module reflecting on-disk state."""
        # also tests dynamic import
        mydir = os.path.dirname(__file__)
        build_path = os.path.join(mydir, './build.yml')
        command.build('foo/package5', build_path)
        from ..data.foo import package5

        # make a copy, to prove we can
        newpkgname = 'foo/copied_package'
        command.build(newpkgname, package5)

        newfilename = 'myfile'+str(int(time.time()))
        with open(newfilename, 'w') as fh:
            fh.write('hello world1')

        module = command.load(newpkgname)
        module._set([newfilename], newfilename)
        command.build(newpkgname, module)

        # current spec requires that build() *not* update the in-memory module tree.
        newpath1 = getattr(module, newfilename)()
        assert newpath1 == newfilename

        # current spec requires that load() reload from disk, i.e. gets a reference
        # to the local object store
        # this is important because of potential changes to myfile
        reloaded_module = command.load(newpkgname)
        assert reloaded_module is not module
        newpath2 = getattr(reloaded_module, newfilename)()
        assert 'myfile' not in newpath2

    def test_multiple_updates(self):
        """Re-setting the same node keeps only the most recent value."""
        mydir = os.path.dirname(__file__)
        build_path = os.path.join(mydir, './build.yml')
        command.build('foo/package6', build_path)
        from ..data.foo import package6

        newfilename1 = 'myfile1'+str(int(time.time()))
        with open(newfilename1, 'w') as fh:
            fh.write('hello world1')

        package6._set([newfilename1], newfilename1)

        newfilename2 = 'myfile2'+str(int(time.time()))
        with open(newfilename2, 'w') as fh:
            fh.write('hello world2')

        package6._set([newfilename1], newfilename2)

        assert getattr(package6, newfilename1)() == newfilename2

    def test_team_non_team_imports(self):
        """Team and non-team packages with the same name stay distinct."""
        mydir = os.path.dirname(__file__)
        build_path1 = os.path.join(mydir, './build_simple.yml')
        command.build('myteam:foo/team_imports', build_path1)
        build_path2 = os.path.join(mydir, './build_empty.yml')
        command.build('foo/team_imports', build_path2)

        # Verify that both imports work, and packages are in fact different.
        from ..team.myteam.foo import team_imports as pkg1
        from ..data.foo import team_imports as pkg2

        assert hasattr(pkg1, 'foo')
        assert not hasattr(pkg2, 'foo')

    def test_team_set_non_node_attr(self):
        """Team-namespace variant of test_set_non_node_attr."""
        mydir = os.path.dirname(__file__)
        build_path = os.path.join(mydir, './build.yml')
        command.build('test:bar/package4', build_path)
        from quilt.team.test.bar import package4

        # Assign a DataFrame as a node
        # (should throw exception)
        df = pd.DataFrame(dict(a=[1, 2, 3]))
        with self.assertRaises(AttributeError):
            package4.newdf = df
| StarcoderdataPython |
8089216 | '''
Atomix project, orapp.py, (TODO: summary)
Copyright (c) 2015 Stanford University
Released under the Apache License v2.0. See the LICENSE file for details.
Author(s): <NAME>
'''
import abc
INFINITY = 999999
class OrApp:
    """Abstract application: a state machine whose states own dataflow
    actions (OrAxn) built from blocks; tracks timing (slack/throughput),
    wires between ports, and scheduled atoms.

    Subclasses must implement declareApp().
    """
    # NOTE(review): __metaclass__ is the Python 2 spelling; under Python 3
    # this has no effect (should be 'class OrApp(metaclass=abc.ABCMeta)').
    __metaclass__ = abc.ABCMeta
    def __init__(self):
        self.states = []          # all OrState objects of this app
        self.blocksUsed = []      # union of blocks used by any state
        self.atoms = [];
        self.blockCore = {} #store core assignments to blocks
        self.atomList = {}        # atom key -> OrAtom
        self.jump_decision = ["BlockDX", "BlockJ"];
        self.out_edges = {}
        self.in_edges = {}
        self.wires = {}           # (block id, state id, port) -> (block id, state id, port)
        self.slack = INFINITY     # app-level deadline slack
        self.thr = 0              # app-level throughput requirement
    def setThr(self, thr):
        # Record the app throughput and propagate it to every state.
        self.thr = thr
        # TODO: Distribute appropriate throughput reqs
        # to all states
        for state in self.states:
            state.setThr(thr)
    def setSlack(self, dead):
        # Record the app deadline slack and propagate it to every state.
        self.slack = dead
        # TODO: Distribute appropriate deadlines to states
        # For now, just send the same deadline to states
        for state in self.states:
            state.setSlack(dead)
    def addState(self, state):
        # Register a state (once) and absorb its blocks into blocksUsed.
        if state not in self.states:
            self.states.append(state)
        state_blocks = state.getBlocks()
        for stateblock in state_blocks:
            if stateblock not in self.blocksUsed:
                self.blocksUsed.append(stateblock)
    #Note: this assumes that you have called addState after adding
    # all actions to the state
    def getStateName(self, state_id):
        # Linear search by state id; None if not found.
        for state in self.states:
            if(state.IDval == state_id):
                return state.stateName
        return None
    def getStateByID(self, state_id):
        # Linear search by state id; None if not found.
        for state in self.states:
            if(state.IDval == state_id):
                return state
        return None
    ####### TODO FIX #######
    def wire(self, port1, port2, state1_id=-1, state2_id=-1):
        # Record an app-level wire keyed by (block id, state id, port name).
        print("Self Wire: ports of blocks %d %d"%(port1.pBlock.IDval, port2.pBlock.IDval))
        self.wires[(port1.pBlock.IDval, state1_id, port1.portname)] = (port2.pBlock.IDval, state2_id, port2.portname)
    def getAllWires(self):
        return self.wires
    def blockIsStateLess(self, bid):
        # True if the block with id *bid* has no internal state.
        # NOTE(review): implicitly returns None (falsy) after printing when
        # the block is unknown -- callers cannot distinguish that from False.
        for block in self.blocksUsed:
            if(block.IDval == bid):
                if(block.has_state == True):
                    return False
                else:
                    return True
        print("Error: Block with id %d not found\n" %bid)
    def getAtomsForBlock(self, bid):
        # Collect this block's scheduled atom from every state that has one.
        atoms_block = []
        for state in self.states:
            if(bid in state.axn.atoms.keys()):
                atoms_block.append(state.axn.atoms[bid])
        return atoms_block
    def getAtomsForBlockState(self, bid, stateID):
        # Same as getAtomsForBlock, restricted to one state.
        atoms_block = []
        for state in self.states:
            if(state.IDval == stateID):
                if(bid in state.axn.atoms.keys()):
                    atoms_block.append(state.axn.atoms[bid])
        return atoms_block
    def getDecisionAtoms(self):
        # All atoms of type "decision_t" registered in atomList.
        ret_atoms = []
        for at_key in self.atomList:
            atom = self.atomList[at_key]
            if(atom.atomType == "decision_t"):
                ret_atoms.append(atom)
        return ret_atoms
    # We need to know which state is the block in when drawing
    # edges out of a stateful block
    # def wire(self, state1, blocka, state2, blockb):
    #     sid1 = state1.IDval
    #     sid2 = state2.IDval
    #     bid1 = blocka.IDval
    #     bid2 = blockb.IDval
    #     if((sid1, bid1) in self.out_edges.keys()):
    #         self.out_edges[(sid1, bid1)].append((sid2, bid2))
    #     else:
    #         self.out_edges[(sid1, bid1)] = []
    #         self.out_edges[(sid1, bid1)].append((sid2, bid2))
    #     if((sid2, bid2) in self.in_edges.keys()):
    #         self.in_edges.append((sid1, bid1))
    #     else:
    #         self.in_edges[(sid2, bid2)] = []
    #         self.in_edges[(sid2, bid2)].append((sid1, bid1))
    # def wire(self, port1, port2):
    #     print("ports of blocks %d %d"%(port1.pBlock.IDval, port2.pBlock.IDval))
    #     start_block = self.getBlock(port1.pBlock.IDval)
    #     end_block = self.getBlock(port2.pBlock.IDval)
    #
    # if one of these blocks is stateful, we will need the state to which this belongs
    # for the stateless block, get the instance of this block in all states and then
    # draw wires from the stateful block
    # else draw wires from all instances of stateless blockA to state blockB
    # Store it as wires and edges.
    # When the time comes to insert fifos, during R2 generation, it will be easy
    #     if(start_block.hasState == False and end_block.hasState == False):
    # find these blocks in all states
    #         for state in self.States():
    #             if(state.hasBlock(start_block.IDval)):
    #                 self.wires[(start_block.IDval, port1.portname)] = end_block.IDval
    #                 self.out_edge(start_block, end_block)
    # store the wire for these ports
    #     self.wires[(start_block.IDval, port1.portname)] = end_block.IDval
    # def getExitEdges(self, sid, bid):
    #     if ((sid, bid) in self.out_edges.keys()):
    #         return self.out_edges[(sid, bid)]
    ##### TODO FIX END #######
    def getBlocks(self):
        return self.blocksUsed
    def getStates(self):
        return self.states
    def getInitState(self):
        # First state flagged as initial; implicitly None if none is flagged.
        for state in self.states:
            if(state.isInitState is True):
                return state
    def getNumStates(self):
        numstates = len(self.states)
        return numstates
    def getAtoms(self):
        return self.atoms
    @abc.abstractmethod
    def declareApp(self):
        # Subclasses build their states/blocks/wires here.
        pass
class OrIoPort:
    """A named, typed I/O port; base class for input and output ports."""

    def __init__(self, typename, port_name):
        # Store under the attribute names the rest of the code expects.
        self.portname = port_name
        self.porttype = typename
class OrIoPortCollection:
    # Namespace object holding a block's ports; starts with a 'null'
    # placeholder input port and is populated dynamically elsewhere.
    def __init__(self, pBlock):
        self.null = OrInpPort('null', 'null', pBlock)
class OrBlock:
    """Base hardware block with auto-assigned ids and I/O port collections."""

    # Monotonically increasing id shared by all OrBlock instances.
    curr_id = 1

    def __init__(self, inp_list=None, out_list=None, has_conf=False):
        # Fix: the original used mutable default arguments ([]) that were
        # shared by every instance constructed without explicit lists.
        self.inp_list = [] if inp_list is None else inp_list
        self.out_list = [] if out_list is None else out_list
        self.has_conf = has_conf
        self.IDval = OrBlock.curr_id
        self.inp = OrIoPortCollection(self)
        self.out = OrIoPortCollection(self)
        self.portnamearray = []
        OrBlock.curr_id += 1
        print("Created Block ID %d \n" %self.IDval)

    def getid(self):
        """Return this block's globally unique id."""
        return self.IDval

    def getExitStates(self):
        # This routine will check if this block is a rule block
        # If it is, it's conf is checked for state values to which it can jump
        # NOTE(review): self.cf / self.cf_val / self.blockName are assumed to
        # be assigned externally on rule blocks -- confirm with callers.
        state_ids = []
        if (self.has_conf == 1):
            for key in self.cf.keys():
                state_id = self.cf_val[key]
                state_ids.append(state_id)
        else:
            print("Is this an error? Block %s has no conf..\n" %self.blockName)
        return state_ids
class OrInpPort(OrIoPort):
    # Input port: an OrIoPort that also remembers its parent block.
    def __init__(self, port_type, port_name, pblock):
        OrIoPort.__init__(self, port_type, port_name)
        self.pBlock = pblock  # owning block (used when wiring ports)
        #print("initiated port %s for block id %d" %(port_name, pblock.IDval))
class OrOutPort(OrIoPort):
    # Output port: an OrIoPort that also remembers its parent block.
    # NOTE(review): identical to OrInpPort; kept separate presumably so the
    # direction is encoded in the type -- confirm before merging.
    def __init__(self, port_type, port_name, pblock):
        OrIoPort.__init__(self, port_type, port_name)
        self.pBlock = pblock  # owning block (used when wiring ports)
        #print("initiated port %s for block id %d" %(port_name, pblock.IDval))
class OrAtom:
    """A scheduled instance of a block, pinned to a core within one state.

    Tracks which FIFO is wired to each port (portFifo) plus, per FIFO, the
    atoms on the far side (inp: producers feeding us, outp: consumers we feed).
    """

    def __init__(self, blockType, parentDataFlowGraph, core, atomtype, stateid, atomname, pnamearray=None):
        self.block = blockType
        self.dfg = parentDataFlowGraph  # I don't know what to do with this; Kanthi
        self.coreID = core
        self.stateID = stateid
        self.atomName = atomname
        self.atomType = atomtype
        self.inp = {}    # fifo name -> atoms producing into that fifo
        self.outp = {}   # fifo name -> atoms consuming from that fifo
        # Fix: the original used a shared mutable default argument ([]).
        self.portnames = [] if pnamearray is None else pnamearray
        self.portFifo = {}  # port name -> fifo name attached to it

    def getPortnames(self):
        """Return this atom's list of port names."""
        return self.portnames

    def addInp(self, ffname, portname, atomname):
        """Record that FIFO *ffname* feeds port *portname* from *atomname*."""
        key = ffname  # +"_"+portname
        self.inp.setdefault(key, []).append(atomname)
        if portname in self.portFifo.keys():
            print("ERROR: port %s already has fifo %s \n" %(portname, ffname))
        self.portFifo[portname] = ffname

    def getPortFifo(self, portname):
        """Return the FIFO wired to *portname*; print error (None) if unset."""
        if portname not in self.portFifo.keys():
            print("ERROR: Atom %s Port %s has no fifo with it\n" %(self.atomName, portname))
        else:
            return self.portFifo[portname]

    #######################
    def addOutp(self, ffname, portname, end_atom, end_atom_port):
        """Wire FIFO *ffname* from *portname* to *end_atom*'s *end_atom_port*."""
        key = ffname  # +"_"+portname
        self.outp.setdefault(key, []).append(end_atom)
        if portname in self.portFifo.keys():
            print("ERROR: port %s already has fifo %s \n" %(portname, ffname))
        self.portFifo[portname] = ffname
        # reverse add: register ourselves as a producer on the consumer side
        end_atom.addInp(ffname, end_atom_port, self)

    def getDecisionPort(self):
        """Name of the implicit decision output port on decision atoms."""
        return "bOutDecision"
class OrAxn:
#def __init__(self, name):
def __init__(self):
#self.name = name
self.blocks = []
self.wires = {}
self.edges = []
self.num_blocks = 0
self.localID = {}
self.globalID = {}
self.atoms = {}
self.block_by_name = {}
self.block_by_id = {}
self.stateid = 0
#def addAtom(self, atomName, blockType):
# if atomName in self.atoms:
# raise Exception("Duplicate atom name %s in DataFlowGraph %s" % (atomName, self.name))
# atom = Atom(atomName, blockType, self)
# self.atoms[atomName] = atom
# return atom
def addAtom(self, blockid, atom):
self.atoms[blockid] = atom;
def add(self, block):
self.blocks.append(block)
self.block_by_name[block.name] = block
self.block_by_id[block.IDval] = block
self.localID[block.IDval] = self.num_blocks
self.globalID[self.num_blocks] = block.IDval
self.num_blocks = self.num_blocks + 1
#print("Adding block with ID %d and giving it index %d name %s"%(block.IDval, self.localID[block.IDval], block.name))
# reverse lookup is important
def getBlockGlobalNameID(self, bid):
for block in self.blocks:
if(self.localID[block.IDval] is bid):
return (block.IDval, block.name)
print("getBlockGlobalNameID: Invalid block id requested %d\n" %bid);
return (-1,"NULL");
def getBlockGlobalID(self, bid):
if(bid > 0 and bid < len(globalID)):
return globalID[bid];
else:
print("getBlockGlobalID: Error Invalid block id %d requested\n" %bid);
return -1;
def getBlock(self, bname):
if bname in self.block_by_name.keys():
return self.block_by_name[bname]
else:
print("getBlock: no block by name %s" %bname)
return None
def getBlockByID(self, bid):
if bid in self.block_by_id.keys():
return self.block_by_id[bid]
else:
print("getBlockByID: no block by id %d" %bid)
return None
def wire(self, port1, port2):
# self.wires.append((port1, port2))
print("ports of blocks %d %d"%(port1.pBlock.IDval, port2.pBlock.IDval))
print("WIRING: Port names %s %s\n" %(port1.portname, port2.portname))
start_block = self.getBlockByID(port1.pBlock.IDval)
end_block = self.getBlockByID(port2.pBlock.IDval)
# call the edge on these blocks
self.edge(start_block, end_block)
# store the wire for these ports
# simple error check
if((start_block.IDval, port1.portname) in self.wires.keys()):
print("Error: Connecting same port twice..\n")
else:
self.wires[(start_block.IDval, port1.portname)] = (end_block.IDval, port2.portname)
def edge(self, start_block, end_block):
# TODO: Check if this block exists
#print("Adding edge with ID %d %d " %(start_block.IDval, end_block.IDval))
#if(start_block.IDval in self.localID.keys() && end_block.IDval in self.localID.keys()):
id1 = self.localID[start_block.IDval]
id2 = self.localID[end_block.IDval]
#print("Index for id %d = %d\n" %(start_block.IDval, id1))
#print("Index for id %d = %d\n" %(end_block.IDval, id2))
# NOTE !! Edges are between private indices. Each action has a private
# index for a block, for ease of scheduling purposes
# If you want the edges in this action, according to the original blockids,
# you have to convert these indices to blockids
self.edges.append((id1, id2))
def getEdges(self, b_id):
# This block id is original ID.. get the equivalent of id in this state
if(b_id not in self.localID.keys()):
print("Error !! Invalid bid in getEdges %d\n"%b_id)
local_id = self.localID[b_id]
out_edge_ids = []
in_edge_ids = []
for edge in self.edges:
if(edge[0] == local_id):
out_edge_ids.append(self.globalID[edge[1]])
if(edge[1] == local_id):
in_edge_ids.append(self.globalID[edge[0]])
# Now we have global ids of these edge blocks
return (in_edge_ids, out_edge_ids)
def getWires(self, b_id):
port_wires = {}
for port_key in self.wires.keys():
if(port_key[0] == b_id):
port_wires[port_key[1]] = self.wires[port_key]
#print("getWires: %d %s\n" %(port_key[0], port_key[1]))
return port_wires
def getAllWires(self):
return self.wires
    def getFirstBlock(self):
        """Return the global ID of a block with no incoming edge.

        Falls off the end (returns None implicitly) if every block has an
        incoming edge.
        """
        # Determine the block which has no incoming edges.
        # That is the first block
        isFirst = []
        for bid in range(0, len(self.blocks)):
            isFirst.append(1)
        # Any edge target cannot be the first block.
        for edge in self.edges:
            s = edge[1];
            isFirst[s] = 0
        for bid in range(0, len(self.blocks)):
            if(isFirst[bid] == 1):
                #print("%d doesn't have an incoming edge.. blockid %d\n" %(bid, self.globalID[bid]));
                return self.globalID[bid]
    def getBenchMark(self, hwMgr):
        """Build the runtime matrix p[local_block_index][hw_index] from the benchmark file.

        Each benchmark line is "<blockID> <hwID> <runtime>"; pairs missing
        from the file are filled with INFINITY (module-level constant).
        Note: uses xrange, i.e. this module targets Python 2.
        """
        runtime = {}
        # For each of the blocks, read benchmark info from file
        # and populate the p graph
        # Open the benchmark file
        benchmark_file = hwMgr.getBenchMarkFile()
        f = open(benchmark_file, "r")
        for line in f:
            rec = line.split(" ")
            blID = int(rec[0])
            hwID = int(rec[1])
            bm = float(rec[2])
            runtime[(blID, hwID)] = bm
        f.close()
        # Get number of columns in this matrix.
        # Number of columns is same as number of hw instances
        num_hw = hwMgr.getNumHw()
        # From the runtime dict, extract p array
        blocks = self.getBlocks()
        #print("Setting up array with %d columns %d rows"%(num_hw, len(blocks)))
        p = [[0 for x in xrange(num_hw)] for x in xrange(len(blocks))]
        for block in blocks:
            for hwid in range(0, num_hw):
                #print("bid %d hwid %d\n" %(block.IDval, hwid))
                if((block.IDval, hwid) in runtime.keys()):
                    rtime = runtime[(block.IDval, hwid)]
                    p[self.localID[block.IDval]][hwid] = rtime
                else:
                    p[self.localID[block.IDval]][hwid] = INFINITY
        return p
    def getGraph(self):
        """Return the dependency edges as a list of (src_index, dst_index) pairs."""
        G = []
        # G is like an adjacency list
        # each row contains a dependency
        for edge in self.edges:
            G.append(edge)
            #print("Adding edge %s %s\n"%(edge[0], edge[1]))
        return G;
    def getBlocks(self):
        """Return the list of blocks belonging to this action."""
        return self.blocks
class OrState:
    """A state in the application graph, wrapping an action (axn) and a rule.

    Most accessors simply delegate to the wrapped action; slack and
    throughput are scheduling attributes set externally.
    """
    # Class-wide counter used to hand out unique state IDs.
    curr_id = 0
    def __init__(self, app, name):
        self.IDval = OrState.curr_id
        self.isInitState = False
        OrState.curr_id += 1
        #print 'Constructed state with id=%d' % self.IDval
        self.stateName = name
        self.appRef = app  # back-reference to the owning application
        self.state_slack = INFINITY
        self.state_thr = 0
    def ID(self):
        return self.IDval
    # just to keep it consistent
    def getID(self):
        return self.ID()
    def setAxnAndRule(self, axn, rule):
        # Attach the action/rule pair and record this state's ID on the action.
        self.axn = axn
        self.rule = rule
        self.axn.stateid = self.IDval
    def setAsInitState(self):
        self.isInitState = True
    def getAction(self):
        return self.axn
    def getRule(self):
        return self.rule
    # The following accessors delegate to the wrapped action.
    def getBlocks(self):
        return self.axn.getBlocks()
    def getEdges(self, blockid):
        return self.axn.getEdges(blockid)
    def getWires(self, blockid):
        return self.axn.getWires(blockid)
    def getFirstBlock(self):
        return self.axn.getFirstBlock()
    def getAllWires(self):
        return self.axn.getAllWires()
    # Scheduling attributes: slack (deadline headroom) and throughput.
    def setSlack(self, dd):
        self.state_slack = dd
    def getSlack(self):
        return self.state_slack
    def setThr(self, thr):
        self.state_thr = thr
    def getThr(self):
        return self.state_thr
    def getNextNodes(self):
        """Return the successor state objects reachable via this state's rule."""
        rBlock = self.rule
        exit_states = []
        # Get the configuration of this rBlock
        exit_state_ids = rBlock.getExitStates()
        for sid in exit_state_ids:
            state_ptr = self.appRef.getStateByID(sid)
            exit_states.append(state_ptr)
        return exit_states
| StarcoderdataPython |
4931999 | <filename>notebook/so.py
# ref: https://stackoverflow.com/a/46901839/4886384
# code by <NAME>
import numpy as np
def align_yaxis_np(axes):
    """Align zeros of the two axes, zooming them out by same ratio.

    ``axes`` is a sequence of matplotlib Axes objects; their y-limits are
    mutated in place via ``set_ylim``. No scaling is applied when every
    axis is entirely positive or entirely negative.
    """
    axes = np.array(axes)
    extrema = np.array([ax.get_ylim() for ax in axes])
    # reset for divide by zero issues
    for i in range(len(extrema)):
        if np.isclose(extrema[i, 0], 0.0):
            extrema[i, 0] = -1
        if np.isclose(extrema[i, 1], 0.0):
            extrema[i, 1] = 1
    # upper and lower limits
    lowers = extrema[:, 0]
    uppers = extrema[:, 1]
    # if all pos or all neg, don't scale
    all_positive = False
    all_negative = False
    if lowers.min() > 0.0:
        all_positive = True
    if uppers.max() < 0.0:
        all_negative = True
    if all_negative or all_positive:
        # don't scale
        return
    # pick "most centered" axis
    res = abs(uppers+lowers)
    min_index = np.argmin(res)
    # scale positive or negative part
    multiplier1 = abs(uppers[min_index]/lowers[min_index])
    multiplier2 = abs(lowers[min_index]/uppers[min_index])
    for i in range(len(extrema)):
        # scale positive or negative part based on which induces valid
        if i != min_index:
            lower_change = extrema[i, 1] * -1*multiplier2
            upper_change = extrema[i, 0] * -1*multiplier1
            if upper_change < extrema[i, 1]:
                extrema[i, 0] = lower_change
            else:
                extrema[i, 1] = upper_change
        # bump by 10% for a margin
        extrema[i, 0] *= 1.1
        extrema[i, 1] *= 1.1
    # set axes limits
    # (list comprehension used only for its set_ylim side effect)
    [axes[i].set_ylim(*extrema[i]) for i in range(len(extrema))]
| StarcoderdataPython |
6506199 | <gh_stars>0
from pangea.AST import Case, Property, Concat, When
class Pangea(object):
    """
    singleton to help with building Pangea ADTS
    """
    @classmethod
    def Case(cls, *args, **kwargs):
        # NOTE: a classmethod, unlike the instance-method builders below.
        return Case(*args, **kwargs)
    def __call__(self, *args, **kwargs):
        # Calling the singleton builds a Property node.
        return Property(*args, **kwargs)
    def Concat(self, *args):
        # Resolves to the module-level Concat AST class (no recursion);
        # note the args tuple is passed as a single argument.
        return Concat(args)
    def When(self, **kwargs):
        return When(**kwargs)
# NOTE(review): `AST` itself is never imported above (only names *from*
# pangea.AST are), so this line looks like it raises NameError at import
# time — TODO confirm and add `from pangea import AST` or import Else
# directly.
Else = AST.Else
# Module-level singleton used to build Pangea ASTs.
P = Pangea()
| StarcoderdataPython |
8039513 | import matplotlib.pyplot as plt
import numpy as np
from bandit import Bandit
from rpp import RelativePayoffProcedure
"""
Comparison of the Bandit Algorithm with and without inference.
"""
if __name__ == '__main__':
    # Experiment setup: k arms; `iterations` independent runs of 5000 steps.
    k = 5
    iterations = 20
    # NOTE(review): the literal 20 below duplicates `iterations` — keep in sync.
    avg_reward1 = np.zeros((20, 5000))
    avg_reward2 = np.zeros((20, 5000))
    actual_q = []
    estimated_q = []
    policy = []
    avg_best_reward = []
    action_values_init = np.random.uniform(low=-10, high=0, size=(k,))
    # actual reward should be negative, takes the normalized action-value of the state as reward
    action_values = [(action_values_init[a] - np.max(action_values_init)) / np.abs(np.min(action_values_init))
                     for a in range(len(action_values_init))]
    for alpha, epsilon in zip([10], [0.01]):
        for i in range(iterations):
            # Epsilon-greedy bandit vs. relative payoff procedure on the
            # same underlying action values.
            bdt = Bandit(k, epsilon, action_values, True)
            bdt2 = RelativePayoffProcedure(k, alpha, action_values)
            bdt.play(5000)
            bdt2.play(5000)
            if i == 0:
                avg_reward1[i] = bdt.avg_reward
                avg_reward2[i] = bdt2.avg_reward
                actual_q = action_values
                estimated_q = bdt.Q
                policy = bdt2.log_policy
                avg_best_reward = bdt.best_avg_reward
            else:
                # Accumulate element-wise sums; divided by `iterations` below.
                avg_reward1[i] = bdt.avg_reward
                avg_reward2[i] = bdt2.avg_reward
                actual_q = [x + y for x, y in zip(actual_q, action_values)]
                estimated_q = [x + y for x, y in zip(estimated_q, bdt.Q)]
                policy = [x + y for x, y in zip(policy, bdt2.log_policy)]
                avg_best_reward = [x + y for x, y in zip(avg_best_reward, bdt.best_avg_reward)]
        # Per-step 10th/50th/90th percentile bands across the runs.
        rew1 = np.array([np.percentile(avg_reward1[:, i], [10, 50, 90]) for i in range(5000)])
        rew2 = np.array([np.percentile(avg_reward2[:, i], [10, 50, 90]) for i in range(5000)])
        actual_q = [x / iterations for x in actual_q]
        estimated_q = [x / iterations for x in estimated_q]
        policy = [x / iterations for x in policy]
        avg_best_reward = [x / iterations for x in avg_best_reward]
        estimated_q2 = [(np.exp(alpha * actual_q[x] * policy[x])) / sum(actual_q) for x in range(len(actual_q))]
        # estimated_q2 = [np.log(estimated_q2[i]) for i in range(len(estimated_q2))]
        print("Actual average value-action"f'{actual_q}')
        print("Estimated average value-action (greedy)"f'{estimated_q}')
        print("Estimated average value-action (inference)"f'{estimated_q2}')
        plt.plot(rew1[:, 1], label=f"epsilon='{epsilon}'")
        plt.fill_between(np.arange(0, 5000), rew1[:, 0], rew1[:, 2], alpha=0.1)
        plt.plot(rew2[:, 1], linestyle='--', label=f"alpha='{alpha}'")
        plt.fill_between(np.arange(0, 5000), rew2[:, 0], rew2[:, 2], alpha=0.15)
    plt.xlabel("Steps")
    plt.ylabel(f"Average Reward for {iterations} iterations")
    plt.title(f"{k}-armed Bandit Testbed Comparison")
    plt.plot(avg_best_reward, linestyle='-.', label="best reward")
    plt.yscale('symlog', linthresh=0.01)
    plt.tight_layout()
    plt.grid(True)
    plt.legend()
    plt.show()
| StarcoderdataPython |
4945785 | # -*- Mode: Python -*-
import re
import coro
import coro.read_stream
from protocol import http_file, header_set, latch
# Shorthand for debug logging to stderr.
W = coro.write_stderr
class HTTP_Protocol_Error (Exception):
    """Base class for HTTP protocol-level failures raised by this module."""
    pass
class Bad_Response (HTTP_Protocol_Error):
    """Raised when a response status line cannot be parsed."""
    pass
# viewed at its core, HTTP is a two-way exchange of messages,
# some of which may have content associated with them.
# two different usage patterns for pipelined requests:
# 1) requests are made by different threads
# 2) requests are made by a single thread
#
# we accommodate both patterns here.
# for #2, use the lower-level send_request() method, for #1,
# use GET, PUT, etc...
class request:
    """One in-flight HTTP request and, once received, its reply.

    `q*` attributes hold the outgoing (query) side; `response`, `rheader`,
    `rfile` and `content` are filled in by the client's reader thread.
    Python 2 code (uses `has_key`).
    """
    def __init__ (self, method, uri, headers, content=None, force=True):
        self.method = method
        self.uri = uri
        self.qheaders = headers      # request headers (outgoing)
        self.latch = latch()         # released when the reply arrives
        self.force = force           # if True, body is read eagerly on wake()
        self.qcontent = content      # request body (outgoing)
        self.content = None          # reply body (filled when force=True)
        self.response = None         # raw status line
        self.rheader = None          # reply header_set
        self.rfile = None            # reply body stream (when has_body())
    def wake (self):
        "signal that a reply to this request has been received"
        if self.rfile and self.force:
            self.content = self.rfile.read()
        self.latch.wake_all()
        # When the caller streams the body itself, wait until it is drained
        # before letting the reader thread continue with the next reply.
        if self.rfile and not self.force:
            self.rfile.wait()
    def wait (self):
        "wait for the reply to be received. (if force=True wait for content as well)"
        return self.latch.wait()
    def abort (self):
        "abort this client request"
        self.latch.wake_all()
        if self.rfile:
            self.rfile.abort()
    def has_body (self):
        """Decide from the reply headers whether a message body follows."""
        # XXX duplicates logic from server.py:http_request
        h = self.rheader
        if h.has_key ('transfer-encoding'):
            return True
        else:
            probe = h.get_one ('content-length')
            if probe:
                try:
                    size = int (probe)
                    if size == 0:
                        return False
                    elif size > 0:
                        return True
                    else:
                        return False
                except ValueError:
                    return False
            elif h.test ('connection', 'close') and self.method == 'GET':
                # XXX unless 204
                return True
            else:
                return False
class client:
    """Pipelined HTTP/1.1 client built on the `coro` framework (Python 2).

    Requests are written on the calling thread; a dedicated reader thread
    matches replies to requests in FIFO order via `self.pending`.
    """
    def __init__ (self, host, port=80, conn=None, inflight=100):
        self.host = host
        # Bound the number of simultaneously outstanding requests.
        self.inflight = coro.semaphore (inflight)
        if conn is None:
            self.conn = coro.tcp_sock()
        else:
            self.conn = conn
        self.conn.connect ((host, port))
        self.stream = coro.read_stream.sock_stream (self.conn)
        self.pending = coro.fifo()
        coro.spawn (self.read_thread)
    def read_thread (self):
        """Reader loop: pop requests in send order and read their replies."""
        while 1:
            req = self.pending.pop()
            if req is None:
                break  # close() sentinel
            else:
                self._read_message (req)
                if not req.response:
                    break
                else:
                    req.wake()
    def close (self):
        """Unblock the reader thread with a sentinel and close the socket."""
        self.pending.push (None)
        self.conn.close()
    # "HTTP/1.1 200 OK" -> (version, code, reason)
    response_re = re.compile ('([^ ]+) ([0-9][0-9][0-9]) (.+)')
    def _read_message (self, req):
        """Parse status line + headers; attach a body stream when present."""
        line = self.stream.read_line()
        if not line:
            raise HTTP_Protocol_Error ('unexpected close')
        req.response = line[:-2]
        m = self.response_re.match (req.response)
        if not m:
            raise Bad_Response (req.response)
        else:
            req.version, req.reply_code, req.reason = m.groups()
        lines = []
        while 1:
            line = self.stream.read_line()
            if not line:
                raise HTTP_Protocol_Error ('unexpected close')
            elif line == '\r\n':
                break  # blank line ends the header block
            else:
                lines.append (line[:-2])
        req.rheader = h = header_set (lines)
        if req.has_body():
            req.rfile = http_file (h, self.stream)
    def send_request (self, method, uri, headers, content=None, force=False):
        """Queue and write one request; returns the pending `request` object."""
        try:
            self.inflight.acquire (1)
            req = request (method, uri, headers, content, force)
            self._send_request (method, uri, headers, content)
            self.pending.push (req)
            return req
        finally:
            self.inflight.release (1)
    def _send_request (self, method, uri, headers, content):
        """Write request line, headers, and body (string, sized, or chunked)."""
        if not headers.has_key ('host'):
            headers['host'] = self.host
        if content:
            if type(content) is str:
                headers['content-length'] = len(content)
            elif not headers.has_key ('content-length'):
                # Iterable body of unknown size -> chunked transfer coding.
                headers['transfer-encoding'] = 'chunked'
        req = (
            '%s %s HTTP/1.1\r\n'
            '%s\r\n' % (method, uri, headers)
            )
        self.conn.send (req)
        # XXX 100 continue
        if content:
            if type(content) is str:
                self.conn.send (content)
            elif headers.has_key ('content-length'):
                # Sized iterable body: verify the declared length is honoured.
                clen = int (headers.get_one ('content-length'))
                slen = 0
                for block in content:
                    self.conn.send (block)
                    slen += len(block)
                    if slen > clen:
                        raise HTTP_Protocol_Error ("content larger than declared length", clen, slen)
                else:
                    if slen != clen:
                        raise HTTP_Protocol_Error ("content smaller than declared length", clen, slen)
            else:
                # chunked encoding
                for block in content:
                    if block:
                        self.conn.writev (['%x\r\n' % (len (block),), block])
                self.conn.send ('0\r\n')
    def GET (self, uri, **headers):
        """Blocking GET; returns the request with `.content` populated."""
        headers = header_set().from_keywords (headers)
        req = self.send_request ('GET', uri, headers, force=True)
        req.wait()
        return req
    def GET_file (self, uri, **headers):
        """Blocking GET that leaves the body on `.rfile` for streaming."""
        headers = header_set().from_keywords (headers)
        req = self.send_request ('GET', uri, headers, force=False)
        req.wait()
        return req
    def PUT (self, uri, content, **headers):
        headers = header_set().from_keywords (headers)
        req = self.send_request ('PUT', uri, headers, content, force=True)
        req.wait()
        return req
    def POST (self, uri, content, **headers):
        headers = header_set().from_keywords (headers)
        req = self.send_request ('POST', uri, headers, content, force=True)
        req.wait()
        return req
| StarcoderdataPython |
8095724 | from contextlib import contextmanager
from unittest.mock import patch
import pytest
from progress_interface.base import default_config, progress_config, get_progress, iter_progress, \
NullProgressMonitor, REGISTRY
from progress_interface.test import TestProgressMonitor
from progress_interface.monitors import TqdmProgressMonitor, ClickProgressMonitor
@contextmanager
def no_import(name: str):
    """Temporarily make module *name* un-importable, even when installed.

    While active, ``sys.modules[name]`` is patched to ``None``, which makes
    ``import name`` raise ``ImportError``; the previous entry (or absence)
    is restored on exit.
    """
    patcher = patch.dict('sys.modules', {name: None})
    patcher.start()
    try:
        yield
    finally:
        patcher.stop()
@pytest.mark.parametrize('with_tqdm', [False, True])
class TestDefaultConfig:
    """Test the default_config() function and get_progress(True).

    Parametrized to run both with tqdm available (skipped when missing)
    and with tqdm forcibly hidden via no_import().
    """
    def test_default_config(self, with_tqdm):
        """Test default_config() function."""
        if with_tqdm:
            pytest.importorskip('tqdm')
            conf = default_config()
            assert conf.factory == TqdmProgressMonitor.create
        else:
            with no_import('tqdm'):
                # Missing tqdm should warn and fall back to the null monitor.
                with pytest.warns(UserWarning):
                    conf = default_config()
                assert conf.factory == NullProgressMonitor.create
    def test_progress_config_true(self, with_tqdm):
        """Test passing True as argument to progress_config()."""
        if with_tqdm:
            pytest.importorskip('tqdm')  # Skip if tqdm not available.
            config = progress_config(True, foo=1)
            assert config.factory == TqdmProgressMonitor.create
            assert config.kw == dict(foo=1)
        else:
            with no_import('tqdm'):
                with pytest.warns(UserWarning):
                    config = progress_config(True, foo=1)
                assert config.factory == NullProgressMonitor.create
                assert config.kw == dict(foo=1)
class TestProgressConfigFunc:
    """Test the progress_config() function.

    The case where arg=True is tested in TestDefaultConfig.
    """
    def test_null(self):
        """Test passing None and False as argument."""
        for arg in [None, False]:
            config = progress_config(arg)
            assert config.factory == NullProgressMonitor.create
    def test_cls(self):
        """Test passing AbstractProgressMonitor subclass as argument."""
        for cls in [NullProgressMonitor, TestProgressMonitor]:
            config = progress_config(cls, foo=1)
            assert config.factory == cls.create
            assert config.kw == dict(foo=1)
    def test_str(self):
        """Registered string keys resolve to their config, merging extra kwargs."""
        for key, config in REGISTRY.items():
            config2 = progress_config(key, foo=1)
            assert config2.factory == config.factory
            assert config2.kw == {**config.kw, 'foo': 1}
    def test_factory(self):
        """Test passing a factory function as argument."""
        def factory(total, *, initial=None, **kw):
            return TestProgressMonitor.create(total, initial=initial, foo=1, **kw)
        config = progress_config(factory, foo=1)
        assert config.factory == factory
        assert config.kw == dict(foo=1)
    def test_progressconfig(self):
        """Passing an existing ProgressConfig merges/overrides its kwargs."""
        config = TestProgressMonitor.config(foo=1, bar=2)
        config2 = progress_config(config, bar=20, baz=3)
        assert config2.factory == TestProgressMonitor.create
        assert config2.kw == dict(foo=1, bar=20, baz=3)
    def test_invalid(self):
        # Fixed: the parameter was misspelled `selfj`. pytest passes the
        # instance as the first positional argument regardless of its name,
        # so the test ran, but the conventional name is `self`.
        with pytest.raises(TypeError):
            get_progress(0, 100)
class TestGetProgress:
    """Test the get_progress() function.

    The case where arg=True is tested in TestDefaultConfig.
    """
    @pytest.fixture()
    def total(self):
        return 100
    @pytest.fixture(params=[0, 10])
    def initial(self, request):
        return request.param
    def test_null(self, total, initial):
        """Test passing None and False as argument."""
        for arg in [None, False]:
            assert isinstance(get_progress(arg, total, initial=initial), NullProgressMonitor)
    def test_cls(self, total, initial):
        """Test passing AbstractProgressMonitor subclass as argument."""
        for cls in [NullProgressMonitor, TestProgressMonitor]:
            monitor = get_progress(cls, total, initial=initial)
            assert isinstance(monitor, cls)
            if cls is not NullProgressMonitor:
                assert monitor.total == total
                assert monitor.n == initial
    def test_str(self, total, initial):
        """Registered string keys produce the corresponding monitor type."""
        # TODO - use a type that doesn't require 3rd-party library
        monitor = get_progress('click', total, initial=initial)
        assert isinstance(monitor, ClickProgressMonitor)
        assert monitor.total == total
        assert monitor.n == initial
    def test_factory(self, total, initial):
        """Test passing a factory function as argument."""
        def factory(total, *, initial=None, **kw):
            return TestProgressMonitor.create(total, initial=initial, foo=1, **kw)
        monitor = get_progress(factory, total, initial=initial, bar=2)
        assert isinstance(monitor, TestProgressMonitor)
        assert monitor.total == total
        assert monitor.n == initial
        assert monitor.kw == dict(foo=1, bar=2)
    def test_progressconfig(self, total, initial):
        """Test passing a ProgressConfig instance as argument."""
        config = TestProgressMonitor.config(foo=1)
        monitor = get_progress(config, total, initial=initial, bar=2)
        assert isinstance(monitor, TestProgressMonitor)
        assert monitor.total == total
        assert monitor.n == initial
        assert monitor.kw == dict(foo=1, bar=2)
    def test_invalid(self):
        # Fixed: the parameter was misspelled `selfj` (see TestProgressConfigFunc).
        with pytest.raises(TypeError):
            get_progress(0, 100)
@pytest.mark.parametrize('pass_total', [False, True])
@pytest.mark.parametrize('abort_early', [False, True])
def test_iter_progress(pass_total, abort_early):
    """Test the iter_progress() function."""
    import string
    items = string.ascii_letters
    abort_at = 10
    if pass_total:
        # A bare iterator has no len(); the caller must supply total explicitly.
        iterable = iter(items)
        total = len(items)
    else:
        iterable = items
        total = None
    with iter_progress(iterable, TestProgressMonitor, total=total, foo=1) as itr:
        assert isinstance(itr.monitor, TestProgressMonitor)
        assert itr.monitor.total == len(items)
        assert itr.monitor.kw == dict(foo=1)
        assert itr.monitor.n == 0
        assert not itr.monitor.closed
        for i, val in enumerate(itr):
            assert val == items[i]
            # Monitor count lags by one inside the loop body: it is advanced
            # after control returns to the iterator.
            assert itr.monitor.n == i
            assert not itr.monitor.closed
            if abort_early and i == abort_at:
                break
        if abort_early:
            assert i == abort_at
            assert itr.monitor.n == abort_at
            assert not itr.monitor.closed
        else:
            assert i == len(items) - 1
            assert itr.monitor.n == len(items)
            assert itr.monitor.closed
    assert itr.monitor.closed  # Always closed after exiting context
class TestRegister:
    """Test the register() function."""
    # TODO
    # NOTE(review): no assertions yet — the registration API is untested.
| StarcoderdataPython |
1856218 | <reponame>YTRedstone/Autoclicker-CPS-Tester<gh_stars>0
from tkinter import *
import mouse
import datetime
#############################################
#############################################
end = 0  # becomes "true" once the timing window has expired
lt55s = "False"  # string flag: whether now.second < 55 (see fivefivesec)
endtimesecond = "-1"  # "-1" means the end time has not been set yet
endtimeminute = "-1"
endtimehour = "-1"
endtimeday = "-1"
endtimemonth = "-1"
endtimeyear = "-1"
start = 1  # 1 until the first click has been processed
now = datetime.datetime.now()
xx = 0  # total clicks counted so far
time = 0  # NOTE(review): appears unused — confirm before removing
stext = 0  # NOTE(review): appears unused — confirm before removing
#############################################
###############################################
def fivefivesec():
    """Refuse to start a test during the last 5 seconds of a minute."""
    global lt55s
    global start
    now = datetime.datetime.now()
    if endtimesecond == "-1":
        # NOTE(review): the condition compares against "false", but the flag
        # is only ever assigned "False"/"true", so the loop is effectively
        # entered once via start == 1 — confirm this is intended.
        while lt55s == "false" or start == 1:
            start = 0
            if now.second >= 55:
                print("Please wait 5 seconds")
                lt55s = "False"
                text.delete(1.0, 2.0)
                text.insert(END, "Please wait 5 seconds")
            else:
                lt55s = "true"
                break
#############################################
def fail():
    """End the test and show clicks-per-second plus the total in the UI."""
    global end
    print("Failed")
    end = "true"
    xyy = str("CPS: ") + str(xx/imput) + str(" ") + str("Total Clicks: ") + str(xx)
    text.delete(1.0, 2.0)
    text.insert(END, xyy)
    text.pack()
#############################################
def setendtime():
    """On the first click, fix the absolute end time of the test window."""
    global endtimesecond
    global endtimeminute
    global endtimehour
    global endtimeday
    global endtimemonth
    global endtimeyear
    global imput
    now = datetime.datetime.now()
    if endtimesecond == "-1":
        # NOTE(review): now.second + imput + 1 can exceed 59, and each of the
        # coarser fields below is just +1, so tests crossing a minute/hour
        # boundary misbehave — TODO confirm and normalize with a timedelta.
        endtimesecond = int(now.second + imput + 1)
        #endtimesecond = 10
        endtimeminute = now.minute + 1
        endtimehour = now.hour + 1
        endtimeday = now.day + 1
        endtimemonth = now.month + 1
        endtimeyear = now.year + 1
#############################################
def checkendtime():
    """Compare the current time against the stored end time.

    Sets the global `end` to "no" while the window is still open, or calls
    fail() once any end-time component has been reached. Also dumps debug
    state to stdout on the still-open path.
    """
    global end
    global now
    now = datetime.datetime.now()
    nowsec = int(now.second)
    #nowsec = 9
    nowmin = int(now.minute)
    nowhor = int(now.hour)
    nowday = int(now.day)
    nowmon = int(now.month)
    nowyer = int(now.year)
    if int(endtimesecond) <= int(nowsec):
        fail()
    # else:
    #     end = "no"
    else:
        if int(endtimeminute) <= int(nowmin):
            fail()
        else:
            if int(endtimehour) <= int(nowhor):
                fail()
            else:
                if int(endtimeday) <= int(nowday):
                    fail()
                else:
                    if int(endtimemonth) <= int(nowmon):
                        fail()
                    else:
                        if int(endtimeyear) <= int(nowyer):
                            fail()
                        else:
                            end = "no"
                            # Debug dump of the current vs. end time fields.
                            print("Now Sec")
                            print(nowsec)
                            print("Now Min")
                            print(nowmin)
                            print("Now Hour")
                            print(nowhor)
                            print("Now Day")
                            print(nowday)
                            print("Now month")
                            print(nowmon)
                            print("Now year")
                            print(nowyer)
                            print("end sec")
                            print(endtimesecond)
                            print("end min")
                            print(endtimeminute)
                            print("end hour")
                            print(endtimehour)
                            print("end month")
                            print(endtimemonth)
                            print("end day")
                            print(endtimeday)
                            print("end year")
                            print(endtimeyear)
                            print("input")
                            print(imput)
                            print("---------------------------")
#############################################
def track():
    """Button callback: count one click and refresh the on-screen counter."""
    global end
    global xx
    now = datetime.datetime.now()
    fivefivesec()
    now = datetime.datetime.now()
    setendtime()
    now = datetime.datetime.now()
    checkendtime()
    now = datetime.datetime.now()
    # NOTE(review): the repeated `now = ...` assignments above are unused —
    # each helper recomputes its own time; confirm they can be removed.
    if end == "no":
        xx += 1
        text.delete(1.0, 2.0)
        text.insert(END, xx)
        text.pack()
    else:
        fail()
#############################################
#############################################
# Test duration in seconds, read from the console before the GUI starts.
imput = int(input("ammout of seconds: "))
#############################################
#############################################
# Minimal Tk UI: one click target and a one-line status/counter display.
master = Tk()
button = Button(master, text="Click Here", command=track)
button.pack()
text = Text(master, width=40, height=1)
text.pack()
mainloop()
| StarcoderdataPython |
1937296 | import gevent
from gevent.pywsgi import WSGIServer
from gevent.lock import Semaphore
from geventwebsocket.handler import WebSocketHandler, WebSocketError
from datetime import datetime
def websocket_app(environ, start_response):
    """WSGI app: echo every websocket message received on /ws/echo.

    Python 2 code (print statements). NOTE(review): paths other than
    /ws/echo fall through without calling start_response — confirm intended.
    """
    if environ["PATH_INFO"] == '/ws/echo':
        ws = environ["wsgi.websocket"]
        # Debug dump of the websocket object and the server's client table.
        print ws
        print ""
        print ws.handler
        print ""
        print ws.handler.server
        print ""
        print ws.handler.server.clients
        print ""
        print ws.handler.server.clients.values()
        # Echo loop: runs until the peer disconnects.
        while True:
            try:
                message = ws.receive()
                print message
                ws.send(message)
            except WebSocketError:
                break
| StarcoderdataPython |
6611215 | <reponame>alexanders0/farm-management
"""User views."""
# Django
from django.conf import settings
# Django REST Framework
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.parsers import MultiPartParser
# Permissions
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from farm_management.users.permissions import IsAccountOwner
# Serializers
from farm_management.users.serializers.profiles import ProfileModelSerializer
from farm_management.lands.serializers import LandModelSerializer
from farm_management.users.serializers import (
UserModelSerializer,
UserSignUpSerializer,
UserLoginSerializer,
VerifyPhoneSerializer
)
# Utilities
import jwt
# Models
from django.contrib.auth import get_user_model
from farm_management.lands.models import Land
# Resolve the active user model once at import time (respects AUTH_USER_MODEL).
User = get_user_model()
class UserViewSet(mixins.RetrieveModelMixin,
                  mixins.UpdateModelMixin,
                  viewsets.GenericViewSet):
    """User view set.

    Handle sign up, login and account verification.
    """
    serializer_class = UserModelSerializer
    queryset = User.objects.filter(is_active=True)
    # URLs address users by username, not primary key.
    lookup_field = "username"
    def get_permissions(self):
        """Assign permissions based on action."""
        if self.action in ['signup', 'login', 'verify']:
            permissions = [AllowAny]
        elif self.action in ['retrieve', 'update', 'partial_update', 'profile']:
            permissions = [IsAuthenticated, IsAccountOwner]
        else:
            permissions = [IsAuthenticated]
        return [permission() for permission in permissions]
    def get_serializer_class(self):
        """Return serializer based on action."""
        if self.action == 'signup':
            return UserSignUpSerializer
        if self.action == 'login':
            return UserLoginSerializer
        if self.action == 'verify_phone':
            return VerifyPhoneSerializer
        return UserModelSerializer
    def get_serializer_context(self):
        """Extend the default serializer context with the current request."""
        context = super(UserViewSet, self).get_serializer_context()
        context.update({'request': self.request})
        return context
    @action(detail=False, methods=['post'])
    def signup(self, request):
        """User sign up."""
        serializer_class = self.get_serializer_class()
        serializer = serializer_class(
            data=request.data,
            context=self.get_serializer_context()
        )
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        data = UserModelSerializer(user).data
        return Response(data, status=status.HTTP_201_CREATED)
    @action(detail=False, methods=['get'])
    def verify(self, request):
        """Account verification via the emailed JWT link."""
        # NOTE(review): request.GET['token'] raises KeyError (HTTP 500) when
        # the query parameter is missing — TODO confirm and return a 400.
        token = request.GET['token']
        try:
            # NOTE(review): PyJWT expects `algorithms` to be a *list*
            # (['HS256']); a plain string happens to pass its membership
            # check — TODO confirm and normalize.
            payload = jwt.decode(token, settings.SECRET_KEY, algorithms='HS256')
            user = User.objects.get(username=payload['user'])
            if not user.is_verified:
                user.is_verified = True
                user.save()
            if payload['type'] != 'email_confirmation':
                data = {'error': 'Invalid token.'}
                return Response(data, status=status.HTTP_400_BAD_REQUEST)
            data = {'message': 'Congratulation, now go and manage farms!'}
            return Response(data, status=status.HTTP_200_OK)
        except jwt.ExpiredSignatureError:
            data = {'error': 'Verification link has expired.'}
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        except jwt.PyJWTError:
            data = {'error': 'Invalid token.'}
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
    @action(detail=True, methods=['post'])
    def verify_phone(self, request, *args, **kwargs):
        """Phone number verification."""
        serializer_class = self.get_serializer_class()
        serializer = serializer_class(
            data=request.data,
            context={'user': self.get_object()}
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        data = {'message': 'Congratulation, you have verified your phone number!'}
        return Response(data, status=status.HTTP_200_OK)
    @action(detail=False, methods=['post'])
    def login(self, request):
        """User login; returns the user payload plus an auth token."""
        serializer_class = self.get_serializer_class()
        serializer = serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        user, token = serializer.save()
        data = {
            'user': UserModelSerializer(user).data,
            'token': token
        }
        return Response(data, status=status.HTTP_201_CREATED)
    @action(detail=True, methods=['put', 'patch'], parser_classes=(MultiPartParser, ))
    def profile(self, request, *args, **kwargs):
        """Update profile data (PATCH performs a partial update)."""
        user = self.get_object()
        profile = user.profile
        partial = request.method == 'PATCH'
        serializer = ProfileModelSerializer(
            profile,
            data=request.data,
            partial=partial
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        data = UserModelSerializer(user).data
        return Response(data)
    def retrieve(self, request, *args, **kwargs):
        """Add extra data to the response: the lands managed by the requester."""
        response = super(UserViewSet, self).retrieve(request, *args, **kwargs)
        lands = Land.objects.filter(manager=request.user)
        data = {
            'user': response.data,
            'lands': LandModelSerializer(lands, many=True).data
        }
        response.data = data
        return response
| StarcoderdataPython |
3394031 | #Задача № 6. Вариант 28
#Создайте игру, в которой компьютер загадывает название одного из шести континентов Земли, а игрок должен его угадать.
# <NAME>
#25.05.2016
import random

print("Компьютер загадывает название одного из шести континентов Земли, а игрок должен его угадать.")
kontinents = ('Евразия', 'Африка', 'Австралия', 'Антарктида', 'Южная Америка', 'Северная Америка')
# Fixed: random.randint(0, 6) could return 6, indexing past the end of the
# 6-element tuple and crashing with IndexError ~1 time in 7. random.choice
# picks a valid element directly.
ran = random.choice(kontinents)
otvet = None  # last guess; loop until it matches the secret continent
while otvet != ran:
    otvet = input("Введите название континента: ")
    if otvet != ran:
        print("Неверно, еще разок")
    else:
        print("Ну наконец-то.")
input("Нажмите Enter для выхода.")
8069410 | """Loads reference data to memory."""
import pkg_resources
import ujson
def load_dict() -> dict:
    """
    Loads reference data to dictionary.

    :return: dictionary of the syllable reference data
    """
    file_name = "data.json"
    file_path = pkg_resources.resource_filename(__name__, file_name)
    # Fixed: open the JSON with an explicit encoding so parsing does not
    # depend on the platform's locale default (e.g. cp1252 on Windows).
    with open(file_path, encoding="utf-8") as file:
        words = ujson.load(file)
    return words
| StarcoderdataPython |
1647169 | import pygame as pyg
import math
pyg.init()
# Colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
LIGHT_BLUE = (128, 191, 255)
LEFT = 1  # pygame mouse button number for a left click
FILE = "tree-diagram.txt"
# Log file recording every node value typed; closed at program exit.
tree_file = open(FILE, "w")
# Window size
SIZE = (1121, 485)
# Font info
FONT = pyg.font.SysFont('Calibri', 22, True, False)
def text(txt="", color=(0, 0, 0)):
    """Render *txt* with the module font; *color* is an RGB triple.

    Fixed: the default was a mutable list ([0, 0, 0]), which Python shares
    between calls — a classic pitfall. A tuple is safe, and pygame accepts
    any RGB sequence, so callers passing lists still work.
    """
    return FONT.render(txt, True, color)
screen = pyg.display.set_mode(SIZE)
# Grid System
# 21 columns x 9 rows of clickable cells (see WIDTH/HEIGHT/MARGIN below).
grid = [[0 for row in range(9)] for column in range(21)]
WIDTH = 45
HEIGHT = 45
MARGIN = 8
# Set Window Title
pyg.display.set_caption("Tree Diagram")
# Keep Window open
done = False
clock = pyg.time.Clock()
class Node:
    """A labelled circle on the grid, optionally linked to a parent node."""
    # Give grid coords, radius, and value for the node
    def __init__(self, coord=[0, 0], value="", radius=15, color=[0, 0, 0]):
        # NOTE(review): mutable default arguments ([0, 0] / [0, 0, 0]) are
        # shared between calls; harmless here since they are never mutated,
        # but worth replacing with tuples — TODO.
        self.screen = screen
        self.color = color
        self.value = value
        self.radius = radius
        self.width = 1  # circle outline thickness in pixels
        self.x = coord[0]
        self.y = coord[1]
        self.top_y = self.y - self.radius  # where the line to the parent attaches
        self.bot_y = self.y + self.radius  # where child lines attach
        self.parent = None
    def set_color(self, c):
        self.color = c
    def get_pos(self):
        return self.x, self.y
    def set_parent(self, parent):
        self.parent = parent
    def get_x(self):
        return self.x
    def get_bot_y(self):
        return self.bot_y
    def draw(self):
        """Draw the circle, its value, and the connector to the parent."""
        pyg.draw.circle(self.screen, self.color, [self.x, self.y], self.radius, self.width)
        self.screen.blit(text(self.value, self.color), [self.x-5, self.y-7])
        if self.parent is not None:
            pyg.draw.line(self.screen, BLACK, [self.x, self.top_y], [self.parent.get_x(), self.parent.get_bot_y()], 1)
def get_x(r):
    # Pixel x-centre of grid column r.
    # NOTE(review): WIDTH/2 is integer division under Python 2 (this file
    # uses Python 2 print statements elsewhere) — confirm before porting.
    return (MARGIN + WIDTH)*r + MARGIN + WIDTH/2
def get_y(cl):
    # Pixel y-centre of grid row cl (same integer-division caveat as get_x).
    return (MARGIN + HEIGHT)*cl + MARGIN + HEIGHT/2
nodes = []   # all Node objects created so far, in creation order
active = None  # currently selected (green) node, or None
while not done:
    for event in pyg.event.get():
        if event.type == pyg.QUIT:
            print "user asked to quit"
            done = True
        # When a key is pressed, create a node at mouse location with the key pressed as the value
        elif event.type == pyg.KEYDOWN:
            # DELETE clears the whole diagram; any other key adds a node.
            if event.key != pyg.K_DELETE:
                pos = pyg.mouse.get_pos()
                row = pos[0] // (WIDTH + MARGIN)
                column = pos[1] // (HEIGHT + MARGIN)
                nodes.append(Node([get_x(row), get_y(column)], pyg.key.name(event.key).upper()))
                tree_file.write(nodes[-1].value)
                # If another node is active, set it as parent for new node
                if active is not None:
                    nodes[-1].set_parent(active)
            else:
                nodes = []
        # When a node is clicked, activate or deactivate as needed
        elif event.type == pyg.MOUSEBUTTONDOWN:
            pos = pyg.mouse.get_pos()
            row = pos[0] // (WIDTH + MARGIN)
            col = pos[1] // (HEIGHT + MARGIN)
            x_pos = get_x(row)
            y_pos = get_y(col)
            for node in nodes:
                if node.get_pos() == (x_pos, y_pos):
                    if event.button == LEFT and active is None:
                        node.set_color(GREEN)
                        active = node
                    else:
                        node.set_color(BLACK)
                        active = None
    # Redraw: background grid first, then every node on top.
    screen.fill(BLACK)
    for row in range(21):
        for column in range(9):
            pyg.draw.rect(screen, LIGHT_BLUE, [get_x(row) - WIDTH/2, get_y(column) - HEIGHT/2, WIDTH, HEIGHT])
    for item in nodes:
        item.draw()
    pyg.display.flip()
    clock.tick(60)  # cap at 60 FPS
pyg.quit()
tree_file.close()
| StarcoderdataPython |
# Read the array length and the array itself from stdin.
n = int(input())
l = list(map(int,input().split()))
# Scan right-to-left: `ans` ends as the start index of the longest strictly
# increasing suffix of `l` (0 when the whole list is increasing).
ans = n-1
while ans>0 and l[ans-1]<l[ans]:
    ans-=1
print(ans)
| StarcoderdataPython |
8160925 | # Presenter layer responsibility
# 1. define presenter class which depends on output interface and it has emit method to call outputPort.emit()
# 2. define outputPort interface
from .output_port import DataSourceUseCaseOutputPort
| StarcoderdataPython |
9761029 | <filename>neural_net/neural_net.py
"""
Author: <NAME>
Neural Network Model
"""
import math
class NeuralNetwork(object):
    """
    Minimal feed-forward neural network with sigmoid activations and one
    backpropagation step for a single-hidden-layer network.

    A network is a list of layers; each layer is a list of neurons; each
    neuron is a list of weights whose last entry is the bias (inputs are
    implicitly extended with a constant 1).
    """
    @staticmethod
    def dot(v, w):
        """
        v_1 * w_1 + ... + v_n * w_n

        Fixed: declared static. The original bare method bound `self` to
        `v`, so every `self.dot(weights, inputs)` call raised TypeError.
        """
        return sum(v_i * w_i for v_i, w_i in zip(v, w))
    def sigmoid(self, x):
        """
        Logistic sigmoid: 1 / (1 + e^-x).
        """
        return 1/(1 + math.exp(-x))
    def neuron_output(self, weights, inputs):
        """
        Activation of one neuron: sigmoid(weights . inputs).
        """
        return self.sigmoid(self.dot(weights, inputs))
    def feed_forward(self, neural_network, input_vector):
        """
        Run input_vector through the network.

        Returns a list with one output vector per layer; the last element
        is the output layer's activations.
        """
        outputs = []
        for layer in neural_network:
            input_with_bias = input_vector + [1]  # constant bias input
            _output = [
                self.neuron_output(neuron, input_with_bias)
                for neuron in layer]
            outputs.append(_output)
            input_vector = _output  # this layer feeds the next
        return outputs
    def backpropagate(self, network, input_vector, targets):
        """
        One gradient-descent step; mutates `network` weights in place.

        Assumes exactly two layers (hidden + output), matching the tuple
        unpacking of feed_forward's result below.
        """
        hidden_outputs, outputs = self.feed_forward(network, input_vector)
        # delta = out * (1 - out) * (out - target): sigmoid derivative * error
        output_deltas = [
            output * (1 - output) * (output - target)
            for output, target in zip(outputs, targets)]
        # Adjust weights for output layer
        for i, output_neuron in enumerate(network[-1]):
            for j, hidden_output in enumerate(hidden_outputs + [1]):
                output_neuron[j] -= output_deltas[i] * hidden_output
        # Back propagate errors to hidden layer.
        # Fixed: the error term must use the output-layer *weights*
        # (network[-1]); the original indexed into `outputs`, a list of
        # floats, which raised TypeError on n[i].
        hidden_deltas = [
            hidden_output * (1 - hidden_output) *
            self.dot(output_deltas, [n[i] for n in network[-1]])
            for i, hidden_output in enumerate(hidden_outputs)]
        # Adjust weights of hidden layer
        for i, hidden_neuron in enumerate(network[0]):
            for j, inpu in enumerate(input_vector + [1]):
                hidden_neuron[j] -= hidden_deltas[i] * inpu
    def __init__(self):
        # Fixed: was misspelled __int__, so it never ran as the constructor;
        # also replaced super(self.__class__, self) — which recurses forever
        # under subclassing — with an explicit class reference.
        super(NeuralNetwork, self).__init__()
| StarcoderdataPython |
9650331 | <reponame>kanishkan/tce
# Copyright 2004 <NAME>.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""This package defines all user messages (warnings + errors), which will be
reported to user.
"""
class message_type(str):
    """A str subclass that also carries the message's identifier (e.g. 'W1000')."""
    def __new__(cls, value, identifier=None):
        # str is immutable, so the text must be fixed in __new__.
        return str.__new__(cls, value)
    def __init__(self, value, identifier=None):
        self.__identifier = identifier
    @property
    def identifier(self):
        """The W-code assigned to this message, or None."""
        return self.__identifier
    def __mod__(self, values):
        # Formatting returns a new message of the same class, preserving
        # both the concrete subclass (warning/error) and the identifier.
        text = super(message_type, self).__str__()
        return self.__class__(text % values, self.identifier)
class warning( message_type ):
    """Non-fatal message; rendered with the 'warning' prefix."""
    prefix = 'warning'
class compilation_error( message_type ):
    """Message about generated code that will not compile."""
    prefix = 'compilation error'
class execution_error( message_type ):
    """Message about generated code that compiles but fails at run time."""
    prefix = 'execution error'
# Catalogue of user-facing message templates. Each W<number> constant is a
# bare template here; the module-level loop below rebinds every one to a new
# instance whose text is prefixed with "<prefix> W####:" and which carries
# its identifier. '%s'/'%d' placeholders are filled by the message's caller.
W0000 = warning( '%s' ) #general message, usefull in few cases
W1000 = compilation_error(
    'Py++, by default, does not expose internal compilers declarations. '
    'Names of those declarations usually start with "__".' )
W1001 = compilation_error(
    'Py++, by default, does not expose internal declarations. '
    'GCC-XML reports that these declaration belong to "<internal>" header.' )
W1002 = compilation_error(
    'Py++, by default, does not expose compiler generated declarations.' )
W1003 = warning(
    'Virtual functions that returns const reference cannot be overridden from Python. '
    'Reason: boost::python::override::operator()(...) saves the result of the marshaling '
    '(from Python to C++) on the stack. Thus operator() returns reference '
    'to a temporary variable. Consider to use "Function Transformation" functionality '
    'to solve the problem.' )
W1004 = compilation_error(
    'Boost.Python library can not expose function, which takes as argument/returns '
    'pointer to function. '
    ' See http://www.boost.org/libs/python/doc/v2/faq.html#funcptr for more information.' )
W1005 = compilation_error(
    'Py++ cannot expose function that takes as argument/returns instance of non-public class. '
    'Generated code will not compile.' )
W1006 = compilation_error(
    'Py++ need your help to expose function that takes as argument/returns C++ arrays. '
    'Take a look on "Function Transformation" functionality and define the transformation.' )
W1007 = warning(
    'The function has more than %d arguments ( %d ). '
    'You should adjust BOOST_PYTHON_MAX_ARITY macro. '
    'For more information see: http://www.boost.org/libs/python/doc/v2/configuration.html' )
W1008 = warning(
    'The function returns non-const reference to "Python immutable" type. '
    'The value cannot be modified from Python. ' )
W1009 = execution_error(
    'The function takes as argument (name=%s, pos=%d) non-const reference '
    'to Python immutable type - function could not be called from Python. '
    'Take a look on "Function Transformation" functionality and define the transformation.' )
W1010 = execution_error(
    'The function introduces registration order problem. '
    'For more information about the problem read next document: '
    'http://language-binding.net/pyplusplus/documentation/functions/registration_order.html '
    'Problematic functions list: %s' )
W1011 = warning( "Py++ doesn't export private not virtual functions." )
W1012 = compilation_error( 'Py++ does not exports compiler generated constructors.' )
W1013 = compilation_error( "Py++ doesn't export private constructor." )
W1014 = compilation_error(
    '"%s" is not supported. '
    'See Boost.Python documentation: http://www.boost.org/libs/python/doc/v2/operators.html#introduction.' )
W1015 = compilation_error( "Py++ doesn't export private operators." )
W1016 = warning(
    'Py++ does not exports non-const casting operators with user defined type as return value. '
    'This could be change in future.' )
W1017 = compilation_error( "Py++ doesn't export non-public casting operators." )
W1018 = compilation_error( 'Py++ can not expose unnamed classes.' )
W1019 = compilation_error( 'Py++ can not expose private class.' )
# W1020-W1031: reasons why a wrapper class has to be generated.
W1020 = warning( "Py++ will generate class wrapper - hand written code should be added to the wrapper class" )
W1021 = warning( "Py++ will generate class wrapper - hand written code should be added to the wrapper class null constructor body" )
W1022 = warning( "Py++ will generate class wrapper - hand written code should be added to the wrapper class copy constructor body" )
W1023 = warning(
    "Py++ will generate class wrapper - there are few functions that should be redefined in class wrapper. "
    "The functions are: %s." )
W1024 = warning( 'Py++ will generate class wrapper - class contains "%s" - bit field member variable' )
W1025 = warning( 'Py++ will generate class wrapper - class contains "%s" - T* member variable' )
W1026 = warning( 'Py++ will generate class wrapper - class contains "%s" - T& member variable' )
W1027 = warning( 'Py++ will generate class wrapper - class contains "%s" - array member variable' )
W1028 = warning( 'Py++ will generate class wrapper - class contains definition of nested class "%s", which requires wrapper class' )
W1029 = warning( "Py++ will generate class wrapper - hand written code should be added to the wrapper class constructor body" )
W1030 = warning( 'Py++ will generate class wrapper - class contains "%s" - [pure] virtual member function' )
W1031 = warning( 'Py++ will generate class wrapper - user asked to expose non - public member function "%s"' )
W1032 = execution_error(
    "Boost.Python library does not support enums with duplicate values. "
    "You can read more about this here: "
    "http://boost.org/libs/python/todo.html#support-for-enums-with-duplicate-values . "
    "The quick work around is to add new class variable to the exported enum, from Python. " )
W1033 = compilation_error( "Py++ can not expose unnamed variables" )
W1034 = compilation_error( "Py++ can not expose alignment bit." )
W1035 = compilation_error( "Py++ can not expose static pointer member variables. This could be changed in future." )
W1036 = compilation_error( "Py++ can not expose pointer to Python immutable member variables. This could be changed in future." )
W1037 = compilation_error(
    "Boost.Python library can not expose variables, which are pointer to function."
    " See http://www.boost.org/libs/python/doc/v2/faq.html#funcptr for more information." )
W1038 = compilation_error( "Py++ can not expose variables of with unnamed type." )
W1039 = compilation_error( "Py++ doesn't expose private or protected member variables." )
W1040 = execution_error(
    'The declaration is unexposed, but there are other declarations, which refer to it. '
    'This could cause "no to_python converter found" run time error. '
    'Declarations: %s' )
W1041 = warning(
    'Property "%s" could not be created. There is another exposed declaration with the same name( alias )." '
    'The property will make it inaccessible.' )
W1042 = warning(
    'Py++ can not find out container value_type( mapped_type ). '
    'The container class is template instantiation declaration and not definition. '
    'This container class will be exported, but there is a possibility, that '
    'generated code will not compile or will lack some functionality. '
    'The solution to the problem is to create a variable of the class.' )
W1043 = warning( 'Py++ created an ugly alias ("%s") for template instantiated class.' )
W1044 = warning( 'Py++ created an ugly alias ("%s") for function wrapper.' )
W1045 = compilation_error(
    'Py++ does not expose static arrays with unknown size. '
    'You can fix this by setting array size to the actual one.'
    'For more information see "array_t" class documentation.' )
W1046 = warning(
    'The virtual function was declared with empty throw. '
    'Adding the ability to override the function from Python breaks the exception specification. '
    'The function wrapper can throw any exception. '
    'In case of exception in run-time, the behaviour of the program is undefined! ' )
W1047 = warning(
    'There are two or more classes that use same alias("%s"). '
    'Duplicated aliases causes few problems, but the main one is that some '
    'of the classes will not be exposed to Python.'
    'Other classes : %s' )
W1048 = warning(
    'There are two or more aliases within "pyplusplus::aliases" namespace for '
    'the class. Py++ selected "%s" as class alias. Other aliases: %s' )
W1049 = warning(
    'This method could not be overriden in Python - method returns reference '
    'to local variable!' )
W1050 = compilation_error(
    'The function returns "%s" type. You have to specify a call policies.'
    'Be sure to take a look on Py++ defined call policies: '
    'http://language-binding.net/pyplusplus/documentation/functions/call_policies.html#py-defined-call-policies' )
W1051 = warning(
    'The function takes as argument (name=%s, pos=%d) "%s" type. '
    'You have to specify a call policies or to use "Function Transformation" '
    'functionality.' )
W1052 = warning(
    'Py++ will not expose free operator "%s" - all classes, this operator works on, are excluded.' )
W1053 = warning(
    'Py++ will not expose function "%s" - the function has variable-argument list, spicified by ellipsis (...).' )
# Post-process the raw templates above: scan the module globals for names of
# the shape "W" + four digits, wrap each message's text with its
# "<prefix> W####:" header, attach the identifier, and collect every rebuilt
# instance in `all_warning_msgs`.
warnings = globals()
all_warning_msgs = []
for identifier, explanation in warnings.items():
    # Only exact 5-character names are candidates (e.g. "W1000").
    if len( identifier ) != 5:
        continue
    if identifier[0] != 'W':
        continue
    try:
        int( identifier[1:] )
    except:
        continue
    msg = '%s %s: %s' % ( explanation.__class__.prefix, identifier, str(explanation) )
    msg_inst = explanation.__class__( msg, identifier )
    # Rebind the global name to the fully formatted message instance.
    globals()[ identifier ] = msg_inst
    all_warning_msgs.append( msg_inst )
# Drop loop temporaries so they don't leak into the module namespace.
del warnings
del identifier
del explanation
if __name__ == '__main__':
    # Smoke test when run directly: format one parameterized message and
    # show one plain message. Uses print-as-function call syntax, which
    # behaves identically under Python 2 (single argument) and Python 3;
    # the original `print x` statements were Python-2-only.
    x = W1051 % ( 'xxxxxxxx', 122, 'yyyyyyyyyy' )
    print(x)
    print(x.__class__.__name__)
    print('\n\n\n')
    y = W1000
    print(y)
| StarcoderdataPython |
6670969 | <reponame>egonw/scheduled-bots
## Bot for adding Prop65 ID
from wikidataintegrator import wdi_core, wdi_login, wdi_helpers
from wikidataintegrator.ref_handlers import update_retrieved_if_new_multiple_refs
import pandas as pd
from pandas import read_csv
import requests
import time
from datetime import datetime
import copy
## Here are the object QIDs, assuming that a chemical is the subject
# Wikidata item ids for the toxicity categories a chemical can be listed under.
object_qid = {'femrep':'Q55427776',
              'menrep': 'Q55427774',
              'devtox': 'Q72941151',
              'cancer': 'Q187661',
              'reptox': 'Q55427767'}
# CSV column holding each category's listing date ("None" when absent).
# NOTE(review): 'devtox' points at the *Male* reproductive date column, not a
# developmental one — looks like a copy/paste slip; verify against the CSV.
list_date = {'femrep':'Female Reproductive Toxicity - Date of Listing',
             'menrep':'Male Reproductive Toxicity - Date of Listing',
             'devtox':'Male Reproductive Toxicity - Date of Listing',
             'cancer': 'None',
             'reptox': 'None'}
# NOTE(review): reassigned to "P7524" further down before any use — this
# initial value appears dead; confirm before removing.
list_prop = "P31"
def create_reference(prop65_url):
    """Build the Wikidata reference block (stated-in / retrieved / source URL)
    that is attached to every Prop 65 statement."""
    stated_in = wdi_core.WDItemID(value="Q28455381", prop_nr="P248", is_reference=True)
    now_string = datetime.now().strftime("+%Y-%m-%dT00:00:00Z")
    retrieved = wdi_core.WDTime(now_string, prop_nr="P813", is_reference=True)
    source_url = wdi_core.WDUrl(value=prop65_url, prop_nr="P854", is_reference=True)
    return [stated_in, retrieved, source_url]
import os  # BUG FIX: os.environ / os.listdir / os.path are used below but os was never imported

## Login for Scheduled bot
# Credentials come from scheduled_bots.local when available, otherwise from
# the WDUSER/WDPASS environment variables.
print("Logging in...")
try:
    from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
    if "WDUSER" in os.environ and "WDPASS" in os.environ:
        WDUSER = os.environ['WDUSER']
        WDPASS = os.environ['WDPASS']
    else:
        raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
# BUG FIX: `login` is passed to item.write() at the end of the script but was
# never created; establish the Wikidata session here.
login = wdi_login.WDLogin(WDUSER, WDPASS)
## Files should be downloaded to data/downloads
downloadpath = 'data/downloads/'
## Get the latest file: list every download, then pick the newest by creation time
filelist = [downloadpath + eachfilename for eachfilename in os.listdir(downloadpath)]
datasrc = max(filelist, key=os.path.getctime)
## Add Prop 65 CA IDs from CA OEHHA chemicals list Wikidata
chem_list = read_csv(datasrc, encoding = 'unicode_escape', header=0)
## Pull out only columns of interest for our task
cols_of_interest = chem_list[['Title','CAS Number','Cancer','Cancer - Listing Mechanism',
                              'Reproductive Toxicity','Chemical listed under Proposition 65 as causing',
                              'Developmental Toxicity - Date of Listing','Developmental Toxicity - Listing Mechanism',
                              'Female Reproductive Toxicity - Date of Listing',
                              'Female Reproductive Toxicity - Listing Mechanism',
                              'Male Reproductive Toxicity - Date of Listing',
                              'Male Reproductive Toxicity - Listing Mechanism']]
# Rows listed under no category at all are irrelevant to Prop 65.
prop_65_irrelevant = cols_of_interest.loc[(cols_of_interest['Cancer'] == "None") &
                                          (cols_of_interest['Reproductive Toxicity'] == "None") &
                                          (cols_of_interest['Chemical listed under Proposition 65 as causing'] == "None")]
non_prop_chems = prop_65_irrelevant['Title'].tolist()
prop65_chems = cols_of_interest.loc[~cols_of_interest['Title'].isin(non_prop_chems)].copy()
## To convert the title to a url stub, lower case it, strip out parenthesis, brackets, and commas, and replace spaces with dashes
prop65_chems['url_stub'] = prop65_chems['Title'].str.lower().str.replace("[","").str.replace("]","").str.replace(",","").str.replace("(","").str.replace(")","").str.strip("]").str.replace(".","").str.replace("&","").str.replace(" ","-")
prop65_chems.to_csv('data/prop65_chems.tsv',sep='\t',header=True, encoding='utf-8')
# Build a Mix'n'match-style catalog: Entry ID (url stub), name, description.
mixnmatch_cat = prop65_chems[['url_stub','Title','CAS Number']].copy()
mixnmatch_cat.rename(columns={'url_stub':'Entry ID','Title':'Entry name'}, inplace=True)
mixnmatch_cat['Entry description'] = mixnmatch_cat['Entry name'].astype(str).str.cat(mixnmatch_cat['CAS Number'].astype(str),sep=", CAS Number: ")
# Pull every Wikidata item that has a CAS registry number (P231).
sparqlQuery = "SELECT * WHERE {?item wdt:P231 ?CAS}"
result = wdi_core.WDItemEngine.execute_sparql_query(sparqlQuery)
cas_in_wd_list = []
i=0
while i < len(result["results"]["bindings"]):
    cas_id = result["results"]["bindings"][i]["CAS"]["value"]
    wdid = result["results"]["bindings"][i]["item"]["value"].replace("http://www.wikidata.org/entity/", "")
    cas_in_wd_list.append({'WDID':wdid,'CAS Number':cas_id})
    i=i+1
cas_in_wd = pd.DataFrame(cas_in_wd_list)
# keep=False drops *all* duplicated rows, so only unambiguous 1:1
# CAS<->item mappings survive.
cas_in_wd.drop_duplicates(subset='CAS Number',keep=False,inplace=True)
cas_in_wd.drop_duplicates(subset='WDID',keep=False,inplace=True)
prop_65_matches = mixnmatch_cat.merge(cas_in_wd,on='CAS Number',how='inner')
## Pull items already mapped to Prop 65 ID
sparqlQuery = "SELECT ?item ?CA65 WHERE {?item wdt:P7524 ?CA65}"
result = wdi_core.WDItemEngine.execute_sparql_query(sparqlQuery)
CA65_in_wd_list = []
i=0
while i < len(result["results"]["bindings"]):
    CA65_id = result["results"]["bindings"][i]["CA65"]["value"]
    wdid = result["results"]["bindings"][i]["item"]["value"].replace("http://www.wikidata.org/entity/", "")
    CA65_in_wd_list.append({'WDID':wdid,'Entry ID':CA65_id})
    i=i+1
CA65_in_wd = pd.DataFrame(CA65_in_wd_list)
## Remove items matched via mix n match from update
prop_65_less_mixnmatch = prop_65_matches.loc[~prop_65_matches['Entry ID'].isin(CA65_in_wd['Entry ID'].tolist())]
## Remove items with known url issues
bad_urls = read_csv('data/bad_urls.tsv',delimiter='\t',header=0, encoding='utf-8',index_col=0)
bad_urls_cas = bad_urls['CAS Number'].tolist()
prop_65_less_bad_urls = prop_65_less_mixnmatch.loc[~prop_65_less_mixnmatch['CAS Number'].isin(bad_urls_cas)]
prop65_to_add = prop_65_less_bad_urls
url_base = 'https://oehha.ca.gov/chemicals/'
list_prop = "P7524"
## Add Prop65 statements if there's a successful mapping
for i in range(len(prop65_to_add)):
    prop_65_qid = prop65_to_add.iloc[i]['WDID']
    prop_65_id = prop65_to_add.iloc[i]['Entry ID']
    prop_65_url = url_base+prop_65_id
    reference = create_reference(prop_65_url)
    prop65_statement = [wdi_core.WDString(value=prop_65_id, prop_nr=list_prop,
                                          references=[copy.deepcopy(reference)])]
    item = wdi_core.WDItemEngine(wd_item_id=prop_65_qid, data=prop65_statement, append_value=list_prop,
                                 global_ref_mode='CUSTOM', ref_handler=update_retrieved_if_new_multiple_refs)
    # NOTE(review): this requires an authenticated `login` session
    # (wdi_login.WDLogin) created earlier in the script — confirm it exists.
    item.write(login, edit_summary="added CA prop 65 id")
| StarcoderdataPython |
3270567 | from .base_test import TestDDNSUpdaterConfigHelper
from .google_synthetic_ddns_test import TestGoogleSyntheticDDNSConfigHelper
| StarcoderdataPython |
1959466 | # Helper functions for easy saving and loading of named results
import os
import pickle
import gzip
import errno
import inspect
import time
# All results are stored in a "pickles" directory next to this module.
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
PICKLES_PATH = os.path.join(CURRENT_DIR, 'pickles')
def _pickled_name(name):
    """Map a bare result name to its gzip-pickle path under PICKLES_PATH."""
    return os.path.join(PICKLES_PATH, name + '.pickle.gz')
def dump(filename, **kwargs):
    """Persist the keyword arguments (plus a creation timestamp) to
    pickles/<filename>.pickle.gz."""
    os.makedirs(PICKLES_PATH, exist_ok=True)
    target = _pickled_name(filename)
    metadata = {'date': time.ctime()}
    print('Saving to', target)
    print("Saved fields: ", ', '.join(sorted(kwargs.keys())))
    payload = {'metadata': metadata, 'data': kwargs}
    with gzip.GzipFile(target, 'wb') as f:
        pickle.dump(payload, f, 2)  # pickle protocol 2, as in the original
class StructFromDict(object):
    """Expose the keys of a dict as plain object attributes."""
    def __init__(self, d):
        # Copy every key/value pair onto the instance namespace.
        vars(self).update(d)
    def __repr__(self):
        # Mirror the underlying attribute dict for easy debugging.
        return repr(vars(self))
def load(name):
    """Read back a result saved by dump() and wrap its data for
    attribute-style access."""
    filename = _pickled_name(name)
    print('Loading', filename)
    with gzip.GzipFile(filename, 'rb') as f:
        payload = pickle.load(f)
    print('Creation time:', payload['metadata']['date'])
    return StructFromDict(payload['data'])
| StarcoderdataPython |
3377643 | <reponame>dyens/sdk-python
""" Audit.
Do not edit this file by hand.
This is generated by parsing api.html service doc.
"""
from ambra_sdk.exceptions.service import FilterNotFound
from ambra_sdk.exceptions.service import InvalidBucket
from ambra_sdk.exceptions.service import InvalidCondition
from ambra_sdk.exceptions.service import InvalidField
from ambra_sdk.exceptions.service import MissingFields
from ambra_sdk.exceptions.service import NotFound
from ambra_sdk.exceptions.service import NotPermitted
from ambra_sdk.service.query import QueryO
from ambra_sdk.service.query import QueryOP
from ambra_sdk.service.query import QueryOPF
class Audit:
    """Audit API namespace.

    Each method assembles request data and an error mapping, then returns a
    lazy Query object (QueryO / QueryOP / QueryOPF); nothing is sent to the
    server until the query is executed. This file is generated — keep edits
    in sync with the generator.
    """
    def __init__(self, api):
        self._api = api
    def object(
        self,
        uuid,
        customfield_detail=None,
        download=None,
        reverse=None,
    ):
        """Object.
        :param uuid: The uuid of the object to audit
        :param customfield_detail: Flag to include the customfield name in the detail (optional)
        :param download: Flag to create a zipped CSV file. A report_id will be returned and the file can be accessed via /report/status and /report/zip (optional)
        :param reverse: Flag to reverse the default sort order (optional)
        """
        request_data = {
           'reverse': reverse,
           'uuid': uuid,
           'customfield_detail': customfield_detail,
           'download': download,
        }
        errors_mapping = {}
        errors_mapping['FILTER_NOT_FOUND'] = FilterNotFound('The filter can not be found. The error_subtype will hold the filter UUID')
        errors_mapping['INVALID_CONDITION'] = InvalidCondition('The condition is not support. The error_subtype will hold the filter expression this applies to')
        errors_mapping['INVALID_FIELD'] = InvalidField('The field is not valid for this object. The error_subtype will hold the filter expression this applies to')
        errors_mapping['MISSING_FIELDS'] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping['NOT_FOUND'] = NotFound('The object was not found')
        errors_mapping['NOT_PERMITTED'] = NotPermitted('You are not permitted to access this object')
        query_data = {
            'api': self._api,
            'url': '/audit/object',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        # Responses are paginated over the "events" list.
        query_data['paginated_field'] = 'events'
        return QueryOPF(**query_data)
    def user(
        self,
        account_id,
        user_id,
        download=None,
        reverse=None,
    ):
        """User.
        :param account_id: The id of the account
        :param user_id: The id of the user to audit
        :param download: Flag to create a zipped CSV file. A report_id will be returned and the file can be accessed via /report/status and /report/zip (optional)
        :param reverse: Flag to reverse the default sort order (optional)
        """
        request_data = {
           'reverse': reverse,
           'download': download,
           'user_id': user_id,
           'account_id': account_id,
        }
        errors_mapping = {}
        errors_mapping['MISSING_FIELDS'] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping['NOT_FOUND'] = NotFound('The user was not found')
        errors_mapping['NOT_PERMITTED'] = NotPermitted('You are not permitted to access this user record')
        query_data = {
            'api': self._api,
            'url': '/audit/user',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        query_data['paginated_field'] = 'events'
        return QueryOP(**query_data)
    def account(
        self,
        account_id,
        download=None,
        reverse=None,
    ):
        """Account.
        :param account_id: The id of the account
        :param download: Flag to create a zipped CSV file. A report_id will be returned and the file can be accessed via /report/status and /report/zip (optional)
        :param reverse: Flag to reverse the default sort order (optional)
        """
        request_data = {
           'reverse': reverse,
           'download': download,
           'account_id': account_id,
        }
        errors_mapping = {}
        errors_mapping['FILTER_NOT_FOUND'] = FilterNotFound('The filter can not be found. The error_subtype will hold the filter UUID')
        errors_mapping['INVALID_CONDITION'] = InvalidCondition('The condition is not support. The error_subtype will hold the filter expression this applies to')
        errors_mapping['INVALID_FIELD'] = InvalidField('The field is not valid for this object. The error_subtype will hold the filter expression this applies to')
        errors_mapping['MISSING_FIELDS'] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping['NOT_FOUND'] = NotFound('The account was not found')
        errors_mapping['NOT_PERMITTED'] = NotPermitted('You are not permitted to access this information')
        query_data = {
            'api': self._api,
            'url': '/audit/account',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        query_data['paginated_field'] = 'events'
        return QueryOPF(**query_data)
    def deleted(
        self,
        account_id,
        type=None,
    ):
        """Deleted.
        :param account_id: The id of the account
        :param type: type
        Notes:
        type - The type of the object (Study OR User etc.)
        """
        request_data = {
           'account_id': account_id,
           'type': type,
        }
        errors_mapping = {}
        errors_mapping['MISSING_FIELDS'] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        errors_mapping['NOT_PERMITTED'] = NotPermitted('You are not permitted to access this record')
        query_data = {
            'api': self._api,
            'url': '/audit/deleted',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        query_data['paginated_field'] = 'objects'
        return QueryOP(**query_data)
    def log(
        self,
        bucket,
        logged_params,
    ):
        """Log.
        :param bucket: Name of the bucket to log to
        :param logged_params: Dict of parameters. They are logged to a message in the bucket
        """
        request_data = {
           'bucket': bucket,
        }
        if logged_params is not None:
            # NOTE(review): the '' prefix makes this format a no-op copy —
            # presumably a generator artifact kept for uniformity.
            logged_params_dict = {'{prefix}{k}'.format(prefix='', k=k): v for k,v in logged_params.items()}
            request_data.update(logged_params_dict)
        errors_mapping = {}
        errors_mapping['INVALID_BUCKET'] = InvalidBucket('The bucket name can only contain A-z characters and must be between 4 and 16 characters long')
        errors_mapping['MISSING_FIELDS'] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
        query_data = {
            'api': self._api,
            'url': '/audit/log',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        return QueryO(**query_data)
    def failedlogins(
        self,
        account_id,
        from_time=None,
    ):
        """Failedlogins.
        :param account_id: The id of the account
        :param from_time: Only return events after the epoch time (optional)
        """
        request_data = {
           'from_time': from_time,
           'account_id': account_id,
        }
        errors_mapping = {}
        query_data = {
            'api': self._api,
            'url': '/audit/failedlogins',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        return QueryO(**query_data)
| StarcoderdataPython |
9604958 | <gh_stars>1-10
import json
import boto3
import sys
import os
def upload_to_s3(file):
    """Upload *file* to the 'pi-camera-images' S3 bucket.

    AWS credentials are read from a local "crds.json" file containing
    AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. The object key is the
    file's basename.
    """
    # Context manager replaces the manual open()/close() pair.
    with open("crds.json") as f:
        setup = json.load(f)
    session = boto3.Session(
        aws_access_key_id=setup["AWS_ACCESS_KEY_ID"],
        aws_secret_access_key=setup["AWS_SECRET_ACCESS_KEY"]
    )
    # Let's use Amazon S3
    s3 = session.resource('s3')
    bucket_name = 'pi-camera-images'
    # BUG FIX: the original opened the data file and never closed it,
    # leaking the handle; `with` guarantees closure even on upload errors.
    with open(file, 'rb') as data:
        s3.Bucket(bucket_name).put_object(Key=os.path.basename(file), Body=data)
# [START]
def main():
    """CLI entry point: upload the file named by the first command-line argument."""
    target_file = sys.argv[1]
    upload_to_s3(target_file)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
11361393 | <filename>object_detection/utils/ssd/ssd512_config_resnet.py<gh_stars>1-10
import numpy as np
import collections
from .ssd_utils import generate_ssd_priors
# Prior-box configuration for an SSD512 detector (ResNet backbone variant).
SSDBoxSizes = collections.namedtuple('SSDBoxSizes', ['min', 'max'])
# One spec per feature map: its spatial size, shrinkage (input stride),
# the min/max box sizes, and the extra aspect ratios to generate priors for.
SSDSpec = collections.namedtuple('SSDSpec', ['feature_map_size', 'shrinkage', 'box_sizes', 'aspect_ratios'])
image_mean = np.array([127, 127, 127])  # RGB layout
image_std = 128.0
iou_threshold = 0.45
# Variances used when encoding/decoding box offsets relative to priors —
# TODO confirm they match the values used at training time.
center_variance = 0.1
size_variance = 0.2
image_size = 512
specs = [
    SSDSpec(64, 8, SSDBoxSizes(20.48, 61.2), [2]),
    SSDSpec(32, 16, SSDBoxSizes(61.2, 133.12), [2, 3]),
    SSDSpec(16, 32, SSDBoxSizes(133.12, 215.04), [2, 3]),
    SSDSpec(8, 64, SSDBoxSizes(215.04, 296.96), [2, 3]),
    SSDSpec(4, 128, SSDBoxSizes(296.96, 378.88), [2, 3]),
    SSDSpec(2, 256, SSDBoxSizes(378.88, 460.8), [2]),
    SSDSpec(1, 512, SSDBoxSizes(460.8, 542.72), [2])
]
# NOTE(review): alternative box sizes kept for reference — verify which set
# matches the trained model before switching.
# specs = [
#     SSDSpec(64, 8, SSDBoxSizes(35.84, 76.8), [2]),
#     SSDSpec(32, 16, SSDBoxSizes(76.8, 153.6), [2, 3]),
#     SSDSpec(16, 32, SSDBoxSizes(153.6, 230.4), [2, 3]),
#     SSDSpec(8, 64, SSDBoxSizes(230.4, 307.2), [2, 3]),
#     SSDSpec(4, 128, SSDBoxSizes(307.2, 384.0), [2, 3]),
#     SSDSpec(2, 256, SSDBoxSizes(384.0, 460.8), [2]),
#     SSDSpec(1, 512, SSDBoxSizes(460.8, 537.6), [2])
# ]
priors = generate_ssd_priors(specs, image_size) | StarcoderdataPython |
63300 | # Generated by Django 4.0 on 2021-12-16 19:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``link`` URL field to the Episode model."""
    dependencies = [
        ("episodes", "0021_auto_20210922_1012"),
    ]
    operations = [
        migrations.AddField(
            model_name="episode",
            name="link",
            # blank/null: the link is optional at both form and database level.
            field=models.URLField(blank=True, max_length=2083, null=True),
        ),
    ]
| StarcoderdataPython |
339802 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import pandas.testing as mt
import numpy as np
from databricks import koalas
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils
class SeriesStringTest(ReusedSQLTestCase, SQLTestUtils):
    """String-accessor (``Series.str``) parity tests: each operation is run
    on a Koalas Series and compared against the same call on pandas."""
    @property
    def pds1(self):
        """Pandas fixture covering the interesting string cases: mixed case,
        digits, the empty string, leading/trailing whitespace, and missing
        values (None / NaN)."""
        # BUG FIX: np.NaN is an alias that was removed in NumPy 2.0;
        # np.nan is the canonical spelling and exists in every version.
        return pd.Series(['apples', 'Bananas', 'carrots', '1', '100', '',
                          '\nleading-whitespace', 'trailing-whitespace \t',
                          None, np.nan])
    def check_func(self, func):
        """Apply *func* to both the Koalas and pandas versions of pds1 and
        assert the results match."""
        ks1 = koalas.from_pandas(self.pds1)
        mt.assert_series_equal(
            func(ks1).toPandas(),
            func(self.pds1),
            check_names=False
        )
    def test_string_add_str_num(self):
        pdf = pd.DataFrame(dict(col1=['a'], col2=[1]))
        ds = koalas.from_pandas(pdf)
        with self.assertRaises(TypeError):
            ds['col1'] + ds['col2']
    def test_string_add_assign(self):
        pdf = pd.DataFrame(dict(col1=['a', 'b', 'c'], col2=['1', '2', '3']))
        ds = koalas.from_pandas(pdf)
        ds['col1'] += ds['col2']
        pdf['col1'] += pdf['col2']
        self.assert_eq((ds['col1']).to_pandas(), pdf['col1'])
    def test_string_add_str_str(self):
        pdf = pd.DataFrame(dict(col1=['a', 'b', 'c'], col2=['1', '2', '3']))
        ds = koalas.from_pandas(pdf)
        self.assert_eq((ds['col1'] + ds['col2']).to_pandas(), pdf['col1'] + pdf['col2'])
        self.assert_eq((ds['col2'] + ds['col1']).to_pandas(), pdf['col2'] + pdf['col1'])
    def test_string_add_str_lit(self):
        pdf = pd.DataFrame(dict(col1=['a', 'b', 'c']))
        ds = koalas.from_pandas(pdf)
        self.assert_eq((ds['col1'] + '_lit').to_pandas(), pdf['col1'] + '_lit')
        self.assert_eq(('_lit' + ds['col1']).to_pandas(), '_lit' + pdf['col1'])
    def test_string_capitalize(self):
        self.check_func(lambda x: x.str.capitalize())
    def test_string_lower(self):
        self.check_func(lambda x: x.str.lower())
    def test_string_upper(self):
        self.check_func(lambda x: x.str.upper())
    def test_string_swapcase(self):
        self.check_func(lambda x: x.str.swapcase())
    def test_string_startswith(self):
        pattern = 'car'
        self.check_func(lambda x: x.str.startswith(pattern))
        self.check_func(lambda x: x.str.startswith(pattern, na=False))
    def test_string_endswith(self):
        pattern = 's'
        self.check_func(lambda x: x.str.endswith(pattern))
        self.check_func(lambda x: x.str.endswith(pattern, na=False))
    def test_string_strip(self):
        self.check_func(lambda x: x.str.strip())
        self.check_func(lambda x: x.str.strip('es\t'))
        self.check_func(lambda x: x.str.strip('1'))
    def test_string_lstrip(self):
        self.check_func(lambda x: x.str.lstrip())
        self.check_func(lambda x: x.str.lstrip('\n1le'))
        self.check_func(lambda x: x.str.lstrip('s'))
    def test_string_rstrip(self):
        self.check_func(lambda x: x.str.rstrip())
        self.check_func(lambda x: x.str.rstrip('\t ec'))
        self.check_func(lambda x: x.str.rstrip('0'))
    def test_string_get(self):
        self.check_func(lambda x: x.str.get(6))
        self.check_func(lambda x: x.str.get(-1))
    def test_string_isalnum(self):
        self.check_func(lambda x: x.str.isalnum())
    def test_string_isalpha(self):
        self.check_func(lambda x: x.str.isalpha())
    def test_string_isdigit(self):
        self.check_func(lambda x: x.str.isdigit())
    def test_string_isspace(self):
        self.check_func(lambda x: x.str.isspace())
    def test_string_islower(self):
        self.check_func(lambda x: x.str.islower())
    def test_string_isupper(self):
        self.check_func(lambda x: x.str.isupper())
    def test_string_istitle(self):
        self.check_func(lambda x: x.str.istitle())
    def test_string_isnumeric(self):
        self.check_func(lambda x: x.str.isnumeric())
    def test_string_isdecimal(self):
        self.check_func(lambda x: x.str.isdecimal())
| StarcoderdataPython |
80502 | <filename>bcbio/pipeline/datadict.py
"""
functions to access the data dictionary in a clearer way
"""
import toolz as tz
from bcbio.utils import file_exists
from bcbio.log import logger
import sys
# Registry of configuration accessors. Each entry maps a short name to:
#   keys:    path of nested dict keys into the data/config dictionary
#   default: (optional) value returned when the path is missing
#   checker: (optional) validator applied by the setter before writing
# The loop at the bottom of this module turns each entry into module-level
# get_<name>() / set_<name>() helper functions.
LOOKUPS = {
    "gtf_file": {"keys": ['genome_resources', 'rnaseq', 'transcripts'],
                 "checker": file_exists},
    "work_dir": {"keys": ['dirs', 'work']},
    "cores": {"keys": ["config", "algorithm", "num_cores"], "default": 1},
    "sample_name": {"keys": ['rgnames', 'sample']},
    "strandedness": {"keys": ['config', 'algorithm', 'strandedness'],
                     "default": "unstranded"},
    "work_bam": {"keys": ["work_bam"]},
    "ref_file": {"keys": ["reference", "fasta", "base"]},
    "dexseq_gff": {"keys": ['genome_resources', 'rnaseq', 'dexseq']},
    "fusion_mode": {"keys": ['config', 'algorithm', 'fusion_mode']},
    "dexseq_counts": {"keys": ['dexseq_counts']},
    "description": {"keys": ['description']},
    "quality_format": {"keys": ['config', 'algorithm', 'quality_format'],
                       "default": "standard"},
    "adapters": {"keys": ['config', 'algorithm', 'adapters'],
                 "default": []},
    "qsig_file": {"keys": ['genome_resources', 'variation', 'qsignature'],
                  "checker": file_exists},
    "mixup_check": {"keys": ["config", "algorithm", "mixup_check"],
                    "default": False}
}
def getter(keys, global_default=None):
    """Build an accessor that reads the nested *keys* path from a config dict.

    The returned ``lookup`` accepts a per-call ``default`` which, when falsy,
    falls back to *global_default*.
    """
    def lookup(config, default=None):
        effective = default if default else global_default
        return tz.get_in(keys, config, effective)
    return lookup
def setter(keys, checker):
    """Build an updater that writes *value* at the nested *keys* path.

    If *checker* is provided and rejects the value, the error is logged and the
    process exits with status 1 (hard exit, mirroring pipeline behaviour).
    """
    def update(config, value):
        if checker and not checker(value):
            logger.error("%s fails check %s." % (value, checker))
            sys.exit(1)
        return tz.update_in(config, keys, lambda x: value, default=value)
    return update
# Dynamically create module-level get_<name>/set_<name> accessor functions for
# every entry in LOOKUPS by injecting them into this module's globals().
_g = globals()
for k, v in LOOKUPS.items():
    keys = v['keys']
    _g["get_" + k] = getter(keys, v.get('default', None))
    _g["set_" + k] = setter(keys, v.get('checker', None))
| StarcoderdataPython |
1928319 | #!/usr/bin/env python
__author__ = '<EMAIL>'
import os
import sys
from common import Smali
from common import Java
from common import File
from common import DataFile
from common import AccessSmali
class NameToNum:
    """Applies method-name -> number renamings to a set of smali files.

    Parses "<name>.data" (produced by an earlier pass) describing, per smali
    class, which method names map to which numeric names, then rewrites every
    file in *smaliList* accordingly.
    """

    def __init__(self, name, path, smaliList):
        self.name = name                # base name of the .data file (no extension)
        self.path = path                # directory containing the .data and smali files
        self.smaliList = smaliList      # smali file names to rewrite
        self.accessSmaliSet = {}        # smali class name -> AccessSmali record
        self.getAccessSmaliSet()

    def getAccessSmaliSet(self):
        """Parse "<name>.data"; each line is: smaliName methodName number."""
        dFile = open(os.path.join(self.path, self.name+".data"), 'r')
        for line in dFile.readlines():
            tList = line.split()
            sName = tList[0]
            name = tList[1]
            num = tList[2]
            if (sName not in self.accessSmaliSet.keys()):
                self.accessSmaliSet[sName] = AccessSmali(sName)
            self.accessSmaliSet[sName].readMethod(name, num)
        dFile.close()

    def printAccessSmaliSet(self):
        # Debug helper: dump the parsed per-class method-name sets.
        for sName in self.accessSmaliSet.keys():
            self.accessSmaliSet[sName].printMethodNameSet()

    def doNameToNum(self):
        """Apply definition renames per class and call-site renames globally."""
        allMethodCallNameMap = {}
        for aSmali in self.accessSmaliSet.keys():
            self.accessSmaliSet[aSmali].createNameMap()
            callNameMap = self.accessSmaliSet[aSmali].methodCallNameMap
            for callName in callNameMap.keys():
                if callName not in allMethodCallNameMap.keys():
                    allMethodCallNameMap[callName] = callNameMap[callName]
                else:
                    # A call-site rename key must be globally unique across classes.
                    raise ValueError("method call name map duplicate")
        for s in self.smaliList:
            sFile = File(os.path.join(self.path, s))
            sName = Smali.getSmaliName(s)
            if sName in self.accessSmaliSet.keys():
                sFile.replaces(self.accessSmaliSet[sName].methodDefNameMap)
            sFile.replaces(allMethodCallNameMap)
#End of NameToNum
def NameToNumForOneFile(path):
    """Run the renaming for one .data file (a .smali path is translated first).

    The .data file is deleted once its renamings have been applied.
    NOTE(review): Python 2 source (print statements, cmp()).
    """
    if Smali.isSmali(path):
        path = Smali.getDataFilePath(path) #change smali path to data file path
    if DataFile.isDataFile(path) and os.path.exists(path):
        fDir = os.path.dirname(path)
        if cmp(fDir, "") == 0:
            fDir = "."
        name = DataFile.getDataFileName(path)
    else:
        return
    java = Java(fDir, name)
    #java.printJava()
    if java.getListLen() == 0:
        print "Can not find data file: "+os.path.join(java.path, java.name)+".data"
        return
    if False: print "NameToNum: "+os.path.join(java.path, java.name)+".data"
    toNum = NameToNum(java.name, java.path, java.smaliList)
    #toNum.printAccessSmaliSet()
    toNum.doNameToNum()
    os.remove(path)
def Usage():
    # Print command-line usage (Python 2 print statements).
    print "Usage: name2num.py aa/bb/A.data"
    print " name2num.py aa/bb/A.smali"
    print " name2num.py aa/bb"
print " name2num.py aa/bb"
if __name__ == '__main__':
    # Accepts a single argument: a .data/.smali file, or a directory to walk.
    argLen = len(sys.argv)
    if argLen == 2:
        path = sys.argv[1]
        if os.path.isfile(path) and (DataFile.isDataFile(path) or Smali.isSmali(path)):
            NameToNumForOneFile(path)
        elif os.path.isdir(path):
            # Recurse and process every .data file found under the directory.
            for root, dirs, files in os.walk(path):
                for sfile in files:
                    fPath = os.path.join(root, sfile)
                    if DataFile.isDataFile(fPath):
                        NameToNumForOneFile(fPath)
        else:
            Usage()
    else:
        Usage()
| StarcoderdataPython |
9790067 | from django.db import migrations, models
import mayan.apps.documents.models.document_version_models
import mayan.apps.storage.classes
class Migration(migrations.Migration):
    """Repoint DocumentVersion.file at the fake storage subclass with a
    UUID-based upload path (schema-only AlterField; no data migration)."""

    dependencies = [
        ('documents', '0047_auto_20180917_0737'),
    ]

    operations = [
        migrations.AlterField(
            model_name='documentversion',
            name='file',
            field=models.FileField(
                storage=mayan.apps.storage.classes.FakeStorageSubclass(),
                upload_to=mayan.apps.documents.models.document_version_models.UUID_FUNCTION,
                verbose_name='File'
            ),
        ),
    ]
| StarcoderdataPython |
11234964 | from django.db import models
# Create your models here.
class Book(models.Model):
    """Placeholder Django model; no fields defined yet."""
    pass
class Shelf(models.Model):
    """Placeholder Django model; no fields defined yet."""
    pass
| StarcoderdataPython |
8186249 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 15:43:09 2020
@author: meco
"""
import sys
import unittest
import inspect
import io
import pep8
from datetime import datetime
from contextlib import redirect_stdout
from models.user import User
class TestUser(unittest.TestCase):
    """
    class for testing User class' methods
    """

    @classmethod
    def setUpClass(cls):
        """
        Set up class method for the doc tests
        """
        cls.setup = inspect.getmembers(User, inspect.isfunction)

    def test_pep8_conformance_User(self):
        """
        Test that user.py file conform to PEP8
        """
        # NOTE(review): the `pep8` package is deprecated in favour of `pycodestyle`.
        pep8style = pep8.StyleGuide(quiet=True)
        result = pep8style.check_files(['models/user.py'])
        self.assertEqual(result.total_errors, 0,
                         "Found code style errors (and warnings).")

    def test_pep8_conformance_test_user(self):
        """
        Test that test_user.py file conform to PEP8
        """
        pep8style = pep8.StyleGuide(quiet=True)
        result = pep8style.check_files(['tests/test_models/test_user.py'])
        self.assertEqual(result.total_errors, 0,
                         "Found code style errors (and warnings).")

    def test_module_docstring(self):
        """
        Tests if module docstring documentation exist
        """
        # NOTE(review): this actually checks User.__doc__ (the class docstring),
        # not the module's; consider sys.modules[User.__module__].__doc__.
        self.assertTrue(len(User.__doc__) >= 1)

    def test_class_docstring(self):
        """
        Tests if class docstring documentation exist
        """
        self.assertTrue(len(User.__doc__) >= 1)

    def test_func_docstrings(self):
        """
        Tests if methods docstring documntation exist
        """
        for func in self.setup:
            self.assertTrue(len(func[1].__doc__) >= 1)

    def setUp(self):
        """Set up method for User class
        """
        self.Us = User()

    def tearDown(self):
        """Initialized User class
        """
        self.Us = None

    def test_type(self):
        """type checks for user model
        """
        self.assertEqual(issubclass(self.Us.__class__, User), True)
        self.assertEqual(isinstance(self.Us, User), True)
        self.assertEqual(isinstance(self.Us, User), True)
        self.assertEqual(type(self.Us), User)

    def test_basic_attribute_set(self):
        """Basic attribute tests for user model
        """
        self.Us.first_name = 'Meco'
        self.Us.last_name = 'Montes'
        self.assertEqual(self.Us.first_name, 'Meco')
        self.assertEqual(self.Us.last_name, 'Montes')

    def test_email(self):
        """tests the user's email attribute
        """
        self.assertEqual(type(User.email), str)

    def test_password(self):
        """tests the user's password attribute
        """
        self.assertEqual(type(User.password), str)

    def test_first_name(self):
        """tests the user's first_name attribute
        """
        self.assertEqual(type(User.first_name), str)

    def test_last_name(self):
        """tests the user's last_name attribute
        """
        self.assertEqual(type(User.last_name), str)

    def test_string_return(self):
        """tests the str method
        """
        string = str(self.Us)
        Usid = "[User] ({})".format(self.Us.id)
        test = Usid in string
        self.assertEqual(True, test)
        test = "updated_at" in string
        self.assertEqual(True, test)
        test = "created_at" in string
        self.assertEqual(True, test)
        test = "datetime.datetime" in string
        self.assertEqual(True, test)

    def test_to_dict(self):
        """tests the to_dict method
        """
        my_dict = self.Us.to_dict()
        self.assertEqual(str, type(my_dict['created_at']))
        self.assertEqual(my_dict['created_at'],
                         self.Us.created_at.isoformat())
        self.assertEqual(datetime, type(self.Us.created_at))
        self.assertEqual(my_dict['__class__'],
                         self.Us.__class__.__name__)
        self.assertEqual(my_dict['id'], self.Us.id)

    def test_from_dict_basic(self):
        """tests the from_dict method
        """
        my_dict = self.Us.to_dict()
        Us1 = User(**my_dict)
        self.assertEqual(Us1.id, self.Us.id)
        self.assertEqual(Us1.updated_at, self.Us.updated_at)
        self.assertEqual(Us1.created_at, self.Us.created_at)
        self.assertEqual(Us1.__class__.__name__,
                         self.Us.__class__.__name__)

    def test_from_dict_hard(self):
        """tests from dict method of user inherited from base_model
        """
        self.Us.student = 'Science'
        my_dict = self.Us.to_dict()
        self.assertEqual(my_dict['student'], 'Science')
        Us1 = User(**my_dict)
        self.assertEqual(Us1.created_at, self.Us.created_at)
| StarcoderdataPython |
1879523 | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.common.partial_infer.utils import int64_array
from mo.front.tf.graph_utils import create_op_node_with_second_input
from mo.graph.graph import Graph
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.unsqueeze import Unsqueeze
class MXTileReplacer(MiddleReplacementPattern):
    """
    Aligns Tile operation from MxNet framework with OpenVINO Tile

    MxNet has no restrictions for `tile_array` input of `Tile` operation.
    If len(tile_array) > rank(data), this transformation will insert Unsqueeze before Tile operation,
    because in this case output_shape > input_shape

    DOC link: https://beta.mxnet.io/api/ndarray/_autogen/mxnet.ndarray.tile.html#mxnet.ndarray.tile
    """

    enabled = True

    def pattern(self):
        # Match every Tile node; whether a fix is needed is decided in replace_pattern.
        return dict(
            nodes=[
                ('tile', dict(kind='op', op='Tile'))
            ],
            edges=[]
        )

    @staticmethod
    def replace_pattern(graph: Graph, match: dict):
        node = match['tile']
        name = node.soft_get('name', node.id)
        in_shape = node.in_port(0).data.get_shape()
        out_shape = node.out_port(0).data.get_shape()
        # Rank deficit between the Tile output and its data input.
        tile_array_diff = len(out_shape) - len(in_shape)
        if tile_array_diff == 0:
            return
        assert tile_array_diff > 0,\
            'Unexpected difference between rank(input) and rank(output) for node {}'.format(name)
        # Prepend singleton dims so the data rank matches the tile_array length.
        unsqueeze_dims = int64_array(range(tile_array_diff))
        unsqueeze = create_op_node_with_second_input(graph, Unsqueeze, unsqueeze_dims,
                                                     dict(name=name + '/Unsqueeze', override_output_shape=True))
        node.in_port(0).get_connection().insert_node(unsqueeze)
| StarcoderdataPython |
4981929 | <reponame>roemmele/answerquest<filename>qna_server/server.py
import sys
import os
root_dir = os.path.realpath('../')
sys.path.insert(0, root_dir)
import warnings
warnings.filterwarnings('ignore')
import argparse
import json
from flask import Flask, request, render_template, jsonify
from answerquest import QnAPipeline
app = Flask(__name__)
qna_pipeline = None
@app.route('/')
@app.route('/index')
def index():
    """Serve the demo UI page."""
    return render_template('index.html')
@app.route('/qg_with_qa', methods=['POST'])
def serve_qna_items():
    """POST endpoint: generate question/answer pairs for 'input_text'.

    On any failure a single explanatory "question" is returned instead of an
    HTTP error, so the UI always receives a well-formed JSON payload.
    """
    try:
        input_text = request.values['input_text']
        (sent_idxs,
         questions,
         answers) = qna_pipeline.generate_qna_items(input_text,
                                                    filter_duplicate_answers=True,
                                                    filter_redundant=True,
                                                    sort_by_sent_order=True)
    except Exception as e:
        print(e)
        questions = ['''An error occurred. Perhaps either no Q&A pairs found because text is too short, \
or a memory error occurred because the text is too long. Try using a different text.''']
        answers = [""]
    return jsonify({'questions': questions, 'answers': answers})
if __name__ == '__main__':
    # Parse model/tokenizer paths, build the Q&A pipeline, then start Flask.
    parser = argparse.ArgumentParser(description="Run server that generates Q&A pairs.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--qg_tokenizer_path", "-qg_tokenizer_path",
                        help="Specify path to HuggingFace GPT2Tokenizer config files.",
                        type=str, required=True)
    parser.add_argument("--qg_model_path", "-qg_model_path",
                        help="Specify path to PyTorch question generation model.",
                        type=str, required=True)
    parser.add_argument("--qa_model_path", "-qa_model_path",
                        help="Specify path to PyTorch question answering model.",
                        type=str, required=True)
    parser.add_argument("--port", "-port",
                        help="Specify port number for server.",
                        type=int, required=False, default=8082)
    args = parser.parse_args()
    qna_pipeline = QnAPipeline(args.qg_tokenizer_path,
                               args.qg_model_path,
                               args.qa_model_path)
    # NOTE(review): debug=True on host 0.0.0.0 exposes the Werkzeug debugger to
    # the whole network — disable debug for any non-local deployment.
    app.run(port=args.port, host="0.0.0.0", debug=True)
| StarcoderdataPython |
8080140 | <reponame>LXie502/point_based_pcgc
import math
import torch.nn as nn
import torch
import torch.nn.functional as F
class HyperEncoder(nn.Module):
    """Three-layer MLP hyper-encoder.

    Maps features of size ``input_dim`` down to ``input_dim // 32`` through
    widths input_dim -> input_dim -> input_dim//4 -> input_dim//32, taking the
    absolute value of the input first and applying ReLU between layers.
    """

    def __init__(self, input_dim):
        super(HyperEncoder, self).__init__()
        self.input_dim = input_dim
        self.fc1 = nn.Linear(input_dim, input_dim)
        self.fc2 = nn.Linear(input_dim, input_dim // 4)
        self.fc3 = nn.Linear(input_dim // 4, input_dim // 32)

    def forward(self, x):
        """Encode *x*; last layer has no activation."""
        hidden = torch.abs(x)
        hidden = F.relu(self.fc1(hidden))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
| StarcoderdataPython |
1638608 | import random
import numpy as np
from sum_tree import SumTree
class Memory:
    """Prioritized experience replay buffer backed by a SumTree.

    Samples transitions with probability proportional to
    (|TD error| + error_multiplier) ** alpha and returns importance-sampling
    weights computed with a beta exponent annealed toward 1.0.
    """

    def __init__(self, tree_memory_length, error_multiplier=0.01, alpha=0.6, beta=0.4, beta_increment_per_sample=0.001):
        self.tree = SumTree(tree_memory_length)
        self.tree_memory_length = tree_memory_length
        self.error_multiplier = error_multiplier  # keeps priorities strictly positive
        self.per_alpha = alpha
        self.per_beta_init = beta
        self.beta = beta  # current annealed beta; grows toward 1.0 as sampling proceeds
        self.beta_increment_per_sample = beta_increment_per_sample

    def _get_priority(self, error):
        """Map a TD error to a sampling priority (always > 0)."""
        return (np.abs(error) + self.error_multiplier) ** self.per_alpha

    def add_sample_to_tree(self, error, sample):
        """Insert *sample* with priority derived from its TD *error*."""
        priority = self._get_priority(error)
        self.tree.add(priority, sample)

    def sample_tree(self, num_samples):
        """Draw *num_samples* transitions, stratified over the priority mass.

        Returns (batch, tree_indices, importance_sampling_weights).
        """
        batch = []
        idxs = []
        segment = self.tree.sum_of_tree() / num_samples
        priorities = []
        # FIX: beta was previously recomputed from its initial value on every
        # call (min(1, init + increment)), so it never annealed. It now grows
        # by beta_increment_per_sample per sampling call, capped at 1.0.
        self.beta = np.min([1.0, self.beta + self.beta_increment_per_sample])
        for i in range(num_samples):
            # One uniform draw per equal-mass segment (stratified sampling).
            a = segment * i
            b = segment * (i + 1)
            sample = random.uniform(a, b)
            idx, priority, data = self.tree.get_sample(sample)
            priorities.append(priority)
            batch.append(data)
            idxs.append(idx)
        # FIX: a Python list cannot be divided by a scalar; convert first.
        sampling_prob = np.asarray(priorities) / self.tree.sum_of_tree()
        is_weight = np.power(self.tree.num_entries * sampling_prob, -self.beta)
        is_weight /= is_weight.max()
        return batch, idxs, is_weight

    def update_tree(self, idx, error):
        """Refresh the priority of the sample stored at tree index *idx*."""
        priority = self._get_priority(error)
        self.tree.update_priority(idx, priority)
| StarcoderdataPython |
6529847 | <filename>gradchange.py
import argparse

__author__ = 'MattC'

# Command line: -path points at the directory of workbooks to annotate.
parser = argparse.ArgumentParser(description= \
    'Script to batch plot all Itime plots, keyword time')
parser.add_argument('-path', help='Path where files to be plotted are', required=True)
args = parser.parse_args()
print ("The path you want to use is %s" % args.path)
#print ('This will plot %s with %s on the x-axis and %s on the y-axis' % (args.f, args.x, args.y))
path = args.path
# NOTE(review): assert is stripped under `python -O`; an explicit check with
# sys.exit would be safer for input validation.
assert path.endswith('/'), 'Path needs to end with /'
import os
import xlrd
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
#print (os.path.abspath(path))
targetdir = (path + 'Annotated_Images') #need to make the full path externally to the otherwise the if stmnt doesn't work - makedirs creates the dir in it's home folder
if not os.path.exists(targetdir):
    print ('Doesn\'t think it is there')
    os.makedirs(targetdir)
    print('The folder %s was created' % targetdir)
else:
    print ('Thinks it is there')
    print('The folder %s already exists' % targetdir)
# NOTE(review): with topdown=False the walk yields bottom-up, so after this
# loop `root`/`xlsfiles` hold only the LAST directory visited (the top-level
# `path` itself); matches from earlier iterations are overwritten.
for root,dir,files in os.walk(path, topdown=False): #the topdown tells the script to look at the collection of files directly in path last
    xlsfiles=[f for f in files if 'time' in f] #this ensures break files are not pulled through
    #print (xlsfiles)
def reducelist(a):
    """Return the first value from *a* for each distinct integer part.

    Example: [1.2, 1.9, 2.5] -> [1.2, 2.5]; first-occurrence order is kept.
    """
    reduced = []
    seen = set()  # integer parts already emitted; O(1) membership test
    # FIX: the original rebuilt set(roundedlist) on every iteration (O(n^2));
    # a single maintained set gives identical results in O(n).
    for value in a:
        key = int(value)
        if key not in seen:
            reduced.append(value)
            seen.add(key)
    return reduced
# Axis-label text for each plottable column name.
labeldict = {'DrainI' : "Current $I_{ds}$ (A)", 'DrainV' : "Voltage $V_{ds}$ (V)", 'GateV' : "GateV (V)", 'Time' : "Time (s)"} #defines the label wanted for each type of plot
for f in xlsfiles:
    try:
        # Load the first worksheet into a list of rows; drop the header row.
        wb = xlrd.open_workbook(os.path.join(root, f))
        s = (len(wb.sheets()) - 2)
        sheet1 = wb.sheet_by_index(0)
        rip = [[sheet1.cell_value(r, c) for c in range(sheet1.ncols)] for r in range(sheet1.nrows)]
        data = rip
        ripasarray = np.asarray(rip)
        del data[0]
        datanew = np.asarray(data)
        x = 'Time'
        y = 'DrainI'
        # Column indices for the Time and DrainI columns (looked up in the header row).
        xcol = np.where( x == ripasarray[0,:])
        xcolint = xcol[0][0]
        ycol = np.where( y == ripasarray[0,:])
        ycolint = ycol[0][0]
        timevalues = datanew[:, xcol]
        currentvalues = datanew[:, ycol]
        T = []
        I = []
        for i in timevalues:
            T.append(i[0][0])
        npT = np.array(T)
        for i in currentvalues:
            I.append(i[0][0])
        npI = np.array(I)
        # Numerical dI/dt; points beyond 3 sigma (after t=5 s) mark abrupt current steps.
        dT = np.gradient(npT)
        npdI = np.gradient(npI, dT, edge_order=2) #just do this with T and dT plot dI/dT then for T then filter the noise out the outliers are the PC response
        sigma = np.std(npdI)
        mean = np.mean(npdI)
        dI = npdI.tolist()
        #for i in npdI:
        #dI.append(i[0])
        #dIvsT = np.column_stack(T, dI)\
        biggrads = []
        for i,j in zip(T,dI):
            if j > (mean + (3 * sigma)) and i > 5:
                biggrads.append(i)
        biggradstouse = reducelist(biggrads)
        Iatbig = []
        for i,j in zip(T,I):
            if i in biggradstouse:
                Iatbig.append(j)
        smallgrads = []
        for i,j in zip(T,dI):
            if j < (mean - (3 * sigma)) and i > 5:
                smallgrads.append(i)
        smallgradstouse = reducelist(smallgrads)
        Iatsmall = []
        for i,j in zip(T,I):
            if i in smallgradstouse:
                Iatsmall.append(j)
        #plt.locator_params(axis='y', nbins=20) #don't know if this is doing anything
        figsingle = plt.figure()
        axsingle = figsingle.add_subplot(1, 1, 1)
        axsingle.set_prop_cycle(cycler('color', ['r', 'r']) + cycler('linestyle', ['-', '--']))
        axsingle.plot( datanew[:, xcolint], datanew[:, ycolint]) # does placement of this matter
        maxval = max(datanew[:, ycolint])
        minval = min(datanew[:, ycolint])
        majorLocator = MultipleLocator((maxval-minval)/20) #OR IF THIS IS
        axsingle.yaxis.set_major_locator(majorLocator)
        #axsingle.plot( datanew[:, xcolint], datanew[:, ycolint]) # does placement of this matter
        axsingle.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
        axsingle.tick_params(axis='both', direction='out', top='off', right='off')
        axsingle.set_xlabel(labeldict[x])
        #axsingle.set_xlabel(filestoplot[f]['x'])
        axsingle.set_ylabel(labeldict[y])
        #axsingle.set_ylabel(filestoplot[f]['y'])
        axsingle.set_title(f, y = 1.05)
        axsingle.axis('tight')
        axsingle.axhline(y=0, color='k', linestyle='solid')
        axsingle.axvline(x=0, color='k', linestyle='solid')
        axsingle.grid('on', 'both', 'y')
        # Annotate the current value at each detected step, arrowed above the trace.
        for i,j in zip(biggradstouse, Iatbig):
            axsingle.annotate(('%.4E' % j), xy = (i,j), xytext = (i,(j + 3 * ((maxval-minval)/20))), arrowprops=dict(facecolor='black', shrink=0.05), annotation_clip = False)
        for i,j in zip(smallgradstouse, Iatsmall):
            axsingle.annotate(('%.4E' % j), xy = (i,j), xytext = (i,(j + 3 * ((maxval-minval)/20))), arrowprops=dict(facecolor='black', shrink=0.05), annotation_clip = False)
        figsingle.savefig('%sAnnotated_Images/%s_annotated.png' % (path, f))
        plt.close()
    except Exception as e:
        # Skip unreadable/odd workbooks but report which file failed and why.
        print(f)
        print(e)
        continue
| StarcoderdataPython |
3332348 | <reponame>dylanlee101/leetcode
'''
根据每日 气温 列表,请重新生成一个列表,对应位置的输出是需要再等待多久温度才会升高超过该日的天数。如果之后都不会升高,请在该位置用 0 来代替。
例如,给定一个列表 temperatures = [73, 74, 75, 71, 69, 72, 76, 73],你的输出应该是 [1, 1, 4, 2, 1, 1, 0, 0]。
提示:气温 列表长度的范围是 [1, 30000]。每个气温的值的均为华氏度,都是在 [30, 100] 范围内的整数。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/daily-temperatures
'''
class Solution:
    def dailyTemperatures(self, T: List[int]) -> List[int]:
        """For each day, return how many days until a warmer temperature (0 if never).

        Monotonic stack of indices whose answer is still unknown; temperatures
        at stacked indices are non-increasing.
        """
        answer = [0] * len(T)
        pending = []  # indices awaiting a warmer day
        for day, temp in enumerate(T):
            while pending and temp > T[pending[-1]]:
                earlier = pending.pop()
                answer[earlier] = day - earlier
            pending.append(day)
        return answer
3228307 | <filename>main.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pickle
import json
import datetime
import pprint
import copy
from entity_linking import *
from violation_detection import *
class NumpyArrayEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to native Python types."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else falls through to the base encoder (raises TypeError).
        return super().default(obj)
def get_slot_values(tokens, labels):
    """Group consecutive identically-labelled tokens into slot-value strings.

    Labels are raw slot types ('O' means no slot). Returns a dict mapping each
    slot type to the list of space-joined token spans, in order of appearance.
    """
    assert len(tokens) == len(labels), f'tokens:{tokens}\nlabels:{labels}'
    collected = dict()
    current_label = None
    current_words = []

    def flush():
        # Close the open span, if any, and record it under its label.
        if current_label is not None:
            collected.setdefault(current_label, []).append(' '.join(current_words))

    for word, label in zip(tokens, labels):
        if label == 'O':
            flush()
            current_label, current_words = None, []
        elif label == current_label:
            current_words.append(word)
        else:
            # Label changed (or a new span starts): close the old, open a new.
            flush()
            current_label, current_words = label, [word]
    flush()
    return collected
def get_slot_values_BIO(tokens, labels):
    """Extract slot values from BIO-tagged tokens.

    An I- tag that does not continue a chunk of the same type is first
    rectified to B-. Returns a dict mapping slot type to the list of
    space-joined chunk strings, in order of appearance.
    """
    assert len(tokens) == len(labels), f'tokens:{tokens}\nlabels:{labels}'
    # Rectification pass: orphan I- tags become chunk starts.
    fixed = []
    for tag in labels:
        if tag.startswith('I-') and not (fixed and fixed[-1][2:] == tag[2:]):
            fixed.append('B-' + tag[2:])
        else:
            fixed.append(tag)

    result = dict()
    chunk_type = None
    chunk_words = []

    def close_chunk():
        if chunk_type is not None:
            result.setdefault(chunk_type, []).append(' '.join(chunk_words))

    for word, tag in zip(tokens, fixed):
        if tag == 'O':
            close_chunk()
            chunk_type, chunk_words = None, []
        elif tag.startswith('B-'):
            close_chunk()
            chunk_type, chunk_words = tag[2:], [word]
        else:
            # I- tag: guaranteed by rectification to continue the open chunk.
            chunk_words.append(word)
    close_chunk()
    return result
def get_constraints(bot_definition):
    """Build the full constraint list and the per-intent constraint mapping.

    Adds one synthetic global "closedType_<slot>" IN-constraint per entry in
    bot_definition['distinct_slot_values'], appended after the user-defined
    constraints. Returns (all_constraints, constraints_for_intent).
    """
    distinct_slot_values = bot_definition['distinct_slot_values']
    # One closed-vocabulary constraint per slot with a known value list.
    closed_type_constraints = [
        {
            'name': f"closedType_{slot_var}",
            "isGlobal": True,
            "slots": [slot_var],
            "disjunctions": [
                [{
                    "slotVariable": slot_var,
                    "operator": "IN",
                    "values": values,
                }]
            ],
        }
        for slot_var, values in distinct_slot_values.items()
    ]
    all_constraints = bot_definition["constraints"] + closed_type_constraints
    # A constraint applies to an intent when it is global or lists the intent.
    constraints_for_intent = {
        intent: [c for c in all_constraints
                 if c['isGlobal'] or intent in c['intents']]
        for intent in bot_definition['main_intents']
    }
    return all_constraints, constraints_for_intent
def run_pipeline(entity_linking_method, entity_linking_threshold=None, is_BIO=False):
    """Run NLU outputs through entity linking, state tracking and violation detection.

    Reads module-level globals built in __main__: test_ids, nlu_result,
    bot_definition, distinct_slot_values, constraints_for_intent, test_data.
    Returns (conversations, entity_linking_results, pipeline_results,
    pipeline_results_no_intent).
    """
    conversations = dict()
    dialog_states = dict()
    latest_intents = dict()
    for ids, output in zip(test_ids, nlu_result):
        if not is_BIO:
            conver_id, turn_id, sentence_id = ids[0], ids[1], ids[2]
        else:
            conver_id, turn_id = ids[0], ids[1]
        if conver_id not in conversations:
            conversations[conver_id] = []
            dialog_states[conver_id] = {None: {}}
            latest_intents[conver_id] = [None]
        # NLU output format: "(intent1<div>intent2) -> tok [tok:LABEL] ..."
        predicted_intents, texts = output.split('->')
        intents = predicted_intents[1:-2].split('<div>')
        tokens = texts.strip().split(' ')
        word_list = []
        label_list = []
        for t in tokens:
            if t.startswith('[') and t.endswith(']') and ':' in t:
                colon_idx = t.rindex(':')
                word_list.append(t[1:colon_idx])
                the_label = t[colon_idx + 1:-1]
                # assert the_label in bot_definition['slots']
                label_list.append(the_label)
            else:
                word_list.append(t)
                label_list.append('O')
        if is_BIO:
            slot_values = get_slot_values_BIO(word_list, label_list)
        else:
            slot_values = get_slot_values(word_list, label_list)
        entity_values = entity_linking_list(slot_values, distinct_slot_values, method=entity_linking_method,
                                            threshold=entity_linking_threshold)
        main_intents = [intent for intent in intents if intent in bot_definition['main_intents']]
        # Update dialog states
        if main_intents != []:
            for intent in main_intents:
                if intent not in dialog_states[conver_id]:
                    dialog_states[conver_id][intent] = dict()
                dialog_states[conver_id][intent].update(entity_values)
            latest_intents[conver_id] = main_intents
        else:
            # No new main intent: fold the slots into the most recent intent(s).
            for intent in latest_intents[conver_id]:
                dialog_states[conver_id][intent].update(entity_values)
        sentence_object = {
            'conversationId': conver_id,
            'turnNumber': turn_id,
            'utterance': ' '.join(word_list),
            'intents': predicted_intents[1:-2],
            'main_intents': main_intents,
            'slot_values': slot_values,
            'entity_values': entity_values,
            'dialog_states': copy.deepcopy(dialog_states[conver_id])
        }
        if not is_BIO:
            sentence_object['sentenceNumber'] = sentence_id
            sentence_object['utteranceId'] = f'<CONV>{conver_id}<TURN>{turn_id}<SENT>{sentence_id}'
        else:
            sentence_object['utteranceId'] = f'<CONV>{conver_id}<TURN>{turn_id}'
        # Violation detection
        violations_all = set()
        violation_details = []
        for intent, state in sentence_object['dialog_states'].items():
            if intent is None:
                continue
            related_constraints = constraints_for_intent[intent]
            for constraint in related_constraints:
                is_applicable, violations = find_violations(constraint, state, distinct_slot_values)
                if is_applicable and len(violations) > 0:  # at least one violation found
                    violations_all.add((intent, constraint['name']))
                    violation_details.append(
                        {'intent': intent, 'constraint': constraint['name'], 'violations': violations})
        sentence_object['violations'] = list(violations_all)
        sentence_object['violation_details'] = violation_details
        conversations[conver_id].append(sentence_object)
    entity_linking_results = get_linking_results_of_method(test_data, conversations, distinct_slot_values, entity_linking_method, threshold=entity_linking_threshold)
    pipeline_results = evaluation(test_data, conversations)
    pipeline_results_no_intent = evaluation_no_intent(test_data, conversations)
    return conversations, entity_linking_results, pipeline_results, pipeline_results_no_intent
def run_pipeline_probabilistic(entity_linking_method, entity_linking_threshold = None, softmaxtemp = 0.1):
    """Probabilistic variant of run_pipeline: also tracks soft entity distributions.

    NOTE(review): unlike run_pipeline, this reads `is_BIO` from module globals
    instead of taking it as a parameter — confirm it is defined in __main__.
    A violation is flagged when any constraint probability is >= 0.5.
    """
    conversations = dict()
    dialog_states = dict()
    dialog_states_slots = dict()
    dialog_states_probs = dict()
    latest_intents = dict()
    for ids, output in zip(test_ids, nlu_result):
        if not is_BIO:
            conver_id, turn_id, sentence_id = ids[0], ids[1], ids[2]
        else:
            conver_id, turn_id = ids[0], ids[1]
        if conver_id not in conversations:
            conversations[conver_id] = []
            dialog_states[conver_id] = {None:{}}
            dialog_states_slots[conver_id] = {None:{}}
            dialog_states_probs[conver_id] = {None:{}}
            latest_intents[conver_id] = [None]
        # NLU output format: "(intent1<div>intent2) -> tok [tok:LABEL] ..."
        predicted_intents, texts = output.split('->')
        intents = predicted_intents[1:-2].split('<div>')
        tokens = texts.strip().split(' ')
        word_list = []
        label_list = []
        for t in tokens:
            if t.startswith('[') and t.endswith(']') and ':' in t:
                colon_idx = t.rindex(':')
                word_list.append(t[1:colon_idx])
                the_label = t[colon_idx+1:-1]
                # assert the_label in bot_definition['slots']
                label_list.append(the_label)
            else:
                word_list.append(t)
                label_list.append('O')
        if is_BIO:
            slot_values = get_slot_values_BIO(word_list, label_list)
        else:
            slot_values = get_slot_values(word_list, label_list)
        # Hard entity links plus a soft (softmax-tempered) distribution per slot.
        entity_values = entity_linking_list(slot_values, distinct_slot_values, method = entity_linking_method, threshold = entity_linking_threshold)
        entity_probs = entity_linking_list_probs(slot_values, distinct_slot_values, softmaxtemp, method = entity_linking_method, threshold = entity_linking_threshold)
        main_intents = [intent for intent in intents if intent in bot_definition['main_intents']]
        # Update dialog states
        if main_intents != []:
            for intent in main_intents:
                if intent not in dialog_states[conver_id]:
                    dialog_states[conver_id][intent] = dict()
                    dialog_states_slots[conver_id][intent] = dict()
                    dialog_states_probs[conver_id][intent] = dict()
                dialog_states[conver_id][intent].update(entity_values)
                dialog_states_slots[conver_id][intent].update(slot_values)
                dialog_states_probs[conver_id][intent].update(entity_probs)
            latest_intents[conver_id] = main_intents
        else:
            for intent in latest_intents[conver_id]:
                dialog_states[conver_id][intent].update(entity_values)
                dialog_states_slots[conver_id][intent].update(slot_values)
                dialog_states_probs[conver_id][intent].update(entity_probs)
        sentence_object = {
            'conversationId': conver_id,
            'turnNumber': turn_id,
            'utterance': ' '.join(word_list),
            'intents': predicted_intents[1:-2],
            'main_intents': main_intents,
            'slot_values': slot_values,
            'entity_values': entity_values,
            'entity_probs': entity_probs,
            'dialog_states': copy.deepcopy(dialog_states[conver_id]),
            'dialog_states_slots': copy.deepcopy(dialog_states_slots[conver_id]),
            'dialog_states_probs': copy.deepcopy(dialog_states_probs[conver_id])
        }
        if not is_BIO:
            sentence_object['sentenceNumber'] = sentence_id
            sentence_object['utteranceId'] = f'<CONV>{conver_id}<TURN>{turn_id}<SENT>{sentence_id}'
        else:
            sentence_object['utteranceId'] = f'<CONV>{conver_id}<TURN>{turn_id}'
        # Violation detection
        violations_all = set()
        violation_details = []
        for intent, state in sentence_object['dialog_states_probs'].items():
            if intent is None:
                continue
            related_constraints = constraints_for_intent[intent]
            for constraint in related_constraints:
                is_applicable, violations = find_violations_probs(constraint, state, distinct_slot_values)
                if is_applicable and any(v >= 0.5 for v in violations):  # a likely violation exists
                    violations_all.add((intent,constraint['name']))
                    violation_details.append({'intent': intent, 'constraint': constraint['name'], 'violations': violations})
        sentence_object['violations'] = list(violations_all)
        sentence_object['violation_details'] = violation_details
        conversations[conver_id].append(sentence_object)
    pipeline_results = evaluation(test_data, conversations)
    pipeline_results_no_intent = evaluation_no_intent(test_data, conversations)
    return conversations, None, pipeline_results, pipeline_results_no_intent
def calculate_prf_one_group(all_gts, all_pds):
    """Compute precision/recall/F1 between ground-truth and predicted sets.

    Any metric whose denominator is zero is reported as None (the original
    achieved this via bare `except:` clauses swallowing ZeroDivisionError and
    TypeError; this version makes the zero/None cases explicit).
    """
    common = all_gts.intersection(all_pds)
    precision = len(common) / len(all_pds) if all_pds else None
    recall = len(common) / len(all_gts) if all_gts else None
    # F1 is undefined when either component is undefined or both are zero.
    if precision is None or recall is None or (precision + recall) == 0:
        f1 = None
    else:
        f1 = 2 * precision * recall / (precision + recall)
    return {
        'precision': precision,
        'recall': recall,
        'f1': f1
    }
def calculate_prf(all_gts, all_pds):
    """Overall P/R/F1 plus a per-constraint breakdown under 'constraint_stats'.

    Items are tuples whose last element is the constraint name.
    NOTE(review): reads the module-level `all_constraints` built in __main__.
    """
    ans = calculate_prf_one_group(all_gts, all_pds)
    constraint_stats = {}
    for constraint in all_constraints:
        cname = constraint['name']
        this_gts = set([t for t in all_gts if t[-1] == cname])
        this_pds = set([t for t in all_pds if t[-1] == cname])
        constraint_stats[cname] = calculate_prf_one_group(this_gts, this_pds)
    ans['constraint_stats'] = constraint_stats
    return ans
def calculate_both_prfs(all_gts, all_pds, all_dependent_gts = None, all_dependent_pds = None):
    """P/R/F1 for the independent set, plus the dependent set when both are given."""
    final_ans = {}
    final_ans['independent'] = calculate_prf(all_gts, all_pds)
    if (all_dependent_gts is not None) and (all_dependent_pds is not None):
        final_ans['dependent'] = calculate_prf(all_dependent_gts, all_dependent_pds)
    return final_ans
def evaluation(true_convers, predict_convers):
    """Score predicted violations against ground truth, turn by turn.

    Computes micro P/R/F1 over (utteranceId, intent, constraint) triples,
    both over all turns ('independent') and restricted to turns where the
    other side also tracked the same intent ('dependent'), plus per-turn
    exact match, per-turn IoU, and whole-conversation accuracy.

    NOTE(review): assumes each turn's 'violations' holds (intent,
    constraint_name) pairs; ground-truth entries may be lists rather than
    tuples (hence the tuple() conversion below) — confirm against the
    test-data format.
    """
    assert len(true_convers) == len(predict_convers)
    all_turn_pds = set()
    all_turn_gts = set()
    all_turn_dependent_pds = set()
    all_turn_dependent_gts = set()
    exact_match_turn = []
    iou_turn = []
    conversation_all_correct = []
    for key in true_convers:
        assert key in predict_convers
        assert len(true_convers[key]) == len(predict_convers[key])
        conversation_correct = 1
        # predicted_state_tracker = {}
        # groundtruth_violation_tracker = {}
        for idx in range(len(true_convers[key])):
            gt = true_convers[key][idx]
            pd = predict_convers[key][idx]
            assert gt['utteranceId'] == pd['utteranceId']
            # Collect predicted triples; a prediction is 'dependent' when the
            # ground truth also tracks that intent in its dialog states.
            for i, v in pd['violations']:
                all_turn_pds.add((pd['utteranceId'], i, v))
                if i in gt['dialog_states']:
                    all_turn_dependent_pds.add((pd['utteranceId'], i, v))
            for i, v in gt['violations']:
                all_turn_gts.add((gt['utteranceId'], i, v))
                if i in pd['dialog_states']:
                    all_turn_dependent_gts.add((gt['utteranceId'], i, v))
            # Per-turn IoU of the violation sets (1 when both are empty).
            gt_vios = set([tuple(x) for x in gt['violations']])
            pd_vios = set(pd['violations'])
            if gt_vios == pd_vios:
                this_iou = 1
            else:
                this_iou = len(gt_vios.intersection(pd_vios)) / len(gt_vios.union(pd_vios))
            iou_turn.append(this_iou)
            this_exact_match = 1 if this_iou == 1 else 0
            exact_match_turn.append(this_exact_match)
            if not this_exact_match:
                conversation_correct = 0
        # A conversation counts as correct only if every turn matched exactly.
        conversation_all_correct.append(conversation_correct)
    result_this_iteration = calculate_both_prfs(all_turn_gts, all_turn_pds, all_turn_dependent_gts,
                                                all_turn_dependent_pds)
    pprint(result_this_iteration)
    result_this_iteration['exact_match'] = np.mean(exact_match_turn)
    result_this_iteration['iou'] = np.mean(iou_turn)
    result_this_iteration['conversation_correct'] = np.mean(conversation_all_correct)
    return result_this_iteration
def evaluation_no_intent(true_convers, predict_convers):
    """Same scoring as `evaluation`, but the predicted intent is ignored:
    turns are compared on (utteranceId, constraint_name) pairs only, and no
    'dependent' variant is computed."""
    assert len(true_convers) == len(predict_convers)
    all_turn_pds = set()
    all_turn_gts = set()
    exact_match_turn = []
    iou_turn = []
    conversation_all_correct = []
    for key in true_convers:
        assert key in predict_convers
        assert len(true_convers[key]) == len(predict_convers[key])
        conversation_correct = 1
        for idx in range(len(true_convers[key])):
            gt = true_convers[key][idx]
            pd = predict_convers[key][idx]
            assert gt['utteranceId'] == pd['utteranceId']
            # Keep only the constraint name (v); drop the intent (i).
            for i, v in pd['violations']:
                all_turn_pds.add((pd['utteranceId'], v))
            for i, v in gt['violations']:
                all_turn_gts.add((gt['utteranceId'], v))
            gt_vios = set([x[1] for x in gt['violations']])  # x[1] is violation, while x[0] is intent
            pd_vios = set([x[1] for x in pd['violations']])  # x[1] is violation, while x[0] is intent
            if gt_vios == pd_vios:
                this_iou = 1
            else:
                this_iou = len(gt_vios.intersection(pd_vios)) / len(gt_vios.union(pd_vios))
            iou_turn.append(this_iou)
            this_exact_match = 1 if this_iou == 1 else 0
            exact_match_turn.append(this_exact_match)
            if not this_exact_match:
                conversation_correct = 0
        conversation_all_correct.append(conversation_correct)
    result_this_iteration = calculate_both_prfs(all_turn_gts, all_turn_pds)
    pprint(result_this_iteration)
    result_this_iteration['exact_match'] = np.mean(exact_match_turn)
    result_this_iteration['iou'] = np.mean(iou_turn)
    result_this_iteration['conversation_correct'] = np.mean(conversation_all_correct)
    return result_this_iteration
if __name__ == '__main__':
    # CLI entry point: load the bot definition, test data, and NLU output,
    # then run the deterministic or probabilistic violation-detection
    # pipeline for a list of entity-linking methods and dump all results.
    # NOTE(review): `run_pipeline`, `run_pipeline_probabilistic`,
    # `get_constraints`, `memorise_roberta`, and `NumpyArrayEncoder` are
    # defined earlier in this file (outside this excerpt).
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", default=None, required=True, type=str, help="The name of the task to test")
    parser.add_argument("--nlu_result_path", default=None, required=True, type=str, help="Path to nlu result file")
    parser.add_argument("--data_dir", default="./data", type=str, help="The input data dir")
    parser.add_argument("--is_bio", action="store_true", default=False, help="Whether slot labels have BIO schema")
    parser.add_argument("--pipeline", default="deterministic", type=str, help="deterministic or probabilistic")
    parser.add_argument("--softmaxtemp", default=0.1, type=float, help="Temperature of the softmax function for the probabilistic pipeline")
    parser.add_argument("--result_folder", default=None, required=True, type=str, help="Folder to save run results")
    args = parser.parse_args()
    # Important file paths
    test_data_path = f'{args.data_dir}/{args.task}/test/test.json'
    test_ids_path = f'{args.data_dir}/{args.task}/test/ids.txt'
    nlu_result_path = args.nlu_result_path
    bot_definition_path = f'{args.data_dir}/{args.task}/bot_definition_{args.task}.json'
    is_BIO = args.is_bio
    # Load files
    bot_definition = json.load(open(bot_definition_path, 'r'))
    test_data = json.load(open(test_data_path, 'r'))
    nlu_result = [line.strip() for line in open(nlu_result_path, 'r')]
    test_ids = [line.strip().split('\t') for line in open(test_ids_path, 'r')]
    assert len(test_ids) == len(nlu_result)
    # Constraints info
    distinct_slot_values = bot_definition['distinct_slot_values']
    all_constraints, constraints_for_intent = get_constraints(bot_definition)
    # Run the pipeline (deterministic or probabilistic)
    final_results = {'settings':vars(args)}
    predictions = {}
    if args.pipeline == 'deterministic':
        # A bare string is a method name; a tuple is (method, threshold).
        method_list = ['exact', 'bijaccard', 'edit_distance',
                       'roberta_mnli', ('roberta_mnli', 0.2), 'average_three', ('average_three', 0.5)]
        all_entity_linking_results = dict()
        all_pipeline_results = dict()
        all_pipeline_results_no_intent = dict()
        for m in method_list:
            print(m)
            if isinstance(m, str):
                met = m
                threshold = None
                method_name = met
            else:
                met, threshold = m[0], m[1]
                method_name = f"{met}_{threshold}"
            conversations, entity_linking_results, pipeline_results, pipeline_results_no_intent = run_pipeline(met, threshold, is_BIO)
            all_entity_linking_results[method_name] = entity_linking_results
            all_pipeline_results[method_name] = pipeline_results
            all_pipeline_results_no_intent[method_name] = pipeline_results_no_intent
            predictions[method_name] = conversations
            pprint(entity_linking_results)
            print('-' * 10)
            pprint(pipeline_results_no_intent)
            print('=' * 50)
        final_results['entity_linking_results'] = all_entity_linking_results
        final_results['violation_detection_results'] = all_pipeline_results
        final_results['violation_detection_results_no_intent'] = all_pipeline_results_no_intent
    elif args.pipeline == 'probabilistic':
        method_list = ['bijaccard', 'edit_distance_norm',
                       'roberta_mnli', ('roberta_mnli', 0.2), 'average_three', ('average_three', 0.5)]
        all_pipeline_results_probs = dict()
        all_pipeline_results_probs_no_intent = dict()
        for m in method_list:
            print(m)
            if isinstance(m, str):
                met = m
                threshold = None
                method_name = met
            else:
                met, threshold = m[0], m[1]
                method_name = f"{met}_{threshold}"
            conversations, _, pipeline_results, pipeline_results_no_intent = run_pipeline_probabilistic(met, threshold, args.softmaxtemp)
            all_pipeline_results_probs[method_name] = pipeline_results
            all_pipeline_results_probs_no_intent[method_name] = pipeline_results_no_intent
            predictions[method_name] = conversations
            pprint(pipeline_results_no_intent)
            print('=' * 50)
        final_results['violation_detection_results'] = all_pipeline_results_probs
        final_results['violation_detection_results_no_intent'] = all_pipeline_results_probs_no_intent
    else:
        assert False, f"Pipeline type {args.pipeline} not supported"
    # Persist the RoBERTa-MNLI memoization cache for future runs.
    pickle.dump(memorise_roberta, open('memorise_roberta.pickle', 'wb'))
    # Timestamped result files: JSON for inspection, pickle for exact reload.
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    json.dump(final_results, open(f'{args.result_folder}/results_{args.pipeline}_{timestamp}.json', 'w'), cls=NumpyArrayEncoder)
    pickle.dump(final_results, open(f'{args.result_folder}/results_{args.pipeline}_{timestamp}.pickle', 'wb'))
    json.dump(predictions, open(f'{args.result_folder}/predictions_{args.pipeline}_{timestamp}.json', 'w'), cls=NumpyArrayEncoder)
1973115 | <reponame>adamwojt/ur_l<gh_stars>1-10
import pytest
from django.core.cache import cache
from ..apps import ApiConfig
from ..models import CollisionLog, Url
class TestURLWithoutDB:
    """Tests that do not require database access."""

    def test_get_random_url_token(self):
        """A freshly generated token has the configured length."""
        generated = Url.objects._get_random_url_token()
        assert len(generated) == ApiConfig.TOKEN_LENGTH_STR
@pytest.mark.django_db
class TestURLWithDB:
    """Database-backed tests for Url creation, caching, and collision logging."""

    schema_url = "https://www.google.com"
    # NOTE(review): "<PASSWORD>" looks like a redaction placeholder left by
    # dataset anonymization, not a realistic token — confirm the fixture value.
    test_token = "<PASSWORD>"  # nosec

    def setup_method(self, method):
        # Enable the cache layer only for tests whose name mentions "cache".
        ApiConfig.USE_CACHE = "cache" in method.__name__

    def test_create_url(self):
        """Creating a short url does not raise."""
        Url.objects.create_short_url(self.schema_url)

    def test_cache_life_cycle(self):
        """Cache entry is created, updated on save, and removed on delete."""
        # First create and check if cache exists
        url = Url.objects.create_short_url(self.schema_url)
        token = url.token
        assert cache.get(token) == url.long_url
        # Change long_url and see if cache was updated
        new_url = "www.new.com"
        url.long_url = new_url
        url.save()
        assert cache.get(token) == new_url
        # Delete and check if cache was deleted too
        url.delete()
        assert not cache.get(token)

    def test_no_cash3_created(self):
        """Cache is disabled by setup_method, so no cache entry is written."""
        url = Url.objects.create_short_url(self.schema_url)
        assert not cache.get(url.token)

    def test_token_collision(self):
        """A colliding token is regenerated and the collision is logged."""
        url = Url.objects.create_short_url(self.schema_url, token=self.test_token)
        url2 = Url.objects.create_short_url(self.schema_url, token=self.test_token)
        assert not url.token == url2.token
        collision_log = CollisionLog.objects.get(token=self.test_token)
        assert collision_log.token == self.test_token

    def test_collisions_are_rare(self):
        """Random token generation produces (almost) no collisions."""
        for _ in range(1000):
            Url.objects.create_short_url(self.schema_url)
        assert CollisionLog.objects.all().count() <= 3

    def test_token_no_collsion_log(self):
        """With logging disabled, a collision leaves no CollisionLog row."""
        # Fix: the original flipped ApiConfig.LOG_COLLISIONS to False and never
        # restored it, leaking global state into tests that run afterwards.
        original_flag = ApiConfig.LOG_COLLISIONS
        ApiConfig.LOG_COLLISIONS = False
        try:
            Url.objects.create_short_url(self.schema_url, token=self.test_token)
            Url.objects.create_short_url(self.schema_url, token=self.test_token)
            with pytest.raises(CollisionLog.DoesNotExist):
                CollisionLog.objects.get(token=self.test_token)
        finally:
            ApiConfig.LOG_COLLISIONS = original_flag

    def test_get_url_using_cache(self, django_assert_num_queries):
        """Lookup is served from the cache: zero database queries."""
        url = Url.objects.create_short_url(self.schema_url, token=self.test_token)
        with django_assert_num_queries(0):
            assert Url.objects.get_long_url(url.token) == url.long_url

    def test_get_url_using_db(self, django_assert_num_queries):
        """With the cache disabled, lookup costs exactly one database query."""
        url = Url.objects.create_short_url(self.schema_url, token=self.test_token)
        with django_assert_num_queries(1):
            assert Url.objects.get_long_url(url.token) == url.long_url
| StarcoderdataPython |
9668980 | from django.contrib import admin
from .models import *
class FileInline(admin.StackedInline):
    # Edit File rows inline on the Post change page.
    model = File
    # Number of empty extra forms shown for adding new files.
    extra = 3


class PostAdmin(admin.ModelAdmin):
    # Attach the file inline so a post and its files are edited together.
    inlines = [FileInline]


admin.site.register(Post, PostAdmin)
admin.site.register(File)  # Just in case, also register a standalone admin page for File
| StarcoderdataPython |
8054560 | """
Shared utilities for the patch drawings.
"""
import numpy as np
def rotmat(rot):
    """Return the 2x2 counter-clockwise rotation matrix for an angle in degrees.

    Args:
        rot: rotation angle in degrees.

    Returns:
        np.ndarray of shape (2, 2).
    """
    d = np.deg2rad(rot)  # idiomatic degrees -> radians (was 2*pi/360 * rot)
    return np.array([[np.cos(d), -np.sin(d)], [np.sin(d), np.cos(d)]])
| StarcoderdataPython |
6467790 | <filename>src/scene_layouts/evaluator.py<gh_stars>1-10
import numpy as np
import torch
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from scene_layouts.datasets import SCENE_HEIGHT_TEST, SCENE_WIDTH_TEST
class Evaluator:
    """Accumulates per-scene layout metrics (absolute/relative similarity and
    orientation accuracy) across batches, for `total_elements` scenes."""

    def __init__(self, total_elements):
        self.total_elements = total_elements
        # One slot per scene; filled batch-by-batch via `index`.
        self.abs_sim = np.zeros(self.total_elements)
        self.rel_sim = np.zeros(self.total_elements)
        self.o_acc = np.zeros(self.total_elements)
        self.index = 0

    def update_metrics(self, x_out, x_lab, y_out, y_lab, o_out, o_lab, attn_mask):
        """Score one batch of predictions against labels and store the
        per-scene results; `flips` marks scenes scored in mirrored form."""
        # Update absolute similarity
        batch_size = x_out.size()[0]
        abs_sim, flips = abs_similarity(x_out, x_lab, y_out, y_lab, attn_mask)
        self.abs_sim[self.index : self.index + batch_size] = abs_sim.cpu().numpy()
        # Update relative similarity
        self.rel_sim[self.index : self.index + batch_size] = (
            relative_similarity(x_out, x_lab, y_out, y_lab, attn_mask).cpu().numpy()
        )
        # Update orientation accuracy
        self.o_acc[self.index : self.index + batch_size] += (
            orientation_acc(o_out, o_lab, attn_mask, flips).cpu().numpy()
        )
        self.index += batch_size

    def reset_metrics(self):
        """Clear all accumulated scores and rewind the write index."""
        self.abs_sim = np.zeros(self.total_elements)
        self.rel_sim = np.zeros(self.total_elements)
        self.o_acc = np.zeros(self.total_elements)
        self.index = 0

    def get_abs_sim(self):
        """Mean absolute similarity over all scenes, rounded to 3 decimals."""
        return np.round(self.abs_sim.mean(), decimals=3)

    def get_rel_sim(self):
        """Mean relative similarity over all scenes, rounded to 3 decimals."""
        return np.round(self.rel_sim.mean(), decimals=3)

    def get_o_acc(self):
        """Mean orientation accuracy as a percentage."""
        return np.round(self.o_acc.mean() * 100, decimals=3)

    def get_abs_error_bar(self):
        # Standard error of the mean for absolute similarity.
        return np.round(
            np.std(self.abs_sim, ddof=1) / np.sqrt(self.total_elements), decimals=3
        )

    def get_rel_error_bar(self):
        # Standard error of the mean for relative similarity.
        return np.round(
            np.std(self.rel_sim, ddof=1) / np.sqrt(self.total_elements), decimals=3
        )

    def dump_results(self, abs_dump_path: str, rel_dump_path: str):
        """Save the per-scene similarity arrays to .npy files."""
        np.save(abs_dump_path, self.abs_sim, allow_pickle=False)
        np.save(rel_dump_path, self.rel_sim, allow_pickle=False)

    def find_common_cliparts(
        self,
        pred_clips,
        gt_clips,
        pred_pos_x,
        gt_pos_x,
        pred_pos_y,
        gt_pos_y,
        pred_pos_o,
        gt_pos_o,
    ):
        """Align predicted and ground-truth positions on the cliparts both
        scenes contain, returning six batched (1, K) tensors:
        predicted x/y/o followed by ground-truth x/y/o."""
        # https://github.com/uvavision/Text2Scene/blob/master/lib/modules/abstract_evaluator.py#L309
        common_pred_x, common_pred_y, common_pred_o = [], [], []
        common_gts_x, common_gts_y, common_gts_o = [], [], []
        for i in range(len(pred_clips)):
            if pred_clips[i] in gt_clips:
                common_pred_x.append(pred_pos_x[i])
                common_pred_y.append(pred_pos_y[i])
                common_pred_o.append(pred_pos_o[i])
                # Pair with the matching ground-truth clipart's position.
                index_gt = gt_clips.index(pred_clips[i])
                common_gts_x.append(gt_pos_x[index_gt])
                common_gts_y.append(gt_pos_y[index_gt])
                common_gts_o.append(gt_pos_o[index_gt])
        return (
            torch.tensor(common_pred_x).unsqueeze(0),
            torch.tensor(common_pred_y).unsqueeze(0),
            torch.tensor(common_pred_o).unsqueeze(0),
            torch.tensor(common_gts_x).unsqueeze(0),
            torch.tensor(common_gts_y).unsqueeze(0),
            torch.tensor(common_gts_o).unsqueeze(0),
        )
def flip_scene(labs: torch.Tensor):
    """Mirror x-coordinates horizontally around the scene width.

    Padding entries (negative values) are reset to -100 so they stay padding.
    """
    padding_positions = torch.where(labs < 0)
    mirrored = (SCENE_WIDTH_TEST - labs).abs()
    mirrored[padding_positions] = -100
    return mirrored
def abs_similarity(x_inds, x_labs, y_inds, y_labs, attn_mask):
    """Absolute-position similarity, taking the best of the normal and the
    horizontally flipped label scene.

    Returns (similarity, flips) where flips is 1.0 for scenes whose flipped
    orientation scored at least as high as the normal one.
    """
    sim_normal = abs_similarity_single(x_inds, x_labs, y_inds, y_labs, attn_mask)
    flipped_labs = flip_scene(x_labs)
    sim_flipped = abs_similarity_single(
        x_inds, flipped_labs, y_inds, y_labs, attn_mask
    )
    best = torch.max(sim_normal, sim_flipped)
    return best, (best == sim_flipped).float()
def abs_similarity_single(x_inds, x_labs, y_inds, y_labs, attn_mask):
    """Per-scene similarity between predicted and gold absolute positions.

    Coordinates are normalized by the test scene width/height, the Euclidean
    distance is mapped to a similarity with a Gaussian kernel, and the result
    is averaged over the non-padding elements of each scene.
    """
    x_inds_norm = x_inds.clone().float() / SCENE_WIDTH_TEST
    y_inds_norm = y_inds.clone().float() / SCENE_HEIGHT_TEST
    x_labs_norm = x_labs.clone().float() / SCENE_WIDTH_TEST
    y_labs_norm = y_labs.clone().float() / SCENE_HEIGHT_TEST
    # Obtain dist for X and Y
    dist_x = torch.pow(x_inds_norm - x_labs_norm, 2).float()
    dist_y = torch.pow(y_inds_norm - y_labs_norm, 2).float()
    # Epsilon keeps sqrt finite/differentiable at zero distance.
    dist = torch.sqrt(dist_x + dist_y + torch.full_like(dist_x, 1e-15))
    # Convert to similarity by applying Gaussian Kernel
    # https://github.com/uvavision/Text2Scene/blob/master/lib/abstract_utils.py#L366
    sim = torch.exp(-0.5 * dist / 0.2)
    # Set 0 similarity for the padding tokens
    sim = sim * attn_mask
    # Obtain average sim for each scene without considering the padding tokens
    sim = sim.sum(-1) / attn_mask.sum(-1)
    return sim
def elementwise_distances(X: torch.Tensor, Y: torch.Tensor):
    """All-pairs Euclidean distances between scene elements on normalized
    coordinates; returns a (batch, n, n) tensor."""
    xs = X.clone().float() / SCENE_WIDTH_TEST
    ys = Y.clone().float() / SCENE_HEIGHT_TEST
    dx = (xs.unsqueeze(1) - xs.unsqueeze(2)) ** 2
    dy = (ys.unsqueeze(1) - ys.unsqueeze(2)) ** 2
    # Small epsilon keeps sqrt finite/differentiable at zero distance.
    return torch.sqrt(dx + dy + torch.full_like(dx, 1e-15))
def relative_similarity(x_inds, x_labs, y_inds, y_labs, attn_mask):
    """Similarity of the pairwise inter-element distance structure between the
    predicted scene and the label scene, averaged per scene while excluding
    padding tokens and the (self-distance) diagonal."""
    dist = torch.abs(
        elementwise_distances(x_inds, y_inds) - elementwise_distances(x_labs, y_labs)
    ).float()
    # Convert to similarity by applying Gaussian Kernel
    # https://github.com/uvavision/Text2Scene/blob/master/lib/abstract_utils.py#L366
    sim = torch.exp(-0.5 * dist / 0.2)
    # Set diagonal to 0
    # (fill_ acts on a view, so this mutates `sim` in place)
    diag = torch.diagonal(sim, dim1=1, dim2=2)
    diag.fill_(0.0)
    # Remove the similarity from the padding tokens
    sim = (
        sim
        * attn_mask.unsqueeze(1).expand(sim.size())
        * attn_mask.unsqueeze(-1).expand(sim.size())
    )
    # Obtain average distance for each scene without considering the padding tokens
    # and the main diagonal
    # Unsqueeze because it should work within a batch
    sim = sim.sum(-1) / (attn_mask.sum(-1).unsqueeze(-1) - 1)
    sim = sim.sum(-1) / (attn_mask.sum(-1) - 1)
    return sim
def orientation_acc(inds, labs, attn_mask, flips):
    """Per-scene orientation accuracy; predictions are mirrored (|o - 1|)
    for scenes where the flipped layout scored better."""
    adjusted = (inds - flips.unsqueeze(-1)).abs()
    correct = (adjusted == labs).sum(-1).float()
    return correct / attn_mask.sum(-1)
class ScEvaluator:
    """Metric accumulator for the masked (selected-clipart) setting:
    similarities are averaged only over masked target elements, and dataset
    means are taken over scenes with a non-zero score."""

    def __init__(self, total_elements):
        self.total_elements = total_elements
        # One slot per scene; filled batch-by-batch via `index`.
        self.abs_sim = np.zeros(self.total_elements)
        self.rel_sim = np.zeros(self.total_elements)
        self.orientation_acc = np.zeros(self.total_elements)
        self.index = 0

    def update_metrics(self, x_out, x_lab, y_out, y_lab, o_out, o_lab, mask):
        """Score one batch (masked variant) and store per-scene results."""
        # Update absolute similarity
        batch_size = x_out.size()[0]
        self.abs_sim[self.index : self.index + batch_size] = (
            abs_similarity_sc(x_out, x_lab, y_out, y_lab, mask).cpu().numpy()
        )
        # Update relative similarity
        self.rel_sim[self.index : self.index + batch_size] = (
            relative_similarity_sc(x_out, x_lab, y_out, y_lab, mask).cpu().numpy()
        )
        self.orientation_acc[self.index : self.index + batch_size] = (
            orientation_acc_sc(o_out, o_lab, mask).cpu().numpy()
        )
        self.index += batch_size

    def reset_metrics(self):
        """Clear all accumulated scores and rewind the write index."""
        self.abs_sim = np.zeros(self.total_elements)
        self.rel_sim = np.zeros(self.total_elements)
        self.orientation_acc = np.zeros(self.total_elements)
        self.index = 0

    def get_abs_sim(self):
        # Mean over scenes with a non-zero score (zero slots count as unfilled).
        return np.round(self.abs_sim.sum() / np.count_nonzero(self.abs_sim), decimals=3)

    def get_rel_sim(self):
        return np.round(self.rel_sim.sum() / np.count_nonzero(self.rel_sim), decimals=3)

    def get_o_acc(self):
        """Orientation accuracy as a percentage over non-zero scenes."""
        return np.round(
            (self.orientation_acc.sum() / np.count_nonzero(self.orientation_acc)) * 100,
            decimals=1,
        )

    def get_abs_error_bar(self):
        # Standard error of the mean for absolute similarity.
        return np.round(
            np.std(self.abs_sim, ddof=1) / np.sqrt(np.count_nonzero(self.abs_sim)),
            decimals=3,
        )

    def get_rel_error_bar(self):
        # Standard error of the mean for relative similarity.
        return np.round(
            np.std(self.rel_sim, ddof=1) / np.sqrt(np.count_nonzero(self.rel_sim)),
            decimals=3,
        )

    def get_o_acc_error_bar(self):
        # Standard error of the mean for orientation accuracy, in percent.
        return np.round(
            (
                np.std(self.orientation_acc, ddof=1)
                / np.sqrt(np.count_nonzero(self.orientation_acc))
            )
            * 100,
            decimals=2,
        )
def relative_similarity_sc(x_inds, x_labs, y_inds, y_labs, mask):
    """Relative (pairwise-distance) similarity restricted to masked elements.

    NOTE(review): the final reduction uses `sim.sum()` over the whole batch
    divided by a per-scene `mask.sum(-1)`, unlike `relative_similarity`,
    which reduces per scene with `sim.sum(-1)` — confirm this is intended
    for batch sizes > 1.
    """
    dist = torch.abs(
        elementwise_distances(x_inds, y_inds) - elementwise_distances(x_labs, y_labs)
    ).float()
    # Convert to similarity by applying Gaussian Kernel
    # https://github.com/uvavision/Text2Scene/blob/master/lib/abstract_utils.py#L366
    sim = torch.exp(-0.5 * dist / 0.2)
    # Keep only rows belonging to masked (target) elements.
    sim = sim * mask.unsqueeze(-1).expand(sim.size())
    # Set diagonal to 0
    # (fill_ acts on a view, so this mutates `sim` in place)
    diag = torch.diagonal(sim, dim1=1, dim2=2)
    diag.fill_(0.0)
    # Obtain average over the selected group
    sim = sim.sum(-1) / ((sim != 0).sum(-1) + 1e-15)
    sim = sim.sum() / (mask.sum(-1) + 1e-15)
    return sim
def abs_similarity_sc(x_inds, x_labs, y_inds, y_labs, mask):
    """Absolute-position similarity averaged over the masked (target)
    elements only; returns one value per scene."""
    xs_pred = x_inds.clone().float() / SCENE_WIDTH_TEST
    ys_pred = y_inds.clone().float() / SCENE_HEIGHT_TEST
    xs_gold = x_labs.clone().float() / SCENE_WIDTH_TEST
    ys_gold = y_labs.clone().float() / SCENE_HEIGHT_TEST
    squared = (xs_pred - xs_gold) ** 2 + (ys_pred - ys_gold) ** 2
    # Epsilon keeps sqrt finite/differentiable at zero distance.
    dist = torch.sqrt(squared + torch.full_like(squared, 1e-15))
    # Gaussian kernel maps the distance into a similarity in (0, 1].
    # https://github.com/uvavision/Text2Scene/blob/master/lib/abstract_utils.py#L366
    sim = torch.exp(-0.5 * dist / 0.2)
    sim = sim * mask  # zero out the non-target elements
    return sim.sum(-1) / (mask.sum(-1) + 1e-15)
def orientation_acc_sc(inds, labs, mask):
    """Orientation accuracy over the masked (target) elements only."""
    matches = (inds == labs).float() + 1e-15  # epsilon keeps empty masks finite
    masked = matches * mask
    return masked.sum(-1) / (mask.sum(-1) + 1e-15)
class ClipartsPredictionEvaluator:
    """Accumulates multi-label clipart predictions vs. targets and derives
    micro P/R/F1 for regular objects plus pose/expression accuracy for the
    two human-body cliparts.

    NOTE(review): the slices below assume columns 23:58 and 58:93 encode the
    two human bodies (hb0/hb1) and all other columns are regular objects —
    confirm against the visual vocabulary behind `visual2index`.
    """

    def __init__(
        self,
        dataset_size,
        visual2index,
        index2pose_hb0,
        index2pose_hb1,
        index2expression_hb0,
        index2expression_hb1,
    ):
        self.dataset_size = dataset_size
        self.visual2index = visual2index
        # Lookup tables from a clipart column index (as a string key) to its
        # pose / expression id, per human body.
        self.index2pose_hb0 = index2pose_hb0
        self.index2pose_hb1 = index2pose_hb1
        self.index2expression_hb0 = index2expression_hb0
        self.index2expression_hb1 = index2expression_hb1
        # Create target and target arrays
        self.predictions = np.zeros((self.dataset_size, len(self.visual2index)))
        self.targets = np.zeros((self.dataset_size, len(self.visual2index)))
        self.index = 0

    def update_counters(self, preds, targets):
        """Append one batch of multi-hot predictions and targets."""
        batch_size = preds.shape[0]
        self.predictions[self.index : self.index + batch_size] = preds
        self.targets[self.index : self.index + batch_size] = targets
        self.index += batch_size

    def reset_counters(self):
        """Clear accumulated predictions/targets and rewind the write index."""
        self.predictions = np.zeros((self.dataset_size, len(self.visual2index)))
        self.targets = np.zeros((self.dataset_size, len(self.visual2index)))
        self.index = 0

    def per_object_prf(self):
        """Micro P/R/F1 (percentages, 1 decimal) over the non-human-body
        clipart columns."""
        targets_obj = np.concatenate(
            [self.targets[:, :23], self.targets[:, 93:]], axis=1
        )
        preds_obj = np.concatenate(
            [self.predictions[:, :23], self.predictions[:, 93:]], axis=1
        )
        precision, recall, f1, _ = precision_recall_fscore_support(
            targets_obj, preds_obj, average="micro"
        )
        return (
            np.round(precision * 100, decimals=1),
            np.round(recall * 100, decimals=1),
            np.round(f1 * 100, decimals=1),
        )

    def posses_expressions_accuracy(self):
        """Pose and expression accuracy (percent) over scenes that actually
        contain a human body; each present body contributes one sample."""
        # Count scenes containing each human body.
        num_targets_hbo = len(
            [target for target in self.targets[:, 23:58] if target.sum() > 0]
        )
        num_targets_hb1 = len(
            [target for target in self.targets[:, 58:93] if target.sum() > 0]
        )
        targets_pose = np.zeros((num_targets_hbo + num_targets_hb1,))
        targets_expr = np.zeros((num_targets_hbo + num_targets_hb1,))
        predicts_pose = np.zeros((num_targets_hbo + num_targets_hb1,))
        predicts_expr = np.zeros((num_targets_hbo + num_targets_hb1,))
        index = 0
        for i in range(self.targets.shape[0]):
            if self.targets[i, 23:58].sum() > 0:
                # Get target index
                target_index = str(self.targets[i, 23:58].argmax() + 23)
                # Get predictions index
                pred_index = str(self.predictions[i, 23:58].argmax() + 23)
                # Update pose arrays
                targets_pose[index] = self.index2pose_hb0[target_index]
                predicts_pose[index] = self.index2pose_hb0[pred_index]
                # Update expression arrays
                targets_expr[index] = self.index2expression_hb0[target_index]
                predicts_expr[index] = self.index2expression_hb0[pred_index]
                # Update index
                index += 1
            if self.targets[i, 58:93].sum() > 0:
                # Get target index
                target_index = str(self.targets[i, 58:93].argmax() + 58)
                # Get predictions index
                pred_index = str(self.predictions[i, 58:93].argmax() + 58)
                # Update pose arrays
                targets_pose[index] = self.index2pose_hb1[target_index]
                predicts_pose[index] = self.index2pose_hb1[pred_index]
                # Update expression arrays
                targets_expr[index] = self.index2expression_hb1[target_index]
                predicts_expr[index] = self.index2expression_hb1[pred_index]
                # Update index
                index += 1
        return (
            np.round(accuracy_score(targets_pose, predicts_pose) * 100, decimals=1),
            np.round(accuracy_score(targets_expr, predicts_expr) * 100, decimals=1),
        )
| StarcoderdataPython |
261465 | <reponame>nathanielsimard/improving-fs-ssl
from copy import deepcopy
from time import time as time
from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from mcp.data.dataset.transforms import KorniaTransforms, TransformType
from mcp.model.base import freeze_weights
from mcp.model.utils import BatchNormHead
from mcp.task.base import Task, TaskOutput
from mcp.task.compute import TaskCompute
class TrainableModule(nn.Module):
    """Container bundling the trainable BYOL heads so that they share a
    single state dict and parameter list."""

    def __init__(self, head_projection: nn.Module, head_prediction: nn.Module):
        """Register the projection and prediction heads as submodules."""
        super().__init__()
        self.head_projection = head_projection
        self.head_prediction = head_prediction
class BYOLTask(Task):
    """BYOL self-supervised task: online heads are trained to predict the
    outputs of an EMA ("momentum") copy of the encoder + projection head,
    applied to two differently augmented views of the same batch."""

    def __init__(
        self,
        embedding_size: int,
        transforms: KorniaTransforms,
        head_size: int,
        hidden_size: int,
        tau: float,
        scale: Tuple[float, float],
        key_transforms: Optional[Tuple[str, str]],
        key_forwards: Optional[Tuple[str, str]],
        compute: TaskCompute,
    ):
        super().__init__()
        # EMA decay for the momentum modules (closer to 1 = slower updates).
        self.tau = tau
        self.compute = compute
        # Optional cache keys to share transformed inputs / encoder outputs
        # with other tasks via `compute`.
        self.key_forwards = key_forwards
        self.key_transforms = key_transforms
        head_projection = BatchNormHead(embedding_size, hidden_size, head_size)
        head_prediction = BatchNormHead(head_size, hidden_size, head_size)
        self.trainable = TrainableModule(head_projection, head_prediction)
        # Momentum (target) modules are created lazily on the first `run`.
        self._momentum_encoder: Optional[nn.Module] = None
        self._momentum_head_projection: Optional[nn.Module] = None
        self._initial_state_dict = self.state_dict()
        self._training = True
        # Augmentation pipeline applied to produce each view.
        self.transforms: List[TransformType] = [
            transforms.resize(),
            transforms.color_jitter(hue=0.1, p=0.8),
            transforms.grayscale(p=0.2),
            transforms.random_flip(),
            transforms.gaussian_blur(p=0.1),
            transforms.random_resized_crop(scale=scale),
            transforms.normalize(),
        ]

    @property
    def name(self):
        """Human-readable task name."""
        return "BYOL"

    @property
    def initial_state_dict(self):
        """State dict captured at construction time, before any training."""
        return self._initial_state_dict

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the full augmentation pipeline to a batch of images."""
        for t in self.transforms:
            x = t(x)
        return x

    def run(
        self, encoder: nn.Module, x: torch.Tensor, y: Optional[torch.Tensor] = None
    ) -> TaskOutput:
        """One BYOL step: build two views, run the online and momentum
        branches, and return the symmetric normalized-MSE loss.
        `y` is accepted for interface compatibility but unused (BYOL is
        label-free)."""
        self._update_momentum_model(encoder, self.trainable.head_projection)
        x1_tfm, x2_tfm = self._compute_transform(x)
        x1, x2 = self._compute_forward(encoder, x1_tfm, x2_tfm)
        online_proj_one = self.trainable.head_projection(x1)
        online_proj_two = self.trainable.head_projection(x2)
        online_pred_one = self.trainable.head_prediction(online_proj_one)
        online_pred_two = self.trainable.head_prediction(online_proj_two)
        # Target branch: momentum encoder + projection, no gradients.
        with torch.no_grad():
            x1 = self._momentum_encoder(x1_tfm)  # type: ignore
            x2 = self._momentum_encoder(x2_tfm)  # type: ignore
            target_proj_one = self._momentum_head_projection(x1)  # type: ignore
            target_proj_two = self._momentum_head_projection(x2)  # type: ignore
        # Symmetric loss: each online prediction chases the other view's target.
        loss_one = self.loss(online_pred_one, target_proj_two.detach())
        loss_two = self.loss(online_pred_two, target_proj_one.detach())
        loss = (loss_one + loss_two).mean()
        metric = loss.cpu().detach().item()
        return TaskOutput(loss=loss, metric=metric, metric_name="MSE-norm", time=time())

    def _compute_transform(self, x):
        # With cache keys, each transformed view is computed once and shared.
        if self.key_transforms is None:
            x1_tfm = self.transform(x)
            x2_tfm = self.transform(x)
        else:
            x1_tfm = self.compute.cache_transform(
                x, training=True, key=self.key_transforms[0]
            )
            x2_tfm = self.compute.cache_transform(
                x, training=True, key=self.key_transforms[1]
            )
        return x1_tfm, x2_tfm

    def _compute_forward(self, encoder, x1_tfm, x2_tfm):
        # With cache keys, encoder outputs are computed once and shared.
        if self.key_forwards is None:
            x1 = encoder(x1_tfm)
            x2 = encoder(x2_tfm)
        else:
            x1 = self.compute.cache_forward(x1_tfm, encoder, key=self.key_forwards[0])
            x2 = self.compute.cache_forward(x2_tfm, encoder, key=self.key_forwards[1])
        return x1, x2

    def train(self, mode: bool = True):
        # Track training mode locally so EMA updates pause during evaluation.
        self._training = mode
        return super().train(mode)

    def _update_momentum_model(self, encoder: nn.Module, head_projection: nn.Module):
        # Lazily clone the online modules the first time around, then apply
        # an EMA update on every training-mode call.
        if self._momentum_encoder is None:
            self._momentum_encoder = _initialize_momentum_module(encoder)
        if self._momentum_head_projection is None:
            self._momentum_head_projection = _initialize_momentum_module(
                head_projection
            )
        if self._training:
            with torch.no_grad():
                _update_momentum_module(encoder, self._momentum_encoder, self.tau)
                _update_momentum_module(
                    head_projection, self._momentum_head_projection, self.tau
                )

    def loss(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # BYOL loss: MSE between L2-normalized vectors, equal to 2 - 2*cos(x, y).
        x = F.normalize(x, dim=-1, p=2)
        y = F.normalize(y, dim=-1, p=2)
        return 2 - 2 * (x * y).sum(dim=-1)

    def state_dict(self):
        value = {}
        value["trainable"] = self.trainable.state_dict()
        value["momentum_encoder"] = _state_dict_or_none(self._momentum_encoder)
        value["momentum_head_projection"] = _state_dict_or_none(
            self._momentum_head_projection
        )
        return value

    def load_state_dict(self, value):
        # NOTE(review): state_dict() stores the momentum modules' *state
        # dicts*, but load_state_dict() assigns the stored values directly as
        # the modules themselves — confirm checkpoints round-trip as intended.
        self.trainable.load_state_dict(value["trainable"])
        self._momentum_encoder = value["momentum_encoder"]
        self._momentum_head_projection = value["momentum_head_projection"]
def _state_dict_or_none(module: Optional[nn.Module]):
    """Return the module's state dict, or None when no module is given."""
    return None if module is None else module.state_dict()
def _update_momentum_module(module: nn.Module, module_momentum: nn.Module, tau: float):
    """EMA update: move each momentum parameter a (1 - tau) step toward its
    online counterpart."""
    for online_param, momentum_param in zip(
        module.parameters(), module_momentum.parameters()
    ):
        momentum_param.data = momentum_param.data * tau + online_param.data * (1.0 - tau)
def _initialize_momentum_module(module: nn.Module) -> nn.Module:
    """Clone the module and freeze the copy, so only EMA updates change it."""
    clone = deepcopy(module)
    freeze_weights(clone)
    return clone
| StarcoderdataPython |
3378954 | <reponame>riccardobernardi/MLL-machine-learning-language
new_revision = """
//////////////PARSER RULES
// | macro_exp | macro_mod | macro_pip
mll : ( model | macro | parmac | comment | summa )+
model : ID COLON [_rc] [PI] e (_nn)*
_rc.2 : RR | CC
_nn : ( PI e )
pyt : "!" (W | NUM | "," | " " | ":" | "+" | "=" | "[" | "]"| "(" | ")" | "'" )* WSS+ WSP*
comment : HASH (W | NUM | "," | " " | ":" | "+" | "=" | "[" | "]" | "(" | ")" | "'" )* WSS+ WSP*
parmac : ID DOLLAR ID (OR ID)*
summa : ID SCO ID
macro : ID EQC [ID] e
e : ID
| _mm
| LP [e] RP // applicazione di funzione
| NUMBER
| SQ W SQ
| e PLUS e
| e MULT e
| e SUB e
| e DIV e
| AT ID LP RP
| AT e
| ID (e | comp )+
_mm.2 : ( ID AR )
| ( ID AR ID ID )
comp: ID EQ LSP (e
| e COLON
| e CO )+ RSP
| ID EQ LP (e
| e CO)+ RP
| ID EQ BL (e
| e COLON)+ BR
| ID EQ SQ W SQ
| ID EQ NUMBER
| ID EQ ID
| ID LP ( e CO )+ e RP
| LP ( e CO )+ e RP
//////////////LEXER TERMINALS
SC : ";"
DOLLAR : "$"
SCO : "+:"
MP : "p:"
ME : "e:"
MM : "m:"
MULT : "*" WS
OR : "or" WS
AT : "@"
SUB : "-" WS
DIV : "/" WS
AR : "->" WS
EX : "!" WS
HASH : WS "#" [" "]+
RR : ("REGRESSOR" | "regressor") WS
CC : ("CLASSIFIER" | "classifier") WS
IS : "is" WS
EQC : ":=" WS
PLUS : "+" WS
WITH : "with" WS
PI : "|" WS
CO : "," WS
DO : "." WS
SQ : "'" WS
EQ : "=" WS
BR : "}" WS
BL : "{" WS
LP : "(" WS
RP : ")" WS
LSP : "[" WS
RSP : "]" WS
ID : WS W [INTEGER | W | DO W]+ WS
NWID : W [INTEGER | W]+
WID : W [INTEGER | W]+
COLON : ":" WS
W : ("a".."z" | "A".."Z" | "_" )+
WS : (" " | "\\n" | "\\t" | "\\r")*
WSP : (" " | "\\n" | "\\t" | "\\r")+
WSS : ("\\n")
INTEGER : ("0".."9")+
DECIMAL : INTEGER ["." INTEGER]
NUMBER : NUM WS
NUM : (DECIMAL | INTEGER)
"""
def get_rev_grammar():
    """Return the revised MLL grammar (the `new_revision` string above),
    ready to be handed to the parser generator."""
    return new_revision
6406129 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from src.qldb_streaming_to_es_sample.clients.elasticsearch import ElasticsearchClient
from requests_aws4auth import AWS4Auth
from .test_constants import TestConstants
from src.qldb_streaming_to_es_sample.constants import Constants
from unittest.mock import MagicMock
from .fixtures import elasticsearch_error
import unittest
# Static AWS SigV4 signing setup; dummy credentials are fine here because the
# low-level Elasticsearch client is fully mocked in these tests.
region = 'us-east-1'
service = 'es'
awsauth = AWS4Auth("access_key", "secret_key", region, service, "session_token")
host = "elasticsearch_host"
# Shared client under test; individual tests stub out its `es_client` methods.
elasticsearch_client = ElasticsearchClient(host=host, awsauth=awsauth)
def test_indexing():
    """Indexing delegates to the underlying ES client with external versioning."""
    # Arrange: stub out the low-level client call.
    elasticsearch_client.es_client.index = MagicMock(return_value={"status": "success"})
    # Act
    elasticsearch_client.index(
        body=TestConstants.PERSON_DATA,
        version=1,
        index=Constants.PERSON_INDEX,
        id=TestConstants.PERSON_DATA["GovId"],
    )
    # Assert
    elasticsearch_client.es_client.index.assert_called_once_with(
        body=TestConstants.PERSON_DATA,
        id=TestConstants.PERSON_DATA["GovId"],
        index=Constants.PERSON_INDEX,
        version=1,
        version_type='external',
    )
def test_bad_input_exceptions_are_handled_for_indexing(elasticsearch_error):
    """Handled ES errors during indexing must be swallowed; index() returns None."""
    for error_class in TestConstants.EXCEPTIONS_THAT_SHOULD_BE_HANDLED:
        # First call raises the handled error, the follow-up call succeeds.
        elasticsearch_client.es_client.index = MagicMock(
            side_effect=[elasticsearch_error(error_class), None])
        result = elasticsearch_client.index(
            body=TestConstants.PERSON_DATA,
            version=1,
            index=Constants.PERSON_INDEX,
            id=TestConstants.PERSON_DATA["GovId"],
        )
        # The wrapper reports nothing back on the handled-error path.
        assert result == None
def test_deletion():
    """delete() must delegate to the underlying ES client with external versioning."""
    # Mock out the low-level client call.
    elasticsearch_client.es_client.delete = MagicMock(return_value={"status":"success"})
    # Trigger the wrapper.
    elasticsearch_client.delete(
        version=1,
        index=Constants.PERSON_INDEX,
        id=TestConstants.PERSON_DATA["GovId"],
    )
    # Verify the delegation arguments, including version_type='external'.
    elasticsearch_client.es_client.delete.assert_called_once_with(
        id=TestConstants.PERSON_DATA["GovId"],
        index=Constants.PERSON_INDEX,
        version=1,
        version_type='external',
    )
def test_bad_input_exceptions_are_handled_for_deletion(elasticsearch_error):
    """Handled ES errors during deletion must be swallowed; delete() returns None."""
    for error_class in TestConstants.EXCEPTIONS_THAT_SHOULD_BE_HANDLED:
        # First call raises the handled error, the follow-up call succeeds.
        elasticsearch_client.es_client.delete = MagicMock(
            side_effect=[elasticsearch_error(error_class), None])
        result = elasticsearch_client.delete(
            version=1,
            index=Constants.PERSON_INDEX,
            id=TestConstants.PERSON_DATA["GovId"],
        )
        # The wrapper reports nothing back on the handled-error path.
        assert result == None
3279345 | <reponame>elexis-eu/word-game
import mysql.connector
import csv
from decimal import *
import os
import time
from config import DBconfig, CronRoot
# Run relative to the configured cron root so the ../../exports/ path below resolves.
if CronRoot.path is not None:
    os.chdir(CronRoot.path)
# NOTE(review): `<PASSWORD>` is a scrubbed placeholder left by a secrets filter
# and is not valid Python -- restore `passwd=DBconfig.passwd` (or equivalent)
# before running.
mydb = mysql.connector.connect(
    host=DBconfig.host,
    port=DBconfig.port,
    user=DBconfig.user,
    passwd=<PASSWORD>,
    database=DBconfig.database
)
# Two cursors on one connection: tuple rows for simple reads, dict rows for exports.
mycursor = mydb.cursor()
mycursorDict = mydb.cursor(dictionary=True)
def get_structures():
    """Load all rows of the `structure` table keyed by id.

    Returns a dict: {id: {"name": ..., "headword_position": ...}}.
    """
    mycursor.execute("SELECT id, name, headword_position \
                      FROM structure;")
    structures = {}
    for struct_id, name, headword_position in mycursor.fetchall():
        structures[struct_id] = {"name": name, "headword_position": headword_position}
    return structures
# ---------------------------------------------------------------------------
# Main export pass: claim one pending 'col_drag_log' export request, dump the
# matching collocation drag logs to CSV and mark the request finished.
# All values are bound as query parameters (instead of the previous string
# concatenation) to rule out SQL injection and quoting bugs.
# ---------------------------------------------------------------------------
mycursorDict.execute("SELECT * FROM admin_exports WHERE type = 'col_drag_log' and filename IS NULL AND started IS NULL LIMIT 1")
new_export = mycursorDict.fetchone()
if new_export is None:
    # nothing queued -- this cron run has no work to do
    exit()
# Mark the request as started so a concurrent cron run will not pick it up again.
mycursor.execute("UPDATE admin_exports SET started = NOW() WHERE id = %s", (new_export['id'],))
mydb.commit()
filename = str(new_export['type'])+"_"+str(new_export['admin_user_id'])+"_"+time.strftime("%Y%m%d_%H%M%S")+".csv"
try:
    structures = get_structures()
    with open("../../exports/"+filename, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        # export bounds are inclusive whole days
        mycursorDict.execute(
            "SELECT * FROM collocation_log_drag WHERE created BETWEEN %s AND %s",
            (new_export['date_from'].strftime("%Y-%m-%d") + " 00:00",
             new_export['date_to'].strftime("%Y-%m-%d") + " 23:59"))
        col_log = mycursorDict.fetchall()
        writer.writerow(["Level", "Structure", "Headword position", "Word shown", "Word selected", "CollocationID", "Score", "User", "Created"])
        for log in col_log:
            print(log)
            writer.writerow([log['level'], structures[log['structure_id']]['name'], structures[log['structure_id']]['headword_position'], log['word_shown'], log['word_selected'], log['col'], log['score'], log['user'], str(log['created'])])
    # record the produced file and completion time
    mycursor.execute("UPDATE admin_exports SET filename = %s, finished = NOW() WHERE id = %s",
                     (filename, new_export['id']))
    mydb.commit()
except Exception as e:
    # NOTE(review): failures are only printed; the row keeps `started` set with
    # filename NULL, so it is never retried -- confirm this is intended.
    print(str(e))
3390887 | <reponame>dmccloskey/listDict
# system
from copy import copy
# Calculate utilities
from .listDict_dependencies import *
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
class listDict():
'''Utility functions for converting and extracting a list of
dictionaries into lists and arrays'''
def __init__(self,listDict_I=None,
dictList_I=None,
record_I=None,
dataFrame_I=None,
pivotTable_I=None):
self.data=None; # of type list, nparray, etc.
if listDict_I:
self.listDict=listDict_I;
else:
self.listDict=[];
if dictList_I:
self.dictList=dictList_I;
else:
self.dictList={};
if record_I: #sql record
self.record=record_I;
else:
self.record={};
if not dataFrame_I is None: #pandas data frame representation
self.dataFrame=dataFrame_I;
else:
self.dataFrame=None;
if pivotTable_I:#pandas pivot table representation
self.pivotTable=pivotTable_I;
else:
self.pivotTable=None;
    def convert_listDict2dataMatrix(self,
        row_label_I,column_label_I,value_label_I,
        row_variables_I=[],
        column_variables_I=[],
        data_IO=[],
        na_str_I=None,
        filter_rows_I=[],
        filter_columns_I=[],
        order_rows_I=[],
        order_columns_I=[],
        order_rowsFromTemplate_I=[],
        order_columnsFromTemplate_I=[],):
        '''convert a list of dictionary rows to a numpy array
        INPUT:
        data_I = [{}]
        row_label_I = column_id of the row labels
        column_label_I = column_id of the column labels
        value_label_I = column_id of the value label
        OPTIONAL INPUT:
        row_variables_I = list of keys to extract out with the rows
        column_variables_I = list of keys to extract out with the columns
        data_IO = pre-initialized data list
        na_str_I = optional string or value to pre-initialize the output data with
        filter_rows_I = list of row labels to include
        filter_columns_I = list of column labels to include
        order_rows_I = list of integers defining the order of the rows
        order_columns_I = list of integers defining the order of the rows
        order_rowsFromTemplate_I = list of row labels defining the order of the rows
        order_columnsFromTemplate_I = list of row labels defining the order of the rows
        OUTPUT:
        data_O = nparray of shape(len(row_label_unique),len(column_label_unique))
        row_labels_O = row labels of data_O
        column_labels_O = column labels of data_O
        OPTIONAL OUTPUT:
        row_variables_O = {"row_variables_I[0]:[...],..."} where each list is of len(row_labels_O)
        column_variables_O = {"row_variables_I[0]:[...],..."} where each list is of len(column_labels_O)
        NOTES (review):
        - data_IO is accepted but never used in the body -- TODO confirm it can be dropped.
        - get_uniqueValues is re-defined later in this class with a
          (self, column_I) signature; that later definition shadows the
          list-based one required by the filter_I keyword used below -- TODO resolve.
        - cells whose value is falsy (0, '', None) are skipped and keep the
          na_str_I initialization value -- confirm this is intended for numeric 0.
        '''
        data_O = [];
        data_I = self.listDict;
        # get unique rows and columns
        nrows,row_labels_O = self.get_uniqueValues(row_label_I,filter_I=filter_rows_I);
        ncolumns,column_labels_O = self.get_uniqueValues(column_label_I,filter_I=filter_columns_I);
        # order rows and columns
        row_labels_O,column_labels_O = self.order_rowAndColumnLabels(row_labels_O,column_labels_O,
                        order_rows_I=order_rows_I,
                        order_columns_I=order_columns_I,
                        order_rowsFromTemplate_I=order_rowsFromTemplate_I,
                        order_columnsFromTemplate_I=order_columnsFromTemplate_I,
                        );
        # initialize the data matrix
        data_O = self.initialize_dataMatrix(nrows,ncolumns,na_str_I);
        # factor
        # pre-allocate one list per requested row/column co-variable
        row_variables_O = {};
        if row_variables_I:
            for cv in row_variables_I:
                row_variables_O[cv]=[];
        column_variables_O = {};
        if column_variables_I:
            for cv in column_variables_I:
                column_variables_O[cv]=[];
        #make the dataMatrixList
        # full scan of data_I for every (row, column) pair: O(rows*cols*len(data))
        cnt = 0;
        cnt_bool = True;
        cnt2_bool = True;
        for r_cnt,r in enumerate(row_labels_O):
            cnt2_bool = True;
            for c_cnt,c in enumerate(column_labels_O):
                for d in data_I:
                    if d[column_label_I] == c and d[row_label_I] == r:
                        if d[value_label_I]:
                            data_O[r_cnt,c_cnt] = d[value_label_I];
                        # cnt_bool stays True only while filling the first row,
                        # so column co-variables are collected once per column
                        if cnt_bool and column_variables_I:
                            for cv in column_variables_I:
                                column_variables_O[cv].append(d[cv]);
                        # cnt2_bool stays True only for the first hit of each row,
                        # so row co-variables are collected once per row
                        if cnt2_bool and row_variables_I:
                            for rv in row_variables_I:
                                row_variables_O[rv].append(d[rv]);
                        cnt2_bool = False;
                        break;
                cnt = cnt+1
            cnt_bool = False;
        #return output based on input
        # the return arity mirrors which co-variable inputs were supplied
        if row_variables_I and column_variables_I:
            return data_O,row_labels_O,column_labels_O,row_variables_O,column_variables_O;
        elif row_variables_I:
            return data_O,row_labels_O,column_labels_O,row_variables_O;
        elif column_variables_I:
            return data_O,row_labels_O,column_labels_O,column_variables_O;
        else:
            return data_O,row_labels_O,column_labels_O;
    def convert_listDict2dataMatrixList(self,
        row_label_I,column_label_I,value_label_I,
        row_variables_I=[],
        column_variables_I=[],
        data_IO=[],
        na_str_I="NA",
        order_rows_I=[],
        order_columns_I=[],
        order_rowsFromTemplate_I=[],
        order_columnsFromTemplate_I=[],
        ):
        '''convert a list of dictionary rows to a numpy array
        INPUT:
        data_I = [{}]
        row_label_I = column_id of the row labels
        column_label_I = column_id of the column labels
        value_label_I = column_id of the value label
        OPTIONAL INPUT:
        row_variables_I = list of keys to extract out with the rows
        column_variables_I = list of keys to extract out with the columns
        data_IO = pre-initialized data list
        na_str_I = optional string or value to pre-initialize the output data with
        order_rows_I = list of integers defining the order of the rows
        order_columns_I = list of integers defining the order of the rows
        order_rowsFromTemplate_I = list of row labels defining the order of the rows
        order_columnsFromTemplate_I = list of row labels defining the order of the rows
        OUTPUT:
        data_O = list of values ordered according to (len(row_label_unique),len(column_label_unique))
        row_labels_O = row labels of data_O
        column_labels_O = column labels of data_O
        OPTIONAL OUTPUT:
        row_variables_O = {"row_variables_I[0]:[...],..."} where each list is of len(row_labels_O)
        column_variables_O = {"row_variables_I[0]:[...],..."} where each list is of len(column_labels_O)
        NOTES (review):
        - unlike convert_listDict2dataMatrix, the order_* keyword arguments are
          accepted but NOT forwarded to order_rowAndColumnLabels below -- TODO fix.
        - the na_str_I parameter is likewise ignored: initialize_dataMatrixList
          is called with the literal 'NA' -- TODO fix.
        - data_IO is accepted but never used.
        - get_uniqueValues is re-defined later in this class with a different
          signature, shadowing the list-based version these calls expect -- TODO resolve.
        '''
        data_O = [];
        data_I = self.listDict;
        # get unique rows and columns
        nrows,row_labels_O = self.get_uniqueValues(row_label_I);
        ncolumns,column_labels_O = self.get_uniqueValues(column_label_I);
        # order rows and columns
        row_labels_O,column_labels_O = self.order_rowAndColumnLabels(row_labels_O,column_labels_O);
        # factor
        # pre-allocate one list per requested row/column co-variable
        row_variables_O = {};
        if row_variables_I:
            for cv in row_variables_I:
                row_variables_O[cv]=[];
        column_variables_O = {};
        if column_variables_I:
            for cv in column_variables_I:
                column_variables_O[cv]=[];
        # initialize the data list
        data_O = self.initialize_dataMatrixList(nrows,ncolumns,na_str_I='NA');
        #make the dataMatrixList
        # cnt walks the flattened row-major position; full scan of data_I per cell
        cnt = 0;
        cnt_bool = True;
        cnt2_bool = True;
        for r in row_labels_O:
            cnt2_bool = True;
            for c in column_labels_O:
                for d in data_I:
                    if d[column_label_I] == c and d[row_label_I] == r:
                        # falsy values (0, '', None) keep the 'NA' placeholder
                        if d[value_label_I]:
                            data_O[cnt] = d[value_label_I];
                        # column co-variables are collected only during the first row
                        if cnt_bool and column_variables_I:
                            for cv in column_variables_I:
                                column_variables_O[cv].append(d[cv]);
                        # row co-variables are collected once per row
                        if cnt2_bool and row_variables_I:
                            for rv in row_variables_I:
                                row_variables_O[rv].append(d[rv]);
                        cnt2_bool = False;
                        break;
                cnt = cnt+1
            cnt_bool = False;
        #return output based on input
        # the return arity mirrors which co-variable inputs were supplied
        if row_variables_I and column_variables_I:
            return data_O,row_labels_O,column_labels_O,row_variables_O,column_variables_O;
        elif row_variables_I:
            return data_O,row_labels_O,column_labels_O,row_variables_O;
        elif column_variables_I:
            return data_O,row_labels_O,column_labels_O,column_variables_O;
        else:
            return data_O,row_labels_O,column_labels_O;
def order_rowAndColumnLabels(self,
row_labels_I,column_labels_I,
order_rows_I=[],
order_columns_I=[],
order_rowsFromTemplate_I=[],
order_columnsFromTemplate_I=[],):
'''Order rows and columns according to input
INPUT:
row_labels_I = list of unique row labels
column_labels_I = list of unique column labels
OUTPUT:
row_labels_O = list of ordered row labels
column_labels_O = list of ordered column labels
'''
row_labels_O,column_labels_O=row_labels_I,column_labels_I;
# order the rows and columns
if order_rows_I:
row_labels_O = self.order_labels(row_labels_I,order_rows_I);
if order_columns_I:
column_labels_O = self.order_labels(column_labels_I,order_columns_I);
if order_rowsFromTemplate_I:
row_labels_O = self.order_labelsFromTemplate(row_labels_I,order_rowsFromTemplate_I);
if order_columnsFromTemplate_I:
column_labels_O = self.order_labelsFromTemplate(column_labels_I,order_columnsFromTemplate_I);
return row_labels_O,column_labels_O;
    def get_uniqueValues(self,key_I,filter_I=[]):
        '''get the unique values for a column key
        INPUT:
        key_I = string, column key
        filter_I = list of column values to keep (an include-list; values not
            in filter_I are dropped)
        OUTPUT:
        nvalues_O = # of values
        uniqueValues_O = sorted list of unique values
        NOTE (review):
        a second method named get_uniqueValues (DataFrame-based, signature
        (self, column_I)) is defined later in this class and shadows this one
        at runtime; internal callers that pass filter_I therefore break --
        TODO rename one of the two definitions.
        '''
        nvalues_O=0;
        uniqueValues_O=[];
        data_I = self.listDict;
        # get all values
        values = [];
        for d in data_I:
            values.append(d[key_I]);
        # filter the values
        # keeps only values present in filter_I (include semantics)
        if filter_I:
            values = [x for x in values if x in filter_I];
        # get the unique values
        uniqueValues_O = sorted(set(values))
        # count the values
        nvalues_O = len(uniqueValues_O);
        return nvalues_O,uniqueValues_O;
def order_labels(self,labels_I,order_I):
'''Order the labels from a pre-defined index
INPUT:
labels_I = list of strings
order_I = list of integers defining the order of the labels
OUTPUT:
labels_O = list of ordered strings
'''
labels_O = [];
if len(labels_I)==len(order_I):
labels_O = [labels_I[i] for i in order_I];
else:
print('length of labels and order do not match!');
return labels_O;
def order_labelsFromTemplate(self,labels_I,template_I):
'''Order the labels using a template
NOTES:
The template may include values not in the labels
ASSUMPTIONS:
The template includes all values found in the labels
INPUT:
labels_I = list of strings
template_I = list of strings
OUTPUT:
labels_O = list of ordered strings
'''
labels_O = [];
# make the new template
template = [];
if len(template_I)>=len(labels_I):
template = [i for i in template_I if i in labels_I];
else:
print('length of labels is less than the template!');
return labels_O;
# order the labels
if len(template)==len(labels_I):
labels_O = template;
#for label in labels_I:
# for temp in template:
# if label == temp:
# labels_O.append(label);
# break;
else:
print('template does not contain all labels!');
return labels_O;
def count_missingValues(self,values_I,na_str_I='NA'):
'''count the number of occurances of a missing value in a list of values
INPUT:
values_I = list of numeric values
na_str_I = string identifier of a missing value
OUTPUT:
mv_O = # of missing values
'''
mv_O = 0;
for c in values_I:
if c==na_str_I:
mv_O += 1;
return mv_O;
def initialize_dataMatrixList(self,nrows_I,ncolumns_I,na_str_I='NA'):
'''initialize dataMatrixList with missing values
INPUT:
nrows_I = int, # of rows of data
ncolumns_I - int, # of columns of data
na_str_I = string identifier of a missing value
OUTPUT:
dataMatrixList_O = list of na_str_I of length nrows_I*ncolumns_I'''
dataMatrixList_O = [na_str_I for r in range(nrows_I*ncolumns_I)];
return dataMatrixList_O;
def initialize_dataMatrix(self,nrows_I,ncolumns_I,na_str_I='NA'):
'''initialize dataMatrix with missing values
INPUT:
nrows_I = int, # of rows of data
ncolumns_I - int, # of columns of data
na_str_I = string identifier of a missing value
OUTPUT:
dataMatrixList_O = list of na_str_I of length nrows_I*ncolumns_I'''
if na_str_I:
dataMatrix_O = npfull((nrows_I,ncolumns_I), na_str_I);
else:
dataMatrix_O = npzeros((nrows_I,ncolumns_I));
return dataMatrix_O;
def extract_arrayFromListDict(self,key_I):
'''convert a list of dictionary rows to a numpy array
INPUT:
key_I = string, dictionary key to extract values from
OUTPUT:
data_O = numpy array of values
'''
data_I = self.listDict;
data_O = np.zeros_like(data_I);
for i,d in enumerate(data_I):
data_O[i]=d[key_I];
return data_O;
    def convert_listDict2ColumnGroupListDict(self,
            value_labels_I = [],
            column_labels_I = [],
            feature_labels_I = [],
            na_str_I=None,
            columnValueConnector_str_I='_',
            ):
        '''
        Convert a linearized listDict into a listDict with additional column labels that are unique
        values in the group column and filled with values in the values column
        INPUT:
        value_labels_I = [] string, column that will be used to fill
            the additional columns formed by the unique values
            in the group column
        column_labels_I = [] string, unique values will form additional column labels
        feature_labels_I = [] string, columns to be included
        OUTPUT:
        ASSUMPTIONS:
        columnValueConnector_str_I is not a substring in any of the value_labels or in the column_label_I
        NOTES (review):
        - group keys are built with str.join over the column values, so the
          values under column_labels_I must be strings -- TODO confirm.
        - rows are keyed by str(dict.values()); this relies on consistent key
          insertion order across all rows of the listDict -- TODO confirm.
        '''
        data_I = self.listDict;
        #get unique group values
        ncolumns_O,uniqueColumns_O = self.get_uniqueGroups(column_labels_I);
        #get unique feature values
        nfeatures_O,uniqueFeatures_O = self.get_uniqueGroups(feature_labels_I);
        #initialize the columnGroupListDict
        # skeleton: one dict per unique feature combination, pre-filled with na_str_I
        listDict_O,columnValueHeader_O = self.initialize_columnGroupListDict(
            uniqueFeatures_I = uniqueFeatures_O,
            uniqueColumns_I = uniqueColumns_O,
            value_labels_I = value_labels_I,
            column_labels_I = column_labels_I,
            feature_labels_I =feature_labels_I,
            na_str_I=na_str_I,
            columnValueConnector_str_I=columnValueConnector_str_I,
            );
        #make the new listDict
        # sanity check: the skeleton must cover every unique feature combination
        assert(nfeatures_O==len(listDict_O));
        #for d in data_I:
        #    for cnt_feature,features in enumerate(uniqueFeatures_O):
        #        d_features = {k: d[k] for k in features.keys()};
        #        if d_features == features:
        #            for value in value_labels_I:
        #                key = columnValueConnector_str_I.join([d[k] for k in column_labels_I]);
        #                key += columnValueConnector_str_I + value;
        #                listDict_O[cnt_feature][key] = d[value];
        #            break;
        # single pass: each source row fills its feature-row's group columns
        for d in data_I:
            d_features = {k: d[k] for k in feature_labels_I};
            feature_key = str(d_features.values());
            for value in value_labels_I:
                key = columnValueConnector_str_I.join([d[k] for k in column_labels_I]);
                key += columnValueConnector_str_I + value;
                listDict_O[feature_key][key] = d[value];
        # drop the temporary feature-key indexing; return a plain list of rows
        listDict_O = [v for k,v in listDict_O.items()];
        return listDict_O,columnValueHeader_O;
def initialize_columnGroupListDict(self,
uniqueFeatures_I = [],
uniqueColumns_I = 'sample_name',
value_labels_I = [],
column_labels_I = [],
feature_labels_I = [],
na_str_I='NA',
columnValueConnector_str_I='_',
):
'''
Convert a linearized listDict into a listDict with additional column labels that are unique
values in the group column and filled with values in the values column
INPUT:
...
na_str_I = default string, float, boolean, integer, etc. to fill dictionary values
columnValueConnector_str_I = string, connector to join the uniqueColumns label with the value_labels
OUTPUT:
'''
# make the dict keys
dict_keys = [];
dict_keys.extend(feature_labels_I);
columnValueHeader_O = [];
for column in uniqueColumns_I:
for value in value_labels_I:
column_str = columnValueConnector_str_I.join([column[k] for k in column_labels_I]);
column_str += columnValueConnector_str_I + value;
columnValueHeader_O.append(column_str);
dict_keys.extend(columnValueHeader_O);
# make the na_str
if na_str_I:
na_str=na_str_I;
else:
na_str=0.0;
# make the initial listDict
#listDict_O = [{} for i in range(len(uniqueFeatures_I))];
#for cnt,feature in enumerate(uniqueFeatures_I):
# listDict_O[cnt] = copy.copy(feature);
# for key in columnValueHeader_O:
# listDict_O[cnt][key]=na_str_I;
listDict_O = {};
for cnt,feature in enumerate(uniqueFeatures_I):
feature_key = str(feature.values());
listDict_O[feature_key] = copy.copy(feature);
for key in columnValueHeader_O:
listDict_O[feature_key][key]=na_str_I;
return listDict_O,columnValueHeader_O;
def get_uniqueGroups(self,keys_I,filter_I=[]):
'''get the unique values for a group of column keys
INPUT:
key_I = string, column key
TODO:
filter_I = list of column groups to not include in the output
OUTPUT:
ngroups_O = # of groups
uniqueGroups_O = list of unique groups
'''
ngroups_O=0;
uniqueGroups_O=[];
data_I = self.listDict;
# get all groups
data_subset = [{} for i in range(len(data_I))];
for cnt,d in enumerate(data_I):
data_subset[cnt]={k: d[k] for k in keys_I};
uniqueGroups_O = [dict(tupleized) for tupleized in set(tuple(item.items()) for item in data_subset)]
# count the groups
ngroups_O = len(uniqueGroups_O);
return ngroups_O,uniqueGroups_O;
#Data adders
def append_dataFrame(self,dataFrame_I):
'''
add a new data set of rows to the dataframe
INPUT:
dataFrame_I = dataFrame
'''
self.dataFrame=self.dataFrame.append(dataFrame_I);
def append_listDict2dataFrame(self,listDict_I):
'''
add a new data set of rows to the dataframe
INPUT:
listDict_I = listDict
'''
df = pd.DataFrame(listDict_I);
self.dataFrame=self.dataFrame.append(df);
def add_column2DataFrame(self,column_label_I,data_I):
'''
add a new data column to the dataFrame
INPUT:
column_label_I = string
data_I = [] or numpy.array
'''
if self.dataFrame is None:
series = pd.Series(data=data_I);
self.dataFrame = series.to_frame(name=column_label_I);
else:
self.dataFrame.loc[:,column_label_I] = pd.Series(data_I, index=self.dataFrame.index);
#Data reset methods
def clear_listDict(self):
'''clear the list of dictionaries'''
self.listDict = [];
def clear_dictList(self):
'''clear the dictionary of lists'''
self.dictList = None;
def clear_dataFrame(self):
'''clear the dataFrame'''
self.dataFrame = None;
def clear_pivotTable(self):
'''clear the pivotTable'''
self.pivotTable = None;
def clear_allData(self):
'''clear the list of dicitonaries and the data'''
self.clear_listDict();
self.clear_dictList();
self.clear_dataFrame();
self.clear_pivotTable();
#Data io:
def import_listDict_csv(self,filename_I):
'''import a listDict from .csv
INPUT:
filename_I = string, name of the file
'''
data = base_importData();
data.read_csv(filename_I);
data.format_data();
self.set_listDict(data.data);
#data.clear_data();
def export_listDict_csv(self,filename_O):
'''export a listDict to .csv
INPUT:
filename_O = string, name of the file
'''
export = base_exportData(self.listDict);
export.write_dict2csv(filename_O);
#Setters
def set_listDict(self,listDict_I):
'''add a list of dictionaries'''
self.listDict = listDict_I;
def set_dictList(self,dictList_I):
'''add a dictionary of lists'''
self.dictList = dictList_I;
def set_record(self,record_I):
'''add a sql record'''
self.record = record_I;
def set_dataFrame(self,dataFrame_I):
'''make a pandas dataFrame from listDict'''
self.dataFrame=dataFrame_I;
def set_pivotTable(self,value_label_I,row_labels_I,column_labels_I):
'''make a pandas pivot_table from a pandas dataFrame'''
self.pivotTable = self.dataFrame.pivot_table(
values=value_label_I,
index = row_labels_I,
columns = column_labels_I);
#Getters
def get_listDict(self):
'''get a list of dictionaries'''
return self.listDict;
def get_dictList(self):
'''get a dictionary of lists'''
return self.dictList;
def get_record(self):
'''get a sql record'''
return self.record;
def get_dataFrame(self):
'''return dataFrame
'''
return self.dataFrame;
def get_pivotTable(self):
'''
return pivot table
'''
return self.pivotTable;
#Attribute conversions
def convert_listDict2DataFrame(self):
'''make a pandas dataFrame from listDict'''
self.dataFrame = pd.DataFrame(self.listDict);
def convert_record2DataFrame(self):
'''make a pandas dataFrame from a sql record'''
self.dataFrame = pd.DataFrame.from_records(self.record,columns=self.record[0].keys());
def convert_dictList2DataFrame(self):
'''make a pandas dataFrame from listDict'''
self.dataFrame = pd.DataFrame.from_dict(self.dictList);
def convert_dataFrame2ListDict(self):
'''
convert a dataFrame to a list of dictionaries
INPUT:
OUTPUT:
row_O = {}
.where((pd.notnull(df)), None)
'''
self.listDict=self.dataFrame.where((pd.notnull(self.dataFrame)), None).to_dict('records');
def convert_dataFrame2DictList(self):
'''
convert a dataFrame to a dictionaries of lists
INPUT:
OUTPUT:
row_O = {}
.where((pd.notnull(df)), None)
'''
dictList = {};
columns = self.dataFrame.columns.get_values();
for col in columns:
dictList[col]=self.dataFrame[col].get_values();
self.dictList=dictList;
def convert_listDict2ListDictValues(self,
value_key_name_I = 'value',
value_label_name_I = 'label',
value_labels_I=['var_proportion','var_cumulative']):
'''linearize a list of dictionaries by seriesLabels
to a linearized version for a multiseries bi plot
INPUT:
value_labels_I = list of table columns to use as individual values
OUTPUT:
data_O = list of dictionaries of len(listDict)*len(value_labels_I)
with additional keys "value" = value of value_labels_I[i]
"value_label" = value_labels_I[i]
'''
data_I = self.listDict;
data_O = [];
# make the linearized list
for d in data_I: #iterate through the original copy
for vl in value_labels_I:
data_tmp = copy.copy(d);
data_tmp[value_key_name_I]=d[vl];
data_tmp[value_label_name_I]=vl;
data_O.append(data_tmp);
# remove the value_label keys
for d in data_O:
for vl in value_labels_I:
del d[vl]
return data_O;
#Getters (specific to an attribute)
def get_dataMatrixList(self,na_str_I = None):
'''return a flattened list matrix representation of a pandas pivot_table
NOTES: this is particularly useful prior to input to R due to the use of NA
INPUT:
OPTIONAL INPUT:
na_str_I = optional string or value to fill missing data with
'''
#fill values with 'NA', convert to 1d numpy array, convert to list
if na_str_I:
data_O = list(self.pivotTable.fillna(na_str_I).get_values().ravel());
else:
data_O = list(self.pivotTable.get_values().ravel())
return data_O;
def get_dataMatrix(self,na_str_I = None):
'''return a matrix representation of a pandas pivot_table
INPUT:
OPTIONAL INPUT:
na_str_I = optional string or value to fill missing data with
'''
#fill values with 'NA', convert to 1d numpy array, convert to list
if na_str_I:
data_O = self.pivotTable.fillna(na_str_I).get_values();
else:
data_O = self.pivotTable.get_values();
return data_O;
def get_rowLabels(self,row_labels_I):
'''return a dictionary of row labels in a pandas pivot_table
INPUT:
row_labels_I = list of row labels to extract
NOTES:
row_labels_I should consist of ALL row labels in the pandas index tuple
(labels are extracted by order and NOT by the label value)
'''
row_labels_O = {}
for i,rv in enumerate(row_labels_I):
row_labels_O[rv] = np.array([g[i] for g in self.pivotTable.index.unique()]);
#for g in self.pivotTable.index.unique():
# row_labels_O[rv].append(g[i]);
return row_labels_O;
def get_rowLabels_asTupleArray(self):
'''return an array of row labels in a pandas pivot_table
NOTES:
labels are in the same order as the pivot table was instanciated
'''
row_labels_O = self.pivotTable.index.unique();
return row_labels_O;
def get_rowLabels_asArray(self):
'''return an array of row labels in a pandas pivot_table
NOTES:
labels are in the same order as the pivot table was instanciated
'''
row_labels_O = np.array([np.asarray(row) for row in self.pivotTable.index.unique()]);
return row_labels_O;
def get_rowLabels_asDataFrame(self):
'''return a DataFrame of row labels in a pandas pivot_table
NOTES:
labels are in the same order as the pivot table was instanciated
'''
row_labels_O = pd.DataFrame.from_records(list(self.pivotTable.index.unique()),columns=self.pivotTable.index.names);
return row_labels_O;
def get_rowLabels_asUniqueIndexes(self):
'''return an array from 0 to nunique indexes in a pandas pivot_table
NOTES:
labels are in the same order as the pivot table was instanciated
'''
nrow_labels_O = np.array(range(self.pivotTable.index.nunique()));
return nrow_labels_O;
def get_columnLabels(self,column_labels_I):
'''return a dictionary of column labels in a pandas pivot_table
INPUT:
column_labels_I = list of column labels to extract
NOTES:
column_labels_I should consist of ALL row labels in the pandas columns tuple
(labels are extracted by order and NOT by the label value)
'''
column_labels_O = {}
for i,cv in enumerate(column_labels_I):
column_labels_O[cv] = np.array([g[i] for g in self.pivotTable.columns.unique()]);
return column_labels_O;
def get_columnLabels_asTupleArray(self):
'''return a array of column labels in a pandas pivot_table
NOTES:
labels are in the same order as the pivot table was instanciated
'''
column_labels_O = self.pivotTable.columns.unique();
return column_labels_O;
def get_columnLabels_asArray(self):
'''return a array of column labels in a pandas pivot_table
NOTES:
labels are in the same order as the pivot table was instanciated
'''
column_labels_O = np.array([np.asarray(col) for col in self.pivotTable.columns.unique()]);
return column_labels_O;
def get_columnLabels_asDataFrame(self):
'''return a array of column labels in a pandas pivot_table
NOTES:
labels are in the same order as the pivot table was instanciated
'''
column_labels_O = pd.DataFrame.from_records(list(self.pivotTable.columns.unique()),columns=self.pivotTable.columns.names);
return column_labels_O;
def get_columnLabels_asUniqueIndexes(self):
'''return a array from 0 to nunique column labels in a pandas pivot_table
NOTES:
labels are in the same order as the pivot table was instanciated
'''
column_labels_O = np.array(range(self.pivotTable.columns.nunique()));
return column_labels_O;
def get_dataFrameRow_byIndex_asDict(self,row_index_I):
'''return a dictionary of a row index
INPUT:
row_index_I = integer
OUTPUT:
row_O = {}
'''
row_O = dict(self.dataFrame.iloc[0]);
return row_O;
def get_flattenedDataAndColumnLabels(self):
'''
return a flattened list of data and corresponding column labels
EXAMPLE:
dataFrame:
A B
0 1
2 3
output:
data_O = [0,1,2,3];
column_labeles_O = ['A','B','A','B']
OUTPUT:
data_O = flattened array of data
column_labels_O = flattened array of corresponding column labels
'''
stacked = self.dataFrame.stack();
data_O = stacked.get_values();
columnLabels_df = pd.DataFrame(list(stacked.index))
column_labels_O = columnLabels_df[1].get_values();
return data_O,column_labels_O
    def get_uniqueValues(self,column_I):
        '''return unique values from a dataFrame column
        INPUT:
        column_I = string
        NOTE (review):
        this re-definition shadows the earlier list-based
        get_uniqueValues(key_I, filter_I) method in this class; internal
        callers (e.g. convert_listDict2dataMatrix) still use the old
        signature and will dispatch here instead -- TODO rename one of the
        two definitions.
        '''
        return self.dataFrame[column_I].unique();
#Sorting methods
def order_indexFromTemplate_pivotTable(self,template_I,axis_I):
'''re-order a column/row from a template
INPUT:
template_I = [], strings listing the labels in the desired order
axis_I = integer, 0=rows, 1=columns
OUTPUT:
'''
mi = pd.Index(template_I);
self.pivotTable.reindex_axis(mi,axis_I);
#Filter methods
def filterIn_byDictList(self,dictList_I):
'''filter in data that is in a list
INPUT:
dictList_I = {'column_label':[items to filter in...]}
'''
for k,v in dictList_I.items():
if v:
self.dataFrame = self.dataFrame[self.dataFrame[k].isin(v)];
def filterOut_byDictList(self,dictList_I):
'''filter out data that is not in a list
INPUT:
dictList_I = {'column_label':[items to filter out...]}
'''
for k,v in dictList_I.items():
if v:
self.dataFrame = self.dataFrame[~self.dataFrame[k].isin(v)];
#Utility methods
def count_missingValues_pivotTable(self):
'''count the number of occurances of a missing value in a pandas pivot table
INPUT:
OUTPUT:
mv_O = # of missing values
'''
mv_O = 0;
#check for missing values
mv_O = self.pivotTable.size - self.pivotTable.count().get_values().sum();
return mv_O;
def get_uniqueValues_list(self,list_I):
'''retrieve unique values in a list in order
INPUT:
list_I = list of values
'''
return pd.Series(list_I).unique();
def convert_list2Levels(self,list_I):
'''Convert a list of strings to unique indexes'''
s = pd.Series(list_I)
levels, labels = pd.factorize(s)
return levels;
def make_concatenatedColumn(self,
column_label_new_I,
column_labels_I,
connector_str_I = '__'
):
'''
make a concatenated column from two columns
INPUT:
column_label_new_I = string, new column label
column_labels_I = [], list of column labels to concatenate
connector_str_I = string, string to connect the two columns by
'''
for i,column_label in enumerate(column_labels_I):
if i==0:
self.dataFrame[column_label_new_I] = self.dataFrame[column_label];
else:
self.dataFrame[column_label_new_I] = self.dataFrame[column_label_new_I].map(str) + connector_str_I + self.dataFrame[column_label].map(str);
#def split_concatenatedColumns(self,
# column_label_I,
# connector_str_I = '__'
# ):
# '''
# split a concatenated column into indiviual columns
# '''
# column_labels = column_label_I.split(connector_str_I);
# self.dataFrame['new_col1'], self.dataFrame['new_col2'] = zip(*df['original_col'].apply(lambda x: x.split(': ', 1)))
def change_rowAndColumnNames(self,row_names_dict_I=None,column_names_dict_I=None):
'''
change the names of multiple columns
INPUT:
row_names_dict_I = {}, k,v = old:new row label
column_names_dict_I = {}, k,v = old:new column label
'''
#Rename several DataFrame columns
self.dataFrame = self.dataFrame.rename(
index = row_names_dict_I,
columns = column_names_dict_I
);
def make_dummyIndexColumn(self,column_index_I,column_label_I,col2index_I=None):
'''
Create a dummy index column based on the unique values in the specific column
INPUT:
column_index_I = string, name of the index column
column_label_I = string, name of the column to create the dummy label
'''
if col2index_I is None:
unique = self.dataFrame[column_label_I].unique();
unique.sort();
col2index = {v:i for i,v in enumerate(unique)}
else:
col2index = col2index_I;
index = [col2index[v] for v in self.dataFrame[column_label_I].get_values()];
self.add_column2DataFrame(column_index_I,index);
def convert_dictListListDict2ListDict(self,data_I):
'''flatten a dictionary of listDicts to a listDict
{key:[{},...],...} -> [[{},...],...] -> [{},...]
NOTES:
the 'key' is lost
'''
data1 = [v for v in data_I.values()];
data2=[];
for d in data1:
data2.extend(d);
return data2;
#Bulk methods
def convert_listDict2dataMatrixList_pd(self,
row_label_I,column_label_I,value_label_I,
row_variables_I=[],
column_variables_I=[],
na_str_I="NA",
filter_rows_I=[],
filter_columns_I=[],
order_rowsFromTemplate_I=[],
order_columnsFromTemplate_I=[],
):
'''convert a list of dictionary rows to a numpy array
INPUT:
data_I = [{}]
row_label_I = column_id of the row labels
column_label_I = column_id of the column labels
value_label_I = column_id of the value label
OPTIONAL INPUT:
row_variables_I = list of keys to extract out with the rows
column_variables_I = list of keys to extract out with the columns
na_str_I = optional string or value to pre-initialize the output data with
filter_rows_I = list of row labels to include
filter_columns_I = list of column labels to include
order_rows_I = list of integers defining the order of the rows
order_columns_I = list of integers defining the order of the rows
order_rowsFromTemplate_I = list of row labels defining the order of the rows
order_columnsFromTemplate_I = list of row labels defining the order of the rows
OUTPUT:
data_O = list of values ordered according to (len(row_label_unique),len(column_label_unique))
row_labels_O = row labels of data_O
column_labels_O = column labels of data_O
OPTIONAL OUTPUT:
row_variables_O = {"row_variables_I[0]:[...],..."} where each list is of len(row_labels_O)
column_variables_O = {"row_variables_I[0]:[...],..."} where each list is of len(column_labels_O)
'''
data_O = [];
#handle the input to pandas
row_variables = row_variables_I;
row_variables.insert(0,row_label_I);
column_variables = column_variables_I;
column_variables.insert(0,column_label_I);
#make the pandas dataframe
self.convert_listDict2DataFrame();
#filter in rows/columns
if filter_rows_I:
data_listDict.filterIn_byDictList({row_label_I:filter_rows_I,
});
if filter_columns_I:
data_listDict.filterIn_byDictList({column_label_I:filter_columns_I,
});
#set the pivot table
self.set_pivotTable(value_label_I, row_variables, column_variables);
#sort rows/columns
if order_rowsFromTemplate_I:
data_listDict.order_indexFromTemplate_pivotTable(template_I=order_rowsFromTemplate_I,axis_I=0);
if order_columnsFromTemplate_I:
data_listDict.order_indexFromTemplate_pivotTable(template_I=order_columnsFromTemplate_I,axis_I=0);
#fill values with 'NA', convert to 1d numpy array, convert to list
data_O = self.get_dataMatrixList(na_str_I);
#extract out rows and column variables
row_variables_O = self.get_rowLabels(row_variables_I);
row_labels_O = row_variables_O[row_label_I];
# columns are in the same order as they were initialized during the pivot
column_variables_O = self.get_columnLabels(column_variables_I);
column_labels_O = column_variables_O[column_label_I];
# check that the length of the column_labels and row_labels
# are == to what they should be if only the row_label/column_label were used
assert(len(self.dataFrame.groupby([row_label_I]))==len(row_labels_O));
assert(len(self.dataFrame.groupby([column_label_I]))==len(column_labels_O));
##Broken (works only if len(column_variables/row_variables)==1
#assert(self.pivotTable.groupby([row_label_I]).count()==len(row_labels_O));
#assert(self.pivotTable.groupby([column_label_I]).count()==len(column_labels_O));
#return output based on input
if row_variables_I and column_variables_I:
return data_O,row_labels_O,column_labels_O,row_variables_O,column_variables_O;
elif row_variables_I:
return data_O,row_labels_O,column_labels_O,row_variables_O;
elif column_variables_I:
return data_O,row_labels_O,column_labels_O,column_variables_O;
else:
return data_O,row_labels_O,column_labels_O;
| StarcoderdataPython |
4998153 | # Generated by Django 3.1.3 on 2020-11-28 23:26
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration for the registry app (2020-11-28).

    Adds ``Aircraft.is_registered`` and pins new defaults for two
    expiration fields.
    """

    # NOTE(review): the DateTimeField defaults below are *fixed* datetimes
    # (2022-11-28 00:00 UTC) captured when the migration was generated,
    # not offsets relative to "now" -- confirm this is intended.

    dependencies = [
        ('registry', '0016_auto_20200115_1212'),
    ]
    operations = [
        migrations.AddField(
            model_name='aircraft',
            name='is_registered',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='authorization',
            name='end_date',
            field=models.DateTimeField(default=datetime.datetime(2022, 11, 28, 0, 0, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='operator',
            name='expiration',
            field=models.DateTimeField(default=datetime.datetime(2022, 11, 28, 0, 0, tzinfo=utc)),
        ),
    ]
| StarcoderdataPython |
11322873 | import time
import memcache_manager
from connection import Connection, ConnectionError
from connection_manager import ConnectionManager, NotConnectedError, SessionAttrError
class MemcacheConnectionManager(ConnectionManager, memcache_manager.MemcacheManager):
    """Manages connections stored by Memcache.

    Every connection attribute lives under its own memcache key (see
    ATTRIBUTES); multi-key updates are serialized with a memcache mutex.
    This module follows the package's Python 2 era style (e.g. iteritems).
    """

    # memcache key holding the list of known connection ids
    CONNECTIONS_ATTR = '_connection_ids'
    # memcache key holding the {channel_name: connection_count} mapping
    CHANNELS_ATTR = '_channels'

    # per-connection attribute keys mirrored into memcache
    ATTRIBUTES = ('connection_info', 'connected', 'last_active',
        'last_polled', 'authenticated', 'flex_user', 'session', 'notify_func_id')

    def __init__(self, connection_class=Connection, connection_params=None,
        mc_servers=['127.0.0.1:11211'], mc_debug=0):
        # NOTE: the mutable default mc_servers is never mutated here, so it is
        # safe, but a None sentinel would be more conventional.
        ConnectionManager.__init__(self, connection_class=connection_class,
            connection_params=connection_params)

        self.mc = self.createMcClient(mc_servers, mc_debug)
        self._lock = memcache_manager.MemcacheMutex(self.mc)

    def reset(self):
        """Drop every stored connection and reset the channel counters."""
        self._lock.releaseAll()

        lock_name = self.getLockName('connection_reset')
        self._lock.acquire(lock_name)
        try:
            connection_ids = self.mc.get(self.CONNECTIONS_ATTR)
            if connection_ids is not None:
                for connection_id in connection_ids:
                    keys = [self.getKeyName(connection_id, attr) for attr in self.ATTRIBUTES]
                    self.mc.delete_multi(keys)

            self.mc.set(self.CONNECTIONS_ATTR, [])
            self.mc.set(self.CHANNELS_ATTR, {})
        finally:
            self._lock.release(lock_name)

    def incrementChannelCount(self, channel_name):
        """Atomically bump the connection count for a channel."""
        lock_name = self.getLockName(self.CHANNELS_ATTR)
        self._lock.acquire(lock_name)
        try:
            channels = self.mc.get(self.CHANNELS_ATTR)
            if channels is None:
                channels = {}

            if channel_name in channels:
                channels[channel_name] += 1
            else:
                channels[channel_name] = 1

            self.mc.set(self.CHANNELS_ATTR, channels)
        finally:
            self._lock.release(lock_name)

    def decrementChannelCount(self, channel_name):
        """Atomically decrease the connection count for a channel."""
        lock_name = self.getLockName(self.CHANNELS_ATTR)
        self._lock.acquire(lock_name)
        try:
            channels = self.mc.get(self.CHANNELS_ATTR)
            if channels is None:
                channels = {}

            if channel_name in channels:
                channels[channel_name] -= 1

            self.mc.set(self.CHANNELS_ATTR, channels)
        finally:
            self._lock.release(lock_name)

    def getConnectionCount(self, channel_name):
        """Return the current connection count for a channel (0 if unknown)."""
        channels = self.mc.get(self.CHANNELS_ATTR)
        if channels is None:
            return 0

        if channel_name in channels:
            return channels[channel_name]
        else:
            return 0

    def checkMultiSetResults(self, results):
        """Raise ConnectionError if set_multi failed to store any key."""
        if len(results) > 0:
            msg = 'The following parameters were not set: ' + ', '.join(results)
            raise ConnectionError(msg)

    def loadConnection(self, connection_id):
        """Re-hydrate a Connection object from its stored connection_info."""
        connection_info = self.mc.get(self.getKeyName(connection_id, 'connection_info'))
        if connection_info is None:
            raise NotConnectedError("Connection '%s' is not connected." % connection_id)

        return self.connection_class(self, connection_info['channel_name'],
            connection_id, connection_info['timeout'])

    def initConnection(self, connection, channel):
        """Create the memcache records for a freshly connected connection."""
        params = {
            'connected': True,
            'last_active': time.time() * 1000,
            'last_polled': 0.0,
            'authenticated': False,
            'session': {}
        }

        connection_info = {
            'channel_name': connection.channel_name,
            'timeout': connection.timeout
        }

        cache_params = {}
        for key, val in params.iteritems():  # Python 2 dict API, per module style
            cache_params[self.getKeyName(connection.id, key)] = val
        cache_params[self.getKeyName(connection.id, 'connection_info')] = connection_info
        self.checkMultiSetResults(self.mc.set_multi(cache_params))

        lock_name = self.getLockName(self.CONNECTIONS_ATTR)
        self._lock.acquire(lock_name)
        try:
            connection_ids = self.mc.get(self.CONNECTIONS_ATTR)
            if connection_ids is None:
                connection_ids = []

            connection_ids.append(connection.id)
            self.mc.set(self.CONNECTIONS_ATTR, connection_ids)
        finally:
            self._lock.release(lock_name)

        self.incrementChannelCount(connection.channel_name)

    def iterConnectionIds(self):
        """Return an iterator over known connection ids.

        NOTE(review): returns None (not an empty iterator) when the key is
        missing -- callers appear to depend on that; confirm before changing.
        """
        connection_ids = self.mc.get(self.CONNECTIONS_ATTR)
        if connection_ids is not None:
            return connection_ids.__iter__()
        else:
            return None

    # --- proxies for connection properties --- #

    def getConnected(self, connection):
        """Proxy: connection.connected."""
        return self.mc.get(self.getKeyName(connection.id, 'connected'))

    def getLastActive(self, connection):
        """Proxy: connection.last_active (ms timestamp)."""
        return self.mc.get(self.getKeyName(connection.id, 'last_active'))

    def getLastPolled(self, connection):
        """Proxy: connection.last_polled (ms timestamp)."""
        return self.mc.get(self.getKeyName(connection.id, 'last_polled'))

    def getAuthenticated(self, connection):
        """Proxy: connection.authenticated."""
        return self.mc.get(self.getKeyName(connection.id, 'authenticated'))

    def getFlexUser(self, connection):
        """Proxy: connection.flex_user."""
        return self.mc.get(self.getKeyName(connection.id, 'flex_user'))

    def getNotifyFunc(self, connection):
        """Return the registered notify callable, or None if not set."""
        notify_func_id = self.mc.get(self.getKeyName(connection.id, 'notify_func_id'))
        if notify_func_id is None:
            return None
        else:
            # BUG FIX: resolve the id fetched from memcache; the original read
            # connection._notify_func_id, which is not populated on connections
            # re-hydrated via loadConnection.
            return connection._getNotifyFuncById(notify_func_id)

    # --- proxies for connection methods --- #

    def deleteConnection(self, connection):
        """Remove a connection's id, attribute keys and channel count."""
        lock_name = self.getLockName(self.CONNECTIONS_ATTR)
        self._lock.acquire(lock_name)
        try:
            connection_ids = self.mc.get(self.CONNECTIONS_ATTR)
            for i, connection_id in enumerate(connection_ids):
                if connection_id == connection.id:
                    connection_ids.pop(i)
                    break
            self.mc.set(self.CONNECTIONS_ATTR, connection_ids)
        finally:
            self._lock.release(lock_name)

        keys = [self.getKeyName(connection.id, attr) for attr in self.ATTRIBUTES]
        self.mc.delete_multi(keys)

        self.decrementChannelCount(connection.channel_name)

        ConnectionManager.deleteConnection(self, connection)

    def connectConnection(self, connection):
        """Mark the connection as connected."""
        self.mc.set(self.getKeyName(connection.id, 'connected'), True)

    def disconnectConnection(self, connection):
        """Mark the connection as disconnected."""
        self.mc.set(self.getKeyName(connection.id, 'connected'), False)

    def touchConnection(self, connection):
        """Refresh the connection's last_active timestamp."""
        self.mc.set(self.getKeyName(connection.id, 'last_active'), time.time() * 1000)

    def touchPolled(self, connection):
        """Refresh the connection's last_polled timestamp."""
        self.mc.set(self.getKeyName(connection.id, 'last_polled'), time.time() * 1000)

    def authenticateConnection(self, connection, user):
        """Flag the connection authenticated and store its Flex user."""
        params = {
            self.getKeyName(connection.id, 'authenticated'): True,
            self.getKeyName(connection.id, 'flex_user'): user
        }
        self.checkMultiSetResults(self.mc.set_multi(params))

    def unAuthenticateConnection(self, connection):
        """Clear the authenticated flag and Flex user."""
        self.mc.set(self.getKeyName(connection.id, 'authenticated'), False)
        self.mc.delete(self.getKeyName(connection.id, 'flex_user'))

    def setNotifyFunc(self, connection, func):
        """Register a notify callable and store its id."""
        self.mc.set(self.getKeyName(connection.id, 'notify_func_id'),
            connection._setNotifyFunc(func))

    def unSetNotifyFunc(self, connection):
        """Remove the stored notify-callable id."""
        self.mc.delete(self.getKeyName(connection.id, 'notify_func_id'))

    def getConnectionSessionAttr(self, connection, name):
        """Read a single session attribute; raise SessionAttrError if absent."""
        session = self.mc.get(self.getKeyName(connection.id, 'session'))
        try:
            return session[name]
        except KeyError:
            raise SessionAttrError("Attribute '%s' not found." % name)

    def setConnectionSessionAttr(self, connection, name, val):
        """Set a session attribute under the session lock."""
        key = self.getKeyName(connection.id, 'session')
        lock_name = self.getLockName(key)
        self._lock.acquire(lock_name)
        try:
            session = self.mc.get(key)
            session[name] = val
            self.mc.set(key, session)
        finally:
            self._lock.release(lock_name)

    def delConnectionSessionAttr(self, connection, name):
        """Delete a session attribute (missing names are ignored)."""
        key = self.getKeyName(connection.id, 'session')
        lock_name = self.getLockName(key)
        self._lock.acquire(lock_name)
        try:
            session = self.mc.get(key)
            try:
                del session[name]
                self.mc.set(key, session)
            except KeyError:
                pass
        finally:
            self._lock.release(lock_name)
| StarcoderdataPython |
6688537 | <filename>src/z3c/password/tests.py
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test Setup
"""
import doctest
import re
import unittest
from doctest import DocFileSuite
from zope.testing import renormalizing
from z3c.password import testing
# Output normalizers so the doctests pass unchanged on both Python 2 and 3.
checker = renormalizing.RENormalizing([
    # Python 3 bytes add a "b".
    (re.compile("b('.*?')"),
     r"\1"),
    (re.compile('b(".*?")'),
     r"\1"),
    # Python 3 adds module name to exceptions.
    (re.compile("z3c.password.interfaces.NoPassword"),
     r"NoPassword"),
    (re.compile("zope.security.interfaces.NoInteraction"),
     r"NoInteraction"),
    ])
def test_suite():
    """Assemble the doctest suite for README.txt and principal.txt."""
    flags = (doctest.NORMALIZE_WHITESPACE
             | doctest.ELLIPSIS
             | doctest.IGNORE_EXCEPTION_DETAIL)
    suites = [
        DocFileSuite(name,
                     setUp=testing.setUp, tearDown=testing.tearDown,
                     optionflags=flags, checker=checker,
                     )
        for name in ('README.txt', 'principal.txt')
    ]
    return unittest.TestSuite(suites)
| StarcoderdataPython |
5051336 | <gh_stars>1-10
from molly.molly import Molly
import pytest
@pytest.fixture(scope='function')
def _molly():
    """Yield a Molly scanner preconfigured for scanme.nmap.org."""
    scanner = Molly(
        target='scanme.nmap.org',
        mode='common',
        workers=4
    )
    yield scanner
| StarcoderdataPython |
1720460 | import numpy as np
import scipy.linalg as sl
import jax.numpy as np
from jax import grad, jit, vmap, jacfwd, jacrev
from jax import random
def log_likelihood(x, mu, Sigma):
    """Multivariate normal log density, one value per row of x.

    BUG FIX: the original called scipy.linalg.slogdet (which does not exist;
    it lives in numpy.linalg / jax.numpy.linalg) and computed the quadratic
    form with elementwise `*`, yielding a matrix rather than the scalar
    (x-mu)^T Sigma^{-1} (x-mu).  Using jax.numpy.linalg also keeps the
    function differentiable with jax.grad.

    Returns an array of shape (N,) for x of shape (N, d).
    """
    x = np.atleast_2d(x)
    mu = np.atleast_2d(mu)
    Sigma = np.atleast_2d(Sigma)
    sign, Slogdet = np.linalg.slogdet(Sigma)
    Sinv = np.linalg.inv(Sigma)
    dev = x - mu
    # per-row quadratic form dev_i^T Sinv dev_i
    quad = np.sum((dev @ Sinv) * dev, axis=1)
    return -.5 * x.shape[1] * np.log(2. * np.pi) - .5 * Slogdet - .5 * quad
def log_prior(mu, Sigma, nu0=None, kappa0=None, Sigma0=None, mu0=None, N=1000):
    """Normal-inverse-Wishart log prior kernel (up to an additive constant).

    BUG FIX: the original body was pseudo-math and could not run:
    `-.5(...)` was missing a `*` (TypeError) and `(mu-mu0)^T` used an
    undefined name `T` with XOR; the products were elementwise.  Rewritten
    as the intended kernel with proper matrix operations.

    mu is expected as a (p, 1) column vector (mu.shape[0] = p).
    NOTE(review): the nu0/kappa0/Sigma0/mu0 arguments are overwritten below,
    exactly as in the original -- confirm whether supplied values should be
    honored instead.
    """
    nu0 = N + 1
    kappa0 = 1
    p = mu.shape[0]
    Sigma0 = np.ones((p, p))
    mu0 = np.ones((p, 1))
    sign, Slogdet = np.linalg.slogdet(Sigma)
    Sinv = np.linalg.inv(Sigma)
    dev = mu - mu0
    quad = (dev.T @ Sinv @ dev)[0, 0]
    return (-.5 * (nu0 + p + 2.) * Slogdet
            - .5 * kappa0 * quad
            - .5 * np.trace(Sinv @ Sigma0))
def log_joint(x, mu, Sigma, wts):
    """Weighted data log likelihood plus the log prior.

    BUG FIX: the original referenced an undefined name `z`; the data
    argument is `x`.
    NOTE(review): the wts[:, np.newaxis] broadcasting assumes a particular
    shape from log_likelihood -- confirm once the likelihood's return shape
    is settled.
    """
    return (wts[:, np.newaxis] * log_likelihood(x, mu, Sigma)).sum(axis=0) + log_prior(mu, Sigma)
def grad_th_log_likelihood(x, mu, Sigma):
    """Gradient of log_likelihood with respect to the parameters (mu, Sigma)."""
    grad_fn = grad(log_likelihood, argnums=(1, 2))
    return grad_fn(x, mu, Sigma)
def grad_z_log_likelihood(x, mu, Sigma):
    """Gradient of log_likelihood with respect to the data x."""
    grad_fn = grad(log_likelihood, argnums=0)
    return grad_fn(x, mu, Sigma)
def grad_th_log_prior(mu, Sigma):
    """Gradient of log_prior with respect to (mu, Sigma)."""
    grad_fn = grad(log_prior, argnums=(0, 1))
    return grad_fn(mu, Sigma)
def grad_th_log_joint(x, mu, Sigma, wts):
    """Gradient of the log joint with respect to (mu, Sigma).

    NOTE(review): grad_th_log_prior / grad_th_log_likelihood return a *tuple*
    of gradients (one per argnum), so `+` here concatenates tuples and the
    broadcast multiply / .sum over a tuple will likely fail at runtime --
    confirm the intended per-parameter accumulation.
    """
    return grad_th_log_prior(mu, Sigma) + (wts[:, np.newaxis, np.newaxis]*grad_th_log_likelihood(x, mu, Sigma)).sum(axis=0)
def hessian(f):
    """Return a jitted Hessian of f (forward-over-reverse composition)."""
    reverse_jac = jacrev(f)
    return jit(jacfwd(reverse_jac))
def hess_th_log_likelihood(x, mu, Sigma):
    """Hessian of log_likelihood with respect to (mu, Sigma).

    BUG FIX: `hessian(f)` accepts a single function argument; the original
    passed argnums to it as well and would raise TypeError.  Build the
    forward-over-reverse Hessian with explicit argnums instead.
    """
    return jit(jacfwd(jacrev(log_likelihood, argnums=(1, 2)), argnums=(1, 2)))(x, mu, Sigma)
def hess_th_log_prior(mu, Sigma):
    """Hessian of log_prior with respect to (mu, Sigma).

    BUG FIX: `hessian(f)` accepts a single function argument; the original
    passed argnums to it as well and would raise TypeError.
    """
    return jit(jacfwd(jacrev(log_prior, argnums=(0, 1)), argnums=(0, 1)))(mu, Sigma)
def hess_th_log_joint(x, mu, Sigma, wts):
    """Hessian of the log joint with respect to (mu, Sigma).

    NOTE(review): like grad_th_log_joint, this adds and broadcasts over the
    *tuples* returned by the per-argnum Hessian helpers, which will likely
    fail at runtime -- confirm the intended per-parameter accumulation.
    """
    return hess_th_log_prior(mu, Sigma) + (wts[:, np.newaxis, np.newaxis, np.newaxis]*hess_th_log_likelihood(x, mu, Sigma)).sum(axis=0)
| StarcoderdataPython |
4878969 | <reponame>KnowEnG-Research/Gene_Prioritization_Pipeline
"""
Created on Fri Sep 23 16:39:35 2016
@author: The KnowEnG dev team
"""
def correlation(run_parameters):
    """Run plain correlation-based gene prioritization."""
    from gene_prioritization_toolbox import run_correlation as runner
    runner(run_parameters)
def net_correlation(run_parameters):
    """Run network-smoothed correlation gene prioritization."""
    from gene_prioritization_toolbox import run_net_correlation as runner
    runner(run_parameters)
def bootstrap_correlation(run_parameters):
    """Run bootstrap-sampled correlation gene prioritization."""
    from gene_prioritization_toolbox import run_bootstrap_correlation as runner
    runner(run_parameters)
def bootstrap_net_correlation(run_parameters):
    """Run bootstrap-sampled, network-smoothed correlation gene prioritization."""
    from gene_prioritization_toolbox import run_bootstrap_net_correlation as runner
    runner(run_parameters)
# Dispatch table: run_parameters["method"] -> implementation (used by main()).
SELECT = {
    "correlation": correlation,
    "net_correlation": net_correlation,
    "bootstrap_correlation": bootstrap_correlation,
    "bootstrap_net_correlation": bootstrap_net_correlation}
def main():
    """
    Entry point: read the run directory/file from argv, load the run
    parameters, and dispatch to the selected gene-prioritization method.
    """
    import sys
    from knpackage.toolbox import get_run_directory_and_file
    from knpackage.toolbox import get_run_parameters

    run_directory, run_file = get_run_directory_and_file(sys.argv)
    params = get_run_parameters(run_directory, run_file)
    SELECT[params["method"]](params)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
5146744 | <gh_stars>1-10
from functools import reduce
from django.db.models import Q
from django.shortcuts import reverse, get_object_or_404
from django.views.generic.list import ListView
from django.core.exceptions import ImproperlyConfigured
from django.db import transaction
from django.contrib.auth import get_user_model
User = get_user_model()
from rest_framework import viewsets, filters
from rest_framework.decorators import detail_route
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
from common.models import Tag
from common.serializers import TagSerializer
from common.permissions import IsCreatorOrReadOnly
from .models import Question, Set
from .serializers import QuestionSerializer, SetSerializer
class TagViewSet(viewsets.ModelViewSet):
queryset = Tag.objects.all()
serializer_class = TagSerializer
permission_classes = (
IsAuthenticatedOrReadOnly,
)
filter_backends = (
filters.SearchFilter,
)
search_fields = ('name',)
class QuestionViewSet(viewsets.ModelViewSet):
queryset = Question.objects.all()
serializer_class = QuestionSerializer
permission_classes = (
IsCreatorOrReadOnly,
)
filter_backends = (
DjangoFilterBackend,
filters.SearchFilter,
)
filter_fields = (
'created_by',
'difficulty',
)
search_fields = ('=tags__name',)
def perform_create(self, serializer):
serializer.save(created_by=self.request.user)
class SetQuestions:
@detail_route(url_path='questions')
def questions(self, request, pk=None):
questions = self.get_object().questions.all()
serializer = QuestionSerializer(questions, many=True)
return Response(serializer.data)
@detail_route(
methods=['post'],
url_path='questions/add',
url_name='questions-add',
)
def questions_add(self, request, pk=None):
id_list = request.data.get('questions', [])
questions = Question.objects.filter(id__in=id_list)
self.get_object().questions.add(*questions)
content = {
'status': 'success'
}
return Response(content)
@detail_route(
methods=['post', 'delete'],
url_path='questions/remove',
url_name='questions-remove',
)
def questions_remove(self, request, pk=None):
id_list = request.data.get('questions', [])
questions = Question.objects.filter(id__in=id_list)
self.get_object().questions.remove(*questions)
content = {
'status': 'success'
}
return Response(content)
class SetViewSet(viewsets.ModelViewSet, SetQuestions):
queryset = Set.objects.all()
serializer_class = SetSerializer
permission_classes = (
IsCreatorOrReadOnly,
)
def perform_create(self, serializer):
user = User.objects.get(pk=1)
serializer.save(created_by=user)
| StarcoderdataPython |
9742415 | #coding:utf-8
#author:<EMAIL>
import re
import urllib2
def check(host,port,timeout):
try:
url = "http://%s:%d"%(host,int(port))
res = urllib2.urlopen(url+'/axis2/services/listServices',timeout=timeout)
res_code = res.code
res_html = res.read()
if int(res_code) == 404:
return 'NO'
m=re.search('\/axis2\/services\/(.*?)\?wsdl">.*?<\/a>',res_html)
if m.group(1):
server_str = m.group(1)
read_url = url+'/axis2/services/%s?xsd=../conf/axis2.xml'%(server_str)
res = urllib2.urlopen(read_url,timeout=timeout)
res_html = res.read()
if 'axisconfig' in res_html:
try:
user=re.search('<parameter name="userName">(.*?)<\/parameter>',res_html)
password=re.search('<parameter name="password">(.*?)<\/parameter>',res_html)
info = '%s Local File Inclusion Vulnerability %s:%s'%(read_url,user.group(1),password.group(1))
except:
pass
return 'YES|'+info
except Exception,e:
return 'NO'
return 'NO'
| StarcoderdataPython |
3432427 | <reponame>socrateslee/hostsed
'''
Utilities for adding or deleting entries on hosts file
as /etc/hosts.
'''
import os
import sys
import json
import subprocess
import socket
import argparse
def is_valid_ip_address(ip):
    '''
    Return True if *ip* is a valid IPv4 or IPv6 address string.
    '''
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, ip)
        except socket.error:
            continue
        return True
    return False
def parse_line(line):
    '''
    Split a hosts-file line into (raw_line, fields_or_None, comment).
    fields is None for blank or comment-only lines; comment includes the '#'.
    '''
    comment_pos = line.find("#")
    if comment_pos == -1:
        content, comment = line.strip(), ''
    else:
        content, comment = line[:comment_pos].strip(), line[comment_pos:]
    if not content:
        return (line, None, comment)
    fields = [field.strip() for field in content.split()]
    return (line, fields, comment)
class HostEditor(object):
    """Parse, query and rewrite a hosts file (default /etc/hosts).

    Entries are kept as (raw_line, fields_or_None, comment) tuples as
    produced by parse_line.  A filename of '-' means read from stdin and
    write to stdout only.
    """

    def __init__(self, filename='/etc/hosts'):
        self.filename = filename
        self._parse()

    def chk_user_permissions(self):
        '''
        Check if current user has sufficient permissions to
        edit hosts file.

        Raise an exception if user is invalid
        '''
        if self.filename != '-' and not os.access(self.filename, os.W_OK):
            msg = 'User does not have sufficient permissions, are you super user?'
            raise Exception(msg)
        return

    def add(self, ip, *hostnames):
        '''
        Add an entry to hosts file: append *hostnames* to an existing line
        for *ip*, or create a new line if none exists.
        '''
        self.chk_user_permissions()
        if not is_valid_ip_address(ip):
            raise Exception("IP %s is not valid." % ip)
        ret = []
        added = False
        for (line, parts, comment) in self.entries:
            if parts and parts[0] == ip and not added:
                for hostname in hostnames:
                    if hostname not in parts[1:]:
                        parts.append(hostname)
                line = ' '.join(['\t'.join(parts), comment])
                added = True
            ret.append((line, parts, comment))
        if not added:
            # BUG FIX: the original returned early when the hosts file was
            # empty (making it impossible to add the first entry) and attached
            # the previous line's comment to the freshly created entry.
            parts = [ip] + list(hostnames)
            ret.append(('\t'.join(parts), parts, ''))
        self.entries = ret
        self.write()
        self.output()

    def drop(self, ip_or_hostname):
        '''
        Drop lines with specified ip or hostname from hosts file.
        '''
        self.chk_user_permissions()
        ret = []
        for (line, parts, comment) in self.entries:
            if parts and ip_or_hostname in parts:
                continue
            ret.append((line, parts, comment))
        self.entries = ret
        self.write()
        self.output()

    def delete(self, ip, hostname):
        '''
        Delete *hostname* from the line for *ip*; drop the line entirely
        when no hostnames remain on it.
        '''
        self.chk_user_permissions()
        if not is_valid_ip_address(ip):
            raise Exception("IP %s is not valid." % ip)
        ret = []
        for (line, parts, comment) in self.entries:
            if parts and parts[0] == ip:
                parts = list(filter(lambda x: x != hostname, parts))
                if not parts[1:]:
                    continue
                line = ' '.join(['\t'.join(parts), comment])
            ret.append((line, parts, comment))
        self.entries = ret
        self.write()
        self.output()

    def _parse(self):
        '''
        Parse the file (or stdin for '-') into self.entries.
        '''
        self.entries = []
        if self.filename == '-':
            for line in sys.stdin.readlines():
                self.entries.append(parse_line(line))
        else:
            # BUG FIX: close the file handle (the original leaked it).
            with open(self.filename) as fd:
                for line in fd:
                    self.entries.append(parse_line(line))

    def output(self, fd=None):
        """Write the current entries to *fd* (default: stdout)."""
        if fd is None:
            fd = sys.stdout
        fd.write('\n'.join(map(lambda x: x[0].strip(), self.entries)))
        fd.write('\n')

    def write(self):
        """Persist the entries back to the hosts file (no-op in stdin mode)."""
        if self.filename != '-':
            # BUG FIX: use a context manager so the handle is closed/flushed.
            with open(self.filename, 'w') as fd:
                self.output(fd=fd)

    def output_docker_ip(self, container):
        """Print the IP address of a docker container via `docker inspect`."""
        # NOTE(review): the container name is interpolated into a shell
        # command; for untrusted input prefer a list argv with shell=False.
        proc = subprocess.Popen("docker inspect %s" % container,
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        if proc.returncode == 0:
            ret = json.loads(stdout.decode('utf-8'))
            ip = ret[0]['NetworkSettings']['IPAddress']
            sys.stdout.write(ip)
def parse_cmdline():
    '''
    Parse cmd line arguments and returns a dictionary
    with its parsed values
    '''
    parser = argparse.ArgumentParser(
        prog='hostsed',
        description='A hosts file editing tool for command line shell')
    subcommands = parser.add_subparsers(dest='name')

    sub_add = subcommands.add_parser(
        name='add',
        help='Add entry IPADDRESS HOSTNAME1 [HOSTNAME2 ...]'
    )
    sub_add.add_argument('add',
                         type=str,
                         metavar='IPADDRESS_OR_HOSTNAME',
                         nargs='+')

    # several spellings are accepted for deletion; aliases map them to 'del'
    del_aliases = ['rm', 'delete', 'remove']
    sub_del = subcommands.add_parser(
        name='del',
        help='Delete an IPADDRESS HOSTNAME entry',
        aliases=del_aliases
    )
    sub_del.add_argument('del', nargs=2, metavar='IPADDRESS_OR_HOSTNAME')

    sub_drop = subcommands.add_parser(
        name='drop',
        help='Drop a lines containing an IP_OR_HOSTNAME entry'
    )
    sub_drop.add_argument('drop', nargs=1, metavar='IPADDRESS_OR_HOSTNAME')

    sub_docker = subcommands.add_parser(
        name='docker',
        help='Show docker cointainer IP address of the given name'
    )
    sub_docker.add_argument(
        'docker',
        help='Name of the Container to get IP address from',
        metavar='CONTAINER',
        type=str,
        nargs=1
    )

    parser.add_argument("-f", "--file",
                        default="/etc/hosts",
                        help="The location of hosts file, default /etc/hosts, - for reading from stdin",
                        type=str)

    parsed = vars(parser.parse_args())
    # normalize alias spellings to the canonical 'del'
    if parsed.get('name') in del_aliases:
        parsed['name'] = 'del'
    return parsed
def main():
    """CLI entry point: parse arguments, build a HostEditor and dispatch."""
    args = parse_cmdline()
    action = args.get('name')
    editor = HostEditor(filename=args.get('file'))
    handlers = {
        'add': editor.add,
        'del': editor.delete,
        'drop': editor.drop,
        'docker': editor.output_docker_ip
    }
    try:
        params = args.get(action)
        if not params:
            # no subcommand (or it took no values): just print the file
            editor.output()
        else:
            handlers.get(action)(*params)
    except Exception as e:
        sys.stdout.write('ERROR: {} \n'.format(e))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1838219 | <reponame>GambitResearch/capnpy
# -*- encoding: utf-8 -*-
import pytest
import six
from capnpy.compiler.util import as_identifier
def test_as_identifier():
    """as_identifier accepts ASCII bytes, rejects str and non-ASCII bytes."""
    assert as_identifier(b'hello') == 'hello'
    # bytes on Py2, unicode on Py3
    assert type(as_identifier(b'hello')) is str
    with pytest.raises(TypeError):
        as_identifier(u'hello')
    #
    unicode_identifier = u'hellò'.encode('utf-8')
    with pytest.raises(ValueError) as exc:
        as_identifier(unicode_identifier)
    expected = (u'Non-ASCII identifiers are not '
                u'supported by capnpy: hellò')
    assert six.text_type(exc.value) == expected
| StarcoderdataPython |
3224439 | <reponame>Rimuwu/Ranga<filename>Cog/moderation.py<gh_stars>1-10
import nextcord as discord
from nextcord.ext import tasks, commands
from nextcord.utils import utcnow
from datetime import timedelta
import sys
import random
from random import choice
import asyncio
import time
import datetime
import pymongo
sys.path.append("..")
from functions import functions as funs
import config
# Shared MongoDB handles used by every command in this cog.
client = funs.mongo_c()
db = client.bot
servers = db.servers  # per-guild settings / moderation state documents
settings = db.settings
class mod(commands.Cog):
    def __init__(self, bot):
        """Store the bot reference for use by the commands below."""
        self.bot = bot
@commands.command(usage = '(@member) [reason]', description = 'Забанить пользователя.', help = 'Модерация', aliases = ['бан'])
async def ban(self, ctx, member: discord.Member, *, arg="Причина не указана"):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
if ctx.author.guild_permissions.ban_members == True:
await member.send(f'Вы были забанены на сервере `{ctx.guild.name}` по причине: `{arg}`\nСоздатель сервера: `{ctx.guild.owner}`')
ban = f"{ctx.author}({ctx.author.id}) - {arg}"
await member.ban(reason=ban)
msg = [
"Забаненый уже никогда не будет тем, кто был раньше...",
"Бан это и плохо и хорошо, смотря с какой стороны смотреть...",
"Тот ли человек после бана кем он был раньше?",
]
server = servers.find_one({"server": ctx.guild.id})
await ctx.send(embed = discord.Embed(color=server['embed_color']).add_field(
name="Бан",
value=f"Забанен: {member.mention}\n"
f"Забанил: {ctx.author.mention}\n"
f"Причина: {arg}\n"
f"Банов: {int(len(await ctx.guild.bans()))-1} +1"
).set_thumbnail(
url= "https://ia.wampi.ru/2020/08/09/1452967606_anime-sword-art-online-lisbeth-anime-gifki-2775271.gif").set_footer(
icon_url=ctx.author.avatar.url,
text=random.choice(msg)))
@commands.command(usage = '(member_id)', description = 'Разбанить пользователя на сервере.', help = 'Модерация', aliases = ['разбанить'])
async def unban(self, ctx, member_id:int):
user = await self.bot.fetch_user(member_id)
await ctx.guild.unban(user)
await user.send(f'Вы были разбанены на сервере `{ctx.guild.name}`\nСоздатель сервера: `{ctx.guild.owner}`')
await ctx.send(f"Пользователь {user} был разбанен.")
@commands.command(usage = '(@member) [reason]', description = 'Кикнуть пользователя.', help = 'Модерация', aliases = ['кик'])
async def kick(self, ctx, member: discord.Member, arg="Причина не указана"):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
if ctx.author.guild_permissions.kick_members == True:
await member.send(f'Вы были выгнаны с сервере `{ctx.guild.name}` по причине: `{arg}`\nСоздатель сервера: `{ctx.guild.owner}`')
k = f"{ctx.author}({ctx.author.id}) - {arg}"
await member.kick(reason=k)
server = servers.find_one({"server": ctx.guild.id})
await ctx.send(embed = discord.Embed(color=server['embed_color']).add_field(
name="Кик",
value=f"Кикнут: {member.mention}\n"
f"Кикнул: {ctx.author.mention}\n"
f"Причина: {arg}\n"
).set_thumbnail(
url= "https://pa1.narvii.com/6392/9b4dd5ba812d32198cbd5465e0d10b46153c2208_hq.gif"))
@commands.command(usage = '-', description = 'Задержка бота.', help = 'Бот', aliases = ['пинг'])
async def ping(self, ctx):
ping = self.bot.latency
ping_emoji = "🟩🔳🔳🔳🔳"
ping_list = [
{"ping": 0.10000000000000000, "emoji": "🟧🟩🔳🔳🔳"},
{"ping": 0.15000000000000000, "emoji": "🟥🟧🟩🔳🔳"},
{"ping": 0.20000000000000000, "emoji": "🟥🟥🟧🟩🔳"},
{"ping": 0.25000000000000000, "emoji": "🟥🟥🟥🟧🟩"},
{"ping": 0.30000000000000000, "emoji": "🟥🟥🟥🟥🟧"},
{"ping": 0.35000000000000000, "emoji": "🟥🟥🟥🟥🟥"}]
for ping_one in ping_list:
if ping > ping_one["ping"]:
ping_emoji = ping_one["emoji"]
message = await ctx.send("Пожалуйста, подождите. . .")
await message.edit(content = f"Понг! {ping_emoji} `{ping * 1000:.0f}ms` :ping_pong:")
@commands.command(usage = '(@member) (time) [reason]', description = 'Замьютить пользователя на сервере.', help = 'Мьюты', aliases = ['мьют'])
async def mute(self, ctx, member: discord.Member = None, timem = None, *, reason = None):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
server = servers.find_one({"server": ctx.guild.id})
if member is None:
await ctx.send("Вы не указали пользователя!")
elif timem is None:
await ctx.send("Вы не указали время!\nФормат: 10m (s/m/h/d/w)")
else:
if server['mod']['muterole'] is not None:
role = ctx.guild.get_role(server['mod']['muterole']) #id роли
if role != None:
await member.add_roles(role)
if reason == None:
reason = 'Не указана'
try:
ttime = int(timem[:-1])
except:
await ctx.send(f"Укажите число!")
return
if member.id == ctx.guild.owner.id:
return
if timem.endswith("s"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime)
embs = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\nПричина: {reason}", color=server['embed_color'])
await ctx.send(embed = embs)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime))
except:
pass
elif timem.endswith("m"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*60 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*60)
embm = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embm)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*60))
except:
pass
elif timem.endswith("h"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*3600 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*3600)
embh = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embh)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*3600))
except:
pass
elif timem.endswith("d"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*86400 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*86400)
embd = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embd)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*86400))
except:
pass
elif timem.endswith("w"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*604800 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*604800)
embd = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embd)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*604800))
except:
pass
else:
await ctx.send('Ошибка указания времени.')
return
    @commands.command(usage = '(@member) [reason]', description = 'Выдать варн пользователю.', help = 'Варны', aliases = ['варн'])
    async def warn(self, ctx, user:discord.Member, *,reason = None):
        """Issue a warning to *user*; record keeping is delegated to funs.warn."""
        # Permission gate: roles_check consults the per-guild moderator roles.
        if funs.roles_check(ctx.author, ctx.guild.id) == False:
            await ctx.send("У вас недостаточно прав для использования этой команды!")
            return
        await funs.warn(ctx, user, reason, ctx.author)
@commands.command(usage = '(@member)', description = 'Просмотреть варны пользователя.', help = 'Варны', aliases = ['варны'])
async def warns(self, ctx, user:discord.Member = None):
if user == None:
user = ctx.author
server = servers.find_one({'server':ctx.guild.id})
text = ""
if user == None:
await ctx.send("Вы не указали пользователя")
return
try:
print(server['mod']['warns'][str(user.id)].items())
except Exception:
embd = discord.Embed(title = f"Варны: {user.name}", description = f"Варны отсутсвуют", color=server['embed_color'])
await ctx.send(embed = embd)
return
else:
for i in server['mod']['warns'][str(user.id)].items():
if i[1]['reason'] == None:
reason = "Не указано"
else:
reason = i[1]['reason']
text = text + f"**#{i[0]}** **{i[1]['time']}: **{reason}\nВыдал: <@{i[1]['author']}>\n\n"
embd = discord.Embed(title = f"Варны: {user.name}", description = f"{text}", color=server['embed_color'])
await ctx.send(embed = embd)
@commands.command(usage = '(@member) [warn_id]', description = 'Снять варн с пользователя.', help = 'Варны', aliases = ['разварнить'])
async def unwarn(self, ctx, member:discord.Member, num:int = 1):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
server = servers.find_one({'server':ctx.guild.id})
try:
server['mod']['warns'][str(member.id)]
except Exception:
await ctx.send("У этого пользователя нету такого варна.")
return
m = server['mod']
w = m['warns']
w.copy()
try:
w[str(member.id)].pop(str(num))
servers.update_one({"server": ctx.guild.id}, {"$set": {'mod': m}})
except Exception:
await ctx.send(f"У данного пользователя нет варна #{num}")
return
embd = discord.Embed(title = "Сброс", description = f"Варн #{num}, пользователя {member.mention} был сброшен", color=server['embed_color'])
await ctx.send(embed = embd)
    @commands.command(usage = '(@member)', description = 'Размьютить пользователя.', help = 'Мьюты', aliases = ['размьютить'])
    async def unmute(self, ctx, member:discord.Member):
        """Lift a mute: drop the DB record, remove the mute role, clear the API timeout."""
        if funs.roles_check(ctx.author, ctx.guild.id) == False:
            await ctx.send("У вас недостаточно прав для использования этой команды!")
            return
        server = servers.find_one({'server':ctx.guild.id})
        try:
            # KeyError means the member has no active mute record.
            server['mute_members'][str(member.id)]
        except Exception:
            await ctx.send("Этот пользователь не в мьюте.")
            return
        a = server['mute_members'].copy()
        a.pop(str(member.id))
        servers.update_one({'server':server['server']},{'$set': {'mute_members':a}})
        try:
            await self.bot.get_guild(ctx.guild.id).get_member(member.id).remove_roles(self.bot.get_guild(ctx.guild.id).get_role(server['mod']['muterole']))
        except Exception:
            await ctx.send("У бота не достаточно прав на снятие или роль мьюта сброшена")
            return
        embd = discord.Embed(title = "Сброс", description = f"Мьют с пользователя {member.mention} был снят.", color=server['embed_color'])
        await ctx.send(embed = embd)
        try:
            # NOTE(review): timeout = now + 0s presumably clears the native
            # Discord timeout; discord.py documents timeout=None for that — confirm.
            await member.edit(timeout=utcnow() + timedelta(seconds = 0))
        except:
            pass
    @commands.command(usage = '-', description = 'Просмотреть всех замьюченых пользователей.', help = 'Мьюты', aliases = ['мьюты'])
    async def mutes(self,ctx):
        """List currently muted members with the remaining time for each."""
        server = servers.find_one({'server':ctx.guild.id})
        text = ''
        for memid in server['mute_members']:
            try:
                member = ctx.guild.get_member(int(memid))
                text = text + f"{member.mention}, осталось: {funs.time_end(server['mute_members'][str(memid)]-time.time())}\n"
            except Exception:
                # Member is gone (get_member returned None): purge the stale record.
                a = server['mute_members'].copy()
                a.pop(memid)
                servers.update_one({'server':server['server']},{'$set':{'mute_members':a}})
        await ctx.send(embed = discord.Embed(title="Мьюты", description=text, color=server['embed_color']))
    @commands.command(usage = '(number max100)', description = 'Очистить чат.', help = 'Модерация', aliases = ['очистить'])
    async def clear(self, ctx, number:int):
        """Bulk-delete up to 100 messages from the current channel."""
        if funs.roles_check(ctx.author, ctx.guild.id) == False:
            await ctx.send("У вас недостаточно прав для использования этой команды!")
            return
        if number <= 100:
            deleted = await ctx.channel.purge(limit=number)
            # Show the confirmation briefly, then delete it to keep the channel clean.
            message = await ctx.send('Удалено {} сообщений(я)'.format(len(deleted)))
            await asyncio.sleep(3)
            await message.delete()
        else:
            await ctx.send('Не возможно удалить более 100-та сообщений за раз!')
    @commands.command(hidden=True)
    async def global_warn(self,ctx, id:int, *, reason:str = "None"):
        """Bot-moderator command: warn a user id in the global-chat blacklist.

        A third active warn escalates to an automatic ban. Each warn record
        carries an expiry timestamp (time.time() + 2628000, i.e. ~1 month).
        """
        s = settings.find_one({"sid": 1})
        if ctx.author.id not in s['moderators']:
            await ctx.send("У вас нет прав модератора бота!")
            return
        try:
            # KeyError -> no record yet; handled in the except branch below.
            s['bl global chat'][str(id)]
            nw = len(s['bl global chat'][str(id)].keys())
            if nw < 3:
                s['bl global chat'][str(id)].update({str(nw+1):{'reason':reason,"time":time.time() + 2628000}})
                settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
                await ctx.send(f"Пользователь c id `{id}` получил варн #{nw+1}")
            else:
                # Third strike: mark the record as a permanent ban entry.
                s['bl global chat'][str(id)].update({'ban':f'{reason} | auto ban due to 3 warns'})
                settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
                await ctx.send(f"Пользователь c id `{id}` был автоматически забанен за х3 предупреждения")
        except Exception:
            # First warn for this id: create the record.
            s['bl global chat'].update({str(id):{'1':{'reason':reason,"time":time.time() + 2628000}}})
            settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
            await ctx.send(f"Пользователь c id `{id}` получил варн #1")
    @commands.command(hidden=True)
    async def global_ban(self,ctx, id:int, *, reason:str = "None"):
        """Bot-moderator command: ban a user id from the cross-server chat."""
        s = settings.find_one({"sid": 1})
        if ctx.author.id not in s['moderators']:
            await ctx.send("У вас нет прав модератора бота!")
            return
        try:
            s['bl global chat'][str(id)].update({'ban':f'ban: {reason}'})
        except Exception:
            # No existing record for this id: create one, then mark the ban.
            s['bl global chat'].update({str(id):{} })
            s['bl global chat'][str(id)].update({'ban':f'ban: {reason}'})
        settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
        await ctx.send(f"Пользователь c id `{id}` был забанен в межсерверном чате.")
    @commands.command(usage = '[#channel]', description = 'Очистить голос. канал\каналы от пользователей.', help = 'Модерация', aliases = ['очистить_войс'])
    async def voice_clean(self, ctx, channel:discord.VoiceChannel = None):
        """Disconnect everyone from one voice channel, or from all occupied ones."""
        if funs.roles_check(ctx.author, ctx.guild.id) == False:
            await ctx.send("У вас недостаточно прав для использования этой команды!")
            return
        if channel != None:
            for i in channel.members:
                # move_to(channel=None) disconnects the member from voice.
                await i.move_to(channel=None)
            await ctx.send("Голосовой канал был очищен от пользователей!")
        else:
            # No channel given: collect every non-empty voice channel, then sweep.
            ch = []
            for i in ctx.guild.channels:
                if type(i) == discord.channel.VoiceChannel:
                    if len(i.members) > 0:
                        ch.append(i)
            for c in ch:
                for i in c.members:
                    await i.move_to(channel=None)
            await ctx.send("Голосовой канал был очищен от пользователей!")
def setup(bot):
    # discord.py extension entry point: register the moderation cog.
    bot.add_cog(mod(bot))
| StarcoderdataPython |
3497247 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import configparser
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.utils import formatdate
import json
import urllib3
from app import redis_store, logger
import logging
from OpenSSL import crypto
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def scheduler_redis_job():
    """Refresh cached LXD instance info/state in Redis for every known server.

    For each stored server record, list its instances over the LXD API and
    cache both the instance metadata and runtime state under per-server keys.
    """
    logger.info('Redis_job: updating data in redis DB')
    database_lxdservers_list = redis_store.keys('servers:*')
    for serverkey in database_lxdservers_list:
        lxdserver = json.loads(redis_store.get(serverkey))
        all = []
        try:
            res = lxd_api_get_scheduler(lxdserver, 'instances')
            for c in res.json()['metadata']:
                all.append(c[15:])  # strip the '/1.0/instances/' prefix from the API URL
        except Exception as e:
            # One unreachable host must not abort the whole job; log (was print) and continue.
            logger.error('Redis_job: failed to list instances: %s', e)
        if len(all) > 0:
            for c in all:
                res = lxd_api_get_scheduler(lxdserver, 'instances/' + c)
                redis_store.set('server:' + lxdserver['name'] + ':instance:' + c + ':info', json.dumps(res.json()['metadata']))
                res_state = lxd_api_get_scheduler(lxdserver, 'instances/' + c + '/state')
                redis_store.set('server:' + lxdserver['name'] + ':instance:' + c + ':state', json.dumps(res_state.json()['metadata']))
def get_config():
    """Read remote-LXD connection settings from lxdconfig.conf.

    :return: dict with keys:
        endpoint -- API base URL ('' when the config is missing/invalid)
        cert     -- (crt_path, key_path) tuple, or None when unavailable
        verify   -- bool TLS verification flag (defaults to True)
    """
    config = configparser.ConfigParser()
    # Safe defaults: previously a missing/invalid config left `cert` and
    # `verify` unbound, so the return below raised NameError.
    endpoint = ""
    cert = None
    verify = True
    try:
        config.read('lxdconfig.conf')
        endpoint = config['remote']['endpoint']
        cert = (config['remote']['cert_crt'], config['remote']['cert_key'])
        if config['remote']['verify'] == "False":
            verify = False
    except Exception:
        # Missing file or [remote] section: fall back to the defaults above.
        pass
    return {'endpoint': endpoint, 'cert': cert, 'verify': verify}
def send_request(subject, message, useremail=None):
    """
    Send mail to admin and reply to user if usermail set
    :param subject: email subject line
    :param message: HTML body
    :param useremail: optional address CC'd when notify_user is enabled
    :return: status message string (or None when SMTP is disabled in config)
    """
    config = configparser.ConfigParser()
    config.read('lxdconfig.conf')
    enabled = config['smtp']['enabled']
    if enabled == 'True':
        sender = config['smtp']['sender']
        to = config['smtp']['recipient']
        # CC the requesting user only when user notification is enabled in config.
        if config['smtp']['notify_user'] == 'True':
            cc = useremail
        else:
            cc = None
        content = MIMEText(message, 'html')
        try:
            if cc is not None:
                receivers = [cc] + [to]
            else:
                receivers = to
            msg = MIMEMultipart('alternative')
            msg['Subject'] = subject
            msg['From'] = sender
            msg['To'] = to
            msg['Cc'] = cc
            msg["Date"] = formatdate(localtime=True)
            msg.attach(content)
            # STARTTLS handshake before authenticating.
            mailserver = smtplib.SMTP(config['smtp']['server'], config['smtp']['port'], timeout=30)
            mailserver.ehlo()
            mailserver.starttls()
            mailserver.ehlo()
            mailserver.login(config['smtp']['login'], config['smtp']['password'])
            try:
                mailserver.send_message(msg, sender, receivers)
                print("Successfully sent email")
                return "Successfully sent email"
            except:
                return "Error: unable to send email"
            finally:
                # Always close the SMTP session, success or failure.
                mailserver.quit()
        except smtplib.SMTPException:
            print("Error: unable to send email")
            return "Error: unable to send email"
def send_otp_email(key, useremail=None):
    """
    Send mail to user with time based one time password
    :param key: the OTP value to deliver
    :param useremail: recipient address
    :return: status message string (or None when SMTP is disabled in config)
    """
    config = configparser.ConfigParser()
    config.read('lxdconfig.conf')
    subject = 'Access key to ' + config['app']['production_name']
    message = 'Your otp access key is: ' + str(key)
    enabled = config['smtp']['enabled']
    if enabled == 'True':
        sender = config['smtp']['sender']
        to = useremail
        content = MIMEText(message, 'html')
        try:
            msg = MIMEMultipart('alternative')
            msg['Subject'] = subject
            msg['From'] = sender
            msg['To'] = to
            msg["Date"] = formatdate(localtime=True)
            msg.attach(content)
            # STARTTLS handshake before authenticating.
            mailserver = smtplib.SMTP(config['smtp']['server'], config['smtp']['port'], timeout=30)
            mailserver.ehlo()
            mailserver.starttls()
            mailserver.ehlo()
            mailserver.login(config['smtp']['login'], config['smtp']['password'])
            try:
                mailserver.send_message(msg, sender, to)
                return "Successfully sent email"
            finally:
                # Always close the SMTP session, success or failure.
                mailserver.quit()
        except smtplib.SMTPException:
            return "Error: unable to send email"
def lxd_api_get_scheduler(server, endpoint):
    """Issue a GET against an LXD server described by a plain dict.

    :param server: dict with 'address', 'key_public' and 'key_private' keys
    :param endpoint: API path below /1.0/
    :return: requests.Response
    """
    client_cert = ('certs/' + server['key_public'], 'certs/' + server['key_private'])
    url = server['address'] + '/1.0/' + endpoint + ''
    return requests.get(url, verify=get_config()['verify'], cert=client_cert, timeout=100)
def lxd_api_get(server, endpoint):
    """GET an LXD API endpoint for a server model instance.

    :param server: object exposing .address, .key_public, .key_private
    :param endpoint: API path below /1.0/
    :return: requests.Response
    """
    client_cert = ('certs/' + server.key_public, 'certs/' + server.key_private)
    url = server.address + '/1.0/' + endpoint + ''
    return requests.get(url, verify=get_config()['verify'], cert=client_cert, timeout=100)
def lxd_api_post(server, endpoint, data):
    """POST a JSON payload to an LXD API endpoint.

    :param server: object exposing .address, .key_public, .key_private
    :param endpoint: API path below /1.0/
    :param data: JSON-serializable request body
    :return: requests.Response
    """
    client_cert = ('certs/' + server.key_public, 'certs/' + server.key_private)
    url = server.address + '/1.0/' + endpoint + ''
    payload = json.dumps(data)
    return requests.post(url, verify=get_config()['verify'], cert=client_cert, data=payload)
def lxd_api_put(server, endpoint, data):
    """PUT a JSON payload to an LXD API endpoint.

    :param server: object exposing .address, .key_public, .key_private
    :param endpoint: API path below /1.0/
    :param data: JSON-serializable request body
    :return: requests.Response
    """
    client_cert = ('certs/' + server.key_public, 'certs/' + server.key_private)
    url = server.address + '/1.0/' + endpoint + ''
    payload = json.dumps(data)
    return requests.put(url, verify=get_config()['verify'], cert=client_cert, data=payload)
def lxd_api_patch(server, endpoint, data):
    """
    Patch function for LXD API
    :param server: object exposing .address, .key_public, .key_private
    :param endpoint: API path below /1.0/
    :param data: JSON-serializable request body
    :return: response:
    """
    cert = ('certs/' + server.key_public, 'certs/' + server.key_private)
    r = requests.patch(server.address + '/1.0/' + endpoint + '', verify=get_config()['verify'], cert=cert,
                       data=json.dumps(data))
    # Bug fix: a leftover second requests.patch() against get_config()['endpoint']
    # used to overwrite r (and send the PATCH twice). Every sibling helper has
    # that legacy line commented out, so it is removed here.
    return r
def lxd_api_delete(server, endpoint):
    """DELETE an LXD API endpoint.

    :param server: object exposing .address, .key_public, .key_private
    :param endpoint: API path below /1.0/
    :return: requests.Response
    """
    client_cert = ('certs/' + server.key_public, 'certs/' + server.key_private)
    url = server.address + '/1.0/' + endpoint + ''
    return requests.delete(url, verify=get_config()['verify'], cert=client_cert)
def lxd_api_get_config(server):
    """Fetch the LXD server configuration (GET /1.0).

    :param server: object exposing .address, .key_public, .key_private
    :return: requests.Response
    """
    client_cert = ('certs/' + server.key_public, 'certs/' + server.key_private)
    return requests.get(server.address + '/1.0', verify=get_config()['verify'], cert=client_cert)
def lxd_remote_get():
    """List image aliases from the public linuxcontainers image server.

    :return: requests.Response
    """
    url = 'https://uk.images.linuxcontainers.org' + '/1.0/images/aliases?recursion=1'
    return requests.get(url, timeout=10)
def certificate_generator(
        emailAddress="lxdmanager",
        commonName="lxdmanager",
        countryName="SK",
        localityName="Slovakia",
        stateOrProvinceName="Slovakia",
        organizationName="lxdmanger",
        organizationUnitName="lxdmanager",
        serialNumber=0,
        validityStartInSeconds=0,
        validityEndInSeconds=10*365*24*60*60,
        KEY_FILE="test_key.key",
        CERT_FILE="test_key.crt"):
    """Generate a self-signed RSA-4096 certificate and private key on disk.

    Inspect the result with:
        openssl x509 -inform pem -in <CERT_FILE> -noout -text

    :param serialNumber: certificate serial number
    :param validityStartInSeconds: offset from now (seconds) when validity begins
    :param validityEndInSeconds: offset from now (seconds) when validity ends
    :param KEY_FILE: output path for the PEM private key
    :param CERT_FILE: output path for the PEM certificate
    """
    # Create a key pair.
    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, 4096)
    # Create a self-signed cert.
    cert = crypto.X509()
    cert.get_subject().C = countryName
    cert.get_subject().ST = stateOrProvinceName
    cert.get_subject().L = localityName
    cert.get_subject().O = organizationName
    cert.get_subject().OU = organizationUnitName
    cert.get_subject().CN = commonName
    cert.get_subject().emailAddress = emailAddress
    cert.set_serial_number(serialNumber)
    # Bug fix: validityStartInSeconds was previously ignored (hard-coded 0);
    # the default value keeps the original behavior.
    cert.gmtime_adj_notBefore(validityStartInSeconds)
    cert.gmtime_adj_notAfter(validityEndInSeconds)
    cert.set_issuer(cert.get_subject())  # self-signed: issuer == subject
    cert.set_pubkey(k)
    cert.sign(k, 'sha512')
    with open(CERT_FILE, "w") as f:
        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode("utf-8"))
    with open(KEY_FILE, "w") as f:
        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode("utf-8"))
def send_cert_to_server(server_name='lxd', server_address='https://127.0.0.1:8443', server_password=''):
    """Generate a client certificate and register it with an LXD server.

    :param server_name: name used both for the cert files and the LXD trust entry
    :param server_address: LXD API base URL (default fixed: was missing '//')
    :param server_password: LXD trust password
    :return: requests.Response from POST /1.0/certificates
    """
    key_file = 'certs/' + server_name + '_key.key'
    cert_file = 'certs/' + server_name + '_key.crt'
    certificate_generator(KEY_FILE=key_file, CERT_FILE=cert_file)
    # Strip the BEGIN/END PEM armor lines; LXD expects the bare base64 body.
    with open(cert_file, 'r') as f:
        cert = ''.join(f.readlines()[1:-1])
    data = {
        "type": "client",
        "certificate": cert,
        "name": server_name,
        # Bug fix: this previously contained a '<PASSWORD>' placeholder
        # (a SyntaxError); the server_password parameter is used as intended.
        "password": server_password
    }
    data = json.dumps(data)
    r = requests.post(server_address + '/1.0/' + 'certificates' + '', data=data, verify=False)
    print(r.text)
    return r
| StarcoderdataPython |
12843502 | <gh_stars>0
# https://docs.microsoft.com/en-us/windows/python/beginners
# https://developers.google.com/identity/protocols/oauth2/service-account#python
from __future__ import print_function
from pathlib import Path
from googleapiclient.discovery import build
from google.oauth2 import service_account
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
HOME_PATH = str(Path.home())
SERVICE_ACCOUNT_FILE = HOME_PATH + '/devkey/devhkmci-gmaildomainwide-1d7640a0c6d2.json'
def main():
    """Demonstrate Gmail API access via service-account domain-wide delegation."""
    # NOTE(review): '<EMAIL>' is a redacted placeholder — replace with the real
    # G Suite user to impersonate (must hold the required admin privileges).
    DELEGATE='<EMAIL>' # Service account will impersonate this user. Must have proper admin privileges in G Suite.
    credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)
    # with_subject() turns the service-account credentials into delegated ones.
    credentials_delegated = credentials.with_subject(DELEGATE)
    service = build('gmail', 'v1', credentials=credentials_delegated)
    # Call the Gmail API: profile of the impersonated user, then their labels.
    results = service.users().getProfile(userId='me').execute()
    print(results)
    results = service.users().labels().list(userId='me').execute()
    print(results)
if __name__ == '__main__':
    main()
# [END gmail_quickstart]
| StarcoderdataPython |
3366661 | <filename>app/models.py
from app import create_app,db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session user id to a User row."""
    uid = int(user_id)
    return User.query.get(uid)
class User(UserMixin,db.Model):
    """Application user account, integrated with Flask-Login via UserMixin."""
    __tablename__='users'
    id = db.Column(db.Integer,primary_key = True)
    username = db.Column(db.String(255))
    email = db.Column(db.String(255),unique = True, index =True)
    password_hash = db.Column(db.String(255))  # hash checked by verify_password()
    pass_secure = db.Column(db.String(255))    # legacy duplicate of the hash
    post = db.relationship("Post", backref="user", lazy = "dynamic")
    comment = db.relationship("Comments", backref="user", lazy = "dynamic")

    @property
    def password(self):
        """Write-only attribute: reading the plaintext password is forbidden."""
        raise AttributeError('You can not read the Password Attribute')

    @password.setter
    def password(self, password):
        # Bug fix: the setter used to write only pass_secure while
        # verify_password() reads password_hash, so verification always
        # failed for users created through this setter. Both columns are
        # populated now to stay backward compatible.
        self.password_hash = generate_password_hash(password)
        self.pass_secure = self.password_hash

    def verify_password(self,password):
        """Check a plaintext password against the stored hash."""
        return check_password_hash(self.password_hash, password)

    def __repr__(self):
        return 'User {}'.format(self.username)
class Post(db.Model):
    """A blog post authored by a User."""
    __tablename__ = 'post'
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(50))
    subtitle = db.Column(db.String(50))
    author = db.Column(db.String(20))
    date_posted = db.Column(db.DateTime)
    blog_post = db.Column(db.Text)  # full post body
    user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
    comment = db.relationship("Comments", backref="post", lazy = "dynamic")

    def save_post(self):
        """Persist this post to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def clear_post(cls):
        # NOTE(review): Post defines no 'all_post' attribute anywhere in this
        # file, so calling this raises AttributeError — confirm intent.
        Post.all_post.clear()

    def get_pitches(id):
        # NOTE(review): defined without 'self' and the 'id' argument is
        # ignored; it returns every post regardless — verify against callers.
        post = Post.query.all()
        return post
class Comments(db.Model):
    """A comment left by a User on a Post."""
    __tablename__ = 'comments'
    id = db.Column(db. Integer, primary_key=True)
    # NOTE(review): despite the name, comment_id appears to hold comment
    # text (String(255)) rather than a reference — confirm against forms.
    comment_id = db.Column(db.String(255))
    date_posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    post_id = db.Column(db.Integer, db.ForeignKey("post.id"))

    def save_comment(self):
        """Persist this comment to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_comments(self, id):
        """Return comments for post *id*, newest first."""
        comment = Comments.query.order_by(Comments.date_posted.desc()).filter_by(post_id=id).all()
        return comment
19204 | import torch
import hcat.lib.functional
from hcat.lib.functional import IntensityCellReject
from hcat.backends.backend import Backend
from hcat.models.r_unet import embed_model as RUnet
from hcat.train.transforms import median_filter, erosion
import hcat.lib.utils
from hcat.lib.utils import graceful_exit
import os.path
import wget
from typing import Dict, Optional
class SpatialEmbedding(Backend):
    """Segmentation backend that predicts per-pixel spatial-embedding vectors
    and converts them into per-cell probability masks via gaussian kernels
    centered on estimated centroids."""

    def __init__(self,
                 sigma: Optional[torch.Tensor] = torch.tensor([0.02, 0.02, 0.02]),
                 device: Optional[str] = 'cuda',
                 model_loc: Optional[str] = None,
                 postprocessing: Optional[bool] = True,
                 scale: Optional[int] = 25,
                 figure: Optional[str] = None,
                 archetecture: Optional[RUnet] = RUnet):
        """
        Initialize Spatial embedding Algorithm.

        :param sigma: torch.Tensor[sigma_x, sigma_y, sigma_z] values for gaussian probability estimation.
        :param device: String value for torch device by which to run segmentation backbone on.
        :param model_loc: Path to trained model files.
        :param postprocessing: Disable segmentation postprocessing, namely
        :param scale: scale factor based on max diameter of object
        :param figure: filename and path of diagnostic figure which may be rendered
        """
        super(SpatialEmbedding, self).__init__()
        # Pretrained weights hosted on GitHub; when set, this takes precedence
        # over model_loc below.
        self.url = 'https://github.com/buswinka/hcat/blob/master/modelfiles/spatial_embedding.trch?raw=true'
        self.scale = torch.tensor(scale)
        self.device = device
        self.sigma = sigma.to(device)
        self.postprocessing = postprocessing
        self.figure = figure
        if self.url:
            self.model = self._model_loader_url(self.url, archetecture, device)
        else:
            self.model = self._model_loader_path(model_loc, archetecture, device)
        # TorchScript-compiled functional stages of the pipeline (inference only).
        self.vector_to_embedding = torch.jit.script(
            hcat.lib.functional.VectorToEmbedding(scale=self.scale).requires_grad_(False).eval())
        self.embedding_to_probability = torch.jit.script(
            hcat.lib.functional.EmbeddingToProbability(scale=self.scale).requires_grad_(False).eval())
        self.estimate_centroids = hcat.lib.functional.EstimateCentroids(scale=self.scale).requires_grad_(False)
        self.filter = median_filter(kernel_targets=3, rate=1, device=device)
        self.binary_erosion = erosion(device=device)
        self.intensity_rejection = IntensityCellReject()
        self.nms = hcat.lib.functional.nms().requires_grad_(False)
        # Intermediate results cached by forward() for inspection/debugging.
        self.centroids = None
        self.vec = None
        self.embed = None
        self.prob = None

    @graceful_exit('\x1b[1;31;40m' + 'ERROR: Spatial Embedding Failed. Aborting...' + '\x1b[0m')
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Inputs an image and outputs a probability mask of everything seen in the image.

        .. note::
           Call the module as a function to execute this method (similar to torch.nn.module).

        .. warning:
           Will not raise an error upon failure, instead returns None and prints to standard out

        Example:

        >>> from hcat.backends.spatial_embedding import SpatialEmbedding
        >>> import torch
        >>> backend = SpatialEmbedding()
        >>> image = torch.load('path/to/my/image.trch')
        >>> assert image.ndim == 5 # Shape should be [B, C, X, Y, Z]
        >>> masks = backend(image)

        :param image: [B, C=4, X, Y, Z] input image
        :return: [B, 1, X, Y, Z] output segmentation mask where each pixel value is a cell id (0 is background)
        """
        # Input contract: 5-D, single channel, values normalized to [-1, 1].
        assert image.ndim == 5
        assert image.shape[1] == 1
        assert image.min() >= -1
        assert image.max() <= 1
        image = image.to(self.device)
        b, c, x, y, z = image.shape
        # NOTE(review): image_reject/_is_image_bad are presumably provided by
        # the Backend base class (not visible here) — confirm.
        if self.image_reject and self._is_image_bad(image):
            return torch.zeros((b, 0, x, y, z), device=self.device)
        # Evaluate Neural Network Model.
        out: torch.Tensor = self.model(image)
        # Last channel is the probability map; first three are the vector field.
        probability_map = out[:, [-1], ...]
        out = out[:, 0:3:1, ...]
        self.prob = probability_map.cpu()
        self.vec = out.cpu()
        out: torch.Tensor = self.vector_to_embedding(out)
        self.embed = out.cpu()
        centroids: Dict[str, torch.Tensor] = self.estimate_centroids(out, probability_map)
        self.centroids = centroids
        out: torch.Tensor = self.embedding_to_probability(out, centroids, self.sigma)
        # Reject cell masks that overlap or meet min Myo7a criteria.
        if self.postprocessing:
            out: torch.Tensor = self.intensity_rejection(out, image)
        if out.numel() == 0:
            return torch.zeros((b, 0, x, y, z), device=self.device)
        # Non-maximum suppression of overlapping candidate masks (IoU > 0.5).
        ind = self.nms(out, 0.5)
        out = out[:, ind, ...]
        # Zero out mask pixels where the raw probability map is below 0.8.
        probability_map = probability_map.lt(0.8).squeeze(1)
        for i in range(out.shape[1]):
            out[:, i, ...][probability_map] = 0
        self.zero_grad()
        return out

    def load(self, model_loc: str) -> None:
        """
        Initializes model weights from a url or filepath.

        Example:

        >>> from hcat.backends.spatial_embedding import SpatialEmbedding
        >>> backend = SpatialEmbedding()
        >>>
        >>> url = 'https://www.model_location.com/model.trch'
        >>> backend.load(url) # Works with url
        >>>
        >>> model_path = 'path/to/my/model.trch'
        >>> backend.load(model_path) # Also works with path
        :param model_loc: url or filepath
        :return: None
        """
        if self._is_url(model_loc):
            return self._model_loader_url(model_loc, RUnet(in_channels=1).requires_grad_(False), self.device)
        else:
            return self._model_loader_path(model_loc, RUnet(in_channels=1).requires_grad_(False), self.device)
| StarcoderdataPython |
3587244 | <reponame>marcosolvr/Uri-Problem-Solutions<filename>Python/1003 - Soma Simples.py
# URI 1003: read two integers from stdin and print their sum.
first = int(input())
second = int(input())
total = first + second
print(f'SOMA = {total}')
1882677 | <filename>tests/test_models.py
from typing import OrderedDict
import pytest
from pygitguardian.models import (
Document,
DocumentSchema,
HealthCheckResponseSchema,
Match,
MatchSchema,
MultiScanResult,
MultiScanResultSchema,
PolicyBreak,
PolicyBreakSchema,
Quota,
QuotaResponse,
QuotaResponseSchema,
QuotaSchema,
ScanResult,
ScanResultSchema,
)
class TestModel:
    """Unit tests for pygitguardian model classes and their marshmallow schemas."""

    def test_document_model(self):
        """
        GIVEN a simple document
        THEN base model methods should produce the appropriate types.
        """
        document = Document("hello", "hello")
        assert isinstance(document.to_json(), str)
        assert isinstance(document.to_dict(), dict)
        assert isinstance(str(document), str)

    # Each case: (schema class, expected deserialized type, minimal valid payload).
    @pytest.mark.parametrize(
        "schema_klass, expected_klass, instance_data",
        [
            (DocumentSchema, OrderedDict, {"filename": "hello", "document": "hello"}),
            (
                HealthCheckResponseSchema,
                OrderedDict,
                {"detail": "hello", "status_code": 200},
            ),
            (MatchSchema, Match, {"match": "hello", "type": "hello"}),
            (
                MultiScanResultSchema,
                MultiScanResult,
                {
                    "scan_results": [
                        {
                            "policy_break_count": 1,
                            "policies": ["pol"],
                            "policy_breaks": [
                                {
                                    "type": "break",
                                    "policy": "mypol",
                                    "matches": [
                                        {
                                            "match": "hello",
                                            "type": "hello",
                                        }
                                    ],
                                }
                            ],
                        }
                    ],
                    "type": "hello",
                },
            ),
            (
                PolicyBreakSchema,
                PolicyBreak,
                {
                    "type": "hello",
                    "policy": "hello",
                    "validity": "hey",
                    "matches": [{"match": "hello", "type": "hello"}],
                },
            ),
            (
                QuotaSchema,
                Quota,
                {
                    "count": 1,
                    "limit": 1,
                    "remaining": 1,
                    "since": "2021-04-18",
                },
            ),
            (
                QuotaResponseSchema,
                QuotaResponse,
                {
                    "content": {
                        "count": 1,
                        "limit": 1,
                        "remaining": 1,
                        "since": "2021-04-18",
                    }
                },
            ),
            (
                ScanResultSchema,
                ScanResult,
                {"policy_break_count": 1, "policy_breaks": [], "policies": []},
            ),
        ],
    )
    def test_schema_loads(self, schema_klass, expected_klass, instance_data):
        """
        GIVEN the right kwargs and an extra field in dict format
        WHEN loading using the schema
        THEN the extra field should be excluded
        AND the result should be an instance of the expected class
        """
        schema = schema_klass()
        data = {**instance_data, "field": "extra"}
        obj = schema.load(data)
        assert isinstance(obj, expected_klass)
4927765 | <reponame>abhinavkbij/machine_flow
# *************** MODEL BUILDING FUNCTIONS *******************
# add libraries
import json
from Python_Scripts.utility_functions import *
from sklearn import linear_model
def run_linear_regression(read_path, write_path):
    """Fit an ordinary least-squares model on the stored train split and
    persist predictions for the test split.

    :param read_path: location of the pre-split train/test data
    :param write_path: location where predictions vs. ground truth are written
    :return: the literal string 'done'
    """
    X_train, X_test, Y_train, Y_test = read_split_sets(read_path)
    model = linear_model.LinearRegression()
    model.fit(X_train, Y_train)
    predictions = model.predict(X_test)
    write_model_results(write_path, predictions, Y_test)
    return 'done'
11219922 | # coding: utf-8
import random
import time
import json
import requests
from db import REDIS
import pymysql
# Visibility flags stored alongside each repo record.
REPO_SHOW = '1'
REPO_HIDDEN = '0'
# GitHub search API, newest-updated first; interpolated with (keyword, page).
SEARCH_API = 'https://api.github.com/search/repositories?q=%s&sort=updated&order=desc&page=%s'
def LoadUserAgents(uafile):
    """Load user-agent byte strings from *uafile*, shuffled.

    Each line is read in binary mode, stripped of surrounding whitespace,
    and its last byte is dropped (matching the original file format).

    :param uafile: path to the user-agent list file
    :return: shuffled list of bytes objects
    """
    with open(uafile, 'rb') as fh:
        agents = [line.strip()[:-1] for line in fh.readlines() if line]
    random.shuffle(agents)
    return agents
# Pool of user-agent strings loaded once at import time.
uas = LoadUserAgents("user_agents.txt")
# NOTE(review): this module-level header set is shadowed by the per-request
# 'head' dict built inside search_github() and appears unused there — confirm.
head = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
    'Referer': 'http://space.bilibili.com/45388',
    'Origin': 'http://space.bilibili.com',
    'Host': 'space.bilibili.com',
    'AlexaToolbar-ALX_NS_PH': 'AlexaToolbar/alx-4.0',
    'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,ja;q=0.4',
    'Accept': 'application/json, text/javascript, */*; q=0.01',
}
def search_github(keyword):
    """Search GitHub repositories for *keyword* and insert new hits into MySQL."""
    ua = random.choice(uas)
    head = {
        'User-Agent': ua
    }
    # Crawl the 20 most recently updated result pages.
    for i in range(1, 21):
        res = requests.get(SEARCH_API % (keyword, i),headers=head)
        repo_list = res.json()['items']
        for repo in repo_list:
            repo_name = repo['html_url'].strip()
            desc = {
                'repo_desc': repo['description'],
                'star': repo['stargazers_count'],
                'is_show': REPO_SHOW
            }
            # Timestamp with the dot removed doubles as a (weak) unique row id.
            ticks=str(time.time()).replace('.', '')
            # Please write your MySQL's information.
            try:
                conn = pymysql.connect(
                    host='localhost', user='root', passwd='<PASSWORD>', db='spider', charset='utf8')
                cur = conn.cursor()
                # SECURITY(review): the INSERT is built by %-interpolation of
                # scraped data — switch to a parameterized cur.execute(sql, params).
                cur.execute('INSERT INTO github(`id`,`respo_name`, `desc`, `stars`, `is_show`) \
                VALUES ("%s","%s","%s","%s","%s")'
                            %
                            (ticks,repo_name.strip(),str(desc['repo_desc']).replace("\"","\"\""),desc['star'],desc['is_show'] ))
                print(ticks,',',repo_name)
                conn.commit()
            except Exception as e:
                print(e)
            # Throttle to stay under GitHub's unauthenticated rate limit.
            time.sleep(10)
if __name__ == '__main__':
    # Seed keywords are also cached in Redis so other components can read them.
    keywords = ['爬虫', 'spider', 'crawl','批量']
    REDIS.set('keywords', ' '.join(keywords))
    for keyword in keywords:
        search_github(keyword)
| StarcoderdataPython |
8031597 | # -*- coding:utf-8 -*-
# author : gfjiangly
# time : 2019/5/17 10:17
# e-mail : <EMAIL>
# software : PyCharm
import os
import json
from tqdm import tqdm
import numpy as np
import cv2
from cvtools.utils.file import get_files_list
class BDD2COCO:
    """Convert BDD100K detection labels into COCO-format annotations."""

    def __init__(self, bdd_root, phase, save_name='custom', path_replace=None):
        """
        :param bdd_root: BDD100K dataset root (containing images/ and labels/)
        :param phase: split name, e.g. 'train' or 'val'
        :param save_name: prefix of the output annotation json
        :param path_replace: mapping of path prefixes to rewrite in image paths
        """
        self.bdd_root = bdd_root
        self.phase = phase
        self.save_name = save_name
        self.path_replace = path_replace
        # The subset of BDD categories to keep in the converted dataset.
        self.categorys = ['car', 'bus', 'person', 'bike', 'truck',
                          'motor', 'rider', 'traffic sign', 'traffic light']
        self.categorys_map = {}
        self.json_files = get_files_list(self.bdd_root+'labels/100k/'+self.phase, file_type='json')
        # COCO ids are 1-based.
        self.imageID = 1
        self.annID = 1
        self.coco_dataset = {
            "info": {
                "description": "bdd to coco dataset format.",
                "url": "http://www.gfjiang.com",
                "version": "0.1", "year": 2019,
                "contributor": "jiang",
                "date_created": "2019-05-09 09:11:52.357475"
            },
            "categories": [],
            "images": [], "annotations": []
        }

    def parse_json(self, json_file):
        """
        Parse one BDD100K json label file.

        :param json_file: path to a single BDD100K json label file
        :return: (image_name, objects) where objects holds the box
            coordinates and class of every kept annotation, e.g.
            [[325.0, 342.0, 376.0, 384.0, 'car'], ...]
        """
        objs = []
        obj = []
        info = json.load(open(json_file))
        image_name = info['name']
        objects = info['frames'][0]['objects']
        for i in objects:
            # Keep only annotations from the whitelisted categories.
            if i['category'] in self.categorys:
                obj.append(int(i['box2d']['x1']))
                obj.append(int(i['box2d']['y1']))
                obj.append(int(i['box2d']['x2']))
                obj.append(int(i['box2d']['y2']))
                obj.append(i['category'])
                objs.append(obj)
                obj = []
        return image_name, objs

    def convert(self):
        """Walk every label file and build the in-memory COCO dataset, then save."""
        for cls_id, cls_name in enumerate(self.categorys, start=1):
            self.categorys_map[cls_name] = cls_id
            self.coco_dataset['categories'].append({
                'id': cls_id,
                'name': cls_name,
                'supercategory': cls_name
            })
        for json_file in tqdm(self.json_files):
            image_name, objects = self.parse_json(json_file)
            image_name = self.bdd_root+'images/100k/'+self.phase+'/'+image_name+'.jpg'
            # imdecode+fromfile handles non-ASCII paths that cv2.imread can't.
            im = cv2.imdecode(np.fromfile(image_name, dtype=np.uint8), cv2.IMREAD_COLOR)
            if im is None:
                print('Waring: !!!can\'t read %s, continue this image' % image_name)
                continue
            height, width, _ = im.shape
            # Add the image information to the dataset.
            for key, value in self.path_replace.items():
                image_name = image_name.replace(key, value)
            self.coco_dataset["images"].append({
                'file_name': image_name,
                'id': self.imageID,
                'width': width,
                'height': height
            })
            for bbox in objects:
                cls_name = bbox[4]
                x1, y1, x2, y2 = bbox[:4]
                width = max(0., x2 - x1)
                height = max(0., y2 - y1)
                self.coco_dataset['annotations'].append({
                    'area': width * height,
                    'bbox': [x1, y1, width, height],
                    'category_id': self.categorys_map[cls_name],  # 0 for backgroud
                    'id': self.annID,
                    'image_id': self.imageID,
                    'iscrowd': 0,
                    # Mask: the rectangle's four corners, clockwise from top-left.
                    'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]]
                })
                self.annID += 1
            self.imageID += 1
        self.save_json()

    def save_json(self):
        """Write the accumulated COCO dataset to <root>/annotations/<name>_<phase>.json."""
        # Create the output folder for the results.
        folder = os.path.join(self.bdd_root, 'annotations')
        if not os.path.exists(folder):
            os.makedirs(folder)
        json_name = os.path.join(self.bdd_root, 'annotations/{name}_{phase}.json'.format(
            name=self.save_name, phase=self.phase))
        with open(json_name, 'w') as f:
            json.dump(self.coco_dataset, f)  # using indent=4 show more friendly
if __name__ == '__main__':
    # Conversion settings: output file prefix, dataset root and split.
    name = 'custom'
    root_path = 'F:/bdd/bdd100k/'
    phase = 'train'
    # path_replace = {'F:/头肩检测分类/data/': '/home/arc-fsy8515/data/',
    #                 '/root/data/': '/home/arc-fsy8515/data/',
    #                 'E:/data': '/home/arc-fsy8515/data/'}
    # Rewrite local Windows path prefixes to the server's Linux layout.
    path_replace = {'F:/': '/media/data1/jgf/',
                    '/root/data/': '/media/data1/jgf/',
                    'E:/data': '/media/data1/jgf/'}
    bdd2coco = BDD2COCO(root_path, phase, save_name=name, path_replace=path_replace)
    bdd2coco.convert()
| StarcoderdataPython |
357438 | from django.conf import settings
from storages.backends.s3boto import S3BotoStorage
class BackupStorage(S3BotoStorage):
    """S3 storage backend for backups, configured from Django settings.

    All four BACKUP_BUCKET_* settings are mandatory: missing ones raise
    AttributeError when this module is imported, since no defaults are
    supplied.
    """
    access_key = settings.BACKUP_BUCKET_AWS_ACCESS_KEY_ID
    secret_key = settings.BACKUP_BUCKET_AWS_SECRET_ACCESS_KEY
    bucket_name = settings.BACKUP_BUCKET_BUCKET_NAME
    location = settings.BACKUP_BUCKET_LOCATION
| StarcoderdataPython |
11302717 | <reponame>cyberworm/ircbot
import socket
import unittest
from mock import Mock
from ircbot.ircsocket import IrcSocket
from ircbot.exceptions import TimeoutError
class TestIrcSocket(unittest.TestCase):
    """Unit tests for IrcSocket, using a mocked raw socket."""

    def setUp(self):
        # Fresh mock socket per test so call counts do not leak between tests.
        self.__socketMock = Mock(socket.socket())
        self.__ircSocket = IrcSocket(self.__socketMock)

    def tearDown(self):
        del self.__ircSocket
        del self.__socketMock

    def test_connect_should_call_connect_socket_method_with_server_and_port(self):
        server = 'irc.rizon.net'
        port = 6667
        self.__ircSocket.connect(server, port)
        # socket.connect takes a single (host, port) tuple.
        self.__socketMock.connect.assert_called_once_with((server, port))

    def test_disconnect_should_call_close_socket_method(self):
        self.__ircSocket.disconnect()
        self.__socketMock.close.assert_called_once_with()

    def test_send_should_call_send_socket_method_with_content(self):
        self.__ircSocket.send('some content')
        self.__socketMock.send.assert_called_once_with('some content')

    def test_receive_should_call_recv_socket_method_and_return_received_content(self):
        expected = 'Hello World'
        self.__socketMock.recv.return_value = expected
        content = self.__ircSocket.receive()
        self.assertEqual(content, 'Hello World', 'Expected: %s, got: %s' % (expected, content))
        self.__socketMock.recv.assert_called_once_with(4096)

    def test_receive_should_call_recv_socket_method_and_raise_TimeoutError(self):
        expected = 'some timeout'
        self.__socketMock.recv.side_effect = socket.timeout('some timeout')
        # assertRaises makes the test FAIL when no exception is raised;
        # the previous try/except silently passed in that case.
        with self.assertRaises(TimeoutError) as context:
            self.__ircSocket.receive()
        self.assertEqual(context.exception.message, expected,
                         'Expected: %s, got: %s' % (expected, context.exception.message))
        self.__socketMock.recv.assert_called_once_with(4096)

    def test_set_timeout_should_call_set_timeout_socket_method_with_timeout_in_seconds(self):
        self.__ircSocket.set_timeout(300)
        self.__socketMock.settimeout.assert_called_once_with(300)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
318605 | <reponame>isudox/leetcode-solution
"""1007. Minimum Domino Rotations For Equal Row
https://leetcode.com/problems/minimum-domino-rotations-for-equal-row/
"""
from typing import List
class Solution:
def minDominoRotations(self, tops: List[int], bottoms: List[int]) -> int:
def rotate(l1: List[int], x: int, is_top: bool) -> int:
nums = bottoms if is_top else tops
for pos in l1:
if nums[pos] != x:
return -1
return len(l1)
n = len(tops)
store1 = [[] for _ in range(7)]
store2 = [[] for _ in range(7)]
for i in range(n):
store1[tops[i]].append(i)
store2[bottoms[i]].append(i)
ans = n + 1
for i in range(1, 7):
tmp = 0
ok = True
for j in range(1, 7):
if j != i:
ret = rotate(store1[j], i, True)
if ret == -1:
ok = False
break
tmp += ret
if ok:
ans = min(ans, tmp)
for i in range(1, 7):
tmp = 0
ok = True
for j in range(1, 7):
if j != i:
ret = rotate(store2[j], i, False)
if ret == -1:
ok = False
break
tmp += ret
if ok:
ans = min(ans, tmp)
return ans if ans <= n else -1
| StarcoderdataPython |
164543 | <gh_stars>100-1000
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
from absl.testing import absltest
import mock
import webtest
from google.appengine.api import users
from google.appengine.ext import db
from cauliflowervest import settings as base_settings
from cauliflowervest.server import crypto
from cauliflowervest.server import main as gae_main
from cauliflowervest.server import permissions
from cauliflowervest.server import service_factory
from cauliflowervest.server import services
from cauliflowervest.server import settings
from cauliflowervest.server import util
from cauliflowervest.server.handlers import test_util
from cauliflowervest.server.models import base
from cauliflowervest.server.models import firmware
class AppleFirmwareHandlerTest(test_util.BaseTest):
    """End-to-end tests for the /apple_firmware upload/retrieval handler."""

    def setUp(self):
        super(AppleFirmwareHandlerTest, self).setUp()
        self.testapp = webtest.TestApp(gae_main.app)
        # Stub the inventory service so no external lookups happen.
        inventory = service_factory.GetInventoryService()
        inventory.GetAssetTagsFromUploadRequest = mock.Mock(return_value=['111'])
        inventory.GetMetadataUpdates = mock.Mock(return_value={})

    def tearDown(self):
        # Reset the factory singleton so later tests get a fresh service.
        service_factory.inventory_service = None
        super(AppleFirmwareHandlerTest, self).tearDown()

    @mock.patch.dict(settings.__dict__, {'XSRF_PROTECTION_ENABLED': False})
    def testUpload(self):
        """PUT should escrow the password under the given serial/hostname."""
        password = '<PASSWORD>'
        hostname = 'host1'
        serial = 'SERIAL'
        self.testapp.put(
            '/apple_firmware/?volume_uuid=%s&hostname=%s&platform_uuid=ID1' % (
                serial, hostname),
            params=password, status=httplib.OK)
        passwords = firmware.AppleFirmwarePassword.all().fetch(None)
        # Exactly one entity should exist, carrying the uploaded fields.
        self.assertEqual(1, len(passwords))
        self.assertEqual(password, passwords[0].password)
        self.assertEqual(serial, passwords[0].target_id)
        self.assertEqual(hostname, passwords[0].hostname)

    @mock.patch.dict(settings.__dict__, {'XSRF_PROTECTION_ENABLED': False})
    def testRetrieval(self):
        """GET should return the escrowed passphrase for a serial."""
        password = '<PASSWORD>'
        hostname = 'host1'
        serial = 'SERIAL'
        firmware.AppleFirmwarePassword(
            serial=serial, hostname=hostname, password=password, owners=['stub7'],
            platform_uuid='ID1',
        ).put()
        resp = util.FromSafeJson(
            self.testapp.get('/apple_firmware/SERIAL', status=httplib.OK).body)
        self.assertEqual(password, resp['passphrase'])
        self.assertEqual(serial, resp['volume_uuid'])
class AppleFirmwarePasswordChangeOwnerTest(test_util.BaseTest):
    """Test the apple_firmware.AppleFirmwarePasswordChangeOwner class."""

    def setUp(self):
        super(AppleFirmwarePasswordChangeOwnerTest, self).setUp()
        settings.KEY_TYPE_DEFAULT_FILEVAULT = settings.KEY_TYPE_DATASTORE_FILEVAULT
        settings.KEY_TYPE_DEFAULT_XSRF = settings.KEY_TYPE_DATASTORE_XSRF
        # A user holding only the CHANGE_OWNER permission.
        self.user = base.User(
            key_name='<EMAIL>', user=users.User('<EMAIL>'))
        self.user.apple_firmware_perms = [permissions.CHANGE_OWNER]
        self.user.put()
        self.manufacturer = 'SampleManufacturer'
        self.serial = 'XX123456'
        self.platform_uuid = 'A4E75A65-FC39-441C-BEF5-49D9A3DC6BE0'
        self.volume_id = self._EscrowPassphrase('<PASSWORD>')
        self.testapp = webtest.TestApp(gae_main.app)

    def _EscrowPassphrase(self, passphrase):
        # Helper: store a passphrase entity and return its datastore key.
        # NOTE(review): the 'password=<PASSWORD>' value below is an
        # anonymization artifact in this copy of the source.
        fvv = firmware.AppleFirmwarePassword(
            serial=self.serial,
            password=<PASSWORD>,
            hostname='somehost.local',
            platform_uuid=self.platform_uuid,
            created_by=users.User('<EMAIL>'))
        return fvv.put()

    @property
    def change_owner_url(self):
        # URL targeting the most recently escrowed volume.
        return '/api/internal/change-owner/apple_firmware/%s/' % (self.volume_id)

    @mock.patch.dict(settings.__dict__, {'XSRF_PROTECTION_ENABLED': False})
    def testChangeOwner(self):
        """A permitted user can reassign ownership of the latest entity."""
        self.testapp.post(self.change_owner_url, params={'new_owner': 'mew'},
                          status=httplib.OK)
        self.assertEqual(
            ['<EMAIL>'], firmware.AppleFirmwarePassword.GetLatestForTarget(
                self.serial).owners)

    @mock.patch.dict(settings.__dict__, {'XSRF_PROTECTION_ENABLED': False})
    def testChangeOwnerForBadVolumeId(self):
        """A malformed volume id yields 404."""
        self.volume_id = 'bad-uuid'
        self.testapp.post(self.change_owner_url, params={'new_owner': 'mew'},
                          status=httplib.NOT_FOUND)

    @mock.patch.dict(settings.__dict__, {'XSRF_PROTECTION_ENABLED': False})
    def testChangeOwnerForNonexistantUuid(self):
        """A well-formed key that matches no entity yields 404."""
        self.volume_id = db.Key.from_path('Testing', 'NonExistKeyTesting')
        self.testapp.post(self.change_owner_url, params={'new_owner': 'mew'},
                          status=httplib.NOT_FOUND)

    @mock.patch.dict(settings.__dict__, {'XSRF_PROTECTION_ENABLED': False})
    def testChangeOwnerForInactiveEntity(self):
        """Escrowing a newer passphrase makes the old entity inactive -> 400."""
        _ = self._EscrowPassphrase('<PASSWORD>')
        self.testapp.post(self.change_owner_url, params={'new_owner': 'mew'},
                          status=httplib.BAD_REQUEST)

    def testChangeOwnerWithoutValidXsrf(self):
        """With XSRF protection enabled, the request is forbidden."""
        self.testapp.post(self.change_owner_url, params={'new_owner': 'mew'},
                          status=httplib.FORBIDDEN)

    @mock.patch.dict(settings.__dict__, {'XSRF_PROTECTION_ENABLED': False})
    def testChangeOwnerWithoutPermission(self):
        """Without CHANGE_OWNER permission, the request is forbidden."""
        self.user.apple_firmware_perms = []
        self.user.put()
        self.testapp.post(self.change_owner_url, params={'new_owner': 'mew'},
                          status=httplib.FORBIDDEN)
if __name__ == '__main__':
    # Allow running this test module directly.
    absltest.main()
| StarcoderdataPython |
11364219 | <filename>admin_panel/api/serializers.py
from rest_framework import serializers
from admin_panel.models import Category, Highlights
class CategorySerializer(serializers.ModelSerializer):
    """Serializes Category objects, exposing only title and image."""

    class Meta:
        model = Category
        fields = ['title', 'img']
class HighlightsSerializer(serializers.ModelSerializer):
    """Serializes Highlights objects, exposing only title and image."""

    class Meta:
        model = Highlights
        fields = ['title', 'img']
| StarcoderdataPython |
3320693 | from mindpile.Mapping.types import OutPort
from mindpile.Mapping.utils import MethodCall, Requires, Setup
@Setup
def largeMotorSetup():
    """Shared setup snippet: imports LargeMotor for the generated EV3 code."""
    return '''
from ev3dev2.motor import LargeMotor
'''
@MethodCall(target="MotorStop.vix", MotorPort=OutPort, BrakeAtEnd=bool)
@Requires(largeMotorSetup)
def largeMotorStop():
    """Generated code: stop the motor, holding position or coasting."""
    return '''
m(MotorPort, LargeMotor).stop(stop_action=("hold" if BrakeAtEnd else "coast"))
'''
@MethodCall(target="MotorUnlimited.vix", MotorPort=OutPort, Speed=float)
@Requires(largeMotorSetup)
def largeMotorUnlimited():
    """Generated code: run the motor at a given speed until stopped."""
    return '''
m(MotorPort, LargeMotor).on(speed=Speed)
'''
@MethodCall(target="MotorTime.vix", MotorPort=OutPort, Speed=float, Seconds=float, BrakeAtEnd=bool)
@Requires(largeMotorSetup)
def largeMotorSeconds():
    """Generated code: run the motor for a fixed number of seconds."""
    return '''
m(MotorPort, LargeMotor).on_for_seconds(speed=Speed, seconds=Seconds, brake=BrakeAtEnd)
'''
@MethodCall(target="MotorDistance.vix", MotorPort=OutPort, Speed=float, Degrees=float, BrakeAtEnd=bool)
@Requires(largeMotorSetup)
def largeMotorDegrees():
    """Generated code: run the motor through a fixed number of degrees."""
    return '''
m(MotorPort, LargeMotor).on_for_degrees(speed=Speed, degrees=Degrees, brake=BrakeAtEnd)
'''
@MethodCall(target="MotorDistanceRotations.vix", MotorPort=OutPort, Speed=float, Rotations=float, BrakeAtEnd=bool)
@Requires(largeMotorSetup)
def largeMotorRotations():
    """Generated code: run the motor through a fixed number of rotations."""
    return '''
m(MotorPort, LargeMotor).on_for_rotations(speed=Speed, rotations=Rotations, brake=BrakeAtEnd)
'''
| StarcoderdataPython |
3444413 | <gh_stars>1-10
# didn't use debayan's code as base so i reimplemented the example triangle for reference
def triangle_example(total_lines):
    """Print the reference triangle: row r shows digits counting down from
    total_lines - r - 1 to 0, shifted r columns right; a blank line follows."""
    for row in range(total_lines):
        digits = "".join(str(d) for d in range(total_lines - row - 1, -1, -1))
        print(" " * row + digits)
    print("\n")
def triangle_1(total_lines):
    """Triangle A: row r shows digits r..0, left-padded so the right edge
    is aligned; a blank line follows."""
    for row in range(total_lines):
        countdown = "".join(str(d) for d in range(row, -1, -1))
        print(" " * (total_lines - row - 1) + countdown)
    print("\n")
def triangle_2(total_lines):
    """Triangle B: row r shows digits 0..r, right-padded with spaces;
    a blank line follows."""
    for row in range(total_lines):
        countup = "".join(str(d) for d in range(row + 1))
        print(countup + " " * (total_lines - row - 1))
    print("\n")
def triangle_3(total_lines):
    """Triangle C: row r shows digits 0..(total_lines - r - 1), right-padded
    with spaces; a blank line follows."""
    for row in range(total_lines):
        countup = "".join(str(d) for d in range(total_lines - row))
        print(countup + " " * row)
    print("\n")
def diagonal(total_lines):
    """Print each index from total_lines - 1 down to 0, indented by its
    own value, forming a diagonal line of digits."""
    for value in reversed(range(total_lines)):
        print(" " * value + str(value))
# Demo: print each of the four figures with 5 rows.
# triangle_example(5)
triangle_1(5)  # Triangle A
triangle_2(5)  # Triangle B
triangle_3(5)  # Triangle C
diagonal(5)  # Diagonal for D
| StarcoderdataPython |
3329314 | # -*- coding: utf-8 -*-
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests import tagged
@tagged('post_install', '-at_install')
class TaxReportTest(AccountTestInvoicingCommon):
    """Tests for tax report lines and the tax tags derived from them:
    tag creation/renaming/sharing via tag_name, report duplication,
    country changes and line unlinking."""

    @classmethod
    def setUpClass(cls, chart_template_ref=None):
        """Create three countries, two tax reports for country 1 and their
        report lines, shared by every test in this class.

        Lines 01, 02, 03, 55 and 100 of report 1 carry a tag_name (and
        hence tax tags); lines 04 and 05 deliberately have none. Report 2
        reuses tag names '01' and '100' so those tags are shared between
        the two reports.
        """
        super().setUpClass(chart_template_ref=chart_template_ref)
        cls.test_country_1 = cls.env['res.country'].create({
            'name': "The Old World",
            'code': 'YY',
        })
        cls.test_country_2 = cls.env['res.country'].create({
            'name': "The Principality of Zeon",
            'code': 'ZZ',
        })
        cls.test_country_3 = cls.env['res.country'].create({
            'name': "Alagaësia",
            'code': 'QQ',
        })
        cls.tax_report_1 = cls.env['account.tax.report'].create({
            'name': "Tax report 1",
            'country_id': cls.test_country_1.id,
        })
        cls.tax_report_line_1_1 = cls.env['account.tax.report.line'].create({
            'name': "[01] Line 01",
            'tag_name': '01',
            'report_id': cls.tax_report_1.id,
            'sequence': 2,
        })
        cls.tax_report_line_1_2 = cls.env['account.tax.report.line'].create({
            'name': "[01] Line 02",
            'tag_name': '02',
            'report_id': cls.tax_report_1.id,
            'sequence': 3,
        })
        cls.tax_report_line_1_3 = cls.env['account.tax.report.line'].create({
            'name': "[03] Line 03",
            'tag_name': '03',
            'report_id': cls.tax_report_1.id,
            'sequence': 4,
        })
        cls.tax_report_line_1_4 = cls.env['account.tax.report.line'].create({
            'name': "[04] Line 04",
            'report_id': cls.tax_report_1.id,
            'sequence': 5,
        })
        cls.tax_report_line_1_5 = cls.env['account.tax.report.line'].create({
            'name': "[05] Line 05",
            'report_id': cls.tax_report_1.id,
            'sequence': 6,
        })
        cls.tax_report_line_1_55 = cls.env['account.tax.report.line'].create({
            'name': "[55] Line 55",
            'tag_name': '55',
            'report_id': cls.tax_report_1.id,
            'sequence': 7,
        })
        cls.tax_report_line_1_6 = cls.env['account.tax.report.line'].create({
            'name': "[100] Line 100",
            'tag_name': '100',
            'report_id': cls.tax_report_1.id,
            'sequence': 8,
        })
        cls.tax_report_2 = cls.env['account.tax.report'].create({
            'name': "Tax report 2",
            'country_id': cls.test_country_1.id,
        })
        cls.tax_report_line_2_1 = cls.env['account.tax.report.line'].create({
            'name': "[01] Line 01, but in report 2",
            'tag_name': '01',
            'report_id': cls.tax_report_2.id,
            'sequence': 1,
        })
        cls.tax_report_line_2_2 = cls.env['account.tax.report.line'].create({
            'name': "[02] Line 02, but in report 2",
            'report_id': cls.tax_report_2.id,
            'sequence': 2,
        })
        cls.tax_report_line_2_42 = cls.env['account.tax.report.line'].create({
            'name': "[42] Line 42",
            'tag_name': '42',
            'report_id': cls.tax_report_2.id,
            'sequence': 3,
        })
        cls.tax_report_line_2_6 = cls.env['account.tax.report.line'].create({
            'name': "[100] Line 100",
            'tag_name': '100',
            'report_id': cls.tax_report_2.id,
            'sequence': 4,
        })

    def _get_tax_tags(self, tag_name=None):
        """Return all tax tags of test country 1, optionally filtered by tag
        name; the leading '_' wildcard matches both the '+' and '-' variant."""
        domain = [('country_id', '=', self.test_country_1.id), ('applicability', '=', 'taxes')]
        if tag_name:
            domain.append(('name', 'like', '_' + tag_name ))
        return self.env['account.account.tag'].search(domain)

    def test_write_add_tagname(self):
        """ Adding a tag_name to a line without any should create new tags.
        """
        tags_before = self._get_tax_tags()
        self.tax_report_line_2_2.tag_name = 'tournicoti'
        tags_after = self._get_tax_tags()
        self.assertEqual(len(tags_after), len(tags_before) + 2, "Two tags should have been created, +tournicoti and -tournicoti.")

    def test_write_single_line_tagname(self):
        """ Writing on the tag_name of a line with a non-null tag_name used in
        no other line should overwrite the name of the existing tags.
        """
        start_tags = self._get_tax_tags()
        original_tag_name = self.tax_report_line_1_55.tag_name
        original_tags = self.tax_report_line_1_55.tag_ids
        self.tax_report_line_1_55.tag_name = 'Mille sabords !'
        self.assertEqual(len(self._get_tax_tags(original_tag_name)), 0, "The original tag name of the line should not correspond to any tag anymore.")
        self.assertEqual(original_tags, self.tax_report_line_1_55.tag_ids, "The tax report line should still be linked to the same tags.")
        self.assertEqual(len(self._get_tax_tags()), len(start_tags), "No new tag should have been created.")

    def test_write_single_line_remove_tagname(self):
        """ Setting None as the tag_name of a line with a non-null tag_name used
        in a unique line should delete the tags, also removing all the references to it
        from tax repartition lines and account move lines
        """
        # A tax whose 'tax' invoice repartition line carries the '+55' tag.
        test_tax = self.env['account.tax'].create({
            'name': "<NAME>",
            'amount_type': 'percent',
            'amount': 25,
            'type_tax_use': 'sale',
            'invoice_repartition_line_ids': [
                (0,0, {
                    'factor_percent': 100,
                    'repartition_type': 'base',
                }),
                (0,0, {
                    'factor_percent': 100,
                    'repartition_type': 'tax',
                    'tag_ids': [(6, 0, self.tax_report_line_1_55.tag_ids[0].ids)],
                }),
            ],
            'refund_repartition_line_ids': [
                (0,0, {
                    'factor_percent': 100,
                    'repartition_type': 'base',
                }),
                (0,0, {
                    'factor_percent': 100,
                    'repartition_type': 'tax',
                }),
            ],
        })
        # Posting an invoice with that tax propagates the tag to move lines.
        test_invoice = self.env['account.move'].create({
            'move_type': 'out_invoice',
            'partner_id': self.partner_a.id,
            'date': '1992-12-22',
            'invoice_line_ids': [
                (0, 0, {'quantity': 1, 'price_unit': 42, 'tax_ids': [(6, 0, test_tax.ids)]}),
            ],
        })
        test_invoice.action_post()
        self.assertTrue(any(line.tax_tag_ids == self.tax_report_line_1_55.tag_ids[0] for line in test_invoice.line_ids), "The test invoice should contain a tax line with tag 55")
        tag_name_before = self.tax_report_line_1_55.tag_name
        tag_nber_before = len(self._get_tax_tags())
        self.tax_report_line_1_55.tag_name = None
        self.assertFalse(self.tax_report_line_1_55.tag_name, "The tag name for line 55 should now be None")
        self.assertEqual(len(self._get_tax_tags(tag_name_before)), 0, "None of the original tags for this line should be left after setting tag_name to None if no other line was using this tag_name.")
        self.assertEqual(len(self._get_tax_tags()), tag_nber_before - 2, "No new tag should have been created, and the two that were assigned to the report line should have been removed.")
        self.assertFalse(test_tax.mapped('invoice_repartition_line_ids.tag_ids'), "There should be no tag left on test tax's repartition lines after the removal of tag 55.")
        self.assertFalse(test_invoice.mapped('line_ids.tax_tag_ids'), "The link between test invoice and tag 55 should have been broken. There should be no tag left on the invoice's lines.")

    def test_write_multi_no_change(self):
        """ Writing the same tag_name as they already use on a set of tax report
        lines with the same tag_name should not do anything.
        """
        tags_before = self._get_tax_tags().ids
        (self.tax_report_line_1_1 + self.tax_report_line_2_1).write({'tag_name': '01'})
        tags_after = self._get_tax_tags().ids
        self.assertEqual(tags_before, tags_after, "Re-assigning the same tag_name should keep the same tags.")

    def test_edit_line_shared_tags(self):
        """ Setting the tag_name of a tax report line sharing its tags with another line
        should edit the tags' name and the tag_name of this other report line, to
        keep consistency.
        """
        original_tag_name = self.tax_report_line_1_1.tag_name
        self.tax_report_line_1_1.tag_name = 'Groucha'
        self.assertEqual(self.tax_report_line_2_1.tag_name, self.tax_report_line_1_1.tag_name, "Modifying the tag name of a tax report line sharing it with another one should also modify the other's.")

    def test_edit_multi_line_tagname_all_different_new(self):
        """ Writing a tag_name on multiple lines with distinct tag_names should
        delete all the former tags and replace them by new ones (also on lines
        sharing tags with them).
        """
        lines = self.tax_report_line_1_1 + self.tax_report_line_2_2 + self.tax_report_line_2_42
        previous_tag_ids = lines.mapped('tag_ids.id')
        lines.write({'tag_name': 'crabe'})
        new_tags = lines.mapped('tag_ids')
        self.assertNotEqual(new_tags.ids, previous_tag_ids, "All the tags should have changed")
        self.assertEqual(len(new_tags), 2, "Only two distinct tags should be assigned to all the lines after writing tag_name on them all")
        surviving_tags = self.env['account.account.tag'].search([('id', 'in', previous_tag_ids)])
        self.assertEqual(len(surviving_tags), 0, "All former tags should have been deleted")
        self.assertEqual(self.tax_report_line_1_1.tag_ids, self.tax_report_line_2_1.tag_ids, "The report lines initially sharing their tag_name with the written-on lines should also have been impacted")

    def test_remove_line_dependency(self):
        """ Setting to None the tag_name of a report line sharing its tags with
        other lines should only impact this line ; the other ones should keep their
        link to the initial tags (their tag_name will hence differ in the end).
        """
        tags_before = self.tax_report_line_1_1.tag_ids
        self.tax_report_line_1_1.tag_name = None
        self.assertEqual(len(self.tax_report_line_1_1.tag_ids), 0, "Setting the tag_name to None should have removed the tags.")
        self.assertEqual(self.tax_report_line_2_1.tag_ids, tags_before, "Setting tag_name to None on a line linked to another one via tag_name should break this link.")

    def test_tax_report_change_country(self):
        """ Tests that duplicating and modifying the country of a tax report works
        as intended (countries wanting to use the tax report of another
        country need that).
        """
        # Copy our first report
        tags_before = self._get_tax_tags().ids
        copied_report_1 = self.tax_report_1.copy()
        copied_report_2 = self.tax_report_1.copy()
        tags_after = self._get_tax_tags().ids
        self.assertEqual(tags_before, tags_after, "Report duplication should not create or remove any tag")
        for original, copy in zip(self.tax_report_1.get_lines_in_hierarchy(), copied_report_1.get_lines_in_hierarchy()):
            self.assertEqual(original.tag_ids, copy.tag_ids, "Copying the lines of a tax report should keep the same tags on lines")
        # Assign another country to one of the copies
        copied_report_1.country_id = self.test_country_2
        for original, copy in zip(self.tax_report_1.get_lines_in_hierarchy(), copied_report_1.get_lines_in_hierarchy()):
            if original.tag_ids or copy.tag_ids:
                self.assertNotEqual(original.tag_ids, copy.tag_ids, "Changing the country of a copied report should create brand new tags for all of its lines")
        for original, copy in zip(self.tax_report_1.get_lines_in_hierarchy(), copied_report_2.get_lines_in_hierarchy()):
            self.assertEqual(original.tag_ids, copy.tag_ids, "Changing the country of a copied report should not impact the other copies or the original report")
        # Directly change the country of a report without copying it first
        # (some of its tags are shared with report 1, but not all)
        original_report_2_tags = {line.id: line.tag_ids.ids for line in self.tax_report_2.get_lines_in_hierarchy()}
        self.tax_report_2.country_id = self.test_country_2
        for line in self.tax_report_2.get_lines_in_hierarchy():
            if line == self.tax_report_line_2_42:
                # This line is the only one of the report not sharing its tags
                self.assertEqual(line.tag_ids.ids, original_report_2_tags[line.id], "The tax report lines not sharing their tags with any other report should keep the same tags when the country of their report is changed")
            elif line.tag_ids or original_report_2_tags[line.id]:
                self.assertNotEqual(line.tag_ids.ids, original_report_2_tags[line.id], "The tax report lines sharing their tags with other report should receive new tags when the country of their report is changed")

    def test_unlink_report_line_tags(self):
        """ Under certain circumstances, unlinking a tax report line should also unlink
        the tags that are linked to it. We test those cases here.
        """
        def check_tags_unlink(tag_name, report_lines, unlinked, error_message):
            # Unlink the lines, then verify whether the +/- tag pair survived.
            report_lines.unlink()
            surviving_tags = self._get_tax_tags(tag_name)
            required_len = 0 if unlinked else 2 # 2 for + and - tag
            self.assertEqual(len(surviving_tags), required_len, error_message)

        check_tags_unlink('42', self.tax_report_line_2_42, True, "Unlinking one line not sharing its tags should also unlink them")
        check_tags_unlink('01', self.tax_report_line_1_1, False, "Unlinking one line sharing its tags with others should keep the tags")
        check_tags_unlink('100', self.tax_report_line_1_6 + self.tax_report_line_2_6, True, "Unlinkink all the lines sharing the same tags should also unlink them")
| StarcoderdataPython |
9742101 | <reponame>danielSbastos/understanding-tests
from unittest import TestCase
from understanding_tests.user import User
class UserTestCase(TestCase):
    """Tests for User's derived email, username and repr."""

    def test_user_sets_email_and_username(self):
        # Email and username are derived from first name, last name and age.
        user = User('Daniel', 'Bastos', '18')
        self.assertEqual(user.email(), '<EMAIL>')
        self.assertEqual(user.username(), '18Daniel')

    def test_user_rigth_string_repr(self):
        # repr should render as "<first last>, <age>".
        user = User('Daniel', 'Bastos', '18')
        self.assertEqual('<NAME>, 18', repr(user))
| StarcoderdataPython |
8017399 | """Bite 130. Analyze some basic Car Data."""
from collections import Counter
from typing import Set
import requests
# Remote JSON: a list of records, each with at least 'automaker',
# 'model' and 'year' keys.
CAR_DATA = "https://bites-data.s3.us-east-2.amazonaws.com/cars.json"

# pre-work: load JSON data into program (fetched once at import time)
with requests.Session() as session:
    data = session.get(CAR_DATA).json()
def most_prolific_automaker(year: int) -> str:
    """Find most prolific automaker of the year.

    Given year 'year' return the automaker that released
    the highest number of new car models
    """
    makers = Counter(
        row["automaker"] for row in data if row["year"] == year
    )
    return makers.most_common(1)[0][0]
def get_models(automaker: str, year: int) -> Set[str]:
    """Get models for automaker and year.

    Filter cars 'data' by 'automaker' and 'year',
    return a set of models (a 'set' to avoid duplicate models)
    """
    return {
        row["model"]
        for row in data
        if row["automaker"] == automaker and row["year"] == year
    }
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.