id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
82010 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 03:48:59 2020
@author: lukepinkel
"""
import scipy as sp # analysis:ignore
import numpy as np # analysis:ignore
def difference_mat(k, order=2):
    """Return the `order`-th difference matrix of size (k - order, k).

    Built by differencing the rows of the k x k identity; used as a
    penalty matrix for penalised (P-)splines.
    """
    identity = np.eye(k)
    return np.diff(identity, n=order, axis=0)
def equispaced_knots(x, degree, ndx):
    """Compute an equally spaced knot vector for a B-spline basis over x.

    The data range is split into `ndx` intervals and extended by
    `degree + 1` extra knot spacings on each side.

    Returns (knots, order, lower_bound, upper_bound).
    """
    xl, xr = np.min(x), np.max(x)
    dx = (xr - xl) / ndx
    order = degree + 1
    pad = order * dx
    lb, ub = xl - pad, xr + pad
    # NOTE: np.arange excludes the stop value, so `ub` itself is not a knot.
    knots = np.arange(lb, ub, dx)
    return knots, order, lb, ub
def bspline(x, knots, degree, deriv=0):
    """Evaluate a B-spline basis matrix (or its `deriv`-th derivative) at x.

    Each column is one basis function, obtained by activating one
    coefficient at a time and evaluating the resulting spline. The first
    column is dropped (identifiability convention of the original code).

    Raises ValueError when there are too few knots for the given degree.
    """
    # Explicit submodule import: `import scipy as sp` alone does not
    # guarantee that `sp.interpolate` is available.
    from scipy.interpolate import splev

    if len(knots) <= (degree + 1):
        raise ValueError("Number of knots must be greater than order")
    order = degree + 1
    q = len(knots) - order
    u = np.zeros(q)
    B = np.zeros((len(x), q))
    for i in range(q):
        # turn on the i-th basis function only
        u[i] = 1
        B[:, i] = splev(x, (knots, u, degree), der=deriv)
        u[i] = 0
    return B[:, 1:]
def bspline_des(x, degree=3, ndx=20, deriv=0):
    """Build a B-spline design matrix over equally spaced knots for x."""
    knots = equispaced_knots(x, degree, ndx)[0]
    return bspline(x, knots, degree, deriv)
class Heuristics:
    """Dispatching heuristics for a job-shop scheduler.

    Each rule maps the ready jobs to, per machine, a list of candidate
    (activity, operation) pairs, capped at ``max_operations`` entries.
    """

    # Always choose the first (shortest) operation when several operations
    # are possible.
    @staticmethod
    def select_first_operation(jobs_to_be_done, max_operations, _):
        """Return {id_machine: [(activity, operation), ...]} keeping at most
        ``max_operations`` candidates per machine."""
        best_candidates = {}
        for job in jobs_to_be_done:
            current_activity = job.current_activity
            best_operation = current_activity.shortest_operation
            if best_candidates.get(best_operation.id_machine) is None:
                best_candidates.update({best_operation.id_machine: [(current_activity, best_operation)]})
            elif len(best_candidates.get(best_operation.id_machine)) < max_operations:
                best_candidates.get(best_operation.id_machine).append((current_activity, best_operation))
            else:
                # Machine list is full: evict the first stored operation that
                # is shorter than the candidate, then retry the insert.
                # NOTE(review): evicting the *shorter* operation looks
                # inverted for a shortest-first rule -- confirm intent.
                list_operations = best_candidates.get(best_operation.id_machine)
                for key, (_, operation) in enumerate(list_operations):
                    if operation.duration < best_operation.duration:
                        list_operations.pop(key)
                        break
                if len(list_operations) < max_operations:
                    list_operations.append((current_activity, best_operation))
        return best_candidates

    # LEPT rule (longest expected processing time first) -- not implemented.
    @staticmethod
    def longest_expected_processing_time_first(jobs_to_be_done, max_operations, current_time):
        pass

    # Shortest slack per remaining operation -- not implemented.
    # S/RO = [(due date - current time) - total remaining shop time] / number of remaining operations
    @staticmethod
    def shortest_slack_per_remaining_operations(jobs_to_be_done, max_operations, current_time):
        pass

    # Highest critical ratio.
    # CR = processing time / (due date - current time)
    @staticmethod
    def highest_critical_ratios(jobs_to_be_done, max_operations, current_time):
        """Incomplete heuristic (see TODO); collects critical ratios per job."""
        best_candidates = {}
        critical_ratios = {}
        assignment = {}
        for job in jobs_to_be_done:
            current_activity = job.current_activity
            # Compute the critical ratio of every operation of the job's
            # current activity.
            # NOTE: keyed by id_job, so later operations overwrite earlier
            # ones (pre-existing behaviour, kept as-is).
            for operation in current_activity.next_operations:
                critical_ratio = operation.duration / (job.total_shop_time - current_time)
                critical_ratios.update({job.id_job: (current_activity, operation, critical_ratio)})
        # BUG FIX: dict.items() yields (key, value) pairs, so the stored
        # 3-tuple must be unpacked as a nested tuple (the original
        # 4-name unpack raised ValueError on any non-empty dict).
        for id_job, (current_activity, operation, critical_ratio) in critical_ratios.items():
            if assignment.get(operation.id_machine) is None:
                # BUG FIX: store a list (not a bare tuple) so that the
                # append below does not raise AttributeError.
                assignment.update({operation.id_machine: [(current_activity, operation, critical_ratio)]})
            elif len(assignment.get(operation.id_machine)) < max_operations:
                list_operations = assignment.get(operation.id_machine)
                list_operations.append((current_activity, operation, critical_ratio))
                best_candidates.update({operation.id_machine: list_operations})
        # TODO: end that

    # Randomly assign operations to machines.
    @staticmethod
    def random_operation_choice(jobs_to_be_done, max_operations, _):
        """Pick up to ``max_operations`` random candidates per machine
        (duplicates collapsed through a set)."""
        import random
        best_candidates = {}
        dict_operations = {}
        for job in jobs_to_be_done:
            current_activity = job.current_activity
            for operation in current_activity.next_operations:
                if dict_operations.get(operation.id_machine) is None:
                    dict_operations.update({operation.id_machine: [(current_activity, operation)]})
                else:
                    dict_operations.get(operation.id_machine).append((current_activity, operation))
        for machine, list_operations in dict_operations.items():
            best_candidates.update({machine: list(
                set([list_operations[random.randint(0, len(list_operations) - 1)] for _ in range(max_operations)]))})
        return best_candidates

    # Create the machine-assignment and operation-sequence lists
    # (marked "to be improved" by the original author).
    @staticmethod
    def initialisation_list(jobs_to_be_done):
        machine_assignment = []
        operation_sequence = []
        for job in jobs_to_be_done:
            for activity in job.activities_to_be_done:
                operation_sequence.append(job.id_job)
                machine_assignment.append(activity.next_operations[0].id_machine)
        print("已分配的机器 :")
        for machine in machine_assignment:
            print(str(machine))
        print("工序操作序列 :")
        for operation in operation_sequence:
            print(operation)
| StarcoderdataPython |
1975959 | <filename>qmt/data/__init__.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .template import Data
from .geo_data import Geo2DData, Geo3DData
from .part_data import Part3DData
from .thomas_fermi_data import ThomasFermiData
from .schrodinger_poisson_data import SchrodingerPoissonData
from .mobility_data import MobilityData
from .data_utils import load_serial, store_serial, write_deserialised, serialised_file
| StarcoderdataPython |
3215089 | <filename>util/quick-compare.py
# What happened to the taxa in taxonomy 1, when taxonomy 1 was
# replaced by taxonomy 2?
import sys, os, json, argparse, csv
from org.opentreeoflife.taxa import Taxonomy, Nexson, Flag
def compare(t1, t2):
    # Python 2 / Jython script (uses org.opentreeoflife.taxa): reports what
    # happened to the taxa of taxonomy t1 when it was replaced by taxonomy t2.
    # Each taxon is counted in at most ONE bucket -- the elif chain below
    # establishes a priority order (retired > hidden change > extinct change
    # > other suppression change > unchanged), so the order must not be
    # rearranged.
    print 'comparing', t1, 'to', t2
    retired = 0
    became_hidden = 0
    became_unhidden = 0
    became_extinct = 0
    became_unextinct = 0
    became_suppressed = 0
    became_unsuppressed = 0
    kept = 0
    novel = 0
    tax1 = Taxonomy.getTaxonomy(t1, 'x')
    tax1.inferFlags()
    tax2 = Taxonomy.getTaxonomy(t2, 'x')
    tax2.inferFlags()
    for taxon in tax1.taxa():
        # Look the old taxon up by id in the new taxonomy.
        probe = tax2.lookupId(taxon.id)
        if probe == None:
            retired += 1
        elif probe.isAnnotatedHidden() and not taxon.isAnnotatedHidden():
            became_hidden += 1
        elif not probe.isAnnotatedHidden() and taxon.isAnnotatedHidden():
            became_unhidden += 1
        elif probe.isExtinct() and not taxon.isExtinct():
            became_extinct += 1
        elif not probe.isExtinct() and taxon.isExtinct():
            became_unextinct += 1
        elif probe.isHidden() and not taxon.isHidden():
            became_suppressed += 1
        elif not probe.isHidden() and taxon.isHidden():
            became_unsuppressed += 1
        else:
            kept += 1
    # Count ids present only in the new taxonomy.
    for taxon in tax2.taxa():
        if tax1.lookupId(taxon.id) == None:
            novel += 1
    print
    print 'id retired:', retired
    print 'newly hidden:', became_hidden
    print 'no longer hidden:', became_unhidden
    print 'newly extinct:', became_extinct
    print 'no longer extinct:', became_unextinct
    print 'newly otherwise suppressed:', became_suppressed
    print 'no longer otherwise suppressed:', became_unsuppressed
    print 'new:', novel
    print 'no change in status:', kept

# CLI entry point: compare.py OLD_TAXONOMY NEW_TAXONOMY
compare(sys.argv[1], sys.argv[2])
| StarcoderdataPython |
1760471 | <reponame>sjswerdloff/pymedphys
# Copyright (C) 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from pymedphys._imports import numpy as np
from pymedphys._imports import pandas as pd
#######################################################################################################################
# Summary: Define a function which assigns color values depending on whether or not the values between systems match.
# Input: Table of values which you wish to compare.
# Results: match = green
# mismatch = red
# uncomparable = yellow
def colour_results(val):  # pylint: disable = too-many-return-statements
    """Colour a two-entry row (DICOM value, Mosaiq value) for a Styler.

    Returns a pair of CSS strings: green (#C1FFC1) on match, red (#EE6363)
    on mismatch, yellow (#FDFF8A) for rows that cannot be compared.
    Mutates blank/None entries to 0 in place, mirroring the original.
    """
    yellow = ["background-color: #FDFF8A", "background-color: #FDFF8A"]
    green = ["background-color: #C1FFC1", "background-color: #C1FFC1"]
    red = ["background-color: #EE6363", "background-color: #EE6363"]
    not_in = [
        "field_type",
        "machine",
        "rx",
        "technique",
        "tolerance",
        "modality",
        "technique",
        "couch_lat [cm]",
        "couch_lng [cm]",
        "couch_vrt [cm]",
    ]
    # Rows that cannot accurately be compared are flagged yellow.
    if val.name in not_in:
        return yellow
    # Strings: case-insensitive comparison.
    if isinstance(val[0], str) and isinstance(val[1], str):
        return green if val[0].lower() == val[1].lower() else red
    # Blank or missing entries are treated as 0 before comparing.
    if str(val[0]).strip() == "":
        val[0] = 0
        return green if val[0] == val[1] else red
    if str(val[1]).strip() == "":
        val[1] = 0
        return green if val[0] == val[1] else red
    if val[0] is None:
        val[0] = 0
        return green if val[0] == val[1] else red
    if val[1] is None:
        val[1] = 0
        return green if val[0] == val[1] else red
    # Dates compare by equality.
    if isinstance(val[0], datetime.date) or isinstance(val[1], datetime.date):
        return green if val[0] == val[1] else red
    # float vs str never matches but must not hit float() below.
    if isinstance(val[0], float) and isinstance(val[1], str):
        return green if val[0] == val[1] else red
    # Everything else: numeric comparison at 2 decimal places.
    if np.round(float(val[0]), 2) == np.round(float(val[1]), 2):
        val[0] = np.round(float(val[0]), 2)
        val[1] = np.round(float(val[1]), 2)
        return green
    return red
#######################################################################################################################
# """
# Summary: Define a function which collects general prescription information for a patient.
# Results: Creates a pandas dataframe with 2 columns (one Dicom, one Mosaiq) for each prescription.
# """
def get_general_info(dicom_table, mos_table):
    """Collect general prescription information for a patient.

    Builds a DataFrame with one DICOM column and one Mosaiq column per
    unique prescription ("dose_reference"); rows are the fields listed in
    ``general_info_columns``.
    """
    mosaiq_table = []
    dic_table = []
    general_info_data = []
    used_prescriptions = []
    general_info_index = []
    general_info_columns = [
        "mrn",
        "first_name",
        "last_name",
        "site",
        "total_dose",
        "fraction_dose",
        "fractions",
        "target",
    ]
    for field in dicom_table["field_label"]:
        # field labels appear to be 1-based, hence int(field) - 1 for iloc
        # -- assumes both tables share row order; TODO confirm.
        if dicom_table.iloc[int(field) - 1]["dose_reference"] not in used_prescriptions:
            for label in general_info_columns:
                mosaiq_table.append(mos_table.iloc[int(field) - 1][label])
                dic_table.append(dicom_table.iloc[int(field) - 1][label])
            general_info_index.append(
                "Prescription "
                + str(dicom_table.iloc[int(field) - 1]["dose_reference"])
                + " DICOM"
            )
            general_info_index.append(
                "Prescription "
                + str(dicom_table.iloc[int(field) - 1]["dose_reference"])
                + " Mosaiq"
            )
            # Remember this prescription so later fields of the same
            # prescription are skipped.
            used_prescriptions.append(
                dicom_table.iloc[int(field) - 1]["dose_reference"]
            )
            general_info_data.append(dic_table)
            general_info_data.append(mosaiq_table)
            dic_table = []
            mosaiq_table = []
        else:
            pass
    general_info_df = pd.DataFrame(data=general_info_data, columns=general_info_columns)
    general_info_df["dose_index"] = pd.Series(general_info_index).values
    general_info_df = general_info_df.set_index("dose_index", drop=True)
    # Transpose so prescriptions become columns and fields become rows.
    general_info_df = general_info_df.transpose()
    return general_info_df
#######################################################################################################################
#
# """
# Summary: Define a function which compares two dataframes and produces an excel spreadsheet of the results.
# Input: One dataframe from DICOM, one dataframe from Mosaiq.
# Results: Produces a dataframe giving a side by side comparison of the two systems
# """
def compare_to_mosaiq(dicom_table, mos_table):
    """Produce a side-by-side comparison of the DICOM and Mosaiq tables.

    For every field (row) of ``dicom_table``, emits two rows -- one DICOM,
    one Mosaiq -- containing only the columns present in both tables.
    The index is built from "field_name" with _DICOM/_MOSAIQ suffixes.
    """
    values_table = pd.DataFrame()
    to_be_compared = dicom_table.columns
    mos_index = mos_table.columns
    dicom_df = pd.DataFrame()
    mosaiq_df = pd.DataFrame()
    for field in range(len(dicom_table)):
        for label in to_be_compared:
            # check that the corresponding value exists in Mosaiq
            if label in mos_index:
                add_dicom = pd.DataFrame(
                    [dicom_table.iloc[field][label]], columns=[label]
                )
                add_mosaiq = pd.DataFrame(
                    [mos_table.iloc[field][label]], columns=[label]
                )
                dicom_df = pd.concat([dicom_df, add_dicom], axis=1)
                mosaiq_df = pd.concat([mosaiq_df, add_mosaiq], axis=1)
            # continue if the value is not in Mosaiq
            else:
                continue
        # FIX: DataFrame.append was removed in pandas 2.0 -- use pd.concat.
        values_table = pd.concat([values_table, dicom_df], ignore_index=True)
        values_table = pd.concat([values_table, mosaiq_df], ignore_index=True)
        dicom_df = pd.DataFrame()
        mosaiq_df = pd.DataFrame()
    values_index = []
    for value in dicom_table[:]["field_name"]:
        values_index.append(value + "_DICOM")
        values_index.append(value + "_MOSAIQ")
    values_table["beam_index"] = pd.Series(values_index).values
    values_table = values_table.set_index("beam_index", drop=True)
    # values_table = values_table.round(2)
    return values_table
#######################################################################################################################
def weekly_check_colour_results(val):
    """Colour an entire row red if any treatment-failure flag appears in it,
    green otherwise."""
    failures = (
        "Unverified Treatment",
        "Partial Treatment",
        "Treatment Overridden",
        "New Field Delivered",
        "Prescription Altered",
        "Site Setup Altered",
    )
    observed = set(val)
    if any(flag in observed for flag in failures):
        return ["background-color: #EE6363"] * len(val)
    return ["background-color: #C1FFC1"] * len(val)
def specific_patient_weekly_check_colour_results(val):
    """Colour a row red when any override / new-field / partial-treatment
    flag is exactly True, green otherwise."""
    flags = ("was_overridden", "new_field", "partial_tx")
    # `is True` mirrors the original: truthy non-True values do not count.
    if any(val[flag] is True for flag in flags):
        return ["background-color: #EE6363"] * len(val)
    return ["background-color: #C1FFC1"] * len(val)
def constraint_check_colour_results(val):
    """Colour a dose-constraint row.

    Constraint rows: red when the actual dose exceeds the limit, yellow
    when within 10% of the limit, green otherwise. Score rows ("Average
    Score" / "Total Score"): green when the score is positive, else red.
    """
    n = len(val)
    if val["Type"] in ("Average Score", "Total Score"):
        if val["Score"] > 0:
            return ["background-color: #C1FFC1"] * n
        return ["background-color: #EE6363"] * n
    planned = val["Dose [Gy]"]
    actual = val["Actual Dose [Gy]"]
    if actual > planned:
        return ["background-color: #EE6363"] * n
    if 0 < planned - actual < planned / 10:
        return ["background-color: #FDFF8A"] * n
    return ["background-color: #C1FFC1"] * n
| StarcoderdataPython |
12820918 | import os
import pytest
import numpy as np
@pytest.fixture
def rng():
    """Provide a seeded NumPy Generator so pseudo-random tests are reproducible."""
    default_test_seed = 1  # the default seed to start pseudo-random tests
    return np.random.default_rng(default_test_seed)
def run_all_tests(*args):
    """ Invoke pytest on this package's directory, forwarding options to pytest.main """
    here = os.path.dirname(__file__)
    pytest.main([here] + list(args))
| StarcoderdataPython |
4969097 | '''Test fixtures for dagster-airflow.
These make very heavy use of fixture dependency and scope. If you're unfamiliar with pytest
fixtures, read: https://docs.pytest.org/en/latest/fixture.html.
'''
# pylint doesn't understand the way that pytest constructs fixture dependnecies
# pylint: disable=redefined-outer-name, unused-argument
import os
import shutil
import subprocess
import tempfile
import uuid
import docker
import pytest
from dagster import check
from dagster.utils import load_yaml_from_path, mkdir_p, pushd, script_relative_path
# Will be set in environment by pipeline.py -> tox.ini to:
# ${AWS_ACCOUNT_ID}.dkr.ecr.us-west-1.amazonaws.com/dagster-airflow-demo:${BUILDKITE_BUILD_ID}
IMAGE = os.environ.get('DAGSTER_AIRFLOW_DOCKER_IMAGE')
@pytest.fixture(scope='module')
def airflow_home():
    '''Check that AIRFLOW_HOME is set, and return it (absolute, ~-expanded).'''
    airflow_home_dir = os.getenv('AIRFLOW_HOME')
    assert airflow_home_dir, 'No AIRFLOW_HOME set -- is airflow installed?'
    airflow_home_dir = os.path.abspath(os.path.expanduser(airflow_home_dir))
    return airflow_home_dir
@pytest.fixture(scope='module')
def temp_dir():
    '''Yield a fresh temporary directory and remove it afterwards.

    pytest implicitly wraps in try/except.
    '''
    dir_path = os.path.join('/tmp', str(uuid.uuid4()))
    mkdir_p(dir_path)
    yield dir_path
    shutil.rmtree(dir_path)
@pytest.fixture(scope='module')
def clean_airflow_home(airflow_home):
    '''Ensure that the existing contents of AIRFLOW_HOME do not interfere with test.'''
    airflow_dags_path = os.path.join(airflow_home, 'dags')
    # Ensure Airflow DAGs folder exists
    if not os.path.exists(airflow_dags_path):
        os.makedirs(airflow_dags_path)
    tempdir_path = tempfile.mkdtemp()
    # Move existing DAGs aside for test
    dags_files = os.listdir(airflow_dags_path)
    for dag_file in dags_files:
        shutil.move(os.path.join(airflow_dags_path, dag_file), tempdir_path)
    yield
    # NOTE(review): the restore below only runs when the test body returns
    # normally to the fixture -- confirm this is acceptable on test errors.
    # Clean up DAGs produced by test
    shutil.rmtree(airflow_dags_path)
    os.makedirs(airflow_dags_path)
    # Move original DAGs back
    file_paths = os.listdir(tempdir_path)
    for file_path in file_paths:
        shutil.move(
            os.path.join(tempdir_path, file_path), os.path.join(airflow_dags_path, file_path)
        )
    shutil.rmtree(tempdir_path)
@pytest.fixture(scope='session')
def docker_client():
    '''Instantiate a Docker Python client; fail the run if the daemon is unreachable.'''
    try:
        client = docker.from_env()
        client.info()
    except docker.errors.APIError:
        # pylint: disable=protected-access
        check.failed('Couldn\'t find docker at {url} -- is it running?'.format(url=client._url('')))
    return client
@pytest.fixture(scope='session')
def build_docker_image(docker_client):
    '''Build the test image via the project's build script and return its tag.'''
    with pushd(script_relative_path('test_project')):
        subprocess.check_output(['./build.sh'], shell=True)
    return IMAGE
@pytest.fixture(scope='session')
def docker_image(docker_client, build_docker_image):
    '''Check that the airflow image exists.'''
    try:
        docker_client.images.get(build_docker_image)
    except docker.errors.ImageNotFound:
        check.failed(
            'Couldn\'t find docker image {image} required for test: please run the script at '
            '{script_path}'.format(
                image=build_docker_image, script_path=script_relative_path('test_project/build.sh')
            )
        )
    return build_docker_image
@pytest.fixture(scope='module')
def dags_path(airflow_home):
    '''Abspath to the magic Airflow DAGs folder (created if missing).'''
    path = os.path.join(airflow_home, 'dags', '')
    mkdir_p(os.path.abspath(path))
    return path
@pytest.fixture(scope='module')
def plugins_path(airflow_home):
    '''Abspath to the magic Airflow plugins folder (created if missing).'''
    path = os.path.join(airflow_home, 'plugins', '')
    mkdir_p(os.path.abspath(path))
    return path
@pytest.fixture(scope='module')
def environment_dict(s3_bucket):
    '''Load the test pipeline environment and point its storage at the S3 bucket.'''
    env_dict = load_yaml_from_path(script_relative_path('test_project/env.yaml'))
    env_dict['storage'] = {'s3': {'s3_bucket': s3_bucket}}
    yield env_dict
@pytest.fixture(scope='session')
def s3_bucket():
    '''Name of the scratch S3 bucket used by the test pipeline.'''
    yield 'dagster-scratch-80542c2'
| StarcoderdataPython |
1674424 | <reponame>Matej-Chmel/KVContest-data-test-suite
from random import randint
from src.common import storage
from src.dataset_generator import data
class Implementation:
cyc_cmd = None
I = Implementation
def add_line() -> str:
    """Generate one random dataset command line.

    Command tuples come from I.cyc_cmd; 'H' builds a multi-key line,
    'S' sets a value, 'A' appends to an existing value. Loops until a
    valid line can be produced (e.g. retries when no existing key fits).
    """
    key, val = None, None
    while True:
        cmd_tuple = next(I.cyc_cmd)
        if cmd_tuple[0] == 'H':
            # Multi-key command: mix of brand-new and existing keys.
            h_len = next(data.cyc_H_len)
            key = ' '.join([
                next(data.new_key_generator())
                if next(data.cyc_key_is_new)
                else data.existing_key_else_new()
                for i in range(h_len)
            ])
        else:
            try:
                key = next(data.new_key_generator()) if cmd_tuple[1] else data.existing_key()
            except KeyError:
                # No existing key available yet -- draw another command.
                continue
        if cmd_tuple[0] == 'S':
            val = data.rnd_value(next(data.cyc_val_len))
        elif cmd_tuple[0] == 'A':
            # Append must not push the stored value past value_len.max.
            current_len = len(storage[key])
            available_len = data.value_len.max - current_len
            if available_len < data.append_len.min:
                continue
            val = data.rnd_value(randint(data.append_len.min, min([available_len, data.append_len.max])))
        return f"{cmd_tuple[0]} {key}{' ' + val if val else ''}"
| StarcoderdataPython |
from vars import *

# Plot two oscilloscope CSV captures on a shared time axis:
# CH1 = MEMS microphone output, CH2 = ATtiny output.
print(" -> Please change input files manually if needed!")
csv_CH1 = CSV_FOLDER + "weird_mems_CH1.csv"
csv_CH2 = CSV_FOLDER + "weird_mems_CH2.csv"
fig, (ax1, ax2) = plt.subplots(2)
ax1.set_xlabel("Zeit [s]")
ax1.set_ylabel("Spannung [mV]")
ax2.set_xlabel("Zeit [s]")
ax2.set_ylabel("Spannung [mV]")
# CH1
plt.subplot(2, 1, 1)
# Time column is in microseconds -> convert to seconds.
x_CH1 = np.genfromtxt(csv_CH1, delimiter=",", skip_header=8, usecols=1) / 1000000
y_CH1 = np.genfromtxt(csv_CH1, delimiter=",", skip_header=8, usecols=2)
total_time = x_CH1[-1]
#ax1.set_title("Ausgangsspannung des MEMS-Mikrofons")
ax1.grid()
ax1.plot(x_CH1, y_CH1, 'k', linewidth=0.5)
ax1.set_xlim(xmin=0, xmax=total_time)
# CH2 (re-uses CH1's time base -- assumes both captures share it; TODO confirm)
plt.subplot(2, 1, 2)
y_CH2 = np.genfromtxt(csv_CH2, delimiter=",", skip_header=8, usecols=2)
#ax2.set_title("Ausgangsspannung des ATtinys")
ax2.grid()
ax2.plot(x_CH1, y_CH2, 'k', linewidth=0.5)
ax2.set_xlim(xmin=0, xmax=total_time)
plt.tight_layout()
plt.show()
| StarcoderdataPython |
4948386 | <gh_stars>0
def perms(n):
    """Yield all n-bit binary strings, zero-padded ('00', '01', '10', '11').

    Yields nothing for n == 0 (preserving the original guard).
    """
    if not n:
        return
    for i in range(2 ** n):
        # xrange was Python-2-only; format() zero-pads without manual padding
        yield format(i, '0{}b'.format(n))
print list(perms(15)) | StarcoderdataPython |
1893661 | <gh_stars>0
# say_hi
# Created by JKChang
# 10/04/2018, 09:11
# Tag:
# Description: In this mission you should write a function that introduce a person with a given parameters in attributes.
#
# Input: Two arguments. String and positive integer.
#
# Output: String.
def say_hi(name, age):
    """Return the introduction sentence for *name*, aged *age* years."""
    return "Hi. My name is {} and I'm {} years old".format(name, age)
if __name__ == '__main__':
    # Self-check asserts; not required for auto-testing.
    assert say_hi("Alex", 32) == "Hi. My name is Alex and I'm 32 years old", "First"
    assert say_hi("Frank", 68) == "Hi. My name is Frank and I'm 68 years old", "Second"
    print('Done. Time to Check.')
| StarcoderdataPython |
8052473 | notice = """
Feature and speed test
for a Pure Python graphics library
that saves to a bitmap
-----------------------------------
| Copyright 2022 by <NAME> |
| [<EMAIL>] |
|-----------------------------------|
| We make absolutely no warranty |
| of any kind, expressed or implied |
|-----------------------------------|
| The primary author and any |
| any subsequent code contributors |
| shall not be liable in any event |
| for incidental or consequential |
| damages in connection with, or |
| arising out from the use of this |
| code in current form or with any |
| modifications. |
|-----------------------------------|
| Contact primary author |
| if you plan to use this |
| in a commercial product at |
| <EMAIL> |
|-----------------------------------|
| Educational or hobby use is |
| highly encouraged... |
| have fun coding ! |
|-----------------------------------|
| This graphics library outputs |
| to a bitmap file. |
-----------------------------------
"""
from Python_BMP.BITMAPlib import(
addvect,
adjustcolordicttopal,
beziercurve,
bottomrightcoord,
bspline,
bsplinevert,
centercoord,
conevertandsurface,
convertselection2BMP,
copyrect,
cubevert,
cylindervertandsurface,
decahedvertandsurface,
drawvec,
fillbackgroundwithgrad,
filledgradrect,
filledrect,
font8x14,
font8x8,
getcolorname2RGBdict,
getdefaultlumrange,
getIFSparams,
getRGBfactors,
getshapesidedict,
gradcircle,
gradellipse,
gradthickcircle,
gradthickellipserot,
gradthickroundline,
gradvert,
hexahedravert,
icosahedvertandsurface,
IFS,
loadBMP,
mandelbrot,
mandelparamdict,
newBMP,
octahedravert,
pasterect,
piechart,
plot3Dsolid,
plot8bitpatternastext,
plotbmpastext,
plotfilledflower,
plotlines,
plotpoly,
plotreversestring,
plotstring,
plotstringsideway,
plotstringupsidedown,
plotstringvertical,
rectangle,
regpolygonvert,
RGB2int,
rotvec3D,
saveBMP,
spherevertandsurface,
spiralcontrolpointsvert,
surfplot3Dvertandsurface,
tetrahedravert,
trans,
XYaxis,
XYscatterplot
)
import subprocess as proc
from time import(
process_time_ns as _time_ns
)
from random import randint
from os import path
def elaspedtimeinseconds(inittime):
return (_time_ns() - inittime) / 1000000000
def hhmmsselaspedtime(inittime):
secs, ns = \
divmod((_time_ns() - inittime), 1000000000)
mins, secs = divmod(secs, 60)
hrs, mins = divmod(mins, 60)
return (((f'{str(hrs).zfill(2)}:' + str(mins).zfill(2)) + ':') +
str(secs).zfill(2) + '.') + str(ns)
def main():
    """Run the full feature/speed demo of the Pure-Python BMP graphics library.

    Draws gradients, shapes, text, 3D solids, fractals, curves and charts
    onto one 1024x768 24-bit bitmap, timing each group, then saves
    'test.bmp' and opens it in mspaint (Windows-only viewer).
    """
    rootdir = path.dirname(__file__)
    plotbmpastext(loadBMP(rootdir + '/assets/pp.bmp'))  # load logo
    print(notice)
    demostart = _time_ns()
    starttime = _time_ns()
    mx = 1024
    my = 768
    bmp = newBMP(mx, my, 24)
    print('New bitmap in ' + hhmmsselaspedtime(starttime))
    maxpt = bottomrightcoord(bmp)
    cenpt = centercoord(bmp)  # bitmap dependent coords
    c = getcolorname2RGBdict()
    cf = getRGBfactors()
    lum = getdefaultlumrange()  # color info
    adjustcolordicttopal(bmp, c)
    starttime = _time_ns()
    fillbackgroundwithgrad(bmp,
        lum['maxasc'], cf['blue'], 0)
    print('Background gradient test done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    rectangle(bmp, 1, 1,
        maxpt[0] - 1,
        maxpt[1] - 1,
        c['white'])
    filledgradrect(bmp,
        395, 5, 955, 41,
        lum['upperdesc'],
        cf['orange'], 1)
    filledrect(bmp,
        395, 44, 955, 55,
        c['darkblue'])
    print('Rectangle tests done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    hc = spiralcontrolpointsvert(
        350, 180, 5, 1.1, 5)  # we will use this later to make smooth spiral
    gradthickroundline(bmp,
        [800, 620],  # end point 1
        [800, 600],  # end point 2
        7,  # thickness
        lum['upperdesc'],  # how bright the gradient
        cf['darkred'])  # color of the gradient
    plotlines(bmp, hc, c['yellow'])
    cp = [50, 150]  # all arrows must point outward from cp or bug report please
    drawvec(bmp, cp, [50, 100], 0, c['white'])  # color dict more readable than color int
    drawvec(bmp, cp, [100, 150], 0, c['green'])  # but if you want to use color int
    drawvec(bmp, cp, [50, 200], 0, c['red'])  # the functions will work too
    drawvec(bmp, cp, [5, 150], 0, c['blue'])  # up to the color supported
    drawvec(bmp, cp, [75, 175], 0, c['yellow'])  # by the loaded bitmap file
    drawvec(bmp, cp, [25, 125], 0, c['magenta'])
    drawvec(bmp, cp, [25, 175], 0, c['orange'])
    drawvec(bmp, cp, [75, 125], 0, c['gray'])
    print('Line tests done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    fnt = font8x8
    strtest = 'abcdefghijklmnopqrstuvwxyz\n0123456789\'":;.,?!~`@#$%^&()[]{}_*+-/=<>\nABCDEFGHIJKLMNOPQRSTUVWXYZ'
    plotstring(bmp, 400, 10,
        'My Python GL test',
        4, 1, 0, c['lightgray'],
        fnt)
    plotstring(bmp, 400, 45,
        'Copyright 2021 by <NAME> (<EMAIL>)', 1, 1, 0, c['brightwhite'], fnt)
    plotstring(bmp, 300, 64,
        strtest, 1, 0, 0,
        c['brightgreen'],
        fnt)
    plotstringupsidedown(bmp,
        10, 737,
        strtest, 1, 0, 0,
        c['brightgreen'],
        fnt)
    plotreversestring(bmp,
        300, 100,
        strtest, 1, 0, 0,
        c['brightgreen'],
        fnt)
    fnt = font8x14
    plotstringvertical(bmp,
        970, 30,
        'Matrix text',
        2, 0, 0, c['green'],
        fnt)
    plotstringsideway(bmp, 970, 730,
        strtest, 1, 0, 0,
        c['white'], fnt)
    print('Text tests done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    # be careful with these variables
    # or the object goes offscreen
    d = 200  # distance of the observer
             # from the screen
    tvect = [0, 0, 100]  # 3D translation vector
    sd = getshapesidedict()
    pts = tetrahedravert(80)
    # Each entry: [[vertices, side dict], face colour, shaded?, edge colour]
    shapes = [[[trans(cubevert(30), pts[3]), sd["cube"]],
               cf["darkblue"], True, c['black']],
              [[trans(tetrahedravert(30), pts[2]), sd["tetrahedra"]],
               cf["darkred"], True, c['white']],
              [[trans(octahedravert(20), pts[1]), sd["octahedra"]],
               cf["yellow"], True, c['darkgray']],
              [[trans(hexahedravert(30), pts[0]), sd["hexahedra"]],
               cf["darkgreen"], True, c['darkgreen']]]
    for s in shapes:
        plot3Dsolid(bmp,
            s[0], True,
            s[1], s[2], s[3],
            rotvec3D(10, 5, 5),
            tvect, d,
            addvect(cenpt, [-160, -10]))
    plot3Dsolid(bmp,
        decahedvertandsurface(25),
        True, cf['brightred'], False,
        0, rotvec3D(7, 77, 20),
        tvect, d, addvect(cenpt, [280, -250]))
    plot3Dsolid(bmp,
        icosahedvertandsurface(25),
        True, cf['brightwhite'], False,
        0, rotvec3D(70, 7, 20),
        tvect, d, addvect(cenpt, [+60, -130]))
    plot3Dsolid(bmp,
        spherevertandsurface([5, 0, 0], 60, 10),
        True, cf['brightwhite'], False,
        0, rotvec3D(190, 145, 70),
        tvect, d, addvect(cenpt, [300, -50]))
    plot3Dsolid(bmp,
        cylindervertandsurface([1, 0, 0], 20, 10, 5),
        True, cf['brightyellow'], True,
        RGB2int(20, 20, 0), rotvec3D(60, 74, 72),
        tvect, d, addvect(cenpt, [-200, -50]))
    plot3Dsolid(bmp,
        conevertandsurface([1, 0, 0], 20, 15, 5),
        True, cf['brightorange'],
        False, RGB2int(20, 20, 0),
        rotvec3D(6, 67, 2),
        tvect, d, addvect(cenpt, [-300, -150]))
    fnxy = lambda x, y: x & y
    plot3Dsolid(bmp,
        surfplot3Dvertandsurface(
            -35, -35, 35, 35, 5, fnxy),
        True, cf['brightcyan'],
        True, 0, rotvec3D(20, 67, 30),
        tvect, d, addvect(cenpt, [-420, -25]))
    print('3D tests done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    mandelpar = mandelparamdict()
    mandelbrot(bmp, 10, 600, 130, 735,
        mandelpar['maxeqdim'],
        cf['brightgreen'], 255)
    p = getIFSparams()  # same here add more to this dict
    IFS(bmp, p['fern'],
        170, 600, 270, 730, 12, 12, 30, 30,
        c['lightgreen'], 10000)
    IFS(bmp, p['tree'],
        270, 600, 370, 730, 100, 100, 0, 0,
        c['brown'], 10000)
    IFS(bmp, p['cantortree'],
        370, 600, 450, 730, 100, 100, 0, 0,
        c['lightcyan'], 10000)
    IFS(bmp, p['sierpinskitriangle'],
        450, 600, 670, 730, 100, 100, 0, 0,
        c['cyan'], 10000)
    print('Fractal tests done in ',
        hhmmsselaspedtime(starttime))
    p = regpolygonvert(250, 80, 40, 6, 0)
    starttime = _time_ns()
    beziercurve(bmp, p, 3, c['brightorange'])
    beziercurve(bmp, hc, 0, c['lightred'])
    print('Bezier curve tests done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()  # bspline follow control points better than bezier
    bspline(bmp, p, 0, c['red'], True, True)
    bspline(bmp, hc, 0, c['brightwhite'], True, True)
    gradvert(bmp,
        bsplinevert(
            spiralcontrolpointsvert(
                165, 135, 7, 1.2, 3),
            False,
            False),
        5,
        lum['upperdesc'],
        cf['brightwhite'])
    print('Bspline tests done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    gradcircle(bmp, 900, 140, 30,
        lum['maxdesc'], cf['brightyellow'])
    gradthickcircle(bmp, 900, 550, 40, 8,
        lum['upperdesc'], cf['lightred'])
    print('Circle tests done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    gradellipse(bmp, 750, 550, 20, 40,
        lum['upperdesc'], cf['brightorange'])
    gradthickellipserot(bmp, 790, 700, 50, 30, 45, 5,
        lum['upperdesc'], cf['yellow'])
    print('Ellipse tests done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    pdata = []  # [[20,c['red']],[30,c['brightyellow']],...]
    # one equal-sized wedge per named colour
    for color in c:
        pdata.append([1, c[color]])
    piedata = piechart(bmp, 75, 540, 45, pdata)
    print('Arc and pie chart tests done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    plotpoly(bmp, p, c['brightcyan'])
    print('Polygon tests done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    plotfilledflower(bmp, 40, 40, 30, 5, 45,
        lum['maxasc'], cf['brightyellow'])
    print('Filled flower test done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()  # [x,y,rad=constant for sample can be variable,isfilled]
    XYdata = []  # [[45,45,5,c['brightcyan'],True],[100,60,5,c['brightgreen'],True],..]
    # one filled and one hollow random sample point per colour
    for color in c:
        XYdata.append(
            [randint(20, 140),
             randint(30, 70),
             5, c[color], True])
        XYdata.append(
            [randint(20, 140),
             randint(30, 70),
             5, c[color], False])
    XYcoordinfo = XYaxis(bmp,
        [172, 598], [40, 40],
        [666, 398], [20, 30],
        [10, 10],
        c['brightwhite'],
        c['gray'],
        True,
        c['darkgreen'])
    XYscatterplot(bmp, XYdata, XYcoordinfo,
        True, c['green'])
    print('XY scatterplot test done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    buff = copyrect(bmp, 3, 3, 80, 80)
    pasterect(bmp, buff, 858, 611)
    print('Copy paste done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    nbmp = convertselection2BMP(buff)
    nfile = rootdir + '/assets/flower.bmp'
    saveBMP(nfile, nbmp)
    print('Save selection to ',
        nfile, ' done in ',
        hhmmsselaspedtime(starttime))
    starttime = _time_ns()
    file = 'test.bmp'  # some random filename
    saveBMP(file, bmp)  # dump byte array to file
    print('Saved ' + file + ' in ',
        hhmmsselaspedtime(starttime))
    print('Demo done in ',
        hhmmsselaspedtime(demostart))
    print('\nAll done close mspaint to finish')
    ret = proc.call('mspaint ' + file)  # replace with another editor if Unix
    print('\nThe')
    print(plot8bitpatternastext([0x00, 0x00, 0x38, 0x6C, 0x6C, 0x38, 0x76, 0xDC, 0xCC, 0xCC, 0x76, 0x00, 0x00, 0x00], '&', ' '))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3414459 | <reponame>gigasquid/gluon-nlp
import numpy as np
from numpy.testing import assert_allclose
import mxnet as mx
from gluonnlp.data import batchify
import pytest
def test_pad():
    """Padding an empty array against a length-1 array fills with pad_val."""
    pad_fn = batchify.Pad(pad_val=-1)
    batch = pad_fn([mx.nd.array([]), mx.nd.arange(1)])
    flat = batch.asnumpy().flatten().tolist()
    assert flat == [-1.0, 0.0]
@pytest.mark.parametrize('odtype', [np.uint8, np.int32, np.int64,
                                    np.float16, np.float32, np.float64])
@pytest.mark.parametrize('idtype', [np.uint8, np.int32, np.int64,
                                    np.float16, np.float32, np.float64])
@pytest.mark.parametrize('pass_dtype', [False, True])
def test_stack_batchify(odtype, idtype, pass_dtype):
    """batchify.Stack must stack like np.array and honour an explicit
    output dtype when one is passed."""
    dat = [np.random.randint(5, size=(10,)).astype(idtype) for _ in range(10)]
    batchify_fn = batchify.Stack(dtype=odtype if pass_dtype else None)
    batchify_out = batchify_fn(dat).asnumpy()
    npy_out = np.array(dat)
    assert_allclose(batchify_out, npy_out)
    # BUG FIX: the original `assert a == b if not pass_dtype else odtype`
    # parsed as `assert ((a == b) if not pass_dtype else odtype)`, so when
    # pass_dtype was True it merely asserted a truthy dtype object and the
    # output dtype was never actually checked.
    expected_dtype = npy_out.dtype if not pass_dtype else odtype
    assert batchify_out.dtype == expected_dtype
def test_pad_wrap_batchify():
    """Exhaustively exercise batchify.Pad / Stack / Tuple over ndims, axes,
    lengths, pad values and dtypes, for both numpy and mx.nd inputs."""

    def _verify_padded_arr(padded_arr, original_arr, pad_axis, pad_val, pad_length, dtype):
        """Check `padded_arr` equals `original_arr` on the data region and
        `pad_val` on the padded remainder of `pad_axis`."""
        ndim = original_arr.ndim
        slices_data = [slice(None) for _ in range(ndim)]
        # BUG FIX: the original indexed with the enclosing loop variable
        # `axis` here (and below) instead of the `pad_axis` parameter; it
        # only worked because every caller passed pad_axis == axis.
        slices_data[pad_axis] = slice(original_arr.shape[pad_axis])
        assert_allclose(padded_arr[tuple(slices_data)], original_arr)
        if original_arr.shape[pad_axis] < pad_length:
            slices_pad_val = [slice(None) for _ in range(ndim)]
            slices_pad_val[pad_axis] = slice(original_arr.shape[pad_axis], None)
            pad_val_in_arr = padded_arr[tuple(slices_pad_val)]
            assert_allclose(pad_val_in_arr, (np.ones_like(pad_val_in_arr) * pad_val).astype(dtype))

    batch_size = 6
    for ndim in range(1, 3):
        for axis in range(-ndim, ndim):
            for length_min, length_max in [(3, 4), (3, 7)]:
                for pad_val in [-1, 0]:
                    for dtype in [np.uint8, np.int32, np.int64, np.float16, np.float32, np.float64]:
                        # Each instance contains a single array
                        for _dtype in [None, dtype]:
                            shapes = [[2 for _ in range(ndim)] for _ in range(batch_size)]
                            for i in range(len(shapes)):
                                shapes[i][axis] = np.random.randint(length_min, length_max)
                            random_data_npy = [np.random.normal(0, 1, shape).astype(dtype)
                                               for shape in shapes]
                            batchify_fn = batchify.Pad(axis=axis, pad_val=pad_val, ret_length=True, dtype=_dtype)
                            batch_data, valid_length = batchify_fn(random_data_npy)
                            batch_data_use_mx, valid_length_use_mx = batchify_fn(
                                [mx.nd.array(ele, dtype=dtype) for ele in random_data_npy])
                            assert_allclose(batch_data_use_mx.asnumpy(), batch_data.asnumpy())
                            assert_allclose(valid_length_use_mx.asnumpy(), valid_length.asnumpy())
                            assert batch_data.dtype == batch_data_use_mx.dtype == dtype
                            assert valid_length.dtype == valid_length_use_mx.dtype == np.int32
                            valid_length = valid_length.asnumpy()
                            batch_data = batch_data.asnumpy()
                            for i in range(batch_size):
                                assert (valid_length[i] == shapes[i][axis])
                                pad_length = max(shape[axis] for shape in shapes)
                                _verify_padded_arr(batch_data[i], random_data_npy[i], axis, pad_val, pad_length, dtype)
                            # Each instance contains 3 arrays, we pad part of them according to index
                            TOTAL_ELE_NUM = 3
                            for pad_index in [[0], [1], [2], [0, 1], [1, 2], [0, 1, 2]]:
                                shapes = [[[2 for _ in range(ndim)] for _ in range(batch_size)]
                                          for _ in range(TOTAL_ELE_NUM)]
                                for j in pad_index:
                                    for i in range(batch_size):
                                        shapes[j][i][axis] = np.random.randint(length_min, length_max)
                                random_data_npy = [tuple(np.random.normal(0, 1, shapes[j][i]).astype(dtype)
                                                         for j in range(TOTAL_ELE_NUM)) for i in range(batch_size)]
                                batchify_fn = []
                                for j in range(TOTAL_ELE_NUM):
                                    if j in pad_index:
                                        batchify_fn.append(batchify.Pad(axis=axis, pad_val=pad_val, ret_length=True,
                                                                        dtype=_dtype))
                                    else:
                                        batchify_fn.append(batchify.Stack(dtype=_dtype))
                                batchify_fn = batchify.Tuple(batchify_fn)
                                ret_use_npy = batchify_fn(random_data_npy)
                                ret_use_mx = batchify_fn(
                                    [tuple(mx.nd.array(ele[i], dtype=dtype) for i in range(TOTAL_ELE_NUM)) for ele in
                                     random_data_npy])
                                for i in range(TOTAL_ELE_NUM):
                                    if i in pad_index:
                                        assert ret_use_npy[i][0].dtype == ret_use_mx[i][0].dtype == dtype
                                        assert ret_use_npy[i][1].dtype == ret_use_mx[i][1].dtype == np.int32
                                        assert_allclose(ret_use_npy[i][0].asnumpy(),
                                                        ret_use_mx[i][0].asnumpy())
                                        assert_allclose(ret_use_npy[i][1].asnumpy(),
                                                        ret_use_mx[i][1].asnumpy())
                                        assert (ret_use_npy[i][1].shape == (batch_size,))
                                    else:
                                        assert ret_use_npy[i].dtype == ret_use_mx[i].dtype == dtype
                                        assert_allclose(ret_use_npy[i].asnumpy(), ret_use_mx[i].asnumpy())
                                for i in range(batch_size):
                                    for j in range(TOTAL_ELE_NUM):
                                        if j in pad_index:
                                            batch_data, valid_length = ret_use_npy[j][0].asnumpy(), \
                                                                       ret_use_npy[j][1].asnumpy()
                                            assert (valid_length[i] == shapes[j][i][axis])
                                        else:
                                            batch_data = ret_use_npy[j].asnumpy()
                                        pad_length = max(ele[j].shape[axis] for ele in random_data_npy)
                                        _verify_padded_arr(batch_data[i], random_data_npy[i][j], axis, pad_val,
                                                           pad_length,
                                                           dtype)
                        # Conversion of the output to an explicit float dtype.
                        for _dtype in [np.float16, np.float32]:
                            shapes = [[2 for _ in range(ndim)] for _ in range(batch_size)]
                            for i in range(len(shapes)):
                                shapes[i][axis] = np.random.randint(length_min, length_max)
                            random_data_npy = [np.random.normal(0, 1, shape).astype(dtype)
                                               for shape in shapes]
                            batchify_fn = batchify.Pad(axis=axis, pad_val=pad_val, ret_length=True, dtype=_dtype)
                            batch_data, valid_length = batchify_fn(random_data_npy)
                            batch_data_use_mx, valid_length_use_mx = batchify_fn(
                                [mx.nd.array(ele, dtype=dtype) for ele in random_data_npy])
                            assert_allclose(valid_length_use_mx.asnumpy(), valid_length.asnumpy())
                            assert batch_data.dtype == batch_data_use_mx.dtype == _dtype
                            assert valid_length.dtype == valid_length_use_mx.dtype == np.int32
| StarcoderdataPython |
3375652 | from plant import CraneMoveTime
from simulatorutils import *
from schedule import Schedule
class Simulator(object):
    """Discrete-time shop-floor simulator (legacy Python 2 code: uses
    `cmp` and old-style `list.sort(cmp_func)`).

    Replays a Schedule against the plant's machine graph one time unit per
    iteration, recording actual machine-entry and finish times back into
    the schedule object.
    """

    def __init__(self, plant):
        # The plant layout is expanded into self.graph by createGraph().
        self.plant = plant
        self.graph = []
        self.createGraph()

    def createGraph(self):
        # Build an alternating graph: one list of MachineNode per machine
        # (one node per identical machine instance), with a TraverseNode
        # (crane transfer buffer) between consecutive machines.
        for m in self.plant.machines:
            mList = []
            for q in range(m.quantity):
                mList.append(MachineNode(m))
            self.graph.append(mList)
            if m != self.plant.machines[-1]:
                self.graph.append(TraverseNode())

    def machineIndexInGraph(self, machineName):
        # Return the graph index of the MachineNode list for the named
        # machine, or None when the machine is unknown.
        for i, m in enumerate(self.graph):
            if type(m) == list and m[0].machine.name == machineName:
                return i
        return None

    def minTimeFinish(self, machineNodeList):
        # Largest remaining processing time among busy sibling nodes
        # (-1 when all are idle); used to serialise entry into machines
        # with a precedence constraint.
        max = -1  # NOTE: shadows the builtin `max` within this method
        for m in machineNodeList:
            if m.currentOrder != None:
                if m.currentOrder[1] > max:
                    max = m.currentOrder[1]
        return max

    def simulate(self, inSchedule, delay):
        # Tick-by-tick simulation. `inSchedule.startTimes` provides the
        # planned (order, ?, startTime) triples; actual entries are appended
        # to inSchedule.schedule and completions to inSchedule.finishTimes.
        # `delay` accumulates every time unit an order had to wait.
        assert type(inSchedule) == Schedule
        assert len(inSchedule.schedule) == 0
        assert len(inSchedule.finishTimes) == 0
        schedule = inSchedule.startTimes[:]
        # Python 2 comparator sort by planned start time.
        schedule.sort(lambda a, b: cmp(a[2], b[2]))
        t = schedule[0][2]
        last = schedule[-1][2] + schedule[-1][0].recipe.calcMinProcTime()
        while t <= last + delay:
            # Phase 1: try to release due orders into their first machine.
            for i, s in enumerate(schedule):
                if s == None:
                    continue
                if s[2] <= t:
                    entered = False
                    currentMachineIndex = 0
                    currentMachine = s[0].currentMachine
                    if currentMachine != "":
                        currentMachineIndex = self.machineIndexInGraph(currentMachine)
                    machine = self.graph[currentMachineIndex][0].machine
                    # Only enter when no machine break overlaps the
                    # processing interval [t, t + processing time).
                    if [z in machine.setOfBreaks() for z in
                        range(t, t + s[0].recipe[machine.name])].count(True) == 0:
                        for n in self.graph[currentMachineIndex]:
                            if n.currentOrder == None:
                                # currentOrder = [order, remaining time]
                                n.currentOrder = [s[0], s[0].recipe[n.machine.name]]
                                schedule[i] = None
                                entered = True
                                # print n.currentOrder[0], "entered", n.machine, "at time", t
                                inSchedule.schedule.append((n.currentOrder[0], str(n.machine.name), t))
                                break
                    if entered == False:
                        # Could not enter: postpone by one tick and count it.
                        # print n.currentOrder[0], "could not enter", n.machine, "at time", t
                        s[2] += 1
                        delay += 1
            # Phase 2: advance every machine and crane buffer by one tick.
            for i, n in enumerate(self.graph):
                if type(n) == list:
                    # Machine nodes: count down; on 0, finish or hand over
                    # to the following TraverseNode (crane).
                    for m in n:
                        if m.currentOrder != None:
                            if m.currentOrder[1] != 0:
                                m.currentOrder[1] -= 1
                            else:
                                if n == self.graph[-1]:
                                    # Last machine: the order leaves the plant.
                                    # print "Order", m.currentOrder[0], "finished at time", t
                                    inSchedule.finishTimes.append((m.currentOrder[0], t))
                                    m.currentOrder = None
                                else:
                                    self.graph[i + 1].orders.append(
                                        [m.currentOrder[0], CraneMoveTime])
                                    # print "Order", m.currentOrder[0], \
                                    # "left", m.machine, "at time", t
                                    m.currentOrder = None
                else:
                    # TraverseNode: count down crane moves, then try to push
                    # each waiting order into the next machine group.
                    for j, o in enumerate(n.orders):
                        if o == None:
                            continue
                        if o[1] > 0:
                            o[1] -= 1
                        else:
                            machine = self.graph[i + 1][0].machine
                            if [z in machine.setOfBreaks() for z in
                                range(t, t + o[0].recipe[machine.name])].count(True) == 0:
                                for m in self.graph[i + 1]:
                                    if m.currentOrder == None:
                                        if m.machine.precedence == False:
                                            m.currentOrder = [o[0], o[0].recipe[m.machine.name]]
                                            # print "Order", o[0], "entered", m.machine, "at time", t
                                        else:
                                            # Precedence machine: the order must also
                                            # wait for the slowest running sibling.
                                            time = max(self.minTimeFinish(self.graph[i + 1]),
                                                       o[0].recipe[m.machine.name])
                                            m.currentOrder = [o[0], time]
                                            if time != o[0].recipe[m.machine.name]:
                                                # print "Order", o[0], "entered", m.machine, \
                                                # at time", t, "with overtime", \
                                                time - o[0].recipe[m.machine.name]
                                                # NOTE(review): the bare expression above is a
                                                # no-op left over from commenting out the print.
                                            else:
                                                # print "Order", o[0], "entered", m.machine, "at time", t
                                                pass
                                        inSchedule.schedule.append((m.currentOrder[0], str(m.machine.name), t))
                                        if o[1] < 0:
                                            # print "Order", o[0], "delayed", abs(o[1]), \
                                            # "before", m.machine
                                            delay += 1
                                        n.orders[j] = None
                                        break
                            else:
                                # Machine break ahead: the order waits a tick.
                                delay += 1
            t += 1
| StarcoderdataPython |
1916385 | <gh_stars>0
default_app_config = 'core.actuator.apps.ActuatorConfig' | StarcoderdataPython |
103985 | <gh_stars>0
# Legacy Python 2 script (print statements, dict.has_key): pulls yesterday's
# Harvest time entries for every user and mirrors each raw entry into a
# MySQL `timesheet` table. Credentials come from environment variables.
import sys
reload(sys)  # Reload does the trick! (Python 2 hack to re-enable setdefaultencoding)
sys.setdefaultencoding('UTF8')
sys.path.append("packages")
#from Harvest.harvest import Harvest, HarvestError
import os
from harvest import Harvest, HarvestError
from datetime import datetime, timedelta
import time
#import simplejson as json
import json
import mysql.connector
from mysql.connector import errorcode
#import pprint

# Harvest Setup
harvest_creds = {'uri': os.getenv("HARVEST_URI"),
                 'email': os.getenv("HARVEST_EMAIL"),
                 'password': os.getenv("HARVEST_PASSWORD")}

URI = harvest_creds['uri']
EMAIL = harvest_creds['email']
PASS = harvest_creds['password']
h = Harvest(URI,EMAIL,PASS)

# Var Setup (several of these accumulators are populated but never
# consumed in this script -- presumably used by a later reporting step)
user_hours={}
user_names={}
project_hours={}
timesheet_punches={}
email_html=""

# Yesterday - adjust to your liking: [midnight-1day, midnight)
end = datetime.today().replace( hour=0, minute=0, second=0 )
start = end + timedelta(-1)

#mysql_creds = json.loads(open('mysql.json').read())
#mysql_creds = json.loads(os.getenv("MYSQL"))
mysql_creds = {'user': os.getenv("MYSQL_USER"),
               'password': os.getenv("MYSQL_PASSWORD"),
               'database': os.getenv("MYSQL_DATABASE"),
               'host': os.getenv("MYSQL_HOST"),
               'port': os.getenv("MYSQL_PORT", 3306)}

cnx = mysql.connector.connect(user=mysql_creds['user'],
                              password=mysql_creds['password'],
                              host=mysql_creds['host'],
                              database=mysql_creds['database'],
                              port=mysql_creds['port'])
cursor = cnx.cursor()

try:
    # For each user, total yesterday's hours and insert every non-adjustment
    # entry (joined with project/client/task names) into MySQL.
    for user in h.users():
        user_hours[user.email] = 0
        user_names[user.email] = user.first_name + " " + user.last_name
        for entry in user.entries( start, end ):
            if(not entry.adjustment_record):
                user_hours[user.email] += entry.hours
                project = h.project(entry.project_id)
                client = h.client(project.client_id)
                task = h.task(entry.task_id)
                # Running per-project totals (dict.has_key is Python 2 only).
                if(project_hours.has_key(project.name)):
                    project_hours[project.name] += entry.hours
                else:
                    project_hours[project.name] = entry.hours
                # Parameterised insert: values are escaped by the driver.
                add_entry = ("INSERT INTO timesheet "
                             "(id, project_id, task_id, user_id, hours, "
                             "notes, client, created_at, updated_at, project, task) "
                             "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s )")
                entry_data = (entry.id, project.id, entry.task_id, entry.user_id, entry.hours,
                              entry.notes, client.name, entry.created_at, entry.updated_at, project.name, task.name)
                cursor.execute(add_entry, entry_data)
                #print cursor.lastrowid
                cnx.commit()
except HarvestError:
    # NOTE(review): swallows the error after a bare message; rows committed
    # before the failure stay in the database.
    print "error"

cursor.close()
cnx.close()
6642202 | # -*- coding: utf-8 -*-
"""
@Project :
@FileName:
@Author :penghr
@Time :202x/xx/xx xx:xx
@Desc :
"""
import math
import cv2
import numpy as np
import scipy.spatial
import torch
import torch.nn as nn
import torch.nn.functional as F
def LMDS_counting(fmap, img_name, f_loc):
    """Count heads in a density map via local-maximum detection.

    fmap is expected to be a (1, 1, H, W) tensor. Local maxima are kept by
    3x3 max-pooling NMS, thresholded at 100/255 of the global peak, and
    binarised; a peak below 0.1 marks a negative sample (count 0). Writes
    "<img_name> <count> " to f_loc and returns (count, HxW keypoint map
    as numpy, f_loc).
    """
    peak = torch.max(fmap).item()
    # 3x3 max-pool NMS: keep only pixels equal to their local maximum.
    pooled = nn.functional.max_pool2d(fmap, (3, 3), stride=1, padding=1)
    nms_mask = (pooled == fmap).float()
    response = nms_mask * fmap
    # Binarise: drop weak maxima relative to the global peak.
    response[response < 100.0 / 255.0 * peak] = 0
    response[response > 0] = 1
    # Negative sample: no head present at all.
    if peak < 0.1:
        response = response * 0
    head_count = int(torch.sum(response).item())
    kpoint = response.data.squeeze(0).squeeze(0).cpu().numpy()
    f_loc.write('{} {} '.format(img_name, head_count))
    return head_count, kpoint, f_loc
def generate_point_map(kpoint, f_loc, rate=1):
    """Render detected locations as black dots on a white canvas.

    Every non-zero cell of the 2-D `kpoint` map becomes a filled black
    circle at (col*rate, row*rate); the "x y " pairs are written to f_loc
    followed by a newline. Returns the uint8 BGR canvas.
    """
    rows, cols = np.nonzero(kpoint)
    canvas_h = int(kpoint.shape[0] * rate)
    canvas_w = int(kpoint.shape[1] * rate)
    # White background canvas, scaled by `rate`.
    point_map = np.zeros((canvas_h, canvas_w, 3), dtype="uint8") + 255
    coords = []
    for r, c in zip(rows, cols):
        y = int(r * rate)
        x = int(c * rate)
        coords.append([x, y])
        cv2.circle(point_map, (x, y), 2, (0, 0, 0), -1)
    for x, y in coords:
        f_loc.write('{} {} '.format(math.floor(x), math.floor(y)))
    f_loc.write('\n')
    return point_map
def generate_bounding_boxes(kpoint, fname, resize):
    """Draw one adaptive green square per detected point onto the image.

    sigma (box half-size) is 0.1 * (sum of distances to the 3 nearest
    neighbours) when more than one point exists, otherwise a quarter of the
    average density-map dimension; it is always capped at 5% of the smaller
    image side. Returns the resized image with rectangles drawn.
    """
    # change the data path if the images live elsewhere
    Img_data = cv2.imread(fname)
    # ori_Img_data = Img_data.copy()
    Img_data = cv2.resize(Img_data, resize)

    # Generate sigma from k-nearest-neighbour distances (k=4 includes self).
    pts = np.array(list(zip(np.nonzero(kpoint)[1], np.nonzero(kpoint)[0])))
    leafsize = 2048
    # build kdtree
    tree = scipy.spatial.KDTree(pts.copy(), leafsize=leafsize)
    distances, _locations = tree.query(pts, k=4)

    for index, pt in enumerate(pts):
        # BUG FIX: removed an unused per-point `pt2d` array allocation
        # (O(H*W) per detection) that was never read.
        if np.sum(kpoint) > 1:
            sigma = (distances[index][1] + distances[index][2] + distances[index][3]) * 0.1
        else:
            sigma = np.average(np.array(kpoint.shape)) / 2. / 2.  # case: 1 point
        sigma = min(sigma, min(Img_data.shape[0], Img_data.shape[1]) * 0.05)

        # BUG FIX: the original selected `t` through an if/else whose two
        # branches both assigned 2; collapsed to a single assignment.
        t = 2
        Img_data = cv2.rectangle(Img_data, (
            int((pt[0] * Img_data.shape[1] / resize[0] - sigma)), int((pt[1] * Img_data.shape[0] / resize[1] - sigma))),
            (int((pt[0] * Img_data.shape[1] / resize[0] + sigma)),
             int((pt[1] * Img_data.shape[0] / resize[1] + sigma))), (0, 255, 0), t)

    return Img_data
| StarcoderdataPython |
3525034 | import asyncio
from src.init import init
# One-shot bootstrap: run the project's async init() to completion.
# NOTE(review): pre-3.10 event-loop pattern; asyncio.run(init()) is the
# modern equivalent for a script like this.
loop = asyncio.get_event_loop()
loop.run_until_complete(init())
print('Finished...')
4990253 | <reponame>stannida/netflix-wrapped
import imdb
def get_genres(name):
    """Return the genre list of the top IMDb search hit for `name`.

    Returns None when the movie record has no genres. Raises IndexError
    when the search yields no results (unchanged from the original).
    """
    ia = imdb.IMDb()
    movies = ia.search_movie(name)
    # Search results are stubs; fetch the full record for the first hit.
    _id = movies[0].movieID
    movie = ia.get_movie(_id)
    # BUG FIX: removed `display(movie)` -- an IPython-notebook-only helper
    # that raises NameError in a plain interpreter -- and the unused
    # `genres = {}` local.
    if movie['genres']:
        return movie['genres']
    return None
| StarcoderdataPython |
6649774 | <gh_stars>0
from models.cells.esn_cell import ESNCell as ESNCell_numpy
from models.cells.esn_cell_torch import ESNCell as ESNCell_torch
from models.cells.gru_cell import GRUCell
from models.cells.lstm_cell import LSTMCell
from models.cells.rnn_cell import RNNCell
def get_cell(type, reservoir_size, radius, sparsity, sigma_input, W_scaling=1, flip_sign=False, resample=True):
    """Factory for recurrent cells.

    `type` selects the cell class; the reservoir-specific arguments
    (radius, sparsity, sigma_input, W_scaling, flip_sign, resample) are
    only used by the ESN variants. Raises RuntimeError for unknown types.
    """
    if type == 'ESN':
        return ESNCell_numpy(reservoir_size, radius, sparsity, sigma_input, W_scaling, flip_sign, resample)
    if type == 'ESN_torch':
        return ESNCell_torch(reservoir_size, radius, sparsity, sigma_input)
    gated_cells = {'GRU': GRUCell, 'LSTM': LSTMCell, 'RNN': RNNCell}
    if type in gated_cells:
        return gated_cells[type](reservoir_size)
    raise RuntimeError('Unknown cell type.')
| StarcoderdataPython |
1778246 | <gh_stars>10-100
#
# Copyright (c) 2020 Saarland University.
#
# This file is part of AM Parser
# (see https://github.com/coli-saar/am-parser/).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from allennlp.nn.util import get_text_field_mask, get_range_vector
from allennlp.nn.util import get_device_of, get_lengths_from_binary_sequence_mask
from typing import Dict, Optional, Tuple, Any, List
import torch
from graph_dependency_parser.components.losses.base import EdgeLoss, EdgeExistenceLoss, EdgeLabelLoss
# Epsilon added inside log() so fully-masked rows stay finite. Deliberately
# larger than AllenNLP's 1e-45, which underflows to 0 on some machines
# (see https://github.com/coli-saar/am-parser/issues/94).
SMALL_FLOAT = 1e-13

def masked_log_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
    """Log-softmax of `vector` restricted to positions where `mask` is non-zero.

    Adapted from AllenNLP 0.8.4, using SMALL_FLOAT instead of 1e-45.
    Passing mask=None gives a plain log_softmax. A mask with fewer
    dimensions than `vector` is unsqueezed at dim 1 until the ranks match;
    for any other broadcasting, reshape the mask before calling. Rows that
    are completely masked yield arbitrary (but finite) values -- mask the
    result downstream. Logits more negative than roughly -30 may interact
    badly with the epsilon shift.
    """
    if mask is None:
        return torch.nn.functional.log_softmax(vector, dim=dim)
    mask = mask.float()
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    # Adding log(mask + eps) drives masked logits to ~log(eps) (very
    # negative) without producing -inf/nan when an entire row is masked.
    shifted = vector + (mask + SMALL_FLOAT).log()
    return torch.nn.functional.log_softmax(shifted, dim=dim)
@EdgeExistenceLoss.register("dm_edge_loss")
class DMLoss(EdgeExistenceLoss):
    """Edge-existence loss in the style of Dozat & Manning's biaffine parser."""

    def loss(self, edge_scores: torch.Tensor,
             head_indices: torch.Tensor,
             mask: torch.Tensor) -> torch.Tensor:
        """Negative log-likelihood of the gold head attachments.

        Parameters
        ----------
        edge_scores : ``torch.Tensor``
            Shape (batch, seq_len, seq_len): attachment score of every
            word to every candidate head.
        head_indices : ``torch.Tensor``
            Shape (batch, seq_len): gold head index for every word.
        mask : ``torch.Tensor``
            Shape (batch, seq_len): 1 for real tokens, 0 for padding.

        Returns
        -------
        ``torch.Tensor``
            The arc negative log likelihood (optionally normalised by the
            number of valid, non-ROOT positions).
        """
        mask_f = mask.float()
        batch_size, seq_len, _ = edge_scores.size()
        device = get_device_of(edge_scores)
        # Normalise the scores over candidate heads, then zero out rows
        # and columns that correspond to padding.
        log_attach = masked_log_softmax(edge_scores, mask)
        log_attach = log_attach * mask_f.unsqueeze(2) * mask_f.unsqueeze(1)
        # Index helpers for advanced indexing: one batch index per row,
        # one token index per position.
        batch_idx = get_range_vector(batch_size, device).unsqueeze(1)
        token_idx = get_range_vector(seq_len, device).view(1, seq_len).expand(batch_size, seq_len).long()
        # Log-probability of each token's gold head; drop position 0,
        # the symbolic ROOT token whose head we do not predict.
        gold_logprob = log_attach[batch_idx, token_idx, head_indices][:, 1:]
        arc_nll = -gold_logprob.sum()
        if self.normalize_wrt_seq_len:
            # One position per sequence is the symbolic ROOT token.
            valid_positions = mask.sum() - batch_size
            arc_nll /= valid_positions.float()
        return arc_nll
@EdgeLabelLoss.register("dm_label_loss")
class DMLabelLoss(EdgeLabelLoss):
    """Edge-label (dependency tag) loss for the Dozat & Manning parser."""

    def loss(self, edge_label_logits: torch.Tensor, mask: torch.Tensor, head_tags: torch.Tensor) -> torch.Tensor:
        """Negative log-likelihood of the gold incoming-edge labels.

        Parameters
        ----------
        edge_label_logits : ``torch.Tensor``
            Shape (batch, seq_len, num_head_tags): raw label scores for
            every token's incoming edge.
        mask : ``torch.Tensor``
            Shape (batch, seq_len): 1 for real tokens, 0 for padding.
        head_tags : ``torch.Tensor``
            Shape (batch, seq_len): gold label id for every word.

        Returns
        -------
        ``torch.Tensor``
            The label negative log likelihood (optionally normalised by
            the number of valid, non-ROOT positions).
        """
        mask_f = mask.float()
        batch_size, seq_len, _ = edge_label_logits.size()
        device = get_device_of(edge_label_logits)
        # Normalise over labels per position, zeroing padded positions.
        log_labels = masked_log_softmax(edge_label_logits,
                                        mask.unsqueeze(-1)) * mask_f.unsqueeze(-1)
        batch_idx = get_range_vector(batch_size, device).unsqueeze(1)
        token_idx = get_range_vector(seq_len, device).view(1, seq_len).expand(batch_size, seq_len).long()
        # Log-probability of each token's gold label; drop position 0,
        # the symbolic ROOT token.
        gold_logprob = log_labels[batch_idx, token_idx, head_tags][:, 1:]
        if self.normalize_wrt_seq_len:
            # One position per sequence is the symbolic ROOT token.
            valid_positions = mask.sum() - batch_size
            return -gold_logprob.sum() / valid_positions.float()
        return -gold_logprob.sum()
| StarcoderdataPython |
6478301 | <gh_stars>1-10
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten
import pickle
from keras.optimizers import Adam
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import time
import random
# ---------------------------------------------------------------------------
# Bootstrap-ensemble CNN for transport-mode classification (Geolife data).
# Loads the pickled (N, 1, Threshold, 4) instance tensor + labels, builds a
# CNN over the channel axis, and trains 7 bootstrap replicas, collecting
# their test-set scores and predictions.
# ---------------------------------------------------------------------------
start_time = time.clock()
np.random.seed(7)
random.seed(7)

filename = '../Combined Trajectory_Label_Geolife/Revised_KerasData_Smoothing.pickle'
with open(filename, mode='rb') as f:
    TotalInput, FinalLabel = pickle.load(f, encoding='latin1')  # Also can use the encoding 'iso-8859-1'

NoClass = len(list(set(np.ndarray.flatten(FinalLabel))))
Threshold = len(TotalInput[0, 0, :, 0])

# Making training and test data: 80% Training, 20% Test
Train_X, Test_X, Train_Y, Test_Y_ori = train_test_split(TotalInput, FinalLabel, test_size=0.20, random_state=7)

# BUG FIX: the original one-hot encoded Train_Y here and then read
# Train_Y[j, 0] inside the bootstrap loop below -- that is merely the
# class-0 indicator, which destroyed every resampled label. Train_Y is now
# kept as integer labels (assumed shape (N, 1) -- TODO confirm against the
# pickle) and one-hot encoded only after resampling.
Test_Y = keras.utils.to_categorical(Test_Y_ori, num_classes=NoClass)

# Model and Compile
model = Sequential()
model.add(Conv2D(32, (1, 3), strides=(1, 1), padding='same', activation='relu', input_shape=(1, Threshold, 4)))
model.add(Conv2D(32, (1, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(1, 2)))
model.add(Conv2D(64, (1, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(Conv2D(64, (1, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(1, 2)))
model.add(Conv2D(128, (1, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(Conv2D(128, (1, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(1, 2)))
model.add(Dropout(.5))
model.add(Flatten())
A = model.output_shape
model.add(Dense(int(A[1] * 1/4.), activation='relu'))
model.add(Dropout(.5))
model.add(Dense(NoClass, activation='softmax'))

optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Ensemble configuration: 7 bagged replicas trained on bootstrap resamples.
# NOTE(review): the same `model` object is re-fit every round, so replicas
# share warm-started weights rather than fresh initialisations -- presumably
# unintended for bagging; confirm before relying on ensemble diversity.
score = []
Pred = []
for i in range(7):
    np.random.seed((i + 1) * 2)
    # Bootstrap sample of training indices (with replacement).
    Number = np.random.choice(len(Train_X), size=len(Train_X), replace=True, p=None)
    Ens_Train_X = np.zeros((len(Train_X), 1, Threshold, 4), dtype=float)
    Ens_Train_Y = np.zeros((len(Train_Y), 1), dtype=float)
    counter = 0
    for j in Number:
        Ens_Train_X[counter, :, :, :] = Train_X[j, :, :, :]
        # Integer class label of the resampled instance (see BUG FIX above).
        Ens_Train_Y[counter, 0] = Train_Y[j, 0]
        counter += 1
    Ens_Train_Y = keras.utils.to_categorical(Ens_Train_Y, num_classes=NoClass)
    model.fit(Ens_Train_X, Ens_Train_Y, epochs=100, batch_size=64, shuffle=False)
    score.append(model.evaluate(Test_X, Test_Y, batch_size=64))
    Pred.append(model.predict(Test_X, batch_size=64))
# Calculating the accuracy, precision, recall of the averaged ensemble.
# Predictions of the 7 replicas are averaged, then argmax picks the class.
CombinedPred = np.mean(Pred, axis=0)
Pred_Label = np.argmax(CombinedPred, axis=1)

# Ensemble accuracy = fraction of test instances predicted correctly.
counter = 0
for i in range(len(Pred_Label)):
    if Pred_Label[i] == Test_Y_ori[i]:
        counter += 1
EnsembleAccuracy = counter * 1./len(Pred_Label)

# Index lists per class: predicted-positive and actual-positive instances.
PredictedPositive = []
for i in range(NoClass):
    AA = np.where(Pred_Label == i)[0]
    PredictedPositive.append(AA)

ActualPositive = []
for i in range(NoClass):
    AA = np.where(Test_Y_ori == i)[0]
    ActualPositive.append(AA)

# Split each class's predicted positives into true/false positives.
TruePositive = []
FalsePositive = []
for i in range(NoClass):
    AA = []
    BB = []
    for j in PredictedPositive[i]:
        if Pred_Label[j] == Test_Y_ori[j]:
            AA.append(j)
        else:
            BB.append(j)
    TruePositive.append(AA)
    FalsePositive.append(BB)

# Per-class precision = TP / predicted-positive, recall = TP / actual-positive.
# NOTE(review): divides without guarding against a class never being
# predicted (ZeroDivisionError) -- confirm class coverage on this dataset.
Precision = []
Recall = []
for i in range(NoClass):
    Precision.append(len(TruePositive[i]) * 1./len(PredictedPositive[i]))
    Recall.append(len(TruePositive[i]) * 1./len(ActualPositive[i]))

ConfusionM = confusion_matrix(list(Test_Y_ori), Pred_Label, labels=range(NoClass))

print(score)
print('Ensemble Accuracy: ', EnsembleAccuracy)
print('Confusion Matrix: ', ConfusionM)
print("Recall", Recall)
print('Precision', Precision)
print(time.clock() - start_time, "seconds")
| StarcoderdataPython |
4920890 | <reponame>smujuzi/Consumer-Protection-Portal
# Generated by Django 3.0.6 on 2020-06-03 10:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine the choice set and default of Account.role."""

    dependencies = [
        ('account', '0003_auto_20200603_1320'),
    ]

    operations = [
        migrations.AlterField(
            model_name='account',
            name='role',
            # Stored value is the numeric string ('1'..'5'); the second
            # tuple element is the human-readable label shown in forms.
            field=models.CharField(choices=[('1', 'complainant'), ('2', 'admin'), ('3', 'I.T. Officer'), ('4', 'Service Desk'), ('5', 'Director Legal')], default='complainant', max_length=200),
        ),
    ]
1622165 | <reponame>deveil/mrq<gh_stars>100-1000
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from past.utils import old_div
import time
from mrq.queue import Queue
import pytest
import os
import random
@pytest.mark.parametrize(["p_max_latency", "p_min_observed_latency", "p_max_observed_latency"], [
    [1, -0.3, 1],
    [0.01, -1, 0.02]
])
def test_job_max_latency(worker, p_max_latency, p_min_observed_latency, p_max_observed_latency):
    # With --max_latency the worker's polling back-off is capped, so the
    # average observed dequeue latency (corrected for baseline) must stay
    # inside the parametrised window even after idle periods.
    worker.start(flags=" --ensure_indexes --greenlets=1 --max_latency=%s" % (p_max_latency), trace=False)

    def get_latency():
        # Latency = (time reported by the GetTime job) - (local enqueue time).
        t = time.time()
        return worker.send_task("tests.tasks.general.GetTime", {}) - t

    # Warm up the worker
    get_latency()

    # This is the latency induced by our test system & general task work
    # We're on the same machine so even in different processes time.time() should be pretty reliable
    base_latency = get_latency()  # printed for context only, not asserted on
    print("Base latency: %ss" % base_latency)

    min_latency = min([get_latency() for _ in range(0, 20)])
    print("FYI, min latency = %ss" % min_latency)

    # Sleep a while with an idle worker to make the poll interval go up
    latencies = []
    for i in range(6):
        time.sleep(5)
        latency = get_latency() - min_latency
        print("Observed latency (corrected): %ss" % latency)
        latencies.append(latency)

    avg_latency = old_div(float(sum(latencies)), len(latencies))
    print("Average observed latency: %ss" % avg_latency)
    assert p_min_observed_latency <= avg_latency < p_max_observed_latency
@pytest.mark.parametrize(["p_latency", "p_min", "p_max"], [
    [0, 0, 3],
    ["0.05", 4, 30],
    ["0.05-0.1", 4, 40]
])
def test_network_latency(worker, p_latency, p_min, p_max):
    """Simulated network latency must slow five queued Mongo inserts into
    the expected total-time window."""
    worker.start(flags=" --max_latency=1 --mongodb_logs 0 --report_interval 10000 --add_network_latency=%s" % (p_latency), trace=False)

    started_at = time.time()
    for _ in range(5):
        worker.send_task("tests.tasks.general.MongoInsert", {"x": 1}, block=False)
    worker.wait_for_idle()
    elapsed = time.time() - started_at

    assert p_min < elapsed < p_max
def benchmark_task(worker, taskpath, taskparams, tasks=1000, greenlets=50, processes=0, max_seconds=10, profile=False, quiet=True, raw=False, queues="default", config=None):
    """Start a worker, run `tasks` jobs of `taskpath` with `taskparams`,
    assert the batch finishes within `max_seconds`, stop the worker, and
    return (results, elapsed_seconds). With raw=True, raw queue payloads
    are enqueued instead of regular tasks."""
    worker.start(flags="--ensure_indexes --processes %s --greenlets %s%s%s%s" % (
        processes,
        greenlets,
        " --profile" if profile else "",
        " --quiet" if quiet else "",
        " --config %s" % config if config else ""
    ), queues=queues, trace=False)

    # Warm up the workers with one simple task.
    # NOTE(review): min(1, processes) is 0 when processes == 0, so no
    # warm-up tasks are sent in that mode -- presumably max() was intended.
    print("Warming up workers...")
    worker.send_tasks("tests.tasks.general.Add", [{"a": i, "b": 0, "sleep": 0} for i in range(greenlets * min(1, processes))])

    print("Starting benchmark...")
    start_time = time.time()

    # result = worker.send_tasks("tests.tasks.general.Add",
    #                            [{"a": i, "b": 0, "sleep": 0} for i in range(n_tasks)])
    if raw:
        result = worker.send_raw_tasks(taskpath, taskparams)
    else:
        result = worker.send_tasks(taskpath, taskparams)
    total_time = time.time() - start_time

    print("%s tasks done with %s greenlets and %s processes in %0.3f seconds : %0.2f jobs/second!" % (tasks, greenlets, processes, total_time, old_div(tasks, total_time)))
    assert total_time < max_seconds

    worker.stop()

    # print subprocess.check_output("ps -ef", shell=True)

    return result, total_time
@pytest.mark.parametrize(["p_processes"], [[0], [5]])
def test_performance_simpleadds_regular(worker, p_processes):
    """10k Add jobs must finish within the time budget and return their
    inputs in submission order."""
    task_count = 10000
    outcome, _elapsed = benchmark_task(
        worker,
        "tests.tasks.general.Add",
        [{"a": i, "b": 0, "sleep": 0} for i in range(task_count)],
        tasks=task_count,
        profile=False,
        greenlets=30,
        processes=p_processes,
        max_seconds=36)

    # ... and return correct results
    assert outcome == list(range(task_count))
@pytest.mark.parametrize(["p_queue", "p_greenlets"], [x1 + x2 for x1 in [
    ["testperformance_raw"],
    ["testperformance_set"],
    ["testperformance_timed_set"]
] for x2 in [
    [100]
]])
def test_performance_simpleadds_raw(worker, p_queue, p_greenlets):
    """10k raw payloads on each raw-queue flavour must drain within budget."""
    task_count = 10000
    benchmark_task(worker,
                   p_queue,
                   [str(i) for i in range(task_count)],
                   tasks=task_count,
                   greenlets=p_greenlets,
                   processes=0,
                   max_seconds=35,
                   raw=True,
                   queues=p_queue,
                   config="tests/fixtures/config-raw1.py")
# TODO add network latency
def test_performance_httpstatic_fast(worker, httpstatic):
    """1000 Fetch jobs against the local static HTTP server within 10s."""
    httpstatic.start()
    task_count = 1000
    benchmark_task(worker,
                   "tests.tasks.general.Fetch",
                   [{"url": "http://127.0.0.1:8081/"} for _ in range(task_count)],
                   tasks=task_count,
                   greenlets=50,
                   max_seconds=10,
                   profile=False)
def test_performance_writeconcern(worker_mongodb_with_journal):
    """Compare acknowledged (w=1, j=True) vs unacknowledged (w=0) MongoDB writes.

    NOTE: deliberately disabled — the unconditional ``return pytest.skip``
    below makes everything after it unreachable until journaled MongoDB is
    considered stable again.
    """
    return pytest.skip("Journaled MongoDB not stable enough")
    # --- unreachable while the skip above is in place ---
    if os.environ.get("STACK_STARTED"):
        return pytest.skip()
    worker = worker_mongodb_with_journal
    n_tasks = 500
    n_greenlets = 1
    n_processes = 0
    max_seconds = 35
    # First pass: fully acknowledged, journaled status updates.
    result, total_time_acknowledged = benchmark_task(
        worker,
        "tests.tasks.general.LargeResult",
        [{
            "size": 100000,
            "status_success_update_w": 1,
            "status_success_update_j": True,
            "sleep": 0
        } for i in range(n_tasks)],
        tasks=n_tasks,
        greenlets=n_greenlets,
        processes=n_processes,
        max_seconds=max_seconds
    )
    print(total_time_acknowledged)
    # Second pass: fire-and-forget writes (w=0, no journal requirement).
    result, total_time_unacknowledged = benchmark_task(
        worker,
        "tests.tasks.general.LargeResult",
        [{
            "size": 100000,
            "status_success_update_w": 0,
            "status_success_update_j": None,
            "sleep": 0
        } for i in range(n_tasks)],
        tasks=n_tasks,
        greenlets=n_greenlets,
        processes=n_processes,
        max_seconds=max_seconds
    )
    print("total_time_acknowledged: ", total_time_acknowledged)
    print("total_time_unacknowledged: ", total_time_unacknowledged)
    # Make sure it's faster.
    assert total_time_unacknowledged < total_time_acknowledged * 0.9
# def test_performance_httpstatic_external(worker):
# n_tasks = 1000
# n_greenlets = 100
# max_seconds = 25
# url = "http://bing.com/favicon.ico"
# url = "http://ox-mockserver.herokuapp.com/ipheaders"
# url = "http://ox-mockserver.herokuapp.com/timeout?timeout=1000"
# result, total_time = benchmark_task(worker,
# "tests.tasks.general.Fetch",
# [{"url": url} for _ in range(n_tasks)],
# tasks=n_tasks,
# greenlets=n_greenlets,
# max_seconds=max_seconds, quiet=False)
def test_performance_queue_cancel_requeue(worker):
    """Benchmark queueing, bulk-cancelling and bulk-requeueing 10k jobs.

    Uses the "noexec" queue (never consumed) so job counts stay stable
    between the three phases; each phase has its own time budget.
    """
    worker.start(trace=False)
    n_tasks = 10000
    # Phase 1: enqueue without blocking.
    start_time = time.time()
    worker.send_tasks(
        "tests.tasks.general.Add",
        [{"a": i, "b": 0, "sleep": 0} for i in range(n_tasks)],
        queue="noexec",
        block=False
    )
    queue_time = time.time() - start_time
    print("Queued %s tasks in %s seconds (%s/s)" % (n_tasks, queue_time, old_div(float(n_tasks), queue_time)))
    assert queue_time < 2
    assert Queue("noexec").size() == n_tasks
    assert worker.mongodb_jobs.mrq_jobs.count() == n_tasks
    assert worker.mongodb_jobs.mrq_jobs.find(
        {"status": "queued"}).count() == n_tasks
    # Then cancel them all
    start_time = time.time()
    res = worker.send_task(
        "mrq.basetasks.utils.JobAction",
        {"queue": "noexec", "action": "cancel"},
        block=True
    )
    assert res["cancelled"] == n_tasks
    queue_time = time.time() - start_time
    print("Cancelled %s tasks in %s seconds (%s/s)" % (n_tasks, queue_time, old_div(float(n_tasks), queue_time)))
    assert queue_time < 5
    assert worker.mongodb_jobs.mrq_jobs.find(
        {"status": "cancel"}).count() == n_tasks
    # Special case because we cancelled by queue: they should have been
    # removed from redis.
    assert Queue("noexec").size() == 0
    # Then requeue them all
    start_time = time.time()
    res = worker.send_task(
        "mrq.basetasks.utils.JobAction",
        {"queue": "noexec", "action": "requeue"},
        block=True
    )
    queue_time = time.time() - start_time
    print("Requeued %s tasks in %s seconds (%s/s)" % (n_tasks, queue_time, old_div(float(n_tasks), queue_time)))
    assert queue_time < 2
    assert worker.mongodb_jobs.mrq_jobs.find(
        {"status": "queued"}).count() == n_tasks
    # They should be back in the queue
    assert Queue("noexec").size() == n_tasks
    assert res["requeued"] == n_tasks
@pytest.mark.parametrize(["p_queue_type", "p_greenlets", "p_min_efficiency"], [
    ["regular", 30, 0.8],
    ["raw", 30, 0.8],
    ["raw_nostorage", 30, 0.9]
])
def test_worker_efficiency(worker, p_queue_type, p_greenlets, p_min_efficiency):
    """Measure scheduling efficiency against a theoretical perfect time.

    500 jobs with shuffled sleep durations are run on ``p_greenlets``
    greenlets; with perfect packing the batch takes total_sleep/greenlets
    seconds. The worker must reach at least ``p_min_efficiency`` of that.
    """
    if p_queue_type == "regular":
        worker.start(trace=False, flags="--ensure_indexes --greenlets %s" % p_greenlets, queues="default")
    elif p_queue_type == "raw":
        worker.start(trace=False, flags="--ensure_indexes --greenlets %s --config tests/fixtures/config-raw1.py" % p_greenlets,
                     queues="testperformance_efficiency_raw")
    elif p_queue_type == "raw_nostorage":
        worker.start(trace=False, flags="--greenlets %s --config tests/fixtures/config-raw1.py" % p_greenlets,
                     queues="testperformance_efficiency_nostorage_raw")
    # 500 sleeps of 0ms..1996ms, shuffled so packing is non-trivial.
    sleep_times = [float(ms) * 4 / 1000 for ms in range(0, 500)]
    random.shuffle(sleep_times)
    total_sleep_time = sum(sleep_times)
    count_jobs = len(sleep_times)
    start_time = time.time()
    if p_queue_type == "regular":
        worker.send_tasks(
            "tests.tasks.general.Add",
            [{"a": 1, "b": 2, "sleep": s} for s in sleep_times]
        )
    elif p_queue_type == "raw":
        worker.send_raw_tasks("testperformance_efficiency_raw", sleep_times)
    elif p_queue_type == "raw_nostorage":
        worker.send_raw_tasks("testperformance_efficiency_nostorage_raw", sleep_times)
    total_time = time.time() - start_time
    if p_queue_type == "raw_nostorage":
        # nostorage config must not persist any job document.
        assert worker.mongodb_jobs.mrq_jobs.count() == 0
    else:
        assert worker.mongodb_jobs.mrq_jobs.find({"status": "success"}).count() == count_jobs
    perfect_time = (total_sleep_time / p_greenlets) + 1  # + 1 to compensate for the worker stopping time w/ decreasing job count
    print("Total time for %d jobs with %0.4fs of sleeping time + %d greenlets : %0.4fs (%0.2f%% efficiency)" % (
        count_jobs, total_sleep_time, p_greenlets, total_time, perfect_time * 100 / total_time
    ))
    # We can't be perfectly efficient!
    assert (perfect_time - 1) < total_time
    # But we should be at least 80% efficient
    assert perfect_time > total_time * p_min_efficiency
| StarcoderdataPython |
5135306 | import unittest
from robopager.robopager import parse_checklist, PDInteraction
from robopager.check_type.daily_email_check import CheckEmails
from robopager.check_type.intraday_latency_check import CheckWF
from robopager.check_type import intraday_latency_check
from datetime import datetime
import pytz
from unittest.mock import patch, MagicMock
from datacoco_core.config import config
class TestCommon(unittest.TestCase):
    """Tests for robopager helpers shared across check types."""

    def test_parse_checklist(self):
        """Parse the YAML checklist fixture and verify the output structure."""
        # Renamed from ``config`` so this local does not shadow the
        # ``datacoco_core.config.config`` callable imported at module level
        # (it is used as a function in TestPDInteraction.setUp).
        checklist_path = "tests/test_data/test.yaml"
        expected = {
            "checklist_parse_test": {
                "type": "batchy",
                "pd_description": "Test for parsing checklist",
                "pd_service": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "wf_name": "checklist_parse_test",
                "check_time": "09:00",
                "poll_sec": 180,
                "latency_min": 60,
            }
        }
        found = parse_checklist(checklist_path)
        self.assertDictEqual(expected, found)
class TestBatchyWF(unittest.TestCase):
    """Tests for the batchy workflow latency check (CheckWF)."""

    def setUp(self):
        # CheckWF(workflow_name, batchy_host, batchy_port)
        self.b = CheckWF("test_robopager", "0.0.0.0", "8050")

    # ``datetime`` inside the module under test is replaced with a MagicMock
    # that wraps the real class, so ``datetime.now`` can be pinned below.
    # Note: patch.object with an explicit ``new`` does not inject an argument,
    # hence only ``mock_get_status`` is received.
    @patch("datacoco_batch.batch.Batch.get_status")
    @patch.object(
        intraday_latency_check, "datetime", MagicMock(wraps=datetime)
    )
    def test_check_batchy_wf_success(self, mock_get_status):
        """A batch started 2 minutes ago (< max_latency=3) reports SUCCESS."""
        mock_get_status.return_value = {
            "global": {
                "batch_start": "2020-03-04T11:20:00.00",
                "status": "success",
            }
        }
        # Freeze "now" at 11:22 UTC, two minutes after batch_start.
        naive_time = datetime(2020, 3, 4, 11, 22, 0, 0)
        intraday_latency_check.datetime.now.return_value = pytz.utc.localize(
            naive_time
        )
        failure_count, result = self.b.check_batchy_wf(max_latency=3)
        self.assertEqual(failure_count, 0)
        self.assertEqual(result["alert_level"], "SUCCESS")
class TestPDInteraction(unittest.TestCase):
    """Tests for the PagerDuty interaction wrapper."""

    def setUp(self):
        # Credentials come from the local test config file, not the env.
        conf = config("tests/test_data/test_etl.cfg")
        sub_domain = conf["pager_duty"]["subdomain"]
        api_access_key = conf["pager_duty"]["api_access_key"]
        self.pd_service = conf["pager_duty"]["test_pd_service"]
        self.pd = PDInteraction(sub_domain, api_access_key)

    def test_trigger_incident(self):
        """Triggering a test incident returns HTTP 200."""
        # ``create=True`` patches robopager.robopager.ri even if the
        # attribute does not exist yet at import time.
        with patch("robopager.robopager.ri", create=True) as ri:
            resp = self.pd.trigger_incident(
                self.pd_service,
                "test_robopager",
                "test_robopager",
                check="test",
                override=True,
            )
            print("resp: {}".format(resp))
            self.assertEqual(resp, 200)
class TestCheckEmails(unittest.TestCase):
    """Tests for the daily e-mail presence check (CheckEmails)."""

    def setUp(self):
        """Build a CheckEmails fixture expecting a single subject line."""
        self.e = CheckEmails(
            "username",
            "password",
            "Successful - Job1 Completed",
            ["<EMAIL>"],
            # Fixed: "US/Easter" is not a valid tz database identifier and
            # would raise pytz.UnknownTimeZoneError when resolved; the
            # intended zone is "US/Eastern".
            "US/Eastern",
        )
        self.e.subjects = ["Successful - Job1 Completed"]
        self.e.emails_received = [
            "Successful - Job2 Completed",
            "Successful - Job3 Completed",
            "Successful - Job4 Completed",
            "Successful - Job1 Completed",
        ]
        print(self.e.subjects)

    def test_check_missing_emails(self):
        """The expected subject is present, so no failures are reported."""
        failure_count, results = self.e.check_missing_emails()
        self.assertEqual(failure_count, 0)
        self.assertEqual(results, [{"Successful - Job1 Completed": "ok"}])

    def test_parse_gmail_dates(self):
        """Gmail RFC-2822 date strings parse to a naive local datetime."""
        gdate = "Wed, 2 Jan 2019 01:47:40 -0500"
        date = self.e.parse_gmail_dates(gdate)
        self.assertEqual(date, datetime(2019, 1, 2, 1, 47, 40))
# Allow running this test module directly (python test_module.py).
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3271933 | # def order():
# return 1
def filter(x):
    # NOTE(review): this shadows the builtin ``filter`` for any caller that
    # imports it by name; renaming would change the public interface, so it
    # is only flagged here.
    # Debug hook: prints the received payload and implicitly returns None.
    print("filter called")
    print(x)
def detect(x):
    """Return True when *x* starts with the 4-value header 3, 0, 0, 19."""
    if len(x) < 4:
        return False
    header = (x[0], x[1], x[2], x[3])
    return header == (3, 0, 0, 19)
| StarcoderdataPython |
1924868 | <gh_stars>0
import re
from itertools import product
# Puzzle input: one docking-program instruction per line.
with open("day14.txt", "r") as f:
    data = f.read().splitlines()
def apply_mask(mask, value):
    """Apply a version-2 bitmask to *value* and return all decoded addresses.

    Mask semantics (AoC 2020 day 14, part 2): '0' keeps the corresponding
    address bit, '1' forces it to 1, and 'X' is a floating bit that expands
    to both 0 and 1. Returns the resulting addresses as ints.
    """
    # Overlay the mask onto the 36-bit binary form of the address.
    bits = []
    for addr_bit, mask_bit in zip(f"{value:>036b}", mask):
        if mask_bit == "0":
            bits.append(addr_bit)
        elif mask_bit == "1":
            bits.append("1")
        else:
            bits.append("X")
    # Expand every floating position into both possible bit values.
    floating = [i for i, bit in enumerate(bits) if bit == "X"]
    addresses = []
    for combo in product("01", repeat=len(floating)):
        candidate = list(bits)
        for pos, replacement in zip(floating, combo):
            candidate[pos] = replacement
        addresses.append(int("".join(candidate), 2))
    return addresses
# Run the program: "mask = ..." lines set the current mask; "mem[a] = v"
# lines write v to every address decoded from a under that mask.
memory = {}
for line in data:
    if "mask" in line:
        mask = line.split(" = ")[-1]
    else:
        # NOTE(review): the pattern would be cleaner as a raw string r"(\d+)".
        address, value = re.findall("(\d+)", line)
        all_addresses = apply_mask(mask, int(address))
        for address in all_addresses:
            memory[address] = int(value)
# Part 2 answer: sum of every value left in memory.
print(sum(memory.values()))
| StarcoderdataPython |
12846871 | <gh_stars>0
import speedtest
# Lets test Zuku.
# Its getting frustrating now....
# Run a full bandwidth/latency measurement against the closest server.
client = speedtest.Speedtest()
print("Loading server list...")
client.get_servers()  # fetch the public speedtest server list
print("Getting best server...")
best = client.get_best_server()
print(f"Found: {best['host']} located in : {best['country']}")
print("Performing download test..")
download_result = client.download()
print("Performing upload test..")
upload_result = client.upload()
print("Getting ping")
ping_result = client.results.ping
# Report the results; raw figures are in bits/s, shown as megabits.
print(f"Download speed: {download_result / 1024 / 1024:.2f} mb/s ")
print(f"Upload speed: {upload_result / 1024 / 1024:.2f} mb/s ")
print(f"Ping: {ping_result:.2f} ms")
| StarcoderdataPython |
9622101 | <reponame>paipaitou/bili2.0
import bili_statistics
import printer
import asyncio
from typing import Optional
import notifier
from cmd import Cmd
import getopt
from tasks.utils import UtilsTask
from tasks.bili_console import (
PrintGiftbagsTask,
PrintMedalsTask,
PrintMainBiliDailyJobTask,
PrintLiveBiliDailyJobTask,
PrintMainBiliUserInfoTask,
PrintLiveBiliUserInfoTask,
PrintJudgeTask,
PrintCapsuleTask,
OpenCapsuleTask,
SendDanmuTask,
PrintUserStatusTask
)
from tasks.custom import SendLatiaoTask, BuyLatiaoTask, BuyMedalTask
class FuncCore:
    """A deferred call: a callable plus its (possibly nested) arguments.

    Arguments that are themselves FuncCore instances are resolved
    recursively when ``exec`` runs, so lazy call trees can be composed.
    Works with both plain callables and coroutine functions.
    """

    def __init__(self, function, *args):
        self.function = function
        self.args = args

    async def exec(self):
        resolved = []
        for arg in self.args:
            # Evaluate nested deferred calls before invoking self.function.
            if isinstance(arg, FuncCore):
                resolved.append(await arg.exec())
            else:
                resolved.append(arg)
        if asyncio.iscoroutinefunction(self.function):
            return await self.function(*resolved)
        return self.function(*resolved)
def convert2int(orig) -> Optional[int]:
    """Best-effort int() conversion; returns None when *orig* cannot convert."""
    try:
        result = int(orig)
    except (ValueError, TypeError):
        return None
    return result
class BiliConsole(Cmd):
    """Interactive console dispatching numbered commands to async bili tasks.

    Each ``do_N`` method parses getopt-style flags from the command line and
    schedules the corresponding task on the shared asyncio event loop.
    """
    prompt = ''

    def __init__(self, loop: asyncio.AbstractEventLoop, room_id, printer_danmu):
        self.loop = loop  # event loop the tasks are scheduled onto
        self.default_roomid = room_id  # fallback room when -p is absent
        self._printer_danmu = printer_danmu  # danmu listener, retargetable (cmd 15)
        super().__init__()

    @staticmethod
    def guide_of_console():
        # User-facing menu; strings kept verbatim.
        print(' __________________ ')
        print('| 欢迎使用本控制台 |')
        print('| 1 输出本次统计数据 |')
        print('| 2 查看目前拥有礼物的统计 |')
        print('| 3 查看持有勋章状态 |')
        print('| 4 检查主站今日任务的情况 |')
        print('| 5 检查直播分站今日任务的情况 |')
        print('| 6 获取主站个人的基本信息 |')
        print('| 7 获取直播分站个人的基本信息 |')
        print('| 8 检查风纪委今日自动投票的情况 |')
        print('| 9 检查脚本判断的用户小黑屋情况 |')
        print('|11 当前拥有的扭蛋币 |')
        print('|12 开扭蛋币(一、十、百) |')
        print('|13 直播间的长短号码的转化 |')
        print('|14 发送弹幕 |')
        print('|15 切换监听的直播间 |')
        print('|16 控制弹幕的开关 |')
        print('|21 赠指定总数的辣条到房间 |')
        print('|22 银瓜子全部购买辣条并送到房间 |')
        print('|23 购买勋章(使用银瓜子或者硬币)|')
        print('  ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ')

    def default(self, line):
        # Unrecognised commands re-display the menu.
        self.guide_of_console()

    def emptyline(self):
        self.guide_of_console()

    # pattern = '-u:-p:'  u(user_id): 0,1…; n(num); p(point) means roomid
    # (poor naming — kept because -r was not suitable)
    def parse(self, arg, pattern, default_u=0, set_roomid=False):
        """Parse getopt-style flags; returns one value per option in *pattern*."""
        args = arg.split()
        try:
            opts, args = getopt.getopt(args, pattern)
        except getopt.GetoptError:
            return []
        dict_results = {opt_name: opt_value for opt_name, opt_value in opts}
        # '-u:-p:' -> ['-u', '-p'] (trailing empty element dropped).
        opt_names = pattern.split(':')[:-1]
        results = []
        for opt_name in opt_names:
            opt_value = dict_results.get(opt_name)
            if opt_name == '-u':
                int_value = convert2int(opt_value)
                if int_value is not None:
                    results.append(int_value)
                else:
                    results.append(default_u)
                    # -2 would be a disastrous value here
                    # results.append(-2)
            elif opt_name == '-n':
                int_value = convert2int(opt_value)
                if int_value is not None:
                    results.append(int_value)
                else:
                    results.append(0)
            elif opt_name == '-p':
                int_value = convert2int(opt_value)
                if int_value is not None:
                    room_id = int_value
                else:
                    room_id = self.default_roomid
                if set_roomid:
                    self.default_roomid = room_id
                # Deferred: the short id is resolved to a real room id lazily.
                results.append(self.fetch_real_roomid(room_id))
            else:
                results.append(opt_value)
        return results

    def do_1(self, arg):
        user_id, = self.parse(arg, '-u:')
        self.exec_func_threads(
            FuncCore(bili_statistics.print_statistics, user_id))

    def do_2(self, arg):
        user_id, = self.parse(arg, '-u:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, PrintGiftbagsTask, user_id))

    def do_3(self, arg):
        user_id, = self.parse(arg, '-u:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, PrintMedalsTask, user_id))

    def do_4(self, arg):
        user_id, = self.parse(arg, '-u:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, PrintMainBiliDailyJobTask, user_id))

    def do_5(self, arg):
        user_id, = self.parse(arg, '-u:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, PrintLiveBiliDailyJobTask, user_id))

    def do_6(self, arg):
        user_id, = self.parse(arg, '-u:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, PrintMainBiliUserInfoTask, user_id))

    def do_7(self, arg):
        user_id, = self.parse(arg, '-u:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, PrintLiveBiliUserInfoTask, user_id))

    def do_8(self, arg):
        user_id, = self.parse(arg, '-u:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, PrintJudgeTask, user_id))

    def do_9(self, arg):
        user_id, = self.parse(arg, '-u:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, PrintUserStatusTask, user_id))

    def do_11(self, arg):
        user_id, = self.parse(arg, '-u:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, PrintCapsuleTask, user_id))

    def do_12(self, arg):
        user_id, num_opened = self.parse(arg, '-u:-n:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, OpenCapsuleTask, user_id, num_opened))

    def do_13(self, arg):
        real_roomid, = self.parse(arg, '-p:')
        self.exec_func_threads(
            FuncCore(notifier.exec_func, UtilsTask.get_real_roomid, real_roomid))

    def do_14(self, arg):
        user_id, msg, real_roomid = self.parse(arg, '-u:-m:-p:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, SendDanmuTask, user_id, msg, real_roomid))

    def do_15(self, arg):
        # set_roomid=True also updates the console default room.
        real_roomid, = self.parse(arg, '-p:', set_roomid=True)
        self.exec_func_threads(
            FuncCore(self._printer_danmu.reset_roomid, real_roomid))

    def do_16(self, arg):
        # -c T enables danmu printing; anything else disables it.
        ctrl, = self.parse(arg, '-c:')
        if ctrl == 'T':
            self.exec_func_threads(
                FuncCore(printer.control_printer, True))
        else:
            self.exec_func_threads(
                FuncCore(printer.control_printer, False))

    def do_21(self, arg):
        real_roomid, num_max = self.parse(arg, '-p:-n:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, SendLatiaoTask, real_roomid, num_max))

    def do_22(self, arg):
        real_roomid, num_wanted = self.parse(arg, '-p:-n:')
        self.exec_func_threads(
            FuncCore(notifier.exec_task, BuyLatiaoTask, real_roomid, num_wanted))

    def do_23(self, arg):
        user_id, coin_type, real_roomid = self.parse(arg, '-u:-c:-p:')  # coin_type = 'silver' / 'metal'
        self.exec_func_threads(
            FuncCore(notifier.exec_task, BuyMedalTask, user_id, real_roomid, coin_type))

    @staticmethod
    def fetch_real_roomid(room_id):
        # Deferred lookup; resolved when the enclosing FuncCore executes.
        return FuncCore(notifier.exec_func, UtilsTask.get_real_roomid, room_id)

    # Executed directly; no user_id required.
    def exec_func_threads(self, func_core: FuncCore):
        # Called from the Cmd thread; hop onto the asyncio loop thread.
        asyncio.run_coroutine_threadsafe(self.exec_func(func_core), self.loop)

    @staticmethod
    async def exec_func(func_core: FuncCore):
        await func_core.exec()
| StarcoderdataPython |
4963520 | """
This module contains various utility functions
"""
import os
def get_url_base() -> str:
    """
    Returns the base URL for the API which can be overridden from the URL_BASE environment variable
    """
    # os.getenv's default argument expresses the override in one step while
    # preserving the original behaviour: any set value (even an empty
    # string) wins; only an unset variable falls back to the default.
    return os.getenv(
        'URL_BASE',
        "https://battleshapi.pythonanywhere.com/api/aircraft_carrier",
    )
| StarcoderdataPython |
11229120 | from __future__ import unicode_literals
import datetime
from django.core.validators import RegexValidator
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, exceptions
from waldur_ansible.common import serializers as common_serializers
from waldur_core.core import models as core_models, serializers as core_serializers
from waldur_core.structure import permissions as structure_permissions, serializers as structure_serializers, models as structure_models
from . import models, utils
# Maps each request model class to the plain-text "request_type" label
# exposed by the serializers below (see PythonManagementRequestMixin).
REQUEST_TYPES_PLAIN_NAMES = {
    models.PythonManagement: 'overall',
    models.PythonManagementInitializeRequest: 'initialization',
    models.PythonManagementSynchronizeRequest: 'synchronization',
    models.PythonManagementFindVirtualEnvsRequest: 'virtual_envs_search',
    models.PythonManagementFindInstalledLibrariesRequest: 'installed_libraries_search',
    models.PythonManagementDeleteRequest: 'python_management_deletion',
    models.PythonManagementDeleteVirtualEnvRequest: 'virtual_environment_deletion',
}
# Allowed characters for virtual-env directory and library names.
directory_and_library_allowed_pattern = '^[a-zA-Z0-9\-_]+$'
class InstalledPackageSerializer(core_serializers.AugmentedSerializerMixin, serializers.HyperlinkedModelSerializer):
    """Read serializer for a single library installed in a virtual env."""

    # Library names are restricted to the same safe charset as directories.
    name = serializers.RegexField(directory_and_library_allowed_pattern)

    class Meta(object):
        model = models.InstalledLibrary
        fields = ('name', 'version', 'uuid',)
        read_only_fields = ('uuid',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
class VirtualEnvironmentSerializer(core_serializers.AugmentedSerializerMixin, serializers.HyperlinkedModelSerializer):
    """Read serializer for a virtual environment and its installed libraries."""

    name = serializers.RegexField(directory_and_library_allowed_pattern)
    installed_libraries = InstalledPackageSerializer(many=True)

    class Meta(object):
        model = models.VirtualEnvironment
        fields = ('name', 'uuid', 'installed_libraries', 'jupyter_hub_global',)
        read_only_fields = ('uuid',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
class PythonManagementRequestMixin(common_serializers.BaseApplicationSerializer):
    """Common fields for all python-management request serializers.

    Subclasses override ``Meta.model`` (it is ``NotImplemented`` here) and
    may extend ``Meta.fields``.
    """

    request_type = serializers.SerializerMethodField()
    state = serializers.SerializerMethodField()
    output = serializers.SerializerMethodField()

    class Meta(object):
        model = NotImplemented
        fields = ('uuid', 'output', 'state', 'created', 'modified', 'request_type',)
        read_only_fields = ('uuid', 'output', 'state', 'created', 'modified', 'request_type',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    def get_output(self, obj):
        # The (potentially large) ansible output is only serialized when the
        # view opts in via the 'select_output' context flag.
        if self.context.get('select_output'):
            return obj.output
        else:
            return None

    def get_request_type(self, obj):
        # Plain-text label keyed by the concrete request model class.
        return REQUEST_TYPES_PLAIN_NAMES.get(type(obj))

    def get_state(self, obj):
        return obj.human_readable_state
class PythonManagementInitializeRequestSerializer(PythonManagementRequestMixin):
    """Serializer for environment initialization requests."""
    class Meta(PythonManagementRequestMixin.Meta):
        model = models.PythonManagementInitializeRequest


class PythonManagementFindVirtualEnvsRequestSerializer(PythonManagementRequestMixin):
    """Serializer for virtual-environment discovery requests."""
    class Meta(PythonManagementRequestMixin.Meta):
        model = models.PythonManagementFindVirtualEnvsRequest


class PythonManagementFindInstalledLibrariesRequestSerializer(PythonManagementRequestMixin):
    """Serializer for installed-library discovery requests (per virtual env)."""
    class Meta(PythonManagementRequestMixin.Meta):
        model = models.PythonManagementFindInstalledLibrariesRequest
        fields = PythonManagementRequestMixin.Meta.fields + ('virtual_env_name',)


class PythonManagementDeleteRequestSerializer(PythonManagementRequestMixin):
    """Serializer for whole python-management deletion requests."""
    class Meta(PythonManagementRequestMixin.Meta):
        model = models.PythonManagementDeleteRequest


class PythonManagementDeleteVirtualEnvRequestSerializer(PythonManagementRequestMixin):
    """Serializer for single virtual-environment deletion requests."""
    class Meta(PythonManagementRequestMixin.Meta):
        model = models.PythonManagementDeleteVirtualEnvRequest
        fields = PythonManagementRequestMixin.Meta.fields + ('virtual_env_name',)


class PythonManagementSynchronizeRequestSerializer(PythonManagementRequestMixin):
    """Serializer for library install/remove synchronization requests."""

    # JSON payloads describing which libraries to (un)install.
    libraries_to_install = serializers.JSONField(default=dict)
    libraries_to_remove = serializers.JSONField(default=dict)

    class Meta(PythonManagementRequestMixin.Meta):
        model = models.PythonManagementSynchronizeRequest
        fields = PythonManagementRequestMixin.Meta.fields \
            + ('libraries_to_install', 'libraries_to_remove', 'virtual_env_name')
class PythonManagementSerializer(
        common_serializers.BaseApplicationSerializer,
        structure_serializers.PermissionFieldFilteringMixin):
    """Main serializer for PythonManagement resources.

    The exposed ``state`` is an aggregate: it inspects the latest request of
    each type and reports the most relevant in-progress/erred state, or OK
    when nothing is pending.
    """

    # Request states that count as "still running".
    REQUEST_IN_PROGRESS_STATES = (core_models.StateMixin.States.CREATION_SCHEDULED, core_models.StateMixin.States.CREATING)

    state = serializers.SerializerMethodField()
    virtual_environments = VirtualEnvironmentSerializer(many=True)
    virtual_envs_dir_path = serializers.CharField(max_length=255, validators=[
        RegexValidator(
            regex=directory_and_library_allowed_pattern,
            message=_('Virtual environments root directory has invalid format!'),
        ),
    ])
    name = serializers.SerializerMethodField()
    type = serializers.SerializerMethodField()
    instance = core_serializers.GenericRelatedField(
        related_models=structure_models.ResourceMixin.get_all_models(), required=False)
    instance_name = serializers.ReadOnlyField(source='instance.name')

    class Meta(object):
        model = models.PythonManagement
        fields = ('url', 'uuid', 'virtual_envs_dir_path', 'system_user',
                  'state', 'created', 'modified', 'virtual_environments', 'python_version', 'name', 'type', 'instance', 'instance_name',)
        read_only_fields = ('request_states', 'created', 'modified', 'python_version', 'type', 'name', 'url',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    def get_name(self, python_management):
        # Human-readable display name; tolerates a deleted instance.
        instance_name = python_management.instance.name if python_management.instance else 'removed instance'
        return 'Python Management - %s - %s' % (instance_name, python_management.virtual_envs_dir_path)

    def get_type(self, python_management):
        return 'python_management'

    def get_filtered_field_names(self):
        # PermissionFieldFilteringMixin hook: filter by project membership.
        return 'project'

    def get_state(self, python_management):
        """Aggregate request states into one human-readable state string."""
        states = []
        initialize_request = utils.execute_safely(
            lambda: models.PythonManagementInitializeRequest.objects.filter(python_management=python_management).latest('id'))
        # An active/erred initialization dominates everything else.
        # NOTE(review): this branch returns a one-element list while the
        # other paths return a plain string — confirm clients tolerate both.
        if initialize_request and self.is_in_progress_or_errored(initialize_request):
            return [self.build_state(initialize_request)]
        states.extend(self.get_request_state(
            utils.execute_safely(lambda: models.PythonManagementDeleteRequest.objects.filter(python_management=python_management).latest('id'))))
        states.extend(
            self.get_request_state(
                utils.execute_safely(
                    lambda: models.PythonManagementDeleteVirtualEnvRequest.objects.filter(python_management=python_management).latest('id'))))
        states.extend(self.build_search_requests_states(python_management))
        states.extend(self.build_states_from_last_group_of_the_request(python_management, models.PythonManagementSynchronizeRequest))
        if not states:
            return core_models.StateMixin(state=core_models.StateMixin.States.OK).human_readable_state
        else:
            # Priority: Creating > Creation Scheduled > Erred.
            creation_scheduled_state = core_models.StateMixin(state=core_models.StateMixin.States.CREATION_SCHEDULED).human_readable_state
            creating_state = core_models.StateMixin(state=core_models.StateMixin.States.CREATING).human_readable_state
            erred_state = core_models.StateMixin(state=core_models.StateMixin.States.ERRED).human_readable_state
            if creating_state in states:
                return creating_state
            elif creation_scheduled_state in states:
                return creation_scheduled_state
            elif erred_state in states:
                return erred_state

    def build_search_requests_states(self, python_management):
        """Collect states of the latest discovery (search) requests."""
        states = []
        states.extend(
            self.get_request_state(
                utils.execute_safely(
                    lambda: models.PythonManagementFindVirtualEnvsRequest.objects
                    .filter(python_management=python_management).latest('id'))))
        states.extend(self.build_states_from_last_group_of_the_request(python_management, models.PythonManagementFindInstalledLibrariesRequest))
        return states

    def get_request_state(self, request):
        # Zero-or-one element list so callers can extend() unconditionally.
        if request and self.is_in_progress_or_errored(request):
            return [self.build_state(request)]
        else:
            return []

    def build_states_from_last_group_of_the_request(self, python_management, request_class):
        """States of the most recent burst of requests of *request_class*."""
        states = []
        requests = request_class.objects.filter(python_management=python_management).order_by('-id')
        last_request_group = self.get_last_requests_group(requests)
        for request in last_request_group:
            if self.is_in_progress_or_errored(request):
                states.append(self.build_state(request))
        return states

    def get_last_requests_group(self, requests):
        """Return the newest requests created within one minute of each other.

        *requests* must be ordered newest-first; iteration stops at the
        first request older than (newest.created - 1 minute).
        """
        last_request_group = []
        last_request_time = None
        for request in requests:
            if not last_request_time:
                last_request_time = request.created - datetime.timedelta(minutes=1)
            if request.created < last_request_time:
                break
            last_request_group.append(request)
        return last_request_group

    def is_in_progress_or_errored(self, request):
        return request.state in PythonManagementSerializer.REQUEST_IN_PROGRESS_STATES \
            or request.state == core_models.StateMixin.States.ERRED

    def build_state(self, request, state=None):
        # *state* optionally overrides which object supplies the label.
        request_state = state if state else request
        return request_state.human_readable_state

    @transaction.atomic
    def create(self, validated_data):
        """Create a PythonManagement bound to the instance's project."""
        python_management = models.PythonManagement(
            user=validated_data.get('user'),
            virtual_envs_dir_path=validated_data.get('virtual_envs_dir_path'),
            instance=validated_data.get('instance'),
            project=validated_data.get('instance').service_project_link.project,
            python_version='3',
            system_user=validated_data.get('system_user'))
        python_management.save()
        return python_management

    def validate(self, attrs):
        super(PythonManagementSerializer, self).validate(attrs)
        if not self.instance:
            # Creation only: record the requesting user and ensure the
            # target resource is a virtual machine.
            attrs['user'] = self.context['request'].user
            self.check_resource_type(attrs)
        self.check_project_permissions(attrs)
        return attrs

    def check_resource_type(self, attrs):
        if not issubclass(type(attrs['instance']), structure_models.VirtualMachine):
            raise exceptions.ValidationError(_('Please specify a virtual machine, not just any resource.'))

    def check_project_permissions(self, attrs):
        # On update the project comes from the stored object; on create it
        # comes from the submitted instance.
        if self.instance:
            project = self.instance.project
        else:
            project = attrs['instance'].service_project_link.project
        if not structure_permissions._has_admin_access(self.context['request'].user, project):
            raise exceptions.PermissionDenied()
class CachedRepositoryPythonLibrarySerializer(
        core_serializers.AugmentedSerializerMixin,
        serializers.HyperlinkedModelSerializer):
    """Read serializer for cached PyPI library names (autocomplete source)."""

    class Meta(object):
        model = models.CachedRepositoryPythonLibrary
        fields = ('name', 'uuid')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
| StarcoderdataPython |
9750549 | # merge linked lists into a single list in sorted order
from dsame.linkedLists.problems.BaseLinkedList import BaseLinkedList
def merge_lls_recursive(a, b):
    """Merge two sorted linked lists in place and return the merged head.

    Splices the existing nodes together in non-decreasing order of ``data``;
    no new nodes are allocated. Iterative formulation of the original
    recursive merge (same resulting node order, no recursion-depth limit).
    """
    if not a:
        return b
    if not b:
        return a
    # Pick the smaller head (ties favour *a*, matching <= in the original).
    if a.data <= b.data:
        head = a
        a = a.next
    else:
        head = b
        b = b.next
    tail = head
    while a and b:
        if a.data <= b.data:
            tail.next = a
            a = a.next
        else:
            tail.next = b
            b = b.next
        tail = tail.next
    # Attach whichever list still has nodes left.
    tail.next = a if a else b
    return head
if __name__ == '__main__':
    # Demo: merge a pre-initialized list with [2, 8, 9] and print the result.
    bll1 = BaseLinkedList()
    bll2 = BaseLinkedList()
    head1 = bll1.initializebll(bll1)
    head2 = bll2.insert_at_end(bll2.head, 2)
    head2 = bll2.insert_at_end(head2, 8)
    head2 = bll2.insert_at_end(head2, 9)
    merged = merge_lls_recursive(head2, head1)
    # Walk the merged list, printing one value per line.
    while merged:
        print(merged.data)
        merged = merged.next
6466957 | """Test the LTI select view."""
from html import unescape
import json
from logging import Logger
import random
import re
from unittest import mock
import uuid
from django.test import TestCase
from django.utils import timezone
from rest_framework_simplejwt.tokens import AccessToken
from ..factories import DocumentFactory, PlaylistFactory, VideoFactory
from ..models import Playlist
from .utils import generate_passport_and_signed_lti_parameters
# We don't enforce arguments documentation in tests
# pylint: disable=unused-argument,too-many-lines
class SelectLTIViewTestCase(TestCase):
"""Test the select LTI view in the ``core`` app of the Marsha project."""
maxDiff = None
def test_views_lti_select_student(self):
    """Error 403 raised if a student initiates the request."""
    # Sign the LTI launch with the student role; content selection is
    # restricted to instructors/administrators.
    lti_parameters, _ = generate_passport_and_signed_lti_parameters(
        url="http://testserver/lti/select/",
        lti_parameters={
            "roles": "student",
            "content_item_return_url": "https://lti-consumer.site/lti",
            "context_id": "sent_lti_context_id",
        },
    )
    response = self.client.post(
        "/lti/select/", lti_parameters, HTTP_REFERER="https://testserver"
    )
    self.assertEqual(response.status_code, 403)
def test_views_lti_select(self):
    """Validate the context passed to the frontend app for a LTI Content selection."""
    lti_consumer_parameters = {
        "roles": random.choice(["instructor", "administrator"]),
        "content_item_return_url": "https://lti-consumer.site/lti",
        "context_id": "sent_lti_context_id",
    }
    lti_parameters, passport = generate_passport_and_signed_lti_parameters(
        url="http://testserver/lti/select/",
        lti_parameters=lti_consumer_parameters,
    )
    # One existing video and document in the playlist matching the launch.
    resolutions = [144]
    playlist = PlaylistFactory(
        lti_id=lti_parameters.get("context_id"),
        consumer_site=passport.consumer_site,
    )
    video = VideoFactory(
        playlist=playlist,
        uploaded_on=timezone.now(),
        resolutions=resolutions,
    )
    document = DocumentFactory(
        playlist=playlist,
        uploaded_on=timezone.now(),
    )
    response = self.client.post(
        "/lti/select/",
        lti_parameters,
        HTTP_REFERER="http://testserver",
    )
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, "<html>")
    # The frontend context is embedded as an HTML-escaped JSON attribute.
    match = re.search(
        '<div id="marsha-frontend-data" data-context="(.*)">',
        response.content.decode("utf-8"),
    )
    context = json.loads(unescape(match.group(1)))
    # Existing resources are listed with their LTI launch URLs.
    self.assertEqual(
        context.get("videos")[0].get("lti_url"),
        f"http://testserver/lti/videos/{video.id}",
    )
    self.assertEqual(
        context.get("documents")[0].get("lti_url"),
        f"http://testserver/lti/documents/{document.id}",
    )
    # New-resource URLs share one freshly generated v4 UUID.
    new_document_url = context.get("new_document_url")
    new_uuid = re.search(
        "http://testserver/lti/documents/(.*)", new_document_url
    ).group(1)
    self.assertEqual(uuid.UUID(new_uuid).version, 4)
    self.assertEqual(
        new_document_url, f"http://testserver/lti/documents/{new_uuid}"
    )
    self.assertEqual(
        context.get("new_video_url"), f"http://testserver/lti/videos/{new_uuid}"
    )
    # The select form data is wrapped in a JWT echoing the launch params.
    form_data = context.get("lti_select_form_data")
    jwt_token = AccessToken(form_data.get("jwt"))
    lti_parameters.update({"lti_message_type": "ContentItemSelection"})
    self.assertEqual(jwt_token.get("lti_select_form_data"), lti_parameters)
def test_views_lti_select_behind_tls_termination_proxy(self):
    """Validate the context passed to the frontend app for a LTI Content selection."""
    # Same scenario as test_views_lti_select, but the request is signed for
    # https and carries X-Forwarded-Proto, as set by a TLS-terminating
    # proxy; every generated URL must then use the https scheme.
    lti_consumer_parameters = {
        "roles": random.choice(["instructor", "administrator"]),
        "content_item_return_url": "https://lti-consumer.site/lti",
        "context_id": "sent_lti_context_id",
    }
    lti_parameters, passport = generate_passport_and_signed_lti_parameters(
        url="https://testserver/lti/select/",
        lti_parameters=lti_consumer_parameters,
    )
    resolutions = [144]
    playlist = PlaylistFactory(
        lti_id=lti_parameters.get("context_id"),
        consumer_site=passport.consumer_site,
    )
    video = VideoFactory(
        playlist=playlist,
        uploaded_on=timezone.now(),
        resolutions=resolutions,
    )
    document = DocumentFactory(
        playlist=playlist,
        uploaded_on=timezone.now(),
    )
    response = self.client.post(
        "/lti/select/",
        lti_parameters,
        HTTP_REFERER="http://testserver",
        HTTP_X_FORWARDED_PROTO="https",
    )
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, "<html>")
    match = re.search(
        '<div id="marsha-frontend-data" data-context="(.*)">',
        response.content.decode("utf-8"),
    )
    context = json.loads(unescape(match.group(1)))
    self.assertEqual(
        context.get("videos")[0].get("lti_url"),
        f"https://testserver/lti/videos/{video.id}",
    )
    self.assertEqual(
        context.get("documents")[0].get("lti_url"),
        f"https://testserver/lti/documents/{document.id}",
    )
    new_document_url = context.get("new_document_url")
    new_uuid = re.search(
        "https://testserver/lti/documents/(.*)", new_document_url
    ).group(1)
    self.assertEqual(uuid.UUID(new_uuid).version, 4)
    self.assertEqual(
        new_document_url, f"https://testserver/lti/documents/{new_uuid}"
    )
    self.assertEqual(
        context.get("new_video_url"), f"https://testserver/lti/videos/{new_uuid}"
    )
    form_data = context.get("lti_select_form_data")
    jwt_token = AccessToken(form_data.get("jwt"))
    lti_parameters.update({"lti_message_type": "ContentItemSelection"})
    self.assertEqual(jwt_token.get("lti_select_form_data"), lti_parameters)
    def test_views_lti_select_no_playlist(self):
        """A playlist should be created if it does not exist for the current consumer site.

        Also checks idempotence: a second identical request must reuse the
        playlist created by the first one instead of creating another.
        """
        lti_parameters, passport = generate_passport_and_signed_lti_parameters(
            url="http://testserver/lti/select/",
            lti_parameters={
                "roles": random.choice(["instructor", "administrator"]),
                "content_item_return_url": "https://lti-consumer.site/lti",
                "context_id": "sent_lti_context_id",
            },
        )
        response = self.client.post(
            "/lti/select/", lti_parameters, HTTP_REFERER="https://testserver"
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<html>")
        content = response.content.decode("utf-8")
        match = re.search(
            '<div id="marsha-frontend-data" data-context="(.*)">', content
        )
        context = json.loads(unescape(match.group(1)))
        # A single playlist must have been created, keyed on the LTI context id.
        self.assertEqual(Playlist.objects.count(), 1)
        self.assertEqual(
            passport.consumer_site.playlists.first().lti_id,
            lti_parameters.get("context_id"),
        )
        # The fresh playlist holds no resources yet.
        self.assertEqual(len(context.get("videos")), 0)
        self.assertEqual(len(context.get("documents")), 0)
        # second call should not create new playlist
        self.client.post(
            "/lti/select/", lti_parameters, HTTP_REFERER="https://testserver"
        )
        self.assertEqual(Playlist.objects.count(), 1)
def test_views_lti_select_static_base_url(self):
"""Meta tag public-path should be the STATIC_URL settings with js/build/ at the end."""
lti_parameters, _ = generate_passport_and_signed_lti_parameters(
url="http://testserver/lti/select/",
lti_parameters={
"roles": random.choice(["instructor", "administrator"]),
"content_item_return_url": "https://lti-consumer.site/lti",
"context_id": "sent_lti_context_id",
},
)
response = self.client.post(
"/lti/select/", lti_parameters, HTTP_REFERER="https://testserver"
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<html>")
self.assertContains(
response, '<meta name="public-path" value="/static/js/build/" />'
)
@mock.patch.object(Logger, "warning")
def test_views_lti_select_wrong_signature(self, mock_logger):
"""Wrong signature should display an error."""
lti_parameters, _ = generate_passport_and_signed_lti_parameters(
url="http://testserver/lti/select/",
lti_parameters={
"roles": random.choice(["instructor", "administrator"]),
"content_item_return_url": "https://lti-consumer.site/lti",
"context_id": "sent_lti_context_id",
},
)
lti_parameters["oauth_signature"] = "{:s}a".format(
lti_parameters["oauth_signature"]
)
response = self.client.post(
"/lti/select/", lti_parameters, HTTP_REFERER="https://testserver"
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<html>")
content = response.content.decode("utf-8")
match = re.search(
'<div id="marsha-frontend-data" data-context="(.*)">', content
)
context = json.loads(unescape(match.group(1)))
self.assertEqual(context.get("state"), "error")
mock_logger.assert_called_once_with(
"OAuth error: Please check your key and secret"
)
@mock.patch.object(Logger, "warning")
def test_views_lti_select_wrong_referer(self, mock_logger):
"""Wrong referer should display an error."""
lti_parameters, _ = generate_passport_and_signed_lti_parameters(
url="http://testserver/lti/select/",
lti_parameters={
"roles": random.choice(["instructor", "administrator"]),
"content_item_return_url": "https://lti-consumer.site/lti",
"context_id": "sent_lti_context_id",
},
)
response = self.client.post(
"/lti/select/", lti_parameters, HTTP_REFERER="https://wrongserver"
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<html>")
content = response.content.decode("utf-8")
match = re.search(
'<div id="marsha-frontend-data" data-context="(.*)">', content
)
context = json.loads(unescape(match.group(1)))
self.assertEqual(context.get("state"), "error")
mock_logger.assert_called_once_with(
"Host domain (wrongserver) does not match registered passport (testserver)."
)
| StarcoderdataPython |
1969379 | <reponame>tzole1155/moai<gh_stars>1-10
from moai.validation.metrics.image.psnr import PSNR
__all__ = [
"PSNR",
] | StarcoderdataPython |
5174265 | <filename>src/binheap.py
"""Implements a max binary heap."""
class BinHeap(object):
    """Array-backed binary heap, max-ordered by default.

    A max binary heap is a complete binary tree where each level of the
    tree is greater than the level below it. A min heap has the lowest
    values at the top. The backing list keeps index 0 unused so that the
    children of node ``i`` sit at ``2*i`` and ``2*i + 1``.
    """

    def __init__(self, iterable=None, is_max_heap=True):
        """Construct a new binary heap.

        :param iterable: optional iterable of initial values. Any iterable
            is accepted (list, tuple, str, set, generator, ...); the
            previous implementation rejected everything but str/list/tuple
            for no structural reason.
        :param is_max_heap: order as a max heap when True, min heap otherwise.
        :raises TypeError: if ``iterable`` is neither None nor iterable.
        """
        self._values = [None]  # index 0 is a sentinel; heap data starts at 1
        self.is_max_heap = is_max_heap
        if iterable is None:
            return
        try:
            items = iter(iterable)
        except TypeError:
            raise TypeError('Iterable must be an iterable of values.')
        for item in items:
            self.push(item)

    @property
    def _size(self):
        """Get the number of items in the binary heap."""
        return len(self._values) - 1

    def push(self, val):
        """Put a new value into the binary heap.

        The value is appended at the end and sifted up while it outranks
        its parent (``>`` for max heaps, ``<`` for min heaps).
        """
        self._values.append(val)
        i = self._size
        while self._pi(i) and self._comp(val, self._values[self._pi(i)]):
            p = self._pi(i)
            self._values[p], self._values[i] = self._values[i], self._values[p]
            i = p

    def pop(self):
        """Remove and return the top of the binary heap.

        The last leaf replaces the root and is sifted down, always swapping
        with the higher-ranked child so the heap property is restored.

        :raises IndexError: if the heap is empty.
        """
        if len(self._values) < 2:
            raise IndexError('Can not pop from empty heap.')
        if len(self._values) == 2:
            return self._values.pop()
        top = self._values[1]
        x = self._values[1] = self._values.pop()
        i = 1
        # One-element slices act as "maybe" values: empty when the child
        # index falls outside the heap.
        lc = self._values[self._lci(i):self._lci(i) + 1]
        rc = self._values[self._rci(i):self._rci(i) + 1]
        while (lc and self._comp(lc[0], x)) or (rc and self._comp(rc[0], x)):
            if rc and self._comp(rc[0], lc[0]):
                c = self._rci(i)
            else:
                c = self._lci(i)
            self._values[c], self._values[i] = self._values[i], self._values[c]
            i = c
            lc = self._values[self._lci(i):self._lci(i) + 1]
            rc = self._values[self._rci(i):self._rci(i) + 1]
        return top

    def _pi(self, idx):
        """Find the index for the parent of the given index."""
        return idx // 2

    def _lci(self, idx):
        """Find the index for the left child of the given index."""
        return idx * 2

    def _rci(self, idx):
        """Find the index for the right child of the given index."""
        return idx * 2 + 1

    def _comp(self, val1, val2):
        """Compare two values based on if the heap is max or min.

        In max heap, val1 > val2. In min heap, val1 < val2.
        """
        if self.is_max_heap:
            return val1 > val2
        return val1 < val2
| StarcoderdataPython |
223440 | <filename>backend/src/account/permissions.py
from rest_framework import permissions
class AdminOnly(permissions.BasePermission):
    """
    Only allow admin users to access this endpoint.
    """

    # DRF reads ``message`` from the permission class to build the 403
    # response detail; as a local variable inside has_permission (the
    # previous form) it was dead code and never reached the client.
    message = "Non-admin user not allowed"

    def has_permission(self, request, view):
        """Return True when the requesting user is flagged as admin."""
        return request.user.is_admin
class StaffOnly(permissions.BasePermission):
    """
    Only allow staff users (driver and manager) to access this endpoint.
    """

    # Class attribute so DRF can surface it in the 403 response; the
    # previous local assignment inside has_permission was dead code.
    message = "Non-staff user not allowed"

    def has_permission(self, request, view):
        """Return True when the requesting user is flagged as staff."""
        return request.user.is_staff
| StarcoderdataPython |
9664544 | """
========================================
Cell Tracking (:mod:`tracking.core`)
========================================
.. currentmodule:: tracking.core
TITAN cell tracking
================
.. autosummary::
:toctree: generated/
Cell_tracks
"""
#from .cell_tracking import Cell_tracks
from .tracks import Cell_tracks
from .visualization import animate
from . import testing
__all__ = [s for s in dir() if not s.startswith('_')]
| StarcoderdataPython |
3475163 | <filename>envs/deadlineSchedulingEnv.py<gh_stars>1-10
'''
Environment to calculate the Whittle index values as a deep reinforcement
learning environment modelled after the OpenAi Gym API.
From the paper:
"Deadline Scheduling as Restless Bandits"
'''
import gym
import math
import time
import torch
import random
import datetime
import numpy as np
import pandas as pd
from gym import spaces
#from stable_baselines.common.env_checker import check_env #this package throws errors. it's normal. requires python 3.6.
class deadlineSchedulingEnv(gym.Env):
    metadata = {'render.modes': ['human']}
    '''
    Custom Gym environment modelled after "deadline scheduling as restless bandits" paper RMAB description.
    The environment represents one position in the N-length queue.

    Observation: [laxity T, remaining load B] (float32 Box).
    Action: Discrete(2) -- 1 = process the job this step, 0 = idle.
    Rewards are perturbed multiplicatively by a fixed per-state Gaussian
    noise sampled once at construction time (``noiseVector``), so the
    environment is deterministic given the seed.
    '''
    def __init__(self, seed, numEpisodes, episodeLimit, maxDeadline, maxLoad, newJobProb,
                 processingCost, train, batchSize, noiseVar):
        # seed: base seed for both PRNGs below.
        # numEpisodes/episodeLimit: training horizon bookkeeping.
        # maxDeadline/maxLoad: upper bounds used for the state space and
        #   for sampling new job arrivals.
        # newJobProb: probability a new job arrives when the slot is free.
        # processingCost: per-step cost of serving a job (paper's c).
        # train: when True, episodes terminate after episodeLimit steps.
        # batchSize: number of reset() calls that reuse one sampled job
        #   trace (see reset()).
        # noiseVar: variance of the per-state multiplicative reward noise.
        super(deadlineSchedulingEnv, self).__init__()
        self.seed = seed
        self.myRandomPRNG = random.Random(self.seed)
        self.G = np.random.RandomState(self.seed) # create a special PRNG for a class instantiation
        self.observationSize = 2
        self.arm = {0:[1, 1, 1]} # first: laxity T (D in the paper). Second: load B. Third: deadline d. initalized to all ones
        self.newJobProb = newJobProb
        self.noiseVar = noiseVar
        self.numEpisodes = numEpisodes
        self.currentEpisode = 0
        self.episodeTime = 0
        self.episodeLimit = episodeLimit
        self.train = train
        self.processingCost = processingCost
        self.maxDeadline = maxDeadline
        self.maxLoad = maxLoad
        self.batchSize = batchSize
        self.miniBatchCounter = 0
        self.loadIndex = 0

        lowState = np.zeros(self.observationSize, dtype=np.float32)
        highState = np.full(self.observationSize, [self.maxDeadline, self.maxLoad], dtype=np.float32)
        self.action_space = spaces.Discrete(2)
        self.state_space = spaces.Box(lowState, highState, dtype=np.float32)

        self.createStateTable()
        # gives the added noise value for each state sampled from a Gaussian distribution
        # (first half of the vector is used for action 1, second half for action 0)
        self.noiseVector = self.G.normal(0, np.sqrt(self.noiseVar), np.shape(self.stateArray)[0]*2)

    def _calReward(self, action, state):
        ''' separate function that only retrieves the reward without changing the state.
        For sampling the reward function.

        Mirrors _calRewardAndState's reward logic on a local copy of the
        state so self.arm is left untouched. NOTE(review): the ``state``
        parameter is unused; the current arm state is read instead.
        '''
        currentState = np.array([self.arm[0][0], self.arm[0][1]], dtype=np.float32)
        if action == 1:
            noise = self.noiseVector[self._findStateIndex(currentState)]
            if (currentState[1] == 0) and (currentState[0] == 0):
                # empty slot: nothing to process
                reward = 0
            elif (currentState[1] >= 0) and (currentState[0] > 1):
                # job with slack remaining: pay processing cost, serve one unit
                reward = (1 - self.processingCost)
                currentState[0] -= 1
                currentState[1] -= 1
                if currentState[1] < 0:
                    reward = 0
            elif (currentState[1] >= 0) and (currentState[0] == 1):
                # last step before the deadline: quadratic penalty on unmet load
                reward = ((1 - self.processingCost) - 0.2*(((currentState[1]) - 1)**2))
                if (currentState[1] == 0):
                    reward = 0
        elif action == 0:
            noise = self.noiseVector[self._findStateIndex(currentState)+np.shape(self.stateArray)[0]]
            if (currentState[1] == 0) and (currentState[0] == 0):
                reward = 0
            elif (currentState[1] >= 0) and (currentState[0] > 1):
                reward = 0
            elif (currentState[1] >= 0) and (currentState[0] == 1):
                # idling at the deadline: full quadratic penalty on leftover load
                reward = -0.2*(((currentState[1]))**2)
        # multiplicative state/action-dependent noise
        reward = reward + noise*reward
        return reward

    def _calRewardAndState(self, action):
        ''' function to calculate the reward and next state.

        Same reward structure as _calReward, but mutates self.arm and,
        whenever the job slot empties (deadline hit or load finished),
        draws the next arrival via _newArrival().
        '''
        currentState = np.array([self.arm[0][0], self.arm[0][1]], dtype=np.float32)
        if action == 1:
            noise = self.noiseVector[self._findStateIndex(currentState)]
            if (self.arm[0][1] == 0) and (self.arm[0][0] == 0):
                reward = 0
                nextState = self._newArrival()
            elif (self.arm[0][1] >= 0) and (self.arm[0][0] > 1):
                reward = (1 - self.processingCost)
                self.arm[0][0] -= 1
                self.arm[0][1] -= 1
                if self.arm[0][1] < 0:
                    self.arm[0][1] = 0
                    reward = 0
                nextState = np.array([self.arm[0][0], self.arm[0][1]], dtype=np.float32)
            elif (self.arm[0][1] >= 0) and (self.arm[0][0] == 1):
                reward = ((1 - self.processingCost) - 0.2*(((self.arm[0][1]) - 1)**2))
                if (self.arm[0][1] == 0):
                    reward = 0
                # job leaves the system at its deadline; slot becomes free
                self.arm[0][1] = 0
                self.arm[0][0] = 0
                nextState = self._newArrival()
        elif action == 0:
            noise = self.noiseVector[self._findStateIndex(currentState)+np.shape(self.stateArray)[0]]
            if (self.arm[0][1] == 0) and (self.arm[0][0] == 0):
                reward = 0
                nextState = self._newArrival()
            elif (self.arm[0][1] >= 0) and (self.arm[0][0] > 1):
                reward = 0
                self.arm[0][0] -= 1
                nextState = np.array([self.arm[0][0], self.arm[0][1]], dtype=np.float32)
            elif (self.arm[0][1] >= 0) and (self.arm[0][0] == 1):
                reward = -0.2*(((self.arm[0][1]))**2)
                self.arm[0][1] = 0
                self.arm[0][0] = 0
                nextState = self._newArrival()
        reward = reward + noise*reward
        return nextState, reward

    def _findStateIndex(self, state):
        # Row index of ``state`` in the precomputed stateArray, used to
        # address the per-state noise vector.
        stateLocation = np.where((self.stateArray == state).all(axis=1))[0][0]
        return stateLocation

    def createStateTable(self):
        # Enumerate every [T, B] pair once so states can be mapped to
        # stable indices (ordering: B-major, T-minor).
        stateArray = []
        for B in range(self.maxLoad+1):
            for T in range(self.maxDeadline+1):
                state = [T,B]
                stateArray.append(state)
        self.stateArray = np.array(stateArray, dtype=np.float32)

    def step(self, action):
        ''' standard Gym function for taking an action. Provides the next state, reward, and episode termination signal.'''
        assert self.action_space.contains(action)
        assert action in [0,1]
        self.episodeTime += 1

        nextState, reward = self._calRewardAndState(action)
        if self.train:
            done = bool(self.episodeTime == self.episodeLimit)
        else:
            # evaluation mode never terminates on its own
            done = False
        if done:
            self.currentEpisode += 1
            self.episodeTime = 0
        if self.train == False:
            self.currentEpisode = 0
        info = {}

        return nextState, reward, done, info

    def _newArrival(self):
        ''' function for new load arrivals during an episode.

        Consumes the pre-sampled jobList/deadline/load traces (see reset);
        an empty slot is encoded as all-zero arm values.
        '''
        job = self.jobList[self.episodeTime-1]
        if job == 1:
            self.arm[0][2] = self.deadline[self.loadIndex]
            self.arm[0][0] = self.timeUntilDeadline[self.loadIndex]
            self.arm[0][1] = self.load[self.loadIndex]
            self.loadIndex += 1
        elif job == 0:
            self.arm[0][2] = 0
            self.arm[0][0] = 0
            self.arm[0][1] = 0
        else:
            print('ERROR. Value not in range...')
            exit(1)
        state = np.array([self.arm[0][0], self.arm[0][1]], dtype=np.float32)
        return state

    def reset(self):
        ''' standard Gym function for reseting the state for a new episode.

        A fresh job trace (arrivals, deadlines, loads) is sampled only once
        every ``batchSize`` resets; in-between resets replay the same trace
        so mini-batch episodes share identical dynamics.
        '''
        self.loadIndex = 0
        if self.miniBatchCounter % self.batchSize == 0:
            self.jobList = self.G.choice([1,0], p=[self.newJobProb, 1 - self.newJobProb], size=self.episodeLimit)
            self.deadline = self.G.randint(1, self.maxDeadline+1, size=self.episodeLimit)
            self.timeUntilDeadline = self.deadline.copy()
            self.load = self.G.randint(1, self.maxLoad+1, size=self.episodeLimit)
            self.arm[0][2] = self.deadline[0]
            self.arm[0][0] = self.timeUntilDeadline[0]
            self.arm[0][1] = self.load[0]
            self.miniBatchCounter = 0
        else:
            self.arm[0][2] = self.deadline[0]
            self.arm[0][0] = self.timeUntilDeadline[0]
            self.arm[0][1] = self.load[0]
        self.episodeTime = 0
        initialState = np.array([self.arm[0][0], self.arm[0][1]], dtype=np.float32)
        self.loadIndex += 1
        self.miniBatchCounter += 1
        return initialState
#########################################################################################
'''
For environment validation purposes, the below code checks if the nextstate, reward matches
what is expected given a dummy action.
'''
'''
SEED = 50
env = deadlineSchedulingEnv(seed = SEED, numEpisodes = 6, episodeLimit = 20, maxDeadline = 12,
maxLoad=9, newJobProb=0.7, train=True, processingCost = 0.5, batchSize = 1, noiseVar = 0.0)
observation = env.reset()
#check_env(env, warn=True)
x = np.array([1,1,0,0,1])
x = np.tile(x, 10000)
#x = np.random.choice([1,0], size=1000)
n_steps = np.size(x)
start = time.time()
for step in range(n_steps):
nextState, reward, done, info = env.step(x[step])
print(f'action: {x[step]} nextstate: {nextState} reward: {reward} done: {done}')
print("---------------------------------------------------------")
if done:
print(f'Finished episode {env.currentEpisode}/{env.numEpisodes}')
if env.currentEpisode < env.numEpisodes:
nextState = env.reset()
if env.currentEpisode == env.numEpisodes:
break
print(f'-------------------------------------\nDone. Time taken: {time.time() - start:.4f} seconds')
''' | StarcoderdataPython |
9754379 | import logging
from dataclasses import dataclass
from aiohttp import ClientSession
# Mapping from the DWD's raw pollen-load codes (strings such as '0-1' for
# in-between levels) to a monotonic numeric scale 0-6 plus the official
# German description ("Keine Belastung" = no load ... "Hohe Belastung" =
# high load).
VALUE_MAPPING = {
    '0': {
        'value': 0,
        'desc': 'Keine Belastung',
    },
    '0-1': {
        'value': 1,
        'desc': 'Keine bis geringe Belastung',
    },
    '1': {
        'value': 2,
        'desc': 'Geringe Belastung',
    },
    '1-2': {
        'value': 3,
        'desc': 'Geringe bis mittlere Belastung',
    },
    '2': {
        'value': 4,
        'desc': 'Mittlere Belastung',
    },
    '2-3': {
        'value': 5,
        'desc': 'Mittlere bis hohe Belastung',
    },
    '3': {
        'value': 6,
        'desc': 'Hohe Belastung',
    },
}
@dataclass
class DwdPollenInfo:
    """Pollen load for one allergen: raw DWD code, numeric level and
    German description for today and tomorrow."""
    name: str
    today: int
    today_raw: str
    today_desc: str
    tomorrow: int
    tomorrow_raw: str
    tomorrow_desc: str

    @staticmethod
    def from_json(item: dict):
        """Build one DwdPollenInfo per allergen key in *item*."""
        infos = []
        for allergen, days in item.items():
            today_code = days['today']
            tomorrow_code = days['tomorrow']
            infos.append(DwdPollenInfo(
                name=allergen,
                today_raw=today_code,
                today=VALUE_MAPPING[today_code]['value'],
                today_desc=VALUE_MAPPING[today_code]['desc'],
                tomorrow_raw=tomorrow_code,
                tomorrow=VALUE_MAPPING[tomorrow_code]['value'],
                tomorrow_desc=VALUE_MAPPING[tomorrow_code]['desc'],
            ))
        return infos
@dataclass
class DwdPollen:
    """One region/part-region entry of the DWD pollen feed, together with
    feed-level metadata and the parsed per-allergen infos."""
    URL = 'https://opendata.dwd.de/climate_environment/health/alerts/s31fg.json'

    sender: str
    name: str
    last_update: str
    next_update: str
    region_id: str
    region_name: str
    partregion_id: str
    partregion_name: str
    pollen: [DwdPollenInfo]

    @staticmethod
    def from_json(item: dict, sender: str, name: str, last_update: str, next_update: str):
        """Build a DwdPollen record from one region entry of the feed."""
        allergen_infos = DwdPollenInfo.from_json(item['Pollen'])
        return DwdPollen(
            sender=sender,
            name=name,
            last_update=last_update,
            next_update=next_update,
            region_id=item['region_id'],
            region_name=item['region_name'],
            partregion_id=item['partregion_id'],
            partregion_name=item['partregion_name'],
            pollen=allergen_infos,
        )
DEFAULT_SOURCE = DwdPollen
async def get_data(session: ClientSession, *, source=DEFAULT_SOURCE):
    """Fetch DWD pollen info."""
    response = await session.get(source.URL)
    payload = await response.json(content_type=None)

    parsed = []
    for entry in payload['content']:
        try:
            record = source.from_json(
                entry,
                payload['sender'],
                payload['name'],
                payload['last_update'],
                payload['next_update'],
            )
        except KeyError:
            # Malformed entries are skipped, not fatal.
            logging.getLogger(__name__).warning('Got wrong data: %s', entry)
        else:
            parsed.append(record)
    return parsed
| StarcoderdataPython |
3305151 | <reponame>Ry4nW/python-wars
class Solution:
    def solve(self, matrix):
        """Return the index of the leftmost column of *matrix* containing
        a 1, or -1 if no column does (or the matrix is empty).

        The previous version wrapped the scan in a bare ``try/except:
        pass``, which silently swallowed *every* exception (and aborted
        the whole scan on ragged rows); empty and ragged inputs are now
        handled explicitly.
        """
        if not matrix or not matrix[0]:
            return -1
        for col in range(len(matrix[0])):
            for row in matrix:
                # guard against ragged rows shorter than the first one
                if col < len(row) and row[col] == 1:
                    return col
        return -1
| StarcoderdataPython |
8072782 | <reponame>rijalanupraj/halkapan<filename>userprofile/signals.py
# External Import
from django.db.models.signals import post_save, pre_save
from django.conf import settings
from django.dispatch import receiver
import os
import time
# Internal Import
from .models import Profile
User = settings.AUTH_USER_MODEL
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    """Create a Profile for every newly created User (post_save signal)."""
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
    """Persist the related Profile whenever its User is saved."""
    instance.profile.save()
@receiver(pre_save, sender=Profile)
def delete_old_file(sender, instance, **kwargs):
    """Remove the previous profile image from disk when it is replaced.

    Runs on pre_save so the old file can still be looked up from the
    database. The bundled default picture is never deleted. Return values
    of signal receivers are ignored by Django; the early ``return False``
    just short-circuits for not-yet-persisted instances.
    """
    if instance._state.adding and not instance.pk:
        return False

    try:
        old_image = sender.objects.get(pk=instance.pk).image
    except sender.DoesNotExist:
        return False

    new_image = instance.image
    if old_image != new_image:
        if os.path.isfile(old_image.path):
            # keep the shared default asset on disk
            if "default-profile-picture.jpg" not in old_image.path:
                os.remove(old_image.path)
| StarcoderdataPython |
363404 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 09:00:23 2018
@author: r.dewinter
"""
import numpy as np
import matplotlib.pyplot as plt
from hypervolume import hypervolume
from paretofrontFeasible import paretofrontFeasible
import os
#plt.plot(objectivesMOGA[:,0],objectivesMOGA[:,1],'ro',c='r')
#plt.plot(objectivesSPEA2[:,0],objectivesSPEA2[:,1],'ro',c='b')
#plt.plot(objectivesNSGAII[:,0],objectivesNSGAII[:,1],'ro',c='g')
#plt.plot(objectivesCEGO[:,0],objectivesCEGO[:,1],'ro',c='m')
def _report_hypervolume(fname, ref, split_data):
    """Print mean/max/std of the feasible-set hypervolume for one problem.

    fname -- directory containing one CSV of evaluated solutions per run.
    ref -- reference point for the hypervolume computation.
    split_data -- callable mapping the raw CSV array to a
        (constraints, objectives) pair, with constraints expressed so a
        solution is feasible exactly when every value is <= 0.
    """
    hyp = []
    for file in os.listdir(fname):
        data = np.genfromtxt(fname + '/' + file, delimiter=',')
        constraints, objectives = split_data(data)
        # feasible <=> all constraints satisfied
        feasible = np.sum(constraints <= 0, axis=1) == constraints.shape[1]
        hyp.append(hypervolume(objectives[feasible], ref))
    print(fname, np.mean(hyp))
    print(fname, np.max(hyp))
    print(fname, np.std(hyp))


def _split_ship(data):
    """Ship problem: rescale its mixed constraint encoding to <= 0 form."""
    constraints = data[1:, 9:-3]
    constraints[:, :4] = constraints[:, :4] * -1 + 1
    constraints[:, 4:] = constraints[:, 4:] * -1
    return constraints, data[1:, -3:-1]


def _make_split(cons_slice, obj_slice, negate=False):
    """Return a splitter selecting the given constraint/objective columns.

    negate=True flips constraints stored with a >= 0 convention.
    """
    def split(data):
        constraints = data[1:, cons_slice]
        if negate:
            constraints = -1 * constraints
        return constraints, data[1:, obj_slice]
    return split


# Real-world problems.
_report_hypervolume('optimize ship', np.array([5000, 2]), _split_ship)
_report_hypervolume('SRD', np.array([7000, 1700]),
                    _make_split(slice(10, -3), slice(-3, -1)))
_report_hypervolume('TBTD', np.array([0.1, 100000]),
                    _make_split(slice(6, -3), slice(-3, -1)))
_report_hypervolume('WB', np.array([350, 0.1]),
                    _make_split(slice(7, -3), slice(-3, -1)))
_report_hypervolume('DBD', np.array([5, 50]),
                    _make_split(slice(7, -3), slice(-3, -1)))
_report_hypervolume('SPD', np.array([16, 19000, -260000]),
                    _make_split(slice(9, -4), slice(-4, -1)))
_report_hypervolume('WP', np.array([83000, 1350, 2.85, 15989825, 25000]),
                    _make_split(slice(6, 13), slice(13, -1)))

# Artificial problems.
_report_hypervolume('BNH', np.array([140, 50]),
                    _make_split(slice(-5, -3), slice(-3, -1)))
_report_hypervolume('CEXP', np.array([1, 9]),
                    _make_split(slice(-5, -3), slice(-3, -1)))
_report_hypervolume('C3DTLZ4', np.array([3, 3]),
                    _make_split(slice(-5, -3), slice(-3, -1)))
_report_hypervolume('SRN', np.array([301, 72]),
                    _make_split(slice(-5, -3), slice(-3, -1)))
_report_hypervolume('TNK', np.array([3, 3]),
                    _make_split(slice(-5, -3), slice(-3, -1)))
_report_hypervolume('OSY', np.array([0, 386]),
                    _make_split(slice(-9, -3), slice(-3, -1), negate=True))
_report_hypervolume('CTP1', np.array([1, 2]),
                    _make_split(slice(-5, -3), slice(-3, -1), negate=True))
_report_hypervolume('CSI', np.array([42, 4.5, 13]),
                    _make_split(slice(-14, -4), slice(-4, -1)))
11242580 | # coding: utf-8
'''
# Criteria
## Pathogenic
### Pathogenic Very Strong
* PVS1 null variant (nonsense, frameshift, canonical ±1 or 2 splice sites, initiation codon, single or multiexon deletion) in a gene where LOF is a known mechanism of disease
### Pathogenic Strong
* PS1 Same amino acid change as a previously established pathogenic variant regardless of nucleotide change
* PS2 De novo (both maternity and paternity confirmed) in a patient with the disease and no family history
* PS3 Well-established in vitro or in vivo functional studies supportive of a damaging effect on the gene or gene product
* PS4 The prevalence of the variant in affected individuals is significantly increased compared with the prevalence in controls
### Pathogenic Moderate
* PM1 Located in a mutational hot spot and/or critical and well-established functional domain (e.g., active site of an enzyme) without benign variation
* PM2 Absent from controls (or at extremely low frequency if recessive) (Table 6) in Exome Sequencing Project, 1000 Genomes Project, or Exome Aggregation Consortium
* PM3 For recessive disorders, detected in trans with a pathogenic variant
* PM4 Protein length changes as a result of in-frame deletions/insertions in a nonrepeat region or stop-loss variants
* PM5 Novel missense change at an amino acid residue where a different missense change determined to be pathogenic has been seen before
* PM6 Assumed de novo, but without confirmation of paternity and maternity
### Pathogenic Supporting
* PP1 Cosegregation with disease in multiple affected family members in a gene definitively known to cause the disease
* PP2 Missense variant in a gene that has a low rate of benign missense variation and in which missense variants are a common mechanism of disease
* PP3 Multiple lines of computational evidence support a deleterious effect on the gene or gene product (conservation, evolutionary, splicing impact, etc.)
* PP4 Patient’s phenotype or family history is highly specific for a disease with a single genetic etiology
* PP5 Reputable source recently reports variant as pathogenic, but the evidence is not available to the laboratory to perform an independent evaluation
## Benign
### Benign Stand-alone
* BA1 Allele frequency is >5% in Exome Sequencing Project, 1000 Genomes Project, or Exome Aggregation Consortium
### Benign Strong
* BS1 Allele frequency is greater than expected for disorder
* BS2 Observed in a healthy adult individual for a recessive (homozygous), dominant (heterozygous), or X-linked (hemizygous) disorder, with full penetrance expected at an early age
* BS3 Well-established in vitro or in vivo functional studies show no damaging effect on protein function or splicing
* BS4 Lack of segregation in affected members of a family
### Benign Supporting
* BP1 Missense variant in a gene for which primarily truncating variants are known to cause disease
* BP2 Observed in trans with a pathogenic variant for a fully penetrant dominant gene/disorder or observed in cis with a pathogenic variant in any inheritance pattern
* BP3 In-frame deletions/insertions in a repetitive region without a known function
* BP4 Multiple lines of computational evidence suggest no impact on gene or gene product (conservation, evolutionary, splicing impact, etc.)
* BP5 Variant found in a case with an alternate molecular basis for disease
* BP6 Reputable source recently reports variant as benign, but the evidence is not available to the laboratory to perform an independent evaluation
* BP7 A synonymous (silent) variant for which splicing prediction algorithms predict no impact to the splice consensus sequence nor the creation of a new splice site AND the nucleotide is not highly conserved
'''
import os
import json
import pandas as pd
from picus import data
data_path = data.__path__[0]
class EvidenceCollection:
    def __init__(self):
        """Load all reference tables needed to score ACMG evidence criteria.

        Reads the bundled TSV/CSV resources (LOF gene list, VEP consequence
        table, InterVar-derived PS4/PP2/BP1 lists, ClinVar dump) and
        precomputes the ClinVar missense subset used for PM5.
        """
        # Ordered list of the 28 ACMG criteria; also used as column names.
        self.evidences = ['PVS1', 'PS1', 'PS2', 'PS3', 'PS4', 'PM1', 'PM2', 'PM3', 'PM4', 'PM5', 'PM6', 'PP1', 'PP2', 'PP3', 'PP4', 'PP5', 'BA1', 'BS1', 'BS2', 'BS3', 'BS4', 'BP1', 'BP2', 'BP3', 'BP4', 'BP5', 'BP6', 'BP7']

        # Genes where loss-of-function is a known disease mechanism (PVS1).
        lof_genes = pd.read_csv(
            os.path.join(data_path, 'lof_genes.tsv'),
            sep='\t'
        )
        self.lof_genes = lof_genes['gene_symbol'].tolist()

        # VEP SO terms with HIGH impact count as "null variants" for PVS1.
        null_variants = pd.read_csv(
            os.path.join(data_path, 'variant_consequences.tsv'),
            sep='\t'
        )
        self.null_variants = null_variants[null_variants['IMPACT'] == 'HIGH']['SO term'].tolist()

        # PS4 from InterVar (Change it later)
        ps4_ids = pd.read_csv(
            os.path.join(data_path, 'PS4.tsv'),
            sep='\t'
        )
        self.ps4_ids = ps4_ids['id'].tolist()

        # PP2 Gene list from InterVar
        pp2_genes = pd.read_csv(
            os.path.join(data_path, 'PP2.tsv'),
            sep='\t'
        )
        self.pp2_genes = pp2_genes['gene_symbol'].tolist()

        # Genes where mainly truncating variants cause disease (BP1).
        bp1_genes = pd.read_csv(
            os.path.join(data_path, 'BP1.tsv'),
            sep='\t'
        )
        self.bp1_genes = bp1_genes['gene_symbol'].tolist()

        self.clinvar = pd.read_csv(
            os.path.join(data_path, 'clinvar.csv'),
            low_memory=False,
        )
        # Split the protein HGVS into protein accession / version / residue
        # position / change token (regex capture groups 2-5).
        aa_pos = self.clinvar['hgvs_p'].str.split(
            r'((\w+)\.(\d+)?:p\.[a-zA-Z]+(\d+))', expand=True)
        self.clinvar['np'] = aa_pos[2]
        self.clinvar['np_ver'] = aa_pos[3]
        self.clinvar['np_pos'] = aa_pos[4]
        self.clinvar['change'] = aa_pos[5]

        # ClinVar entries with an actual amino-acid change ('=' marks
        # synonymous), kept for the (currently disabled) PM5 merge.
        clinvar_missense = self.clinvar[self.clinvar['change'] != '='][['np', 'np_ver', 'np_pos', 'hgvs_c', 'clinical_significance']]
        self.clinvar_missense = clinvar_missense.rename(
            columns={
                'clinical_significance': 'clin_sig',
                'hgvs_c': 'hgvsc_c'
            },
        )
    def collect_evidences(self, df):
        """Annotate each variant row of *df* with all 28 ACMG criteria.

        Each get_* method returns the string '0' or '1'; the per-criterion
        columns are concatenated into a JSON-formatted string column
        ``evidences`` and then dropped again, so the result keeps one
        ``evidences`` column per row.

        NOTE(review): get_PM5 reads the 'clin_sig', 'hgvsc_c' and '_merge'
        columns that only the commented-out merge below would create, and
        the print() calls look like leftover debugging -- confirm intent.
        """
        # merge to check for PS1
        df = pd.merge(
            df,
            self.clinvar[['CHR', 'POS', 'cALT',
                          'hgvs_p', 'clinical_significance']],
            how='left',
            on=['CHR', 'POS'],
        )

        # for PM5
        # This gives error for non_coding when there is no hgsvp
        aa_pos = df.hgvsp.astype(str).str.split(
            r'((\w+)\.(\d+)?:p\.[a-zA-Z]+(\d+))', expand=True)
        print(aa_pos)
        df['np'] = aa_pos[2]
        df['np_ver'] = aa_pos[3]
        df['np_pos'] = aa_pos[4]
        print(df['np'])
        print(df['np_ver'])
        print(df['np_pos'])

        # BROKEN ###
        # merge on split hgvsp
        # df = pd.merge(
        #     df,
        #     self.clinvar_missense,
        #     indicator=True,
        #     how='left',
        #     on=['np', 'np_ver', 'np_pos']
        # )

        # To Do (evidences to collect)
        df['PVS1'] = df.apply(self.get_PVS1, axis=1)
        df['PS1'] = df.apply(self.get_PS1, axis=1)
        df['PS2'] = df.apply(self.get_PS2, axis=1)
        df['PS3'] = df.apply(self.get_PS3, axis=1)
        df['PS4'] = df.apply(self.get_PS4, axis=1)
        df['PM1'] = df.apply(self.get_PM1, axis=1)
        df['PM2'] = df.apply(self.get_PM2, axis=1)
        df['PM3'] = df.apply(self.get_PM3, axis=1)
        df['PM4'] = df.apply(self.get_PM4, axis=1)
        df['PM5'] = df.apply(self.get_PM5, axis=1)
        df['PM6'] = df.apply(self.get_PM6, axis=1)
        df['PP1'] = df.apply(self.get_PP1, axis=1)
        df['PP2'] = df.apply(self.get_PP2, axis=1)
        df['PP3'] = df.apply(self.get_PP3, axis=1)
        df['PP4'] = df.apply(self.get_PP4, axis=1)
        df['PP5'] = df.apply(self.get_PP5, axis=1)
        df['BA1'] = df.apply(self.get_BA1, axis=1)
        df['BS1'] = df.apply(self.get_BS1, axis=1)
        df['BS2'] = df.apply(self.get_BS2, axis=1)
        df['BS3'] = df.apply(self.get_BS3, axis=1)
        df['BS4'] = df.apply(self.get_BS4, axis=1)
        df['BP1'] = df.apply(self.get_BP1, axis=1)
        df['BP2'] = df.apply(self.get_BP2, axis=1)
        df['BP3'] = df.apply(self.get_BP3, axis=1)
        df['BP4'] = df.apply(self.get_BP4, axis=1)
        df['BP5'] = df.apply(self.get_BP5, axis=1)
        df['BP6'] = df.apply(self.get_BP6, axis=1)
        df['BP7'] = df.apply(self.get_BP7, axis=1)

        # Assemble a JSON object string per row from the '0'/'1' columns.
        df['evidences'] = '{' + \
            '"PVS1": ' + df['PVS1'] + \
            ', "PS1": ' + df['PS1'] + \
            ', "PS2": ' + df['PS2'] + \
            ', "PS3": ' + df['PS3'] + \
            ', "PS4": ' + df['PS4'] + \
            ', "PM1": ' + df['PM1'] + \
            ', "PM2": ' + df['PM2'] + \
            ', "PM3": ' + df['PM3'] + \
            ', "PM4": ' + df['PM4'] + \
            ', "PM5": ' + df['PM5'] + \
            ', "PM6": ' + df['PM6'] + \
            ', "PP1": ' + df['PP1'] + \
            ', "PP2": ' + df['PP2'] + \
            ', "PP3": ' + df['PP3'] + \
            ', "PP4": ' + df['PP4'] + \
            ', "PP5": ' + df['PP5'] + \
            ', "BA1": ' + df['BA1'] + \
            ', "BS1": ' + df['BS1'] + \
            ', "BS2": ' + df['BS2'] + \
            ', "BS3": ' + df['BS3'] + \
            ', "BS4": ' + df['BS4'] + \
            ', "BP1": ' + df['BP1'] + \
            ', "BP2": ' + df['BP2'] + \
            ', "BP3": ' + df['BP3'] + \
            ', "BP4": ' + df['BP4'] + \
            ', "BP5": ' + df['BP5'] + \
            ', "BP6": ' + df['BP6'] + \
            ', "BP7": ' + df['BP7'] + \
            '}'
        df.drop(self.evidences, axis=1, inplace=True)
        return df
def flat_evidences(self, evidences):
    """Return a space-separated string of the ACMG evidence codes set to 1.

    :param evidences: JSON string mapping evidence codes to 0/1.
    :returns: e.g. ``'PVS1 PM2 '`` (each active code followed by a space,
        matching the original output format).
    """
    flags = json.loads(evidences)
    # Iterate self.evidences (not the dict) so the output order is fixed.
    active = ['{} '.format(code) for code in self.evidences if flags[code] == 1]
    return ''.join(active)
# Pathogenic
# Pathogenic Very Strong
def get_PVS1(self, df):
    """ACMG PVS1: null variant in a gene with a loss-of-function mechanism.

    :param df: row with 'gene_symbol' and 'transcript_consequence_terms'.
    :returns: '1' if triggered, else '0'.
    """
    lof_gene = df['gene_symbol'] in self.lof_genes
    null_variant = df['transcript_consequence_terms'] in self.null_variants
    return '1' if lof_gene and null_variant else '0'
# Pathogenic Strong
def get_PS1(self, df):
    """ACMG PS1: same amino-acid change as an established pathogenic
    variant, produced by a different nucleotide change.

    :returns: '1' if triggered, else '0'.
    """
    if df['clinical_significance'] != 'Pathogenic':
        return '0'
    same_protein_change = df['hgvsp'] == df['hgvs_p']
    different_nucleotide = df['ALT'] != df['cALT']
    return '1' if same_protein_change and different_nucleotide else '0'
def get_PS2(self, df):
    """ACMG PS2 (de novo, parentage confirmed): not evaluated by this
    pipeline; always returns '0'."""
    return '0'

def get_PS3(self, df):
    """ACMG PS3 (well-established functional studies): not evaluated by
    this pipeline; always returns '0'."""
    return '0'
def get_PS4(self, df):
    """ACMG PS4: variant id is in the precomputed set of variants with
    significantly increased prevalence in affected individuals.

    :returns: '1' if the row's 'id' is in self.ps4_ids, else '0'.
    """
    return '1' if df['id'] in self.ps4_ids else '0'
# Pathogenic Moderate
def get_PM1(self, df):
    """ACMG PM1 (mutational hot spot / critical functional domain): not
    evaluated by this pipeline; always returns '0'."""
    return '0'
def get_PM2(self, df):
    """ACMG PM2: absent / extremely rare in population databases
    (gnomAD allele frequency below 0.001).

    NaN frequencies compare False and therefore yield '0', as in the
    original comparison.

    :returns: '1' if triggered, else '0'.
    """
    return '1' if df['gnomad'] < .001 else '0'
def get_PM3(self, df):
    """ACMG PM3 (in trans with a pathogenic variant, recessive gene): not
    evaluated by this pipeline; always returns '0'."""
    return '0'
def get_PM4(self, df):
    """ACMG PM4: protein length change caused by an in-frame indel or a
    stop-loss variant.

    :returns: '1' if triggered, else '0'.
    """
    length_changing = ("inframe_insertion", "inframe_deletion", "stop_lost")
    return '1' if df['transcript_consequence_terms'] in length_changing else '0'
def get_PM5(self, df):
    """ACMG PM5: novel missense change at a residue where a different
    missense change is established as pathogenic.

    Requires: rare in gnomAD (< 0.001), missense consequence, known
    Pathogenic clin_sig, a different cDNA change (hgvsc != hgvsc_c) and a
    successful join ('_merge' == 'both' from a pandas merge indicator).

    :returns: '1' if every condition holds, else '0'.
    """
    rare = df['gnomad'] < 0.001
    missense = df['transcript_consequence_terms'] == 'missense_variant'
    known_pathogenic = df['clin_sig'] == 'Pathogenic'
    different_cdna = df['hgvsc'] != df['hgvsc_c']
    joined = df['_merge'] == 'both'
    if rare and missense and known_pathogenic and different_cdna and joined:
        return '1'
    return '0'
def get_PM6(self, df):
    """ACMG PM6 (assumed de novo, parentage not confirmed): not evaluated
    by this pipeline; always returns '0'."""
    return '0'
# Pathogenic Supporting
def get_PP1(self, df):
    """ACMG PP1 (cosegregation with disease): not evaluated by this
    pipeline; always returns '0'."""
    return '0'
def get_PP2(self, df):
    """ACMG PP2: missense variant in a gene with a low rate of benign
    missense variation (gene listed in self.pp2_genes).

    :returns: '1' if triggered, else '0'.
    """
    in_pp2_gene = df['gene_symbol'] in self.pp2_genes
    is_missense = df['transcript_consequence_terms'] == 'missense_variant'
    return '1' if in_pp2_gene and is_missense else '0'
def get_PP3(self, df):
    """ACMG PP3: computational evidence of a deleterious effect.

    SIFT/PolyPhen cutoffs from
    https://www.ensembl.org/info/genome/variation/prediction/protein_function.html
    (SIFT < 0.05 is deleterious; PolyPhen > 0.908 is probably damaging).

    :returns: '1' if both predictors agree, else '0'.
    """
    deleterious = df['sift_score'] < 0.05 and df['polyphen_score'] > 0.908
    return '1' if deleterious else '0'
def get_PP4(self, df):
    """ACMG PP4 (phenotype highly specific for the gene): not evaluated
    by this pipeline; always returns '0'."""
    return '0'
def get_PP5(self, df):
    """ACMG PP5: reputable source reports the variant as pathogenic.

    :returns: '1' if clinical_significance is 'Pathogenic', else '0'.
    """
    return '1' if df['clinical_significance'] == 'Pathogenic' else '0'
# Benign
# Benign Stand-alone
def get_BA1(self, df):
    """ACMG BA1: allele frequency above 5% in either the 1000G/ENSEMBL
    minor_allele_freq or gnomAD.

    :returns: '1' if triggered, else '0'.
    """
    common = df['minor_allele_freq'] > 0.05 or df['gnomad'] > 0.05
    return '1' if common else '0'
# Benign Strong
def get_BS1(self, df):
    """ACMG BS1 (frequency greater than expected for disorder): not
    evaluated by this pipeline; always returns '0'."""
    return '0'

def get_BS2(self, df):
    """ACMG BS2 (observed in healthy adults): not evaluated by this
    pipeline; always returns '0'."""
    return '0'

def get_BS3(self, df):
    """ACMG BS3 (functional studies show no damaging effect): not
    evaluated by this pipeline; always returns '0'."""
    return '0'

def get_BS4(self, df):
    """ACMG BS4 (lack of segregation in affected family members): not
    evaluated by this pipeline; always returns '0'."""
    return '0'
# Benign Supporting
def get_BP1(self, df):
    """ACMG BP1: missense variant in a gene where primarily truncating
    variants cause disease (gene listed in self.bp1_genes).

    Fix: the original had a second, unreachable ``return '0'`` after the
    if/else (both branches already return); the dead statement is removed.

    :returns: '1' if triggered, else '0'.
    """
    if df['gene_symbol'] in self.bp1_genes and \
            df['transcript_consequence_terms'] == 'missense_variant':
        return '1'
    else:
        return '0'
def get_BP2(self, df):
    """ACMG BP2 (observed in trans/cis with a pathogenic variant): not
    evaluated by this pipeline; always returns '0'."""
    return '0'

def get_BP3(self, df):
    """ACMG BP3 (in-frame indel in a repetitive region): not evaluated
    by this pipeline; always returns '0'."""
    return '0'
def get_BP4(self, df):
    """ACMG BP4: computational evidence of no damaging effect.

    SIFT/PolyPhen cutoffs from
    https://www.ensembl.org/info/genome/variation/prediction/protein_function.html
    (SIFT >= 0.05 is tolerated; PolyPhen <= 0.446 is benign).

    :returns: '1' if both predictors agree, else '0'.
    """
    tolerated = df['sift_score'] >= 0.05 and df['polyphen_score'] <= 0.446
    return '1' if tolerated else '0'
def get_BP5(self, df):
    """ACMG BP5 (alternate molecular cause found): not evaluated by this
    pipeline; always returns '0'."""
    return '0'
def get_BP6(self, df):
    """ACMG BP6: reputable source reports the variant as benign.

    :returns: '1' if clinical_significance is 'Benign', else '0'.
    """
    return '1' if df['clinical_significance'] == 'Benign' else '0'
def get_BP7(self, df):
    """ACMG BP7 (synonymous variant, no predicted splice impact): not
    evaluated by this pipeline; always returns '0'."""
    return '0'
| StarcoderdataPython |
3547975 | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from oslo_serialization import base64 as oslo_base64
from oslo_serialization import jsonutils as json
import six
import sys
import testtools
import time
from testtools import testcase
from barbican.plugin.interface import secret_store as ss
from barbican.plugin.util import translations
from barbican.tests import keys
from barbican.tests import utils
from functionaltests.api import base
from functionaltests.api.v1.behaviors import secret_behaviors
from functionaltests.api.v1.behaviors import secretstores_behaviors
from functionaltests.api.v1.models import secret_models
from functionaltests.common import config
# Functional-test configuration plus the two RBAC admin users
# (project A / project B) referenced by access-control test cases.
CONF = config.get_config()
admin_a = CONF.rbac_users.admin_a
admin_b = CONF.rbac_users.admin_b
def get_pem_content(pem):
    """Return the decoded binary body of a PEM document.

    Component [1] from get_pem_components is the base64 payload between
    the BEGIN/END markers.
    """
    return oslo_base64.decode_as_bytes(translations.get_pem_components(pem)[1])
def get_private_key_req():
    """Build a secret-create request body for an RSA private key."""
    request = {
        'name': 'myprivatekey',
        'payload_content_type': 'application/octet-stream',
        'payload_content_encoding': 'base64',
        'algorithm': 'rsa',
        'bit_length': 2048,
        'secret_type': 'private',
        'payload': oslo_base64.encode_as_bytes(keys.get_private_key_pem()),
    }
    return request
def get_public_key_req():
    """Build a secret-create request body for an RSA public key."""
    request = {
        'name': 'mypublickey',
        'payload_content_type': 'application/octet-stream',
        'payload_content_encoding': 'base64',
        'algorithm': 'rsa',
        'bit_length': 2048,
        'secret_type': 'public',
        'payload': oslo_base64.encode_as_bytes(keys.get_public_key_pem()),
    }
    return request
def get_certificate_req():
    """Build a secret-create request body for an X.509 certificate."""
    request = {
        'name': 'mycertificate',
        'payload_content_type': 'application/octet-stream',
        'payload_content_encoding': 'base64',
        'algorithm': 'rsa',
        'bit_length': 2048,
        'secret_type': 'certificate',
        'payload': oslo_base64.encode_as_bytes(keys.get_certificate_pem()),
    }
    return request
def get_passphrase_req():
    """Build a secret-create request body for a plain-text passphrase."""
    return dict(name='mypassphrase',
                payload_content_type='text/plain',
                secret_type='passphrase',
                payload='mysecretpassphrase')
def get_default_data():
    """Return a default secret-create request body (256-bit AES key).

    Fix: the expiration is now generated one year in the future at call
    time. The previous hard-coded value ("2020-02-28T19:14:44.180394")
    is in the past, and the server rejects creates whose expiration has
    already passed with a 400 (see
    test_secret_create_with_expiration_passed), which would break every
    test relying on this default body.
    """
    expiration = (datetime.datetime.utcnow() +
                  datetime.timedelta(days=365)).isoformat()
    return {
        "name": "AES key",
        "expiration": expiration,
        "algorithm": "aes",
        "bit_length": 256,
        "mode": "cbc",
        "payload": get_default_payload(),
        "payload_content_type": "application/octet-stream",
        "payload_content_encoding": "base64",
    }


def get_default_payload():
    """Return the canonical base64-encoded symmetric key payload."""
    return b"AQIDBAUGBwgBAgMEBQYHCAECAwQFBgcIAQIDBAUGBwg="
@utils.parameterized_test_case
class SecretsTestCase(base.TestCase):
def setUp(self):
    """Create per-test secret behaviors and fresh request-body templates."""
    super(SecretsTestCase, self).setUp()
    self.behaviors = secret_behaviors.SecretBehaviors(self.client)

    # make a local mutable copies of the default data to prevent
    # possible data contamination if (when?) the data contains
    # any nested dicts.
    # TODO(tdink) Move to a config file
    # Fully-populated single-phase create request (payload inline).
    self.default_secret_create_data = get_default_data()

    # Same request shape with every attribute explicitly None.
    self.default_secret_create_all_none_data = {
        "name": None,
        "expiration": None,
        "algorithm": None,
        "bit_length": None,
        "mode": None,
        "payload": None,
        "payload_content_type": None,
        "payload_content_encoding": None,
    }

    # Same request shape with every attribute an empty string.
    self.default_secret_create_emptystrings_data = {
        "name": '',
        "expiration": '',
        "algorithm": '',
        "bit_length": '',
        "mode": '',
        "payload": '',
        "payload_content_type": '',
        "payload_content_encoding": '',
    }

    # Metadata-only request for two-phase create (payload PUT later).
    # NOTE(review): this hard-coded expiration is now in the past --
    # confirm whether the server still accepts it for metadata-only
    # creates, or whether it should be generated dynamically too.
    self.default_secret_create_two_phase_data = {
        "name": "AES key",
        "expiration": "2020-02-28T19:14:44.180394",
        "algorithm": "aes",
        "bit_length": 256,
        "mode": "cbc",
    }
def tearDown(self):
    """Delete every secret created during the test, then tear down."""
    self.behaviors.delete_all_created_secrets()
    super(SecretsTestCase, self).tearDown()
@testcase.attr('negative')
def test_secret_create_with_only_content_type_no_payload(self):
"""Create secret with valid content type but no payload."""
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
test_model.payload_content_type = 'application/octet-stream'
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@testcase.attr('positive')
def test_secret_create_then_check_content_types(self):
"""Check that set content-type attribute is retained in metadata."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
resp = self.behaviors.get_secret_metadata(secret_ref)
self.assertEqual(200, resp.status_code)
content_types = resp.model.content_types
self.assertIsNotNone(content_types)
self.assertIn('default', content_types)
self.assertEqual(content_types['default'],
test_model.payload_content_type)
@testcase.attr('positive')
def test_secret_create_all_none(self):
"""Covers case of a POST request with no JSON data."""
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
@testcase.attr('negative')
def test_secret_get_secret_doesnt_exist(self):
"""GET a non-existent secret.
Should return a 404.
"""
resp = self.behaviors.get_secret_metadata('not_a_uuid')
self.assertEqual(404, resp.status_code)
@testcase.attr('negative')
def test_secret_get_secret_payload_doesnt_exist(self):
"""GET a non-existent payload.
Should return a 404.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
resp = self.behaviors.get_secret(secret_ref, 'text/plain')
self.assertEqual(404, resp.status_code)
@testcase.attr('positive')
def test_secret_get_payload_no_accept_header(self):
"""GET a secret payload, do not pass in accept header.
Should return a 200.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
get_resp = self.behaviors.get_secret(
secret_ref,
payload_content_type='',
omit_headers=['Accept'])
self.assertEqual(200, get_resp.status_code)
self.assertEqual(test_model.payload,
oslo_base64.encode_as_bytes(get_resp.content))
@testcase.attr('negative')
def test_secret_delete_doesnt_exist(self):
"""DELETE a non-existent secret.
Should return a 404.
"""
resp = self.behaviors.delete_secret('not_a_uuid', expected_fail=True)
self.assertEqual(404, resp.status_code)
@testcase.attr('negative')
def test_secret_get_invalid_mime_type(self):
"""Covers getting a secret with an invalid mime type."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
resp = self.behaviors.get_secret(secret_ref,
payload_content_type="i/m")
self.assertEqual(406, resp.status_code)
@testcase.attr('negative')
def test_secret_create_with_expiration_passed(self):
"""Create a secret with an expiration that has already passed.
Should return a 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.expiration = '2000-01-10T14:58:52.546795'
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@testcase.attr('negative')
def test_secret_create_with_empty_strings(self):
"""Secret create with empty Strings for all attributes.
Should return a 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_emptystrings_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@testcase.attr('negative')
def test_secret_create_with_invalid_content_type(self):
"""Create secret with an invalid content type in HTTP header.
Should return a 415.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
headers = {"Content-Type": "crypto/boom"}
resp, secret_ref = self.behaviors.create_secret(test_model, headers)
self.assertEqual(415, resp.status_code)
@testcase.attr('negative')
def test_secret_create_with_oversized_payload(self):
"""Create a secret that is larger than the max payload size.
Should return a 413 if the secret size is greater than the
maximum allowed size.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload = self.oversized_payload
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(413, resp.status_code)
@testcase.attr('negative')
def test_secret_put_when_payload_doesnt_exist(self):
"""PUT secret to a non-existent secret.
Should return 404.
"""
resp = self.behaviors.update_secret_payload(
secret_ref='not_a_uuid',
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload='testing putting to non-existent secret')
self.assertEqual(404, resp.status_code)
@testcase.attr('negative')
def test_secret_put_when_payload_already_exists(self):
"""PUT against a secret that already has encrypted data.
Should return 409.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload='testing putting data in secret that already has data')
self.assertEqual(409, resp.status_code)
@testcase.attr('negative')
def test_secret_put_two_phase_empty_payload(self):
"""Covers case of putting empty String to a secret.
Should return 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload='')
self.assertEqual(400, put_resp.status_code)
@testcase.attr('negative')
def test_secret_put_two_phase_invalid_content_type(self):
"""PUT with an invalid content type. Should return 415.
Launchpad bug #1208601
- Updated in Barbican blueprint barbican-enforce-content-type
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='crypto/boom',
payload_content_encoding='base64',
payload='invalid content type')
self.assertEqual(415, put_resp.status_code)
@testcase.attr('negative')
def test_secret_put_two_phase_no_payload(self):
"""Covers case of putting null String to a secret.
Should return 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload=None)
self.assertEqual(400, put_resp.status_code)
@testcase.attr('negative')
def test_secret_put_two_phase_w_oversized_binary_data_not_utf8(self):
"""PUT with an oversized binary string that isn't UTF-8.
Launchpad bug #1315498.
"""
oversized_payload = bytearray(self.oversized_payload)
# put a value in the middle of the data that does not have a UTF-8
# code point. Using // and 176 to be python3-friendly.
oversized_payload[self.max_payload_size // 2] = 176 # 0xb0
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload=oversized_payload)
self.assertEqual(413, put_resp.status_code)
@testcase.attr('negative')
def test_secret_put_two_phase_oversized_payload(self):
"""PUT with oversized payload should return 413.
Covers the case of putting secret data that is larger than the maximum
secret size allowed by Barbican. Beyond that it should return 413.
"""
oversized_payload = self.oversized_payload
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload=oversized_payload)
self.assertEqual(413, put_resp.status_code)
@testcase.attr('positive')
def test_secret_put_two_phase_valid_binary_data_not_utf8(self):
    """A string with binary data that doesn't contain UTF-8 code points.

    Launchpad bug #1315498.
    """
    # put a value in the data that does not have a UTF-8 code point.
    data = b'\xb0'

    test_model = secret_models.SecretModel(
        **self.default_secret_create_two_phase_data)

    resp, secret_ref = self.behaviors.create_secret(test_model)
    self.assertEqual(201, resp.status_code)

    # NOTE(review): under Python 3, str(b'\xb0') produces the literal
    # repr "b'\\xb0'", not the raw byte -- so the payload sent is the
    # repr string. Presumably a Python 2 leftover; confirm whether a
    # decode (e.g. latin-1) was intended before changing behavior.
    put_resp = self.behaviors.update_secret_payload(
        secret_ref=secret_ref,
        payload_content_type='application/octet-stream',
        payload_content_encoding='base64',
        payload=str(data))
    self.assertEqual(204, put_resp.status_code)
@testcase.attr('positive')
def test_secret_put_two_phase_high_range_unicode_character(self):
"""Tests a high-range unicode character on a two-step PUT.
Launchpad bug #1315498
"""
data = u'\U0001F37A'
data = data.encode('utf-8')
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
put_resp = self.behaviors.update_secret_payload(
secret_ref=secret_ref,
payload_content_type='application/octet-stream',
payload_content_encoding='base64',
payload=data)
self.assertEqual(204, put_resp.status_code)
@testcase.attr('positive')
def test_secret_get_nones_payload_with_a_octet_stream(self):
"""Tests getting a secret with octet-stream."""
test_model = secret_models.SecretModel(
**self.default_secret_create_two_phase_data)
test_model.payload_content_encoding = 'base64'
test_model.payload_content_type = 'application/octet-stream'
test_model.payload = oslo_base64.encode_as_bytes('abcdef')
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
get_resp = self.behaviors.get_secret(
secret_ref,
payload_content_type=test_model.payload_content_type,
payload_content_encoding=test_model.payload_content_encoding)
self.assertEqual(200, get_resp.status_code)
self.assertEqual(test_model.payload,
oslo_base64.encode_as_bytes(get_resp.content))
@testcase.attr('negative')
def test_secret_create_defaults_bad_content_type_check_message(self):
"""Verifying the returned error message matches the expected form."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload_content_type = 'plain-text'
resp, secret_ref = self.behaviors.create_secret(test_model)
# first, ensure that the return code is 400
self.assertEqual(400, resp.status_code)
resp_dict = json.loads(resp.content)
self.assertIn(
"Provided object does not match schema 'Secret': "
"payload_content_type is not one of ['text/plain', "
"'text/plain;charset=utf-8', 'text/plain; charset=utf-8', "
"'application/octet-stream'", resp_dict['description'])
self.assertIn("Bad Request", resp_dict['title'])
@testcase.attr('negative')
def test_secret_create_then_expire_then_check(self):
"""Covers case where you try to retrieve a secret that is expired.
This test creates a secret that will soon expire.
After it expires, check it and verify that it is no longer
a valid secret.
"""
# create a secret that expires in 15 seconds
timestamp = utils.create_timestamp_w_tz_and_offset(seconds=15)
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.expiration = timestamp
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
# now get the secret - will be still valid
get_resp = self.behaviors.get_secret_metadata(secret_ref)
self.assertEqual(200, get_resp.status_code)
# now wait 20 seconds
time.sleep(20)
# now get the secret - should be invalid (expired)
resp = self.behaviors.get_secret_metadata(secret_ref)
self.assertEqual(404, resp.status_code)
@utils.parameterized_dataset({
'alphanumeric': ['1f34ds'],
'punctuation': ['~!@#$%^&*()_+`-={}[]|:;<>,.?'],
'uuid': ['54262d9d-4bc7-4821-8df0-dc2ca8e112bb'],
'len_255': [base.TestCase.max_sized_field],
'empty': [''],
'null': [None]
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_name(self, name):
"""Covers cases of creating secrets with valid names."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.name = name
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
@utils.parameterized_dataset({
'int': [400]
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_name(self, name):
"""Create secrets with various invalid names.
Should return 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.name = name
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@testcase.attr('positive', 'non-standard-algorithm')
@testtools.skipIf(utils.is_kmip_enabled(),
                  "KMIP does not support invalid algorithms")
def test_secret_create_valid_algorithms(self):
    """Non-standard algorithm strings are accepted by the server.

    Despite the name, this sends the single literal string 'invalid' as
    the algorithm and expects a 201: the field is free-form unless a
    backend (e.g. KMIP) enforces a fixed algorithm set.
    """
    algorithm = 'invalid'
    test_model = secret_models.SecretModel(
        **self.default_secret_create_data)
    test_model.algorithm = algorithm

    resp, secret_ref = self.behaviors.create_secret(test_model)
    self.assertEqual(201, resp.status_code)
@utils.parameterized_dataset({
'int': [400]
})
@testcase.attr('negative')
def test_secret_create_invalid_algorithms(self, algorithm):
"""Creates secrets with various invalid algorithms."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.algorithm = algorithm
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@testtools.skipIf(utils.is_kmip_enabled(),
"KMIP does not support non-standard bit lengths")
@utils.parameterized_dataset({
'sixteen': [16],
'fifteen': [15],
'eight': [8],
'seven': [7],
'one': [1],
'none': [None]
})
@testcase.attr('positive', 'non-standard-algorithm')
def test_secret_create_with_non_standard_bit_length(self, bit_length):
"""Covers cases of creating secrets with valid bit lengths."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.bit_length = bit_length
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
@utils.parameterized_dataset({
'128': [128],
'192': [192],
'256': [256]
})
@testcase.attr('positive')
def test_secret_create_with_valid_bit_length(self, bit_length):
"""Covers cases of creating secrets with valid bit lengths."""
byte_length = bit_length // 8
secret = bytearray(byte_length)
for x in range(0, byte_length):
secret[x] = x
secret64 = oslo_base64.encode_as_bytes(secret)
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.bit_length = bit_length
test_model.payload = secret64
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
@utils.parameterized_dataset({
'str_type': ['not-an-int'],
'empty': [''],
'blank': [' '],
'negative_maxint': [-sys.maxsize],
'negative_one': [-1],
'zero': [0]
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_bit_length(self, bit_length):
"""Covers cases of creating secrets with invalid bit lengths."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.bit_length = bit_length
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@utils.parameterized_dataset({
'cbc': ['cbc'],
'unknown_positive': ['unknown']
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_mode(self, mode):
"""Covers cases of creating secrets with valid modes."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.mode = mode
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
@utils.parameterized_dataset({
'zero': [0],
'oversized_string': [base.TestCase.oversized_field],
'int': [400]
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_mode(self, mode):
"""Covers cases of creating secrets with invalid modes."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.mode = mode
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@utils.parameterized_dataset({
'text_content_type_none_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': None},
# TODO(dmend): Fix content negotiation
# 'utf8_text_content_type_none_encoding': {
# 'payload_content_type': 'text/plain; charset=utf-8',
# 'payload_content_encoding': None},
# 'no_space_utf8_text_content_type_none_encoding': {
# 'payload_content_type': 'text/plain;charset=utf-8',
# 'payload_content_encoding': None},
'octet_content_type_base64_encoding': {
'payload_content_type': 'application/octet-stream',
'payload_content_encoding': 'base64'}
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_types_and_encoding(
self, payload_content_type, payload_content_encoding):
"""Creates secrets with various content types and encodings."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload_content_type = payload_content_type
test_model.payload_content_encoding = payload_content_encoding
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
get_resp = self.behaviors.get_secret(
secret_ref,
payload_content_type=payload_content_type,
payload_content_encoding=payload_content_encoding)
self.assertEqual(200, get_resp.status_code)
if payload_content_encoding == 'base64':
self.assertEqual(test_model.payload,
oslo_base64.encode_as_bytes(get_resp.content))
else:
self.assertEqual(test_model.payload, get_resp.content)
@utils.parameterized_dataset({
'text_content_type_none_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': None},
# TODO(dmend): Fix content negotiation
# 'utf8_text_content_type_none_encoding': {
# 'payload_content_type': 'text/plain; charset=utf-8',
# 'payload_content_encoding': None},
# 'no_space_utf8_text_content_type_none_encoding': {
# 'payload_content_type': 'text/plain;charset=utf-8',
# 'payload_content_encoding': None},
'octet_content_type_base64_encoding': {
'payload_content_type': 'application/octet-stream',
'payload_content_encoding': 'base64'}
})
@testcase.attr('positive', 'deprecated')
def test_secret_create_defaults_valid_types_and_encoding_old_way(
self, payload_content_type, payload_content_encoding):
"""Creates secrets with various content types and encodings."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload_content_type = payload_content_type
test_model.payload_content_encoding = payload_content_encoding
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
get_resp = self.behaviors.get_secret_based_on_content_type(
secret_ref,
payload_content_type=payload_content_type,
payload_content_encoding=payload_content_encoding)
self.assertEqual(200, get_resp.status_code)
if payload_content_encoding == 'base64':
self.assertEqual(test_model.payload,
oslo_base64.encode_as_bytes(get_resp.content))
else:
self.assertEqual(test_model.payload, get_resp.content)
@utils.parameterized_dataset({
'empty_content_type_and_encoding': {
'payload_content_type': '',
'payload_content_encoding': ''},
'none_content_type_and_encoding': {
'payload_content_type': None,
'payload_content_encoding': None},
'large_string_content_type_and_encoding': {
'payload_content_type': base.TestCase.oversized_field,
'payload_content_encoding': base.TestCase.oversized_field},
'int_content_type_and_encoding': {
'payload_content_type': 123,
'payload_content_encoding': 123},
'none_content_type_base64_content_encoding': {
'payload_content_type': None,
'payload_content_encoding': 'base64'},
'text_content_type_none_content_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': ''},
'text_no_subtype_content_type_none_content_encoding': {
'payload_content_type': 'text',
'payload_content_encoding': None},
'text_slash_no_subtype_content_type_none_content_encoding': {
'payload_content_type': 'text/',
'payload_content_encoding': None},
'text_content_type_empty_content_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': ' '},
'text_content_type_spaces_content_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': ' '},
'text_content_type_base64_content_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': 'base64'},
'text_and_utf88_content_type_none_content_encoding': {
'payload_content_type': 'text/plain; charset=utf-88',
'payload_content_encoding': None},
'invalid_content_type_base64_content_encoding': {
'payload_content_type': 'invalid',
'payload_content_encoding': 'base64'},
'invalid_content_type_none_content_encoding': {
'payload_content_type': 'invalid',
'payload_content_encoding': None},
'octet_content_type_invalid_content_encoding': {
'payload_content_type': 'application/octet-stream',
'payload_content_encoding': 'invalid'},
'text_content_type_invalid_content_encoding': {
'payload_content_type': 'text/plain',
'payload_content_encoding': 'invalid'},
'none_content_type_invalid_content_encoding': {
'payload_content_type': None,
'payload_content_encoding': 'invalid'},
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_types_and_encoding(
self, payload_content_type, payload_content_encoding):
"""Creating secrets with invalid payload types and encodings."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload_content_type = payload_content_type
test_model.payload_content_encoding = payload_content_encoding
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@utils.parameterized_dataset({
'max_payload_string': [base.TestCase.max_sized_payload]
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_payload(self, payload):
"""Create secrets with a various valid payloads."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
overrides = {"payload": payload}
test_model.override_values(**overrides)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
@utils.parameterized_dataset({
'empty': [''],
'array': [['boom']],
'int': [123],
'none': [None],
'bad_character': [six.unichr(0x0080)],
'bad_characters': [six.unichr(0x1111) + six.unichr(0xffff)]
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_payload(self, payload):
"""Covers creating secrets with various invalid payloads."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
overrides = {"payload_content_type": "application/octet-stream",
"payload_content_encoding": "base64",
"payload": payload}
test_model.override_values(**overrides)
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@utils.parameterized_dataset({
'negative_five_long_expire': {
'timezone': '-05:00',
'days': 5},
'positive_five_long_expire': {
'timezone': '+05:00',
'days': 5},
'negative_one_short_expire': {
'timezone': '-01',
'days': 1},
'positive_one_short_expire': {
'timezone': '+01',
'days': 1}
})
@testcase.attr('positive')
def test_secret_create_defaults_valid_expiration(self, timezone, days):
"""Create secrets with a various valid expiration data."""
timestamp = utils.create_timestamp_w_tz_and_offset(timezone=timezone,
days=days)
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.expiration = timestamp
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
@utils.parameterized_dataset({
'malformed_timezone': {
'timezone': '-5:00',
'days': 0}
})
@testcase.attr('negative')
def test_secret_create_defaults_invalid_expiration(self, timezone, days):
"""Create secrets with various invalid expiration data."""
timestamp = utils.create_timestamp_w_tz_and_offset(timezone=timezone,
days=days)
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.expiration = timestamp
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@testcase.skipIf(not base.conf_host_href_used, 'response href using '
'wsgi request instead of CONF.host_href')
@testcase.attr('positive')
def test_secret_create_change_host_with_header_not_allowed(self, **kwargs):
"""Create a secret with a (possibly) malicious host name in header."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
malicious_hostname = 'some.bad.server.com'
changed_host_header = {'Host': malicious_hostname}
resp, secret_ref = self.behaviors.create_secret(
test_model, extra_headers=changed_host_header)
self.assertEqual(201, resp.status_code)
# get Location field from result and assert that it is NOT the
# malicious one.
regex = '.*{0}.*'.format(malicious_hostname)
self.assertNotRegex(resp.headers['location'], regex)
@testcase.skipIf(base.conf_host_href_used, 'response href using '
'CONF.host_href instead of wsgi request')
@testcase.attr('positive')
def test_secret_get_change_host_with_header_allowed(self, **kwargs):
"""Get secret metadata with alternative proxy host name in header."""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
another_proxy_hostname = 'proxy2.server.com'
changed_host_header = {'Host': another_proxy_hostname}
# In test, cannot pass different host header during create as returned
# secret_ref in response contains that host in url. That url is used in
# deleting that secret during cleanup step
resp, secret_ref = self.behaviors.create_secret(
test_model)
self.assertEqual(201, resp.status_code)
resp = self.behaviors.get_secret_metadata(
secret_ref, extra_headers=changed_host_header)
# Check returned href has provided proxy hostname
regex = '.*{0}.*'.format(another_proxy_hostname)
self.assertRegex(resp.model.secret_ref, regex)
@utils.parameterized_dataset({
'symmetric': ['symmetric',
oslo_base64.decode_as_bytes(
get_default_payload()),
get_default_data()],
'private': ['private',
keys.get_private_key_pem(),
get_private_key_req()],
'public': ['public',
keys.get_public_key_pem(),
get_public_key_req()],
'certificate': ['certificate',
keys.get_certificate_pem(),
get_certificate_req()],
'passphrase': ['<PASSWORD>phrase',
b'<PASSWORD>',
get_passphrase_req()]
})
@testcase.attr('positive')
def test_secret_create_with_secret_type(self, secret_type, expected, spec):
"""Create secrets with various secret types."""
test_model = secret_models.SecretModel(**spec)
test_model.secret_type = secret_type
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
resp = self.behaviors.get_secret_metadata(secret_ref)
secret_type_response = resp.model.secret_type
self.assertIsNotNone(secret_type_response)
self.assertEqual(secret_type, secret_type_response)
content_type = spec['payload_content_type']
get_resp = self.behaviors.get_secret(secret_ref,
content_type)
self.assertEqual(expected, get_resp.content)
@utils.parameterized_dataset({
'invalid_http_content_type_characaters_latin': {
'http_content_type': u'\u00c4'.encode('utf-8')},
'invalid_http_content_type_characaters_arabic': {
'http_content_type': u'\u060f'.encode('utf-8')},
'invalid_http_content_type_characaters_cyrillic': {
'http_content_type': u'\u0416'.encode('utf-8')},
'invalid_http_content_type_characaters_replacement_character': {
'http_content_type': u'\ufffd'.encode('utf-8')},
})
@testcase.attr('negative')
def test_secret_create_with_invalid_http_content_type_characters(
self, http_content_type):
"""Attempt to create secrets with invalid unicode characters in the
HTTP request's Content-Type header. Should return a 415.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
headers = {"Content-Type": http_content_type}
resp, secret_ref = self.behaviors.create_secret(test_model, headers)
self.assertEqual(415, resp.status_code)
@utils.parameterized_dataset({
'invalid_http_content_type_characaters_latin': {
'payload_content_type': u'\u00c4'.encode('utf-8')},
'invalid_http_content_type_characaters_arabic': {
'payload_content_type': u'\u060f'.encode('utf-8')},
'invalid_http_content_type_characaters_cyrillic': {
'payload_content_type': u'\u0416'.encode('utf-8')},
'invalid_http_content_type_characaters_replacement_character': {
'payload_content_type': u'\ufffd'.encode('utf-8')},
})
@testcase.attr('negative')
def test_secret_create_with_invalid_payload_content_type_characters(
self, payload_content_type):
"""Attempt to create secrets with non-ascii characters in the
payload's content type attribute. Should return a 400.
"""
test_model = secret_models.SecretModel(
**self.default_secret_create_data)
test_model.payload_content_type = payload_content_type
resp, secret_ref = self.behaviors.create_secret(test_model)
self.assertEqual(400, resp.status_code)
@utils.parameterized_test_case
class ListingSecretsTestCase(base.TestCase):
    """Functional tests for filtered listing of secrets (GET /secrets)."""

    def setUp(self):
        super(ListingSecretsTestCase, self).setUp()
        self.behaviors = secret_behaviors.SecretBehaviors(self.client)

    def tearDown(self):
        # Remove every secret created during the test so runs stay isolated.
        self.behaviors.delete_all_created_secrets()
        super(ListingSecretsTestCase, self).tearDown()

    @utils.parameterized_dataset({
        'query_by_name': {
            'secret_1_dict': dict(name="name1"),
            'secret_2_dict': dict(name="name2"),
            'query_dict': dict(name="name1")
        },
        'query_by_algorithm': {
            'secret_1_dict': dict(algorithm="algorithm1"),
            'secret_2_dict': dict(algorithm="algorithm2"),
            'query_dict': dict(alg="algorithm1")
        },
        'query_by_mode': {
            'secret_1_dict': dict(mode="mode1"),
            'secret_2_dict': dict(mode="mode2"),
            'query_dict': dict(mode="mode1")
        },
        'query_by_bit_length': {
            'secret_1_dict': dict(bit_length=1024),
            'secret_2_dict': dict(bit_length=2048),
            'query_dict': dict(bits=1024)
        },
        'query_by_secret_type': {
            'secret_1_dict': dict(secret_type=ss.SecretType.SYMMETRIC),
            'secret_2_dict': dict(secret_type=ss.SecretType.OPAQUE),
            'query_dict': dict(secret_type=ss.SecretType.SYMMETRIC)
        },
    })
    @testcase.attr('positive')
    def test_secret_list_with_filter(self, secret_1_dict, secret_2_dict,
                                     query_dict):
        """Listing with an attribute filter returns only the matching secret.

        Two secrets differing in exactly one attribute are created; a
        query filtering on that attribute must return exactly one entry.
        """
        secret_1 = secret_models.SecretModel(**secret_1_dict)
        secret_2 = secret_models.SecretModel(**secret_2_dict)
        self.behaviors.create_secret(secret_1)
        self.behaviors.create_secret(secret_2)
        resp, secrets_list, next_ref, prev_ref = self.behaviors.get_secrets(
            **query_dict)
        self.assertEqual(200, resp.status_code)
        self.assertEqual(1, len(secrets_list))

    @utils.parameterized_dataset({
        'created': {
            'date_type': 'created',
        },
        'updated': {
            'date_type': 'updated',
        },
        'expiration': {
            'date_type': 'expiration',
        },
    })
    @testcase.attr('positive')
    def test_secret_list_with_date_filter(self, date_type):
        """Listing with eq/lt/lte/gt/gte date filters on a timestamp field.

        Creates two secrets whose ``created``/``updated``/``expiration``
        timestamps differ (the sleep below guarantees distinct
        server-side times), then checks every comparison operator
        against the server-reported timestamps.
        """
        expiration_1 = str(
            datetime.datetime.utcnow() + datetime.timedelta(days=3))
        expiration_2 = str(
            datetime.datetime.utcnow() + datetime.timedelta(days=5))
        # Secret 1 is created in two phases (metadata, then payload) so
        # that its 'updated' timestamp differs from its 'created' one.
        two_phase_model = secret_models.SecretModel(expiration=expiration_1)
        resp, secret_ref_1 = self.behaviors.create_secret(two_phase_model)
        # Assert that the secret metadata was created successfully
        self.assertEqual(201, resp.status_code)
        payload = "gF6+lLoF3ohA9aPRpt+6bQ=="
        payload_content_type = "application/octet-stream"
        payload_content_encoding = "base64"
        update_resp = self.behaviors.update_secret_payload(
            secret_ref_1, payload=payload,
            payload_content_type=payload_content_type,
            payload_content_encoding=payload_content_encoding)
        # Assert that the secret payload was uploaded successfully
        self.assertEqual(204, update_resp.status_code)
        # Ensure secret 2's timestamps land strictly after secret 1's.
        time.sleep(1)
        model = secret_models.SecretModel(expiration=expiration_2)
        resp, secret_ref_2 = self.behaviors.create_secret(model)
        # Use the server-reported timestamps (not the local ones) as the
        # values to search on, so formatting matches exactly.
        resp_1 = self.behaviors.get_secret_metadata(secret_ref_1)
        resp_2 = self.behaviors.get_secret_metadata(secret_ref_2)
        time_to_search_1 = getattr(resp_1.model, date_type)
        time_to_search_2 = getattr(resp_2.model, date_type)
        # Search for secrets with secret 1's time
        query_dict = {date_type: time_to_search_1}
        resp, secrets_list, next_ref, prev_ref = self.behaviors.get_secrets(
            **query_dict)
        self.assertEqual(200, resp.status_code)
        self.assertEqual(1, len(secrets_list))
        self.assertEqual(secret_ref_1, secrets_list[0].secret_ref)
        # Search for secrets with time < secret 2, i.e. secret 1
        query_dict = {date_type: 'lt:' + time_to_search_2}
        resp, secrets_list, next_ref, prev_ref = self.behaviors.get_secrets(
            **query_dict)
        self.assertEqual(200, resp.status_code)
        self.assertEqual(1, len(secrets_list))
        self.assertEqual(secret_ref_1, secrets_list[0].secret_ref)
        # Search for secrets with time <= secret 2, i.e. both secrets
        query_dict = {date_type: 'lte:' + time_to_search_2,
                      'sort': date_type + ':asc'}
        resp, secrets_list, next_ref, prev_ref = self.behaviors.get_secrets(
            **query_dict)
        self.assertEqual(200, resp.status_code)
        self.assertEqual(2, len(secrets_list))
        self.assertEqual(secret_ref_1, secrets_list[0].secret_ref)
        self.assertEqual(secret_ref_2, secrets_list[1].secret_ref)
        # Search for secrets with time > secret 1, i.e. secret 2
        query_dict = {date_type: 'gt:' + time_to_search_1}
        resp, secrets_list, next_ref, prev_ref = self.behaviors.get_secrets(
            **query_dict)
        self.assertEqual(200, resp.status_code)
        self.assertEqual(1, len(secrets_list))
        self.assertEqual(secret_ref_2, secrets_list[0].secret_ref)
        # Search for secrets with time >= secret 1, i.e. both secrets
        query_dict = {date_type: 'gte:' + time_to_search_1,
                      'sort': date_type + ':asc'}
        resp, secrets_list, next_ref, prev_ref = self.behaviors.get_secrets(
            **query_dict)
        self.assertEqual(200, resp.status_code)
        self.assertEqual(2, len(secrets_list))
        self.assertEqual(secret_ref_1, secrets_list[0].secret_ref)
        self.assertEqual(secret_ref_2, secrets_list[1].secret_ref)
class SecretsPagingTestCase(base.PagingTestCase):
    """Exercise the shared paging test suite against the secrets resource.

    base.PagingTestCase supplies the actual paging tests; this class only
    provides the resource-specific hooks it calls.
    """

    def setUp(self):
        super(SecretsPagingTestCase, self).setUp()
        self.behaviors = secret_behaviors.SecretBehaviors(self.client)
        # Work on a private, mutable copy of the default data so paging
        # tests cannot contaminate the shared defaults.
        self.create_default_data = get_default_data()

    def tearDown(self):
        self.behaviors.delete_all_created_secrets()
        super(SecretsPagingTestCase, self).tearDown()

    def create_model(self):
        """Return a fresh secret model built from the default data."""
        return secret_models.SecretModel(**self.create_default_data)

    def create_resources(self, count=0, model=None):
        """Create ``count`` secrets from ``model``."""
        for _ in range(count):
            self.behaviors.create_secret(model)

    def get_resources(self, limit=10, offset=0, filter=None):
        """Fetch one page of secrets, optionally filtered by name."""
        return self.behaviors.get_secrets(limit=limit, offset=offset,
                                          name=filter)

    def set_filter_field(self, unique_str, model):
        """Set the name field which we use in the get_resources."""
        model.name = unique_str
class SecretsUnauthedTestCase(base.TestCase):
    """Negative tests: secret operations without a token must return 401.

    Every create/get/put/delete is attempted without authentication, with
    and without a real or bogus ``X-Project-Id`` header, against both a
    real and a non-existent secret reference.  All must be rejected.
    """

    def setUp(self):
        super(SecretsUnauthedTestCase, self).setUp()
        self.behaviors = secret_behaviors.SecretBehaviors(self.client)
        self.default_secret_create_data = get_default_data()
        # NOTE(review): this ref uses an 'orders/' path even though it
        # stands in for a secret; it only needs to be a non-existent url.
        self.dummy_secret_ref = 'orders/dummy-7b86-4071-935d-ef6b83729200'
        self.dummy_project_id = 'dummy'
        # One real (authenticated) secret to target in the *_real_secret
        # tests below.
        resp, self.real_secret_ref = self.behaviors.create_secret(
            secret_models.SecretModel(**self.default_secret_create_data)
        )
        # Recover the authenticated user's real project id so requests can
        # carry a *valid* X-Project-Id while still lacking a token.
        stored_auth = self.client._auth[
            self.client._default_user_name].stored_auth
        project_id = list(stored_auth.values())[0]['project_id']
        self.project_id_header = {
            'X-Project-Id': project_id
        }
        self.dummy_project_id_header = {
            'X-Project-Id': self.dummy_project_id
        }

    def tearDown(self):
        self.behaviors.delete_all_created_secrets()
        super(SecretsUnauthedTestCase, self).tearDown()

    @testcase.attr('negative', 'security')
    def test_secret_create_unauthed_no_proj_id(self):
        """Attempt to create a secret without a token or project id

        Should return 401
        """
        # Bug fix: the default data must be splatted into keyword
        # arguments, exactly as setUp and every other test do.
        model = secret_models.SecretModel(**self.default_secret_create_data)
        resp, secret_ref = self.behaviors.create_secret(model, use_auth=False)
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_create_unauthed_fake_proj_id(self):
        """Attempt to create a secret with a project id but no token

        Should return 401
        """
        # Bug fix: splat the default data (was passed positionally).
        model = secret_models.SecretModel(**self.default_secret_create_data)
        resp, secret_ref = self.behaviors.create_secret(
            model, extra_headers=self.dummy_project_id_header, use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_create_unauthed_real_proj_id(self):
        """Attempt to create a secret with a project id but no token

        Should return 401
        """
        # Bug fix: splat the default data (was passed positionally).
        model = secret_models.SecretModel(**self.default_secret_create_data)
        resp, secret_ref = self.behaviors.create_secret(
            model, extra_headers=self.project_id_header, use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_get_unauthed_no_proj_id_fake_secret(self):
        """Attempt to read a non-existant secret without a token or project id

        Should return 401
        """
        resp = self.behaviors.get_secret(
            self.dummy_secret_ref,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64', use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_get_unauthed_no_proj_id_real_secret(self):
        """Attempt to read an existing secret without a token or project id

        Should return 401
        """
        resp = self.behaviors.get_secret(
            self.real_secret_ref,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64', use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_get_unauthed_fake_proj_id_fake_secret(self):
        """Attempt to get a non-existant secret with a project id but no token

        Should return 401
        """
        resp = self.behaviors.get_secret(
            self.dummy_secret_ref,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64',
            extra_headers=self.dummy_project_id_header,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_get_unauthed_fake_proj_id_real_secret(self):
        """Attempt to get an existing secret with a project id but no token

        Should return 401
        """
        resp = self.behaviors.get_secret(
            self.real_secret_ref,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64',
            extra_headers=self.dummy_project_id_header,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_get_unauthed_real_proj_id_fake_secret(self):
        """Attempt to get a non-existant secret with a project id but no token

        Should return 401
        """
        resp = self.behaviors.get_secret(
            self.dummy_secret_ref,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64',
            extra_headers=self.project_id_header,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_get_unauthed_real_proj_id_real_secret(self):
        """Attempt to get an existing secret with a project id but no token

        Should return 401
        """
        resp = self.behaviors.get_secret(
            self.real_secret_ref,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64',
            extra_headers=self.project_id_header,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_put_unauthed_no_proj_id_fake_secret(self):
        """Attempt to update a non-existant secret without a token or project id

        Should return 401
        """
        resp = self.behaviors.update_secret_payload(
            self.dummy_secret_ref, payload=None,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64', use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_put_unauthed_no_proj_id_real_secret(self):
        """Attempt to update an existing secret without a token or project id

        Should return 401
        """
        resp = self.behaviors.update_secret_payload(
            self.real_secret_ref, payload=None,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64', use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_put_unauthed_fake_proj_id_fake_secret(self):
        """Attempt to update a non-existant secret with a project id, but no token

        Should return 401
        """
        resp = self.behaviors.update_secret_payload(
            self.dummy_secret_ref, payload=None,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64',
            extra_headers=self.dummy_project_id_header,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_put_unauthed_fake_proj_id_real_secret(self):
        """Attempt to update an existing secret with a project id, but no token

        Should return 401
        """
        resp = self.behaviors.update_secret_payload(
            self.real_secret_ref, payload=None,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64',
            extra_headers=self.dummy_project_id_header,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_put_unauthed_real_proj_id_fake_secret(self):
        """Attempt to update a non-existant secret with a project id, but no token

        Should return 401
        """
        resp = self.behaviors.update_secret_payload(
            self.dummy_secret_ref, payload=None,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64',
            extra_headers=self.project_id_header,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_put_unauthed_real_proj_id_real_secret(self):
        """Attempt to update an existing secret with a project id, but no token

        Should return 401
        """
        resp = self.behaviors.update_secret_payload(
            self.real_secret_ref, payload=None,
            payload_content_type='application/octet-stream',
            payload_content_encoding='base64',
            extra_headers=self.project_id_header,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_delete_unauthed_no_proj_id_fake_secret(self):
        """Attempt to delete a non-existant secret without a token or project id

        Should return 401
        """
        resp = self.behaviors.delete_secret(
            self.dummy_secret_ref, expected_fail=True, use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_delete_unauthed_no_proj_id_real_secret(self):
        """Attempt to delete an existing secret without a token or project id

        Should return 401
        """
        resp = self.behaviors.delete_secret(
            self.real_secret_ref, expected_fail=True, use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_delete_unauthed_fake_proj_id_fake_secret(self):
        """Attempt to delete a non-existant secret with a project id, but no token

        Should return 401
        """
        resp = self.behaviors.delete_secret(
            self.dummy_secret_ref,
            extra_headers=self.dummy_project_id_header, expected_fail=True,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_delete_unauthed_fake_proj_id_real_secret(self):
        """Attempt to delete an existing secret with a project id, but no token

        Should return 401
        """
        resp = self.behaviors.delete_secret(
            self.real_secret_ref,
            extra_headers=self.dummy_project_id_header, expected_fail=True,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_delete_unauthed_real_proj_id_fake_secret(self):
        """Attempt to delete a non-existant secret with a project id, but no token

        Should return 401
        """
        resp = self.behaviors.delete_secret(
            self.dummy_secret_ref,
            extra_headers=self.project_id_header, expected_fail=True,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_delete_unauthed_real_proj_id_real_secret(self):
        """Attempt to delete an existing secret with a project id, but no token

        Should return 401
        """
        resp = self.behaviors.delete_secret(
            self.real_secret_ref,
            extra_headers=self.project_id_header, expected_fail=True,
            use_auth=False
        )
        self.assertEqual(401, resp.status_code)
@utils.parameterized_test_case
class SecretsMultipleBackendTestCase(base.TestCase):
    """Secret creation when barbican is deployed with multiple backends.

    setUp gives ``admin_a``'s project an explicit preferred secret store,
    while ``admin_b``'s project is left to fall back to the global default
    store; the same create/read flow must work for both.
    """

    def setUp(self):
        super(SecretsMultipleBackendTestCase, self).setUp()
        self.behaviors = secret_behaviors.SecretBehaviors(self.client)
        self.ss_behaviors = secretstores_behaviors.SecretStoresBehaviors(
            self.client)
        self.default_secret_create_data = get_default_data()
        if base.conf_multiple_backends_enabled:
            # set preferred secret store for admin_a (project a) user
            # and don't set preferred secret store for admin_b (project b) user
            resp, stores = self.ss_behaviors.get_all_secret_stores(
                user_name=admin_a)
            self.assertEqual(200, resp.status_code)
            # Prefer the first non-global store when one exists; otherwise
            # (redundantly) mark the global default as preferred.
            global_ss = None
            first_non_global_ss = None
            for store in stores['secret_stores']:
                if store['global_default']:
                    global_ss = store['secret_store_ref']
                else:
                    first_non_global_ss = store['secret_store_ref']
                    break
            self.ss_behaviors.set_preferred_secret_store(
                first_non_global_ss or global_ss,
                user_name=admin_a
            )

    def tearDown(self):
        self.behaviors.delete_all_created_secrets()
        if base.conf_multiple_backends_enabled:
            # Undo the preferred-store assignment made in setUp.
            self.ss_behaviors.cleanup_preferred_secret_store_entities()
        super(SecretsMultipleBackendTestCase, self).tearDown()

    @testcase.skipUnless(base.conf_multiple_backends_enabled, 'executed only '
                         'when multiple backends support is enabled in '
                         'barbican server side')
    @utils.parameterized_dataset({
        'symmetric_type_preferred_store': [
            admin_a,
            'symmetric',
            oslo_base64.decode_as_bytes(get_default_payload()),
            get_default_data()
        ],
        'private_type_preferred_store': [
            admin_a,
            'private',
            keys.get_private_key_pem(),
            get_private_key_req()
        ],
        'public_type_preferred_store': [
            admin_a,
            'public',
            keys.get_public_key_pem(),
            get_public_key_req()
        ],
        'certificate_type_preferred_store': [
            admin_a,
            'certificate',
            keys.get_certificate_pem(),
            get_certificate_req()
        ],
        'passphrase_type_preferred_store': [
            admin_a,
            'passphrase',
            'mysecretpassphrase',
            get_passphrase_req()
        ],
        'symmetric_type_no_preferred_store': [
            admin_b,
            'symmetric',
            oslo_base64.decode_as_bytes(get_default_payload()),
            get_default_data()
        ],
        'private_type_no_preferred_store': [
            admin_b,
            'private',
            keys.get_private_key_pem(),
            get_private_key_req()
        ],
        'public_type_no_preferred_store': [
            admin_b,
            'public',
            keys.get_public_key_pem(),
            get_public_key_req()
        ],
        'certificate_type_no_preferred_store': [
            admin_b,
            'certificate',
            keys.get_certificate_pem(),
            get_certificate_req()
        ],
        'passphrase_type_no_preferred_store': [
            admin_b,
            'passphrase',
            b'<PASSWORD>',
            get_passphrase_req()
        ],
    })
    def test_secret_create_for(self, user_name, secret_type, expected, spec):
        """Create secrets with various secret types with multiple backends.

        Creates as the given user, then verifies the stored secret_type
        and the decoded payload round-trip through the read calls.
        """
        test_model = secret_models.SecretModel(**spec)
        test_model.secret_type = secret_type
        resp, secret_ref = self.behaviors.create_secret(test_model,
                                                        user_name=user_name,
                                                        admin=user_name)
        self.assertEqual(201, resp.status_code)
        resp = self.behaviors.get_secret_metadata(secret_ref,
                                                  user_name=user_name)
        secret_type_response = resp.model.secret_type
        self.assertIsNotNone(secret_type_response)
        self.assertEqual(secret_type, secret_type_response)
        content_type = spec['payload_content_type']
        get_resp = self.behaviors.get_secret(secret_ref,
                                             content_type,
                                             user_name=user_name)
        self.assertEqual(expected, get_resp.content)
| StarcoderdataPython |
6488295 | from datetime import datetime
import pytz
from django.contrib.postgres.fields import ArrayField
from django.db import models
from osf.models import Node
from osf.models import OSFUser
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.validators import validate_subscription_type
from website.notifications.constants import NOTIFICATION_TYPES
class NotificationSubscription(BaseModel):
    """A subscription tying users to notifications for one event on a target.

    ``_id`` encodes the target and the event (e.g. ``pxyz_wiki_updated``
    for a node, ``uabc_comment_replies`` for a user).  The three
    many-to-many fields bucket subscribed users by delivery preference.
    """

    primary_identifier_name = '_id'

    # TODO DELETE ME POST MIGRATION
    modm_model_path = 'website.notifications.model.NotificationSubscription'
    modm_query = None
    migration_page_size = 120000
    # /TODO DELETE ME POST MIGRATION

    _id = models.CharField(max_length=50, db_index=True)  # pxyz_wiki_updated, uabc_comment_replies
    event_name = models.CharField(max_length=50)  # wiki_updated, comment_replies
    # Exactly one of `user` / `node` is expected to be set; see `owner`.
    user = models.ForeignKey('OSFUser', null=True, related_name='notification_subscriptions', blank=True)
    node = models.ForeignKey('Node', null=True, blank=True, related_name='notification_subscriptions')

    # Notification types: one bucket per entry in NOTIFICATION_TYPES.
    none = models.ManyToManyField('OSFUser', related_name='+')  # reverse relationships
    email_digest = models.ManyToManyField('OSFUser', related_name='+')  # for these
    email_transactional = models.ManyToManyField('OSFUser', related_name='+')  # are pointless

    @classmethod
    def load(cls, q):
        """Return the subscription with ``_id == q``, or None if absent."""
        # modm doesn't throw exceptions when loading things that don't exist
        try:
            return cls.objects.get(_id=q)
        except cls.DoesNotExist:
            return None

    @classmethod
    def migrate_from_modm(cls, modm_obj):
        """
        Given a modm object, make a django object with the same local fields.

        This is a base method that may work for simple things. It should be customized for complex ones.
        :param modm_obj:
        :return:
        """
        django_obj = cls()
        django_obj._id = modm_obj._id
        # Copy only the non-relational fields that exist on both models.
        local_django_fields = set([x.name for x in django_obj._meta.get_fields() if not x.is_relation])
        intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
            set(local_django_fields))
        for field in intersecting_fields:
            modm_value = getattr(modm_obj, field)
            if modm_value is None:
                continue
            if isinstance(modm_value, datetime):
                # modm datetimes are naive; Django expects tz-aware UTC.
                modm_value = pytz.utc.localize(modm_value)
            setattr(django_obj, field, modm_value)
        return django_obj

    @property
    def owner(self):
        """The user or node this subscription belongs to (user wins)."""
        # ~100k have owner==user
        if self.user is not None:
            return self.user
        # ~8k have owner=Node
        elif self.node is not None:
            return self.node

    @owner.setter
    def owner(self, value):
        # Route the assignment to the matching FK by runtime type.
        if isinstance(value, OSFUser):
            self.user = value
        elif isinstance(value, Node):
            self.node = value

    def add_user_to_subscription(self, user, notification_type, save=True):
        """Put ``user`` into the ``notification_type`` bucket, leaving others.

        Also records the node in the parent node's
        ``child_node_subscriptions`` bookkeeping for non-'none' types.
        """
        # Move the user: remove from any other bucket, add to the target.
        for nt in NOTIFICATION_TYPES:
            if getattr(self, nt).filter(id=user.id).exists():
                if nt != notification_type:
                    getattr(self, nt).remove(user)
            else:
                if nt == notification_type:
                    getattr(self, nt).add(user)
        if notification_type != 'none' and isinstance(self.owner, Node) and self.owner.parent_node:
            user_subs = self.owner.parent_node.child_node_subscriptions
            if self.owner._id not in user_subs.setdefault(user._id, []):
                user_subs[user._id].append(self.owner._id)
                self.owner.parent_node.save()
        if save:
            self.save()

    def remove_user_from_subscription(self, user, save=True):
        """Drop ``user`` from every bucket and from parent bookkeeping."""
        for notification_type in NOTIFICATION_TYPES:
            # NOTE(review): Django related managers do not raise ValueError
            # from remove(); this guard looks like a modm holdover — confirm.
            try:
                getattr(self, notification_type, []).remove(user)
            except ValueError:
                pass
        if isinstance(self.owner, Node) and self.owner.parent_node:
            try:
                self.owner.parent_node.child_node_subscriptions.get(user._id, []).remove(self.owner._id)
                self.owner.parent_node.save()
            except ValueError:
                # User/node pair was not in the bookkeeping list; nothing to do.
                pass
        if save:
            self.save()
class NotificationDigest(ObjectIDMixin, BaseModel):
    """One pending notification message, later batched into digest emails."""

    # TODO DELETE ME POST MIGRATION
    modm_model_path = 'website.notifications.model.NotificationDigest'
    modm_query = None
    # /TODO DELETE ME POST MIGRATION

    # Recipient of the eventual digest email.
    user = models.ForeignKey('OSFUser', null=True, blank=True)
    timestamp = models.DateTimeField()
    # Delivery channel; constrained to the known subscription types.
    send_type = models.CharField(max_length=50, db_index=True, validators=[validate_subscription_type, ])
    event = models.CharField(max_length=50)
    message = models.CharField(max_length=2048)
    # Chain of node ids from root to the triggering node
    # (presumably 5-char OSF GUIDs, given max_length=5 — confirm).
    # TODO: Could this be a m2m with or without an order field?
    node_lineage = ArrayField(models.CharField(max_length=5))
| StarcoderdataPython |
def groupingDishes(dishes):
    """Group dish names by shared ingredient.

    Parameters
    ----------
    dishes : list[list[str]]
        Each entry is ``[dish_name, ingredient1, ingredient2, ...]``.

    Returns
    -------
    list[list[str]]
        One entry ``[ingredient, dish1, dish2, ...]`` for every
        ingredient used by at least two dishes.  Ingredients are sorted
        alphabetically, as are the dish names within each entry.
    """
    by_ingredient = {}
    for entry in dishes:
        dish_name = entry[0]
        for ingredient in entry[1:]:
            # setdefault replaces the explicit "key present?" branch;
            # the stray debug print(d) has also been removed.
            by_ingredient.setdefault(ingredient, []).append(dish_name)
    return [[ingredient] + sorted(names)
            for ingredient, names in sorted(by_ingredient.items())
            if len(names) > 1]
| StarcoderdataPython |
8088864 | <reponame>takaaki82/Java-Lessons
N = int(input())
a_list = [int(input()) for _ in range(N)]
man = {}
for a in a_list:
if a in man:
man[a] += 1
else:
man[a] = 1
sorted_man = sorted(man.items(), key=lambda x: -x[0])
ans = 0
minus_1 = 0
remain2 = 0
for a, cnt in sorted_man:
if a == 4:
ans += cnt
if a == 3:
ans += cnt
if 1 in man:
if man[1] >= cnt:
minus_1 += cnt
else:
minus_1 += man[1]
if a == 2:
if cnt % 2 == 0:
ans += cnt // 2
else:
ans += cnt // 2
remain2 += 2
if a == 1:
cnt -= minus_1
cnt += remain2
remain2 -= remain2
if cnt > 0:
ans += cnt // 4
if cnt % 4 != 0:
ans += 1
if remain2 > 0:
ans += 1
print(ans)
| StarcoderdataPython |
4998835 | """
[PYTHON NAMING CONVENTION]
module_name, package_name, ClassName, method_name, ExceptionName, function_name,
GLOBAL_CONSTANT_NAME, global_var_name, instance_var_name, function_parameter_name,
local_var_name.
"""
import sys, os
import cv2
import re
import pprint
import numpy as np
import time, datetime
from modules.utils import ( my_print, quaternion2euler, MyVideo, str2float)
from modules.constants import Constants
try:
import mujoco_py as mjPy
except ImportError as e:
raise error.DependencyNotInstalled( "{}. (HINT: you need to install mujoco_py, \
and also perform the setup instructions here: \
https://github.com/openai/mujoco-py/.)".format( e ) )
class Simulation( ):
"""
Class for the mujoco-simulation
[INHERITANCE]
[DESCRIPTION]
[NOTE]
All of the model files are saved in "models" directory, and we are using "relative directory"
to generate and find the .xml model file. Hence do not change of "model directory" variable within this
"""
MODEL_DIR = Constants.MODEL_DIR
SAVE_DIR = Constants.SAVE_DIR
current_time = 0
controller = None # Control input function
controller_inputs = None
t_step = None # Time step of the simulation
n_steps = 0 # Number of total steps of the simulation
    def __init__( self, model_name = None, arg_parse = None ):
        """
        Default constructor of THIS class

        [ARGUMENTS]
            [NAME]             [TYPE]        [DESCRIPTION]
            (1) model_name     string        The xml model file name for running the MuJoCo simulation.
                                             May be given without the ".xml" suffix; it is appended below.
                                             If None, an empty simulation shell is created and a model must
                                             be supplied later via `attach_model`.
            (2) arg_parse      dictionary    Dictionary which contains all the arguments given to the main `run.py` script.
                                             NOTE(review): when model_name is given, 'runTime' and 'startTime'
                                             keys are read here, so arg_parse must not be None in that case — confirm.
        """
        if model_name is None:
            # No model yet: leave every MuJoCo handle empty until attach_model is called.
            self.mjModel  = None
            self.mjSim    = None
            self.mjData   = None
            self.mjViewer = None
            self.args     = arg_parse

            my_print( WARNING = "MODEL FILE NOT GIVEN, PLEASE INPUT XML MODEL FILE WITH `attach_model` MEMBER FUNCTION" )

        else:
            # If model_name is given, then check if there exist ".xml" at the end, if not, append
            model_name = model_name + ".xml" if model_name[ -4: ] != ".xml" else model_name
            self.model_name = model_name

            # Based on the model_name, construct the simulation.
            self.mjModel  = mjPy.load_model_from_path( self.MODEL_DIR + model_name )      # Loading xml model as and save it as "model"
            self.mjSim    = mjPy.MjSim( self.mjModel )                                    # Construct the simulation environment and save it as "sim"
            self.mjData   = self.mjSim.data                                               # Construct the basic MuJoCo data and save it as "mjData"
            self.mjViewer = mjPy.MjViewerBasic( self.mjSim )                              # Construct the basic MuJoCo viewer and save it as "myViewer"
            self.args     = arg_parse

            # Saving the default simulation variables
            self.fps         = 60                                                         # Frames per second for the mujoco render
            self.dt          = self.mjModel.opt.timestep                                  # Time step of the simulation [sec]
            self.sim_step    = 0                                                          # Number of steps of the simulation, in integer [-]
            self.update_rate = round( 1 / self.dt / self.fps )                            # 1/dt = number of steps N for 1 second simulaiton, dividing this with frames-per-second (fps) gives us the frame step to be updated.
            self.g           = self.mjModel.opt.gravity                                   # Calling the gravity vector of the simulation environment

            # Saving additional model parameters for multiple purposes
            self.act_names      = self.mjModel.actuator_names
            self.geom_names     = self.mjModel.geom_names
            self.idx_geom_names = [ self.mjModel._geom_name2id[ name ] for name in self.geom_names ]

            self.run_time   = float( self.args[ 'runTime' ] )                             # Run time of the total simulation
            self.start_time = float( self.args[ 'startTime' ] )                           # Start time of the movements
    def attach_model( self, model_name ):
        """Load ``model_name`` into this simulation via re-initialization.

        Warns (but proceeds) when a model is already attached, since
        calling ``__init__`` again replaces every simulation handle.
        """
        if self.mjModel is not None:
            my_print( WARNING = "MODEL FILE EXIST! OVERWRITTING THE WHOLE MUJOCO FILE" )
        # Re-run the constructor; note this resets args to the default None.
        self.__init__( model_name )
def attach_controller( self, controller ):
"""
Attaching the controller object for running the simulation.
For detailed controller description, please check "input_ctrls.py"
"""
ctrl_name = self.controller.__class__.__name__ # Getting the name of the controller. The controller names are indicated in "input_ctrls.py"
my_print( Controller = ctrl_name )
self.controller = controller
def run( self ):
    """
    Run a single simulation until self.run_time is reached.

    Renders the viewer at ~self.fps, optionally records a video and logs
    data, and on every step asks the attached controller for the next
    input before advancing MuJoCo by one time step.

    [INPUT]
    [VAR NAME]           [TYPE]   [DESCRIPTION]
    (1) run_time         float    The whole run time of the simulation.
    (2) ctrl_start_time  float
    """
    # Check if mjModel or mjSim is empty and raise error
    if self.mjModel is None or self.mjSim is None:
        raise ValueError( "mjModel and mjSim is Empty! Add it before running simulation" )
    # A controller is mandatory: it produces the actuation on every step.
    if self.controller is None:
        raise ValueError( "CONTROLLER NOT ATTACHED TO SIMULATION. \
PLEASE REFER TO METHOD 'attach_output_function' and 'attach_controller' " )
    if self.args[ 'recordVideo' ]:
        vid = MyVideo( fps = self.fps,
                       vid_dir = self.args[ 'saveDir' ] )  # If args doesn't have saveDir attribute, save vid_dir as None
    if self.args[ 'saveData' ]:
        file = open( self.args[ 'saveDir' ] + "data_log.txt", "w+" )
    # Setting the camera position for the simulation.
    # camPos is a 6-vector: [ lookat_x, lookat_y, lookat_z, distance, elevation, azimuth ]
    # [camParameters]: [ 0.17051, 0.21554, -0.82914, 2.78528,-30.68421,162.42105 ]
    # [camParameters]: [ -0.10325, 0. , -2.51498, 7.278 ,-45. , 90. ]
    if self.args[ 'camPos' ] is not None:
        tmp = str2float( self.args[ 'camPos' ] )
        self.mjViewer.cam.lookat[ 0:3 ] = tmp[ 0 : 3 ]
        self.mjViewer.cam.distance    = tmp[ 3 ]
        self.mjViewer.cam.elevation   = tmp[ 4 ]
        self.mjViewer.cam.azimuth     = tmp[ 5 ]
    while self.current_time <= self.run_time:
        # [BACKUP] If we want to save all the details of the simulation
        # if self.args[ 'saveData' ]:
        #     my_print( currentTime = self.current_time,
        #               geomXYZVelocities = self.mjData.geom_xvelp[ self.idx_geom_names ],
        #               file = file )

        # Render/record only every `update_rate`-th step so the viewer
        # effectively runs at self.fps rather than at the physics rate.
        if self.sim_step % self.update_rate == 0:
            self.mjViewer.render( )  # Render the simulation
            if self.args[ 'verbose' ]:
                my_print( camParameters = [ self.mjViewer.cam.lookat[ 0 ], self.mjViewer.cam.lookat[ 1 ], self.mjViewer.cam.lookat[ 2 ],
                                            self.mjViewer.cam.distance, self.mjViewer.cam.elevation, self.mjViewer.cam.azimuth ] )
            if self.args[ 'recordVideo' ]:
                vid.write( self.mjViewer )
            # if self.args[ 'saveData' ]:
            #     my_print( currentTime = self.current_time,
            #               jointAngleActual = self.mjData.qpos[ : ],
            #               jointVelActual = self.mjData.qvel[ : ],
            #               geomXYZPositions = self.mjData.geom_xpos[ self.idx_geom_names ],
            #               geomXYZVelocities = self.mjData.geom_xvelp[ self.idx_geom_names ],
            #               jacobian = self.mjData.get_geom_jacp( "geom_EE" ).reshape( 3, -1 ),
            #               file = file )
            # xvel stands for positional(cartesian) velocity in world frame [REF] https://github.com/openai/mujoco-py/issues/255

        # [input controller]
        # input_ref: The data array that are aimed to be inputted (e.g., qpos, qvel, qctrl etc.)
        # input_idx: The specific index of input_ref data array that should be inputted
        # input:     The actual input value which is inputted to input_ref
        input_ref, input_idx, input = self.controller.input_calc( self.start_time, self.current_time )
        input_ref[ input_idx ] = input
        # self.ctrl_array = np.vstack( ( self.ctrl_array, input ) ) if self.ctrl_array is not None else input
        # self.q_array = np.vstack( ( self.q_array, self.mjData.qpos[ : self.ctrl_input.n_act ] ) ) if self.q_array is not None else self.mjData.qpos[ : self.ctrl_input.n_act ]
        # self.t_vec = np.append( self.t_vec, self.current_time ) if self.t_vec is not None else self.current_time
        self.mjSim.step( )  # Single step update
        if( self.is_sim_unstable() ):  # Check if simulation is stable
            # Halt early when joint states blow up; mark output as NaN.
            print( "[WARNING] UNSTABLE SIMULATION, HALTED AT {0:f} for at {1:f}".format( self.current_time, self.run_time ) )
            self.output_array = [ np.nan ]
            break
        self.current_time = self.mjData.time  # Update the current_time variable of the simulation
        if self.sim_step % self.update_rate == 0:
            if self.args[ 'saveData' ]:
                # Saving all the necessary datas for the simulation.
                # NOTE(review): `output_val` and `self.output_func` are never
                # assigned anywhere in this method — this branch will raise
                # NameError when saveData is enabled. Confirm where they were
                # meant to be produced.
                my_print( inputVal = input,
                          outputVal = output_val if self.output_func is not None else 0,
                          file = file )
        self.sim_step += 1
    # [BACKUP] If we want to save all the details of the simulation
    # if self.args[ 'saveData' ]:
    #     my_print( inputVal = input,
    #               outputVal = output_val if self.output_func is not None else 0,
    #               file = file )
    if self.args[ 'recordVideo' ]:
        vid.release( )  # If simulation is finished, wrap-up the video file.
    if self.args[ 'saveData' ]:
        file.close()
    return self.ctrl_array, self.output_array
def save_simulation_data( self, dir ):
    """
    Dump the controller parameters and the command-line arguments of this
    run to "<dir>/simulation_details.txt".
    """
    # Normalize the directory string so that it always ends with a slash.
    if dir is not None and dir[ -1 ] != "/":
        dir = dir + "/"
    with open( dir + "simulation_details.txt", "w+" ) as details:
        # Pretty-print the controller internals for readability, then append
        # the raw argument dictionary on its own line.
        pprint.pprint( self.ctrl_input.__dict__, details )
        print( self.args, file = details )
def is_sim_unstable( self ):
    """Return True when any joint state has blown past a sanity threshold."""
    threshold = 5 * 10 ** 6
    # The simulation is declared unstable as soon as any position, velocity
    # or acceleration value leaves the [-threshold, threshold] band.
    for state in ( self.mjData.qpos, self.mjData.qvel, self.mjData.qacc ):
        if max( np.absolute( state ) ) > threshold:
            return True
    return False
def reset( self ):
    """Rewind the simulation: zero the clocks/counters and reset MuJoCo."""
    # Bookkeeping first (independent assignments), then let MuJoCo restore
    # the model state itself.
    self.sim_step = 0
    self.current_time = 0
    self.controller_inputs = None
    self.mjSim.reset( )
| StarcoderdataPython |
8167415 | from __future__ import annotations
from prettyqt import constants, core, gui
from prettyqt.qt import QtWidgets
from prettyqt.utils import InvalidParamError
QtWidgets.QShortcut.__bases__ = (core.Object,)
class Shortcut(QtWidgets.QShortcut):
    """QShortcut subclass adding snake_case accessors and serialization."""

    def __str__(self):
        key_sequence = self.key()
        return key_sequence.toString()

    def serialize_field(self):
        """Collect the shortcut's current state in a plain dict."""
        return {
            "auto_repeat": self.autoRepeat(),
            "context": self.get_context(),
            "enabled": self.isEnabled(),
            "key": self.get_key(),
            "whats_this": self.whatsThis(),
        }

    def set_context(self, context: constants.ContextStr):
        """Set shortcut context.

        Args:
            context: shortcut context

        Raises:
            InvalidParamError: shortcut context does not exist
        """
        try:
            qt_context = constants.CONTEXT[context]
        except KeyError:
            raise InvalidParamError(context, constants.CONTEXT) from None
        self.setContext(qt_context)

    def get_context(self) -> constants.ContextStr:
        """Return shortcut context.

        Returns:
            shortcut context
        """
        current = self.context()
        return constants.CONTEXT.inverse[current]

    def get_key(self) -> gui.KeySequence:
        """Return the shortcut's key sequence.

        Returns:
            Key sequence
        """
        portable = self.key().toString()
        return gui.KeySequence(portable, gui.KeySequence.SequenceFormat.PortableText)
| StarcoderdataPython |
1705208 | from time import sleep
def lucy_apresentacao():
    """Show Lucy's loading banner and greetings, then start the interview."""
    sleep(2)
    print()
    print()
    print('<<<<< CARREGANDO >>>>> ')
    print()
    # Greeting messages, each preceded by the same dramatic pause.
    mensagens = (
        'Olá, me chamo Lucy, seja bem vindo ao meu ambiente virtual... ',
        'Para que possamos ter uma experiência agradavel me diga um pouco sobre você... ',
    )
    for mensagem in mensagens:
        sleep(3)
        print(mensagem)
    sleep(3)
    usuario_apresentacao()
def usuario_apresentacao():
    """Interactively ask the user for name, city, state and hobbies,
    echoing a friendly reply after each answer (dialogue is in Portuguese)."""
    # Name.
    print('Tudo bem, vamos lá me diga seu nome...')
    nome = input('>> ').title()
    print(f'Legal, {nome}!')
    sleep(3)
    # City of residence.
    print('De que cidade você é? ')
    cidade = input('>> ').title()
    print(f'{cidade}, interesssante. Mas {nome}, em que Estado fica essa cidade? ')
    sleep(3)
    # State the city belongs to.
    estado = input('>> ').title()
    print(f'Ah sim. {estado} é um Estado maravilhoso, povo muito acolhedor... ')
    sleep(3)
    # Free-time interests.
    print('O que voce curte fazer no seu tempo livre? ')
    hobbies = input('>> ')
    print(f'Entendi. Seus hobbies entao são {hobbies}...')
    sleep(3)
    # Closing messages leading into the game menu.
    print(f'{nome}, quero lhe levar a uma experiencia atraves do mundo do jogos...')
    sleep(3)
    print('É só você escolher uma das opções e se divertir...')
    sleep(3)
    print('E desde já agradeço por sua companhia...')
| StarcoderdataPython |
# Minimal demonstration of `if` with the <= comparison operator; `pass` is
# the no-op placeholder required because a suite cannot be empty.
if 3 <= 5: # true — 3 is less than or equal to 5, so this branch runs
    pass
if 3 <= 2: # false — the condition fails, so this branch is skipped
    pass
229623 | <filename>examples/red-pitaya/oscillo/python/oscillo.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import math
import numpy as np
from koheron import command
class Oscillo(object):
    """Client-side driver for the Koheron "oscillo" instrument (Red Pitaya).

    Methods decorated with @command are remote procedure calls: the koheron
    decorator ships the call to the board, which is why most of their bodies
    are empty (`pass`) or only deserialize the reply from self.client.
    """

    def __init__(self, client):
        self.client = client
        self.wfm_size = 8192                 # Samples per waveform buffer
        self.sampling_rate = 125e6           # ADC sampling rate [Hz]
        self.t = np.arange(self.wfm_size)/self.sampling_rate  # Time axis [s]
        self.dac = np.zeros((2, self.wfm_size))  # Waveforms to play on the 2 DAC channels
        self.adc = np.zeros((2, self.wfm_size))  # Last acquired ADC waveforms
        self.spectrum = np.zeros((2, int(self.wfm_size / 2)))
        self.avg_spectrum = np.zeros((2, int(self.wfm_size / 2)))

    @command()
    def set_dac_periods(self, period0, period1):
        ''' Select the periods played on each address generator
        ex: self.set_dac_periods(8192, 4096)
        '''
        pass

    @command()
    def set_num_average_min(self, num_average_min):
        ''' Set the minimum of averages that will be computed on the FPGA
        The effective number of averages is >= num_average_min.
        '''
        pass

    @command()
    def set_average_period(self, average_period):
        ''' Set the period of the averaging module and reset the module. '''
        self.period = average_period

    @command()
    def set_average(self, is_average):
        ''' is_average = True enables averaging. '''
        pass

    @command()
    def get_num_average(self, channel):
        ''' Get the number of averages corresponding to the last acquisition. '''
        num_average = self.client.recv_uint32()
        return num_average

    @command()
    def get_decimated_data(self, decim_factor, index_low, index_high):
        ''' Fetch the (decimated) acquisition buffer from the board. '''
        decimated_data = self.client.recv_vector(dtype='float32')
        return decimated_data

    def get_adc(self):
        """Fetch both ADC channels into self.adc, shape (2, wfm_size)."""
        self.adc = np.reshape(self.get_decimated_data(1, 0, self.wfm_size), (2, self.wfm_size))

    def get_spectrum(self):
        """Store the one-sided FFT of the last acquisition in self.spectrum."""
        fft_adc = np.fft.fft(self.adc, axis=1)
        # [FIX] Use floor division: `self.wfm_size / 2` is a float in Python 3
        # and floats are not valid slice indices (TypeError). The sibling
        # methods already wrap the same expression in int(...).
        self.spectrum = fft_adc[:, 0:self.wfm_size // 2]

    def get_avg_spectrum(self, n_avg=1):
        """Average the magnitude spectrum over n_avg fresh acquisitions."""
        self.avg_spectrum = np.zeros((2, int(self.wfm_size / 2)))
        for i in range(n_avg):
            self.get_adc()
            fft_adc = np.abs(np.fft.fft(self.adc, axis=1))
            self.avg_spectrum += fft_adc[:, 0:int(self.wfm_size / 2)]
        self.avg_spectrum /= n_avg

    @command()
    def reset_acquisition(self):
        pass

    @command(funcname='reset')
    def reset_dac(self):
        pass

    def reset(self):
        """Reset the DAC side of the instrument."""
        self.reset_dac()

    # Modulation

    def set_dac(self, channels=(0, 1)):
        """ Write the BRAM corresponding on the selected channels
        (dac0 or dac1) with the array stored in self.dac[channel,:].
        ex: self.set_dac(channels=[0])

        Note: the default is a tuple rather than a list to avoid the
        mutable-default-argument pitfall; iteration is unchanged.
        """
        @command(classname='Modulation')
        def set_dac_buffer(self, channel, arr):
            pass
        for channel in channels:
            # Scale to signed 14-bit range, then pack two int16 samples per
            # uint32 word as expected by the BRAM writer.
            data = np.int16(16384 * (self.dac[channel,:]))
            set_dac_buffer(self, channel, np.uint32(data[1::2] + data[::2] * 65536))

    @command(classname='Modulation')
    def get_modulation_status(self):
        return self.client.recv_tuple('IIffffff')

    @command(classname='Modulation')
    def set_waveform_type(self, channel, wfm_type):
        pass

    @command(classname='Modulation')
    def set_dac_amplitude(self, channel, amplitude_value):
        pass

    @command(classname='Modulation')
    def set_dac_frequency(self, channel, frequency_value):
        pass

    @command(classname='Modulation')
    def set_dac_offset(self, channel, frequency_value):
        pass
11285041 | import os
import json
import requests
import datetime
import jsonpickle
import shutil
import urllib
import elasticsearch.helpers
from elasticsearch import Elasticsearch
from .interfaces import SearchEngineInterface
from .utilities import configPath
#from ltr.helpers.handle_resp import resp_msg
def resp_msg(msg, resp, throw=True):
    """Print `msg` with the HTTP status of `resp`; on an error status
    (>= 400) also print the body and, unless throw=False, raise."""
    print('{} [Status: {}]'.format(msg, resp.status_code))
    if resp.status_code < 400:
        return
    print(resp.text)
    if throw:
        raise RuntimeError(resp.text)
def pretty(obj):
    """Pretty-print `obj` as indented JSON via jsonpickle."""
    encoded = jsonpickle.encode(obj, indent=2)
    print(encoded)
## -------------------------------------------
## Java-Friendly datetime string format
def timestamp():
    """Return the current local time as an ISO-8601 string with a trailing
    'Z' (the Java-friendly format the indexer expects)."""
    now = datetime.datetime.now()
    return '{}Z'.format(now.isoformat())
## -------------------------------------------
## Fix search terms for searchy searchy
def cleanTerm(term):
    """Escape '/' (a query-syntax special character) and strip double
    quotes from a term before embedding it in a search expression."""
    escaped = term.replace('/', '\\/')
    return escaped.replace('"', '')
## -------------------------------------------
## Pass-through query!
## Just take the query as provided, run it against elasticsearch, and return the raw response
def passthrough(uri):
    """Run `uri` directly against Elasticsearch and return the raw
    (body, status_code) pair with no interpretation."""
    response = requests.get(uri)
    return response.text, response.status_code
class ElasticResp():
    """Adapt an Elasticsearch index-admin reply dict to a requests-like
    object exposing `status_code` and a JSON `text` body."""

    def __init__(self, resp):
        # A truthy 'acknowledged' flag means success; otherwise the reply
        # carries its own 'status' code.
        if resp.get('acknowledged'):
            self.status_code = 200
        else:
            self.status_code = resp['status']
        self.text = json.dumps(resp, indent=2)
class BulkResp():
    """Wrap an elasticsearch.helpers.bulk() result tuple: 201 when at
    least one document was indexed, 400 otherwise."""

    def __init__(self, resp):
        indexed_count = resp[0]
        self.status_code = 201 if indexed_count > 0 else 400
class SearchResp():
    """Wrap a raw Elasticsearch search reply dict as a requests-like
    object with `status_code` and a JSON `text` body."""

    def __init__(self, resp):
        # A reply containing 'hits' is a successful search; otherwise the
        # reply carries its own 'status' code.
        if 'hits' in resp:
            self.status_code = 200
        else:
            self.status_code = resp['status']
        self.text = json.dumps(resp, indent=2)
class Elastic(SearchEngineInterface):
    """Elasticsearch implementation of SearchEngineInterface for skipchunk
    concept/predicate graphs.

    Index administration goes through the REST API via `requests`; search
    and aggregation calls use the official client stored in self.es.
    """

    ## -------------------------------------------
    ## Index Admin

    def indexes(self) -> list:
        """List the existing skipchunk indexes (postfix stripped)."""
        indexes = []
        host = self.host
        uri = host + "_cat/indices?format=json"
        try:
            r = requests.get(uri)
            if r.status_code == 200:
                # Keep only indexes of our kind, and strip the postfix so the
                # caller sees logical names.
                indexes = r.json()
                indexes = [i["index"].replace(self.postfix, '') for i in indexes if self.postfix in i["index"]]
            else:
                print('ELASTIC ERROR! Cores could not be listed! Have a nice day.')
                print(json.dumps(r.json(), indent=2))
        except Exception:
            # [FIX] was a bare `except:`, which also swallows KeyboardInterrupt.
            message = 'NETWORK ERROR! Could not connect to Elastic server on', uri, ' ... Have a nice day.'
            raise ValueError(message)
        return indexes

    def indexExists(self, name: str) -> bool:
        """Return True if the index `name` exists on the host."""
        host = self.host
        uri = host + name
        r = requests.get(uri)
        if r.status_code == 200:
            data = r.json()
            if name in data.keys():
                return True
        return False

    def indexCreate(self, timeout=10000) -> bool:
        """Create the index from the bundled schema configset.

        Returns True only when the index was actually created.
        """
        success = False
        host = self.host
        name = self.name
        settings = None
        if not self.indexExists(name):
            try:
                # Refresh the local copy of the configset, then load the
                # schema JSON that will become the index settings.
                if os.path.isdir(self.elastic_home):
                    shutil.rmtree(self.elastic_home)
                graph_source = configPath('skipchunk/elastic_home/configsets/skipchunk-' + self.kind + '-configset')
                shutil.copytree(graph_source, self.elastic_home)
                cfg_json_path = self.elastic_home + '/skipchunk-' + self.kind + '-schema.json'
                with open(cfg_json_path) as src:
                    settings = json.load(src)
            except Exception:
                message = 'DISK ERROR! Could not find the schema at ' + graph_source
                raise ValueError(message)
            if settings:
                try:
                    res = self.es.indices.create(self.name, body=settings)
                    r = ElasticResp(res)
                    if r.status_code == 200:
                        success = True
                        print('Index', name, 'created!')
                    else:
                        print('ELASTIC ERROR! Index', name, 'could not be created! Have a nice day.')
                        # [FIX] ElasticResp is not a requests response and has
                        # no .json() method; its body is already in .text.
                        print(r.text)
                except Exception:
                    message = 'NETWORK ERROR! Could not connect to Elasticsearch server on', host, ' ... Have a nice day.'
                    raise ValueError(message)
        return success

    def indexDelete(self):
        """Delete this index, ignoring 'missing index' errors."""
        resp = self.es.indices.delete(index=self.name, ignore=[400, 404])
        resp_msg(msg="Deleted index {}".format(self.name), resp=ElasticResp(resp), throw=False)

    ## -------------------------------------------
    ## Content Update

    def index(self, documents, timeout=10000) -> str:
        """Bulk-index an iterable of documents (each must carry an 'id')."""
        def bulkDocs(doc_src, name):
            # Generator translating documents into bulk-API actions.
            for doc in doc_src:
                addCmd = {"_index": name,
                          "_id": doc['id'],
                          "_source": doc}
                yield addCmd
        isIndex = self.indexExists(self.name)
        if not isIndex:
            isIndex = self.indexCreate()
        if isIndex:
            res = elasticsearch.helpers.bulk(self.es, bulkDocs(documents, self.name), chunk_size=100)
            self.es.indices.refresh(index=self.name)
            r = BulkResp(res)
            if r.status_code < 400:
                return True
        return False

    ## -------------------------------------------
    ## Querying

    def search(self, querystring, handler: str) -> str:
        """Search the engine for the query (not implemented yet)."""
        pass

    ## -------------------------------------------
    ## Graphing

    def parseSuggest(self, field: str, res: dict) -> dict:
        """Normalize an aggregation reply to [{"term", "weight"}, ...]."""
        facets = [{"term": f["key"], "weight": f["doc_count"]} for f in res["aggregations"][field]["buckets"]]
        return facets

    def parseAggregate(self, field: str, res: dict) -> dict:
        """Normalize an aggregation reply to [{"label", "count"}, ...]."""
        facets = [{"label": f["key"], "count": f["doc_count"]} for f in res["aggregations"][field]["buckets"]]
        return facets

    def conceptVerbConcepts(self, concept: str, verb: str, mincount=1, limit=100) -> list:
        """Find the concepts appearing in the same sentences as both
        `concept` and `verb` (as subject-of or object-of)."""
        subject = cleanTerm(concept)
        verb = cleanTerm(verb)
        objects = []
        # Get all the sentence ids that contain both the concept AND verb.
        query = {
            "size": 10000,
            "_source": ["sentenceid"],
            "query": {
                "bool": {
                    "must": [
                        {"bool": {
                            "should": [
                                {"match": {"objectof": verb}},
                                {"match": {"subjectof": verb}}
                            ]
                        }},
                        {"match": {"preflabel": subject}}
                    ]
                }
            }
        }
        res = self.es.search(index=self.name, body=query)
        if res["hits"]["total"]["value"] > 0:
            # Aggregate the other concepts occurring in those sentences,
            # excluding the subject itself.
            shoulds = []
            for doc in res["hits"]["hits"]:
                sentenceid = doc["_source"]["sentenceid"]
                shoulds.append({
                    "bool": {
                        "must": [
                            {"term": {"contenttype": "concept"}},
                            {"term": {"sentenceid": sentenceid}}
                        ],
                        "must_not": [
                            {"term": {"preflabel": subject}}
                        ]
                    }
                })
            field2 = "preflabel"
            query2 = {
                "size": 0,
                "query": {"bool": {"should": shoulds}},
                "aggs": {"preflabel": {"terms": {"field": field2}}}
            }
            res2 = self.es.search(index=self.name, body=query2)
            objects = self.parseAggregate(field2, res2)
        return objects

    def conceptsNearVerb(self, verb: str, mincount=1, limit=100) -> list:
        """Find the concepts appearing in the same context as `verb`."""
        verb = cleanTerm(verb)
        field = "preflabel"
        query = {
            "size": 0,
            "query": {
                "bool": {
                    "should": [
                        {"match": {"objectof": verb}},
                        {"match": {"subjectof": verb}}
                    ]
                }
            },
            "aggs": {"preflabel": {"terms": {"field": field}}}
        }
        res = self.es.search(index=self.name, body=query)
        # [FIX] parseAggregate is an instance method; the original called it
        # as a bare name, which raises NameError at runtime.
        return self.parseAggregate(field, res)

    def verbsNearConcept(self, concept: str, mincount=1, limit=100) -> list:
        """Find the verbs appearing in the same context as `concept`."""
        concept = cleanTerm(concept)
        field1 = "subjectof"
        field2 = "objectof"
        query1 = {
            "size": 0,
            "query": {"match_phrase": {"label": concept}},
            "aggs": {"subjectof": {"terms": {"field": field1}}}
        }
        query2 = {
            "size": 0,
            "query": {"match_phrase": {"label": concept}},
            "aggs": {"objectof": {"terms": {"field": field2}}}
        }
        res1 = self.es.search(index=self.name, body=query1)
        subjectofs = self.parseAggregate(field1, res1)
        res2 = self.es.search(index=self.name, body=query2)
        objectofs = self.parseAggregate(field2, res2)
        return subjectofs + objectofs

    def suggestConcepts(self, prefix: str, build=False) -> list:
        """Suggest a list of concepts matching a prefix."""
        field = "preflabel"
        query = {
            "size": 0,
            "query": {"match_phrase_prefix": {"concept_suggest": prefix}},
            "aggs": {"preflabel": {"terms": {"field": "preflabel"}}}
        }
        res = self.es.search(index=self.name, body=query)
        return self.parseSuggest(field, res)

    def suggestPredicates(self, prefix: str, build=False) -> list:
        """Suggest a list of predicates matching a prefix."""
        field = "preflabel"
        query = {
            "size": 0,
            "query": {"match_phrase_prefix": {"predicate_suggest": prefix}},
            "aggs": {"preflabel": {"terms": {"field": "preflabel"}}}
        }
        res = self.es.search(index=self.name, body=query)
        return self.parseSuggest(field, res)

    def summarize(self, mincount=1, limit=100) -> list:
        """Summarize the index: top concepts and top predicates."""
        field = "preflabel"
        q1 = {
            "size": 0,
            "query": {"match": {"contenttype": "concept"}},
            "aggs": {"preflabel": {"terms": {"size": limit, "field": "preflabel"}}}
        }
        q2 = {
            "size": 0,
            "query": {"match": {"contenttype": "predicate"}},
            "aggs": {"preflabel": {"terms": {"size": limit, "field": "preflabel"}}}
        }
        res1 = self.es.search(index=self.name, body=q1)
        concepts = self.parseAggregate(field, res1)
        res2 = self.es.search(index=self.name, body=q2)
        predicates = self.parseAggregate(field, res2)
        return concepts, predicates

    def graph(self, subject: str, objects=5, branches=10) -> list:
        """Build the subject-predicate-object graph for `subject`."""
        tree = []
        verbs = self.verbsNearConcept(subject)[0:branches]
        branch = {
            "label": subject,
            "labeltype": "subject",
            "relationships": []
        }
        for verb in verbs:
            v = verb["label"]
            predicate = {
                "label": v,
                "weight": verb["count"],
                "labeltype": "predicate",
                "relationships": []
            }
            cvc = self.conceptVerbConcepts(subject, v, limit=objects)
            for o in cvc:
                predicate["relationships"].append({
                    "label": o["label"],
                    "weight": o["count"],
                    "labeltype": "object",
                    "relationships": []
                })
            branch["relationships"].append(predicate)
        tree.append(branch)
        return tree

    def explore(self, term, contenttype="concept", build=False, quiet=False, branches=10) -> list:
        """Graph walk of suggested concepts for a prefix (not implemented)."""
        return []

    def __init__(self, host, name, kind, path, postfix, enrich_query=None):
        self.host = host
        self.name = name + postfix          # Physical index name
        self.kind = kind
        self.path = os.path.abspath(path)
        self.postfix = postfix
        self.root = os.path.join(self.path, name)
        self.elastic_home = os.path.join(self.root, 'elastic_' + self.kind)
        self.document_data = os.path.join(self.root, 'documents')
        self.elastic_uri = self.host + self.name
        self.enrich_query = enrich_query
        self.es = Elasticsearch(self.host)
5113330 | <reponame>zeendeploy/dsrf
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Objects to validate the cells in a Flat File."""
import re
from dsrf import constants
from dsrf import error
from dsrf.proto import cell_pb2
class BaseCellValidator(object):
  """Base class for all cell validators.

  Subclasses implement validate_single_value() and get_cell_type(); this
  class handles the required/repeated bookkeeping common to every cell.
  """

  # Human-readable description of the expected value; used in exceptions.
  _expected_value = ''

  def __init__(self, cell_name, logger, required=True, repeated=False):
    self.cell_name = cell_name
    self.logger = logger
    self.required = required
    self.repeated = repeated

  def validate_value(self, value, row_number, file_name, block_number):
    """Validate and parse the cell value, logging validation failures.

    Args:
      value: The cell value, to validate.
      row_number: The cell's row number in the file.
      file_name: The cell's file name.
      block_number: Integer block number.

    Returns:
      The parsed cell value when it fits the cell definition, else None.
    """
    try:
      return self._validate_value(value, row_number, file_name, block_number)
    except error.CellValidationFailure as e:
      self.logger.error(e)

  def _validate_value(self, value, row_number, file_name, block_number):
    """Apply the required/repeated rules, then validate each single value.

    Args:
      value: The cell value, to validate.
      row_number: The cell's row number in the file.
      file_name: The cell's file name.
      block_number: Integer block number.

    Returns:
      The parsed cell value (a list when the cell is repeated).
    """
    if not value:
      # An empty cell is only an error when the cell is required.
      if not self.required:
        return value
      raise error.RequiredCellMissing(
          self.cell_name, row_number, file_name, block_number, value,
          self.get_expected_value())
    if not self.repeated:
      return self.validate_single_value(
          value, row_number, file_name, block_number)
    # Repeated cells hold delimiter-separated values; empty parts are skipped.
    return [
        self.validate_single_value(part, row_number, file_name, block_number)
        for part in value.split(constants.REPEATED_VALUE_DELIMITER) if part]

  def validate_single_value(self, value, row_number, file_name, block_number):
    # Overridden by subclasses; the base implementation accepts nothing.
    pass

  def get_expected_value(self):
    return self._expected_value

  def _raise_validation_failure(self, value, row_number, file_name,
                                block_number):
    raise error.CellValidationFailure(
        self.cell_name, row_number, file_name, block_number, value,
        self.get_expected_value())

  def get_cell_type(self):
    return None
class StringValidator(BaseCellValidator):
  """Validates String cells.

  NOTE(review): relies on the Python 2 `unicode` builtin, so this module is
  Python 2 only; under Python 3 validate_single_value would raise NameError.
  """

  _expected_value = 'a string'

  def validate_single_value(self, value, row_number, file_name, block_number):
    """Decode `value` as UTF-8, reporting validation or unicode failures."""
    if not isinstance(value, str):
      self._raise_validation_failure(value, row_number, file_name, block_number)
      return  # unreachable in practice: _raise_validation_failure always raises
    try:
      return unicode(value, 'utf-8')
    except UnicodeDecodeError as e:
      # Re-raise with the cell's location so the report can point at it.
      raise error.BadUnicodeError(
          self.cell_name, row_number, file_name, block_number, value, str(e))

  def get_cell_type(self):
    return cell_pb2.STRING
class IntegerValidator(BaseCellValidator):
  """Validates Integer cells."""

  _expected_value = 'an integer'

  def validate_single_value(self, value, row_number, file_name, block_number):
    """Parse value as an integer; warn when it was written in decimal form."""
    try:
      parsed = float(value)
    except ValueError:
      parsed = None
    if parsed is not None and parsed.is_integer():
      if value.find('.') != -1:
        # The value is numerically integral but was written as a decimal.
        self.logger.warning(
            'The cell %s in line number %s (file=%s) is a decimal (%s), but '
            'expected to be an integer.'
            % (self.cell_name, row_number, file_name, value))
      return int(parsed)
    self._raise_validation_failure(value, row_number, file_name, block_number)

  def get_cell_type(self):
    return cell_pb2.INTEGER
class BooleanValidator(BaseCellValidator):
  """Validates Boolean cells."""

  _expected_value = 'a boolean'

  def validate_single_value(self, value, row_number, file_name, block_number):
    """Return True/False for a case-insensitive 'true'/'false' string."""
    try:
      lowered = value.lower()
    except AttributeError:
      # Not a string-like value; fall through to the failure below.
      lowered = None
    if lowered == 'true':
      return True
    if lowered == 'false':
      return False
    self._raise_validation_failure(value, row_number, file_name, block_number)

  def get_cell_type(self):
    return cell_pb2.BOOLEAN
class DecimalValidator(BaseCellValidator):
  """Validates Decimal cells."""

  _expected_value = 'a decimal'

  def validate_single_value(self, value, row_number, file_name, block_number):
    """Return `value` parsed as a float, or report a validation failure."""
    try:
      parsed = float(value)
    except ValueError:
      self._raise_validation_failure(value, row_number, file_name, block_number)
    else:
      return parsed

  def get_cell_type(self):
    return cell_pb2.DECIMAL
class PatternValidator(BaseCellValidator):
  """Validates cells against a regular-expression string pattern."""

  def __init__(self, pattern, cell_name, logger, required=True, repeated=False):
    super(PatternValidator, self).__init__(
        cell_name, logger, required, repeated)
    self.pattern = pattern
    # [PERF] Compile once here instead of on every validate_single_value
    # call; the pattern never changes for the lifetime of the validator.
    self._compiled_pattern = re.compile(pattern)
    self._expected_value = 'of the form "%s".' % self.pattern

  def validate_single_value(self, value, row_number, file_name, block_number):
    """Return `value` unchanged if it matches the pattern."""
    try:
      if self._compiled_pattern.match(value):
        return value
    except TypeError:
      # Non-string values cannot match; report them as failures below.
      pass
    self._raise_validation_failure(value, row_number, file_name, block_number)

  def get_cell_type(self):
    return cell_pb2.STRING
class FixedStringValidator(BaseCellValidator):
  """Validates cells restricted to a fixed set of strings (enum)."""

  def __init__(
      self, valid_values, cell_name, logger, required=True, repeated=False):
    super(FixedStringValidator, self).__init__(
        cell_name, logger, required, repeated)
    self.valid_values = valid_values
    # Upper-cased copy kept as a set for O(1) membership tests.
    self.valid_value_set = {
        valid_value.upper() for valid_value in valid_values}
    self._expected_value = 'one of the following: %s' % self.valid_values

  def validate_single_value(self, value, row_number, file_name, block_number):
    """Return the upper-cased value if it is one of the allowed strings."""
    try:
      normalized = value.upper()
    except AttributeError:
      normalized = None
    if normalized is not None and normalized in self.valid_value_set:
      return normalized
    self._raise_validation_failure(value, row_number, file_name, block_number)

  def get_cell_type(self):
    return cell_pb2.STRING
class DurationValidator(PatternValidator):
  """Validates 'xs:duration' cells (ISO 8601 duration strings)."""

  def __init__(self, cell_name, logger, required=True, repeated=False):
    super(DurationValidator, self).__init__(
        constants.DURATION_PATTERN, cell_name, logger,
        required=required, repeated=repeated)
    # Friendlier description than the raw regex for error messages.
    self._expected_value = 'ISO 8601 duration'
class DateTimeValidator(PatternValidator):
  """Validates 'xs:dateTime' cells (ISO 8601 dateTime strings)."""

  def __init__(self, cell_name, logger, required=True, repeated=False):
    super(DateTimeValidator, self).__init__(
        constants.DATETIME_PATTERN, cell_name, logger,
        required=required, repeated=repeated)
    # Friendlier description than the raw regex for error messages.
    self._expected_value = 'ISO 8601 dateTime'
| StarcoderdataPython |
You are given two 32-bit numbers, N and M, and two bit positions, i and j. Write a method to set all bits between i and j in N equal to M (i.e., M becomes a substring of N, starting at bit j and ending at bit i).
EXAMPLE:
Input: N = 10000000000, M = 10101, i = 2, j = 6
Output: N = 10001010100
_
__________________________________
| StarcoderdataPython |
5120698 | #! /usr/bin/jython
# -*- coding: utf-8 -*-
#
# voldemort_create.py
#
# Sep/10/2013
#
# ----------------------------------------------------------------
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import json
#
sys.path.append ('/var/www/data_base/common/python_common')
from text_manipulate import dict_display_proc
from text_manipulate import dict_append_proc
#
import java
from voldemort.client import SocketStoreClientFactory
from voldemort.client import ClientConfig
# ----------------------------------------------------------------
def data_prepare_proc ():
    """Build the sample city dictionary via dict_append_proc.

    Each row is (key, city name, population, date); insertion order is the
    same as in the original hand-written sequence of calls.
    """
    rows = (
        ('t3051', '和歌山', 91624, '2004-10-21'),
        ('t3052', '海南', 17825, '2004-8-12'),
        ('t3053', '橋本', 61439, '2004-2-17'),
        ('t3054', '有田', 69482, '2004-9-9'),
        ('t3055', '御坊', 47351, '2004-8-4'),
        ('t3056', '田辺', 35187, '2004-1-21'),
        ('t3057', '新宮', 81256, '2004-7-23'),
        ('t3058', '紀の川', 23784, '2004-11-26'),
        ('t3059', '岩出', 75823, '2004-12-15'),
    )
    dict_aa = {}
    for key, name, population, date in rows:
        dict_aa = dict_append_proc (dict_aa, key, name, population, date)
    return dict_aa
# ----------------------------------------------------------------
def dict_to_voldemort_proc (dict_aa,client):
    """Serialize every value of dict_aa to JSON and store it in the
    Voldemort 'cities' store under its key."""
    print ("*** dict_to_voldemort_proc *** start ***")
    for key, value in dict_aa.items():
        client.put(key, json.dumps (value))
# ----------------------------------------------------------------
# Script entry point: build the sample data, connect to Voldemort, store it.
print ("*** 開始 ***")  # banner: "start"
#
dict_aa = data_prepare_proc ()
#
# Connect to the local Voldemort server over its native socket protocol.
factory = SocketStoreClientFactory (ClientConfig().setBootstrapUrls("tcp://localhost:6666"))
#
client = factory.getStoreClient ("cities")
dict_to_voldemort_proc (dict_aa,client)
#
print ("*** 終了 ***")  # banner: "end"
# ----------------------------------------------------------------
| StarcoderdataPython |
3493094 | <reponame>vcelis/com.northwoodlabradoodles<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from google.appengine.ext import ndb
from google.appengine.api import memcache
class User(ndb.Model):
    """Datastore (ndb) model for an application user account.

    NOTE(review): `pw` appears to hold the password credential directly —
    confirm the stored value is a hash, never plaintext.
    """
    # Display name of the user; must be present on every entity.
    name = ndb.StringProperty(required=True)
    # Password credential; must be present on every entity.
    pw = ndb.StringProperty(required=True)
    # Optional contact e-mail address.
    email = ndb.StringProperty()
| StarcoderdataPython |
1659729 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Topic: 通过在类中实现__call__方法,让该类的实例变成可调用对象,即可以在实例对象后面加()来调用该实例对象
Desc : 流畅的Python 第五章示例程序5-8
"""
import random
class BingoCage:
    """A callable bingo cage (Fluent Python, example 5-8).

    Items are shuffled on construction and drawn one at a time, either via
    pick() or by calling the instance itself (demonstrates __call__).
    """

    def __init__(self, items):
        """Store a shuffled private copy of *items*.

        [FIX] The original assigned the caller's list directly and then
        shuffled it in place, mutating the caller's data — contradicting its
        own comment about building a local copy. Copying with list(items)
        prevents that side effect and also accepts any iterable.

        :param items: any iterable of items to draw from
        """
        self._items = list(items)
        # Randomize the draw order once, up front.
        random.shuffle(self._items)

    def pick(self):
        """Remove and return one item.

        :return: the drawn item
        :raises LookupError: when the cage is empty
        """
        try:
            return self._items.pop()
        except IndexError:
            raise LookupError('pick from empty BingoCage')

    def __call__(self, *args, **kwargs):
        """Calling the instance behaves exactly like pick()."""
        return self.pick()
if __name__ == '__main__':
    bingo = BingoCage(list(range(5)))
    # A BingoCage instance is callable thanks to __call__.
    print(callable(bingo))
    # [FIX] the label said "pack" although the call is pick(); typo corrected.
    print('bingo.pick():%d' % bingo.pick())
    print('bingo():%d' % bingo())
| StarcoderdataPython |
1855895 | <gh_stars>1-10
import re
from sys import argv
from algorithm.enclosure import enclosure_check
import csv
import logging
# Matches one whole function definition: a name followed by a brace-delimited body.
FUNC = re.compile('\S+ \\{[^\\}]+\\}')
# Matches a single-quoted atom such as 'abc'.
# NOTE(review): ATOM is not referenced anywhere in the visible module.
ATOM = re.compile('\'[^\']*\'')
class Function:
    """A parsed function: a name plus a body of '=' pattern equations."""

    def __init__(self, text):
        # Text has the shape "<name> <body>"; split on the first space only.
        self.name, self.body = text.split(' ', 1)
        self.patterns = self._find_patterns()

    def _find_patterns(self):
        """Return the left-hand pattern of every '=' line, outer quotes stripped."""
        return [
            line.split(' = ')[0].strip()[1:-1]
            for line in self.body.split('\n')
            if '=' in line
        ]

    @property
    def work_pairs(self):
        """All unordered pairs of distinct patterns, in declaration order."""
        pairs = []
        for left_idx, left in enumerate(self.patterns):
            for right in self.patterns[left_idx + 1:]:
                pairs.append((left, right))
        return pairs

    def __str__(self):
        return self.name + ' ' + self.body

    def __repr__(self):
        return self.name
def program_to_works(path):
    """Run enclosure_check on every pattern pair of every function in *path*.

    Results are written as CSV next to the input file, replacing the
    (assumed 4-character) extension with ``_results.csv``.

    :param path: path to the program file to analyse
    """
    logging.info('\033[34mStart with ' + path + '\033[37m')
    funcs = funcs_from_file(path)
    # NOTE: path[:-4] assumes a 4-character extension such as ".txt".
    with open(path[:-4] + '_results.csv', 'w') as out_file:
        writer = csv.writer(out_file, delimiter=',')
        writer.writerow(['function', 'pattern1', 'pattern2', 'result'])
        # Fix: the original reused the name "f" for both the open file handle
        # and the loop variable, shadowing the handle inside the with-block.
        for func in funcs:
            logging.info('Work function' + func.name)
            for pattern1, pattern2 in func.work_pairs:
                writer.writerow([func.name, pattern1, pattern2,
                                 enclosure_check(pattern1, pattern2)])
def funcs_from_file(path):
    """Parse *path* and return a tuple of Function objects, one per FUNC match."""
    with open(path) as source:
        return tuple(Function(match) for match in FUNC.findall(source.read()))
# Script entry point: the program file to analyse is the first CLI argument.
if __name__ == '__main__':
    program_to_works(argv[1])
| StarcoderdataPython |
3574681 |
from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions
import dsz
# Register the "traceroute" command schema exactly once; the guard prevents
# re-registration when this module is imported repeatedly.
if ('traceroute' not in cmd_definitions):
    # One record per hop: hop index, round-trip time, and the responding host.
    # single=False: a traceroute result carries a list of hop records.
    dszhopinfo = OpsClass('hopinfo', {'hop': OpsField('hop', dsz.TYPE_INT), 'time': OpsField('time', dsz.TYPE_INT), 'host': OpsField('host', dsz.TYPE_STRING)}, DszObject, single=False)
    traceroutecommand = OpsClass('traceroute', {'hopinfo': dszhopinfo}, DszCommandObject)
    cmd_definitions['traceroute'] = traceroutecommand
334356 | <gh_stars>10-100
from itertools import chain
#: Target notch width, expressed as a multiple of the notch height; used by
#: genHorizontalLinePoints to decide how many notch segments fit along an edge.
IDEAL_NOTCH_WIDTH = 4
def genFrontPoints(w, h, d, t):
    """Yield the notched outline of the front panel (width w, height h, notch depth t)."""
    # Walk the four edges clockwise: bottom, right, top, left.
    yield from genHorizontalLinePoints(0, 0, w, t, 0)
    yield from genVerticalLinePoints(w, 0, h, -t, 0)
    yield from genHorizontalLinePoints(w, h - t, -w, t, 0)
    yield from genVerticalLinePoints(0, h, -h, t, -t)
def genBackPoints(w, h, d, t):
    """Return the outline points of the back panel (identical to the front panel)."""
    return genFrontPoints(w, h, d, t)
def genLeftPoints(w, h, d, t):
    """Yield the notched outline of the left panel (depth d, height h, notch depth t)."""
    # Walk the four edges in sequence, offset so joints mesh with adjacent panels.
    yield from genHorizontalLinePoints(0, 0, -d, t, -t)
    yield from genVerticalLinePoints(-d + t, 0, h, -t, 0)
    yield from genHorizontalLinePoints(-d, h - t, d, t, t)
    yield from genVerticalLinePoints(-t, h, -h, t, -t)
def genRightPoints(w, h, d, t):
    """Return the outline points of the right panel (identical to the left panel)."""
    return genLeftPoints(w, h, d, t)
def genBottomPoints(w, h, d, t):
    """Yield the notched outline of the bottom panel (width w, depth d, notch depth t)."""
    # Walk the four edges in sequence, offset so joints mesh with adjacent panels.
    yield from genHorizontalLinePoints(0, -t, w, t, t)
    yield from genVerticalLinePoints(w - t, 0, -d, t, -t)
    yield from genHorizontalLinePoints(w, -d + t, -w, -t, -t)
    yield from genVerticalLinePoints(t, -d, d, -t, t)
def genTopPoints(w, h, d, t):
    """Yield the notched outline of the top panel (width w, depth d, notch depth t)."""
    # Walk the four edges in sequence.
    yield from genHorizontalLinePoints(0, 0, w, -t, 0)
    yield from genVerticalLinePoints(w, 0, -d, -t, 0)
    yield from genHorizontalLinePoints(w, -d, -w, t, 0)
    yield from genVerticalLinePoints(0, -d, d, t, 0)
def genHorizontalLinePoints(x, y, length, notchHeight, offset):
    """Yield the points of one horizontal notched edge starting at (x, y).

    The edge is divided into an odd number of equal segments, chosen so each
    segment is as close as possible to IDEAL_NOTCH_WIDTH * |notchHeight| wide;
    the y coordinate alternates between the base line and the notch line.
    The closing point is omitted because it coincides with the first point of
    the next edge.
    """
    idealWidth = abs(notchHeight) * IDEAL_NOTCH_WIDTH
    segments = int(abs(length) / idealWidth)
    # Force an odd segment count so the edge starts and ends on the base line.
    if segments % 2 == 0:
        segments += 1
    step = length / segments
    # Opening corner, shifted along the edge by the joint offset.
    yield (x + offset, y)
    # Each interior boundary contributes two points: the end of one notch side
    # and the start of the next.
    for boundary in range(1, segments):
        x += step
        onBase = boundary % 2 == 1
        yield (x, y if onBase else y + notchHeight)
        yield (x, y + notchHeight if onBase else y)
def genVerticalLinePoints(x, y, length, notchHeight, offset):
    """Yield the points of one vertical notched edge; mirror of the horizontal case."""
    # Generate the edge as if it were horizontal (with x and y swapped),
    # then swap each resulting point's coordinates back.
    for py, px in genHorizontalLinePoints(y, x, length, notchHeight, offset):
        yield (px, py)
| StarcoderdataPython |
8004720 | # Generated from Java9.g4 by ANTLR 4.7.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .Java9Parser import Java9Parser
else:
from Java9Parser import Java9Parser
# This class defines a complete generic visitor for a parse tree produced by Java9Parser.
class Java9Visitor(ParseTreeVisitor):
# Visit a parse tree produced by Java9Parser#literal.
def visitLiteral(self, ctx:Java9Parser.LiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primitiveType.
def visitPrimitiveType(self, ctx:Java9Parser.PrimitiveTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#numericType.
def visitNumericType(self, ctx:Java9Parser.NumericTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#integralType.
def visitIntegralType(self, ctx:Java9Parser.IntegralTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#floatingPointType.
def visitFloatingPointType(self, ctx:Java9Parser.FloatingPointTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#referenceType.
def visitReferenceType(self, ctx:Java9Parser.ReferenceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classOrInterfaceType.
def visitClassOrInterfaceType(self, ctx:Java9Parser.ClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classType.
def visitClassType(self, ctx:Java9Parser.ClassTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classType_lf_classOrInterfaceType.
def visitClassType_lf_classOrInterfaceType(self, ctx:Java9Parser.ClassType_lf_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classType_lfno_classOrInterfaceType.
def visitClassType_lfno_classOrInterfaceType(self, ctx:Java9Parser.ClassType_lfno_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceType.
def visitInterfaceType(self, ctx:Java9Parser.InterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceType_lf_classOrInterfaceType.
def visitInterfaceType_lf_classOrInterfaceType(self, ctx:Java9Parser.InterfaceType_lf_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceType_lfno_classOrInterfaceType.
def visitInterfaceType_lfno_classOrInterfaceType(self, ctx:Java9Parser.InterfaceType_lfno_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeVariable.
def visitTypeVariable(self, ctx:Java9Parser.TypeVariableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#arrayType.
def visitArrayType(self, ctx:Java9Parser.ArrayTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#dims.
def visitDims(self, ctx:Java9Parser.DimsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeParameter.
def visitTypeParameter(self, ctx:Java9Parser.TypeParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeParameterModifier.
def visitTypeParameterModifier(self, ctx:Java9Parser.TypeParameterModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeBound.
def visitTypeBound(self, ctx:Java9Parser.TypeBoundContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#additionalBound.
def visitAdditionalBound(self, ctx:Java9Parser.AdditionalBoundContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeArguments.
def visitTypeArguments(self, ctx:Java9Parser.TypeArgumentsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeArgumentList.
def visitTypeArgumentList(self, ctx:Java9Parser.TypeArgumentListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeArgument.
def visitTypeArgument(self, ctx:Java9Parser.TypeArgumentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#wildcard.
def visitWildcard(self, ctx:Java9Parser.WildcardContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#wildcardBounds.
def visitWildcardBounds(self, ctx:Java9Parser.WildcardBoundsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#moduleName.
def visitModuleName(self, ctx:Java9Parser.ModuleNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#packageName.
def visitPackageName(self, ctx:Java9Parser.PackageNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeName.
def visitTypeName(self, ctx:Java9Parser.TypeNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#packageOrTypeName.
def visitPackageOrTypeName(self, ctx:Java9Parser.PackageOrTypeNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#expressionName.
def visitExpressionName(self, ctx:Java9Parser.ExpressionNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodName.
def visitMethodName(self, ctx:Java9Parser.MethodNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#ambiguousName.
def visitAmbiguousName(self, ctx:Java9Parser.AmbiguousNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#compilationUnit.
def visitCompilationUnit(self, ctx:Java9Parser.CompilationUnitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#ordinaryCompilation.
def visitOrdinaryCompilation(self, ctx:Java9Parser.OrdinaryCompilationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#modularCompilation.
def visitModularCompilation(self, ctx:Java9Parser.ModularCompilationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#packageDeclaration.
def visitPackageDeclaration(self, ctx:Java9Parser.PackageDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#packageModifier.
def visitPackageModifier(self, ctx:Java9Parser.PackageModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#importDeclaration.
def visitImportDeclaration(self, ctx:Java9Parser.ImportDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#singleTypeImportDeclaration.
def visitSingleTypeImportDeclaration(self, ctx:Java9Parser.SingleTypeImportDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeImportOnDemandDeclaration.
def visitTypeImportOnDemandDeclaration(self, ctx:Java9Parser.TypeImportOnDemandDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#singleStaticImportDeclaration.
def visitSingleStaticImportDeclaration(self, ctx:Java9Parser.SingleStaticImportDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#staticImportOnDemandDeclaration.
def visitStaticImportOnDemandDeclaration(self, ctx:Java9Parser.StaticImportOnDemandDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeDeclaration.
def visitTypeDeclaration(self, ctx:Java9Parser.TypeDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#moduleDeclaration.
def visitModuleDeclaration(self, ctx:Java9Parser.ModuleDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#moduleDirective.
def visitModuleDirective(self, ctx:Java9Parser.ModuleDirectiveContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#requiresModifier.
def visitRequiresModifier(self, ctx:Java9Parser.RequiresModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classDeclaration.
def visitClassDeclaration(self, ctx:Java9Parser.ClassDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#normalClassDeclaration.
def visitNormalClassDeclaration(self, ctx:Java9Parser.NormalClassDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classModifier.
def visitClassModifier(self, ctx:Java9Parser.ClassModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeParameters.
def visitTypeParameters(self, ctx:Java9Parser.TypeParametersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeParameterList.
def visitTypeParameterList(self, ctx:Java9Parser.TypeParameterListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#superclass.
def visitSuperclass(self, ctx:Java9Parser.SuperclassContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#superinterfaces.
def visitSuperinterfaces(self, ctx:Java9Parser.SuperinterfacesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceTypeList.
def visitInterfaceTypeList(self, ctx:Java9Parser.InterfaceTypeListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classBody.
def visitClassBody(self, ctx:Java9Parser.ClassBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classBodyDeclaration.
def visitClassBodyDeclaration(self, ctx:Java9Parser.ClassBodyDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classMemberDeclaration.
def visitClassMemberDeclaration(self, ctx:Java9Parser.ClassMemberDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#fieldDeclaration.
def visitFieldDeclaration(self, ctx:Java9Parser.FieldDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#fieldModifier.
def visitFieldModifier(self, ctx:Java9Parser.FieldModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#variableDeclaratorList.
def visitVariableDeclaratorList(self, ctx:Java9Parser.VariableDeclaratorListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#variableDeclarator.
def visitVariableDeclarator(self, ctx:Java9Parser.VariableDeclaratorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#variableDeclaratorId.
def visitVariableDeclaratorId(self, ctx:Java9Parser.VariableDeclaratorIdContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#variableInitializer.
def visitVariableInitializer(self, ctx:Java9Parser.VariableInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannType.
def visitUnannType(self, ctx:Java9Parser.UnannTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannPrimitiveType.
def visitUnannPrimitiveType(self, ctx:Java9Parser.UnannPrimitiveTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannReferenceType.
def visitUnannReferenceType(self, ctx:Java9Parser.UnannReferenceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannClassOrInterfaceType.
def visitUnannClassOrInterfaceType(self, ctx:Java9Parser.UnannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannClassType.
def visitUnannClassType(self, ctx:Java9Parser.UnannClassTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannClassType_lf_unannClassOrInterfaceType.
def visitUnannClassType_lf_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannClassType_lf_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannClassType_lfno_unannClassOrInterfaceType.
def visitUnannClassType_lfno_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannClassType_lfno_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannInterfaceType.
def visitUnannInterfaceType(self, ctx:Java9Parser.UnannInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannInterfaceType_lf_unannClassOrInterfaceType.
def visitUnannInterfaceType_lf_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannInterfaceType_lf_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannInterfaceType_lfno_unannClassOrInterfaceType.
def visitUnannInterfaceType_lfno_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannInterfaceType_lfno_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannTypeVariable.
def visitUnannTypeVariable(self, ctx:Java9Parser.UnannTypeVariableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unannArrayType.
def visitUnannArrayType(self, ctx:Java9Parser.UnannArrayTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodDeclaration.
def visitMethodDeclaration(self, ctx:Java9Parser.MethodDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodModifier.
def visitMethodModifier(self, ctx:Java9Parser.MethodModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodHeader.
def visitMethodHeader(self, ctx:Java9Parser.MethodHeaderContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#result.
def visitResult(self, ctx:Java9Parser.ResultContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodDeclarator.
def visitMethodDeclarator(self, ctx:Java9Parser.MethodDeclaratorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#formalParameterList.
def visitFormalParameterList(self, ctx:Java9Parser.FormalParameterListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#formalParameters.
def visitFormalParameters(self, ctx:Java9Parser.FormalParametersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#formalParameter.
def visitFormalParameter(self, ctx:Java9Parser.FormalParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#variableModifier.
def visitVariableModifier(self, ctx:Java9Parser.VariableModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#lastFormalParameter.
def visitLastFormalParameter(self, ctx:Java9Parser.LastFormalParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#receiverParameter.
def visitReceiverParameter(self, ctx:Java9Parser.ReceiverParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#throws_.
def visitThrows_(self, ctx:Java9Parser.Throws_Context):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#exceptionTypeList.
def visitExceptionTypeList(self, ctx:Java9Parser.ExceptionTypeListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#exceptionType.
def visitExceptionType(self, ctx:Java9Parser.ExceptionTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodBody.
def visitMethodBody(self, ctx:Java9Parser.MethodBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#instanceInitializer.
def visitInstanceInitializer(self, ctx:Java9Parser.InstanceInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#staticInitializer.
def visitStaticInitializer(self, ctx:Java9Parser.StaticInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#constructorDeclaration.
def visitConstructorDeclaration(self, ctx:Java9Parser.ConstructorDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#constructorModifier.
def visitConstructorModifier(self, ctx:Java9Parser.ConstructorModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#constructorDeclarator.
def visitConstructorDeclarator(self, ctx:Java9Parser.ConstructorDeclaratorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#simpleTypeName.
def visitSimpleTypeName(self, ctx:Java9Parser.SimpleTypeNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#constructorBody.
def visitConstructorBody(self, ctx:Java9Parser.ConstructorBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#explicitConstructorInvocation.
def visitExplicitConstructorInvocation(self, ctx:Java9Parser.ExplicitConstructorInvocationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumDeclaration.
def visitEnumDeclaration(self, ctx:Java9Parser.EnumDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumBody.
def visitEnumBody(self, ctx:Java9Parser.EnumBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumConstantList.
def visitEnumConstantList(self, ctx:Java9Parser.EnumConstantListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumConstant.
def visitEnumConstant(self, ctx:Java9Parser.EnumConstantContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumConstantModifier.
def visitEnumConstantModifier(self, ctx:Java9Parser.EnumConstantModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumBodyDeclarations.
def visitEnumBodyDeclarations(self, ctx:Java9Parser.EnumBodyDeclarationsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceDeclaration.
def visitInterfaceDeclaration(self, ctx:Java9Parser.InterfaceDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#normalInterfaceDeclaration.
def visitNormalInterfaceDeclaration(self, ctx:Java9Parser.NormalInterfaceDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceModifier.
def visitInterfaceModifier(self, ctx:Java9Parser.InterfaceModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#extendsInterfaces.
def visitExtendsInterfaces(self, ctx:Java9Parser.ExtendsInterfacesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceBody.
def visitInterfaceBody(self, ctx:Java9Parser.InterfaceBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceMemberDeclaration.
def visitInterfaceMemberDeclaration(self, ctx:Java9Parser.InterfaceMemberDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#constantDeclaration.
def visitConstantDeclaration(self, ctx:Java9Parser.ConstantDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#constantModifier.
def visitConstantModifier(self, ctx:Java9Parser.ConstantModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceMethodDeclaration.
def visitInterfaceMethodDeclaration(self, ctx:Java9Parser.InterfaceMethodDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceMethodModifier.
def visitInterfaceMethodModifier(self, ctx:Java9Parser.InterfaceMethodModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotationTypeDeclaration.
def visitAnnotationTypeDeclaration(self, ctx:Java9Parser.AnnotationTypeDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotationTypeBody.
def visitAnnotationTypeBody(self, ctx:Java9Parser.AnnotationTypeBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotationTypeMemberDeclaration.
def visitAnnotationTypeMemberDeclaration(self, ctx:Java9Parser.AnnotationTypeMemberDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotationTypeElementDeclaration.
def visitAnnotationTypeElementDeclaration(self, ctx:Java9Parser.AnnotationTypeElementDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotationTypeElementModifier.
def visitAnnotationTypeElementModifier(self, ctx:Java9Parser.AnnotationTypeElementModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#defaultValue.
def visitDefaultValue(self, ctx:Java9Parser.DefaultValueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotation.
def visitAnnotation(self, ctx:Java9Parser.AnnotationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#normalAnnotation.
def visitNormalAnnotation(self, ctx:Java9Parser.NormalAnnotationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#elementValuePairList.
def visitElementValuePairList(self, ctx:Java9Parser.ElementValuePairListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#elementValuePair.
def visitElementValuePair(self, ctx:Java9Parser.ElementValuePairContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#elementValue.
def visitElementValue(self, ctx:Java9Parser.ElementValueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#elementValueArrayInitializer.
def visitElementValueArrayInitializer(self, ctx:Java9Parser.ElementValueArrayInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#elementValueList.
def visitElementValueList(self, ctx:Java9Parser.ElementValueListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#markerAnnotation.
def visitMarkerAnnotation(self, ctx:Java9Parser.MarkerAnnotationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#singleElementAnnotation.
def visitSingleElementAnnotation(self, ctx:Java9Parser.SingleElementAnnotationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#arrayInitializer.
def visitArrayInitializer(self, ctx:Java9Parser.ArrayInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#variableInitializerList.
def visitVariableInitializerList(self, ctx:Java9Parser.VariableInitializerListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#block.
def visitBlock(self, ctx:Java9Parser.BlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#blockStatements.
def visitBlockStatements(self, ctx:Java9Parser.BlockStatementsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#blockStatement.
def visitBlockStatement(self, ctx:Java9Parser.BlockStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#localVariableDeclarationStatement.
def visitLocalVariableDeclarationStatement(self, ctx:Java9Parser.LocalVariableDeclarationStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#localVariableDeclaration.
def visitLocalVariableDeclaration(self, ctx:Java9Parser.LocalVariableDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#statement.
def visitStatement(self, ctx:Java9Parser.StatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#statementNoShortIf.
def visitStatementNoShortIf(self, ctx:Java9Parser.StatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#statementWithoutTrailingSubstatement.
def visitStatementWithoutTrailingSubstatement(self, ctx:Java9Parser.StatementWithoutTrailingSubstatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#emptyStatement.
def visitEmptyStatement(self, ctx:Java9Parser.EmptyStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#labeledStatement.
def visitLabeledStatement(self, ctx:Java9Parser.LabeledStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#labeledStatementNoShortIf.
def visitLabeledStatementNoShortIf(self, ctx:Java9Parser.LabeledStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#expressionStatement.
def visitExpressionStatement(self, ctx:Java9Parser.ExpressionStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#statementExpression.
def visitStatementExpression(self, ctx:Java9Parser.StatementExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#ifThenStatement.
def visitIfThenStatement(self, ctx:Java9Parser.IfThenStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#ifThenElseStatement.
def visitIfThenElseStatement(self, ctx:Java9Parser.IfThenElseStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#ifThenElseStatementNoShortIf.
def visitIfThenElseStatementNoShortIf(self, ctx:Java9Parser.IfThenElseStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#assertStatement.
def visitAssertStatement(self, ctx:Java9Parser.AssertStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#switchStatement.
def visitSwitchStatement(self, ctx:Java9Parser.SwitchStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#switchBlock.
def visitSwitchBlock(self, ctx:Java9Parser.SwitchBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#switchBlockStatementGroup.
def visitSwitchBlockStatementGroup(self, ctx:Java9Parser.SwitchBlockStatementGroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#switchLabels.
def visitSwitchLabels(self, ctx:Java9Parser.SwitchLabelsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#switchLabel.
def visitSwitchLabel(self, ctx:Java9Parser.SwitchLabelContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumConstantName.
def visitEnumConstantName(self, ctx:Java9Parser.EnumConstantNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#whileStatement.
def visitWhileStatement(self, ctx:Java9Parser.WhileStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#whileStatementNoShortIf.
def visitWhileStatementNoShortIf(self, ctx:Java9Parser.WhileStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#doStatement.
def visitDoStatement(self, ctx:Java9Parser.DoStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#forStatement.
def visitForStatement(self, ctx:Java9Parser.ForStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#forStatementNoShortIf.
def visitForStatementNoShortIf(self, ctx:Java9Parser.ForStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#basicForStatement.
def visitBasicForStatement(self, ctx:Java9Parser.BasicForStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#basicForStatementNoShortIf.
def visitBasicForStatementNoShortIf(self, ctx:Java9Parser.BasicForStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#forInit.
def visitForInit(self, ctx:Java9Parser.ForInitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#forUpdate.
def visitForUpdate(self, ctx:Java9Parser.ForUpdateContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#statementExpressionList.
def visitStatementExpressionList(self, ctx:Java9Parser.StatementExpressionListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enhancedForStatement.
def visitEnhancedForStatement(self, ctx:Java9Parser.EnhancedForStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enhancedForStatementNoShortIf.
def visitEnhancedForStatementNoShortIf(self, ctx:Java9Parser.EnhancedForStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#breakStatement.
def visitBreakStatement(self, ctx:Java9Parser.BreakStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#continueStatement.
def visitContinueStatement(self, ctx:Java9Parser.ContinueStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#returnStatement.
def visitReturnStatement(self, ctx:Java9Parser.ReturnStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#throwStatement.
def visitThrowStatement(self, ctx:Java9Parser.ThrowStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#synchronizedStatement.
def visitSynchronizedStatement(self, ctx:Java9Parser.SynchronizedStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#tryStatement.
def visitTryStatement(self, ctx:Java9Parser.TryStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#catches.
def visitCatches(self, ctx:Java9Parser.CatchesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#catchClause.
def visitCatchClause(self, ctx:Java9Parser.CatchClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#catchFormalParameter.
def visitCatchFormalParameter(self, ctx:Java9Parser.CatchFormalParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#catchType.
def visitCatchType(self, ctx:Java9Parser.CatchTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#finally_.
def visitFinally_(self, ctx:Java9Parser.Finally_Context):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#tryWithResourcesStatement.
def visitTryWithResourcesStatement(self, ctx:Java9Parser.TryWithResourcesStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#resourceSpecification.
def visitResourceSpecification(self, ctx:Java9Parser.ResourceSpecificationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#resourceList.
def visitResourceList(self, ctx:Java9Parser.ResourceListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#resource.
def visitResource(self, ctx:Java9Parser.ResourceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#variableAccess.
def visitVariableAccess(self, ctx:Java9Parser.VariableAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primary.
def visitPrimary(self, ctx:Java9Parser.PrimaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray.
def visitPrimaryNoNewArray(self, ctx:Java9Parser.PrimaryNoNewArrayContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lf_arrayAccess.
def visitPrimaryNoNewArray_lf_arrayAccess(self, ctx:Java9Parser.PrimaryNoNewArray_lf_arrayAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_arrayAccess.
def visitPrimaryNoNewArray_lfno_arrayAccess(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_arrayAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary.
def visitPrimaryNoNewArray_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary_lf_arrayAccess_lf_primary.
def visitPrimaryNoNewArray_lf_primary_lf_arrayAccess_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primary_lf_arrayAccess_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primary.
def visitPrimaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary.
def visitPrimaryNoNewArray_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primary.
def visitPrimaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primary.
def visitPrimaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classLiteral.
def visitClassLiteral(self, ctx:Java9Parser.ClassLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classInstanceCreationExpression.
def visitClassInstanceCreationExpression(self, ctx:Java9Parser.ClassInstanceCreationExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classInstanceCreationExpression_lf_primary.
def visitClassInstanceCreationExpression_lf_primary(self, ctx:Java9Parser.ClassInstanceCreationExpression_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classInstanceCreationExpression_lfno_primary.
def visitClassInstanceCreationExpression_lfno_primary(self, ctx:Java9Parser.ClassInstanceCreationExpression_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#typeArgumentsOrDiamond.
def visitTypeArgumentsOrDiamond(self, ctx:Java9Parser.TypeArgumentsOrDiamondContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#fieldAccess.
def visitFieldAccess(self, ctx:Java9Parser.FieldAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#fieldAccess_lf_primary.
def visitFieldAccess_lf_primary(self, ctx:Java9Parser.FieldAccess_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#fieldAccess_lfno_primary.
def visitFieldAccess_lfno_primary(self, ctx:Java9Parser.FieldAccess_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#arrayAccess.
def visitArrayAccess(self, ctx:Java9Parser.ArrayAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#arrayAccess_lf_primary.
def visitArrayAccess_lf_primary(self, ctx:Java9Parser.ArrayAccess_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#arrayAccess_lfno_primary.
def visitArrayAccess_lfno_primary(self, ctx:Java9Parser.ArrayAccess_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodInvocation.
def visitMethodInvocation(self, ctx:Java9Parser.MethodInvocationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodInvocation_lf_primary.
def visitMethodInvocation_lf_primary(self, ctx:Java9Parser.MethodInvocation_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodInvocation_lfno_primary.
def visitMethodInvocation_lfno_primary(self, ctx:Java9Parser.MethodInvocation_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#argumentList.
def visitArgumentList(self, ctx:Java9Parser.ArgumentListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodReference.
def visitMethodReference(self, ctx:Java9Parser.MethodReferenceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodReference_lf_primary.
def visitMethodReference_lf_primary(self, ctx:Java9Parser.MethodReference_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#methodReference_lfno_primary.
def visitMethodReference_lfno_primary(self, ctx:Java9Parser.MethodReference_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#arrayCreationExpression.
def visitArrayCreationExpression(self, ctx:Java9Parser.ArrayCreationExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#dimExprs.
def visitDimExprs(self, ctx:Java9Parser.DimExprsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#dimExpr.
def visitDimExpr(self, ctx:Java9Parser.DimExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#constantExpression.
def visitConstantExpression(self, ctx:Java9Parser.ConstantExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#expression.
def visitExpression(self, ctx:Java9Parser.ExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#lambdaExpression.
def visitLambdaExpression(self, ctx:Java9Parser.LambdaExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#lambdaParameters.
def visitLambdaParameters(self, ctx:Java9Parser.LambdaParametersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#inferredFormalParameterList.
def visitInferredFormalParameterList(self, ctx:Java9Parser.InferredFormalParameterListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#lambdaBody.
def visitLambdaBody(self, ctx:Java9Parser.LambdaBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#assignmentExpression.
def visitAssignmentExpression(self, ctx:Java9Parser.AssignmentExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#assignment.
def visitAssignment(self, ctx:Java9Parser.AssignmentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#leftHandSide.
def visitLeftHandSide(self, ctx:Java9Parser.LeftHandSideContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#assignmentOperator.
def visitAssignmentOperator(self, ctx:Java9Parser.AssignmentOperatorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#conditionalExpression.
def visitConditionalExpression(self, ctx:Java9Parser.ConditionalExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#conditionalOrExpression.
def visitConditionalOrExpression(self, ctx:Java9Parser.ConditionalOrExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#conditionalAndExpression.
def visitConditionalAndExpression(self, ctx:Java9Parser.ConditionalAndExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#inclusiveOrExpression.
def visitInclusiveOrExpression(self, ctx:Java9Parser.InclusiveOrExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#exclusiveOrExpression.
def visitExclusiveOrExpression(self, ctx:Java9Parser.ExclusiveOrExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#andExpression.
def visitAndExpression(self, ctx:Java9Parser.AndExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#equalityExpression.
def visitEqualityExpression(self, ctx:Java9Parser.EqualityExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#relationalExpression.
def visitRelationalExpression(self, ctx:Java9Parser.RelationalExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#shiftExpression.
def visitShiftExpression(self, ctx:Java9Parser.ShiftExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#additiveExpression.
def visitAdditiveExpression(self, ctx:Java9Parser.AdditiveExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#multiplicativeExpression.
def visitMultiplicativeExpression(self, ctx:Java9Parser.MultiplicativeExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unaryExpression.
def visitUnaryExpression(self, ctx:Java9Parser.UnaryExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#preIncrementExpression.
def visitPreIncrementExpression(self, ctx:Java9Parser.PreIncrementExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#preDecrementExpression.
def visitPreDecrementExpression(self, ctx:Java9Parser.PreDecrementExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#unaryExpressionNotPlusMinus.
def visitUnaryExpressionNotPlusMinus(self, ctx:Java9Parser.UnaryExpressionNotPlusMinusContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#postfixExpression.
def visitPostfixExpression(self, ctx:Java9Parser.PostfixExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#postIncrementExpression.
def visitPostIncrementExpression(self, ctx:Java9Parser.PostIncrementExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#postIncrementExpression_lf_postfixExpression.
def visitPostIncrementExpression_lf_postfixExpression(self, ctx:Java9Parser.PostIncrementExpression_lf_postfixExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#postDecrementExpression.
def visitPostDecrementExpression(self, ctx:Java9Parser.PostDecrementExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#postDecrementExpression_lf_postfixExpression.
def visitPostDecrementExpression_lf_postfixExpression(self, ctx:Java9Parser.PostDecrementExpression_lf_postfixExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#castExpression.
def visitCastExpression(self, ctx:Java9Parser.CastExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#identifier.
def visitIdentifier(self, ctx:Java9Parser.IdentifierContext):
return self.visitChildren(ctx)
del Java9Parser | StarcoderdataPython |
9758106 | <gh_stars>1-10
import torch
import torch.nn as nn
from pytorch_lightning.utilities.seed import seed_everything
from hyperbox.networks import OFAMobileNetV3, DartsNetwork, ENASMacroGeneralModel, ENASMicroNetwork, BaseNASNetwork
from hyperbox.mutator import RandomMutator
from hyperbox.utils.utils import load_json
from hyperbox.mutables.ops import Conv2d, BatchNorm2d, FinegrainedModule, Linear
from hyperbox.mutables.spaces import InputSpace, OperationSpace, ValueSpace
from hyperbox.utils.metrics import accuracy
def is_module_equal(m1, m2):
    """Return True when every tensor in m1's state dict exactly matches the
    positionally-paired tensor in m2's state dict.

    The names of any mismatching entries are printed as a side effect.
    """
    mismatches = 0
    pairs = zip(m1.state_dict().items(), m2.state_dict().items())
    for (name_a, tensor_a), (name_b, tensor_b) in pairs:
        if (tensor_a - tensor_b).abs().sum() != 0:
            mismatches += 1
            print(name_a, name_b)
    return mismatches == 0
class Net(BaseNASNetwork):
    """Toy searchable network exercising every hyperbox mutable kind:
    OperationSpace (candidate ops), a one-node DARTS cell, InputSpace
    (tensor selection) and ValueSpace-driven finegrained Conv/BN/Linear.

    Args:
        mask: optional architecture mask (as saved by a mutator) that fixes
            every search decision, e.g. when building a standalone subnet.
    """
    def __init__(self, mask=None):
        # Local import; NOTE(review): presumably avoids an import cycle
        # with hyperbox.networks -- confirm.
        from hyperbox.networks.darts import DartsCell
        super().__init__()
        # Two parallel candidate lists: 3x3 vs 5x5 conv, both 3->8 channels.
        ops1 = [
            nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1),
            nn.Conv2d(3,8,kernel_size=5,stride=1,padding=2),
        ]
        ops2 = [
            nn.Conv2d(3,8,kernel_size=3,stride=1,padding=1),
            nn.Conv2d(3,8,kernel_size=5,stride=1,padding=2),
        ]
        self.candidate_op1 = OperationSpace(ops1, key='candidate1', mask=mask)
        self.candidate_op2 = OperationSpace(ops2, key='candidate2', mask=mask)
        num_nodes = 1
        out_channels = int(8 / num_nodes)
        # One-node DARTS cell consuming both candidate branches' outputs.
        self.cell = DartsCell(num_nodes, 8, 8, out_channels, False, False, mask=mask)
        # Chooses 1 of the 3 intermediate tensors produced in forward().
        self.input_op = InputSpace(n_candidates=3, n_chosen=1, key='input1', mask=mask)
        # Searchable hyper-parameters: channel width (v1), stride (v2),
        # kernel size (v3) of the finegrained ops below.
        v1 = ValueSpace([1,2,4,8], key='v1', mask=mask)
        v2 = ValueSpace([1,2], key='v2', mask=mask)
        v3 = ValueSpace([1,3], key='v3', mask=mask)
        self.fop1 = Conv2d(8, v1, kernel_size=v3,stride=v2,padding=1,auto_padding=True)
        self.fop2 = Conv2d(v1,v1,3,1,1)
        self.fop3 = BatchNorm2d(v1)
        self.fc = Linear(v1, NUM_CLASSES, bias=False)
    def forward(self, x):
        # NOTE(review): assumes a 3-channel image batch (the candidate
        # convolutions take 3 input channels) -- confirm against callers.
        bs = x.shape[0]
        out1 = self.candidate_op1(x)
        out2 = self.candidate_op2(x)
        out3 = self.cell(out1, out2)
        # Select one of the three feature maps per the input-choice mutable.
        out = self.input_op([out1, out2, out3])
        out = self.fop1(out)
        out = self.fop2(out)
        out = self.fop3(out)
        # Global average pool -> flatten -> linear classification head.
        out = nn.AdaptiveAvgPool2d(1)(out)
        out = out.view(bs,-1)
        out = self.fc(out)
        return out
# Number of output classes shared by every test network built below.
NUM_CLASSES = 5
def test_case(net_cls, *args, **kwargs):
    """Smoke-test supernet -> subnet consistency for one network class.

    Builds a supernet, samples one architecture with RandomMutator, saves
    the mask, builds the standalone subnet from it, then checks the subnet
    reproduces the supernet's sampled-forward accuracy both after
    ``load_from_supernet()`` and after restoring the subnet's own initial
    state dict.  Prints per-layer diagnostics when the accuracies diverge.

    Args:
        net_cls: a BaseNASNetwork subclass to instantiate.
        *args, **kwargs: forwarded to ``net_cls``.
    """
    # try:
    x = torch.rand(32,3,64,64)
    y = torch.randint(0, NUM_CLASSES, (32,))
    print("="*20)
    name = net_cls.__name__
    # print(f"{name} start")
    net = net_cls(*args, **kwargs)
    # print(f"{name} init pass")
    m = RandomMutator(net)
    m.reset()
    net.eval()
    # Accuracy of the sampled architecture inside the supernet.
    y1 = net(x)
    acc1 = accuracy(y1,y)
    mask_file = f'{name}.json'
    m.save_arch(mask_file)
    # print(f"{name} save arch pass")
    mask = load_json(mask_file)
    subnet = net.build_subnet(mask)
    origin_subnet_state = subnet.state_dict()
    subnet.eval()
    # print(f"{name} build_subnet pass")
    # check whether load state dict successfully
    # way 1
    subnet.load_from_supernet(net.state_dict())
    # print(f"{name} load subnet from supernet pass")
    updated_subnet_state = subnet.state_dict()
    y2 = subnet(x)
    acc2 = accuracy(y2, y)
    # Restore the subnet's own freshly-built weights and re-check.
    subnet.load_state_dict(origin_subnet_state)
    y3 = subnet(x)
    acc3 = accuracy(y3, y)
    if acc1!=acc2 or acc1!=acc3 or acc2!=acc3:
        print(acc1, acc2, acc3)
        print(f"{name} wrong")
        # NOTE(review): these diagnostics assume a Net-style subnet with
        # candidate_op1/candidate_op2 and cell.mutable_ops attributes;
        # other net classes would raise AttributeError here -- confirm.
        for idx, op in enumerate([subnet.candidate_op1, subnet.candidate_op2]):
            idx += 1
            index = op.index
            print(f'c{idx}', is_module_equal(op, eval(f"net.candidate_op{idx}")[index]))
        for idx, node in enumerate(subnet.cell.mutable_ops):
            for jdx, op in enumerate(node.ops):
                index = op.index
                pop = net.cell.mutable_ops[idx].ops[jdx][index]
                print(f"node{idx}-op{jdx}", is_module_equal(op, pop))
        pass
    else:
        # print(f"{name} pass")
        pass
if __name__ == '__main__':
    # Smoke-test several supernet classes under two fixed seeds.
    for i in range(2):
        seed_everything(i+999)
        # test_case(Net)
        test_case(OFAMobileNetV3, num_classes=NUM_CLASSES)
        test_case(DartsNetwork, 3, 16, NUM_CLASSES, 1) # 'cells.0.mutable_ops.0.ops.0.candidates.0.bn.running_mean'
        test_case(ENASMacroGeneralModel, num_classes=NUM_CLASSES) # layers.0.mutable.candidates.0.conv.weight
        # test_case(ENASMicroNetwork, num_classes=NUM_CLASSES) # 'layers.0.nodes.0.cell_x.op_choice.candidates.1.conv.depthwise.weight'
64444 | import unittest
from pyparsing import ParseException
from media_management_scripts.support.search_parser import parse_and_execute, parse
class ParseTestCase:
    """Mixin adding a parse-and-verify helper for search-query tests.

    Intended to be combined with unittest.TestCase (which supplies
    ``assertEqual``), as SimpleTest does below.
    """

    def parse(self, query, expected, context=None):
        """Parse ``query``, evaluate it against ``context``, assert the result.

        Args:
            query: search expression string to parse and execute.
            expected: value the evaluated expression must equal.
            context: optional variable bindings for evaluation.  Defaults to
                an empty dict (previously a shared mutable default argument).
        """
        if context is None:
            context = {}
        self.assertEqual(parse_and_execute(query, context), expected)
class SimpleTest(unittest.TestCase, ParseTestCase):
    """End-to-end tests of the search query language: arithmetic, boolean
    logic, comparisons, membership, context lookups, functions and strings.
    """
    def test_basic(self):
        self.parse('1+1', 2)
        self.parse('1-1', 0)
        self.parse('-1-2', -3)
        self.parse('3*2', 6)
        self.parse('10/2', 5)
    def test_whitespace(self):
        self.parse(' 1 + 1 ', 2)
        self.parse(' 1 + 1 ', 2)
    def test_order_of_operations(self):
        self.parse('1+2*3', 7)
        self.parse('2*3+1', 7)
        self.parse('(1+2)*3', 9)
    def test_boolean(self):
        self.parse('true', True)
        self.parse('false', False)
        self.parse('true and true', True)
        self.parse('true and false', False)
        self.parse('True and False', False)
        self.parse('true or false', True)
        self.parse('not true', False)
        self.parse('not false', True)
    def test_boolean_order_of_operations(self):
        self.parse('true and true or false', True)
        self.parse('not false and false', False)
        self.parse('not false or false', True)
        self.parse('1 in [1] or false', True)
        self.parse('1 in [1] and false', False)
    def test_comparison(self):
        self.parse('1 = 1', True)
        self.parse('1 != 1', False)
        self.parse('1 != 2', True)
        self.parse('1 > 1', False)
        self.parse('2 > 1', True)
        self.parse('1 < 1', False)
        self.parse('1 >= 1', True)
        self.parse('1 <= 1', True)
    def test_in(self):
        self.parse('1 in [1]', True)
        self.parse('1 in [1,2]', True)
        self.parse('2 in [1]', False)
    def test_basic_context(self):
        # Variables (including dotted paths) resolve against the context dict.
        self.parse('a', 2, {'a': 2})
        self.parse('a+1', 3, {'a': 2})
        self.parse('a.b+1', 3, {'a': {'b': 2}})
    def test_reuse(self):
        # A parsed expression can be executed repeatedly with new contexts.
        op = parse('a+1')
        self.assertEqual(2, op.exec({'a': 1}))
        self.assertEqual(3, op.exec({'a': 2}))
    def test_invalid(self):
        # Malformed expressions must raise pyparsing's ParseException.
        with self.assertRaises(ParseException):
            parse('True and')
        with self.assertRaises(ParseException):
            parse('1+')
    def test_isNull(self):
        self.parse('isNull(1)', False)
        self.parse('isNull(a)', True, {'a': None})
        self.parse('not isNull(a)', True, {'a': 1})
    def test_all(self):
        # Bare comparison on a list matches "any"; all() requires every item.
        self.parse('a = 1', True, {'a': [1, 2]})
        self.parse('all(a) = 1', True, {'a': [1, 1]})
        self.parse('all(a) = 1', False, {'a': [1, 2]})
        self.parse('all(a) != 1', True, {'a': [1, 2]})
        self.parse('all(a) != 1', False, {'a': [1, 1]})
    def test_string(self):
        self.parse('"test"', 'test')
        self.parse('test', 'test')
        self.parse('"test test"', 'test test')
        self.parse('"test test"', 'test test')
        self.parse('"test test" = "test test"', True)
| StarcoderdataPython |
1881287 | <filename>dot_vim/plugged/vim-devicons/rplugin/python3/denite/filter/devicons_denite_converter.py
# -*- coding: utf-8 -*-
# vim:se fenc=utf8 noet:
from .base import Base
from os.path import isdir
class Filter(Base):
    """Denite converter that prefixes each candidate with a vim-devicons
    filetype icon."""
    def __init__(self, vim):
        super().__init__(vim)
        self.name = 'devicons_denite_converter'
        self.description = 'add devicons in front of candidates'
    def filter(self, context):
        """Prepend an icon to every candidate's ``abbr``, skipping entries
        whose first characters already carry one."""
        for candidate in context['candidates']:
            if 'bufnr' in candidate:
                # Buffer candidate: resolve the buffer's tail filename.
                bufname = self.vim.funcs.bufname(candidate['bufnr'])
                filename = self.vim.funcs.fnamemodify(bufname, ':p:t')
            elif 'word' in candidate and 'action__path' in candidate:
                filename = candidate['word']
            # NOTE(review): if a candidate matches neither branch above,
            # `filename` is unbound (first iteration) or stale (later
            # iterations) -- confirm all candidates carry either bufnr or
            # word+action__path.
            icon = self.vim.funcs.WebDevIconsGetFileTypeSymbol(
                filename, isdir(filename))
            # Customize output format if not done already.
            if icon not in candidate.get('abbr', '')[:10]:
                candidate['abbr'] = ' {} {}'.format(
                    icon, candidate.get('abbr', candidate['word']))
        return context['candidates']
| StarcoderdataPython |
8064120 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
os.environ["MKL_THREADING_LAYER"] = "GNU"
import numpy as np
import numpy.random as npr
import tensorflow as tf
import pickle
import sys
import argparse
import traceback
def test_env_use_true_dynamic():
    """Manual smoke test: roll random actions through a rendered gym env.

    Drops into ipdb before and after the rollout for interactive
    inspection -- intended to be run by hand, not in CI.
    """
    import gym
    #env = gym.make('Pusher-v0')
    #env = gym.make('FetchPush-v1')
    env = gym.make('CartPole-v1')
    # Deliberate breakpoint: inspect the env before the rollout starts.
    from ipdb import set_trace;
    set_trace()
    for i in range(2000):
        env.reset()
        for t in range(100):
            env.render()
            # Sample a random action from the unwrapped env's action space.
            a = env.env.action_space.sample()
            o, r, done, env_info = env.step(a) #### step() in mb_env.py ####
            # if done:
            #     print('done in i and t:', done, i, t)
            #     break
    # Second deliberate breakpoint after all episodes finish.
    from ipdb import set_trace;
    set_trace()
if __name__ == '__main__':
    # Entry point: run the interactive environment smoke test.
    # test_env()
    test_env_use_true_dynamic()
| StarcoderdataPython |
6575420 | <reponame>anubhab-code/Competitive-Programming
def lovefunc(flower, flower2):
    """Return True when the two petal counts have different parity,
    i.e. exactly one of them is odd."""
    return (flower % 2) != (flower2 % 2)
5087370 | import numpy as np
class LinearRegression(object):
    """Ordinary least squares linear regression.

    Implements the previously-empty ``fit``/``predict`` stubs with a
    least-squares solve, mirroring the scikit-learn interface subset:
    ``fit(X, y)``, ``predict(X)`` and read-only ``coef``/``intercept``.
    """

    def __init__(self, fit_intercept=True, copy_X=True):
        # fit_intercept: learn a bias term in addition to the coefficients.
        # copy_X: work on a copy of X so the caller's array is never touched.
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        self._coef = None
        self._intercept = None
        self._new_X = None

    def fit(self, X, y):
        """Estimate coefficients (and intercept) minimizing squared error.

        Args:
            X: (n_samples, n_features) matrix, or (n_samples,) vector for a
                single feature.
            y: (n_samples,) target vector.

        Returns:
            self, to allow call chaining.
        """
        X = np.asarray(X, dtype=float)
        y = np.asarray(y, dtype=float)
        if X.ndim == 1:
            X = X.reshape(-1, 1)
        if self.copy_X:
            X = X.copy()
        if self.fit_intercept:
            # Prepend a column of ones so the first weight is the bias.
            design = np.hstack([np.ones((X.shape[0], 1)), X])
        else:
            design = X
        self._new_X = design
        # lstsq handles rank-deficient systems more robustly than an
        # explicit inverse of X^T X.
        weights = np.linalg.lstsq(design, y, rcond=None)[0]
        if self.fit_intercept:
            self._intercept = weights[0]
            self._coef = weights[1:]
        else:
            self._intercept = 0.0
            self._coef = weights
        return self

    def predict(self, X):
        """Return predicted targets for X using the fitted parameters."""
        X = np.asarray(X, dtype=float)
        if X.ndim == 1:
            X = X.reshape(-1, 1)
        return X.dot(self._coef) + self._intercept

    @property
    def coef(self):
        # Learned feature weights; None before fit().
        return self._coef

    @property
    def intercept(self):
        # Learned bias term; None before fit(), 0.0 when fit_intercept=False.
        return self._intercept
| StarcoderdataPython |
124796 | <filename>baekjoon/9012/valid_parenthesis_string.py
import sys


def is_balanced(s):
    """Return True when ``s`` is a balanced string of '(' and ')'.

    A string is balanced iff every prefix has at least as many openings as
    closings and the totals match.
    """
    depth = 0
    for ch in s:
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth < 0:  # a closing with no matching opening
                return False
    return depth == 0


# Guarded so importing this module no longer blocks reading stdin; also
# drops the `input = lambda ...` builtin shadowing of the original.
if __name__ == "__main__":
    read = sys.stdin.readline
    for _ in range(int(read())):
        print("YES" if is_balanced(read().rstrip()) else "NO")
| StarcoderdataPython |
196724 | <filename>FictionTools/amitools/amitools/vamos/cfgcore/trafo.py
class DictTrafo(object):
    """Builds an output dict by extracting values from an input dict
    according to a transformation spec.

    Each spec value may be:
      * a dict          -> recurse and build a nested result dict
      * (callable, p)   -> call ``callable(key, value_at_path_p)``
      * str/tuple/list  -> a (relative) key path to read from the input
    An optional ``prefix`` path is prepended to every relative path.
    """

    def __init__(self, trafo_dict=None, prefix=None):
        if trafo_dict is None:
            trafo_dict = {}
        self.trafo_dict = trafo_dict
        # Normalize prefix to a tuple of keys, or None for "no prefix".
        if type(prefix) is str:
            self.prefix = (prefix,)
        elif type(prefix) is tuple:
            self.prefix = prefix
        else:
            self.prefix = None

    def transform(self, in_dict, trafo_dict=None, keep_none=False):
        """Apply the spec to ``in_dict``.

        Args:
            in_dict: source dict to read values from.
            trafo_dict: spec to apply; defaults to the instance's spec.
            keep_none: when True, keep keys whose resolved value is None.

        Returns:
            dict with one entry per resolvable spec key.
        """
        if trafo_dict is None:
            trafo_dict = self.trafo_dict
        res = {}
        for key in trafo_dict:
            val = trafo_dict[key]
            tval = type(val)
            if tval is dict:
                # Sub dict: recurse.  keep_none is now forwarded -- it was
                # silently dropped before, so nested specs always pruned
                # None values even when the caller asked to keep them.
                vres = self.transform(in_dict, val, keep_none)
            elif tval is tuple and len(val) == 2 and callable(val[0]):
                # (callable, rel_path): pass the looked-up value through.
                rel_path = self.read_rel_path(val[1], in_dict)
                vres = val[0](key, rel_path)
            elif tval in (str, tuple, list):
                # Plain relative path into in_dict.
                vres = self.read_rel_path(val, in_dict)
            else:
                # Was '"...: %s" + val', which raised TypeError for
                # non-str specs instead of reporting the offending value.
                raise ValueError("invalid type in trafo_dict: %s" % (val,))
            if vres is not None or keep_none:
                res[key] = vres
        return res

    def read_rel_path(self, path, in_dict):
        """Resolve ``path`` (str or sequence of keys) under the prefix."""
        abs_path = []
        if self.prefix:
            abs_path += self.prefix
        if type(path) is str:
            abs_path.append(path)
        else:
            abs_path += path
        return self.read_path(abs_path, in_dict)

    def read_path(self, path, in_dict):
        """Walk ``path`` through nested dicts.

        Returns None when a key is missing or a non-dict is hit before the
        path is exhausted.
        """
        if len(path) == 0:
            return in_dict
        if type(in_dict) is not dict:
            return None
        key = path[0]
        if key in in_dict:
            return self.read_path(path[1:], in_dict[key])
        return None
| StarcoderdataPython |
1796755 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the assessments app.

    Creates the Assessment, CognitiveAtlasConcept, CognitiveAtlasTask,
    Question and QuestionOption tables, then adds the Assessment ->
    CognitiveAtlasTask foreign key.  Generated by Django; avoid hand
    edits beyond comments.
    """

    # No prior migrations: this is the app's initial migration.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Assessment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=250)),
                ('pub_date', models.DateTimeField(verbose_name=b'date published')),
                ('abbreviation', models.CharField(help_text=b'Assessment abbreviation', max_length=250)),
                ('version', models.CharField(help_text=b'version', max_length=10)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CognitiveAtlasConcept',
            fields=[
                ('name', models.CharField(max_length=200)),
                ('cog_atlas_id', models.CharField(max_length=200, serialize=False, primary_key=True)),
                ('definition', models.CharField(default=None, max_length=200)),
            ],
            options={
                'ordering': ['name'],
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CognitiveAtlasTask',
            fields=[
                ('name', models.CharField(max_length=200)),
                ('cog_atlas_id', models.CharField(max_length=200, serialize=False, primary_key=True)),
            ],
            options={
                'ordering': ['name'],
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.CharField(max_length=500)),
                ('label', models.CharField(help_text=b'question unique label', unique=True, max_length=250)),
                ('required', models.BooleanField(default=True, verbose_name=b'Required', choices=[(False, b'Not required'), (True, b'Required')])),
                ('data_type', models.CharField(help_text=b'Data type of the question answer', max_length=200, verbose_name=b'Data Type', choices=[(b'LONGINT', b'Long Integer'), (b'DATETIME', b'Date/Time'), (b'TEXT', b'Text'), (b'INT', b'Integer'), (b'DOUBLE', b'Double')])),
                ('options', models.CharField(default=None, max_length=500)),
                ('assessment', models.ForeignKey(to='assessments.Assessment')),
                ('cognitive_atlas_concept', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, verbose_name=b'Cognitive Atlas Concept', to='assessments.CognitiveAtlasConcept', help_text=b"Concept defined in the <a href='http://www.cognitiveatlas.org/'>Cognitive Atlas</a>", null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='QuestionOption',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('numerical_score', models.IntegerField()),
                ('text', models.CharField(max_length=250)),
                ('questions', models.ManyToManyField(to='assessments.Question')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='assessment',
            name='cognitive_atlas_task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, verbose_name=b'Cognitive Atlas Task', to='assessments.CognitiveAtlasTask', help_text=b"Assessment defined in the <a href='http://www.cognitiveatlas.org/'>Cognitive Atlas</a>", null=True),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
1994585 | <filename>src/trans_len.py
#!/usr/bin/env python3
import csv
import argparse
FLAG = None
def write_file(feats, lab_list, fn):
    """Write one CSV line per utterance: its feature values then its label.

    The original ignored ``lab_list`` and appended ``str([len(i)-1])`` (a
    list repr) to every line; each line now ends with the utterance's
    label, matching the ark format read_feat() parses.

    Args:
        feats: list of per-utterance feature value lists.
        lab_list: labels aligned with ``feats``.
        fn: output file path.
    """
    with open(fn, 'w') as f:
        for feat, lab in zip(feats, lab_list):
            f.write(','.join(str(v) for v in feat))
            f.write(',' + str(lab) + '\n')
    return
def transform(feats, lens, dim=None):
    """Truncate each zero-padded feature vector to its true length.

    Fixes the original body, which indexed an undefined ``single_len``
    name (a NameError on every call); the intended result is the first
    ``lens[i] * dim`` values of each utterance.

    Args:
        feats: zero-padded per-utterance feature lists.
        lens: number of frames actually present in each utterance.
        dim: per-frame feature dimension; defaults to ``FLAG.feat_dim`` so
            existing two-argument calls keep working.

    Returns:
        list of truncated feature lists.
    """
    if dim is None:
        dim = FLAG.feat_dim
    return [feat[:n * dim] for feat, n in zip(feats, lens)]
def read_feat(fn):
    """Read an ark CSV file.

    Per row: every field but the last is a float feature value; the last
    field is the utterance label.

    Returns:
        (feats, labs): list of float lists, and list of float labels.
    """
    feats = []
    labs = []
    with open(fn, 'r') as f:
        for row in csv.reader(f):
            feats.append([float(v) for v in row[:-1]])
            labs.append(float(row[-1]))
    return feats, labs
def read_len(fn):
    """Read one integer per line from ``fn`` (the per-utterance lengths)."""
    with open(fn, 'r') as f:
        return [int(line.rstrip()) for line in f]
def main():
    """Run the whole pipeline: read lengths and features, truncate each
    utterance to its true length, and write the result to FLAG.out_ark.

    The original returned right after reading, so no output was ever
    produced and FLAG.out_ark went unused.
    """
    len_list = read_len(FLAG.len_file)
    ark_list, lab_list = read_feat(FLAG.ark_file)
    trans_feats = transform(ark_list, len_list)
    write_file(trans_feats, lab_list, FLAG.out_ark)
    return
if __name__ == '__main__':
    # Build the CLI, stash parsed args in the module-global FLAG that
    # main()/transform() read, then run the pipeline.
    parser = argparse.ArgumentParser(
        description='Transfrom the fulfilled zeros to no')
    parser.add_argument('--feat_dim',type=int, default=39,
            help='each frame feat dimension')
    parser.add_argument('ark_file',
            help='the transforming ark file')
    parser.add_argument('len_file',
            help='meaning the length of each utterance')
    parser.add_argument('out_ark',
            help='the output file')
    FLAG = parser.parse_args()
    main()
| StarcoderdataPython |
1853095 | <filename>plots/create_plots.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
* Copyright (c) 2017, <NAME>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
'''
import csv
import math
import numpy as np
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
def parse_csv(file):
    """Parse one semicolon-delimited log file and return its seventh column.

    Parameters
    ----------
    file : str
        Path of the ``.log`` file to read.

    Returns
    -------
    list of int
        ``int(row[6])`` for every row of the file.
    """
    # Text mode with newline='' is the csv-module convention; the original
    # opened in binary mode ("rb"), which breaks csv.reader on Python 3,
    # and leaked the file handle if a row failed to parse.
    with open(file, "r", newline="") as ifile:
        reader = csv.reader(ifile, delimiter=';')
        return [int(row[6]) for row in reader]
def parse_all(path):
    """Collect the parsed values of every ``*.log`` file directly inside *path*."""
    collected = []
    for entry in listdir(path):
        candidate = join(path, entry)
        if candidate.endswith(".log") and isfile(candidate):
            collected.extend(parse_csv(candidate))
    return collected
def hist_txt(x):
    """Return a four-line mean/max/min/median summary of the sample list *x*."""
    stats = (np.mean(x), max(x), min(x), np.median(x))
    return "mean= %0.3f\nmax= %d\nmin= %d\nmedian= %d" % stats
def plot_hist(x, fname):
    """Plot a normalized histogram of *x* with a small stats table; save to *fname*.

    ``x`` holds the number of required faulty signatures per experiment run.
    """
    f = plt.figure(figsize=(11,5))
    # One histogram bin per integer value.
    # NOTE(review): ``normed`` was deprecated and later removed from
    # matplotlib (``density=True`` is the replacement) -- confirm the pinned
    # matplotlib version before upgrading.
    plt.hist(x, range(min(x), max(x)+1), normed=True)
    plt.ylabel("relative frequency")
    plt.xlabel("number of required faulty signatures")
    #plt.figtext(.7, .7, hist_txt(x))
    # Summary statistics rendered as a table inside the axes.
    table_data = [["mean", "%.1f" %np.mean(x)],["max", max(x)],["min", min(x)],["median","%d"%np.median(x)]]
    table = plt.table(cellText=table_data, colWidths = [0.05,0.05],
                      loc='center right')
    table.scale(1.8,1.8)
    plt.tight_layout()
    f.savefig(fname)
def scatter_txt(x, y):
    """Return one ``p=NN mean=...`` line per (x, y) pair, each preceded by a newline."""
    return "".join("\np=%02d mean=%.3f" % pair for pair in zip(x, y))
def plot_scatter(x, y, fname):
    """Scatter-plot mean signature counts *y* against forgery-trial counts *x*.

    The numeric values are additionally printed as text in the lower left.
    """
    f = plt.figure(figsize=(11,5))
    plt.plot(x,y, "x", ms=18.0)
    plt.ylabel("number of required faulty signatures")
    plt.xlabel("number of forgery trials p")
    # NOTE(review): the ``ymin``/``ymax`` keywords were removed from
    # ``plt.ylim`` in matplotlib 3.0 (now ``bottom``/``top``) -- confirm the
    # pinned matplotlib version before upgrading.
    plt.ylim(ymin=0)
    plt.ylim(ymax=math.ceil(max(y)+2))
    plt.tight_layout()
    plt.figtext(.1, .2, scatter_txt(x, y))
    #plt.show()
    f.savefig(fname)
def plot_boxplot(x, y, fname):
    """Draw side-by-side boxplots of the sample lists in *y* and save to *fname*.

    Parameters
    ----------
    x : sequence of int
        Forgery-trial counts; used both as positions and as labels.
    y : sequence of sequence of number
        One sample list per entry of *x*.
    fname : str
        Output file name for the saved figure.
    """
    f = plt.figure(figsize=(11,5))
    plt.ylabel("number of required faulty signatures")
    plt.xlabel("number of forgery trials p")
    plt.boxplot(y, labels=x, positions=x, showfliers=False)
    #plt.figtext(.8, .65,scatter_txt(x,[np.mean(a) for a in y]));
    # BUGFIX: materialize the rows -- on Python 3 ``zip``/``map`` return lazy
    # iterators which ``plt.table`` cannot index or measure. The
    # comprehension also avoids the lambda parameter shadowing the outer
    # ``y``.
    table_data = [(p, "%.1f" % np.mean(samples)) for p, samples in zip(x, y)]
    table_cols = ["p", "mean"]
    table = plt.table(cellText=table_data,
                      colLabels=table_cols, loc='center right', bbox=[0.78, 0.55, 0.20, 0.4])
    plt.tight_layout()
    f.savefig(fname)
# ---- Script body: build every figure from the raw measurement logs. ----
plt.style.use('classic')
# Histograms of the p=1 experiments at N=32 and N=64.
data_P_1_N_32 = parse_all("../data/P_1/N_32")
plot_hist(data_P_1_N_32,"FA_P_1_N_32.pdf")
data_P_1_N_64 = parse_all("../data/P_1/N_64")
plot_hist(data_P_1_N_64,"FA_P_1_N_64.pdf")
# For the p-comparison take the first 1000 samples of every series so all
# datasets are equally sized.
data_P_1 = data_P_1_N_32[:1000]
data_P_2 = parse_all("../data/P_NOT1/P_2")[:1000]
data_P_4 = parse_all("../data/P_NOT1/P_4")[:1000]
data_P_8 = parse_all("../data/P_NOT1/P_8")[:1000]
data_P_16 = parse_all("../data/P_NOT1/P_16")[:1000]
data_P_24 = parse_all("../data/P_NOT1/P_24")[:1000]
data_P_32 = parse_all("../data/P_NOT1/P_32")[:1000]
p = [1,2,4,8,16,24,32]
# Scatter plot of the per-p means ...
d = [np.mean(data_P_1), np.mean(data_P_2), np.mean(data_P_4), np.mean(data_P_8), np.mean(data_P_16), np.mean(data_P_24), np.mean(data_P_32)]
plot_scatter(p,d, "FA_P_1_2_4_8_16_32.pdf")
# ... and boxplots of the full distributions.
d = [data_P_1, data_P_2, data_P_4, data_P_8, data_P_16, data_P_24, data_P_32]
plot_boxplot(p, d, "FA_P_boxplot.pdf")
| StarcoderdataPython |
9773293 | <gh_stars>1-10
import json
from urllib.parse import urlparse, urlunparse
import pytest
import sirius_sdk
from sirius_sdk.agent.connections import Endpoint
from sirius_sdk.agent.aries_rfc.feature_0160_connection_protocol.state_machines import Inviter, Invitee, \
ConnRequest, Invitation
from .helpers import run_coroutines, IndyAgent, ServerTestSuite
def replace_url_components(url: str, base: str = None) -> str:
    """Return *url* with its network location replaced by the one of *base*.

    When *base* is falsy the url is returned unchanged.
    """
    if not base:
        return url
    parts = list(urlparse(url))
    parts[1] = urlparse(base).netloc  # index 1 of the parse result == netloc
    return urlunparse(parts)
async def read_events(uri: str, credentials: bytes, p2p: sirius_sdk.P2PConnection):
    """Listen on the agent's event bus and pretty-print every incoming event.

    Runs until the surrounding task is cancelled; used as a companion
    coroutine in the back-compatibility tests.
    """
    async with sirius_sdk.context(uri, credentials, p2p):
        listener = await sirius_sdk.subscribe()
        async for event in listener:
            print('========= EVENT ============')
            print(json.dumps(event, indent=2, sort_keys=True))
            print('============================')
async def run_inviter(
        uri: str, credentials: bytes, p2p: sirius_sdk.P2PConnection, expected_connection_key: str,
        me: sirius_sdk.Pairwise.Me = None, replace_endpoints: bool = False, did_doc_extra: dict = None
):
    """Inviter side of the RFC-0160 connection protocol, driven to completion.

    Waits for a connection request addressed to *expected_connection_key*,
    runs the ``Inviter`` state machine and stores the resulting pairwise.
    *me* is generated on the fly when None; *replace_endpoints* rewrites the
    endpoint hosts with the overlay addresses from pytest globals (used by
    the back-compatibility tests); *did_doc_extra* is merged into the
    produced DID document.
    """
    async with sirius_sdk.context(uri, credentials, p2p):
        # Use the default endpoint (the one without routing keys).
        endpoints_ = await sirius_sdk.endpoints()
        my_endpoint = [e for e in endpoints_ if e.routing_keys == []][0]
        if replace_endpoints:
            new_address = replace_url_components(my_endpoint.address, pytest.test_suite_overlay_address)
            my_endpoint = Endpoint(new_address, my_endpoint.routing_keys, is_default=my_endpoint.is_default)
        listener = await sirius_sdk.subscribe()
        async for event in listener:
            connection_key = event['recipient_verkey']
            if expected_connection_key == connection_key:
                request = event['message']
                assert isinstance(request, ConnRequest)
                if replace_endpoints:
                    # Point the requester's service endpoint back at the
                    # old-agent overlay so the two test networks can talk.
                    request['connection']['did_doc']['service'][0]['serviceEndpoint'] = replace_url_components(
                        request['connection']['did_doc']['service'][0]['serviceEndpoint'],
                        pytest.old_agent_overlay_address
                    )
                # Setup state machine
                if me is None:
                    my_did, my_verkey = await sirius_sdk.DID.create_and_store_my_did()
                    me = sirius_sdk.Pairwise.Me(did=my_did, verkey=my_verkey)
                # create connection
                machine = Inviter(me, connection_key, my_endpoint)
                ok, pairwise = await machine.create_connection(request, did_doc_extra)
                assert ok is True
                await sirius_sdk.PairwiseList.ensure_exists(pairwise)
                pass
async def run_invitee(
        uri: str, credentials: bytes, p2p: sirius_sdk.P2PConnection,
        invitation: Invitation, my_label: str, me: sirius_sdk.Pairwise.Me = None,
        replace_endpoints: bool = False, did_doc_extra: dict = None
):
    """Invitee side of RFC-0160: accept *invitation* and store the pairwise.

    *me* is generated on the fly when None; *replace_endpoints* rewrites
    both our endpoint and the invitation's service endpoint with the overlay
    addresses from pytest globals (back-compatibility tests only).
    """
    async with sirius_sdk.context(uri, credentials, p2p):
        if me is None:
            my_did, my_verkey = await sirius_sdk.DID.create_and_store_my_did()
            me = sirius_sdk.Pairwise.Me(did=my_did, verkey=my_verkey)
        # Use the default endpoint (the one without routing keys).
        endpoints_ = await sirius_sdk.endpoints()
        my_endpoint = [e for e in endpoints_ if e.routing_keys == []][0]
        if replace_endpoints:
            new_address = replace_url_components(my_endpoint.address, pytest.test_suite_overlay_address)
            my_endpoint = Endpoint(new_address, my_endpoint.routing_keys, is_default=my_endpoint.is_default)
            new_address = replace_url_components(invitation['serviceEndpoint'], pytest.old_agent_overlay_address)
            invitation['serviceEndpoint'] = new_address
        # Create and start machine
        machine = Invitee(me, my_endpoint)
        ok, pairwise = await machine.create_connection(invitation=invitation, my_label=my_label, did_doc=did_doc_extra)
        assert ok is True
        await sirius_sdk.PairwiseList.ensure_exists(pairwise)
@pytest.mark.asyncio
async def test_establish_connection(test_suite: ServerTestSuite):
    """Happy path: both sides complete RFC-0160 and store each other's pairwise."""
    inviter = test_suite.get_agent_params('agent1')
    invitee = test_suite.get_agent_params('agent2')
    # Get endpoints
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        inviter_endpoint_address = [e for e in await sirius_sdk.endpoints() if e.routing_keys == []][0].address
        connection_key = await sirius_sdk.Crypto.create_key()
        invitation = Invitation(label='Inviter', endpoint=inviter_endpoint_address, recipient_keys=[connection_key])
    # Init Me
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        did, verkey = await sirius_sdk.DID.create_and_store_my_did()
        inviter_me = sirius_sdk.Pairwise.Me(did, verkey)
    async with sirius_sdk.context(invitee['server_address'], invitee['credentials'], invitee['p2p']):
        did, verkey = await sirius_sdk.DID.create_and_store_my_did()
        invitee_me = sirius_sdk.Pairwise.Me(did, verkey)
    # Run both protocol sides concurrently.
    await run_coroutines(
        run_inviter(
            inviter['server_address'], inviter['credentials'], inviter['p2p'], connection_key, inviter_me
        ),
        run_invitee(
            invitee['server_address'], invitee['credentials'], invitee['p2p'], invitation, 'Invitee', invitee_me
        )
    )
    # Check for Inviter
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        pairwise = await sirius_sdk.PairwiseList.load_for_verkey(invitee_me.verkey)
        assert pairwise is not None
        assert pairwise.their.did == invitee_me.did
    # Check for Invitee
    async with sirius_sdk.context(invitee['server_address'], invitee['credentials'], invitee['p2p']):
        pairwise = await sirius_sdk.PairwiseList.load_for_verkey(inviter_me.verkey)
        assert pairwise is not None
        assert pairwise.their.did == inviter_me.did
@pytest.mark.asyncio
async def test_update_pairwise_metadata(test_suite: ServerTestSuite):
    """Pre-created pairwise records get their metadata filled by the protocol run."""
    inviter = test_suite.get_agent_params('agent1')
    invitee = test_suite.get_agent_params('agent2')
    # Get endpoints
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        inviter_endpoint_address = [e for e in await sirius_sdk.endpoints() if e.routing_keys == []][0].address
        connection_key = await sirius_sdk.Crypto.create_key()
        invitation = Invitation(label='Inviter', endpoint=inviter_endpoint_address, recipient_keys=[connection_key])
    async with sirius_sdk.context(invitee['server_address'], invitee['credentials'], invitee['p2p']):
        invitee_endpoint_address = [e for e in await sirius_sdk.endpoints() if e.routing_keys == []][0].address
    # Init Me
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        did, verkey = await sirius_sdk.DID.create_and_store_my_did()
        inviter_side = sirius_sdk.Pairwise.Me(did, verkey)
    async with sirius_sdk.context(invitee['server_address'], invitee['credentials'], invitee['p2p']):
        did, verkey = await sirius_sdk.DID.create_and_store_my_did()
        invitee_side = sirius_sdk.Pairwise.Me(did, verkey)
    # Manually set pairwise list
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        await sirius_sdk.DID.store_their_did(invitee_side.did, invitee_side.verkey)
        p = sirius_sdk.Pairwise(
            me=inviter_side,
            their=sirius_sdk.Pairwise.Their(
                invitee_side.did, 'Invitee', invitee_endpoint_address, invitee_side.verkey
            )
        )
        await sirius_sdk.PairwiseList.create(p)
    async with sirius_sdk.context(invitee['server_address'], invitee['credentials'], invitee['p2p']):
        await sirius_sdk.DID.store_their_did(inviter_side.did, inviter_side.verkey)
        p = sirius_sdk.Pairwise(
            me=invitee_side,
            their=sirius_sdk.Pairwise.Their(
                inviter_side.did, 'Inviter', inviter_endpoint_address, inviter_side.verkey
            )
        )
        await sirius_sdk.PairwiseList.create(p)
    # Run both protocol sides concurrently.
    await run_coroutines(
        run_inviter(
            inviter['server_address'], inviter['credentials'], inviter['p2p'], connection_key, inviter_side
        ),
        run_invitee(
            invitee['server_address'], invitee['credentials'], invitee['p2p'], invitation, 'Invitee', invitee_side
        )
    )
    # Check for Inviter
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        pairwise = await sirius_sdk.PairwiseList.load_for_did(invitee_side.did)
        assert pairwise.metadata != {}
        assert pairwise.metadata is not None
    # Check for Invitee
    async with sirius_sdk.context(invitee['server_address'], invitee['credentials'], invitee['p2p']):
        pairwise = await sirius_sdk.PairwiseList.load_for_did(inviter_side.did)
        assert pairwise.metadata != {}
        assert pairwise.metadata is not None
@pytest.mark.skip
@pytest.mark.asyncio
async def test_invitee_back_compatibility(indy_agent: IndyAgent, test_suite: ServerTestSuite):
    """Invitee interoperability against a legacy indy agent (currently skipped)."""
    their_invitaton = await indy_agent.create_invitation(label='Test Invitee')
    invitation = Invitation.from_url(their_invitaton['url'])
    invitee = test_suite.get_agent_params('agent1')
    # Init invitee
    async with sirius_sdk.context(invitee['server_address'], invitee['credentials'], invitee['p2p']):
        did, verkey = await sirius_sdk.DID.create_and_store_my_did()
        invitee_side = sirius_sdk.Pairwise.Me(did, verkey)
    # replace_endpoints=True routes traffic through the overlay addresses.
    await run_coroutines(
        run_invitee(
            invitee['server_address'], invitee['credentials'], invitee['p2p'], invitation, 'Invitee', invitee_side, True
        ),
        read_events(
            invitee['server_address'], invitee['credentials'], invitee['p2p']
        )
    )
    invitation_pairwise = None
    async with sirius_sdk.context(invitee['server_address'], invitee['credentials'], invitee['p2p']):
        async for i, pairwise in sirius_sdk.PairwiseList.enumerate():
            if pairwise.me.did == invitee_side.did:
                invitation_pairwise = pairwise
                break
    assert invitation_pairwise is not None
@pytest.mark.skip
@pytest.mark.asyncio
async def test_inviter_back_compatibility(indy_agent: IndyAgent, test_suite: ServerTestSuite, agent1: sirius_sdk.Agent):
    """Inviter interoperability against a legacy indy agent (currently skipped)."""
    inviter = test_suite.get_agent_params('agent1')
    # Init inviter
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        inviter_endpoint_address = [e for e in await sirius_sdk.endpoints() if e.routing_keys == []][0].address
        connection_key = await sirius_sdk.Crypto.create_key()
        # Advertise the overlay address so the legacy agent can reach us.
        inviter_endpoint_address = replace_url_components(inviter_endpoint_address, pytest.test_suite_overlay_address)
        invitation = Invitation(label='Inviter', endpoint=inviter_endpoint_address, recipient_keys=[connection_key])
        invitation_url = invitation.invitation_url
        did, verkey = await sirius_sdk.DID.create_and_store_my_did()
        inviter_side = sirius_sdk.Pairwise.Me(did, verkey)
    await run_coroutines(
        run_inviter(
            inviter['server_address'], inviter['credentials'], inviter['p2p'], connection_key, inviter_side, True
        ),
        indy_agent.invite(invitation_url=invitation_url),
    )
    invitated_pairwise = None
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        async for i, p in sirius_sdk.PairwiseList.enumerate():
            assert isinstance(p, sirius_sdk.Pairwise)
            if p.me.did == inviter_side.did:
                invitated_pairwise = p
                break
    assert invitated_pairwise is not None
@pytest.mark.asyncio
async def test_did_doc_extra_fields(test_suite: ServerTestSuite):
    """Extra DIDDoc fields supplied by each side end up in both stored pairwises."""
    inviter = test_suite.get_agent_params('agent1')
    invitee = test_suite.get_agent_params('agent2')
    # Get endpoints
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        inviter_endpoint_address = [e for e in await sirius_sdk.endpoints() if e.routing_keys == []][0].address
        connection_key = await sirius_sdk.Crypto.create_key()
        invitation = Invitation(label='Inviter', endpoint=inviter_endpoint_address, recipient_keys=[connection_key])
    # Init Me
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        did, verkey = await sirius_sdk.DID.create_and_store_my_did()
        inviter_me = sirius_sdk.Pairwise.Me(did, verkey)
    async with sirius_sdk.context(invitee['server_address'], invitee['credentials'], invitee['p2p']):
        did, verkey = await sirius_sdk.DID.create_and_store_my_did()
        invitee_me = sirius_sdk.Pairwise.Me(did, verkey)
    # Each side injects its own marker fields into the DIDDoc it publishes.
    await run_coroutines(
        run_inviter(
            inviter['server_address'], inviter['credentials'], inviter['p2p'], connection_key, inviter_me,
            did_doc_extra={
                'creator': {'@id': 'uuid-xxx-yyy'},
                'extra': 'Any'
            }
        ),
        run_invitee(
            invitee['server_address'], invitee['credentials'], invitee['p2p'], invitation, 'Invitee', invitee_me,
            did_doc_extra={
                'creator': {'@id': 'uuid-www-zzz'},
                'extra': 'Test'
            }
        )
    )
    # Check for Inviter
    async with sirius_sdk.context(inviter['server_address'], inviter['credentials'], inviter['p2p']):
        pairwise = await sirius_sdk.PairwiseList.load_for_verkey(invitee_me.verkey)
        assert pairwise is not None
        assert pairwise.their.did == invitee_me.did
        assert pairwise.me.did_doc is not None
        assert pairwise.me.did_doc.get('creator', {}) == {'@id': 'uuid-xxx-yyy'}
        assert pairwise.me.did_doc.get('extra', None) == 'Any'
        assert pairwise.their.did_doc is not None
        assert pairwise.their.did_doc.get('creator', {}) == {'@id': 'uuid-www-zzz'}
        assert pairwise.their.did_doc.get('extra', None) == 'Test'
    # Check for Invitee
    async with sirius_sdk.context(invitee['server_address'], invitee['credentials'], invitee['p2p']):
        pairwise = await sirius_sdk.PairwiseList.load_for_verkey(inviter_me.verkey)
        assert pairwise is not None
        assert pairwise.their.did == inviter_me.did
        assert pairwise.me.did_doc is not None
        assert pairwise.me.did_doc.get('creator', {}) == {'@id': 'uuid-www-zzz'}
        assert pairwise.me.did_doc.get('extra', None) == 'Test'
        assert pairwise.their.did_doc is not None
        assert pairwise.their.did_doc.get('creator', {}) == {'@id': 'uuid-xxx-yyy'}
        assert pairwise.their.did_doc.get('extra', None) == 'Any'
| StarcoderdataPython |
6671821 | import unittest
import time
from pybarker.utils.redis import SharedStorage
class Test(unittest.TestCase):
    """Integration tests for SharedStorage (attribute-style redis storage).

    Requires a live redis server reachable at localhost:6379 (database 3).
    """

    APP_REDIS_CONNECTION = 'redis://localhost:6379/3'

    def setUp(self):
        # Two independent SharedStorage instances against the same redis
        # database, to verify that values are really shared between them.
        self.shared_storage = SharedStorage(
            self.APP_REDIS_CONNECTION,
        )
        self.shared_storage2 = SharedStorage(
            self.APP_REDIS_CONNECTION,
        )

    def test_retry_timeout(self):
        """Round-trip str/int/dict/None values via attribute, set/get and delete APIs."""
        valint = int(time.time() * 1000)
        valstr = str(valint)
        valobj = {"str": valstr, "int": valint}
        print("valobj:", valobj)
        self.shared_storage.BEN = valstr
        self.assertEqual(self.shared_storage.BEN, valstr)
        self.shared_storage.BEN = valint
        self.assertEqual(self.shared_storage.BEN, valint)
        self.shared_storage.BEN = valobj
        self.assertEqual(self.shared_storage.BEN, valobj)
        self.shared_storage.BEN = None
        self.assertEqual(self.shared_storage.BEN, None)
        # other instance storage
        self.assertEqual(self.shared_storage2.BEN, None)
        self.shared_storage.BEN = valstr
        self.assertEqual(self.shared_storage2.BEN, valstr)
        self.shared_storage.BEN = valint
        self.assertEqual(self.shared_storage2.BEN, valint)
        self.shared_storage.BEN = valobj
        self.assertEqual(self.shared_storage2.BEN, valobj)
        self.shared_storage.BEN = None
        self.assertEqual(self.shared_storage2.BEN, None)
        # missing
        self.assertEqual(self.shared_storage.BEN666, None)
        # set, get
        self.shared_storage.set("BEN", valint)
        self.assertEqual(self.shared_storage.get("BEN"), valint)
        self.assertEqual(self.shared_storage.get("BEN666"), None)
        self.assertEqual(self.shared_storage.get("BEN666", 666), 666)
        self.shared_storage.set("BEN", valobj)
        self.assertEqual(self.shared_storage.get("BEN"), valobj)
        # del
        self.shared_storage.BEN = valstr
        self.assertEqual(self.shared_storage.BEN, valstr)
        del self.shared_storage.BEN
        self.assertEqual(self.shared_storage.BEN, None)
        # del + other instance storage
        self.assertEqual(self.shared_storage2.BEN, None)
        self.shared_storage.BEN = valstr
        self.assertEqual(self.shared_storage2.BEN, valstr)
        self.shared_storage.delete("BEN")
        self.assertEqual(self.shared_storage2.BEN, None)
        # del silent (deleting missing keys must not raise)
        del self.shared_storage.BEN666
        self.shared_storage.delete("BEN666")
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
8114659 | """
ChainRad
========
File: model training
"""
# Standard library imports
from os import listdir, mkdir
from os.path import isdir, isfile, join
import pickle
from tqdm import tqdm
# 3rd party imports
from PIL import Image
import torch
# Project level imports
from core import IMG_DIR, LOG_DIR, MODEL_DIR, OUT_DIR, SoloClassifier
from core import check_and_get_basics, get_accuracy, get_data_in_batches
from core import get_headless_models
from core import get_training_transformer
# Training parameters
BATCH_SIZE = 128        # samples per optimizer step
LEARNING_RATE = 5e-6    # Adam learning rate
MAX_EPOCHS = 200        # hard upper bound; early stopping usually ends sooner
# Detecting device availability
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def main():
    """
    Provides main functionality
    ===========================
    Lists the raw dataset images, computes the missing headless-model
    outputs (saved as ``<name>.out`` in OUT_DIR) and finally trains the
    per-disease binary classifiers.

    Raises
    ------
    RuntimeError
        When the folder of raw dataset images doesn't exist.
    """
    print('Device "{}" will be used for deep neural network operations.'
          .format(DEVICE))
    if not isdir(IMG_DIR):
        raise RuntimeError('Image folder missing, please download the dataset' +
                           ' or copy/move it to the IMG_DIR folder.')
    original_images = [f for f in listdir(IMG_DIR) if isfile(join(IMG_DIR, f))]
    if not isdir(OUT_DIR):
        mkdir(OUT_DIR)
    # Name roots that already have a saved headless output in OUT_DIR
    # (save_headless_outputs() stores them as '<root>.out').
    processed_roots = {f.rsplit('.', 1)[0] for f in listdir(OUT_DIR)
                       if isfile(join(OUT_DIR, f)) and f.endswith('.out')}
    # BUGFIX: the original listed IMG_DIR (not OUT_DIR) for the outputs and
    # took the set *intersection* of raw and processed file names, so images
    # missing an output were never detected. We need the images whose name
    # root has no '.out' file yet.
    new_files = [f for f in original_images
                 if f.split('.')[0] not in processed_roots]
    if len(new_files) > 0:
        print('{} files doesn\'t have headless output. Let\'s create them.'
              .format(len(new_files)))
        save_headless_outputs(new_files)
    train_binary_classifiers()
def save_headless_outputs(imagelist : list):
    """
    Save headless output of raw images
    ==================================
    Parameters
    ----------
    imagelist : list
        List of raw images to save as the concatenation of the outputs of the
        headless models.
    """
    # pylint: disable=no-member
    # toch has a member function cat()
    # Link: https://pytorch.org/docs/stable/generated/torch.cat.html
    headless = get_headless_models()
    for value in headless.values():
        value.to(DEVICE)
    transform = get_training_transformer()
    for filename in tqdm(imagelist, unit='image'):
        # Output file name reuses the part of the file name before the
        # first dot.
        name_root = filename.split('.')[0]
        img = Image.open(join(IMG_DIR, filename)).convert('RGB')
        # unsqueeze(0) adds the batch dimension expected by the models.
        img = transform(img).unsqueeze(0).to(DEVICE)
        flats = []
        for value in headless.values():
            # Drop the batch dimension again and detach to CPU for pickling.
            flats.append(value(img).squeeze(0).detach().cpu())
        # Concatenate all model outputs into one flat feature tensor.
        final = torch.cat(flats)
        with open(join(OUT_DIR, '{}.out'.format(name_root)), 'wb') as outstream:
            pickle.dump(final, outstream)
def train_binary_classifiers():
    """
    Train binary classifiers
    ========================
    See also
    --------
    Error codes : check_prerequisites_and_get_basics()
    """
    # pylint: disable=too-many-statements
    # Breaking this function to functions doesn't have too much sense.
    # pylint: disable=too-many-branches
    # Breaking this function to functions doesn't have too much sense.
    # pylint: disable=too-many-locals
    # Same variables are separated due to readability of the code, some
    # other are needed being separated.
    # pylint: disable=no-member
    # toch has a member functions round(), stack(), sigmoid()
    # Link: https://pytorch.org/docs/stable/generated/torch.round.html
    # Link: https://pytorch.org/docs/stable/generated/torch.stack.html
    # Link: https://pytorch.org/docs/stable/generated/torch.sigmoid.html
    # pylint: disable=not-callable
    # toch.tensor() is callable
    # Link: https://pytorch.org/docs/stable/generated/torch.tensor.html
    diseases_basics = check_and_get_basics()
    # One independent binary classifier is trained per disease.
    for disease, meta_file_id in diseases_basics.items():
        print('\rDisease: {} --- initializing model... '.format(disease),
              end='')
        disease_classifier = SoloClassifier()
        disease_classifier.to(DEVICE)
        optimizer = torch.optim.Adam(disease_classifier.parameters(),
                                     lr=LEARNING_RATE)
        criterion = torch.nn.BCEWithLogitsLoss()
        print('\rDisease: {} --- creating datasets... '.format(disease),
              end='')
        train_dataset = get_data_in_batches(meta_file_id, batch_size=BATCH_SIZE)
        test_dataset = get_data_in_batches(meta_file_id, dataset_type='test',
                                           batch_size=1, shuffle_count=1)
        print('\rDisease: #{} --- training... '.format(disease),
              end='', flush=True)
        train_len = len(train_dataset)
        test_len = len(test_dataset)
        # Early-stopping bookkeeping: best test loss seen so far and the
        # number of consecutive epochs without improvement.
        min_test_loss = 100.0
        test_no_decrease_count = 0
        # Write the CSV log header once per disease.
        with open(join(LOG_DIR, '{}.csv'.format(meta_file_id)), 'w',
                  encoding='utf8') as outstream:
            outstream.write('\t'.join(['epoch', 'train_loss', 'train_accuracy',
                                       'test_loss', 'test_accuracy']) + '\n')
        for epoch in range(MAX_EPOCHS):
            # ---- training pass ----
            epoch_loss = 0.0
            epoch_preds, epoch_targets = [], []
            disease_classifier.train()
            torch.cuda.empty_cache()
            for batch_x, batch_y in tqdm(train_dataset, unit='batch',
                                         total=train_len):
                for _y in batch_y:
                    epoch_targets.append(_y)
                batch_x = torch.stack(batch_x).to(DEVICE)
                batch_y = torch.tensor(batch_y).float().to(DEVICE)
                optimizer.zero_grad()
                batch_y_hat = disease_classifier(batch_x)
                batch_y_hat = batch_y_hat.squeeze(1)
                loss = criterion(batch_y_hat, batch_y)
                loss.backward()
                optimizer.step()
                batch_loss = loss.item()
                epoch_loss += batch_loss
                # Logits -> probabilities -> hard 0/1 predictions.
                batch_y_hat = torch.sigmoid(batch_y_hat)
                batch_y_hat = torch.round(batch_y_hat).tolist()
                for _y_hat in batch_y_hat:
                    epoch_preds.append(int(_y_hat))
            epoch_accuracy = get_accuracy(epoch_preds, epoch_targets)
            epoch_loss /= train_len
            print('{} TRAIN {:3d}/{:3d}: loss {:.9f} -- accuracy {:5.2f} %'
                  .format(disease, epoch + 1, MAX_EPOCHS, epoch_loss,
                          epoch_accuracy * 100))
            # Checkpoint the model after every epoch.
            torch.save(disease_classifier.state_dict(),
                       join(MODEL_DIR, '{}_{:03d}.statedict'.format(meta_file_id,
                                                                    epoch + 1)))
            # ---- evaluation pass ----
            test_loss = 0.0
            test_preds, test_preds_float, test_targets = [], [], []
            torch.cuda.empty_cache()
            disease_classifier.eval()
            with torch.no_grad():
                for batch_x, batch_y in tqdm(test_dataset, unit='batch',
                                             total=test_len):
                    for _y in batch_y:
                        test_targets.append(_y)
                    batch_x = torch.stack(batch_x).to(DEVICE)
                    batch_y = torch.tensor(batch_y).float().to(DEVICE)
                    batch_y_hat = disease_classifier(batch_x)
                    batch_y_hat = batch_y_hat.squeeze(1)
                    loss = criterion(batch_y_hat, batch_y)
                    batch_loss = loss.item()
                    test_loss += batch_loss
                    batch_y_hat = torch.sigmoid(batch_y_hat)
                    # Keep both raw probabilities (for the per-sample dump)
                    # and rounded 0/1 predictions (for accuracy).
                    batch_preds_float = batch_y_hat.tolist()
                    for _y_hat in batch_preds_float:
                        test_preds_float.append(float(_y_hat))
                    batch_y_hat = torch.round(batch_y_hat).tolist()
                    for _y_hat in batch_y_hat:
                        test_preds.append(int(_y_hat))
            test_accuracy = get_accuracy(test_preds, test_targets)
            test_loss /= test_len
            print('{} TEST {:3d}/{:3d}: loss {:.9f} -- accuracy {:5.2f} %'
                  .format(disease, epoch + 1, MAX_EPOCHS, test_loss,
                          test_accuracy * 100),
                  flush=True)
            # Append this epoch's metrics to the per-disease CSV log.
            with open(join(LOG_DIR, '{}.csv'.format(meta_file_id)), 'a',
                      encoding='utf8') as outstream:
                outstream.write('{}\t{}\t{}\t{}\t{}\n'.format(epoch + 1,
                                                              epoch_loss, epoch_accuracy, test_loss,
                                                              test_accuracy))
            # Dump the latest per-sample predictions/targets (overwritten
            # every epoch).
            with open(join(LOG_DIR, 'last_{}.csv'.format(meta_file_id)), 'w',
                      encoding='utf8') as outstream:
                outstream.write('prediction\ttarget\n')
                for _x, _y in zip(test_preds_float, test_targets):
                    outstream.write('{}\t{}\n'.format(_x, _y))
            # NOTE(review): this reloads the checkpoint saved a few lines
            # above for the *same* epoch -- presumably a leftover; confirm
            # whether the intent was to reload the *best* epoch instead.
            disease_classifier.load_state_dict(torch.load(join(MODEL_DIR,
                '{}_{:03d}.statedict'.format(meta_file_id, epoch + 1))))
            # Early stopping: abort after 10 consecutive epochs without a
            # new best test loss.
            if test_loss <= min_test_loss:
                test_no_decrease_count = 0
                min_test_loss = test_loss
            else:
                test_no_decrease_count += 1
            if test_no_decrease_count > 10:
                break
    print('Training finished.')
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3425712 | #!/usr/bin/python
# coding: utf-8
import unittest
import random
import pendulum
import logging
import json
from cattledb.core._timeseries import FastFloatTSList
class CTimeSeriesTest(unittest.TestCase):
    """Tests for the native FastFloatTSList timeseries container."""

    @classmethod
    def setUpClass(cls):
        logging.basicConfig(level=logging.INFO)

    def test_base(self):
        """Shuffled ISO inserts come back sorted; length/offset/iso round-trip."""
        t1 = FastFloatTSList("hellö", "world")
        end = pendulum.now("Europe/Vienna")
        start = end.subtract(minutes=199)
        data = [(start.add(minutes=n).isoformat(), random.random()) for n in range(0, 200)]
        random.shuffle(data)
        for ts, val in data:
            t1.insert_iso(ts, val)
        t1_json = json.dumps(t1.serializable())
        j1 = json.loads(t1_json)
        assert len(j1) == 200
        assert len(t1.to_list()) == 200
        assert t1.at_index(0)[1] == start.offset
        assert t1.at_index(0)[0] == start.int_timestamp
        # ISO output truncates sub-second precision.
        assert t1.iso_at_index(0)[0] == start.replace(microsecond=0).isoformat()
        assert t1.iso_at_index(len(t1)-1)[0] == end.replace(microsecond=0).isoformat()

    def test_timezone(self):
        """Timestamp and UTC offset of an ISO insert survive the round-trip."""
        t1 = FastFloatTSList("abc", "def")
        dt = pendulum.now("Europe/Vienna").replace(microsecond=0)
        iso_str = dt.isoformat()
        t1.insert_iso(iso_str, 0.1)
        assert t1.at_index(0)[1] == dt.offset
        assert t1.at_index(0)[0] == dt.int_timestamp
        assert t1.iso_at_index(0)[0] == iso_str

    def test_canada(self):
        """Negative UTC offset (America/Toronto in winter, UTC-5)."""
        t1 = FastFloatTSList("abc", "def")
        dt = pendulum.datetime(2019, 2, 12, 8, 15, 32, tz='America/Toronto').replace(microsecond=0)
        iso_str = dt.isoformat()
        t1.insert_iso(iso_str, 0.1)
        assert t1.at_index(0)[0] == dt.int_timestamp
        assert t1.at_index(0)[1] == -5*3600
        assert t1.iso_at_index(0)[0] == "2019-02-12T08:15:32-05:00"

    def test_vienna(self):
        """Positive UTC offset (Europe/Vienna in winter, UTC+1)."""
        t1 = FastFloatTSList("abc", "def")
        dt = pendulum.datetime(2008, 3, 3, 12, 0, 0, tz='Europe/Vienna')
        iso_str = dt.isoformat()
        t1.insert_iso(iso_str, 0.1)
        assert t1.at_index(0)[0] == dt.int_timestamp
        assert t1.at_index(0)[1] == 3600
        assert t1.iso_at_index(0)[0] == "2008-03-03T12:00:00+01:00"

    def test_trim(self):
        """trim_index/trim_ts shrink the series to the requested window."""
        t1 = FastFloatTSList("a", "b")
        end = pendulum.now("Europe/Vienna")
        start = end.subtract(minutes=199)
        data = [(start.add(minutes=n), random.random()) for n in range(0, 200)]
        for dt, val in data:
            t1.insert_datetime(dt, val)
        assert t1._data.bisect_left(0) == 0
        assert t1._data.bisect_right(0) == 0
        assert len(t1) == 200
        t1.trim_index(100, 200)
        assert len(t1) == 100
        t1.trim_ts(start, end)
        assert len(t1) == 100
        t1.trim_ts(end.subtract(minutes=9), end)
        assert len(t1) == 10
        t1.trim_index(0, 0)
        assert len(t1) == 1
        t1.trim_index(1, 1)
        assert len(t1) == 0

    def test_trim_exact(self):
        """trim_ts bounds are inclusive on both ends."""
        t1 = FastFloatTSList("a", "b")
        t1.insert(100, 0, 2.2)
        t1.insert(200, 0, 2.2)
        t1.insert(300, 0, 2.2)
        t1.insert(400, 0, 2.2)
        assert len(t1) == 4
        t1.trim_ts(100, 400)
        assert len(t1) == 4
        t1.trim_ts(100, 399)
        assert len(t1) == 3
        t1.trim_ts(99, 399)
        assert len(t1) == 3
        t1.trim_ts(101, 399)
        assert len(t1) == 2
        t1.trim_ts(0, 399)
        assert len(t1) == 2
        t1.trim_ts(200, 300)
        assert len(t1) == 2
        t1.trim_ts(0, 1)
        assert len(t1) == 0
        # test right
        t2 = FastFloatTSList("a", "b")
        ts = pendulum.now("utc").int_timestamp
        t2.insert_datetime(ts, float(2.2))
        self.assertEqual(len(t2), 1)
        t2.trim_ts(ts, ts+1)
        self.assertEqual(len(t2), 1)
        t2.trim_ts(ts+1, ts+2)
        self.assertEqual(len(t2), 0)
        # test left
        t3 = FastFloatTSList("a", "b")
        ts = pendulum.now("utc").int_timestamp
        t3.insert_datetime(ts, float(2.2))
        self.assertEqual(len(t3), 1)
        t3.trim_ts(ts-1, ts)
        self.assertEqual(len(t3), 1)
        t3.trim_ts(ts-2, ts-1)
        self.assertEqual(len(t3), 0)

    def test_index(self):
        """index_of_ts is exact (KeyError on miss); nearest_index_of_ts rounds down."""
        t = FastFloatTSList("a", "b")
        end = pendulum.now("America/Toronto")
        start = end.subtract(minutes=199)
        data = [(start.add(minutes=n), random.random()) for n in range(0, 200)]
        for dt, val in data:
            t.insert_datetime(dt, val)
        print(t.at_index(199))
        assert t.index_of_ts(end) == 199
        assert t.index_of_ts(end.subtract(minutes=3)) == 196
        with self.assertRaises(KeyError):
            t.index_of_ts(end.subtract(seconds=1))
        with self.assertRaises(KeyError):
            del t[-1]
        with self.assertRaises(KeyError):
            t.index_of_ts(end.add(seconds=1))
        last_ts = t.nearest_index_of_ts(end.add(seconds=1))
        assert last_ts == 199
        prev_ts = t.nearest_index_of_ts(end.subtract(seconds=40))
        assert prev_ts == 198
| StarcoderdataPython |
8074948 | <filename>pomma/derive_initial_state_model.py
import logging
def derive_initial_state_model(max_repeats,
                               num_symbols,
                               max_extra_states=15,
                               start_symbol=1000,
                               end_symbol=1001,
                               num_random_starts=20
                               ):
    """Enumerate candidate state layouts for an initial state model.

    For every candidate count of extra states (1 .. *max_extra_states*) a
    state-symbol list and a matching max-repeat list are built: the start
    and end states come first (with a repeat limit of 0), followed by
    ``1 + extra`` states per symbol, each carrying that symbol's entry from
    *max_repeats*.

    Parameters
    ----------
    max_repeats : dict
        Maps each symbol to its maximum consecutive-repeat count.
    num_symbols : int
        Number of unique symbols.
    max_extra_states : int
        Upper bound on extra states per symbol. Default is 15.
    start_symbol, end_symbol : int
        Numerical symbols of the start/end states. Defaults 1000/1001.
    num_random_starts : int
        Number of random E-M restarts. Default is 20.
        NOTE(review): currently unused -- the E-M fitting and model
        selection appear to be unimplemented; the function builds the
        candidate layouts, logs each attempt and returns None.
    """
    for num_extra_states in range(1, max_extra_states + 1):
        logging.info(f'Trying model with {num_extra_states} extra_states.')
        states_per_symbol = 1 + num_extra_states
        state_symbols = [start_symbol, end_symbol]
        max_repeat_nums = [0, 0]
        for sym in range(num_symbols):
            state_symbols += [sym] * states_per_symbol
            max_repeat_nums += [max_repeats[sym]] * states_per_symbol
| StarcoderdataPython |
8085300 | <reponame>gearbird/calgo
# type: ignore
from functools import wraps
def log_it(prefix: str, suffix: str):
    """Decorator factory: print *prefix* before and *suffix* after each call.

    The wrapped function's return value is passed through unchanged, and
    ``functools.wraps`` preserves its metadata.
    """
    def decorator(target):
        @wraps(target)
        def wrapper(*args, **kwargs):
            print(prefix)
            outcome = target(*args, **kwargs)
            print(suffix)
            return outcome
        return wrapper
    return decorator
@log_it(prefix='pre', suffix='after')
def prog(a: int, b: int) -> int:
    """Return a + b, printing the sum (with 'pre'/'after' added by log_it)."""
    c = a + b
    print(f'a + b = {c}')
    return c
prog(1, 2) | StarcoderdataPython |
192348 | from opnsense_cli.api.base import ApiBase
class Export(ApiBase):
    # URL components of the OPNsense API path: /api/<MODULE>/<CONTROLLER>/<command>.
    # NOTE(review): ApiBase._api_call (project code, not shown here)
    # presumably issues the HTTP request based on self.method/self.command --
    # confirm against the ApiBase implementation.
    MODULE = "haproxy"
    CONTROLLER = "export"
    """
    Haproxy ExportController
    """

    @ApiBase._api_call
    def config(self, *args):
        """``get export/config`` request."""
        self.method = "get"
        self.command = "config"

    @ApiBase._api_call
    def diff(self, *args):
        """``get export/diff`` request."""
        self.method = "get"
        self.command = "diff"

    @ApiBase._api_call
    def download(self, *args):
        """``get export/download`` request."""
        self.method = "get"
        self.command = "download"
class Service(ApiBase):
    # URL components of the OPNsense API path: /api/<MODULE>/<CONTROLLER>/<command>.
    # NOTE(review): ApiBase._api_call (project code, not shown here)
    # presumably issues the HTTP request based on self.method/self.command --
    # confirm against the ApiBase implementation.
    MODULE = "haproxy"
    CONTROLLER = "service"
    """
    Haproxy ServiceController
    """

    @ApiBase._api_call
    def configtest(self, *args):
        """``get service/configtest`` request."""
        self.method = "get"
        self.command = "configtest"

    @ApiBase._api_call
    def reconfigure(self, *args):
        """``post service/reconfigure`` request."""
        self.method = "post"
        self.command = "reconfigure"
class Settings(ApiBase):
    """Haproxy SettingsController.

    One thin method per settings endpoint.  Each sets ``self.method`` and
    ``self.command``; ``ApiBase._api_call`` presumably performs the request.
    Only the Backend/Frontend/Server add/set endpoints (and del for
    Backend/Frontend, but NOT delServer — possibly an oversight; confirm)
    accept a ``json`` payload keyword.
    (The original placed this string after the class attributes, where it
    was a no-op statement rather than the class docstring.)
    """
    MODULE = "haproxy"
    CONTROLLER = "settings"
    # --- add* endpoints (all POST) ---
    @ApiBase._api_call
    def addAcl(self, *args):
        self.method = "post"
        self.command = "addAcl"
    @ApiBase._api_call
    def addAction(self, *args):
        self.method = "post"
        self.command = "addAction"
    @ApiBase._api_call
    def addBackend(self, *args, json=None):
        self.method = "post"
        self.command = "addBackend"
    @ApiBase._api_call
    def addCpu(self, *args):
        self.method = "post"
        self.command = "addCpu"
    @ApiBase._api_call
    def addErrorfile(self, *args):
        self.method = "post"
        self.command = "addErrorfile"
    @ApiBase._api_call
    def addFrontend(self, *args, json=None):
        self.method = "post"
        self.command = "addFrontend"
    @ApiBase._api_call
    def addGroup(self, *args):
        self.method = "post"
        self.command = "addGroup"
    @ApiBase._api_call
    def addHealthcheck(self, *args):
        self.method = "post"
        self.command = "addHealthcheck"
    @ApiBase._api_call
    def addLua(self, *args):
        self.method = "post"
        self.command = "addLua"
    @ApiBase._api_call
    def addMapfile(self, *args):
        self.method = "post"
        self.command = "addMapfile"
    @ApiBase._api_call
    def addServer(self, *args, json=None):
        self.method = "post"
        self.command = "addServer"
    @ApiBase._api_call
    def addUser(self, *args):
        self.method = "post"
        self.command = "addUser"
    @ApiBase._api_call
    def addmailer(self, *args):
        self.method = "post"
        self.command = "addmailer"
    @ApiBase._api_call
    def addresolver(self, *args):
        self.method = "post"
        self.command = "addresolver"
    # --- del* endpoints (all POST) ---
    @ApiBase._api_call
    def delAcl(self, *args):
        self.method = "post"
        self.command = "delAcl"
    @ApiBase._api_call
    def delAction(self, *args):
        self.method = "post"
        self.command = "delAction"
    @ApiBase._api_call
    def delBackend(self, *args, json=None):
        self.method = "post"
        self.command = "delBackend"
    @ApiBase._api_call
    def delCpu(self, *args):
        self.method = "post"
        self.command = "delCpu"
    @ApiBase._api_call
    def delErrorfile(self, *args):
        self.method = "post"
        self.command = "delErrorfile"
    @ApiBase._api_call
    def delFrontend(self, *args, json=None):
        self.method = "post"
        self.command = "delFrontend"
    @ApiBase._api_call
    def delGroup(self, *args):
        self.method = "post"
        self.command = "delGroup"
    @ApiBase._api_call
    def delHealthcheck(self, *args):
        self.method = "post"
        self.command = "delHealthcheck"
    @ApiBase._api_call
    def delLua(self, *args):
        self.method = "post"
        self.command = "delLua"
    @ApiBase._api_call
    def delMapfile(self, *args):
        self.method = "post"
        self.command = "delMapfile"
    @ApiBase._api_call
    def delServer(self, *args):
        self.method = "post"
        self.command = "delServer"
    @ApiBase._api_call
    def delUser(self, *args):
        self.method = "post"
        self.command = "delUser"
    @ApiBase._api_call
    def delmailer(self, *args):
        self.method = "post"
        self.command = "delmailer"
    @ApiBase._api_call
    def delresolver(self, *args):
        self.method = "post"
        self.command = "delresolver"
    # --- read endpoint ---
    @ApiBase._api_call
    def get(self, *args):
        self.method = "get"
        self.command = "get"
    # --- set* endpoints (all POST) ---
    @ApiBase._api_call
    def setAcl(self, *args):
        self.method = "post"
        self.command = "setAcl"
    @ApiBase._api_call
    def setAction(self, *args):
        self.method = "post"
        self.command = "setAction"
    @ApiBase._api_call
    def setBackend(self, *args, json=None):
        self.method = "post"
        self.command = "setBackend"
    @ApiBase._api_call
    def setCpu(self, *args):
        self.method = "post"
        self.command = "setCpu"
    @ApiBase._api_call
    def setErrorfile(self, *args):
        self.method = "post"
        self.command = "setErrorfile"
    @ApiBase._api_call
    def setFrontend(self, *args, json=None):
        self.method = "post"
        self.command = "setFrontend"
    @ApiBase._api_call
    def setGroup(self, *args):
        self.method = "post"
        self.command = "setGroup"
    @ApiBase._api_call
    def setHealthcheck(self, *args):
        self.method = "post"
        self.command = "setHealthcheck"
    @ApiBase._api_call
    def setLua(self, *args):
        self.method = "post"
        self.command = "setLua"
    @ApiBase._api_call
    def setMapfile(self, *args):
        self.method = "post"
        self.command = "setMapfile"
    @ApiBase._api_call
    def setServer(self, *args, json=None):
        self.method = "post"
        self.command = "setServer"
    @ApiBase._api_call
    def setUser(self, *args):
        self.method = "post"
        self.command = "setUser"
    @ApiBase._api_call
    def setmailer(self, *args):
        self.method = "post"
        self.command = "setmailer"
    @ApiBase._api_call
    def setresolver(self, *args):
        self.method = "post"
        self.command = "setresolver"
| StarcoderdataPython |
63694 | import unittest
import json
from typing import Any
from src.shapeandshare.dicebox.config.dicebox_config import DiceboxConfig
from src.shapeandshare.dicebox.factories.network_factory import NetworkFactory
class DiceboxNetworkTest(unittest.TestCase):
    """Tests for building dicebox networks from serialized definitions.

    The large blocks of commented-out scaffolding from earlier iterations
    (test_create_random, test_compile_model) were removed; see git history.
    """

    # Fixture locations, shared by all tests.
    TEST_DATA_BASE = "test/fixtures"
    local_config_file = "%s/dicebox.config" % TEST_DATA_BASE
    local_lonestar_model_file = "%s/dicebox.lonestar.json" % TEST_DATA_BASE

    def setUp(self):
        # Show complete diffs when comparing the large JSON fixtures.
        self.maxDiff = None

    def test_load_network(self):
        """NetworkFactory.create_network should build a network from an
        explicit layer-by-layer definition without raising."""
        dc = DiceboxConfig(config_file=self.local_config_file)
        # Load the reference fixtures; for now we only verify they exist and
        # parse.  TODO(review): compare the created network against them.
        with open(self.local_lonestar_model_file, "r") as json_file:
            expected_dicebox_serialized_model = json.load(json_file)
        self.assertIsNotNone(expected_dicebox_serialized_model)
        with open("%s/lonestar.model.json" % self.TEST_DATA_BASE) as json_file:
            expected_compiled_model = json.load(json_file)
        self.assertIsNotNone(expected_compiled_model)
        local_input_size = 784
        local_output_size = 10
        local_optimizer = "adamax"
        local_network_definition = {
            "optimizer": local_optimizer,
            "input_shape": [
                local_input_size,
            ],
            "output_size": local_output_size,
            "layers": [
                {"type": "dense", "size": 987, "activation": "elu"},
                {"type": "dropout", "rate": 0.2},
                {"type": "dense", "size": 89, "activation": "elu"},
                {"type": "dropout", "rate": 0.2},
                {"type": "dense", "size": 987, "activation": "elu"},
                {"type": "dropout", "rate": 0.2},
                {"type": "dense", "size": 987, "activation": "elu"},
                {"type": "dropout", "rate": 0.2},
                {"type": "dense", "size": 987, "activation": "elu"},
                {"type": "dropout", "rate": 0.2},
            ],
        }
        nf = NetworkFactory(config=dc)
        dn = nf.create_network(network_definition=local_network_definition)
        # The original test made no assertion at all; at minimum the factory
        # must return a network object.
        self.assertIsNotNone(dn)
if __name__ == "__main__":
    # The original ran TextTestRunner().run(DiceboxNetworkTest()), which
    # fails because TestCase() defaults to a nonexistent 'runTest' method.
    # unittest.main() discovers and runs all test_* methods correctly.
    unittest.main()
| StarcoderdataPython |
3226062 | from flask import Response, Flask, render_template, request, \
redirect, url_for, send_from_directory, send_file, jsonify, session
from werkzeug import generate_password_hash, check_password_hash, secure_filename
import sqlite3
from dateutil import parser
# Path to the local SQLite database, relative to the working directory.
sqlite_file = 'LocalDB/LocalDB.db'
app = Flask(__name__)
@app.route('/main_analytics/drive')
def drive():
    """Render the drive-analytics page; data is fetched via the POST endpoints."""
    # user_id is assigned but never used here — TODO(review): confirm removable.
    user_id = 1
    return render_template('drive.html')
@app.route('/main_analytics')
def main_analytics():
    """Render the main analytics page."""
    # user_id is assigned but never used here — TODO(review): confirm removable.
    user_id = 1
    return render_template('main_analytics.html')
@app.route('/drive_date', methods=['POST'])
def datedb():
    """Return JSON [(rec_date, score), ...] for the posted patient's drives.

    Reads the patient id from the ``patient`` form field.
    """
    connection = sqlite3.connect(sqlite_file)
    cursor = connection.cursor()
    user_id = request.form.get("patient")
    # Parameterized query: the original interpolated the form value directly
    # into the SQL string (injection risk).
    get_drive_score = (
        "select rec_date,score from driveanalytics "
        "join driverecordings on driveanalytics.id = driverecordings.id "
        "where driverecordings.user_id = ? order by rec_date")
    cursor.execute(get_drive_score, (user_id,))
    drive_score = cursor.fetchall()
    cursor.close()
    connection.close()  # the original leaked the connection
    return jsonify(drive_score)
@app.route('/drive_date/hover', methods=['POST'])
def hoverdb():
    """Return (distance, emergency_stops, u_turns) for one of the patient's drives.

    Fixes vs. the original:
    * the query string called .format(result1) on an undefined name (NameError),
    * the SQL was invalid (two WHERE conditions with no AND between them),
    * the posted patient/date form values were read but never used,
    * the connection was never closed, and a debug print was left in.
    """
    connection = sqlite3.connect(sqlite_file)
    cursor = connection.cursor()
    user_id = request.form.get("patient")
    rec_date = request.form.get("date")
    get_distance = (
        "select distance,emergency_stops,u_turns from driveanalytics "
        "join driverecordings on driveanalytics.id = driverecordings.id "
        "where driverecordings.user_id = ? and rec_date = ? limit 1")
    cursor.execute(get_distance, (user_id, rec_date))
    result = cursor.fetchall()
    cursor.close()
    connection.close()
    return jsonify(result)
@app.route('/drive_hour', methods=['POST'])
def hourdb():
    """Return JSON [(rec_date, score), ...] for the posted patient's drives."""
    connection = sqlite3.connect(sqlite_file)
    cursor = connection.cursor()
    user_id = request.form.get("patient")
    # Parameterized query instead of string interpolation (injection risk).
    get_drive_score = (
        "select rec_date,score from driveanalytics "
        "join driverecordings on driveanalytics.id = driverecordings.id "
        "where driverecordings.user_id = ? order by rec_date")
    cursor.execute(get_drive_score, (user_id,))
    drive_score = cursor.fetchall()
    cursor.close()
    connection.close()  # the original leaked the connection
    return jsonify(drive_score)
@app.route('/drive_hour/hover', methods=['POST'])
def timedb():
    """Return (distance, emergency_stops, u_turns) for one of the patient's drives.

    The original hard-coded user_id 20, ignored the posted form values, left
    a debug print in, and leaked the connection; all fixed here.
    """
    connection = sqlite3.connect(sqlite_file)
    cursor = connection.cursor()
    user_id = request.form.get("patient")
    rec_date = request.form.get("date")
    get_distance = (
        "select distance,emergency_stops,u_turns from driveanalytics "
        "join driverecordings on driveanalytics.id = driverecordings.id "
        "where driverecordings.user_id = ? and rec_date = ? limit 1")
    cursor.execute(get_distance, (user_id, rec_date))
    result = cursor.fetchall()
    cursor.close()
    connection.close()
    return jsonify(result)
@app.route('/drive_map', methods=['POST'])
def mapdb():
    """Return JSON [(rec_date, score), ...] for the map view."""
    connection = sqlite3.connect(sqlite_file)
    cursor = connection.cursor()
    # NOTE(review): user_id is hard-coded while sibling endpoints read it
    # from the form; kept as-is to preserve behavior, but the query is now
    # parameterized and the connection is closed.
    user_id = 20
    get_drive_score = (
        "select rec_date,score from driveanalytics "
        "join driverecordings on driveanalytics.id = driverecordings.id "
        "where driverecordings.user_id = ? order by rec_date")
    cursor.execute(get_drive_score, (user_id,))
    drive_score = cursor.fetchall()
    cursor.close()
    connection.close()
    return jsonify(drive_score)
@app.route('/query/drive/time-of-day', methods=['POST'])
def queryTimeOfDayDrive():
    """Average drive score per time-of-day bucket for the posted patient.

    Buckets (preserving the original classification, including its quirk
    that hours 0-5 land in the afternoon bucket — TODO(review): confirm):
    [morning 06-12, afternoon <=18, night otherwise].
    Returns a JSON list of three averages; 0 for an empty bucket.

    Fixes vs. the original: parameterized SQL (was string interpolation),
    ``len(x) is not 0`` identity comparison replaced by truthiness, debug
    print removed, connection closed, unused locals removed.
    """
    conn = sqlite3.connect(sqlite_file)
    c = conn.cursor()
    user_id = request.form.get("patient_id")
    sql = (
        "select rec_date,score from driveanalytics "
        "join driverecordings on driveanalytics.id = driverecordings.id "
        "where driverecordings.user_id = ? order by rec_date")
    c.execute(sql, (user_id,))
    result = c.fetchall()
    c.close()
    conn.close()
    buckets = [[], [], []]  # morning, afternoon, night
    for rec_date, score in result:
        hour = parser.parse(rec_date).hour
        if 6 <= hour <= 12:
            buckets[0].append(score)
        elif hour <= 18:
            buckets[1].append(score)
        else:
            buckets[2].append(score)
    avgPerTimeOfDay = [sum(b) / float(len(b)) if b else 0 for b in buckets]
    return jsonify(avgPerTimeOfDay)
if __name__ == '__main__':
    # debug=True enables the reloader and interactive traceback;
    # not suitable for production deployments.
    app.run(debug=True)
# app.run(debug=True)
# @app.route('/route', methods = ['POST'])
# def routedb():
# connection = sqlite3.connect(sqlite_file)
# cursor = connection.cursor ()
# user_id = 20
# sql = "select * from driveanalytics join driverecordings on driveanalytics.id = driverecordings.id where driverecordings.user_id = {}".format(user_id)
# cursor.execute(sql)
# result = cursor.fetchall()
# cursor.close()
# return jsonify(result)
| StarcoderdataPython |
127698 | <filename>modules/lib/homekit/accessory.py
# Distributed under MIT License
# Copyright (c) 2021 <NAME>
""" Homekit accessory class """
from homekit.server import *
class Accessory:
    """Homekit accessory wrapper around the native ``homekit_.Accessory``."""
    # Accessory category identifiers, passed as ``cid`` to the constructor.
    CID_NONE = 0
    CID_OTHER = 1
    CID_BRIDGE = 2
    CID_FAN = 3
    CID_GARAGE_DOOR_OPENER = 4
    CID_LIGHTING = 5
    CID_LOCK = 6
    CID_OUTLET = 7
    CID_SWITCH = 8
    CID_THERMOSTAT = 9
    CID_SENSOR = 10
    CID_SECURITY_SYSTEM = 11
    CID_DOOR = 12
    CID_WINDOW = 13
    CID_WINDOW_COVERING = 14
    CID_PROGRAMMABLE_SWITCH = 15
    CID_RESERVED = 16
    CID_IP_CAMERA = 17
    CID_VIDEO_DOORBELL = 18
    CID_AIR_PURIFIER = 19
    CID_HEATER = 20
    CID_AIR_CONDITIONER = 21
    CID_HUMIDIFIER = 22
    CID_DEHUMIDIFIER = 23
    def __init__(self, cid, **kwargs):
        """ Create accessory.

        Keyword parameters (all strings, with defaults):
        name, manufacturer, model, serial_number, firmware_revision,
        hardware_revision, product_version, product_data (8 bytes required).
        (The original docstring listed camelCase names that did not match
        the snake_case keys actually read below.)
        """
        import homekit_
        self.accessory = homekit_.Accessory(\
            cid = cid, \
            name = kwargs.get("name" , "NoName"), \
            manufacturer = kwargs.get("manufacturer" , "Manufacturer"), \
            model = kwargs.get("model" , "ESP32"), \
            serial_number = kwargs.get("serial_number" , "0000000000"), \
            firmware_revision = kwargs.get("firmware_revision", "1.0"), \
            hardware_revision = kwargs.get("hardware_revision", "1.0"), \
            product_version = kwargs.get("product_version" , "1.0"))
        # NOTE(review): default "01234568" skips the digit 7 — possibly a
        # typo for "01234567"; confirm before changing.
        self.accessory.set_product_data(kwargs.get("product_data","01234568"))
    def __del__(self):
        """ Destroy homekit accessory """
        self.accessory.deinit()
    def add_server(self, server):
        """ Add the serve to the accessory object """
        self.accessory.add_server(server.server)
    def set_identify_callback(self, callback):
        """ Set identify callback. In a real accessory, something like LED blink should be implemented got visual identification """
        self.accessory.set_identify_callback(callback)
    def set_product_data(self, data):
        """ Set product data. 8 bytes product data assigned to the Product Plan. """
        self.accessory.set_product_data(data)
3216662 | <gh_stars>1-10
if __name__ == '__main__':
    import os
    import torch
    from torch.utils.data import DataLoader
    from networks import Discriminator, Generator, Loss
    from options import TrainOption
    from pipeline import CustomDataset
    from utils import binning_and_cal_pixel_cc, Manager, update_lr, weights_init
    import numpy as np
    from tqdm import tqdm
    import datetime
    # GAN training driver: trains G/D with periodic validation that writes
    # sample images and correlation statistics to Analysis.txt.
    torch.backends.cudnn.benchmark = True
    opt = TrainOption().parse()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu_ids)
    device = torch.device('cuda:0' if opt.gpu_ids != -1 else 'cpu:0')
    dtype = torch.float16 if opt.data_type == 16 else torch.float32
    image_height = opt.image_height
    # NOTE(review): radius is computed here but the validation loop below
    # hard-codes center 511 and radius 392 — wrong for image_height != 1024.
    radius = 392 if image_height == 1024 else 196
    if opt.val_during_train:
        from options import TestOption
        test_opt = TestOption().parse()
        # val_freq is only defined when val_during_train is set; the loop
        # below only reads it under the same flag.
        val_freq = opt.val_freq
    init_lr = opt.lr
    lr = opt.lr
    dataset = CustomDataset(opt)
    data_loader = DataLoader(dataset=dataset,
                             batch_size=opt.batch_size,
                             num_workers=opt.n_workers,
                             shuffle=not opt.no_shuffle)
    G = Generator(opt).apply(weights_init).to(device=device, dtype=dtype)
    D = Discriminator(opt).apply(weights_init).to(device=device, dtype=dtype)
    criterion = Loss(opt)
    G_optim = torch.optim.Adam(G.parameters(), lr=lr, betas=(opt.beta1, opt.beta2), eps=opt.eps)
    D_optim = torch.optim.Adam(D.parameters(), lr=lr, betas=(opt.beta1, opt.beta2), eps=opt.eps)
    # Resume from the latest checkpoint if one exists.
    if opt.latest and os.path.isfile(opt.model_dir + '/' + str(opt.latest) + '_dict.pt'):
        pt_file = torch.load(opt.model_dir + '/' + str(opt.latest) + '_dict.pt')
        init_epoch = pt_file['Epoch']
        print("Resume at epoch: ", init_epoch)
        G.load_state_dict(pt_file['G_state_dict'])
        D.load_state_dict(pt_file['D_state_dict'])
        G_optim.load_state_dict(pt_file['G_optim_state_dict'])
        D_optim.load_state_dict(pt_file['D_optim_state_dict'])
        current_step = init_epoch * len(dataset)
        for param_group in G_optim.param_groups:
            lr = param_group['lr']
    else:
        init_epoch = 1
        current_step = 0
    manager = Manager(opt)
    total_step = opt.n_epochs * len(data_loader)
    start_time = datetime.datetime.now()
    for epoch in range(init_epoch, opt.n_epochs + 1):
        for input, target, _, _ in tqdm(data_loader):
            G.train()
            current_step += 1
            input, target = input.to(device=device, dtype=dtype), target.to(device, dtype=dtype)
            # criterion runs both networks and returns losses plus the
            # tensors used for bookkeeping/visualization.
            D_loss, G_loss, target_tensor, generated_tensor = criterion(D, G, input, target)
            G_optim.zero_grad()
            G_loss.backward()
            G_optim.step()
            D_optim.zero_grad()
            D_loss.backward()
            D_optim.step()
            package = {'Epoch': epoch,
                       'current_step': current_step,
                       'total_step': total_step,
                       'D_loss': D_loss.detach().item(),
                       'G_loss': G_loss.detach().item(),
                       'D_state_dict': D.state_dict(),
                       'G_state_dict': G.state_dict(),
                       'D_optim_state_dict': D_optim.state_dict(),
                       'G_optim_state_dict': G_optim.state_dict(),
                       'target_tensor': target_tensor,
                       'generated_tensor': generated_tensor.detach()}
            manager(package)
            # Periodic validation: dump images and correlation metrics.
            if opt.val_during_train and (current_step % val_freq == 0):
                G.eval()
                test_image_dir = os.path.join(test_opt.image_dir, str(current_step))
                os.makedirs(test_image_dir, exist_ok=True)
                test_model_dir = test_opt.model_dir
                test_dataset = CustomDataset(test_opt)
                test_data_loader = DataLoader(dataset=test_dataset,
                                              batch_size=test_opt.batch_size,
                                              num_workers=test_opt.n_workers,
                                              shuffle=not test_opt.no_shuffle)
                for p in G.parameters():
                    p.requires_grad_(False)
                list_TUMF_fake = list()
                list_TUMF_real = list()
                list_cc_1x1_fake = list()
                list_cc_1x1_real = list()
                list_cc_1x1 = list()
                list_cc_bin_2x2 = list()
                list_cc_bin_4x4 = list()
                list_cc_bin_8x8 = list()
                list_R1 = list()
                list_R2 = list()
                for input, target, _, name in tqdm(test_data_loader):
                    input, target = input.to(device=device, dtype=dtype), target.to(device, dtype=dtype)
                    fake = G(input)
                    manager.save_image(fake, path=os.path.join(test_image_dir, name[0] + '_fake.png'))
                    manager.save_image(target, path=os.path.join(test_image_dir, name[0] + '_real.png'))
                    # Model measurements
                    # NOTE(review): bin_size is never used below.
                    bin_size = 8
                    np_fake, np_real = fake.cpu().numpy().squeeze() * 100., target.cpu().numpy().squeeze() * 100.
                    # rearrange [-100, 100]
                    carrier_fake = list()
                    carrier_real = list()
                    # Collect pixels inside a disk of radius 392 centered at
                    # (511, 511); thresholded |values| feed the TUMF sums.
                    for i in range(image_height):
                        for j in range(image_height):
                            if (i - 511) ** 2 + (j - 511) ** 2 <= 392 ** 2:
                                list_cc_1x1_fake.append(np_fake[i, j])
                                list_cc_1x1_real.append(np_real[i, j])
                                if abs(np_fake[i, j]) >= 10:
                                    carrier_fake.append(abs(np_fake[i, j]))
                                if abs(np_real[i, j]) >= 10:
                                    carrier_real.append(abs(np_real[i, j]))
                    TUMF_fake, TUMF_real = np.array(carrier_fake).sum(), np.array(carrier_real).sum()
                    list_TUMF_fake.append(TUMF_fake)
                    list_TUMF_real.append(TUMF_real)
                    list_R1.append((TUMF_fake - TUMF_real) / TUMF_real)
                    # NOTE(review): list_cc_1x1_fake/real accumulate across
                    # images, so these per-image stats are computed over the
                    # whole running history — confirm this is intended.
                    list_cc_1x1.append(np.corrcoef(list_cc_1x1_fake, list_cc_1x1_real)[0][1])
                    list_R2.append(((np.array(list_cc_1x1_fake) - np.array(list_cc_1x1_real)) ** 2).sum() / (np.array(list_cc_1x1_real) ** 2).sum())
                    list_cc_bin_2x2.append(binning_and_cal_pixel_cc(np_fake, np_real, 2))
                    list_cc_bin_4x4.append(binning_and_cal_pixel_cc(np_fake, np_real, 4))
                    list_cc_bin_8x8.append(binning_and_cal_pixel_cc(np_fake, np_real, 8))
                cc_TUMF = np.corrcoef(np.array(list_TUMF_fake), np.array(list_TUMF_real))
                cc_1x1 = np.mean(list_cc_1x1)
                cc_bin_2x2 = np.mean(list_cc_bin_2x2)
                cc_bin_4x4 = np.mean(list_cc_bin_4x4)
                cc_bin_8x8 = np.mean(list_cc_bin_8x8)
                R1_mean = np.mean(list_R1)
                R1_std = np.std(list_R1)
                R2_mean = np.mean(list_R2)
                R2_std = np.std(list_R2)
                with open(os.path.join(test_model_dir, 'Analysis.txt'), 'a') as analysis:
                    analysis.write(str(current_step) + ', ' + str(cc_TUMF[0][1]) + ', ' + str(cc_1x1) + ', ' +
                                   str(cc_bin_2x2) + ', ' + str(cc_bin_4x4) + ', ' + str(cc_bin_8x8) + ', ' +
                                   str(R1_mean) + ', ' + str(R1_std) + ', ' + str(R2_mean) + ', ' + str(R2_std) + '\n')
                    # Redundant: the with-block already closes the file.
                    analysis.close()
                for p in G.parameters():
                    p.requires_grad_(True)
            if opt.debug:
                break
        # Linear LR decay after epoch_decay (pix2pixHD-style schedules).
        if epoch > opt.epoch_decay and opt.HD:
            lr = update_lr(lr, init_lr, opt.n_epochs - opt.epoch_decay, D_optim, G_optim)
    print("Total time taken: ", datetime.datetime.now() - start_time)
class Attacker(object):
    """Base class for sentence attackers; concrete subclasses override attack()."""

    def __init__(self, task):
        super(Attacker, self).__init__()
        # Store the task so subclasses can consult it.
        self.arg = task

    def attack(self, sentence):
        """Fallback implementation; subclasses provide the real attack."""
        return "placeholder, please use specific Attacker instead"
class Ragu(Attacker):
    """Toy attacker that tags the sentence with an '- attacked' suffix."""

    def __init__(self, task):
        super(Ragu, self).__init__(task)

    def attack(self, sentence):
        """Return *sentence* with '- attacked' appended."""
        return sentence + "- attacked"
| StarcoderdataPython |
375001 | <reponame>MatteoZanella/siv-texture-analysis<gh_stars>1-10
import unittest
from texture.analysis import LBP
import numpy as np
from PIL import Image
class MyTestCase(unittest.TestCase):
    """
    Tests for the LBP (local binary patterns) texture-analysis helpers.
    """
    def test_neighbor_offset(self):
        """8 neighbors at radius 1 enumerate the unit ring clockwise from (-1, 0)."""
        offsets = LBP._neighbors_offsets(8, 1)
        expected_offsets = np.array([[-1, 0], [-1, 1], [0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1]])
        self.assertEqual(offsets.tolist(), expected_offsets.tolist())
    def test_lbp(self):
        """Spot-check individual LBP codes on a known texture image."""
        image = Image.open("textures/1.1.04.tiff")
        lbp = LBP(image)
        # lbp_2 = LBP(image, fast=False) # Too slow
        # self.assertEqual(lbp.matrix.tolist(), lbp_2.matrix.tolist())
        self.assertEqual(lbp.matrix[0, 0], 0)
        self.assertEqual(lbp.matrix[2, 3], 199)
        self.assertEqual(lbp.matrix[0, 2], 4)
    def test_histograms(self):
        """Per-cell histograms of a tiny synthetic image are normalized correctly."""
        pixels = Image.fromarray(
            np.array([[1, 2, 3, 1, 0],
                      [0, 7, 5, 8, 2],
                      [5, 4, 0, 2, 5],
                      [7, 1, 3, 4, 9]]))
        lbp = LBP(pixels, cell_shape=(2, 2), neighbors=4)
        self.assertEqual(lbp.histograms[0, 0].tolist(), [.25, 0, 0, 0, .25, 0, .25, 0, 0, 0, 0, 0, 0, 0, 0, .25])
        self.assertEqual(lbp.histograms[0, 2].tolist(), [0, 0, 0, .5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, .5])
if __name__ == '__main__':
    # Discover and run all test_* methods in this module.
    unittest.main()
| StarcoderdataPython |
1632611 | <filename>spine/classification/config.py
# Copyright 2021 Medical Imaging Center, Vingroup Big Data Insttitute (VinBigdata), Vietnam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
additional configs for classifier
"""
from detectron2.config import CfgNode as CN
def add_classifier_config(cfg):
    """Register classifier-specific defaults on a detectron2 CfgNode.

    Mutates *cfg* in place, adding MODEL.CLASSIFIER, MODEL.DENSENET and
    CLASSIFICATION sub-nodes with their default values.
    """
    # Classifier head configuration.
    cfg.MODEL.CLASSIFIER = CN()
    cfg.MODEL.CLASSIFIER.CLASSES = ["Abnormal"]
    cfg.MODEL.CLASSIFIER.NORM = "BN"
    cfg.MODEL.CLASSIFIER.HEAD_NAME = "SimpleHead"
    cfg.MODEL.CLASSIFIER.IN_FEATURES = ["res4", "res5"]
    # Loss configuration; names and weights are parallel lists.
    cfg.MODEL.CLASSIFIER.LOSS_NAMES = ["BCE",]
    cfg.MODEL.CLASSIFIER.LOSS_WEIGHTS = [1.0,]
    # Focal-loss parameters (-1 presumably disables alpha weighting —
    # confirm against the loss implementation).
    cfg.MODEL.CLASSIFIER.FOCAL_ALPHA = -1
    cfg.MODEL.CLASSIFIER.FOCAL_GAMMA = 2
    cfg.MODEL.CLASSIFIER.LOSS_REDUCTION = "mean"
    cfg.MODEL.CLASSIFIER.INPUT_SIZE = 224
    cfg.MODEL.CLASSIFIER.PRETRAINED = True
    # DenseNet backbone configuration.
    cfg.MODEL.DENSENET = CN()
    cfg.MODEL.DENSENET.OUT_FEATURES = [""]
    cfg.MODEL.DENSENET.DEPTH = 121
    # Evaluation-time bootstrap settings (confidence intervals).
    cfg.CLASSIFICATION = CN()
    cfg.CLASSIFICATION.BOOTSTRAP = False
    cfg.CLASSIFICATION.BOOTSTRAP_SAMPLES = 10000
    cfg.CLASSIFICATION.BOOTSTRAP_CI = 0.95
| StarcoderdataPython |
302957 |
"""
aggregates coins data by time intervals (specified in INTERVALS)
Data saved in separate collections:
coins_1h
coins_24h
Each entry in the collection has the following keys:
"coin",
"rank",
"price_usd",
"24h_volume_usd",
"market_cap_usd",
"available_supply",
"total_supply",
all of that (except "coin"), in turn, has the following aggragated data:
'min',
'max',
'begin',
'end',
'time_end',
'time_min',
'time_max'
The whole entry has additional fields:
'timestamp',
'number_of_aggregated'
Collections are indexed by coin and timestamp.
Example of an entry:
> db.coins_1h.findOne()
{
"_id" : ObjectId("5abf7aed467767327a5a9f5e"),
"market_cap_usd" : {
"begin" : 275907280960,
"time_max" : 1515364500,
"min" : 275907280960,
"max" : 279920325476,
"end" : 279074603826,
"time_min" : 1515363000,
"avg" : 278521868630.61536
},
"price_usd" : {
"begin" : 16434.4,
"time_max" : 1515364500,
"min" : 16434.4,
"max" : 16673.4,
"end" : 16623,
"time_min" : 1515363000,
"avg" : 16590.1
},
"24h_volume_usd" : {
"begin" : 15983300000,
"time_max" : 1515363300,
"min" : 15874100000,
"max" : 15989000000,
"end" : 15931000000,
"time_min" : 1515364800,
"avg" : 15945238461.538462
},
"timestamp" : 1515363000,
"time_end" : 1515366600,
"rank" : {
"begin" : 1,
"time_max" : 1515363000,
"min" : 1,
"max" : 1,
"end" : 1,
"time_min" : 1515363000,
"avg" : 1
},
"total_supply" : {
"begin" : 16788400,
"time_max" : 1515365700,
"min" : 16788400,
"max" : 16788462,
"end" : 16788462,
"time_min" : 1515363000,
"avg" : 16788438.153846152
},
"available_supply" : {
"begin" : 16788400,
"time_max" : 1515365700,
"min" : 16788400,
"max" : 16788462,
"end" : 16788462,
"time_min" : 1515363000,
"avg" : 16788438.153846152
},
"symbol" : "BTC"
}
"""
import logging
import math
import os
import time
from currencies_names import currency_name
from coins_names import COINS_NAMES
from mng import MongoReader, MongoWriter
MONGO_READER = MongoReader()
MONGO_WRITER = MongoWriter()

# Aggregation window sizes in seconds, keyed by collection suffix
# (coins_1h, ...).  The 24h window is currently disabled.
INTERVAL = {
    '1h': 1*3600,
}
# '24h': 24*3600,

# _log() only emits when this is > 0.
DEBUG_LEVEL = 0

# BUG FIX: the original passed the *string* '__file__' to os.path.dirname,
# which yields '' and made MYDIR the current working directory instead of
# this module's parent directory.  Use the real __file__.
MYDIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
LOGFILE = "%s/log/aggregate.log" % MYDIR

logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    filename=LOGFILE,
    level=logging.INFO)
def _log(message):
    """Log *message* at INFO level, but only when DEBUG_LEVEL is positive."""
    if DEBUG_LEVEL <= 0:
        return
    logging.info(message)
def _log_error(message):
    """Unconditionally log *message* at ERROR level."""
    logging.error(message)
def _get_entries(coin, start_time, end_time):
    """Fetch raw ticks for *coin* between *start_time* and *end_time*."""
    return MONGO_READER.get_raw_data(coin, start_time, end_time)
def aggregate_coin(coin, time_start, interval):
    """
    Aggregate 5min *coin* data starting from *time_start*
    for *interval* seconds (must be divisible by 5m) and return the
    aggregated information as a dictionary.

    Per-field keys of the result: min, max, avg, begin, end,
    time_min, time_max.
    """
    samples = list(_get_entries(coin, time_start, time_start + interval))
    result = {
        'symbol': coin,
        'timestamp': samples[0].get('timestamp'),
        'time_end': samples[-1].get('timestamp'),
        'number_of_aggregated': len(samples),
    }
    for field in ("rank", "price_usd", "24h_volume_usd", "market_cap_usd",
                  "available_supply", "total_supply"):
        # max()/min() return the *first* extremal entry, matching the
        # strict-comparison scan of the original implementation.
        peak = max(samples, key=lambda e: e.get(field))
        low = min(samples, key=lambda e: e.get(field))
        result[field] = {
            'min': low.get(field),
            'max': peak.get(field),
            'begin': samples[0].get(field),
            'end': samples[-1].get(field),
            'time_min': low.get('timestamp'),
            'time_max': peak.get('timestamp'),
            'avg': sum(e.get(field) for e in samples) / len(samples),
        }
    return result
def aggregate_currencies(_, time_start, interval):
    """
    First argument is always None; it is kept so the signature matches the
    coin aggregation function (they may be merged later).

    Aggregate 5min currencies data starting from *time_start* for
    *interval* seconds (must be divisible by 5m) and return the aggregated
    information as a dictionary with per-currency fields:
    min, max, avg, begin, end, time_min, time_max.
    """
    samples = MONGO_READER.get_raw_data(None, time_start, time_start + interval,
                                        collection_name='currencies')
    result = {
        'timestamp': samples[0].get('timestamp'),
        'time_end': samples[-1].get('timestamp'),
        'number_of_aggregated': len(samples),
    }
    # Every key of a sample except metadata is a currency code.
    fields = [k for k in samples[0]
              if not k.startswith('_') and k not in ('timestamp', 'last_updated')]
    for field in fields:
        # max()/min() return the *first* extremal entry, matching the
        # strict-comparison scan of the original implementation.
        peak = max(samples, key=lambda e: e.get(field))
        low = min(samples, key=lambda e: e.get(field))
        result[field] = {
            'min': low.get(field),
            'max': peak.get(field),
            'begin': samples[0].get(field),
            'end': samples[-1].get(field),
            'time_min': low.get('timestamp'),
            'time_max': peak.get('timestamp'),
            'avg': sum(e.get(field) for e in samples) / len(samples),
        }
    return result
def get_aggregated_coin(coin, time_start, time_end, number_of_ticks, key=None): # pylint: disable=too-many-locals,too-many-branches
    """
    High-level reader that returns aggregated data ticks (based on aggregated
    data) and aggregated info about the data (min, max and so on).
    Data is returned in form:
        {
            'ticks': [...],
            'meta': {
                'min':,
                'max':,
                ...
            }
        }
    Number of ticks in the returned data can be greater than *number_of_ticks*.
    """
    # Pick the coarsest pre-aggregated collection whose tick size is smaller
    # than the tick size the caller asked for.
    desired_interval = (time_end-time_start)/number_of_ticks
    chosen_interval = None
    # NOTE(review): the loop keeps assigning, so the *smallest* qualifying
    # interval wins even though the sort is largest-first — with more than
    # one entry in INTERVAL this looks like a missing `break`; confirm.
    for interval_name, interval_size in sorted(INTERVAL.items(), key=lambda x: -x[1]):
        if interval_size < desired_interval:
            chosen_interval = interval_name
    # if interval is so small, we need to use the raw and not the aggregated data
    collection_name = None
    if chosen_interval:
        collection_name = 'coins_%s' % chosen_interval
    entries = MONGO_READER.get_raw_data(coin, time_start, time_end, collection_name=collection_name)
    if entries == []:
        return {'meta':{}, 'ticks':[]}
    if key is None:
        key = "price_usd"
    meta = {}
    ticks = []
    sum_ = 0
    if collection_name is None:
        # not aggregated data: each entry holds a plain scalar under `key`.
        meta = {
            'symbol': entries[0]['symbol'],
            'begin': entries[0][key],
            'end': entries[-1][key],
            'time_begin': entries[0]['timestamp'],
            'time_end': entries[-1]['timestamp'],
            'min': entries[0][key],
            'max': entries[0][key],
            'time_min': entries[0]['timestamp'],
            'time_max': entries[0]['timestamp'],
        }
        for entry in entries:
            ticks.append(entry[key])
            sum_ += entry[key]
            if entry[key] > meta['max']:
                meta['max'] = entry[key]
                meta['time_max'] = entry['timestamp']
            if entry[key] < meta['min']:
                meta['min'] = entry[key]
                meta['time_min'] = entry['timestamp']
    else:
        # aggregated data: each entry holds a sub-document with
        # min/max/avg/begin/end and their timestamps.
        # this parameter should be taken to the ticks
        take_this = 'avg'
        meta = {
            'symbol': entries[0]['symbol'],
            'begin': entries[0][key]['begin'],
            'end': entries[-1][key]['end'],
            'time_begin': entries[0]['timestamp'],
            'time_end': entries[-1]['time_end'],
            'min': entries[0][key]['min'],
            'max': entries[0][key]['max'],
            'time_min': entries[0][key]['time_min'],
            'time_max': entries[0][key]['time_max'],
        }
        for entry in entries:
            ticks.append(entry[key][take_this])
            sum_ += entry[key]['avg']
            if entry[key]['max'] > meta['max']:
                meta['max'] = entry[key]['max']
                meta['time_max'] = entry[key]['time_max']
            if entry[key]['min'] < meta['min']:
                meta['min'] = entry[key]['min']
                meta['time_min'] = entry[key]['time_min']
    # Average of the returned ticks (avg-of-averages in the aggregated case).
    meta['avg'] = sum_/len(ticks)
    return {
        'ticks': ticks,
        'meta': meta,
    }
def get_aggregated_pair(coin1, coin2, time_start, time_end, number_of_ticks, key=None): # pylint: disable=too-many-locals,too-many-arguments,too-many-branches,too-many-statements
    """
    Aggregate coin pairs (or coin and currency pairs).
    It works this way: find data for ``coin1`` and find data for ``coin2``,
    after that divide ``coin1`` data by ``coin2`` pairwise.
    This method is approximate for aggregated values.
    ``coin2`` can be a currency.

    Returns a dict with ``'ticks'`` (list of ratio values) and ``'meta'``
    (begin/end/min/max/avg statistics and their timestamps).
    """
    # A non-empty result from currency_name() means coin2 is a fiat currency.
    coin2_is_currency = bool(currency_name(coin2))
    desired_interval = (time_end-time_start)/number_of_ticks
    chosen_interval = None
    # NOTE(review): no ``break`` here, so after iterating from the largest to
    # the smallest interval, chosen_interval ends up as the *smallest*
    # interval below desired_interval (the same pattern appears in the
    # function above) — confirm this matches the intended tick density.
    for interval_name, interval_size in sorted(INTERVAL.items(), key=lambda x: -x[1]):
        if interval_size < desired_interval:
            chosen_interval = interval_name
    # if interval is so small, we need to use the raw and not the aggregated data
    collection_name = None
    if chosen_interval:
        collection_name = 'coins_%s' % chosen_interval
    if key is None:
        key = "price_usd"
    entries1 = MONGO_READER.get_raw_data(
        coin1, time_start, time_end, collection_name=collection_name)
    if entries1 == []:
        return {'meta':{}, 'ticks':[]}
    # depending on (1) that we have a currency in coin2 or not
    # and (2) if data is aggregated, we have to read entries2 from different collections
    # and in one case postprocess them
    if coin2_is_currency:
        if collection_name is None:
            currencies_collection = 'currencies'
        else:
            currencies_collection = collection_name.replace('coins_', 'currencies_')
        fields = {'timestamp': 1, coin2: 1}
        entries2 = MONGO_READER.get_raw_data(
            None, time_start, time_end,
            collection_name=currencies_collection,
            fields=fields)
        # Copy the currency rate under ``key`` so the division code below can
        # treat currency entries exactly like coin entries.
        new_entries2 = []
        for entry in entries2:
            entry.update({key: entry[coin2]})
            new_entries2.append(entry)
        entries2 = new_entries2
    else:
        entries2 = MONGO_READER.get_raw_data(
            coin2, time_start, time_end, collection_name=collection_name)
    meta = {}
    ticks = []
    sum_ = 0
    # NOTE(review): the pairwise division below assumes entries1 and entries2
    # are aligned by timestamp and of equal length — zip() silently truncates
    # to the shorter list. Verify the two collections are sampled in lockstep.
    if collection_name is None:
        # not aggregated data
        meta = {
            'symbol': entries1[0]['symbol'],
            'begin': entries1[0][key]/entries2[0][key],
            'end': entries1[-1][key]/entries2[-1][key],
            'time_begin': entries1[0]['timestamp'],
            'time_end': entries1[-1]['timestamp'],
            'min': entries1[0][key]/entries2[0][key],
            'max': entries1[0][key]/entries2[0][key],
            'time_min': entries1[0]['timestamp'],
            'time_max': entries1[0]['timestamp'],
        }
        for entry1, entry2 in zip(entries1, entries2):
            this_value = entry1[key]/entry2[key]
            ticks.append(this_value)
            sum_ += this_value
            if this_value > meta['max']:
                meta['max'] = this_value
                meta['time_max'] = entry1['timestamp']
            if this_value < meta['min']:
                meta['min'] = this_value
                meta['time_min'] = entry1['timestamp']
    else:
        # aggregated data
        # this parameter should be taken to the ticks
        take_this = 'avg'
        # min/max of a ratio are bounded by min÷max and max÷min respectively,
        # hence the crossed min/max below (this is why the result is
        # "approximate" for aggregated values, per the docstring).
        meta = {
            'symbol': entries1[0]['symbol'],
            'begin': entries1[0][key]['begin']/entries2[0][key]['begin'],
            'end': entries1[-1][key]['end']/entries2[-1][key]['end'],
            'time_begin': entries1[0]['timestamp'],
            'time_end': entries1[-1]['time_end'],
            'min': entries1[0][key]['min']/entries2[0][key]['max'],
            'max': entries1[0][key]['max']/entries2[0][key]['min'],
            'time_min': entries1[0][key]['time_min'],
            'time_max': entries1[0][key]['time_max'],
        }
        for entry1, entry2 in zip(entries1, entries2):
            this_value = entry1[key][take_this]/entry2[key][take_this]
            ticks.append(this_value)
            sum_ += this_value
            if this_value > meta['max']:
                meta['max'] = this_value
                meta['time_max'] = entry1[key]['time_max']
            if this_value < meta['min']:
                meta['min'] = this_value
                meta['time_min'] = entry1[key]['time_min']
    meta['avg'] = sum_/len(ticks)
    return {
        'ticks': ticks,
        'meta': meta,
    }
def aggregate_new_entries(coin):
    """Aggregate raw entries for ``coin`` into every interval collection.

    When ``coin`` is falsy (None), currency rates are aggregated instead of
    coin data.
    """
    if coin:
        aggregation_function, collection_prefix = aggregate_coin, 'coins_'
    else:
        aggregation_function, collection_prefix = aggregate_currencies, 'currencies_'
    first_timestamp = MONGO_READER.get_first_timestamp(coin)
    last_timestamp = MONGO_READER.get_first_timestamp(coin, last=True)
    if first_timestamp is None or last_timestamp is None:
        _log_error("timestamp is None for %s" % coin)
        return
    for interval_name, interval_size in INTERVAL.items():
        collection_name = collection_prefix + interval_name
        # Resume from the newest already-aggregated entry (inclusive), so the
        # possibly-incomplete last entry of the previous run is recomputed.
        resume_from = MONGO_READER.get_first_timestamp(
            coin, last=True, collection_name=collection_name)
        if resume_from is None:
            _log("[%s/%s] last_aggregated_timestamp is None" % (collection_name, coin))
            resume_from = first_timestamp
        _log("[%s/%s] %s entries to insert/update" % \
            (collection_name, coin,
             int(math.ceil((last_timestamp - resume_from)*1.0/interval_size))))
        inserted_entries = 0
        timestamp = resume_from
        while timestamp <= last_timestamp:
            try:
                entry = aggregation_function(coin, timestamp, interval_size)
            except Exception as e_msg:
                _log_error("ERROR: coin: %s: %s: %s" % (coin, time.strftime("%Y-%m-%d %H:%M", time.gmtime(timestamp)), e_msg))
                entry = None
            # We insert all entries, including the last one even though it may
            # not yet be complete — it gets recomputed on the next run because
            # aggregation resumes from the last aggregated timestamp.
            if entry:
                MONGO_WRITER.update(entry, collection_name)
                inserted_entries += 1
                # A full window contains interval_size/300 raw (5-minute) samples;
                # flag partially-filled windows when debugging verbosely.
                if entry['number_of_aggregated'] != interval_size/300 and DEBUG_LEVEL > 1:
                    _log("[%s/%s] entry[%s][number_of_aggregated] = %s" % \
                        (collection_name, coin, inserted_entries, entry['number_of_aggregated']))
            timestamp += interval_size
        _log("[%s/%s] Updated %s entries" % (collection_name, coin, inserted_entries))
# we have blacklisted these coins, because there are some problems
# with their aggregation. As soon as the code is fixed, the list has to be empty
# (or at least it should be much shorter than this)
BLACKLISTED = """
B2B 2GIVE 1337 ERC20
I0C FC2 Q2C 42 C2 8BIT 888 611 $$$ MTLMC3
300 ASAFE2 VEC2 XBTC21 G3N CTIC3 CTIC2 P7C
ABJ ABN ABY AC ACC ACOIN ACP ADC ADL ADST
ADZ AERM AHT AIB ALL ALT ALTCOM AMMO AMS ANC
ANTI APW APX ARC ARCO ARDR ARG ARGUS ARI ART
ASAFE ASTRO ATL ATMS ATOM ATS ATX AUR AVT B
BAS BASH BBR BBT BCAP BCC BCF BCY BDL BELA
BENJI BERN BET BIGUP BIOB BIP BIS BIT BITBTC BITEUR
BITGOLD BITS BITSILVER BITZ BLAS BLC BLITZ BLN BLOCKPAY BLU
BLUE BNTY BNX BOAT BOLI BON BPC BPL BQ BRAIN
BRAT BRK BRO BRX BSTY BTA BTB BTCR BTCRED BTCS
BTCZ BTDX BTPL BTQ BTWTY BUCKS BUMBA BUN BUZZ BWK
BXT BYC C CAB CACH CAG CALC CANN CARBON CASH
CBX CCN CCO CCRB CCT CDN CDX CFD CFT CHAN
CHC CHESS CHIPS CJ CMPCO CNNC CNO CNT COAL CON
CONX COVAL CPN CRAVE CRB CRC CRDNC CREA CRED CREDO
CREVA CRM CRX CSNO CTIC CTX CUBE CURE CVCOIN CXT
CYP DAI DALC DAR DAXX DBTC DCY DDF DEM DFT
DGC DGCS DGPT DIBC DICE DIX DLC DLISK DMB DNR
DOLLAR DOPE DOVU DP DRP DRS DRT DRXNE DSH
DSR DUO DYN EAC EAGLE EBCH EBET EBST EBT EBTC
ECN ECO ECOB EDR EFL EFYT EGAS EGC EGO EL
ELE ELIX ELLA ELS ELTCOIN EMD EMV ENT EOT EPY
EQT ERC EREAL ERO ERROR ERY ESP ETBS ETG ETHD
ETT EUC EVIL EVO EXCL EXN FAIR FC FCN FLAX
FLDC FLIK FLIXX FLT FNC FOR FRD FRST FST FUCK
FUNC FUNK FUZZ FXE FYN FYP G GAIA GAM GAP
GB GBX GCC GCN GEERT GEO GIM GIVE GLD GLT
GMT GP GPL GPU GRE GRID GRIM GRWI GSR GUN
HAL HAT HBN HBT HDG HEAT HERO HGT HKN HMP
HNC HODL HOLD HONEY HPC HTC HUC HUSH HVCO HWC
HXX HYP I IBANK ICE ICOB ICON ICOO IETH IFLT
IMPS IMS IMX IND INFX INN INSN INXT IOP ITI
ITNS IXC JET JIN JINN JNS JOBS KAYI KED KEK
KLC KLN KOBO KORE KRB KRONE KURT KUSH LANA LBTC
LCP LEA LGD LIFE LINDA LINX LNK LOC LTB LTCR
LTCU LUNA LUX LVPS MAC MAD MAG MAO MAR MARS
MAX MAY MBI MBRS MCAP MEC MEME MGM MILO MNC
MNE MNM MOIN MOJO MONK MOTO MRT MSCN MST MTLMC
MTNC MXT MYST MZC N NANOX NDC NETKO NEVA NEWB
NIO NKA NOBL NOTE NRO NSR NTO NTRN NTWK NUKO
NVC NVST NYAN NYC OBITS OCL OCT ODN OFF ONG
ONX OPAL OPT ORB ORLY OTN OTX OXY P PAK
PASL PAYX PBL PBT PCOIN PDC PEX PFR PGL PHS
PIGGY PING PINK PIRL PIX PKB PKT PLACO PLAY PLNC
PLU PND POLL PONZI POP POS POST PPY PR PRC
PRIX PROC PRX PTC PURE PUT PXC PXI PZM Q
QBC QCN QRK QTL QVT QWARK RBIES RBT RBY RC
REAL REC RED REE REX RIC RISE RKC RLT RMC
RNS ROC ROOFS RPC RUP RUSTBITS SAGA SCL SCS SDC
SDRN SEND SEQ SFC SGR SIFT SIGT SKC SKIN SLEVIN
SLFI SLG SMC SNRG SOCC SOIL SONG SOON SPACE SPHR
SPRTS SPT SRC SSS STA STAK STARS STN STRC STU
STV SUMO SUPER SWIFT SWING SXC SYNX TAG TAJ TALK
TCC TEK TES TFL TGC TGT TIPS TIT TKR TKS
TOA TOKEN TOR TRCT TRDT TRI TRIG TRK TROLL TRUMP
TRUST TSE TSTR TTC TX TZC UET UFO UFR UIS
ULA UNB UNIC UNIFY UNITS UNITY UNY URC URO USDE
USNBT UTC V VAL VEC VIDZ VISIO VIVO VLT VLTC
VOISE VOLT VOT VPRC VRM VRS VSL VSX VTA VTR
VUC WBB WDC WGO WHL WILD WISH WOMEN WORM WRC
WTT XBC XBL XBTC XBTS XCO XCPO XCRE XCS XCT
XCXT XFT XGOX XGR XHI XIOS XJO XLC XLR XMCC
XMG XNG XNN XPA XPD XPTX XPY XRA XRC XRE
XRL XST XTO XVC XVP YOC YTN ZCG ZEIT ZENI
ZEPH ZER ZET ZMC ZNY ZOI ZRC ZUR ZZC
"""
def main():
    """Aggregate existing entries for currencies and all non-blacklisted coins."""
    skipped = set(BLACKLISTED.split())
    symbols = [entry[0] for entry in COINS_NAMES]
    # ``None`` first: it triggers currency aggregation before individual coins.
    for coin in [None] + [symbol for symbol in symbols if symbol not in skipped]:
        try:
            aggregate_new_entries(coin)
        except IndexError as e_msg:
            _log_error("ERROR: coin: %s: %s" % (coin, e_msg))
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3384161 | <filename>Jan2019/12Jan2019/StringsDemo.py
class StringDataTypeDemo:
    """Counts how many demo instances exist and prints labelled values."""

    # Class-level counter shared by every instance.
    Instances = 0

    def __init__(self):
        # Record this new instance on the shared class attribute.
        StringDataTypeDemo.Instances += 1

    def displayDetails(self, title, value):
        """Print a titled banner, the current instance count, and *value*."""
        output = (
            f"----- {title} -----",
            f"StringDataTypeDemo.Instances: {self.Instances}",
            f"Value: {value}",
        )
        for line in output:
            print(line)
# Demo script: exercises Python string literal forms via StringDataTypeDemo.
title = "String Demo"
stringDemo = StringDataTypeDemo()
# Single- and double-quoted literals are equivalent.
name = '<NAME>'
stringDemo.displayDetails(title, name)
name = "<NAME> with Double Quotes"
stringDemo.displayDetails(title, name)
# Triple-quoted literal spans multiple lines; the explicit \n inside adds an
# extra blank line on top of the literal newlines.
multilineString = """
This is first line. \n
This is second line.
"""
stringDemo.displayDetails("Multiline Demo", multilineString)
# concatenate with + operator, and repeated with *
chant = "Hare " + "Krishna "
stringDemo.displayDetails("+ Add Strings", chant)
# Two or more string literals next to each other are automatically concatenated.
# (Same label as above, but this one demonstrates adjacency, not the + operator.)
chant = "Hare " "Krishna "
stringDemo.displayDetails("+ Add Strings", chant)
# Repetition with the * operator.
chant = "<NAME> " * 3
stringDemo.displayDetails("Multiply Strings", chant)
# to break long strings
longText = ('Put several strings within parentheses '
            'to have them joined together.')
stringDemo.displayDetails("Long Strings", longText)
# in operator for string search (membership test returns a bool)
searchString = "Hare"
print(f'Is "{searchString}" present in "{chant}" {searchString in chant}')
| StarcoderdataPython |
204522 | <reponame>53X/asteroid
import torch
from torch import nn
from copy import deepcopy
from ..filterbanks import make_enc_dec
from ..masknn import LSTMMasker
from .base_models import BaseEncoderMaskerDecoder
class LSTMTasNet(BaseEncoderMaskerDecoder):
    """TasNet separation model, as described in [1].
    Args:
        n_src (int): Number of masks to estimate.
        out_chan (int or None): Number of bins in the estimated masks.
            Defaults to `in_chan`.
        hid_size (int): Number of neurons in the RNNs cell state.
            Defaults to 512.
        mask_act (str, optional): Which non-linear function to generate mask.
        bidirectional (bool, optional): True for bidirectional Inter-Chunk RNN
            (Intra-Chunk is always bidirectional).
        rnn_type (str, optional): Type of RNN used. Choose between ``'RNN'``,
            ``'LSTM'`` and ``'GRU'``.
        n_layers (int, optional): Number of layers in each RNN.
        dropout (float, optional): Dropout ratio, must be in [0,1].
        in_chan (int, optional): Number of input channels, should be equal to
            n_filters.
        fb_name (str, className): Filterbank family from which to make encoder
            and decoder. To choose among [``'free'``, ``'analytic_free'``,
            ``'param_sinc'``, ``'stft'``].
        n_filters (int): Number of filters / Input dimension of the masker net.
        kernel_size (int): Length of the filters.
        stride (int, optional): Stride of the convolution.
            If None (default), set to ``kernel_size // 2``.
        encoder_activation (optional): Forwarded unchanged to
            ``BaseEncoderMaskerDecoder``.
        **fb_kwargs (dict): Additional kwargs to pass to the filterbank
            creation.
    References:
        [1]: Yi Luo et al. "Real-time Single-channel Dereverberation and Separation
        with Time-domain Audio Separation Network", Interspeech 2018
    """
    def __init__(
        self,
        n_src,
        out_chan=None,
        rnn_type="lstm",
        n_layers=4,
        hid_size=512,
        dropout=0.3,
        mask_act="sigmoid",
        bidirectional=True,
        in_chan=None,
        fb_name="free",
        n_filters=64,
        kernel_size=16,
        stride=8,
        encoder_activation=None,
        **fb_kwargs,
    ):
        encoder, decoder = make_enc_dec(
            fb_name, kernel_size=kernel_size, n_filters=n_filters, stride=stride, **fb_kwargs
        )
        n_feats = encoder.n_feats_out
        # Sanity check: an explicitly-given in_chan must match the filterbank.
        if in_chan is not None:
            assert in_chan == n_feats, (
                "Number of filterbank output channels"
                " and number of input channels should "
                "be the same. Received "
                f"{n_feats} and {in_chan}"
            )
        # Real gated encoder: wraps the plain encoder with a sigmoid gate
        # (see _GatedEncoder below).
        encoder = _GatedEncoder(encoder)
        # Masker
        masker = LSTMMasker(
            n_feats,
            n_src,
            out_chan=out_chan,
            hid_size=hid_size,
            mask_act=mask_act,
            bidirectional=bidirectional,
            rnn_type=rnn_type,
            n_layers=n_layers,
            dropout=dropout,
        )
        super().__init__(encoder, masker, decoder, encoder_activation=encoder_activation)
class _GatedEncoder(nn.Module):
    """Gated encoder: elementwise product of a ReLU branch and a sigmoid gate.

    Two independent copies of the wrapped encoder are kept; the deep copy
    means the gate branch can learn weights separate from the feature branch.
    """

    def __init__(self, encoder):
        super().__init__()
        # Expose the filterbank so model configuration can still be read off
        # this wrapper like it would be off the plain encoder.
        self.filterbank = encoder.filterbank
        self.encoder_relu = encoder
        self.encoder_sig = deepcopy(encoder)

    def forward(self, x):
        gate = torch.sigmoid(self.encoder_sig(x))
        features = torch.relu(self.encoder_relu(x))
        return features * gate
| StarcoderdataPython |
8056851 | <filename>real_estate_market/real_estate_market/items.py
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class RemHouseInfoItem(scrapy.Item):
    """Scraped real-estate listing record (one item per house advert)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    price = scrapy.Field()          # listing price
    size = scrapy.Field()           # floor area of the property
    type = scrapy.Field()           # property/layout type
    floor = scrapy.Field()          # floor the unit is on
    direction = scrapy.Field()      # facing direction
    metro = scrapy.Field()          # nearby metro information
    neighbourhood = scrapy.Field()  # neighbourhood name
    district = scrapy.Field()       # administrative district
    block = scrapy.Field()          # residential block / compound
    time = scrapy.Field()           # listing time
    uri = scrapy.Field()            # source URL of the listing
| StarcoderdataPython |
8024952 |
import os,csv  # NOTE(review): csv is imported but never used below
if __name__ == "__main__":
    # Write the (extension-less) name of every file in tmpexistStr/ to an
    # index file, one name per line (append mode).
    ipath = "../doc_clr/textbook_data/html_txt/result1_2/"
    # iipath = ipath+"existStr/"
    # opath = "existRel/"
    iipath = ipath+"tmpexistStr/"
    # idxslist = indexlist(ipath+"index_terms.txt")
    # relslist = relist(ipath+"relation_terms.txt")
    files = os.listdir(iipath)
    with open(ipath+"tmp_existStr_idx.txt",'a', encoding = 'utf-8-sig',newline='') as wrtf:
        for file in files: # iterate over the files in the folder
            filename = os.path.splitext(file)[0]
            # sentslist = sentlist(iipath+file)
            # wrtline = s.split(' ')
            # for item in s:
            #     item = item.strip()
            wrtf.write("%s\n"%filename)
            # Earlier relation-extraction logic, kept commented out:
            # wrtf.write("\n")
            # for s in sentslist:
            #     for r in relslist:
            #         if r in s:
            #             with open(ipath+opath+r+".txt",'a', encoding = 'utf-8-sig',newline='') as wrtf:
            #                 # wrtline = s.split(' ')
            #                 for item in s:
            #                     # item = item.strip()
            #                     wrtf.write("%s "%item)
            #                 wrtf.write("\n")
            #             break
print(file) | StarcoderdataPython |
12837596 | <gh_stars>0
import numpy as np
def vec_reg_linear_grad(x, y, theta, lambda_):
    """Vectorized gradient of the L2-regularized linear-regression cost.

    The bias coefficient ``theta[0]`` is excluded from the penalty, per the
    usual convention of not regularizing the intercept.
    """
    m = x.shape[0]
    residual = x.dot(theta) - y
    gradient = x.T.dot(residual) / m
    # Add the regularization term to every coefficient except the first.
    gradient[1:] += theta[1:] * (lambda_ / m)
    return gradient
if __name__ == "__main__":
    # Smoke test: 7 samples x 3 features, lambda = 1.
    X = np.array([
        [ -6, -7, -9],
        [ 13, -2, 14],
        [ -7, 14, -1],
        [ -8, -4, 6],
        [ -5, -9, 6],
        [ 1, -5, 11],
        [ 9, -11, 8]])
    Y = np.array([2, 14, -13, 5, 12, 4, -19])
    Z = np.array([3,10.5,-6])  # theta (3 coefficients, one per feature)
print(vec_reg_linear_grad(X,Y, Z, 1)) | StarcoderdataPython |
11208522 | client_id = 'Enter_Client_ID_Here'
client_secret = 'Enter_Client_Secret_Here'
client_url = 'wss://test.deribit.com/ws/api/v2'
# test | StarcoderdataPython |
9793270 | <gh_stars>0
import logging
from ferris_cli.ferris_cli import FerrisKafkaLoggingHandler
from ferris_cli.ferris_cli import CloudEventsAPI  # NOTE(review): imported but unused below
from logstash_formatter import LogstashFormatterV1
import os
# Logger named after the app; raises KeyError if APP_NAME is not set in the
# environment — presumably guaranteed by the deployment, TODO confirm.
logger = logging.getLogger(os.environ['APP_NAME'])
# Ship INFO-and-above records to Kafka in Logstash (JSON) format.
kh = FerrisKafkaLoggingHandler()
kh.setLevel(logging.INFO)
formatter = LogstashFormatterV1()
kh.setFormatter(formatter)
logger.addHandler(kh)
logger.info('loading file')
print('sent logs')
| StarcoderdataPython |
11398355 | <filename>codemon/codemon.py
#!/usr/bin/python3
import sys
import os
from clint.textui import colored
from codemon.CodemonHelp import showHelp
from codemon.CodemonListen import listen
from codemon.CodemonInit import init, init_single_file
from codemon.CodemonReg import codemonReg
from codemon.CodemonMeta import get_filename, get_practice_files
from codemon.CodemonFetch import fetch_tests
from codemon.CodemonParse import Parser
def main():
    """Parse CLI arguments and dispatch to the selected codemon sub-command."""
    args = Parser()
    args.parse(sys.argv[1:])

    if args.help:
        showHelp()
        return
    if args.to_listen:
        listen()
        return
    if args.to_practice:
        contestName = args.name
        init(contestName, get_practice_files(), args.init_flags)
        return
    if args.to_init:
        if args.init_flags["is_single"]:
            # Single-file mode: scaffold one file instead of a contest.
            init_single_file(f'{args.name}', args.init_flags)
        else:
            contestName = args.name
            fileNames = get_filename(contestName)
            init(contestName, fileNames, args.init_flags)
            if args.init_flags["to_fetch"]:
                fetch_tests(fileNames, contestName)
        return
    if args.to_fetch:
        # Infer the contest from the current working directory name.
        contestName = os.path.basename(os.getcwd())
        fetch_tests(get_filename(contestName), contestName)
        return
    if args.Reg:
        codemonReg()
        return
    # No recognised flag: show usage.
    showHelp()
| StarcoderdataPython |
4924315 | from django.urls import path
from . import views
# Dashboard app routes; the ``name=`` values are used for URL reversing in
# templates and views.
urlpatterns = [
    path('', views.home, name='dashboard-home'),
    path('matchmaking/', views.matchmaking, name='matchmaking'),
    path('matchmaking/lobby', views.lobby, name='lobby'),
    path('matchmaking/lobby/game', views.game, name='game'),
    path('profile/', views.profile, name='profile'),
    path('settings/', views.settings, name='settings'),
    path('campaign/', views.campaign, name='campaign'),
    path('customgames/', views.customgames, name='customgames'),
    path('careers/', views.careers, name='careers'),
    path('leaderboard/', views.leaderboard, name='leaderboard'),
]
| StarcoderdataPython |
5008394 | <filename>refbox/ui_test.py<gh_stars>1-10
import tkinter as tk
from . import ui
from uwh.gamemanager import GameManager, TeamColor, Penalty
from .noiomanager import IOManager
import itertools
def test_refbox_config_parser():
    """Game timing config values must parse as integers."""
    cfg = ui.RefboxConfigParser()
    assert type(cfg.getint('game', 'half_play_duration')) == int
    assert type(cfg.getint('game', 'half_time_duration')) == int
def test_sized_frame():
    """A sized frame can be constructed without a parent window."""
    assert ui.sized_frame(None, 1, 2)
def test_score_column():
    """A ScoreColumn can be constructed with dummy callbacks."""
    root = ui.sized_frame(None, 1, 2)
    assert ui.ScoreColumn(root, 2, 'black', 'blue', 5, lambda: 42, lambda: 43,
                          lambda: 44, ui.RefboxConfigParser())
def test_normal_view():
    """The gong starts the game clock on a fresh NormalView."""
    nv = ui.NormalView(GameManager(), IOManager(), NO_TITLE_BAR=True)
    assert nv.mgr.gameClockRunning() is False
    assert nv.mgr.gameClock() > 0
    nv.gong_clicked("test_normal_view()")
    assert nv.mgr.gameClockRunning() is True
def test_game_over():
    """A second-half clock reaching zero ends the game and stops the clock."""
    # NOTE(review): NO_TITLE_BAR=False here, unlike the other tests — confirm
    # that exercising the title-bar path is intentional.
    nv = ui.NormalView(GameManager(), IOManager(), NO_TITLE_BAR=False)
    nv.mgr.setGameStateSecondHalf()
    nv.mgr.setGameClock(0)
    nv.mgr.setGameClockRunning(True)
    nv.refresh_time()
    assert nv.mgr.gameStateGameOver() is True
    assert nv.mgr.gameClockRunning() is False
def test_edit_score():
    """Opening the score editors must not raise."""
    nv = ui.NormalView(GameManager(), IOManager(), NO_TITLE_BAR=True)
    nv.edit_white_score()
    nv.edit_black_score()
def test_inc_score():
    """Opening the score incrementers must not raise."""
    nv = ui.NormalView(GameManager(), IOManager(), NO_TITLE_BAR=True)
    nv.increment_white_score()
    nv.increment_black_score()
def test_edit_time():
    """Opening the time editor leaves the game clock unchanged."""
    nv = ui.NormalView(GameManager(), IOManager(), NO_TITLE_BAR=True)
    nv.mgr.setGameClock(2)
    nv.edit_time()
    assert nv.mgr.gameClock() == 2
def test_PlayerSelectNumpad():
    """Digit and delete keys build/erase the player number; delete on empty is a no-op."""
    root = ui.sized_frame(None, 1, 2)
    psn = ui.PlayerSelectNumpad(root, '')
    assert psn.get_value() == ''
    psn.clicked('1')
    psn.clicked('3')
    assert psn.get_value() == '13'
    psn.clicked('del')
    assert psn.get_value() == '1'
    psn.clicked('del')
    assert psn.get_value() == ''
    psn.clicked('del')
    assert psn.get_value() == ''
    psn.clicked('4')
    psn.clicked('2')
    assert psn.get_value() == '42'
def test_add_penalty():
    """Adding a penalty from the normal view must not raise."""
    nv = ui.NormalView(GameManager(), IOManager(), NO_TITLE_BAR=True)
    nv.mgr.setGameClock(2)
    nv.add_penalty(TeamColor.black)
def test_PenaltyEditor_submit():
    """Submitting calls on_submit with the typed player and the last selected duration."""
    def on_submit(player, duration):
        assert player == '42'
        assert duration == 5 * 60
        editor.submit_was_clicked = True
    def on_delete():
        editor.delete_was_clicked = True
    mgr = GameManager()
    cfg = ui.RefboxConfigParser()
    root = tk.Tk()
    editor = ui.PenaltyEditor(root, 0, mgr, cfg, TeamColor.black, on_delete, on_submit, None)
    editor.submit_was_clicked = False
    editor.delete_was_clicked = False
    editor._numpad.clicked('4')
    editor._numpad.clicked('2')
    # Only the last time_select before submit should win (5 minutes).
    editor.time_select(2 * 60)
    editor.time_select(1 * 60)
    editor.time_select(-1)
    editor.time_select(5 * 60)
    editor.submit_clicked()
    assert editor.submit_was_clicked == True
    assert editor.delete_was_clicked == False
def test_PenaltyEditor_delete():
    """Deleting hands the original (unedited) penalty to on_delete."""
    penalty = Penalty(37, TeamColor.black, 3 * 60)
    def on_submit(player, duration):
        editor.submit_was_clicked = True
    def on_delete(penalty):
        # The callback sees the penalty as it was created, not the edits made
        # in the dialog before delete was clicked.
        assert penalty.duration() == 3 * 60
        assert penalty.player() == 37
        editor.delete_was_clicked = True
    mgr = GameManager()
    cfg = ui.RefboxConfigParser()
    root = tk.Tk()
    editor = ui.PenaltyEditor(root, 0, mgr, cfg, TeamColor.white, on_delete, on_submit, penalty)
    editor.submit_was_clicked = False
    editor.delete_was_clicked = False
    editor._numpad.clicked('4')
    editor._numpad.clicked('2')
    editor.time_select(5 * 60)
    editor.delete_clicked()
    assert editor.submit_was_clicked == False
    assert editor.delete_was_clicked == True
def test_PenaltyEditor_cancel():
    """Cancelling the penalty editor must not raise."""
    mgr = GameManager()
    cfg = ui.RefboxConfigParser()
    root = tk.Tk()
    # NOTE(review): one fewer argument than the other PenaltyEditor tests (no
    # trailing penalty arg) — presumably it has a default; confirm.
    editor = ui.PenaltyEditor(root, 0, mgr, cfg, TeamColor.white, lambda: None, lambda: None)
    editor.cancel_clicked()
def test_TimeEditor():
    """Minute/second buttons adjust the clock, clamped at zero; submit reports the edited value."""
    def on_submit(new_time):
        assert new_time == 59
        editor.submit_was_clicked = True
    def on_cancel():
        editor.cancel_was_clicked = True
    cfg = ui.RefboxConfigParser()
    root = tk.Tk()
    editor = ui.TimeEditor(root, 0, 5 * 60 + 2, on_submit,
                           on_cancel, cfg)
    editor.submit_was_clicked = False
    editor.cancel_was_clicked = False
    editor.game_clock_m_dn()
    assert editor.clock_at_pause_var.get() == 4 * 60 + 2
    editor.game_clock_s_dn()
    assert editor.clock_at_pause_var.get() == 4 * 60 + 1
    editor.game_clock_m_dn()
    assert editor.clock_at_pause_var.get() == 3 * 60 + 1
    editor.game_clock_s_dn()
    assert editor.clock_at_pause_var.get() == 3 * 60 + 0
    editor.game_clock_s_dn()
    assert editor.clock_at_pause_var.get() == 2 * 60 + 59
    editor.game_clock_m_dn()
    assert editor.clock_at_pause_var.get() == 1 * 60 + 59
    editor.game_clock_m_dn()
    assert editor.clock_at_pause_var.get() == 59
    # Decrements saturate at zero rather than going negative.
    editor.game_clock_m_dn()
    assert editor.clock_at_pause_var.get() == 0
    editor.game_clock_s_dn()
    assert editor.clock_at_pause_var.get() == 0
    editor.game_clock_m_up()
    assert editor.clock_at_pause_var.get() == 60
    editor.game_clock_s_up()
    assert editor.clock_at_pause_var.get() == 61
    editor.game_clock_s_dn()
    editor.game_clock_s_dn()
    assert editor.clock_at_pause_var.get() == 59
    editor.submit_clicked()
    assert editor.submit_was_clicked == True
    assert editor.cancel_was_clicked == False
    # A fresh editor that is cancelled reports only the cancel callback.
    editor = ui.TimeEditor(root, 0, 5 * 60 + 2, on_submit,
                           on_cancel, cfg)
    editor.submit_was_clicked = False
    editor.cancel_was_clicked = False
    editor.cancel_clicked()
    assert editor.submit_was_clicked == False
    assert editor.cancel_was_clicked == True
def test_ScoreEditor():
    """Score edits clamp to the [0, 99] range; submit reports the final score."""
    def on_submit(new_score):
        assert new_score == 99
        editor.submit_was_clicked = True
    cfg = ui.RefboxConfigParser()
    root = tk.Tk()
    editor = ui.ScoreEditor(root, 0, 1, True, on_submit, cfg)
    editor.submit_was_clicked = False
    # Repeated decrements saturate at zero.
    editor.dn()
    editor.dn()
    editor.dn()
    assert editor.score_var.get() == 0
    editor.up()
    assert editor.score_var.get() == 1
    # Repeated increments saturate at 99.
    for _ in itertools.repeat(None, 110):
        editor.up()
    assert editor.score_var.get() == 99
    editor.submit_clicked()
    assert editor.submit_was_clicked == True
    # Cancelling must not invoke the submit callback.
    editor = ui.ScoreEditor(root, 0, 42, True, on_submit, cfg)
    editor.submit_was_clicked = False
    editor.cancel_clicked()
    assert editor.submit_was_clicked == False
def test_ScoreIncrementer():
    """'Yes' submits score+1; 'No' does not invoke the callback."""
    def on_submit(new_score):
        assert new_score == 43
        incrementer.submit_was_clicked = True
    cfg = ui.RefboxConfigParser()
    root = tk.Tk()
    incrementer = ui.ScoreIncrementer(root, 0, 42, True, on_submit, cfg)
    incrementer.submit_was_clicked = False
    incrementer.yes_clicked()
    assert incrementer.submit_was_clicked == True
    incrementer = ui.ScoreIncrementer(root, 0, 42, True, on_submit, cfg)
    incrementer.submit_was_clicked = False
    incrementer.no_clicked()
    assert incrementer.submit_was_clicked == False
def test_PenaltiesColumn():
    """The add button fires add_penalty and not edit_penalty."""
    root = ui.sized_frame(None, 1, 2)
    cfg = ui.RefboxConfigParser()
    mgr = GameManager()
    # Seed a mix of penalties: white, black timed, black dismissal (-1), and a
    # black penalty with explicit start time.
    penalty = Penalty(37, TeamColor.white, 3 * 60)
    mgr.addPenalty(penalty)
    penalty = Penalty(38, TeamColor.black, 5 * 60)
    mgr.addPenalty(penalty)
    penalty = Penalty(38, TeamColor.black, -1)
    mgr.addPenalty(penalty)
    penalty = Penalty(38, TeamColor.black, 5 * 60, 10 * 60)
    mgr.addPenalty(penalty)
    mgr.setGameClock(0)
    def edit_penalty(idx):
        assert idx == 1
        pc.edit_was_clicked = True
    def add_penalty():
        pc.add_was_clicked = True
    pc = ui.PenaltiesColumn(root, 0, TeamColor.black, 50, mgr, edit_penalty, add_penalty, cfg)
    pc.add_was_clicked = False
    pc.edit_was_clicked = False
    pc.add_clicked()
    assert pc.add_was_clicked == True
    assert pc.edit_was_clicked == False
| StarcoderdataPython |
1670140 | from django.urls import path, include
from rest_framework import routers
from rest_framework.authtoken.views import obtain_auth_token
from . import views
# ViewSet routes are registered on a DRF router; plain APIViews are wired
# directly in urlpatterns below.
router = routers.DefaultRouter()
router.register('categories', views.CategoryView)
# NOTE(review): ``base_name`` was renamed to ``basename`` in DRF 3.9+ — confirm
# the installed djangorestframework version still accepts this keyword.
router.register('posts', views.PostView, base_name='post')
urlpatterns = [
    # Token login endpoint; expects a JSON body with username/password:
    path('token-auth/', obtain_auth_token, name='token_auth'),
    # POST /api/v1/token-auth/ HTTP/1.1
    # {
    #     "username": "admin",
    #     "password": "<PASSWORD>"
    # }
    path('comments/', views.CommentListView.as_view(), name='comment-list'),
    path('comments/<int:pk>/', views.CommentDetailView.as_view(), name='comment-detail'),
    path('', include(router.urls)),
] | StarcoderdataPython |
256673 | import numpy as np
import torch
import torch.nn as nn
import pickle
from torch.utils.data import DataLoader
from vanilla_autoencoder import VanillaAE
from dataprep import AutoEncoderDataset
if __name__ == "__main__":
    #Setup DEVICE
    seed = 99
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    torch.manual_seed(seed)
    print(device)
    #Setup the model
    INPUT_DIM = 200 #must be bigger than 1
    model = VanillaAE(INPUT_DIM)
    #loss_function = nn.BCELoss(reduce=False)
    loss_function = nn.MSELoss()
    print("Model initialised.")
    # Some information about the model
    # print(torch.cuda.memory_cached(device=device))
    #print(torch.cuda.memory_allocated(device=device))
    # Count trainable parameters by flattening each tensor.
    num_parameters = 0
    for parameter in model.parameters():
        num_parameters += parameter.view(-1).size()[0]
    print("Number of parameters: %i" % num_parameters)
    print(model)
    # Load the data and split into sets
    # NOTE(review): pickle.load executes arbitrary code — only load
    # ae_dataset.pkl from a trusted source.
    with open("ae_dataset.pkl", "rb") as f:
        data = pickle.load(f) #load the autoencoderdatasets
    #Split the dataset into train and test (99% / 1%)
    #training dataset size, testing dataset size
    lengths = [int(np.floor(0.99*len(data))), int(np.ceil(0.01*len(data)))]
    train_data, test_data = torch.utils.data.random_split(data, lengths)
    # NOTE(review): "lenght" typo below is in a runtime string; left as-is here.
    print("Training data length %i, test data lenght %i" % (lengths[0], lengths[1]))
    training_data = DataLoader(train_data, batch_size=100, shuffle=True, drop_last=True)
    testing_data = DataLoader(test_data, batch_size=1, shuffle=True, drop_last=True)
    print("Training data loaded.")
    #Setup the optimizer (seed is re-applied so optimizer setup is reproducible)
    torch.manual_seed(seed)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, betas=(0.9, 0.999))
    #Begin the training
    model.train()
    for epoch in range(25):
        # Each dataset item yields a (labels, timeseries) pair — presumably
        # target window and input window; confirm against AutoEncoderDataset.
        for i, (labels, timeseries) in enumerate(training_data):
            #print(sum(np.isnan(labels)))
            #print(labels, timeseries)
            #print(labels, timeseries)
            #print(labels.size(), timeseries.size())
            model.zero_grad()
            prediction = model(timeseries)
            prediction = prediction.view_as(labels)
            loss = loss_function(prediction, labels)
            loss.backward()
            optimizer.step()
            print("Epoch %i, batch %i, loss: %f" % (epoch, i, loss))
        # Checkpointing code, disabled; NOTE(review): 'h' referenced below is
        # undefined in this script.
        #if epoch % 10 == 0.0:
        #    torch.save(model, "model_%i.pkt" % epoch)
        #    torch.save(h, "hidden_%i.pkt" % epoch)
        #    print("Model saved!")
    #Show the test results - this needs to be moved from here
    #import matplotlib; matplotlib.use("qt4cairo")
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages
    print('here')
    # Plot up to 500 test examples (input vs. reconstruction) into one PDF.
    with PdfPages("vanilla_autoencoder.pdf") as pdf:
        for j, (test_label, test_timeseries) in enumerate(testing_data):
            print(j)
            if j < 500:
                plt.figure(figsize=(8, 6))
                plt.scatter(range(0, 200), test_timeseries, label="Input")
                plt.scatter(range(0, 200), model(test_timeseries).detach().numpy(), color="r", label="AE output")
                plt.xlabel('Data point index')
                plt.ylabel('Normalised flux')
                plt.legend()
                plt.grid('on')
                plt.title('Test example #%i' % j)
                plt.tight_layout()
                pdf.savefig()
                plt.close('all')
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.