id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
4818183 | <filename>common/utils/mathematical_conversation_utils.py
def convert_price_to_us(price: str) -> float:
us_number_convention = {",": ".", ".": ","}
trans_table = price.maketrans(us_number_convention)
price = price.translate(trans_table).replace(",", "")
return float(price)
| StarcoderdataPython |
87880 | #!/usr/bin/env python3
# prerequisite: xsel
import subprocess
import time
class Selection:
def __init__(self, intvl):
self.content = ""
self.intvl = intvl
def get_str(self):
while True:
cur = subprocess.check_output(["xsel"])
if cur == self.content:
time.sleep(self.intvl)
continue
self.content = cur
return cur.decode()
| StarcoderdataPython |
3321166 | """Test the json file with keys and values describing the data file."""
import os.path as op
import json
import sp_experiment
init_dir = op.dirname(sp_experiment.__file__)
fname = 'task-sp_events.json'
fpath = op.join(init_dir, fname)
def test_json():
"""Test json file."""
with open(fpath, 'r') as f:
try:
assert json.load(f)
except ValueError as e:
print('invalid json: %s' % e)
raise
| StarcoderdataPython |
4818699 | # Generated by Django 3.0.7 on 2020-06-30 17:30
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20200630_1728'),
]
operations = [
migrations.AlterField(
model_name='post',
name='published',
field=models.DateTimeField(default=datetime.datetime(2020, 6, 30, 20, 30, 58, 322232)),
),
]
| StarcoderdataPython |
146682 | # Copyright 2013-2017 Ent. Services Development Corporation LP
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This file may incorporate work covered under the following copyright
# and permission notice:
#
# Copyright (c) 2012, <NAME>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * The names of the contributors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Base classes for all tests
See http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/testing.html
"""
import collections
import unittest
from pyramid import testing
from webob.multidict import MultiDict
from wtforms import Field
from wtforms.validators import DataRequired, InputRequired, Length, Email, Optional, NumberRange
from eucaconsole.routes import urls
from eucaconsole.caches import short_term
from eucaconsole.caches import default_term
from eucaconsole.caches import long_term
from eucaconsole.caches import extra_long_term
class Mock(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class BaseViewTestCase(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
for route in urls:
self.config.add_route(route.name, route.pattern)
def tearDown(self):
testing.tearDown()
def create_request(self, path='/', is_xhr=False, matchdict=None, params=None, session=None):
request = testing.DummyRequest(path=path)
request.id = 'test_request_id'
request.is_xhr = is_xhr
request.matchdict = matchdict or {}
request.params = MultiDict(csrf_token=request.session.get_csrf_token())
if params:
request.params.update(params)
if session:
request.session.update(session)
return request
class BaseTestCase(unittest.TestCase):
"""Use this as a base when you need to run test with no routes automatically configured.
Note: You probably want to use BaseViewTestCase instead."""
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
class BaseFormTestCase(unittest.TestCase):
"""Base form class, modified from wtforms-test to better work with CSRF forms.
See https://github.com/kvesteri/wtforms-test/blob/master/wtforms_test/__init__.py
"""
form_class = None
request = None
csrf_enabled = True
memory_cache = 'dogpile.cache.pylibmc'
memory_cache_url = '127.0.0.1:11211'
username = None
password = <PASSWORD>
short_term.configure(
memory_cache,
expiration_time=60,
arguments={
'url': [memory_cache_url],
'binary': True,
'min_compress_len': 1024,
'behaviors': {"tcp_nodelay": True,"ketama":True},
'username': username,
'password': password
},
)
default_term.configure(
memory_cache,
expiration_time=300,
arguments= {
'url': [memory_cache_url],
'binary': True,
'min_compress_len': 1024,
'behaviors': {"tcp_nodelay": True, "ketama": True},
'username': username,
'password': password
},
)
long_term.configure(
memory_cache,
expiration_time=3600,
arguments={
'url': [memory_cache_url],
'binary': True,
'min_compress_len': 1024,
'behaviors': {"tcp_nodelay": True, "ketama": True},
'username': username,
'password': password
},
)
extra_long_term.configure(
memory_cache,
expiration_time=43200,
arguments={
'url': [memory_cache_url],
'binary': True,
'min_compress_len': 1024,
'behaviors': {"tcp_nodelay": True, "ketama": True},
'username': username,
'password': password
},
)
def setUp(self):
self.config = testing.setUp()
def _make_form(self, csrf_enabled=False, *args, **kwargs):
return self.form_class(request=self.request, csrf_enabled=self.csrf_enabled, *args, **kwargs)
def _get_field(self, field_name):
form = self._make_form()
return getattr(form, field_name)
def _get_validator(self, field, validator_class):
for validator in field.validators:
if isinstance(validator, validator_class):
return validator
def get_validator(self, field_name, validator_class):
return self._get_validator(
self._get_field(field_name),
validator_class
)
def has_field(self, field_name):
form = self._make_form()
return hasattr(form, field_name)
def assert_type(self, field_name, field_type):
self.assert_has(field_name)
assert self._get_field(field_name).__class__ is field_type
def assert_has(self, field_name):
try:
field = self._get_field(field_name)
except AttributeError:
field = None
msg = "Form does not have a field called '%s'." % field_name
assert isinstance(field, Field), msg
def assert_min(self, field_name, min_value):
field = self._get_field(field_name)
found = False
for validator in field.validators:
# we might have multiple NumberRange validators
if isinstance(validator, NumberRange):
if validator.min == min_value:
found = True
assert found, "Field does not have min value of %d" % min_value
def assert_max(self, field_name, max_value):
field = self._get_field(field_name)
found = False
for validator in field.validators:
# we might have multiple NumberRange validators
if isinstance(validator, NumberRange):
if validator.max == max_value:
found = True
assert found, "Field does not have max value of %d" % max_value
def assert_min_length(self, field_name, min_length):
field = self._get_field(field_name)
found = False
for validator in field.validators:
# we might have multiple Length validators
if isinstance(validator, Length):
if validator.min == min_length:
found = True
assert found, "Field does not have min length of %d" % min_length
def assert_max_length(self, field_name, max_length):
field = self._get_field(field_name)
found = False
for validator in field.validators:
# we might have multiple Length validators
if isinstance(validator, Length):
if validator.max == max_length:
found = True
assert found, "Field does not have max length of %d" % max_length
def assert_description(self, field_name, description):
field = self._get_field(field_name)
assert field.description == description
def assert_default(self, field_name, default):
field = self._get_field(field_name)
assert field.default == default
def assert_label(self, field_name, label):
field = self._get_field(field_name)
assert field.label.text == label
def assert_has_validator(self, field_name, validator):
field = self._get_field(field_name)
msg = "Field '%s' does not have validator %r." % (
field_name, validator
)
assert self._get_validator(field, validator), msg
def assert_not_optional(self, field_name):
field = self._get_field(field_name)
msg = "Field '%s' is optional." % field_name
assert not self._get_validator(field, DataRequired), msg
def assert_optional(self, field_name):
field = self._get_field(field_name)
msg = "Field '%s' is not optional." % field_name
assert self._get_validator(field, Optional), msg
def assert_choices(self, field_name, choices):
field = self._get_field(field_name)
assert field.choices == choices
def assert_choice_values(self, field_name, choices):
compare = lambda x, y: collections.Counter(x) == collections.Counter(y)
field = self._get_field(field_name)
assert compare(field.choices, choices)
def assert_not_required(self, field_name):
field = self._get_field(field_name)
msg = "Field '%s' is required." % field_name
valid = self._get_validator(field, InputRequired) or self._get_validator(field, DataRequired)
assert not valid, msg
def assert_required(self, field_name):
field = self._get_field(field_name)
msg = "Field '%s' is not required." % field_name
required = self._get_validator(field, InputRequired) or self._get_validator(field, DataRequired)
assert required, msg
def assert_email(self, field_name):
field = self._get_field(field_name)
msg = (
"Field '%s' is not required to be a valid email address." %
field_name
)
assert self._get_validator(field, Email), msg
| StarcoderdataPython |
1698040 | """
Zipf's law
This program fits data ranked along certain dimension (e.g. city population
and word appearance) to Zipfian distribution. The probability mass function
for zipf is: pmf(x, a) = 1/(zeta(a) * x**a), for x >= 1 and a > 1.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.zipf.html
It's clear that fitting data to zipf is essentially find a.
HOWEVER, the above function fails to characterize zipf if a <= 1. Therefore,
we resort to more original maths expression:
f(x) = (1/x**a) / sum_1^N (1/x**a), where N is the number of elements.
https://en.wikipedia.org/wiki/Zipf%27s_law
The right most part: sum_1^N (1/x**a) ~ (N**(1-a)-1) / (1-a)
https://en.wikipedia.org/wiki/Euler%E2%80%93Maclaurin_formula
This step significantly reduce computational complexity.
"""
import os.path
import numpy as np
import matplotlib.pyplot as plt
from scipy import special
import seaborn as sns
# import data
source_data_name = 'example_zipf.csv'
cwd = os.getcwd()
data_file = cwd + os.sep + source_data_name
data_original = np.genfromtxt(data_file, delimiter=',')
data_whole = data_original[~np.isnan(data_original)]
data_unique = np.trim_zeros(np.unique(data_whole))
# remove duplicates and rank the frequencies (sort them in descending order)
frequency = np.sort(data_unique)[::-1]
# truncate data if only part of the data is interested
frequency = frequency[0:1000]
rank = np.arange(1, len(frequency)+1)
pmf = frequency / sum(frequency)
# Zipf pmf(or normalized frequency) fitting with rank and frequency
# Maths: f(x) = 1 / (c * x**a) => log(f(x)) = - log(c) - a*log(x)
# Use numpy.polyfit (or scipy.polyfit) to find a and then we get f(x) easily
x = np.log(rank)
y = np.log(frequency / sum(frequency))
p = np.polyfit(x, y, 1)
a = -p[0]
if a > 1:
c1 = special.zeta(a)
c2 = rank ** a
pmf_z = 1 / (special.zeta(a) * rank ** a), a
else:
n = len(frequency)
pmf_z = (1-a) / ((n**(1-a) - 1) * rank ** a)
a = round(a, 3) # keep the three two decimal
# plot fitting result
log_plot = 1 # 0 - normal plot, 1 - log plot
format_on = 1 # 0 off, 1 on
sns.set()
if log_plot == 1:
plt.loglog(rank, pmf, 'o')
plt.loglog(rank, pmf_z, 'red', linewidth=2)
else:
plt.plot(rank, pmf, 'o')
plt.plot(rank, pmf_z, 'red', linewidth=2)
if format_on == 1:
plt.xlabel('Ranking of videos in terms of number of shares')
plt.ylabel('PMF')
lbs = ['Orignal data', 'Zipf distribution ($\\alpha$={})'.format(a)]
plt.legend(lbs, loc='upper right', bbox_to_anchor=(1, 1), frameon=False)
plt.tight_layout()
plt.show()
| StarcoderdataPython |
19918 | import const
def corpora2idx(sents, ind2idx):
return [[ind2idx[w] if w in ind2idx else const.UNK for w in s] for s in sents]
| StarcoderdataPython |
3343293 | <gh_stars>0
import logging
import requests
from bs4 import BeautifulSoup
from collections import Counter
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python Website Parser function processed a request.')
url = req.params.get('url')
logging.info(f"Received request for target URL {url}")
if url:
resultText = ""
try:
website = requests.get(url)
except requests.exceptions.RequestException as e:
logging.exception(f"Exception during external get request: {e}")
return func.HttpResponse(
"Could not call external URL",
status_code=500
)
soup = BeautifulSoup(website.content, 'html.parser')
h1 = [''.join(s.findAll(text=True))for s in soup.findAll('h1')]
if h1:
# We simply assume there might be one h1 heading - and just take the first one (there shouldn't be really more than one h1)
resultText += h1[0] + '\n\n'
# Text is assumed to be in <p> tags, so we grab all of those
text = [''.join(s.findAll(text=True))for s in soup.findAll('p')]
for item in text:
resultText += item
return func.HttpResponse(f"{resultText}")
else:
return func.HttpResponse(
"Please pass a url on the query string",
status_code=400
)
| StarcoderdataPython |
3246934 | """
CCT 建模优化代码
束线
作者:赵润晓
日期:2021年5月1日
"""
import multiprocessing # since v0.1.1 多线程计算
import time # since v0.1.1 统计计算时长
from typing import Callable, Dict, Generic, Iterable, List, NoReturn, Optional, Tuple, TypeVar, Union
import matplotlib.pyplot as plt
import math
import random # since v0.1.1 随机数
import sys
import os # since v0.1.1 查看CPU核心数
import numpy
from scipy.integrate import solve_ivp # since v0.1.1 ODE45
import warnings # since v0.1.1 提醒方法过时
from packages.point import *
from packages.constants import *
from packages.base_utils import BaseUtils
from packages.local_coordinate_system import LocalCoordinateSystem
from packages.line2s import *
from packages.trajectory import Trajectory
from packages.particles import *
from packages.magnets import *
from packages.cct import CCT
class Beamline(Line2, Magnet, ApertureObject):
def __init__(self, trajectory: Trajectory) -> None:
"""
不要直接调用构造器
请使用 set_start_point
"""
self.magnets: List[Magnet] = []
self.trajectory: Trajectory = trajectory
# 2021年3月18日 新增,表示元件。List 中每个元素表示一个元件
# 元件由三部分组成,位置、元件自身、长度
# 其中位置表示沿着 Beamline 的长度
# 元件自身,使用 None 表示漂移段。
self.elements: List[Tuple[float, Magnet, float]] = []
def magnetic_field_at(self, point: P3) -> P3:
"""
返回 Beamline 在全局坐标系点 P3 处产生的磁场
"""
b: P3 = P3.zeros()
for m in self.magnets:
b += m.magnetic_field_at(point)
return b
# from Magnet
def magnetic_field_along(
self,
line2: Optional[Line2] = None,
p2_t0_p3: Callable[[P2], P3] = lambda p2: P3(p2.x, p2.y, 0.0),
step: float = 1 * MM,
) -> List[ValueWithDistance[P3]]:
"""
计算本对象在二维曲线 line2 上的磁场分布(line2 为 None 时,默认为 self.trajectory)
p2_t0_p3 是一个函数,用于把 line2 上的二维点转为三维,默认转为 z=0 的三维点
step 表示 line2 分段长度
-------
"""
if line2 is None:
line2 = self.trajectory
return super(Beamline, self).magnetic_field_along(
line2=line2, p2_t0_p3=p2_t0_p3, step=step
)
def magnetic_field_bz_along(
self,
line2: Optional[Line2] = None,
p2_t0_p3: Callable[[P2], P3] = lambda p2: P3(p2.x, p2.y, 0.0),
step: float = 1 * MM,
) -> List[P2]:
"""
计算本对象在二维曲线 line (line2 为 None 时,默认为 self.trajectory)上的磁场 Z 方向分量的分布
因为磁铁一般放置在 XY 平面,所以 Bz 一般可以看作自然坐标系下 By,也就是二级场大小
p2_t0_p3 是一个函数,用于把 line2 上的二维点转为三维,默认转为 z=0 的三维点
step 表示 line2 分段长度
返回 P2 的数组,P2 中 x 表示曲线 line2 上距离 s,y 表示前述距离对应的点的磁场 bz
"""
if line2 is None:
line2 = self.trajectory
return super(Beamline, self).magnetic_field_bz_along(
line2=line2, p2_t0_p3=p2_t0_p3, step=step
)
def graident_field_along(
self,
line2: Optional[Line2] = None,
good_field_area_width: float = 10 * MM,
step: float = 1 * MM,
point_number: int = 4,
) -> List[P2]:
"""
计算本对象在二维曲线 line2 (line2 为 None 时,默认为 self.trajectory)上的磁场梯度的分布
每一点的梯度,采用这点水平垂线上 Bz 的多项式拟合得到
good_field_area_width:水平垂线的长度,注意应小于等于好场区范围
step:line2 上取点间距
point_number:水平垂线上取点数目,越多则拟合越精确
"""
if line2 is None:
line2 = self.trajectory
return super(Beamline, self).graident_field_along(
line2=line2, good_field_area_width=good_field_area_width, step=step, point_number=point_number
)
def second_graident_field_along(
self,
line2: Optional[Line2] = None,
good_field_area_width: float = 10 * MM,
step: float = 1 * MM,
point_number: int = 4,
) -> List[P2]:
"""
计算本对象在二维曲线 line2 (line2 为 None 时,默认为 self.trajectory)上的磁场二阶梯度的分布(六极场)
每一点的梯度,采用这点水平垂线上 Bz 的多项式拟合得到
good_field_area_width:水平垂线的长度,注意应小于等于好场区范围
step:line2 上取点间距
point_number:水平垂线上取点数目,越多则拟合越精确
"""
if line2 is None:
line2 = self.trajectory
return super(Beamline, self).second_graident_field_along(
line2=line2, good_field_area_width=good_field_area_width, step=step, point_number=point_number
)
def track_ideal_particle(
self,
kinetic_MeV: float,
s: float = 0.0,
length: Optional[float] = None,
footstep: float = 5 * MM,
) -> List[P3]:
"""
束流跟踪,运行一个理想粒子,返回轨迹
kinetic_MeV 粒子动能,单位 MeV
s 起点位置
length 粒子运行长度,默认运动到束线尾部
footstep 粒子运动步长
"""
if length is None:
length = self.trajectory.get_length() - s
ip = ParticleFactory.create_proton_along(
self.trajectory, s, kinetic_MeV)
return ParticleRunner.run_get_trajectory(ip, self, length, footstep)
def track_phase_space_particle(
self,
x_mm: float,
xp_mrad: float,
y_mm: float,
yp_mrad,
delta: float,
kinetic_MeV: float,
s: float = 0.0,
length: Optional[float] = None,
footstep: float = 10 * MM,
) -> List[ValueWithDistance[PhaseSpaceParticle]]:
"""
运行一个相空间粒子
x_mm 相空间坐标 x,单位 mm
xp_mrad 相空间坐标 xp,单位 mrad
y_mm 相空间坐标 y,单位 mm
yp_mrad 相空间坐标 yp,单位 mrad
delta 动量分散
kinetic_MeV 正则动能,单位 MeV
s 在束线上的起点,默认 0.0
length 运动长度,如果为空则运行到束线尾
footstep 运动步长,默认 10*MM
返回值是一个 List[ValueWithDistance[PhaseSpaceParticle]]
即一个数组,数组元素是 ValueWithDistance
即对应运动位置的粒子的相空间坐标信息
"""
if length is None:
length = self.trajectory.get_length() - s
pp = PhaseSpaceParticle(
x=x_mm * MM,
xp=xp_mrad * MM,
y=y_mm * MM,
yp=yp_mrad * MM,
z=0.0,
delta=delta
)
# ip, distence = 0.0
ip = ParticleFactory.create_proton_along(
self.trajectory, s, kinetic_MeV)
# to rp, distence = 0.0
rp = ParticleFactory.create_from_phase_space_particle(
ideal_particle=ip,
coordinate_system=ip.get_natural_coordinate_system(),
phase_space_particle=pp
)
# run all info, distence from 0.0
all_info = ParticleRunner.run_get_all_info(
p=rp,
m=self,
length=length,
footstep=footstep
)
# for cp
ret: List[ValueWithDistance[PhaseSpaceParticle]] = []
for cp in all_info:
d = cp.distance # , distence from 0.0
cip = ParticleFactory.create_proton_along(
self.trajectory, d + s, kinetic_MeV) # 所以这里是 d + s
cpp = PhaseSpaceParticle.create_from_running_particle(
ideal_particle=cip,
coordinate_system=cip.get_natural_coordinate_system(),
running_particle=cp
)
ret.append(ValueWithDistance(
value=cpp, distance=d
))
return ret
def track_phase_ellipse(
self,
x_sigma_mm: float,
xp_sigma_mrad: float,
y_sigma_mm: float,
yp_sigma_mrad,
delta: float,
particle_number: int,
kinetic_MeV: float,
s: float = 0.0,
length: Optional[float] = None,
footstep: float = 10 * MM,
concurrency_level: int = 1,
report: bool = True
) -> Tuple[List[P2], List[P2]]:
"""
束流跟踪,运行两个相椭圆边界上的粒子,
返回一个长度 2 的元组,表示相空间 x-xp 平面和 y-yp 平面上粒子投影(单位 mm / mrad)
两个相椭圆,一个位于 xxp 平面,参数为 σx 和 σxp ,动量分散为 delta
另一个位于 xxp 平面,参数为 σx 和 σxp ,动量分散为 delta
x_sigma_mm σx 单位 mm
xp_sigma_mrad σxp 单位 mrad
y_sigma_mm σy 单位 mm
yp_sigma_mrad σyp 单位 mrad
delta 动量分散 单位 1
particle_number 粒子数目
kinetic_MeV 动能 单位 MeV
s 起点位置
length 粒子运行长度,默认运行到束线尾部
footstep 粒子运动步长
concurrency_level 并发等级(使用多少个核心进行粒子跟踪)
report 是否打印并行任务计划
"""
if length is None:
length = self.trajectory.get_length() - s
ip_start = ParticleFactory.create_proton_along(
self.trajectory, s, kinetic_MeV)
ip_end = ParticleFactory.create_proton_along(
self.trajectory, s + length, kinetic_MeV
)
pp_x = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(
xMax=x_sigma_mm * MM,
xpMax=xp_sigma_mrad * MRAD,
delta=delta,
number=particle_number,
)
pp_y = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_yyp_plane(
yMax=y_sigma_mm * MM,
ypMax=yp_sigma_mrad * MRAD,
delta=delta,
number=particle_number,
)
rp_x = ParticleFactory.create_from_phase_space_particles(
ideal_particle=ip_start,
coordinate_system=ip_start.get_natural_coordinate_system(),
phase_space_particles=pp_x,
)
rp_y = ParticleFactory.create_from_phase_space_particles(
ideal_particle=ip_start,
coordinate_system=ip_start.get_natural_coordinate_system(),
phase_space_particles=pp_y,
)
# run
# refactor v0.1.1 合并计算
ParticleRunner.run_only(
p=rp_x + rp_y, m=self, length=length, footstep=footstep, concurrency_level=concurrency_level,
report=report
)
pp_x_end = PhaseSpaceParticle.create_from_running_particles(
ideal_particle=ip_end,
coordinate_system=ip_end.get_natural_coordinate_system(),
running_particles=rp_x,
)
pp_y_end = PhaseSpaceParticle.create_from_running_particles(
ideal_particle=ip_end,
coordinate_system=ip_end.get_natural_coordinate_system(),
running_particles=rp_y,
)
xs = [pp.project_to_xxp_plane() / MM for pp in pp_x_end]
ys = [pp.project_to_yyp_plane() / MM for pp in pp_y_end]
s: BaseUtils.Statistic = BaseUtils.Statistic()
print(
f"delta={delta}," +
f"avg_size_x={s.clear().add_all(P2.extract(xs)[0]).half_width()}mm," +
f"avg_size_y={s.clear().add_all(P2.extract(ys)[0]).half_width()}mm"
)
return (xs, ys)
# from ApertureObject
def is_out_of_aperture(self, point: P3) -> bool:
"""
判断点 point 是否超出 Beamline 的任意一个元件的孔径
只有当粒子轴向投影在元件内部时,才会进行判断,
否则即时粒子距离轴线很远,也认为粒子没有超出孔径,
这是因为粒子不在元件内时,很可能处于另一个大孔径元件中,这样会造成误判。
注意:这个函数的效率极低!
"""
for m in self.magnets:
if isinstance(m, ApertureObject) and m.is_out_of_aperture(point):
print(f"beamline在{m}位置超出孔径")
return True
return False
def trace_is_out_of_aperture(
self, trace_with_distance: List[ValueWithDistance[P3]]
) -> bool:
"""
判断一条粒子轨迹是否超出孔径
注意:这个函数的效率极低!
"""
for pd in trace_with_distance:
if self.is_out_of_aperture(pd.value):
return True
return False
def get_length(self) -> float:
"""
获得 Beamline 的长度
"""
return self.trajectory.get_length()
def point_at(self, s: float) -> P2:
"""
获得 Beamline s 位置处的点 (x,y)
-------
"""
return self.trajectory.point_at(s)
def direct_at(self, s: float) -> P2:
"""
获得 Beamline s 位置处的方向
"""
return self.trajectory.direct_at(s)
class __BeamlineBuilder:
"""
构建 Beamline 的中间产物
"""
def __init__(self, start_point: P2) -> None:
self.start_point = start_point
def first_drift(self, direct: P2 = P2.x_direct(), length: float = 1.0) -> "Beamline":
"""
为 Beamline 添加第一个 drift
正如 Trajectory 的第一个曲线段必须是是直线一样
Beamline 中第一个元件必须是 drift
"""
bl = Beamline(
Trajectory.set_start_point(self.start_point).first_line(
direct=direct, length=length
)
)
bl.elements.append((0, None, length))
return bl
@staticmethod
# -> "Beamline.__BeamlineBuilder"
def set_start_point(start_point: P2 = P2.origin()):
"""
设置束线起点
"""
return Beamline.__BeamlineBuilder(start_point)
def append_drift(self, length: float) -> "Beamline":
"""
尾加漂移段
length 漂移段长度
"""
old_len = self.trajectory.get_length()
self.trajectory.add_strait_line(length=length)
self.elements.append((old_len, None, length))
return self
def append_straight_dipole_magnet(
self,
magnetic_field: float,
length: float,
aperture_radius: float,
# field_direct: P2 = P2.y_direct()
) -> "Beamline":
"""
尾加直线二极铁
"""
old_length = self.trajectory.get_length()
self.trajectory.add_strait_line(length=length)
lum = LocalUniformMagnet.create_local_uniform_magnet_along(
trajectory=self.trajectory,
s=old_length,
length=length,
magnetic_field=magnetic_field,
aperture_radius=aperture_radius,
)
self.magnets.append(lum)
self.elements.append((old_length, lum, length))
return self
def append_qs(
self,
length: float,
gradient: float,
second_gradient: float,
aperture_radius: float,
) -> "Beamline":
"""
尾加 QS 磁铁
length: float QS 磁铁长度
gradient: float 梯度 T/m
second_gradient: float 二阶梯度(六极场) T/m^2
aperture_radius: float 半孔径 单位 m
"""
old_length = self.trajectory.get_length()
self.trajectory.add_strait_line(length=length)
qs = QS.create_qs_along(
trajectory=self.trajectory,
s=old_length,
length=length,
gradient=gradient,
second_gradient=second_gradient,
aperture_radius=aperture_radius,
)
self.magnets.append(qs)
self.elements.append((old_length, qs, length))
return self
def append_q(
self,
length: float,
gradient: float,
aperture_radius: float,
) -> "Beamline":
"""
尾加 Q 磁铁
length: float QS 磁铁长度
gradient: float 梯度 T/m
aperture_radius: float 半孔径 单位 m
"""
old_length = self.trajectory.get_length()
self.trajectory.add_strait_line(length=length)
q = Q.create_q_along(
trajectory=self.trajectory,
s=old_length,
length=length,
gradient=gradient,
aperture_radius=aperture_radius,
)
self.magnets.append(q)
self.elements.append((old_length, q, length))
return self
def append_dipole_cct(
self,
big_r: float,
small_r_inner: float,
small_r_outer: float,
bending_angle: float,
tilt_angles: List[float],
winding_number: int,
current: float,
disperse_number_per_winding: int = 120,
) -> "Beamline":
"""
尾加二极CCT
big_r: float 偏转半径
small_r_inner: float 内层半孔径
small_r_outer: float 外层半孔径
bending_angle: float 偏转角度(正数表示逆时针、负数表示顺时针)
tilt_angles: List[float] 各极倾斜角
winding_number: int 匝数
current: float 电流
disperse_number_per_winding: int 每匝分段数目,越大计算越精确
"""
old_length = self.trajectory.get_length()
cct_length = big_r * abs(BaseUtils.angle_to_radian(bending_angle))
self.trajectory.add_arc_line(
radius=big_r, clockwise=bending_angle < 0, angle_deg=abs(bending_angle)
)
cct_inner = CCT.create_cct_along(
trajectory=self.trajectory,
s=old_length,
big_r=big_r,
small_r=small_r_inner,
bending_angle=abs(bending_angle),
tilt_angles=tilt_angles,
winding_number=winding_number,
current=current,
starting_point_in_ksi_phi_coordinate=P2.origin(),
end_point_in_ksi_phi_coordinate=P2(
2 * math.pi * winding_number,
BaseUtils.angle_to_radian(bending_angle),
),
disperse_number_per_winding=disperse_number_per_winding,
)
self.magnets.append(cct_inner)
self.elements.append((old_length, cct_inner, cct_length))
cct_outer = CCT.create_cct_along(
trajectory=self.trajectory,
s=old_length,
big_r=big_r,
small_r=small_r_outer,
bending_angle=abs(bending_angle),
tilt_angles=BaseUtils.list_multiply(tilt_angles, -1),
winding_number=winding_number,
current=current,
starting_point_in_ksi_phi_coordinate=P2.origin(),
end_point_in_ksi_phi_coordinate=P2(
-2 * math.pi * winding_number,
BaseUtils.angle_to_radian(bending_angle),
),
disperse_number_per_winding=disperse_number_per_winding,
)
self.magnets.append(cct_outer)
self.elements.append((old_length, cct_outer, cct_length))
return self
def append_agcct(
self,
big_r: float,
small_rs: List[float],
bending_angles: List[float],
tilt_angles: List[List[float]],
winding_numbers: List[List[int]],
currents: List[float],
disperse_number_per_winding: int = 120,
) -> "Beamline":
"""
尾加 agcct
本质是两层二极 CCT 和两层交变四极 CCT
big_r: float 偏转半径,单位 m
small_rs: List[float] 各层 CCT 的孔径,一共四层,从大到小排列。分别是二极CCT外层、内层,四极CCT外层、内层
bending_angles: List[float] 交变四极 CCT 每个 part 的偏转半径(正数表示逆时针、负数表示顺时针),要么全正数,要么全负数。不需要传入二极 CCT 偏转半径,因为它就是 sum(bending_angles)
tilt_angles: List[List[float]] 二极 CCT 和四极 CCT 的倾斜角,典型值 [[30],[90,30]],只有两个元素的二维数组
winding_numbers: List[List[int]], 二极 CCT 和四极 CCT 的匝数,典型值 [[128],[21,50,50]] 表示二极 CCT 128匝,四极交变 CCT 为 21、50、50 匝
currents: List[float] 二极 CCT 和四极 CCT 的电流,典型值 [8000,9000]
disperse_number_per_winding: int 每匝分段数目,越大计算越精确
添加 CCT 的顺序为:
外层二极 CCT
内层二极 CCT
part1 四极 CCT 内层
part1 四极 CCT 外层
part2 四极 CCT 内层
part2 四极 CCT 外层
... ...
"""
if len(small_rs) != 4:
raise ValueError(
f"small_rs({small_rs}),长度应为4,分别是二极CCT外层、内层,四极CCT外层、内层")
if not BaseUtils.is_sorted(small_rs[::-1]):
raise ValueError(
f"small_rs({small_rs}),应从大到小排列,分别是二极CCT外层、内层,四极CCT外层、内层")
total_bending_angle = sum(bending_angles)
old_length = self.trajectory.get_length()
cct_length = big_r * \
abs(BaseUtils.angle_to_radian(total_bending_angle))
self.trajectory.add_arc_line(
radius=big_r,
clockwise=total_bending_angle < 0,
angle_deg=abs(total_bending_angle),
)
# 构建二极 CCT 外层
cct2_outer = CCT.create_cct_along(
trajectory=self.trajectory,
s=old_length,
big_r=big_r,
small_r=small_rs[0],
bending_angle=abs(total_bending_angle),
tilt_angles=BaseUtils.list_multiply(tilt_angles[0], -1),
winding_number=winding_numbers[0][0],
current=currents[0],
starting_point_in_ksi_phi_coordinate=P2.origin(),
end_point_in_ksi_phi_coordinate=P2(
-2 * math.pi * winding_numbers[0][0],
BaseUtils.angle_to_radian(total_bending_angle),
),
disperse_number_per_winding=disperse_number_per_winding,
)
self.magnets.append(cct2_outer)
self.elements.append((old_length, cct2_outer, cct_length))
# 构建二极 CCT 内层
cct2_innter = CCT.create_cct_along(
trajectory=self.trajectory,
s=old_length,
big_r=big_r,
small_r=small_rs[1],
bending_angle=abs(total_bending_angle),
tilt_angles=tilt_angles[0],
winding_number=winding_numbers[0][0],
current=currents[0],
starting_point_in_ksi_phi_coordinate=P2.origin(),
end_point_in_ksi_phi_coordinate=P2(
2 * math.pi * winding_numbers[0][0],
BaseUtils.angle_to_radian(total_bending_angle),
),
disperse_number_per_winding=disperse_number_per_winding,
)
self.magnets.append(cct2_innter)
self.elements.append((old_length, cct2_innter, cct_length))
# 构建内外侧四极交变 CCT
# 提取参数
agcct_small_r_out = small_rs[2]
agcct_small_r_in = small_rs[3]
agcct_winding_nums: List[int] = winding_numbers[1]
agcct_bending_angles: List[float] = bending_angles
agcct_bending_angles_rad: List[float] = BaseUtils.angle_to_radian(
agcct_bending_angles
)
agcct_tilt_angles: List[float] = tilt_angles[1]
agcct_current: float = currents[1]
# 构建 part1
agcct_index = 0
agcct_start_in = P2.origin()
agcct_start_out = P2.origin()
agcct_end_in = P2(
((-1.0) ** agcct_index) * 2 * math.pi *
agcct_winding_nums[agcct_index],
agcct_bending_angles_rad[agcct_index],
)
agcct_end_out = P2(
((-1.0) ** (agcct_index + 1))
* 2
* math.pi
* agcct_winding_nums[agcct_index],
agcct_bending_angles_rad[agcct_index],
)
agcct_part1_inner = CCT.create_cct_along(
trajectory=self.trajectory,
s=old_length,
big_r=big_r,
small_r=agcct_small_r_in,
bending_angle=abs(agcct_bending_angles[agcct_index]),
tilt_angles=BaseUtils.list_multiply(agcct_tilt_angles, -1),
winding_number=agcct_winding_nums[agcct_index],
current=agcct_current,
starting_point_in_ksi_phi_coordinate=agcct_start_in,
end_point_in_ksi_phi_coordinate=agcct_end_in,
disperse_number_per_winding=disperse_number_per_winding,
)
agcct_part1_length = big_r * \
BaseUtils.angle_to_radian(abs(agcct_bending_angles[agcct_index]))
self.magnets.append(agcct_part1_inner)
self.elements.append(
(old_length, agcct_part1_inner, agcct_part1_length))
agcct_part1_outer = CCT.create_cct_along(
trajectory=self.trajectory,
s=old_length,
big_r=big_r,
small_r=agcct_small_r_out,
bending_angle=abs(agcct_bending_angles[agcct_index]),
tilt_angles=agcct_tilt_angles,
winding_number=agcct_winding_nums[agcct_index],
current=agcct_current,
starting_point_in_ksi_phi_coordinate=agcct_start_out,
end_point_in_ksi_phi_coordinate=agcct_end_out,
disperse_number_per_winding=disperse_number_per_winding,
)
self.magnets.append(agcct_part1_outer)
self.elements.append(
(old_length, agcct_part1_outer, agcct_part1_length))
old_length_i = old_length + agcct_part1_length
# 构建 part2 和之后的 part
for ignore in range(len(agcct_bending_angles) - 1):
agcct_index += 1
agcct_start_in = agcct_end_in + P2(
0,
agcct_bending_angles_rad[agcct_index - 1]
/ agcct_winding_nums[agcct_index - 1],
)
agcct_start_out = agcct_end_out + P2(
0,
agcct_bending_angles_rad[agcct_index - 1]
/ agcct_winding_nums[agcct_index - 1],
)
agcct_end_in = agcct_start_in + P2(
((-1) ** agcct_index) * 2 * math.pi *
agcct_winding_nums[agcct_index],
agcct_bending_angles_rad[agcct_index],
)
agcct_end_out = agcct_start_out + P2(
((-1) ** (agcct_index + 1))
* 2
* math.pi
* agcct_winding_nums[agcct_index],
agcct_bending_angles_rad[agcct_index],
)
agcct_parti_inner = CCT.create_cct_along(
trajectory=self.trajectory,
s=old_length,
big_r=big_r,
small_r=agcct_small_r_in,
bending_angle=abs(agcct_bending_angles[agcct_index]),
tilt_angles=BaseUtils.list_multiply(agcct_tilt_angles, -1),
winding_number=agcct_winding_nums[agcct_index],
current=agcct_current,
starting_point_in_ksi_phi_coordinate=agcct_start_in,
end_point_in_ksi_phi_coordinate=agcct_end_in,
disperse_number_per_winding=disperse_number_per_winding,
)
agcct_parti_length = big_r * \
BaseUtils.angle_to_radian(
abs(agcct_bending_angles[agcct_index]))
self.magnets.append(agcct_parti_inner)
self.elements.append(
(old_length_i, agcct_parti_inner, agcct_parti_length))
agcct_parti_outer = CCT.create_cct_along(
trajectory=self.trajectory,
s=old_length,
big_r=big_r,
small_r=agcct_small_r_out,
bending_angle=abs(agcct_bending_angles[agcct_index]),
tilt_angles=agcct_tilt_angles,
winding_number=agcct_winding_nums[agcct_index],
current=agcct_current,
starting_point_in_ksi_phi_coordinate=agcct_start_out,
end_point_in_ksi_phi_coordinate=agcct_end_out,
disperse_number_per_winding=disperse_number_per_winding,
)
self.magnets.append(agcct_parti_outer)
self.elements.append(
(old_length_i, agcct_parti_outer, agcct_parti_length))
old_length_i += agcct_parti_length
return self
    def get_magnets(self) -> List[Magnet]:
        """Return the list of magnets that make up this beamline."""
        return self.magnets
    def get_trajectory(self) -> Trajectory:
        """Return the reference trajectory this beamline was built along."""
        return self.trajectory
    def __str__(self) -> str:
        """Human-readable summary: magnet count and trajectory length."""
        return f"beamline(magnet_size={len(self.magnets)}, traj_len={self.trajectory.get_length()})"
    def __repr__(self) -> str:
        """Mirror __str__ so debug output matches the printed form."""
        return self.__str__()
3200072 | <filename>TranscriptCatcher/WallmineCatcher.py<gh_stars>0
from selenium import webdriver
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common import exceptions
import time
import os,shutil
# Module-level configuration for the wallmine transcript scraper.
directory_path = os.getcwd()
# NOTE(review): `name` is reassigned inside the main loop, and `quarter`,
# `str1`..`str3` are never read anywhere below — presumably leftovers from
# an earlier version; confirm before removing.
name='amzn'
quarter='q4'
str1 = 'Earning'
str2 = 'Call'
str3 = 'transcript'
# Years whose Q4 earnings-call transcripts are collected.
years = ['2017','2018','2019','2020']
# NOTE(review): absolute chromedriver path ties the script to one machine —
# consider reading it from an environment variable or CLI argument.
driver = webdriver.Chrome(executable_path=r'C:/Users/Qiaoyi/Desktop/MAT/TranscriptCatcher/WebDriver/chromedriver.exe')
def textCatcher(outfile):
    """Save the transcript text of the currently open article page.

    The article body is written to ECTs/<outfile>.  When the body cannot be
    located within the timeout, the transcript name is appended to
    ECTnotfounds.txt instead and the browser navigates back one page.

    Relies on the module-level `driver` and `directory_path` globals.
    """
    time.sleep(1)
    try:
        WebDriverWait(driver,20).until(EC.visibility_of_all_elements_located((By.XPATH,"//section[@class='usmf-new article-body']")))
        elem = driver.find_element_by_xpath("//section[@class='usmf-new article-body']")
        article = elem.find_element(By.XPATH,"//span[@class='article-content']")
        outpath = os.path.join(directory_path,'ECTs')
        outpath = os.path.join(outpath,outfile)
        # BUGFIX: the output file was never closed (and `writelines` was used
        # on a plain string) — use a context manager and a single write().
        with open(outpath,'w') as out:
            try:
                print(article.text)
                out.write(article.text)
            except Exception:
                # Best-effort: keep scraping other transcripts on failure,
                # but avoid the original bare `except:`.
                print("error")
    except exceptions.TimeoutException as e:
        # Article body never appeared: log the miss and back out.
        outpath = os.path.join(directory_path,"ECTnotfounds.txt")
        with open(outpath,'a') as notfound:
            notfound.write(outfile.replace(".txt","").replace("-"," "))
        print(e)
        time.sleep(3)
        # BUGFIX: driver.back() returns None; the old `elem =` assignment
        # was meaningless and has been dropped.
        driver.back()
    return
def searchCatcher(keyStr):
    """Search wallmine for company *keyStr* and download its Q4 transcripts.

    Drives the site's search box, neutralises the rate-limit signup modal,
    detects 404 result pages, then collects the 2017-2020 Q4 earnings-call
    links and hands each one to textCatcher().  Companies or years without a
    transcript are logged to ECTnotfounds.txt.

    Relies on the module-level `driver`, `directory_path` and `years` globals.
    """
    outpath = os.path.join(directory_path,"ECTnotfounds.txt")
    # NOTE(review): this handle is never closed and is shadowed by a second
    # open() further down — consider a single context manager.
    notfound = open(outpath,'a')
    # search for company in keyStr
    WebDriverWait(driver,20).until(EC.visibility_of_all_elements_located((By.XPATH,"//input[@name='q']")))
    search_bar = driver.find_element_by_xpath("//input[@name='q']")
    search_bar.click()
    search_bar.clear()
    search_bar.send_keys(str(keyStr))
    time.sleep(3)
    search_bar.send_keys(Keys.RETURN)
    # limit time login popup box handler: hide the signup modal by rewriting
    # its attributes with JavaScript instead of logging in.
    try:
        WebDriverWait(driver,10).until(EC.visibility_of_all_elements_located((By.XPATH,"//div[@id='limit-sign-up-modal']")))
        limitsignup = driver.find_element(By.XPATH,"//div[@id='limit-sign-up-modal']")
        body = driver.find_element(By.TAG_NAME,"body")
        emptydiv=driver.find_element(By.XPATH,"//div[@class='modal-backdrop fade show']")
        time.sleep(3)
        driver.execute_script("arguments[0].setAttribute('class','sign-up-modal modal fade');",limitsignup)
        driver.execute_script("arguments[0].setAttribute('aria-hidden','true');",limitsignup)
        driver.execute_script("arguments[0].setAttribute('style','display: none;');",limitsignup)
        driver.execute_script("arguments[0].setAttribute('class','kit1 platform--ios');",body)
        driver.execute_script("arguments[0].setAttribute('class','');",emptydiv)
    except (exceptions.NoSuchElementException,exceptions.TimeoutException) as e:
        print("No limit time login popup boxes.\n")
    # 404 page handler: a "couldn't find" message means the company has no
    # search results at all, so go back to the previous page.
    try:
        WebDriverWait(driver,10).until(EC.visibility_of_all_elements_located((By.XPATH,"//div[@class='content']")))
        notfoundmsg = driver.find_element(By.TAG_NAME,"b")
        if "couldn't find" in notfoundmsg.text:
            driver.back()
            time.sleep(3)
            return
    except (exceptions.NoSuchElementException,exceptions.TimeoutException) as e:
        print("Not 404 page.\n")
    # transcript division finder: if the transcripts section never renders,
    # record all four target years as missing.
    try:
        WebDriverWait(driver,20).until(EC.visibility_of_all_elements_located((By.XPATH,"//div[@class='company-transcripts']")))
    except exceptions.TimeoutException as e:
        notfound.write("q4 "+"2017"+" "+keyStr+" EarningCallTranscript\n")
        notfound.write("q4 "+"2018"+" "+keyStr+" EarningCallTranscript\n")
        notfound.write("q4 "+"2019"+" "+keyStr+" EarningCallTranscript\n")
        notfound.write("q4 "+"2020"+" "+keyStr+" EarningCallTranscript\n")
        return
    # try to find company transcript section in page and open "Recent ..."
    try:
        time.sleep(3)
        division = driver.find_element(By.XPATH,"//div[@class='company-transcripts']")
        driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'nearest'});",division)
        link = division.find_element(By.PARTIAL_LINK_TEXT,"Recent")
        link.click()
    except exceptions.NoSuchElementException as e:
        print(e)
        print(keyStr+": Company Transcript Not Found")
        notfound.write(keyStr+"\n")
        return
    # get 2017-2020 q4 earning call transcript href links from the table rows
    time.sleep(3)
    try:
        WebDriverWait(driver,20).until(EC.visibility_of_all_elements_located((By.XPATH,"//table[@class='table table-sm table--no-margin table-striped']")))
        table = driver.find_element(By.XPATH,"//table[@class='table table-sm table--no-margin table-striped']")
        rows = table.find_elements(By.XPATH,'//tr[@class="js-clickable-row clickable-row"]')
        urls=[]
        for row in rows:
            url = row.get_attribute("data-href")
            for year in years:
                if "q4" in url and year in url and "earning" in url and "transcript" in url:
                    urls.append(url)
                    break
    except (exceptions.NoSuchElementException,exceptions.TimeoutException,exceptions.ElementNotInteractableException) as e:
        print("Clickable transcript finding problem\n")
        return
    # get each link's transcript text; only fool.com articles are parseable,
    # anything else is logged as not found.
    try:
        if len(urls)!=0:
            for u in urls:
                if "2017" in u:
                    year="2017"
                elif "2018" in u:
                    year="2018"
                elif "2019" in u:
                    year="2019"
                elif "2020" in u:
                    year="2020"
                else:
                    year="0000"
                outfilename = keyStr+"-"+"q4-"+year+"-EarningCallTranscript.txt"
                if "fool" not in u:
                    outpath = os.path.join(directory_path,"ECTnotfounds.txt")
                    notfound = open(outpath,'a')
                    notfound.write(outfilename.replace(".txt","").replace("-"," ")+"\n")
                    continue
                driver.get(u)
                WebDriverWait(driver,20).until(EC.visibility_of_all_elements_located((By.XPATH,"//section[@class='usmf-new article-body']")))
                time.sleep(3)
                textCatcher(outfilename)
    except (exceptions.TimeoutException,UnboundLocalError) as e:
        print("Text Catcher calling problem\n")
    # Return to the landing page so the next company search starts fresh.
    driver.get("https://wallmine.com/")
    time.sleep(3)
    return
#==============================Main===============================================#
# Reset the output directory so every run starts from a clean slate.
if not os.path.exists('ECTs'):
    os.mkdir(os.path.join(directory_path,'ECTs'))
else:
    shutil.rmtree(os.path.join(directory_path,'ECTs'))
    os.mkdir(os.path.join(directory_path,'ECTs'))
infile = os.path.join(directory_path,"SNP500.txt")
outpath = os.path.join(directory_path,"ECTnotfounds.txt")
# BUGFIX: os.remove() raised FileNotFoundError on the very first run, when
# the log file does not exist yet — only delete it if it is there.
if os.path.exists(outpath):
    os.remove(outpath)
driver.get("https://wallmine.com/")
time.sleep(2)
# One search per ticker listed in SNP500.txt (lower-cased, newline stripped).
with open(infile,'r') as content:
    data = content.readlines()
for line in data:
    name = line.replace("\n","").lower()
    print (name)
    searchCatcher(name)
driver.close()
| StarcoderdataPython |
167168 | from citrination_client.data import DatasetFile
def test_can_crud_path():
    """Exercise the full get/set/delete cycle of DatasetFile.path."""
    initial = "path"
    dataset_file = DatasetFile(initial)
    assert dataset_file.path is initial
    dataset_file.path = initial
    assert dataset_file.path is initial
    del dataset_file.path
    assert dataset_file.path is None
def test_can_crud_url():
    """Exercise the full get/set/delete cycle of DatasetFile.url."""
    some_path = "path"
    dataset_file = DatasetFile(some_path)
    target_url = "http://mysite.com"
    assert dataset_file.url is None
    dataset_file.url = target_url
    assert dataset_file.url is target_url
    del dataset_file.url
    assert dataset_file.url is None
1659980 | <filename>number_of_ships_rectangle.py
"""
https://leetcode.com/problems/number-of-ships-in-a-rectangle/
"""
from __future__ import annotations
class Sea(object):
    """Interface stub supplied by the LeetCode judge at runtime.

    hasShips reports whether the axis-aligned rectangle spanned by
    *bottomLeft* and *topRight* (inclusive) contains at least one ship.
    """
    def hasShips(self, topRight: 'Point', bottomLeft: 'Point') -> bool:
        # Implemented by the judge; this stub exists only for local typing.
        pass
class Point(object):
    """Integer grid coordinate with x/y components."""
    def __init__(self, x: int, y: int):
        self.x = x
        self.y = y
class Solution(object):
    """Count ships in a rectangle by divide and conquer (LeetCode 1274)."""

    def countShips(self, sea: 'Sea', topRight: 'Point', bottomLeft: 'Point') -> int:
        """Recursively count ships inside the rectangle [bottomLeft, topRight].

        The rectangle is halved along its longer axis; empty rectangles are
        pruned via Sea.hasShips, and a 1x1 rectangle known to contain a ship
        contributes 1.
        """
        # Degenerate rectangle (corners crossed): nothing to count.
        if topRight.x < bottomLeft.x or topRight.y < bottomLeft.y:
            return 0
        # BUGFIX: the original tested the bound method object (`sea.hasShips`)
        # instead of calling it, so pruning never happened.
        if not sea.hasShips(topRight, bottomLeft):
            return 0
        # BUGFIX: the original base case returned None (a bare `return`);
        # a single cell with a ship must contribute exactly 1.
        if topRight.x == bottomLeft.x and topRight.y == bottomLeft.y:
            return 1
        X = topRight.x - bottomLeft.x
        Y = topRight.y - bottomLeft.y
        if X > Y:
            # Split along the x axis.  (BUGFIX: the original had a missing
            # comma in `Point(half topRight.y)` — a SyntaxError.)
            half = bottomLeft.x + X // 2
            left = self.countShips(sea, Point(half, topRight.y), bottomLeft)
            right = self.countShips(sea, topRight, Point(half + 1, bottomLeft.y))
            return left + right
        # Split along the y axis.
        half = bottomLeft.y + Y // 2
        lower = self.countShips(sea, Point(topRight.x, half), bottomLeft)
        upper = self.countShips(sea, topRight, Point(bottomLeft.x, half + 1))
        return lower + upper
149578 | import pygame
from core import animation
from utils import vector
from utils.settings import *
class Actor(pygame.sprite.Sprite):
    """Generic game actor: a sprite with position, velocity, animation,
    a collision rectangle and a configurable screen-bounds policy.

    Subclasses are expected to provide ``self.rect`` (usually from their
    image) and animation frames before the first ``update()`` call.
    """

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.actor_type = ACTOR_NONE
        self.can_collide = False
        self.active = False
        self.hitrect = pygame.Rect(0, 0, 0, 0)
        self.hitrect_offset_x = 0
        self.hitrect_offset_y = 0
        self.object_collided_with = self
        self.bound_style = None
        self.animation_list = animation.Animation()
        self.image = None
        self.position = vector.Vector2d(0, 0)
        # BUGFIX: velocity was only ever set by subclasses; give it a safe
        # default so update() cannot fail with AttributeError.
        self.velocity = vector.Vector2d(0, 0)
        self.bounds = 0, 0, 0, 0

    def actor_update(self):
        """Hook for subclasses; called at the end of every update()."""
        pass

    def update(self):
        # Advance the animation if one is configured; actors without frames
        # simply keep their current image (best-effort, no bare except).
        try:
            self.animation_list.update()
            self.image = self.animation_list.image
        except Exception:
            pass
        self.position += self.velocity
        self.check_bounds()
        self.rect.center = (self.position.x, self.position.y)
        self.hitrect.center = (self.position.x + self.hitrect_offset_x,
                               self.position.y + self.hitrect_offset_y)
        self.actor_update()

    def check_collision(self, group_checked):
        """Test this actor's hit-rect against every actor in *group_checked*
        and fire collide() on both parties for each active overlap."""
        for object_checked in group_checked:
            if self.hitrect.colliderect(object_checked.hitrect):
                if self.active and object_checked.active:
                    self.object_collided_with = object_checked
                    object_checked.object_collided_with = self
                    self.collide()
                    object_checked.collide()

    def collide(self):
        """Hook for subclasses; called when a collision is detected."""
        pass

    def check_bounds(self):
        """Trigger out_of_bounds() when the position leaves self.bounds."""
        current_x = self.position.x
        current_y = self.position.y
        if (current_x < self.bounds[LEFT] or current_x > self.bounds[RIGHT]
                or current_y < self.bounds[TOP] or current_y > self.bounds[BOTTOM]):
            self.out_of_bounds()

    def die(self):
        """Remove this actor from all sprite groups."""
        # NOTE: the former `del self` only unbound the local name and did
        # nothing useful; kill() is sufficient.
        self.kill()

    def out_of_bounds(self):
        """Apply the configured bounds policy (clamp/wrap/reflect/kill/custom)."""
        if self.bound_style == BOUND_STYLE_CLAMP:
            if self.position.x < self.bounds[LEFT]:
                self.position = vector.Vector2d(self.bounds[LEFT], self.position.y)
            elif self.position.x > self.bounds[RIGHT]:
                self.position = vector.Vector2d(self.bounds[RIGHT], self.position.y)
            if self.position.y < self.bounds[TOP]:
                self.position = vector.Vector2d(self.position.x, self.bounds[TOP])
            elif self.position.y > self.bounds[BOTTOM]:
                self.position = vector.Vector2d(self.position.x, self.bounds[BOTTOM])
        elif self.bound_style == BOUND_STYLE_WRAP:
            if self.position.x < self.bounds[LEFT]:
                self.position = vector.Vector2d(self.bounds[RIGHT], self.position.y)
            elif self.position.x > self.bounds[RIGHT]:
                # BUGFIX: these three wrap branches previously assigned plain
                # tuples to self.position, which broke the Vector2d arithmetic
                # (`position += velocity`) on the next update().
                self.position = vector.Vector2d(self.bounds[LEFT], self.position.y)
            if self.position.y < self.bounds[TOP]:
                self.position = vector.Vector2d(self.position.x, self.bounds[BOTTOM])
            elif self.position.y > self.bounds[BOTTOM]:
                self.position = vector.Vector2d(self.position.x, self.bounds[TOP])
        elif self.bound_style == BOUND_STYLE_REFLECT:
            if self.position.x < self.bounds[LEFT]:
                self.position = vector.Vector2d(self.bounds[LEFT], self.position.y)
                self.velocity *= -1.0, 1.0
            elif self.position.x > self.bounds[RIGHT]:
                self.position = vector.Vector2d(self.bounds[RIGHT], self.position.y)
                self.velocity *= -1.0, 1.0
            if self.position.y < self.bounds[TOP]:
                self.position = vector.Vector2d(self.position.x, self.bounds[TOP])
                self.velocity *= 1.0, -1.0
            elif self.position.y > self.bounds[BOTTOM]:
                self.position = vector.Vector2d(self.position.x, self.bounds[BOTTOM])
                self.velocity *= 1.0, -1.0
        elif self.bound_style == BOUND_STYLE_KILL:
            self.kill()
        elif self.bound_style == BOUND_STYLE_CUSTOM:
            self.custom_bounds()

    def custom_bounds(self):
        """Hook for subclasses using BOUND_STYLE_CUSTOM."""
        pass
| StarcoderdataPython |
3356331 | # -*- coding: utf-8 -*-
""" Manifest Defaults
"""
from django.conf import settings
from django.urls import reverse_lazy
# Each constant reads an override from Django settings, falling back to the
# documented default.
MANIFEST_ACTIVATED_LABEL = getattr(
    settings, "MANIFEST_ACTIVATED_LABEL", "ACCOUNT_ACTIVATED"
)
# Days a user has to activate their account before the key expires.
MANIFEST_ACTIVATION_DAYS = getattr(settings, "MANIFEST_ACTIVATION_DAYS", 7)
MANIFEST_ACTIVATION_REQUIRED = getattr(
    settings, "MANIFEST_ACTIVATION_REQUIRED", True
)
# NOTE(review): this reads the *GRAVATAR* settings key for the AVATAR
# default — looks like a copy/paste slip; confirm whether
# "MANIFEST_AVATAR_DEFAULT" was intended before changing it.
MANIFEST_AVATAR_DEFAULT = getattr(
    settings, "MANIFEST_GRAVATAR_DEFAULT", "gravatar"
)
MANIFEST_AVATAR_SIZE = getattr(settings, "MANIFEST_AVATAR_SIZE", 128)
MANIFEST_DISABLE_PROFILE_LIST = getattr(
    settings, "MANIFEST_DISABLE_PROFILE_LIST", False
)
# Usernames that would collide with URL routes or reserved pages.
MANIFEST_FORBIDDEN_USERNAMES = getattr(
    settings,
    "MANIFEST_FORBIDDEN_USERNAMES",
    (
        "login",
        "logout",
        "register",
        "activate",
        "signin",
        "signout",
        "signup",
        "me",
        "user",
        "account",
        "email",
        "password",
        "profile",
        "about",
        "contact",
        "test",
    ),
)
MANIFEST_GRAVATAR_DEFAULT = getattr(
    settings, "MANIFEST_GRAVATAR_DEFAULT", "identicon"
)
MANIFEST_LANGUAGE_CODE = getattr(settings, "LANGUAGE_CODE", "en-us")
MANIFEST_LOCALE_FIELD = getattr(settings, "MANIFEST_LOCALE_FIELD", "locale")
MANIFEST_LOGIN_REDIRECT_URL = getattr(
    settings, "MANIFEST_LOGIN_REDIRECT_URL", reverse_lazy("profile_settings")
)
MANIFEST_LOGOUT_ON_GET = getattr(settings, "MANIFEST_LOGOUT_ON_GET", False)
# Accepted upload formats and limits for profile pictures.
MANIFEST_PICTURE_FORMATS = getattr(
    settings, "MANIFEST_PICTURE_FORMATS", ["jpeg", "gif", "png"]
)
MANIFEST_PICTURE_MAX_FILE = getattr(
    settings, "MANIFEST_PICTURE_MAX_FILE", 1024 * 1024
)
MANIFEST_PICTURE_MAX_SIZE = getattr(
    settings, "MANIFEST_PICTURE_MAX_SIZE", "1024 x 1024"
)
MANIFEST_PICTURE_PATH = getattr(settings, "MANIFEST_PICTURE_PATH", "manifest")
MANIFEST_REDIRECT_ON_LOGOUT = getattr(
    settings, "MANIFEST_REDIRECT_ON_LOGOUT", "/"
)
# NOTE(review): reads the legacy "ACCOUNTS_REMEMBER_ME_DAYS" settings key,
# and `("a month")` is a parenthesised string, not a tuple — confirm the
# intended (label, days) shape before relying on it.
MANIFEST_REMEMBER_DAYS = getattr(
    settings, "ACCOUNTS_REMEMBER_ME_DAYS", (("a month"), 30)
)
MANIFEST_SESSION_LOGIN = getattr(settings, "MANIFEST_SESSION_LOGIN", True)
MANIFEST_TIME_ZONE = getattr(settings, "TIME_ZONE", "Europe/Istanbul")
MANIFEST_USE_HTTPS = getattr(settings, "MANIFEST_USE_HTTPS", False)
MANIFEST_USE_MESSAGES = getattr(settings, "MANIFEST_USE_MESSAGES", True)
1798 | <reponame>mashaka/TravelHelper
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm
from django.contrib.auth import update_session_auth_hash, login, authenticate
from django.contrib import messages
from django.shortcuts import render, redirect
from social_django.models import UserSocialAuth
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect
from rest_framework.authtoken.models import Token
from app.methods import prepare_user
def get_token(request):
    """Issue (or fetch) a DRF auth token and redirect into the mobile app.

    Responds with a 302 redirect to the app's custom URL scheme carrying the
    token and user id, or to travel://error for anonymous users.
    """
    # BUGFIX: `if request.user:` is truthy even for AnonymousUser, so the
    # error branch was unreachable and token creation could crash for
    # anonymous requests; check authentication explicitly.
    if request.user.is_authenticated:
        user = request.user
        prepare_user(user)
        token, _ = Token.objects.get_or_create(user=user)
        url = "travel://?token=" + token.key + '&id=' + str(user.id)
    else:
        url = "travel://error"
    # Manual 302 so the custom scheme is preserved verbatim.
    response = HttpResponse(url, status=302)
    response['Location'] = url
    return response
@login_required
def get_facebook_token(request):
    """Debug view: dump the raw social-auth extra_data for Facebook.

    NOTE(review): this exposes the Facebook access token to the browser —
    presumably a development aid; confirm it is not routed in production.
    """
    q = get_object_or_404(UserSocialAuth, user=request.user, provider='facebook')
    return HttpResponse(str(q.extra_data))
def signup(request):
    """Render the static signup page."""
    return render(request, 'signup.html')
@login_required
def home(request):
    """Render the authenticated landing page."""
    return render(request, 'home.html')
@login_required
def settings(request):
    """Render the social-account settings page for the logged-in user.

    Only the Facebook association is shown; disconnecting is allowed only
    when the user would still be able to log in afterwards (another social
    association or a usable password).
    """
    user = request.user
    # NOTE: github/twitter lookups were removed — they were fetched (two
    # extra DB queries) but never passed to the template context.
    try:
        facebook_login = user.social_auth.get(provider='facebook')
    except UserSocialAuth.DoesNotExist:
        facebook_login = None
    # Allow disconnecting only if another login method remains.
    can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password())
    return render(request, 'settings.html', {
        'facebook_login': facebook_login,
        'can_disconnect': can_disconnect
    })
@login_required
def password(request):
    """Let the user change (or initially set) their password.

    Users without a usable password (social-auth only accounts) get the
    admin form, which does not ask for the old password.
    """
    if request.user.has_usable_password():
        PasswordForm = PasswordChangeForm
    else:
        PasswordForm = AdminPasswordChangeForm
    if request.method == 'POST':
        form = PasswordForm(request.user, request.POST)
        if form.is_valid():
            form.save()
            # Keep the session valid after the password change.
            update_session_auth_hash(request, form.user)
            messages.success(request, 'Your password was successfully updated!')
            return redirect('password')
        else:
            messages.error(request, 'Please correct the error below.')
    else:
        form = PasswordForm(request.user)
    return render(request, 'password.html', {'form': form})
| StarcoderdataPython |
139833 | <reponame>MissCatLady/AlarmEZ
"""passlib.ifc - abstract interfaces used by Passlib"""
#=============================================================================
# imports
#=============================================================================
# core
import logging; log = logging.getLogger(__name__)
import sys
# site
# pkg
# local
__all__ = [
"PasswordHash",
]
#=============================================================================
# 2.5-3.2 compatibility helpers
#=============================================================================
if sys.version_info >= (2,6):
    from abc import ABCMeta, abstractmethod, abstractproperty
else:
    # create stub for python 2.5
    # There is no `abc` module on 2.5: fall back to a plain metaclass and a
    # no-op decorator so the rest of the module still imports (the interface
    # simply is not enforced there).
    ABCMeta = type
    def abstractmethod(func):
        return func
    # def abstractproperty():
    #     return None
def create_with_metaclass(meta):
    """Return a class decorator that rebuilds a class under *meta*.

    A class that already uses *meta* as its metaclass is returned unchanged;
    otherwise an equivalent class is created via the three-argument
    metaclass call.  (Needed because `abc` is absent on Python 2.5 and
    py2/py3 spell metaclasses differently.)
    """
    def rebuild(cls):
        if type(cls) is meta:
            return cls
        return meta(cls.__name__, cls.__bases__, dict(cls.__dict__))
    return rebuild
#=============================================================================
# PasswordHash interface
#=============================================================================
class PasswordHash(object):
    """This class describes an abstract interface which all password hashes
    in Passlib adhere to. Under Python 2.6 and up, this is an actual
    Abstract Base Class built using the :mod:`!abc` module.
    See the Passlib docs for full documentation.
    """
    #===================================================================
    # class attributes (declared, not defined — provided by subclasses)
    #===================================================================
    #---------------------------------------------------------------
    # general information
    #---------------------------------------------------------------
    ##name
    ##setting_kwds
    ##context_kwds
    #---------------------------------------------------------------
    # salt information -- if 'salt' in setting_kwds
    #---------------------------------------------------------------
    ##min_salt_size
    ##max_salt_size
    ##default_salt_size
    ##salt_chars
    ##default_salt_chars
    #---------------------------------------------------------------
    # rounds information -- if 'rounds' in setting_kwds
    #---------------------------------------------------------------
    ##min_rounds
    ##max_rounds
    ##default_rounds
    ##rounds_cost
    #---------------------------------------------------------------
    # encoding info -- if 'encoding' in context_kwds
    #---------------------------------------------------------------
    ##default_encoding
    #===================================================================
    # primary methods
    #===================================================================
    @classmethod
    @abstractmethod
    def encrypt(cls, secret, **setting_and_context_kwds): # pragma: no cover -- abstract method
        """Encrypt *secret*, returning the resulting hash string."""
        raise NotImplementedError("must be implemented by subclass")
    @classmethod
    @abstractmethod
    def verify(cls, secret, hash, **context_kwds): # pragma: no cover -- abstract method
        """Verify *secret* against *hash*; returns True/False."""
        raise NotImplementedError("must be implemented by subclass")
    #===================================================================
    # additional methods
    #===================================================================
    @classmethod
    @abstractmethod
    def identify(cls, hash): # pragma: no cover -- abstract method
        """Check whether *hash* belongs to this scheme; returns True/False."""
        raise NotImplementedError("must be implemented by subclass")
    @classmethod
    @abstractmethod
    def genconfig(cls, **setting_kwds): # pragma: no cover -- abstract method
        """Compile settings into a configuration string for genhash()."""
        raise NotImplementedError("must be implemented by subclass")
    @classmethod
    @abstractmethod
    def genhash(cls, secret, config, **context_kwds): # pragma: no cover -- abstract method
        """Generate a hash for *secret*, using settings from *config*/hash string."""
        raise NotImplementedError("must be implemented by subclass")
    #===================================================================
    # undocumented methods / attributes
    #===================================================================
    # the following entry points are used internally by passlib,
    # and aren't documented as part of the exposed interface.
    # they are subject to change between releases,
    # but are documented here so there's a list of them *somewhere*.
    #---------------------------------------------------------------
    # checksum information - defined for many hashes
    #---------------------------------------------------------------
    ## checksum_chars
    ## checksum_size
    #---------------------------------------------------------------
    # CryptContext flags
    #---------------------------------------------------------------
    # hack for bsdi_crypt: if True, causes CryptContext to only generate
    # odd rounds values. assumed False if not defined.
    ## _avoid_even_rounds = False
    ##@classmethod
    ##def _bind_needs_update(cls, **setting_kwds):
    ##    """return helper to detect hashes that need updating.
    ##
    ##    if this method is defined, the CryptContext constructor
    ##    will invoke it with the settings specified for the context.
    ##    this method should return either ``None``, or a callable
    ##    with the signature ``needs_update(hash,secret)->bool``.
    ##
    ##    this ``needs_update`` function should return True if the hash
    ##    should be re-encrypted, whether due to internal
    ##    issues or the specified settings.
    ##
    ##    CryptContext will automatically take care of deprecating
    ##    hashes with insufficient rounds for classes which define fromstring()
    ##    and a rounds attribute - though the requirements for this last
    ##    part may change at some point.
    ##    """
    #---------------------------------------------------------------
    # experimental methods
    #---------------------------------------------------------------
    ##@classmethod
    ##def normhash(cls, hash):
    ##    """helper to clean up non-canonic instances of hash.
    ##    currently only provided by bcrypt() to fix an historical passlib issue.
    ##    """
    # experimental helper to parse hash into components.
    ##@classmethod
    ##def parsehash(cls, hash, checksum=True, sanitize=False):
    ##    """helper to parse hash into components, returns dict"""
    # experiment helper to estimate bitsize of different hashes,
    # implement for GenericHandler, but may be currently be off for some hashes.
    # want to expand this into a way to programmatically compare
    # "strengths" of different hashes and hash algorithms.
    # still needs to have some factor for estimate relative cost per round,
    # ala in the style of the scrypt whitepaper.
    ##@classmethod
    ##def bitsize(cls, **kwds):
    ##    """returns dict mapping component -> bits contributed.
    ##    components currently include checksum, salt, rounds.
    ##    """
    #===================================================================
    # eoc
    #===================================================================
# Rebuild PasswordHash under ABCMeta (a no-op class decorator on Python 2.5).
PasswordHash = create_with_metaclass(ABCMeta)(PasswordHash)
#=============================================================================
# eof
#=============================================================================
| StarcoderdataPython |
46974 | <filename>Implementations/New folder/E2.py
# Minimal validator/interpreter for statements of the form:
#   cout << token << token ... ;
# where each token is a double-quoted string, an unsigned integer, or `endl`.
command = input()
command = command.strip()
tokens = []
# BUGFIX: '8' was missing from the digit set, so any integer containing an 8
# was wrongly rejected with "ERROR".
numbers = ['0','1','2','3','4','5','6','7','8','9']
# --- Lexing: the statement must start with "cout" and end with ';'. ---
if (command[:4]=="cout" and command[-1]==';'):
    index = 4
    while(True):
        if (command[index]=='<' and command[index+1]=='<'):
            index += 2
            # Collect everything up to the next '<<' or the final ';'.
            s = ""
            while(command[index]!='<' and command[index]!=';'):
                s += command[index]
                index += 1
            tokens.append(s)
            if (command[index]==';'):
                break
            elif (command[index]=='<'):
                continue
            else:
                print("ERROR!")
                exit()
        else:
            print("ERROR")
            exit()
else:
    print("ERROR")
    exit()
print(tokens)
# --- Validation: classify each raw token. ---
cout = []
for t in tokens:
    to = t.strip()
    num = ""
    i = 0
    if (to[0]=='\"' and to[-1]=='\"'):
        # Quoted string: keep the contents without the quotes.
        cout.append(to[1:len(to)-1])
    elif (to[i] in numbers):
        # Unsigned integer: every character must be a digit.
        while(i != len(to) and to[i] in numbers):
            num += to[i]
            i += 1
        if (i != len(to)):
            print("ERROR")
            exit()
        else:
            cout.append(num)
    elif (to == "endl"):
        # Sentinel: -1 marks a newline in the output stream.
        cout.append(-1)
    else:
        print("ERROR!!")
        exit()
# --- Output: space-separated values, endl as a newline. ---
for p in cout:
    if p == -1:
        print('\n', end='')
    else:
        print(p, end=' ')
128596 | <reponame>digolds/dp<gh_stars>0
import pandas as pd
def _parse(args):
    """Translate the operator's *args* mapping into positional arguments.

    This operator takes no options, so the list is always empty.
    """
    return []
def _drop_empty_row(df):
return df.dropna(how='all')
name = 'drop-empty-row'
def operator(df, args):
    """Operator entry point: drop rows of *df* that are entirely empty.

    *args* is accepted for interface uniformity; _parse currently yields
    no extra positional arguments.
    """
    return _drop_empty_row(df, *_parse(args))
if __name__ == "__main__":
    # Smoke test: the last row is entirely missing and should be dropped.
    data = [['tom', 10], ['nick', 15], [None, None]]
    df = pd.DataFrame(data, columns = ['Name', 'Age'])
    args = {}
    operator(df, args)
3295936 | from brainstorming.email_verification import send_email_verification
from brainstorming.models import BrainstormingWatcher
from django.conf import settings
from django.core.mail import send_mail, send_mass_mail
from django.core.urlresolvers import reverse
from django.template.defaultfilters import truncatechars
from django.template.loader import render_to_string
def new_brainstorming(brainstorming, language=settings.LANGUAGE_CODE):
    """Email the brainstorming's access link to its creator.

    NOTE(review): *language* is accepted but never used — presumably meant
    to select a translated template; confirm before removing.
    """
    send_mail(u'Link for brainstorming "{0}"'.format(truncatechars(brainstorming.get_safe_question(), 20)),
              render_to_string('brainstorming/mails/new.txt', {'brain': brainstorming}),
              settings.FORM_MAIL,
              [brainstorming.creator_email],
              fail_silently=False)
def new_idea(idea, recipients, language=settings.LANGUAGE_CODE):
    """Notify every address in *recipients* about a newly posted idea.

    One message per recipient is sent through send_mass_mail (a single SMTP
    connection).  NOTE(review): *language* is accepted but never used.
    """
    message = render_to_string('brainstorming/mails/idea.txt', {
        'brain': idea.brainstorming,
        'idea': idea
    })
    mails = [(u'New idea at "{0}"'.format(truncatechars(idea.brainstorming.get_safe_question(), 20)),
              message,
              settings.FORM_MAIL,
              [r]) for r in recipients]
    send_mass_mail(mails, fail_silently=False)
def toggle_notification(brainstorming, email):
    """Start an email-verified subscribe/unsubscribe for *email*.

    If a watcher already exists the flow deactivates notifications,
    otherwise it activates them; the actual toggle happens in the callback
    view once the address is verified.  Returns {'status': 'add'|'remove'}.
    """
    url = reverse('notification', kwargs={'brainstorming_id': brainstorming.pk})
    status = 'add'
    action = 'activate'
    subject = 'Activate brainstorming notifications'
    if BrainstormingWatcher.objects.filter(brainstorming=brainstorming, email=email).exists():
        status = 'remove'
        action = 'deactivate'
        subject = 'Deactivate brainstorming notifications'
    send_email_verification(to=email,
                            subject=subject,
                            callback=url,
                            template='brainstorming/mails/toggle_notification.txt',
                            context={'action': action, 'brain': brainstorming})
    return {'status': status}
3239656 | #!/usr/bin/env python3.4
# encoding: utf-8
import glob
import os
if __name__ == '__main__':
    # Convert every example script into an .inc file that the Sphinx docs
    # pull in via a literal code-block directive.
    print('''
Formatting example scripts into rst files for the docs
''')
    for example_script in glob.glob('../example_scripts/*.py'):
        # glob can race with file deletion; skip anything that vanished.
        # (Was the non-idiomatic `os.path.exists(...) is False`.)
        if not os.path.exists(example_script):
            continue
        basename = os.path.basename(example_script)
        print('Reading: {0}'.format(example_script))
        inc_path = 'source/code_inc/{0}'.format(basename.replace('.py', '.inc'))
        with open(inc_path, 'w') as o:
            print('''.. code-block:: python\n''', file=o)
            with open(example_script) as infile:
                # Tab-indent each line so it sits inside the code-block.
                for line in infile:
                    print('\t{0}'.format(line.rstrip()), file=o)
3222582 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#import stdin
#import argparse
import re
"""
Script para determinar las posibles tonalidades a partir de las notas insertadas
Las notas se puede insertar del siguiente modo:
C
Cmaj7
Cmin7
Csus
C#
Por ahora, no se soporta poner "b". Cualquier bemol debe meterse como "#"
"""
def modo_jonico(nota):
    """Return the Ionian (jonico) chord scale built on *nota*.

    *nota* is a note name such as 'C' or 'F#'; a trailing 'maj7'/'min7'
    qualifier is stripped before the lookup.  Returns a list of eight chord
    names (degrees I..VIII), e.g. ['Cmaj7', 'Dmin7', ...].  Flats are not
    supported — accidentals must be written with '#'.
    """
    # Ionian interval pattern in semitones: W W H W W W H.
    cadencia = (2, 2, 1, 2, 2, 2, 1)
    # Chord quality of each degree ('semi' marks the half-diminished vii).
    tonos = ('maj7', 'min7', 'min7', 'maj7', 'dom', 'min7', 'semi', 'maj7')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    base = abanico_notas.index(nota.replace('maj7', '').replace('min7', ''))
    tono = [abanico_notas[base] + tonos[0]]
    # Walk the interval pattern chromatically, wrapping around the octave.
    for grado, paso in enumerate(cadencia, start=1):
        base += paso
        tono.append(abanico_notas[base % len(abanico_notas)] + tonos[grado])
    return tono
def modo_dorico(nota):
    """Return the Dorian (dorico) chord scale built on *nota*.

    Same contract as modo_jonico: accepts 'C'/'F#' style names (optionally
    suffixed 'maj7'/'min7'), returns the eight chord names of the mode.
    """
    # Dorian interval pattern in semitones: W H W W W H W.
    cadencia = (2, 1, 2, 2, 2, 1, 2)
    tonos = ('min7', 'semi', 'maj7', 'min7', 'min7', 'maj7', 'dom', 'min7')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    base = abanico_notas.index(nota.replace('maj7', '').replace('min7', ''))
    tono = [abanico_notas[base] + tonos[0]]
    for grado, paso in enumerate(cadencia, start=1):
        base += paso
        tono.append(abanico_notas[base % len(abanico_notas)] + tonos[grado])
    return tono
def modo_frigio(nota):
    """Return the Phrygian (frigio) chord scale built on *nota*.

    Same contract as modo_jonico: accepts 'C'/'F#' style names (optionally
    suffixed 'maj7'/'min7'), returns the eight chord names of the mode.
    """
    # Phrygian interval pattern in semitones: H W W W H W W.
    cadencia = (1, 2, 2, 2, 1, 2, 2)
    tonos = ('min7', 'maj7', 'dom', 'min7', 'dism', 'maj7', 'min7', 'min7')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    base = abanico_notas.index(nota.replace('maj7', '').replace('min7', ''))
    tono = [abanico_notas[base] + tonos[0]]
    for grado, paso in enumerate(cadencia, start=1):
        base += paso
        tono.append(abanico_notas[base % len(abanico_notas)] + tonos[grado])
    return tono
def modo_lidio(nota):
    """Return the Lydian (lidio) chord scale built on *nota*.

    Same contract as modo_jonico: accepts 'C'/'F#' style names (optionally
    suffixed 'maj7'/'min7'), returns the eight chord names of the mode.
    """
    # Lydian interval pattern in semitones: W W W H W W H.
    cadencia = (2, 2, 2, 1, 2, 2, 1)
    tonos = ('maj7', 'dom', 'min7', 'dism', 'maj7', 'min7', 'min7', 'maj7')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    base = abanico_notas.index(nota.replace('maj7', '').replace('min7', ''))
    tono = [abanico_notas[base] + tonos[0]]
    for grado, paso in enumerate(cadencia, start=1):
        base += paso
        tono.append(abanico_notas[base % len(abanico_notas)] + tonos[grado])
    return tono
def modo_mixolidio(nota):
    """Return the Mixolydian (mixolidio) chord scale built on *nota*.

    Same contract as modo_jonico: accepts 'C'/'F#' style names (optionally
    suffixed 'maj7'/'min7'), returns the eight chord names of the mode.
    """
    # Mixolydian interval pattern in semitones: W W H W W H W.
    cadencia = (2, 2, 1, 2, 2, 1, 2)
    tonos = ('dom', 'min7', 'dism', 'maj7', 'min7', 'min7', 'maj7', 'dom')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    base = abanico_notas.index(nota.replace('maj7', '').replace('min7', ''))
    tono = [abanico_notas[base] + tonos[0]]
    for grado, paso in enumerate(cadencia, start=1):
        base += paso
        tono.append(abanico_notas[base % len(abanico_notas)] + tonos[grado])
    return tono
def modo_eolico(nota):
    """Return the 8 chords (degrees I..VII plus the octave) of the Aeolian mode.

    `nota` may carry a 'maj7'/'min7' suffix, which is stripped before the
    root is located on the chromatic scale.
    Fixes: removed the unused `candidata` variable and dead debug comments.
    """
    # Aeolian step pattern in semitones: 1 - 1/2 - 1 - 1 - 1/2 - 1 - 1
    cadencia = (2, 1, 2, 2, 1, 2, 2)
    # Chord quality of each scale degree.
    tonos = ('min7', 'dism', 'maj7', 'min7', 'min7', 'maj7', 'dom', 'min7')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    base = abanico_notas.index(nota.replace('maj7', '').replace('min7', ''))
    tono = [abanico_notas[base] + tonos[0]]
    for indice, salto in enumerate(cadencia, 1):
        base += salto
        tono.append(abanico_notas[base % len(abanico_notas)] + tonos[indice])
    return tono
def modo_locria(nota):
    """Return the 8 chords (degrees I..VII plus the octave) of the Locrian mode.

    `nota` may carry a 'maj7'/'min7' suffix, which is stripped before the
    root is located on the chromatic scale.
    Fixes: removed the unused `candidata` variable and dead debug comments.
    """
    # Locrian step pattern in semitones: 1/2 - 1 - 1 - 1/2 - 1 - 1 - 1
    cadencia = (1, 2, 2, 1, 2, 2, 2)
    # Chord quality of each scale degree.
    tonos = ('dism', 'maj7', 'min7', 'min7', 'maj7', 'dom', 'min7', 'dism')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    base = abanico_notas.index(nota.replace('maj7', '').replace('min7', ''))
    tono = [abanico_notas[base] + tonos[0]]
    for indice, salto in enumerate(cadencia, 1):
        base += salto
        tono.append(abanico_notas[base % len(abanico_notas)] + tonos[indice])
    return tono
def chequeo_tono(tono, notas_array):
    """Return 1 if every note in `notas_array` occurs in the scale `tono`, else 0.

    A sharp note (e.g. 'F#') matches any chord name starting with it; a
    natural note only matches chord names containing no '#', so 'F' does
    not falsely match 'F#dism'.

    Bug fix: the original raised NameError for an empty `notas_array`
    because `candidata` was only assigned inside the loop; an empty note
    list is now (vacuously) accepted.
    """
    candidata = 1  # vacuously true when there are no notes to check
    for nota_buscada in notas_array:
        candidata = 0
        if '#' in nota_buscada:
            # Sharp notes: a plain prefix match is unambiguous.
            for acorde in tono:
                if acorde.startswith(nota_buscada):
                    candidata = 1
                    break
        else:
            # Natural notes: reject sharp chords sharing the same letter.
            for acorde in tono:
                if acorde.startswith(nota_buscada) and '#' not in acorde:
                    candidata = 1
                    break
        if not candidata:
            break
    return candidata
def main():
notas_input = raw_input("Inserta las notas separadas por espacio: ")
notas_array = notas_input.split(' ')
while ('' in notas_array):
notas_array.remove('')
#index = notas_array.index('')
#notas_array.pop(index)
posibles_tonos = []
for index in range(0,len(notas_array)):
#Chequeo <NAME> (I)
tono = modo_jonico(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Jonico I (maj7)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
#Chequeo Dorico Min7 (II)
tono = modo_dorico(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Dorico II (min7)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
#Chequeo Frigio Min7 (III)
tono = modo_frigio(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Frigio III (min7)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
#Chequeo <NAME> (IV)
tono = modo_lidio(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Lidio IV (maj7)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
#Chequeo Mixolidio Dom (V)
tono = modo_mixolidio(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Mixolidio V (dom)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
#Chequeo Eolico Min7 (VI)
tono = modo_eolico(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Eolico VI (min7)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
#Chequeo Locria (VII)
tono = modo_locria(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Locria VII (dism)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
if (len(posibles_tonos)):
print '\nPosibles tonalidades:'
for index in range(0,len(posibles_tonos)):
print ' # Tonalidad', posibles_tonos[index]['modo']
print ' Escala', posibles_tonos[index]['escala']
else:
print '\nNo se han encontrado posibles tonos'
#for line in sys.stdin:
# print line
##############
# Main Program
##############
if __name__ == "__main__":
    # Entry point: announce start, then run the interactive scale finder.
    print '\n## Script started\n'
    main()
print '\n## Script finished\n' | StarcoderdataPython |
3304372 | # -*- coding: utf-8 -*-
#
# Copyright 2017 dpa-infocom GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dsnparse
import logging
from datetime import datetime
from pymongo import DESCENDING
from motor.motor_asyncio import AsyncIOMotorClient
from bson.objectid import ObjectId
from livebridge.storages.base import BaseStorage
# Module-level logger for this storage backend.
logger = logging.getLogger(__name__)
class MongoStorage(BaseStorage):
    """MongoDB storage backend for livebridge posts and control data.

    The class is a singleton: every instantiation returns the same object,
    so one Motor client/connection pool is shared process-wide.

    Fixes over the original: the post document literal listed the
    "target_id" key twice, and the identical dict was built separately in
    insert_post and update_post with inconsistent `sticky` defaults
    (False vs 0 — same string result); both now share `_post_doc`.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        # Reuse the one-and-only instance once it has been created.
        if not cls._instance:
            cls._instance = super(MongoStorage, cls).__new__(cls)
            logger.debug("MongoDB client: {}".format(cls._instance))
        return cls._instance

    def __init__(self, **kwargs):
        """Store connection settings and derive the database name from the DSN.

        Raises AssertionError when the DSN does not name exactly one database.
        """
        self.dsn = kwargs.get("dsn", None)
        self.table_name = kwargs.get("table_name")
        self.control_table_name = kwargs.get("control_table_name")
        # The database name is the single path component of the DSN.
        info = dsnparse.parse(self.dsn)
        self.db_name = info.paths.pop() if len(info.paths) == 1 else ""
        assert self.db_name != "", "No database name provided in DSN connection string"

    @property
    async def db(self):
        """Return the Motor database handle, connecting lazily on first use."""
        if hasattr(self, "_db") and self._db:
            return self._db
        logger.debug("Connecting to {}".format(self.dsn))
        client = AsyncIOMotorClient(self.dsn)
        self._db = client[self.db_name]
        return self._db

    @staticmethod
    def _post_doc(**kwargs):
        """Build the MongoDB document for a post from keyword arguments."""
        return {
            "target_id": kwargs.get("target_id"),
            "post_id": str(kwargs.get("post_id")),
            "source_id": kwargs.get("source_id"),
            "text": kwargs.get("text") or " ",
            "sticky": str(int(kwargs.get("sticky", False))),
            "created": kwargs.get("created"),
            "updated": kwargs.get("updated"),
            "target_doc": kwargs.get("target_doc", ""),
        }

    async def setup(self):
        """Create the post and control collections if they do not exist.

        Returns True if anything was created, False otherwise (also on error).
        """
        try:
            db = await self.db
            collections = await db.list_collection_names()
            created = False
            if self.table_name not in collections:
                logger.info("Creating MongoDB collection [{}]".format(self.table_name))
                await db.create_collection(self.table_name)
                await db[self.table_name].create_index(
                    [("target_id", DESCENDING), ("post_id", DESCENDING)])
                created = True
            # Create the control collection if configured and not present yet.
            if self.control_table_name and self.control_table_name not in collections:
                logger.info("Creating MongoDB control data collection [{}]".format(
                    self.control_table_name))
                await db.create_collection(self.control_table_name)
                created = True
            return created
        except Exception as exc:
            logger.error("[DB] Error when setting up MongoDB collections: {}".format(exc))
            return False

    async def get_last_updated(self, source_id):
        """Return the most recent 'updated' timestamp for a source, or None."""
        try:
            coll = (await self.db)[self.table_name]
            cursor = coll.find({"source_id": source_id})
            cursor.sort("updated", -1).limit(1)
            async for doc in cursor:
                if doc.get("updated"):
                    return doc["updated"]
        except Exception as exc:
            logger.error("[DB] Error when querying for last updated item on {}".format(source_id))
            logger.exception(exc)
        return None

    async def get_known_posts(self, source_id, post_ids):
        """Return the subset of `post_ids` already stored for this source."""
        results = []
        try:
            object_ids = [ObjectId(post_id) for post_id in post_ids]
            coll = (await self.db)[self.table_name]
            cursor = coll.find({"source_id": source_id, "_id": {"$in": object_ids}})
            async for doc in cursor:
                results.append(str(doc["_id"]))
        except Exception as exc:
            logger.error("[DB] Error when querying for posts {}".format(post_ids))
            logger.exception(exc)
        return results

    async def get_post(self, target_id, post_id):
        """Return the stored post for (target_id, post_id), or None.

        The ObjectId is stringified so the document is JSON-serializable.
        """
        try:
            coll = (await self.db)[self.table_name]
            doc = await coll.find_one({"target_id": target_id, "post_id": post_id})
            if doc:
                doc["_id"] = str(doc["_id"])
                return doc
        except Exception as exc:
            logger.error("[DB] Error when querying for a post [{}] on {}".format(post_id, target_id))
            logger.error(exc)
        return None

    async def insert_post(self, **kwargs):
        """Insert a new post document. Returns True on success."""
        try:
            doc = self._post_doc(**kwargs)
            coll = (await self.db)[self.table_name]
            await coll.insert_one(doc)
            logger.info("[DB] Post {} {} was saved!".format(kwargs["source_id"], kwargs["post_id"]))
            return True
        except Exception as exc:
            logger.error("[DB] Error when saving {}".format(kwargs))
            logger.error(exc)
            return False

    async def update_post(self, **kwargs):
        """Replace the stored post identified by (target_id, post_id)."""
        try:
            doc = self._post_doc(**kwargs)
            coll = (await self.db)[self.table_name]
            await coll.replace_one(
                {"target_id": kwargs.get("target_id"), "post_id": kwargs.get("post_id")}, doc)
            logger.info("[DB] Post {} {} was updated!".format(kwargs.get("post_id"), kwargs.get("target_id")))
            return True
        except Exception as exc:
            logger.error("[DB] Error when updating for a post [{}] on {}".format(
                kwargs.get("post_id"), kwargs.get("target_id")))
            logger.error(exc)
            return False

    async def delete_post(self, target_id, post_id):
        """Delete one stored post. Returns True on success."""
        try:
            coll = (await self.db)[self.table_name]
            await coll.delete_one({"target_id": target_id, "post_id": post_id})
            logger.info("[DB] Post {} {} was deleted!".format(target_id, post_id))
            return True
        except Exception as exc:
            logger.error("[DB] Error when deleting for a post [{}] on {}".format(post_id, target_id))
            logger.error(exc)
            return False

    async def get_control(self, updated=None):
        """Return the control document, optionally only if newer than `updated`.

        Returns False when nothing matches or on error.
        """
        try:
            query = {"type": "control"}
            if updated:
                query["updated"] = {"$gt": updated}
            coll = (await self.db)[self.control_table_name]
            doc = await coll.find_one(query)
            if doc:
                return doc
        except Exception as exc:
            logger.error("[DB] Error when querying for a control data on {}".format(self.control_table_name))
            logger.error(exc)
        return False

    async def save_control(self, data):
        """Upsert the single control document. Returns True unless an exception occurs."""
        try:
            query = {"type": "control"}
            doc = {"type": "control", "data": data, "updated": datetime.now()}
            coll = (await self.db)[self.control_table_name]
            res = await coll.replace_one(query, doc, upsert=True)
            if res.modified_count != 1 and not res.upserted_id:
                logger.error("[DB] Control data was not saved.")
            else:
                logger.info("[DB] Control data was saved.")
            return True
        except Exception as exc:
            logger.error("[DB] Error when saving control data on {}".format(self.control_table_name))
            logger.error(exc)
            return False
| StarcoderdataPython |
197401 | from imdbTask9 import*
def directorName(movies):
    """Collect every director name from a list of movie records.

    Each movie dict carries a "director" list; the lists are flattened
    into one list, preserving order.
    """
    return [director for movie in movies for director in movie["director"]]
# Flat list of every director across all movies (order preserved, duplicates kept).
directorList=directorName(movieDetailsWithCashing)
def duplicateDirector(movies):
    """Return the unique director names, in first-seen order.

    NOTE: reads the module-level `directorList`; the `movies` argument
    is accepted but not used (kept for signature compatibility).
    """
    unique_directors = []
    for director in directorList:
        if director not in unique_directors:
            unique_directors.append(director)
    return unique_directors
# NOTE(review): this call rebinds the name `duplicateDirector` from the
# function above to its result list, so the function cannot be called again.
duplicateDirector=duplicateDirector(movieDetailsWithCashing)
def analyse_movies_directors(movies):
    """Map each unique director to how many times they appear in `directorList`.

    NOTE: reads the module-level `duplicateDirector` list and
    `directorList`; the `movies` argument is accepted but not used
    (kept for signature compatibility).
    """
    return {director: directorList.count(director) for director in duplicateDirector}
# Mapping of director name -> number of movies they directed.
countingOfDirectors=analyse_movies_directors(movieDetailsWithCashing)
# pprint (countingOfDirectors) | StarcoderdataPython |
4801100 | <filename>eventmanagement/events/views.py<gh_stars>1-10
from django.shortcuts import render
from django.http import HttpResponse, Http404
from .models import Event, Organization, Organizer, Participant, Speaker
# Create your views here.
def index(request):
    """Render the landing page listing every event."""
    context = {"events": Event.objects.all()}
    return render(request, "events/index.html", context)
def event(request, event_id):
    """Display a single event and its registered participants.

    NOTE(review): `Event.objects.get` raises DoesNotExist (HTTP 500) for an
    unknown id; consider `get_object_or_404` if a 404 is preferred.
    Removed the dead commented-out `non_participants` query.
    """
    event = Event.objects.get(pk=event_id)
    context = {
        'event': event,
        'participants': event.participants.all(),
    }
    return render(request, "events/displayEvent.html", context)
def register(request, event_id):
    # NOTE(review): `event_id` is accepted but not used here — presumably
    # the registration template handles the event itself; confirm against urls.py.
    return render(request, "events/register.html")
def participant(request, participant_id):
    """Render the list of events a participant has taken part in.

    Fixes: removed a leftover debug `print(allEvents)` and the dead
    commented-out legacy implementations.
    """
    participant = Participant.objects.get(id=participant_id)
    all_events = participant.partEvents.all()
    context = {
        'participant': participant,
        'allEvents': all_events,
    }
    return render(request, "participants/eventsParticipated.html", context)
# return render(request, "participants/eventsParticipated.html", instance) | StarcoderdataPython |
146160 | <filename>flexx/app/pair.py
"""
Base class for objects that live in both Python and JS.
This basically implements the syncing of signals.
"""
import sys
import json
import weakref
import hashlib
from .. import react
from ..react.hassignals import HasSignalsMeta, with_metaclass
from ..react.pyscript import create_js_signals_class, HasSignalsJS
from ..pyscript.functions import py2js, js_rename
from ..pyscript.parser2 import get_class_definition
from .serialize import serializer
# Py2/Py3 compatibility shim: tuple of string types for isinstance() checks
# (str on Python 3, basestring on Python 2).
if sys.version_info[0] >= 3:
    string_types = str,
else:  # pragma: no cover
    string_types = basestring,
# NOTE(review): this list appears unused in this module — get_pair_classes()
# derives its result from HasSignalsMeta.CLASSES instead; confirm before removing.
pair_classes = []
def get_pair_classes():
    """ Get a list of all known Pair subclasses.
    """
    classes = []
    for candidate in HasSignalsMeta.CLASSES:
        if issubclass(candidate, Pair):
            classes.append(candidate)
    return classes
def get_instance_by_id(id):
    """ Get instance of Pair class corresponding to the given id,
    or None if it does not exist.
    """
    return Pair._instances.get(id)
import json
class JSSignal(react.SourceSignal):
    """ A signal that represents a proxy to a signal in JavaScript.
    """

    # NOTE(review): `upstream` and `frame` are accepted for signature
    # compatibility but ignored; the mutable default `upstream=[]` is never
    # mutated here, so it is harmless.
    def __init__(self, func_or_name, upstream=[], frame=None, ob=None, doc=None):

        # Identity function: the proxy just relays the JS-side value.
        def func(v):
            return v

        if doc is not None:
            func.__doc__ = doc

        # Accept either a ready-made function or just a signal name.
        if isinstance(func_or_name, string_types):
            func.__name__ = func_or_name
        else:
            func.__name__ = func_or_name.__name__

        self._linked = False
        react.SourceSignal.__init__(self, func, [], ob=ob)

    def _subscribe(self, *args):
        # Link the JS-side signal when not yet linked. NOTE(review):
        # `_linked` is never set True in this class — presumably maintained
        # elsewhere; confirm, otherwise linking repeats on every subscribe.
        react.SourceSignal._subscribe(self, *args)
        if not self._linked:
            self.__self__._link_js_signal(self.name)

    def _unsubscribe(self, *args):
        # Unlink on the JS side once no downstream subscribers remain.
        react.SourceSignal._unsubscribe(self, *args)
        if self._linked and not self._downstream:
            self.__self__._link_js_signal(self.name, False)
class PySignal(react.SourceSignal):
    """ A signal in JS that represents a proxy to a signal in Python.
    """

    def __init__(self, name):

        # Identity function: the proxy just relays whatever value the
        # Python side pushes to it.
        def func(v):
            return v

        func._name = name
        react.SourceSignal.__init__(self, func, [])
class PyInputSignal(PySignal):
    """ A signal in JS that represents an input signal in Python. On
    the JS side, this can be used as an input too, although there is
    no validation in this case.
    """
    # Behavior is identical to PySignal; the subclass only distinguishes
    # proxies for react.InputSignal (see PairMeta.__init__).
    pass
class PairMeta(HasSignalsMeta):
    """ Meta class for Pair
    Set up proxy signals in Py/JS.
    """

    def __init__(cls, name, bases, dct):
        HasSignalsMeta.__init__(cls, name, bases, dct)

        # Magic methods that are allowed to be copied onto the JS class.
        OK_MAGICS = '__init__', '__json__', '__from_json__'

        # Create proxy signals on cls for each signal on JS
        if 'JS' in cls.__dict__:
            for name, val in cls.JS.__dict__.items():
                if isinstance(val, react.Signal) and not isinstance(val, PySignal):
                    if not hasattr(cls, name):
                        cls.__signals__.append(name)
                        setattr(cls, name, JSSignal(name, doc=val._func.__doc__))
                    elif isinstance(getattr(cls, name), JSSignal):
                        pass  # ok, overloaded signal on JS side
                    else:
                        print('Warning: JS signal %r not proxied, as it would hide a Py attribute.' % name)

        # Implicit inheritance for JS "sub"-class: build a fresh JS class
        # whose bases are the JS classes of the Python bases.
        jsbases = [getattr(b, 'JS') for b in cls.__bases__ if hasattr(b, 'JS')]
        JS = type('JS', tuple(jsbases), {})
        for c in (cls, ):  #cls.__bases__ + (cls, ):
            if 'JS' in c.__dict__:
                if '__init__' in c.JS.__dict__:
                    JS.__init__ = c.JS.__init__
                # Copy non-dunder attributes, plus the allowed magics.
                for name, val in c.JS.__dict__.items():
                    if not name.startswith('__'):
                        setattr(JS, name, val)
                    elif name in OK_MAGICS:
                        setattr(JS, name, val)
        cls.JS = JS

        # Create proxy signals on cls.JS for each signal on cls
        for name, val in cls.__dict__.items():
            if isinstance(val, react.Signal) and not isinstance(val, JSSignal):
                if not hasattr(cls.JS, name):
                    # Input signals get a distinct proxy type so the JS side
                    # can also use them as inputs.
                    if isinstance(val, react.InputSignal):
                        setattr(cls.JS, name, PyInputSignal(name))
                    else:
                        setattr(cls.JS, name, PySignal(name))
                elif isinstance(getattr(cls.JS, name), PySignal):
                    pass  # ok, overloaded signal on JS side
                else:
                    print('Warning: Py signal %r not proxied, as it would hide a JS attribute.' % name)

        # Set JS and CSS for this class
        cls.JS.CODE = cls._get_js()
        cls.CSS = cls.__dict__.get('CSS', '')

    def _get_js(cls):
        """ Get source code for this class.
        """
        cls_name = 'flexx.classes.' + cls.__name__
        base_class = 'flexx.classes.%s.prototype' % cls.mro()[1].__name__
        code = []
        # Add JS version of HasSignals when this is the Pair class
        if cls.mro()[1] is react.HasSignals:
            c = py2js(serializer.__class__, 'flexx.Serializer')
            code.append(c)
            code.append('flexx.serializer = new flexx.Serializer();')
            c = js_rename(HasSignalsJS.JSCODE, 'HasSignalsJS', 'flexx.classes.HasSignals')
            code.append(c)
        # Add this class
        code.append(create_js_signals_class(cls.JS, cls_name, base_class))
        if cls.mro()[1] is react.HasSignals:
            code.append('flexx.serializer.add_reviver("Flexx-Pair", flexx.classes.Pair.prototype.__from_json__);\n')
        return '\n'.join(code)
class Pair(with_metaclass(PairMeta, react.HasSignals)):
    """ Subclass of HasSignals representing Python-JavaScript object pairs

    Each instance of this class has a corresponding object in
    JavaScript, and their signals are synced both ways. Signals defined
    in Python can be connected to from JS, and vice versa.

    The JS version of this class is defined by the contained ``JS``
    class. One can define methods, signals, and (json serializable)
    constants on the JS class. Note that the inner ``JS`` class is not
    executed as Python: it is transpiled to JavaScript via
    ``PairMeta._get_js`` / ``create_js_signals_class``.

    Note:
        This class may be renamed. Maybe Object, PairObject, ModelView
        or something, suggestion welcome.

    Parameters:
        proxy: the proxy object that connects this instance to a JS client.
        kwargs: initial signal values (see HasSignals).

    Notes:
        This class provides the base object for all widget classes in
        ``flexx.ui``. However, one can also create subclasses that have
        nothing to do with user interfaces or DOM elements. You could e.g.
        use it to calculate pi on nodejs.

    Example:

        .. code-block:: py

            class MyPair(Pair):

                def a_python_method(self):
                    ...

                class JS:
                    FOO = [1, 2, 3]

                    def a_js_method(this):
                        ...
    """

    # Keep track of all instances, so we can easily collect all JS/CSS
    _instances = weakref.WeakValueDictionary()

    # Count instances to give each instance a unique id
    _counter = 0

    # CSS for this class (no css in the base class)
    CSS = ""

    def __json__(self):
        # Serialized form carries only the id; the receiver revives the
        # live object through a registered reviver.
        return {'__type__': 'Flexx-Pair', 'id': self.id}

    # NOTE(review): deliberately has no `self`/@staticmethod — it is only
    # referenced as a plain function (see serializer.add_reviver below).
    def __from_json__(dct):
        return get_instance_by_id(dct['id'])

    def __init__(self, proxy=None, **kwargs):
        # Set id and register this instance
        Pair._counter += 1
        self._id = self.__class__.__name__ + str(Pair._counter)
        Pair._instances[self._id] = self

        # Flag to implement eventual synchronicity
        self._seid_from_js = 0

        # Init proxy: fall back to the application-wide default proxy.
        if proxy is None:
            from .proxy import manager
            proxy = manager.get_default_proxy()
        self._proxy = proxy
        self._proxy.register_pair_class(self.__class__)

        # Instantiate JavaScript version of this class
        clsname = 'flexx.classes.' + self.__class__.__name__
        cmd = 'flexx.instances.%s = new %s(%r);' % (self._id, clsname, self._id)
        self._proxy._exec(cmd)

        self._init()

        # Init signals - signals will be connected updated, causing updates
        # on the JS side.
        react.HasSignals.__init__(self, **kwargs)

    def _init(self):
        """ Can be overloaded when creating a custom class.
        """
        pass

    @property
    def id(self):
        """ The unique id of this Pair instance. """
        return self._id

    @property
    def proxy(self):
        """ The proxy object that connects us to the runtime.
        """
        return self._proxy

    def __setattr__(self, name, value):
        # Sync attributes that are Pair instances: mirror the assignment
        # on the JS object by sending a serialized reference.
        react.HasSignals.__setattr__(self, name, value)
        if isinstance(value, Pair):
            txt = serializer.saves(value)
            cmd = 'flexx.instances.%s.%s = flexx.serializer.loads(%r);' % (self._id, name, txt)
            self._proxy._exec(cmd)

    def _set_signal_from_js(self, name, text, esid):
        """ Notes on synchronizing:
        - Py and JS both send updates when a signal changes.
        - JS does not send an update for signal updates received from Py.
        - Py does, to allow eventual synchronicity. Read on.
        - JS sends updates with a nonzero esid (eventual synchronicity
          id) and marks the corresponding signal with the same id.
        - Py sends an update with the esid that it got from JS, or 0
          if the signal originates from Py.
        - When JS receives an update from Py, it checks whether the
          seid is 0 (the signal originates from Py) or if the signal
          seid is 0 (the signal was updated from py since we last
          updated it from JS). If either is 0, it updates the signal
          value, and sets the signal esid to 0.
        """
        signal = getattr(self, name)
        value = serializer.loads(text)
        self._seid_from_js = esid  # to send back to js
        signal._set(value)

    def _signal_changed(self, signal):
        # Set esid to 0 if it originates from Py, or to what we got from JS
        esid = self._seid_from_js
        self._seid_from_js = 0
        # Only Python-native signals are forwarded; JSSignal proxies would
        # echo the value straight back to JS.
        if not isinstance(signal, JSSignal):
            #txt = json.dumps(signal.value)
            txt = serializer.saves(signal.value)
            cmd = 'flexx.instances.%s._set_signal_from_py(%r, %r, %r);' % (self._id, signal.name, txt, esid)
            self._proxy._exec(cmd)

    def _link_js_signal(self, name, link=True):
        """ Make a link between a JS signal and its proxy in Python.
        This is done when a proxy signal is used as input for a signal
        in Python.
        """
        # if self._proxy is None:
        #     self._initial_signal_links.discart(name)
        #     if link:
        #         self._initial_signal_links.add(name)
        # else:
        link = 'true' if link else 'false'
        cmd = 'flexx.instances.%s._link_js_signal(%r, %s);' % (self._id, name, link)
        self._proxy._exec(cmd)

    def call_js(self, call):
        # Execute an arbitrary method call on the JS counterpart, e.g.
        # call_js("some_method(1, 2)").
        cmd = 'flexx.instances.%s.%s;' % (self._id, call)
        self._proxy._exec(cmd)

    class JS:
        # NOTE: everything below is PyScript — it is transpiled to
        # JavaScript and never executed by the Python interpreter.

        def __json__(self):
            return {'__type__': 'Flexx-Pair', 'id': self.id}

        def __from_json__(dct):
            return flexx.instances[dct.id]

        def __init__(self, id):
            # Set id alias. In most browsers this shows up as the first element
            # of the object, which makes it easy to identify objects while
            # debugging. This attribute should *not* be used.
            self.__id = self._id = self.id = id

            self._linked_signals = {}  # use a list as a set

            # Call _init now. This gives subclasses a chance to init at a time
            # when the id is set, but *before* the signals are connected.
            self._init()

            # Call HasSignals __init__, signals will be created and connected.
            # Act signals relying on JS signals will fire.
            # Act signals relying on Py signals will fire later.
            super().__init__()

        def _init(self):
            pass

        def _set_signal_from_py(self, name, text, esid):
            value = flexx.serializer.loads(text)
            signal = self[name]
            # See the sync notes on the Python-side _set_signal_from_js.
            if esid == 0 or signal._esid == 0:
                self._signal_emit_lock = True  # do not send back to py
                signal._set(value)
                signal._esid = 0  # mark signal as updated from py

        def _signal_changed(self, signal):
            if flexx.ws is None:  # we could be exported or in an nbviewer
                return
            if self._signal_emit_lock:
                self._signal_emit_lock = False
                return
            signal._esid = signal._count  # mark signal as just updated by us
            # todo: what signals do we sync? all but private signals? or only linked?
            # signals like `children` should always sync, signals like a 100Hz
            # timer not, mouse_pos maybe neither unless linked against
            #if signal.signal_type == 'PyInputSignal' or self._linked_signals[signal._name]:
            if signal.signal_type != 'PySignal' and not signal._name.startswith('_'):
                #txt = JSON.stringify(signal.value)
                txt = flexx.serializer.saves(signal.value)
                flexx.ws.send('SIGNAL ' + [self.id, signal._esid, signal._name, txt].join(' '))

        def _link_js_signal(self, name, link):
            if link:
                self._linked_signals[name] = True
                signal = self[name]
                # Push the current value if the signal already has one.
                if signal._timestamp > 1:
                    self._signal_changed(self[name])
            elif self._linked_signals[name]:
                del self._linked_signals[name]

        ## JS event system

        # def _proxy_event(self, element, name):
        #     """ Easily get JS events from DOM elements in our event system.
        #     """
        #     that = this
        #     element.addEventListener(name, lambda ev: that.emit_event(name, {'cause': ev}), False)
        #
        # def _connect_js_event(self, element, event_name, method_name):
        #     """ Connect methods of this object to JS events.
        #     """
        #     that = this
        #     element.addEventListener(event_name, lambda ev: that[method_name](ev), False)
# Make pair objects de-serializable: the registered reviver reconstructs a
# live Pair from its {'id': ...} JSON form via get_instance_by_id.
serializer.add_reviver('Flexx-Pair', Pair.__from_json__)
| StarcoderdataPython |
3286125 | from tir import Webapp
from datetime import datetime
# Today's date in dd/mm/yyyy, used as the work date for the test session.
DataSystem = datetime.today().strftime('%d/%m/%Y')
import unittest
class MATA410(unittest.TestCase):
    """TIR UI tests for routine MATA410 (sales order entry) in module SIGAFAT."""

    @classmethod
    def setUpClass(inst):
        # One Webapp session shared by every test in this class.
        inst.oHelper = Webapp()
        inst.oHelper.Setup("SIGAFAT",DataSystem,"99","01","05")
        inst.oHelper.Program("MATA410")

    def test_MATA410_199(self):
        # Scenario: create a sales order, reopen it and verify its fields.
        order = 'FATT37'
        # --- Create the order ---
        self.oHelper.SetButton("Incluir")
        self.oHelper.SetBranch("01")
        self.oHelper.SetValue("C5_NUM", order)
        self.oHelper.SetValue("C5_TIPO","B")
        self.oHelper.SetValue("C5_TIPO","N")
        self.oHelper.SetValue("C5_CLIENTE","FAT001")
        self.oHelper.SetValue("C5_LOJACLI","01")
        self.oHelper.SetValue("C5_LOJAENT","00")
        self.oHelper.SetValue("C5_CONDPAG","003")
        # --- Item grid ---
        self.oHelper.SetValue("Produto", "FAT000000000000000000000000001", grid=True)
        self.oHelper.SetValue("Quantidade", "1,00", grid=True)
        self.oHelper.SetValue("C6_OPER", "01", grid=True)
        self.oHelper.SetValue("C6_DESCONT", "5,00", grid=True)
        self.oHelper.SetValue("C6_TES", "503", grid=True)
        self.oHelper.LoadGrid()
        self.oHelper.SetButton("Salvar")
        self.oHelper.SetButton("Cancelar")
        # --- Reopen the saved order and check its contents ---
        self.oHelper.SearchBrowse(f"01 {order}", "Filial+numero")
        self.oHelper.SetButton("Visualizar")
        self.oHelper.CheckResult("C5_NUM", order)
        self.oHelper.CheckResult("C5_TIPO","N")
        self.oHelper.CheckResult("C5_CLIENTE","FAT001")
        self.oHelper.CheckResult("C5_CONDPAG","003")
        self.oHelper.CheckResult("Produto", " FAT000000000000000000000000001", grid=True, line=1)
        self.oHelper.CheckResult("C6_DESCONT", " 5,00", grid=True, line=1)
        self.oHelper.LoadGrid()
        self.oHelper.SetButton("Cancelar")
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        # Close the shared Webapp session.
        inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
1644673 | <reponame>antonlydell/Pandemy
r"""Tests for the __init__ module of Pandemy."""
# =================================================
# Imports
# =================================================
# Standard Library
from datetime import date
import re
# Local
import pandemy
# =================================================
# Tests
# =================================================
class TestVersion:
    r"""Tests for the `__versiontuple__` and `__version__` attributes of Pandemy.

    Attributes
    ----------
    __versiontuple__ : Tuple[Union[int, str], ...]
        Describes the version of Pandemy in semantic versioning (MAJOR.MINOR.PATCH).

    __version__ : str
        The version of Pandemy represented as a string, derived from
        `__versiontuple__`.
    """

    def test__versiontuple__exists(self):
        r"""`__versiontuple__` must be present; getattr raises AttributeError otherwise."""
        getattr(pandemy, '__versiontuple__')

    def test__versiontuple__is_int_or_str(self):
        r"""Every element of `__versiontuple__` must be an int or a str."""
        for value in pandemy.__versiontuple__:
            assert isinstance(value, (int, str)), \
                'Not all values of __versiontuple__ are of type int or str'

    def test__version__exists(self):
        r"""`__version__` must be present; getattr raises AttributeError otherwise."""
        getattr(pandemy, '__version__')

    def test__version__is_str(self):
        r"""`__version__` must be a string."""
        assert isinstance(pandemy.__version__, str)

    def test__version__format(self):
        r"""`__version__` joins the version parts with '.', so its dot count
        is one less than the length of `__versiontuple__`."""
        nr_dots = pandemy.__version__.count('.')
        assert nr_dots == len(pandemy.__versiontuple__) - 1
class TestReleaseDate:
    r"""Tests for the `__releasedate__` attribute of Pandemy.

    Attributes
    ----------
    __releasedate__ : datetime.date
        The date when the version specified in `__versiontuple__` was released.
    """

    def test_is_date(self):
        r"""`__releasedate__` must be a datetime.date instance."""
        assert isinstance(pandemy.__releasedate__, date)

    def test_has_valid_release_year(self):
        r"""Pandemy 0.0.1 was first released in 2021, so the release year
        must be at least 2021."""
        assert pandemy.__releasedate__.year >= 2021
| StarcoderdataPython |
1790817 | """
(c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
https://opensource.org/licenses/MIT
# SPDX-License-Identifier: MIT
For information on SDPX, https://spdx.org/licenses/MIT.html
Input: Control-M/Enterprise Manager connection details and the name of a folder to order
Output: the run id and status URI returned by the Automation API order request
Change Log
Date (YMD) Name What
-------- ------------------ ------------------------
20200826 <NAME> Initial commit
"""
import json
from sys import exit
from urllib3 import disable_warnings
from urllib3.exceptions import NewConnectionError, MaxRetryError, InsecureRequestWarning
import controlm_py as ctm
from controlm_py.rest import ApiException
from aapi_conn import CtmConnection
from pprint import pprint
def run_folder(api_cli, ctm_server='ctm_server', ctm_folder=''):
    """
    Order (run) all jobs contained in a Control-M folder via the Run API.
    :param api_cli: property from CTMConnection object
    :param ctm_server: logical name of the ctm server
    :param ctm_folder: name of the folder to order
    :return: API response object (exposes run_id / status_uri) on success,
             None if the API call raised an ApiException.
    """
    # Instantiate the service
    run_api = ctm.api.run_api.RunApi(api_client=api_cli)
    # Parameters selecting which folder's jobs to run.
    data = ctm.OrderFolderParameters(ctm=ctm_server, folder=ctm_folder)
    # Fix: api_response was unbound when the call below raised, so the
    # final `return` crashed with NameError instead of reporting failure.
    api_response = None
    # Call the service
    try:
        # Execute requested jobs in certain folder
        api_response = run_api.order_jobs_in_folder(data=data)
    except ctm.rest.ApiException as e:
        print("Exception when calling RunApi->order_jobs_in_folder: %s\n" % e)
    return api_response
if __name__ == '__main__':
    # NOTE(review): connection details and credentials are hardcoded here;
    # '<PASSWORD>' is a redaction placeholder left by the dataset anonymizer.
    host_name = 'vl-aus-ctm-em01.ctm.bmc.com'
    host_port = '8443'
    host_ssl = True # server using https only
    aapi_user = 'CTMAPI'
    aapi_password = '<PASSWORD>'
    aapi_verify_ssl = False # server using self-signed SSL certs
    # Create connection to the AAPI server
    aapi_session = CtmConnection(host=host_name,port=host_port, ssl=host_ssl, verify_ssl=aapi_verify_ssl,
                                 user=aapi_user,password=<PASSWORD>,
                                 additional_login_header={'accept': 'application/json'})
    aapi_client = aapi_session.api_client
    # Order the demo folder and show what the API returned.
    ret = run_folder(aapi_client, ctm_server='psctm', ctm_folder='DCO_WeatherTest')
    # print(ret.__dict__)
    print(ret)
    print('\n')
    # print(dir(ret))
    print(ret.run_id)
    print(ret.status_uri)
    # Log out
    # if you prefer, you can call the destructor
    # Closing the program without logging out explicitly will cause an exception
    # The cause is that the network subsystem is shutdown prior to the execution of the destructor code that logs out.
    # Explicitly deleting the instance (or logging out) will avoid the exception.
    # del aapi_client # This deletes the instance of the CtmConnection object
    aapi_session.logout() # This logs out and causes the destructor to do nothing.
    exit(0)
| StarcoderdataPython |
176448 | <reponame>WeDias/RespCEV<filename>Exercicios-Mundo1/ex028.py
import random

# Guessing game: the user picks a number 0-5 and the computer draws one.
user = int(input('Digite um Número de 0 a 5: '))
pc = random.randint(0, 5)
# Fix: the winning message misspelled "escolheu" as "esolheu"; also use a
# single message template instead of two near-duplicate print calls.
resultado = 'PARABENS! VOCÊ ACERTOU !' if user == pc else 'VOCÊ PERDEU !'
print('O Computador escolheu o Número {}\n{}'.format(pc, resultado))
| StarcoderdataPython |
74715 | <filename>aiodogstatsd/contrib/aiohttp.py
from http import HTTPStatus
from typing import AsyncIterator, Callable, Optional, cast
from aiohttp import web
from aiohttp.web_app import _Middleware
from aiohttp.web_routedef import _SimpleHandler
from aiohttp.web_urldispatcher import DynamicResource, MatchInfoError
from aiodogstatsd import Client, typedefs
from aiodogstatsd.compat import get_event_loop
__all__ = (
"DEFAULT_CLIENT_APP_KEY",
"DEAFULT_REQUEST_DURATION_METRIC_NAME",
"cleanup_context_factory",
"middleware_factory",
)
DEFAULT_CLIENT_APP_KEY = "statsd"
DEAFULT_REQUEST_DURATION_METRIC_NAME = "http_request_duration"
def cleanup_context_factory(
    *,
    client_app_key: str = DEFAULT_CLIENT_APP_KEY,
    host: str = "localhost",
    port: int = 9125,
    namespace: Optional[typedefs.MNamespace] = None,
    constant_tags: Optional[typedefs.MTags] = None,
    read_timeout: float = 0.5,
    close_timeout: Optional[float] = None,
) -> Callable[[web.Application], AsyncIterator[None]]:
    """Build an aiohttp cleanup-context managing a statsd `Client`.

    The returned coroutine (for ``app.cleanup_ctx``) creates a `Client`,
    stores it under ``app[client_app_key]`` and connects it on startup;
    everything after the ``yield`` runs on shutdown and closes the client.
    """
    async def cleanup_context(app: web.Application) -> AsyncIterator[None]:
        # Expose the client on the application so handlers and the
        # middleware can reach it via app[client_app_key].
        app[client_app_key] = Client(
            host=host,
            port=port,
            namespace=namespace,
            constant_tags=constant_tags,
            read_timeout=read_timeout,
            close_timeout=close_timeout,
        )
        await app[client_app_key].connect()
        yield
        # Shutdown phase: flush and close the statsd client.
        await app[client_app_key].close()
    return cleanup_context
def middleware_factory(
    *,
    client_app_key: str = DEFAULT_CLIENT_APP_KEY,
    request_duration_metric_name: str = DEAFULT_REQUEST_DURATION_METRIC_NAME,
    collect_not_allowed: bool = False,
    collect_not_found: bool = False,
) -> _Middleware:
    """Build an aiohttp middleware that times each request.

    Emits a timing metric (milliseconds) tagged with method/path/status
    through the statsd client stored at ``app[client_app_key]``.
    NOTE(review): "DEAFULT" (sic) matches the module-level constant name.
    """
    @web.middleware
    async def middleware(
        request: web.Request, handler: _SimpleHandler
    ) -> web.StreamResponse:
        loop = get_event_loop()
        request_started_at = loop.time()
        # By default response status is 500 because we don't want to write any logic
        # for catching exceptions except exceptions which inherited from `web.HTTPException`.
        # And also we will override response status in case of any successful handler execution.
        response_status = cast(int, HTTPStatus.INTERNAL_SERVER_ERROR.value)
        try:
            response = await handler(request)
            response_status = response.status
        except web.HTTPException as e:
            # HTTP exceptions carry their status and are re-raised untouched.
            response_status = e.status
            raise e
        finally:
            # Runs on success AND on any exception, so failed requests are
            # timed too (with the 500 default or the HTTPException status).
            if _proceed_collecting(  # pragma: no branch
                request, response_status, collect_not_allowed, collect_not_found
            ):
                request_duration = (loop.time() - request_started_at) * 1000
                request.app[client_app_key].timing(
                    request_duration_metric_name,
                    value=request_duration,
                    tags={
                        "method": request.method,
                        "path": _derive_request_path(request),
                        "status": response_status,
                    },
                )
        return response
    return middleware
def _proceed_collecting(
    request: web.Request,
    response_status: int,
    collect_not_allowed: bool,
    collect_not_found: bool,
) -> bool:
    """Decide whether a metric should be emitted for this request.

    Unmatched routes (405/404) are skipped unless explicitly enabled.
    """
    if not isinstance(request.match_info, MatchInfoError):
        return True
    skip_not_allowed = (
        response_status == HTTPStatus.METHOD_NOT_ALLOWED and not collect_not_allowed
    )
    skip_not_found = (
        response_status == HTTPStatus.NOT_FOUND and not collect_not_found
    )
    return not (skip_not_allowed or skip_not_found)
def _derive_request_path(request: web.Request) -> str:
    """Return the path used for metric tagging.

    Dynamic resources report their canonical pattern (e.g. ``/users/{id}``)
    to keep metric cardinality bounded; extend this function if other
    AIOHTTP resource types need special handling.
    """
    resource = request.match_info.route.resource
    if isinstance(resource, DynamicResource):
        return resource.canonical
    return request.path
| StarcoderdataPython |
4807404 | <gh_stars>1-10
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse, reverse_lazy
from ..models import Post
from taggit.models import Tag
class SetQueryTestCase(TestCase):
    """Shared fixture: one user, one post, and three tags on that post."""

    def setUp(self):
        user_model = get_user_model()
        self.test_user = user_model.objects.create_user(
            username='test_user',
            email='<EMAIL>',
            password='<PASSWORD>',
        )
        self.post = Post.objects.create(
            user=self.test_user,
            title='test_title',
            content='This is a test content.',
        )
        # self.tag ends up referencing the last created tag, as before.
        for tag_name in ('test_tag1', 'test_tag2', 'test_tag3'):
            self.tag = Tag.objects.create(name=tag_name)
            self.post.tags.add(self.tag)
class TestIndexView(SetQueryTestCase):
    """The post index and its tag/search/trend variants all list the fixture post."""

    def _assert_single_post(self, response):
        # Every index variant should respond 200 and contain exactly the
        # fixture post.
        self.assertEqual(response.status_code, 200)
        self.assertQuerysetEqual(response.context['post_list'], ['<Post: {}>'.format(self.post.title)])
        self.assertContains(response, self.post.title)

    def test_get(self):
        self._assert_single_post(self.client.get(reverse('post:index')))

    def test_tag_get(self):
        url = reverse('post:index_tag', kwargs={'tag': 'test_tag1'})
        self._assert_single_post(self.client.get(url))

    def test_search_get(self):
        self._assert_single_post(
            self.client.get(reverse('post:index'), {'key_word': 'test'}))

    def test_trend_get(self):
        self._assert_single_post(self.client.get(reverse('post:index_trend')))
class TestDetailView(SetQueryTestCase):
    """Detail page renders title and content; unknown pk yields 404."""

    def test_get(self):
        response = self.client.get(reverse('post:post_detail', kwargs={'pk': self.post.pk}))
        self.assertEqual(response.status_code, 200)
        for fragment in (self.post.title, self.post.content):
            self.assertContains(response, fragment)

    def test_not_get(self):
        missing_pk = 10  # no Post with this pk exists in the fixture
        response = self.client.get(reverse('post:post_detail', kwargs={'pk': missing_pk}))
        self.assertEqual(response.status_code, 404)
class TestInquiryView(SetQueryTestCase):
    """Inquiry form: GET renders, valid POST redirects, empty POST shows errors."""

    def test_get(self):
        self.assertEqual(self.client.get(reverse('post:inquiry')).status_code, 200)

    def test_post_success(self):
        form_data = {
            'name': 'test_user',
            'email': '<EMAIL>',
            'title': 'test_title',
            'message': 'This is a test message.',
        }
        response = self.client.post(reverse('post:inquiry'), form_data)
        self.assertRedirects(response, reverse_lazy('post:inquiry'))

    def test_post_failure(self):
        # NOTE(review): `params={}` is passed as an extra keyword (not form
        # data); the effect is an empty POST, which is what this asserts on.
        response = self.client.post(reverse('post:inquiry'), params={})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'errorlist')
class TestCommonTemplateView(TestCase):
    """Static pages (privacy policy, profile) simply render with HTTP 200."""

    def test_privacy_get(self):
        self.assertEqual(self.client.get(reverse('post:privacy')).status_code, 200)

    def test_profile_get(self):
        self.assertEqual(self.client.get(reverse('post:profile')).status_code, 200)
| StarcoderdataPython |
75592 | <gh_stars>0
# Read the documentation here:
# https://nose.readthedocs.org/en/latest/testing.html
# Modify the import path to find our package
import sys
import os.path
sys.path = [os.path.abspath("../scanr_publicationextractor")] + sys.path
sys.path = [os.path.abspath("..")] + sys.path
# Import our package
from scanr_publicationextractor.extractor import _from_line, _pq_from_html, _get_dois
from scanr_publicationextractor.line_tokenizer import get_lines
def test_get_doi():
    """A dx.doi.org URL in a citation line is extracted as a DOI."""
    text = "<NAME>. 2015, Les mondes de la chasse. Contribution à une étude de rapports sociaux spatialisés en Seine-et-Marne et en Côte d'Or. http://dx.doi.org/10.2345/125 12"
    assert list(_get_dois(text)) == ["10.2345/125"]
def test_from_line():
    """_from_line returns (True, doi) when a DOI is found, else (False, line)."""
    # Explicit "doi:" suffix.
    with_doi = "“Aminobisphosphonates synergize with HCMV to activate the antiviral activity of Vg9Vd2 cells“, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Journal of Immunology (2016), in press. doi:10.4049/jimmunol.1501661 "
    assert _from_line(with_doi) == (True, "10.4049/jimmunol.1501661")
    # No DOI anywhere: the original line comes back untouched.
    without_doi = "<NAME>. 2015, Les mondes de la chasse. Contribution à une étude de rapports sociaux spatialisés en Seine-et-Marne et en Côte d'Or, Thèse pour le Doctorat en Sociologie sous la direction de <NAME> et <NAME>, Sciences Po Paris, 07 juillet, 645 + XXXIV p."
    assert _from_line(without_doi) == (False, without_doi)
    # A dx.doi.org URL also counts as a DOI.
    with_url_doi = "<NAME>. 2015, Les mondes de la chasse. Contribution à une étude de rapports sociaux spatialisés en Seine-et-Marne et en Côte d'Or. http://dx.doi.org/10.2345/125 12"
    assert _from_line(with_url_doi) == (True, "10.2345/125")
def test_line_tokenizer():
    """get_lines splits on <p> and double <br>, ignoring a single <br>."""
    html = "<p>stuff</p><p>other \n stuff</p><p>third<br/>stuff</p><p>hey<img/><br>ho</p>"
    assert get_lines(_pq_from_html(html)) == ["stuff", "other stuff", "third stuff", "hey ho"]
    # A double <br> splits one paragraph into two lines.
    html = "<p>hey<br>ho<br><br>heyo</p>"
    assert get_lines(_pq_from_html(html)) == ["hey ho", "heyo"]
    # Adjacent inline tags tokenize into a single line.
    html = "<i>Nature</i> <a>doi:stuff</a>"
    assert get_lines(_pq_from_html(html)) == ["Nature doi:stuff"]
| StarcoderdataPython |
47480 | Perfect Number
Given a positive integer N as the input, the program must print yes if N is a perfect number. Else no must be printed.
Input Format: The first line contains N.
Output Format: The first line contains yes or no
Boundary Conditions: 1 <= N <= 999999
Example Input/Output 1:
Input: 6
Output:
yes
Example Input/Output 2:
Input: 8
Output:
no
def is_perfect(number):
    """Return True if *number* equals the sum of its proper divisors.

    A perfect number (e.g. 6 = 1 + 2 + 3) equals the sum of its positive
    divisors excluding itself. Numbers below 2 are never perfect.
    """
    if number < 2:
        return False
    return number == sum(d for d in range(1, number) if number % d == 0)


if __name__ == "__main__":
    # Same CLI contract as before: read N, print "yes" or "no".
    n = int(input())
    print("yes" if is_perfect(n) else "no")
| StarcoderdataPython |
3303022 | <reponame>maximskorik/RIAssigner
from typing import List, Iterable
from RIAssigner.data.Data import Data
from .ComputationMethod import ComputationMethod
class Kovats(ComputationMethod):
    """ Class to compute the Kovats retention index. """
    def compute(self, query: Data, reference: Data) -> List[Data.RetentionIndexType]:
        """ Compute non-isothermal Kovats retention index.
        For details see https://webbook.nist.gov/chemistry/gc-ri/.
        Parameters
        ----------
        query:
            Dataset for which to compute retention indices.
        reference:
            Reference dataset with retention times and retention indices
        Returns
        -------
        retention_indices: List[Data.RetentionIndexType]
            List of computed retention indices
        """
        self._check_data_args(query, reference)
        index = 0
        # Copy rts and ris and insert 0 in the beginning, so that interpolation always starts at 0,0 to the first reference compound.
        reference_rts = [0.0] + list(reference.retention_times)
        reference_ris = [0.0] + list(reference.retention_indices)
        # NOTE(review): `index` stays 0 here; each _compute_ri call re-scans
        # the reference list from its given starting index.
        retention_indices = [
            self._compute_ri(target_rt, reference_rts, reference_ris, index)
            for target_rt in query.retention_times
        ]
        return retention_indices
    def _compute_ri(self,
                    target_rt: Data.RetentionTimeType,
                    reference_rts: Iterable[Data.RetentionTimeType],
                    reference_ris: Iterable[Data.RetentionTimeType],
                    index: int) -> Data.RetentionIndexType:
        """Compute retention index for target retention time.
        Returns None for retention times that Data.is_valid rejects.
        Args:
            target_rt (Data.RetentionTimeType): Retention time for which to compute the index
            reference_rts (Iterable[Data.RetentionTimeType]): Reference retention times
            reference_ris (Iterable[Data.RetentionTimeType]): Reference retention indices
            index (int): Current reference index
        Returns:
            Data.RetentionIndexType: Computed retention index
        """
        ri = None
        if Data.is_valid(target_rt):
            # Advance to the first reference eluting at/after the target,
            # then interpolate between that compound and its predecessor.
            index = _update_index(target_rt, reference_rts, index)
            ri = _compute_kovats(target_rt, reference_rts,
                                 reference_ris, index)
        return ri
def _update_index(target_rt: float, reference_rts: Iterable[Data.RetentionTimeType], index: int):
""" Get the indices of previosly eluting and next eluting reference compounds.
Retention times in 'Data' objects are sorted in ascending order, so this method assumes
that 'reference_rt' is sorted in ascending order.
Parameters
----------
reference_rts
Retention times of reference compounds.
"""
if target_rt > max(reference_rts) or index >= len(reference_rts):
index = len(reference_rts) - 1
else:
while reference_rts[index] < target_rt:
index += 1
return index
def _compute_kovats(
target_rt: float,
reference_rts: Iterable[Data.RetentionTimeType],
reference_ris: Iterable[Data.RetentionIndexType],
index: int) -> float:
"""Compute retention index according to <NAME> (see https://webbook.nist.gov/chemistry/gc-ri/)
Args:
target_rt (float): Retention time for which to compute the RI
reference_rts (Iterable[Data.RetentionTimeType]): Reference data retention times
reference_ris (Iterable[Data.RetentionIndexType]): Reference data retention indices
index (int): Higher index of reference compound (n+1)
Returns:
Data.RetentionIndexType: Computed retention index
"""
term_a = target_rt - reference_rts[index - 1]
term_b = reference_rts[index] - reference_rts[index - 1]
ri = 100 * term_a / term_b + reference_ris[index - 1]
return float(ri)
| StarcoderdataPython |
51204 | <gh_stars>0
import discord
from discord.ext import commands
from evs import default
from evs import permissions, default, http, dataIO
import requests
import os
class Autoupdate_ko(commands.Cog):
    """Owner-only cog that hot-updates/removes other cogs from the GitHub repo."""
    def __init__(self, bot):
        self.bot = bot
        self.config = default.get("config.json")
    # Commands
    @commands.command()
    @commands.check(permissions.is_owner)
    async def 업데이트(self, ctx, filename: str):
        """Download <filename>.py from GitHub, replace the local cog and (re)load it."""
        await ctx.trigger_typing()
        await ctx.send("소스코드 업데이트 중...")
        link = "https://raw.githubusercontent.com/Shio7/Keter/master/cogs/" + filename + ".py"
        r = requests.get(link, allow_redirects=True)
        if os.path.isfile('./cogs/' + filename + ".py"):
            try:
                # Fix: the extension path must embed the actual cog name; the
                # placeholder-damaged f-string could never match a real cog.
                self.bot.unload_extension(f"cogs.{filename}")
            except Exception as e:
                return await ctx.send(default.traceback_maker(e))
            await ctx.send(f"Unloaded extension **{filename}.py**")
            os.remove('./cogs/' + filename + ".py")
            open('./cogs/' + filename + ".py", 'wb').write(r.content)
        else:
            open('./cogs/' + filename + ".py", 'wb').write(r.content)
        await ctx.send("업데이트 완료: "+filename+".py")
        # Loads the freshly downloaded extension.
        try:
            self.bot.load_extension(f"cogs.{filename}")
        except Exception as e:
            return await ctx.send(default.traceback_maker(e))
        await ctx.send(f"**{filename}.py 로드 완료**")
    @commands.command()
    @commands.check(permissions.is_owner)
    async def 지우기(self, ctx, filename: str):
        """Unload and delete <filename>.py from the cogs directory."""
        if os.path.isfile('./cogs/' + filename + ".py"):
            try:
                self.bot.unload_extension(f"cogs.{filename}")
            except Exception as e:
                return await ctx.send(default.traceback_maker(e))
            await ctx.send(f"Unloaded extension **{filename}.py**")
            os.remove('./cogs/' + filename + ".py")
            await ctx.send(f"**{filename}.py** 삭제완료")
        else:
            await ctx.send(f"**{filename}.py 찾을 수 없음**")
def setup(bot):
    """Entry point used by discord.py's load_extension."""
    cog = Autoupdate_ko(bot)
    bot.add_cog(cog)
| StarcoderdataPython |
99126 | import itertools
# combine iterators
it = itertools.chain([1, 2, 3], [4, 5, 6])
# Fix: the chained iterator was built but never consumed (it was
# immediately overwritten below); materialize and show it.
chained = list(it)
print(chained)

# repeat a value
it = itertools.repeat("hello", 3)
print(list(it))

# repeat an iterator's items
it = itertools.cycle([1, 2])
result = [next(it) for _ in range(10)]
print(result)

# split an iterator
it1, it2, it3 = itertools.tee(["first", "second"], 3)
print(list(it1))
print(list(it2))
print(list(it3))

# zip unequal length iterators with a default value
keys = ["one", "two", "three"]
values = [1, 2]
it = itertools.zip_longest(keys, values)
longest = list(it)
print(longest)
| StarcoderdataPython |
35629 | <gh_stars>1-10
from abc import ABC, abstractmethod
class Command(ABC):
    """Abstract base for undoable operations."""

    @abstractmethod
    def execute(self):
        """Perform the operation."""

    @abstractmethod
    def un_execute(self):
        """Revert the operation."""
class AddCommand(Command):
    """Append a fixed value to the shared list; undo drops that element."""

    def __init__(self, values, new_value):
        self.values, self.new_value = values, new_value

    def execute(self):
        self.values.append(self.new_value)

    def un_execute(self):
        # The appended element is always the last one.
        del self.values[-1]
class SumCommand(Command):
    """Read-only command: report the sum of the shared list."""

    def __init__(self, values):
        self.values = values

    def execute(self):
        return sum(self.values)

    # Undoing a read-only query is the same query again.
    un_execute = execute
class RemoveLastCommand(Command):
    """Remove the last element; undo appends it back."""

    def __init__(self, values):
        self.values = values
        self.removed_value = None

    def execute(self):
        # Remember the element so un_execute() can restore it.
        self.removed_value = self.values.pop(-1)

    def un_execute(self):
        restored, self.removed_value = self.removed_value, None
        self.values.append(restored)
class RemoveFirstCommand(Command):
    """Remove the first element; undo re-inserts it at the front."""

    def __init__(self, values):
        self.values = values
        self.removed_value = None

    def execute(self):
        # Remember the element so un_execute() can restore it.
        self.removed_value = self.values.pop(0)

    def un_execute(self):
        restored, self.removed_value = self.removed_value, None
        self.values.insert(0, restored)
class CommandsMemento:
    """Snapshot of the command target list taken at construction time."""

    def __init__(self, values):
        # Copy so later mutations of *values* do not alter the snapshot.
        self.state = list(values)
# Demo driver: read commands from stdin until END, execute them in order,
# then undo them in reverse order, printing the list along the way.
commands = []
values = []
while True:
    command_text = input()
    if command_text == 'END':
        break
    if command_text == 'REMOVE_LAST':
        command = RemoveLastCommand(values)
    elif command_text == 'REMOVE_FIRST':
        command = RemoveFirstCommand(values)
    elif command_text == 'SUM':
        command = SumCommand(values)
    else:
        # Anything else is expected to look like "ADD <int>".
        _, value = command_text.split(' ')
        command = AddCommand(values, int(value))
    commands.append(command)
mementos = []
# NOTE(review): execute()/un_execute() return None for Add/Remove commands,
# so these prints emit "None" for everything except SUM.
for command in commands:
    print(command.execute())
# NOTE(review): mementos is never populated, so this loop prints nothing —
# presumably a leftover from the memento example.
for memento in mementos:
    print(memento.state)
print('----')
print(values)
for command in commands[::-1]:
    print(command.un_execute())
print(values)
"""
ADD 5
ADD 6
SUM
REMOVE_FIRST
ADD 3
ADD 7
SUM
REMOVE_LAST
SUM
REMOVE_LAST
SUM
REMOVE_LAST
SUM
END
"""
| StarcoderdataPython |
1705561 | #!/usr/bin/env python
import rospy
import rosnode
import roslaunch
from geometry_msgs.msg import Twist
from std_msgs.msg import String, Float64, UInt8
from std_srvs.srv import Empty
from std_srvs.srv import EmptyResponse
# @param timeout_ms Timeout for publishing in the topic (Warning: Precision of 10 miliseconds)
class Monitor_IsAlive():
    """Watchdog for a ROS topic: fires a callback when the topic goes silent.

    Subscribes to *topic* and runs a 10 ms timer; every received message
    resets an internal counter, and when the counter reaches
    timeout_ms / 10 ticks the attached callback is invoked once.

    :param topic: topic name without the leading '/'
    :param msg_type: message class used for the subscription
    :param timeout_ms: silence threshold in milliseconds (precision: 10 ms)
    """
    def __init__(self, topic, msg_type, timeout_ms):
        self.enabled = True
        self.running = False
        self.topic = topic
        # Fix: make sure the attribute always exists so Start() cannot raise
        # AttributeError when AttachCallback() was never called.
        self.actionCallback = None
        if timeout_ms < 10:
            rospy.logwarn("Monitor: Timeout value too small. Timer precision is 10 milliseconds")
            self.enabled = False
        else:
            # get_published_topics() yields [name, type] pairs; flatten both
            # columns and look for the absolute topic name.
            if not ('/' + self.topic) in [item for i in rospy.get_published_topics() for item in i]:
                rospy.logwarn("Monitor: Unable to subscribe to topic %s", self.topic)
                self.enabled = False
                return
            else:
                self.subs = rospy.Subscriber(topic, msg_type, self.TopicCallback)
                # 10 ms periodic timer drives the timeout counter.
                self.timer = rospy.Timer(rospy.Duration(0.01), self.TimerCallback, False)
                self.timeoutValue = int(timeout_ms / 10)
    def AttachCallback(self, timeoutCallback):
        """Register the function invoked when the topic times out."""
        self.actionCallback = timeoutCallback
    def Start(self):
        """Arm the watchdog; requires a valid setup and an attached callback."""
        if self.enabled and callable(self.actionCallback):
            self.timeoutCounter = 0
            self.running = True
    # def Stop(self):
    #     self.timer.shutdown()
    def TopicCallback(self, data):
        # Any message on the topic proves liveness: reset the timeout counter.
        self.timeoutCounter = 0
    def TimerCallback(self, *args):
        if not self.enabled:
            self.running = False
        if self.running:
            self.timeoutCounter += 1
            if self.timeoutCounter >= self.timeoutValue:
                self.actionCallback()  # topic went silent: fire the action once
                self.running = False
160874 | from car import Car
class UberBlack(Car):
    """Uber Black ride: a Car restricted to accepted vehicle types and seat materials."""

    # Fix: removed the class-level mutable list attributes; they were shared
    # across instances and always shadowed by the assignments in __init__.
    def __init__(self, lincense, driver, typeCarAccepted, seatsMaterials):
        # NOTE(review): 'lincense' (sic) matches the base-class spelling — confirm.
        super().__init__(lincense, driver)
        self.typeCarAccepted = typeCarAccepted
        self.seatsMaterials = seatsMaterials
1732481 | import numpy as np
from neuron import SimpleNeuron
from helpers.diagnostic_helpers import time_measure
class NeuralNetwork(object):
    """Small fully-connected feed-forward network built from SimpleNeuron objects."""

    def __init__(self, layer_size=None, activation_function=None, activation_fun_prime=None):
        """
        Default activation function: np.tanh(x)
        Default learning function: self.eta * sigma * x * ((1 - x) * (1 + x))
        Other examples of functions:
            sigmoid: 1 / (1 + np.exp(-x))
            sigmoid prime: np.exp(-x) / ((1 + np.exp(-x)) ** 2)
        :param layer_size (optional) - list that defines number of neurons of each layer; default value: [2, 2, 1]
        :param activation_function (optional) - lambda expression; default value: lambda x: np.tanh(x)
        :param activation_fun_prime (optional) - lambda expression; default value: lambda x: (1 - x) * (1 + x)
        """
        # Fix: [2, 2, 1] used to be a mutable default argument shared between
        # instances; create a fresh list per instance instead.
        if layer_size is None:
            layer_size = [2, 2, 1]
        self._layer_size = layer_size
        self._layers = list()
        self.verbose = True
        self.eta = 2 * 0.05  # teaching speed
        self.activation_function = activation_function
        if self.activation_function is None:
            self.activation_function = lambda x: np.tanh(x)
        self.activation_fun_prime = activation_fun_prime
        if self.activation_fun_prime is None:
            self.activation_fun_prime = lambda x: (1 - x) * (1 + x)  # derivative of activation_function
        self.learning_function = lambda x, y, sigma: self.eta * sigma * x * self.activation_fun_prime(x)
        # First layer neurons are pass-throughs (single weight fixed to 1);
        # every deeper layer is fully connected to the previous one.
        for i in range(len(self._layer_size)):
            layer_i = list()
            for j in range(self._layer_size[i]):
                if i == 0:
                    neuron = SimpleNeuron(1, weights=[1],
                                          activation_function=self.activation_function)
                else:
                    neuron = SimpleNeuron(len(self._layers[i - 1]), activation_function=self.activation_function)
                layer_i.append(neuron)
            self._layers.append(layer_i)

    def __repr__(self):
        str_ = ""
        for i in range(len(self._layers)):
            str_ += "Layer {0}:\r\n".format(i)
            for j in range(len(self._layers[i])):
                str_ += "\t{0}. {1}\r\n".format(j, self._layers[i][j])
        return str_

    def _print(self, *args):
        # Verbose-gated logging helper used by run/learn.
        if self.verbose:
            print(" ".join([str(x) for x in args]))

    def last_layer_idx(self):
        """
        Gets index of last layer
        :return index of last layer
        """
        return len(self._layers) - 1

    def get_neurons_count(self, layer_idx):
        """
        Gets neurons count on selected layer
        :param layer_idx - layer index
        :return neurons count
        """
        return len(self._layers[layer_idx])

    def get_layers_count(self):
        """
        Gets layers count
        :return layers count
        """
        return len(self._layers)

    def set_neurons_weights(self, layer_idx, neuron_idx, weights):
        """
        Sets weights of selected neuron
        :param layer_idx - layer index
        :param neuron_idx - neuron index
        :param weights - list of weights (ints/floats)
        """
        self._layers[layer_idx][neuron_idx].weights = weights

    def get_neuron(self, layer_idx, neuron_idx):
        """
        Gets Neuron
        :param layer_idx - layer index
        :param neuron_idx - neuron index
        :return Neuron object
        """
        return self._layers[layer_idx][neuron_idx]

    def get_neurons(self, layer_idx):
        """
        Gets Neurons from selected layer
        :param layer_idx - layer index
        :return list with Neurons
        """
        return self._layers[layer_idx]

    def get_outputs(self, layer_idx):
        """
        Gets outputs of selected layer
        :param layer_idx - layer index
        :return outputs - neurons outputs as list of np.arrays
        """
        outputs = list()
        for i in range(0, self.get_neurons_count(layer_idx)):
            neuron = self.get_neuron(layer_idx, i)
            outputs.append(neuron.last_y)
        return outputs

    def get_weights(self, layer_idx):
        """
        Gets weights of selected layer
        :param layer_idx - layer index
        :return weights - neurons weights as flat list
        """
        weights = list()
        for i in range(0, self.get_neurons_count(layer_idx)):
            neuron = self.get_neuron(layer_idx, i)
            weights.extend(neuron.weights)
        return weights

    def _get_neuron_weights(self, layer_idx, neuron_idx):
        """
        Gets neurons weights
        :param layer_idx - layer index
        :param neuron_idx - neuron index
        :return weights - neurons weights
        """
        neuron = self.get_neuron(layer_idx, neuron_idx)
        return neuron.weights

    def _run_first_layer(self, x):
        """
        Runs first layer, that only contains weights for inputs. Returned array has the same size as input array x.
        :param x - data as np.array
        :return y - output of first layer as np.array
        """
        weights = list()
        for i in range(len(self._layers[0])):
            weights.extend(self._get_neuron_weights(0, i))
        # Element-wise product: the first layer only scales the inputs.
        y = np.multiply(x, np.array(weights))
        return y

    def _run_layer(self, x, layer_idx):
        """
        Runs selected layer
        :param x - input data as np.array
        :param layer_idx - layer index
        :return y - output of selected layer as np.array
        """
        y = list()
        for neuron in self._layers[layer_idx]:
            y.append(neuron.compute(x))
        return np.array(y)

    def apply_changes(self, from_layer, to_layer):
        """
        Applies changes to neurons after learning process
        :param from_layer
        :param to_layer
        """
        for layer_idx in range(from_layer, to_layer):  # do not apply change to first layer
            for neuron_idx in range(self.get_neurons_count(layer_idx)):
                self._layers[layer_idx][neuron_idx].apply_changes()

    @time_measure
    def run(self, x):
        """
        Run network
        :param x - input data as np.array
        :return y - output value as np.array
        """
        if len(x) != len(self._layers[0]):
            raise Exception("First layer has to have {0} neurons!".format(len(x)))
        self._print('Running network...')
        y_1 = self._run_first_layer(x)
        self._print('y_0:', y_1)
        y_i = y_1
        # Propagate the signal through the remaining layers.
        for i in range(len(self._layers) - 1):
            i += 1
            y_i = self._run_layer(y_i, i)
            self._print('y_{0}:'.format(i), y_i)
        return np.array(y_i)

    @time_measure
    def learn(self, x, z):
        """
        Learning using back propagation.
        :param x - input data as np.array
        :param z - desired output value as int/float
        """
        self._print('Learning...')
        y = self.run(x)
        sigma = z - y
        # Last layer: error is the direct output error.
        for neuron_idx in range(self.get_neurons_count(self.last_layer_idx())):
            self._layers[self.last_layer_idx()][neuron_idx].sigma = sigma
            self._layers[self.last_layer_idx()][neuron_idx].delta_w = self.learning_function(self._layers[self.last_layer_idx()][neuron_idx].last_x, y, sigma)
        for layer_idx in range(self.get_layers_count() - 2, -1, -1):
            for neuron_idx in range(len(self._layers[layer_idx])):
                selected_neuron = self._layers[layer_idx][neuron_idx]
                sigm_sum = 0
                # sigma for n-th layer is a sum of sigma's from n+1 layer multiplied by proper weights from n+1 layer
                for neuron_idx_2 in range(len(self._layers[layer_idx + 1])):
                    sigm_sum += self._layers[layer_idx + 1][neuron_idx_2].sigma * self._layers[layer_idx + 1][neuron_idx_2].weights[neuron_idx]
                selected_neuron.sigma = sigm_sum
                # NOTE(review): the global output error `sigma` (not the
                # per-neuron sigm_sum) is used in the weight update below;
                # preserved as-is — confirm against the intended algorithm.
                selected_neuron.delta_w = self.learning_function(
                    selected_neuron.last_x, selected_neuron.last_y, sigma)
                selected_neuron.delta_bias = self.learning_function(
                    1,
                    selected_neuron.bias, sigma)
        self.apply_changes(1, self.get_layers_count())  # do not apply change to first layer

    @time_measure
    def hebb_learn(self, x, eta=0.5):
        """
        Learning using Hebbian learning rule.
        :param x - input data as np.array
        :param eta - learning rate
        """
        self._print('Learning with Hebbian learning rule...')
        self.run(x)
        for layer_idx in range(0, self.get_layers_count()):
            for neuron_idx in range(self.get_neurons_count(layer_idx)):
                delta_w = list()
                neuron = self._layers[layer_idx][neuron_idx]
                for i in range(len(neuron.weights)):
                    delta_w.append(neuron.weights[i] * eta * neuron.last_y)
                neuron.delta_w = delta_w
        self.apply_changes(1, self.get_layers_count())  # do not apply change to first layer

    @time_measure
    def kohonen_learn(self, x):
        """
        Learning using Kohonen method.
        :param x - input data as np.array
        """
        # NOTE(review): only the input norm is computed; the method is
        # intentionally unfinished.
        x_norm = np.linalg.norm(x)
        raise NotImplementedError()
def example():
    """Build a 2-2-1 network, run it once and do one back-propagation step."""
    # init network - 2 inputs, 2 Neurons in hidden layer and 1 neuron for output
    net = NeuralNetwork(layer_size=[2, 2, 1])
    # setting weights:
    net.set_neurons_weights(1, 0, np.array(([0, 1]), dtype=float))
    net.set_neurons_weights(1, 1, np.array(([1, 0]), dtype=float))
    net.set_neurons_weights(2, 0, np.array(([1, 1]), dtype=float))
    x = np.array(([-1, 1]), dtype=float)
    # Fix: use print as a function; the file previously mixed Python 2 print
    # statements with the print() calls used inside NeuralNetwork.
    print('input data: ' + str(x))
    net.run(x)
    net.learn(x, np.array([0.5]))
    print(net)
def example_hebb():
    """Build a 2-2-1 network and apply one Hebbian learning step."""
    # init network - 2 inputs, 2 Neurons in hidden layer and 1 neuron for output
    net = NeuralNetwork(layer_size=[2, 2, 1])
    # setting weights:
    net.set_neurons_weights(1, 0, np.array(([0, 1]), dtype=float))
    net.set_neurons_weights(1, 1, np.array(([1, 0]), dtype=float))
    net.set_neurons_weights(2, 0, np.array(([1, 1]), dtype=float))
    x = np.array(([-1, 1]), dtype=float)
    # Fix: print-as-function for Python 2/3 compatibility (see example()).
    print('input data: ' + str(x))
    net.run(x)
    print(net)
    net.hebb_learn(x)
    print(net)
| StarcoderdataPython |
3320712 | <filename>tests/test_formal_agg.py
from lake.top.lake_top import *
import fault
import pytest
import tempfile
@pytest.mark.skip
def test_formal_agg():
    """Smoke test of the aggregation-buffer formal module.

    Builds the "agg" formal module, configures its schedule/address
    generator registers, streams 785 input values through it, and compiles
    the fault tester with verilator.

    The original version set all 88 configuration registers with one
    assignment per line; here the 6-wide stride/range register banks are
    expanded from a compact table, which makes the nonzero entries obvious.
    """
    lt_dut, n, u, t = get_formal_module("agg")
    magma_dut = kts.util.to_magma(lt_dut,
                                  flatten_array=True,
                                  check_multiple_driver=False,
                                  optimize_if=False,
                                  check_flip_flop_always_ff=False)
    tester = fault.Tester(magma_dut, magma_dut.clk)
    tester.circuit.tile_en = 1
    tester.circuit.clk = 0
    tester.circuit.rst_n = 0
    tester.step(2)
    tester.circuit.rst_n = 1
    tester.step(2)
    tester.circuit.clk_en = 1
    tester.eval()

    # Scalar configuration registers.
    config = {
        "strg_ub_agg_only_agg_write_sched_gen_1_sched_addr_gen_starting_addr": 0,
        "strg_ub_agg_only_loops_in2buf_0_dimensionality": 1,
        "strg_ub_agg_sram_shared_agg_read_sched_gen_0_enable": 1,
        "strg_ub_agg_sram_shared_agg_read_sched_gen_1_sched_addr_gen_starting_addr": 0,
        "strg_ub_agg_sram_shared_loops_in2buf_autovec_write_0_dimensionality": 1,
        "strg_ub_agg_sram_shared_agg_read_sched_gen_0_sched_addr_gen_starting_addr": 4,
        "strg_ub_agg_only_agg_read_addr_gen_0_starting_addr": 0,
        "strg_ub_agg_only_agg_write_addr_gen_1_starting_addr": 0,
        "strg_ub_agg_sram_shared_loops_in2buf_autovec_write_1_dimensionality": 0,
        "strg_ub_agg_only_agg_write_sched_gen_0_enable": 1,
        "strg_ub_agg_only_agg_write_sched_gen_0_sched_addr_gen_starting_addr": 0,
        "strg_ub_agg_sram_shared_agg_read_sched_gen_1_enable": 0,
        "strg_ub_agg_only_agg_read_addr_gen_1_starting_addr": 0,
        "strg_ub_agg_only_agg_write_sched_gen_1_enable": 0,
        "strg_ub_agg_only_loops_in2buf_1_dimensionality": 0,
        "strg_ub_agg_only_agg_write_addr_gen_0_starting_addr": 0,
    }
    # 6-wide stride/range register banks (suffixes _0.._5): element 0 carries
    # the only nonzero value; elements 1..5 are always zero.
    vector_heads = {
        "strg_ub_agg_sram_shared_loops_in2buf_autovec_write_1_ranges": 0,
        "strg_ub_agg_sram_shared_agg_read_sched_gen_1_sched_addr_gen_strides": 0,
        "strg_ub_agg_sram_shared_agg_read_sched_gen_0_sched_addr_gen_strides": 4,
        "strg_ub_agg_only_agg_read_addr_gen_0_strides": 1,
        "strg_ub_agg_only_agg_write_addr_gen_1_strides": 0,
        "strg_ub_agg_only_agg_read_addr_gen_1_strides": 0,
        "strg_ub_agg_only_agg_write_sched_gen_1_sched_addr_gen_strides": 0,
        "strg_ub_agg_only_loops_in2buf_0_ranges": 782,
        "strg_ub_agg_only_loops_in2buf_1_ranges": 0,
        "strg_ub_agg_sram_shared_loops_in2buf_autovec_write_0_ranges": 194,
        "strg_ub_agg_only_agg_write_addr_gen_0_strides": 1,
        "strg_ub_agg_only_agg_write_sched_gen_0_sched_addr_gen_strides": 1,
    }
    for base, head in vector_heads.items():
        config["{}_0".format(base)] = head
        for idx in range(1, 6):
            config["{}_{}".format(base, idx)] = 0

    for f1 in config:
        setattr(tester.circuit, f1, config[f1])

    for i in range(785):
        tester.circuit.data_in_0 = i
        # check agg_data_out (internal signal) or formal_agg_data_out (top level)
        tester.eval()
        tester.step(2)

    with tempfile.TemporaryDirectory() as tempdir:
        # NOTE(review): tempdir is immediately overwritten, so the temporary
        # directory is unused and output lands in ./agg_formal (presumably so
        # the generated collateral can be inspected) — confirm intent.
        tempdir = "agg_formal"
        tester.compile_and_run(target="verilator",
                               directory=tempdir,
                               flags=["-Wno-fatal", "--trace"])
# Allow running the (otherwise pytest-skipped) test directly as a script.
if __name__ == "__main__":
    test_formal_agg()
| StarcoderdataPython |
4831641 | from django import template
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from corehq.apps.domain.models import Domain
import corehq.apps.style.utils as style_utils
from corehq.apps.hqwebapp.models import MaintenanceAlert
from corehq.tabs import MENU_TABS
register = template.Library()
def get_active_tab(visible_tabs, request_path):
    """Return the first tab matching the request, marking it active.

    Matchers are tried in order of decreasing precision: the cheap
    ``is_active_fast`` check, the full ``is_active`` check, and finally a
    URL-prefix match against *request_path*.  The winning tab gets
    ``is_active_tab = True``; ``None`` is returned when nothing matches.
    """
    matchers = (
        lambda tab: tab.is_active_fast,
        lambda tab: tab.is_active,
        lambda tab: tab.url and request_path.startswith(tab.url),
    )
    for matcher in matchers:
        for tab in visible_tabs:
            if matcher(tab):
                tab.is_active_tab = True
                return tab
class MainMenuNode(template.Node):
    """Template node that renders the main (top-level) navigation menu.

    Instantiates every tab class for the current request, keeps the viewable
    ones, records the active tab in the template context, and renders the
    Bootstrap-version-specific menu partial.
    """
    def render(self, context):
        request = context['request']
        current_url_name = context['current_url_name']
        couch_user = getattr(request, 'couch_user', None)
        project = getattr(request, 'project', None)
        domain = context.get('domain')
        org = context.get('org')

        # Domain modules may override the default tab set via a TABS attribute.
        try:
            module = Domain.get_module_by_name(domain)
        except (ValueError, AttributeError):
            module = None

        tabs = getattr(module, 'TABS', MENU_TABS)
        visible_tabs = []
        for tab_class in tabs:
            t = tab_class(
                request, current_url_name, domain=domain,
                couch_user=couch_user, project=project, org=org)
            t.is_active_tab = False
            if t.real_is_viewable:
                visible_tabs.append(t)

        # set the context variable in the highest scope so it can be used in
        # other blocks
        context.dicts[0]['active_tab'] = get_active_tab(visible_tabs,
                                                        request.get_full_path())
        template = {
            style_utils.BOOTSTRAP_2: 'style/bootstrap2/partials/menu_main.html',
            style_utils.BOOTSTRAP_3: 'style/bootstrap3/partials/menu_main.html',
        }[style_utils.get_bootstrap_version()]
        return mark_safe(render_to_string(template, {
            'tabs': visible_tabs,
        }))
@register.tag(name="format_main_menu")
def format_main_menu(parser, token):
    """Template tag ``{% format_main_menu %}``: render the main nav menu."""
    return MainMenuNode()
@register.simple_tag(takes_context=True)
def format_subtab_menu(context):
    """Render the subtab menu for the currently active tab, if any."""
    active_tab = context.get('active_tab', None)
    subtabs = None
    if active_tab and active_tab.subtabs:
        viewable = [tab for tab in active_tab.subtabs if tab.is_viewable]
        # Only show the menu when there is more than one subtab to choose from.
        if len(viewable) > 1:
            subtabs = viewable
    return mark_safe(render_to_string(
        "style/bootstrap2/partials/subtab_menu.html",
        {'subtabs': subtabs},
    ))
@register.simple_tag(takes_context=True)
def format_sidebar(context):
    """Render the left sidebar navigation for the active tab (or subtab).

    Picks the sidebar item sections from the active subtab when the active
    tab has subtabs, otherwise from the active tab itself, then flags the
    nav entry matching the current URL and resolves its contextual subpage.
    """
    current_url_name = context['current_url_name']
    active_tab = context.get('active_tab', None)
    request = context['request']

    sections = None

    if active_tab and active_tab.subtabs:
        # if active_tab is active then at least one of its subtabs should have
        # is_active == True, but we guard against the possibility of this not
        # being the case by setting sections = None above
        for s in active_tab.subtabs:
            if s.is_active:
                sections = s.sidebar_items
                break
        if sections is None:
            # Fall back to a URL-prefix match against the subtab's URL.
            for s in active_tab.subtabs:
                if s.url and request.get_full_path().startswith(s.url):
                    sections = s.sidebar_items
                    break
    else:
        sections = active_tab.sidebar_items if active_tab else None

    if sections:
        # set is_active on active sidebar item by modifying nav by reference
        # and see if the nav needs a subnav for the current contextual item
        for section_title, navs in sections:
            for nav in navs:
                if (request.get_full_path().startswith(nav['url']) or
                        request.build_absolute_uri().startswith(nav['url'])):
                    nav['is_active'] = True
                else:
                    nav['is_active'] = False
                if 'subpages' in nav:
                    for subpage in nav['subpages']:
                        if subpage['urlname'] == current_url_name:
                            # A callable title is resolved with the flattened
                            # template context as keyword arguments.
                            if callable(subpage['title']):
                                actual_context = {}
                                for d in context.dicts:
                                    actual_context.update(d)
                                subpage['title'] = subpage['title'](**actual_context)
                            nav['subpage'] = subpage
                            break

    template = {
        style_utils.BOOTSTRAP_2: 'style/bootstrap2/partials/navigation_left_sidebar.html',
        style_utils.BOOTSTRAP_3: 'style/bootstrap3/partials/navigation_left_sidebar.html',
    }[style_utils.get_bootstrap_version()]
    return mark_safe(render_to_string(template, {
        'sections': sections
    }))
@register.simple_tag
def maintenance_alert():
    """Return banner HTML for the most recently modified active alert.

    Returns an empty string when no active ``MaintenanceAlert`` exists.
    """
    active_alerts = (MaintenanceAlert.objects
                     .filter(active=True)
                     .order_by('-modified'))
    try:
        alert = active_alerts[0]
    except IndexError:
        return ''
    return format_html(
        '<div class="alert alert-warning" style="text-align: center; margin-bottom: 0;">{}</div>',
        mark_safe(alert.html),
    )
| StarcoderdataPython |
3240868 | import re
import ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from smtplib import SMTP_SSL, SMTPAuthenticationError
class Mail:
    """Send plain-text email to a comma-separated list of receivers over
    SMTP-SSL."""

    def __init__(self, sender, receivers, smtp_server, password, port):
        self.sender = sender
        self.receivers = receivers
        self.smtp_server = smtp_server
        self.password = password
        self.port = port

    # Checks if an email is valid
    # Returns None if any email is not valid
    @staticmethod
    def check_mails(mails):
        """Validate *mails* (comma-separated); return the joined list or
        None when any address fails the pattern."""
        pattern = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
        addresses = mails.split(',')
        for address in addresses:
            if not re.search(pattern, address):
                return None
        return ",".join(addresses)

    # Sends an email.
    def send_mail(self, subject, body):
        """Build a multipart message with *subject*/*body* and send it to
        every configured receiver via SMTP-SSL."""
        msg = MIMEMultipart()
        msg["From"] = self.sender
        msg["To"] = self.receivers
        msg["Subject"] = subject
        msg["Bcc"] = self.receivers  # Recommended for mass emails
        msg.attach(MIMEText(body, "plain"))
        payload = msg.as_string()

        # Log in to server using secure context and send email
        tls_context = ssl.create_default_context()
        with SMTP_SSL(self.smtp_server, self.port, context=tls_context) as smtp:
            smtp.login(self.sender, self.password)
            smtp.sendmail(self.sender, self.receivers.split(','), payload)
| StarcoderdataPython |
1612015 | import pandas as pd
from get_json_data import get_json_data
def get_active_users(path_to_csv=None,
                     token=None):
    """
    Returns a dict of active users from The Spatial Community

    NOTE(review): this is Python 2 code (`unicode`, `dict.iteritems`).
    Maps Slack user id -> username for every non-admin member whose
    username appears in the CSV with billing_active == 1.
    """
    url = "https://slack.com/api/users.list"
    params = dict(
        token=token
    )
    member_list = dict()
    data = get_json_data(url=url, params=params)
    # Keep only explicit non-admins (members lacking "is_admin" are skipped).
    for member in data["members"]:
        if "is_admin" in member.keys() and member["is_admin"] == False:
            member_list[member["name"]] = member["id"]
    df = pd.read_csv(path_to_csv)
    cols = df.columns
    # Normalize hyphenated column names so df.query() can reference them.
    cols = cols.map(lambda x: x.replace('-', '_') if isinstance(x, (str, unicode)) else x)
    df.columns = cols
    # assumes the CSV has "billing_active" and "username" columns — TODO confirm
    filtered_df = df.query("billing_active == 1")
    filtered_usernames = filtered_df.username.values.tolist()
    # Invert to id -> username, restricted to billing-active usernames.
    active_users = {v:k for k,v in member_list.iteritems() if k in filtered_usernames }
    return active_users
| StarcoderdataPython |
140479 | """An AccountScanner scans a set of accounts using an AccountScanPlan to define scan
parameters"""
from collections import defaultdict
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
from dataclasses import dataclass
import random
import time
import traceback
from typing import Any, DefaultDict, Dict, List, Tuple, Type
import boto3
from altimeter.aws.log_events import AWSLogEvents
from altimeter.aws.resource.resource_spec import ScanGranularity, AWSResourceSpec
from altimeter.aws.resource.unscanned_account import UnscannedAccountResourceSpec
from altimeter.aws.scan.account_scan_plan import AccountScanPlan
from altimeter.aws.scan.aws_accessor import AWSAccessor
from altimeter.aws.scan.settings import (
RESOURCE_SPEC_CLASSES,
INFRA_RESOURCE_SPEC_CLASSES,
ORG_RESOURCE_SPEC_CLASSES,
)
from altimeter.aws.settings import (
GRAPH_NAME,
GRAPH_VERSION,
)
from altimeter.core.artifact_io.writer import ArtifactWriter
from altimeter.core.graph.graph_set import GraphSet
from altimeter.core.graph.graph_spec import GraphSpec
from altimeter.core.log import Logger
from altimeter.core.multilevel_counter import MultilevelCounter
from altimeter.core.resource.resource import Resource
def get_all_enabled_regions(session: boto3.Session) -> Tuple[str, ...]:
    """Get all enabled regions - which are either opted-in or are
    opt-in-not-required - for a given session.

    Args:
        session: boto3 Session

    Returns:
        tuple of enabled regions in the given session.
    """
    ec2_client = session.client("ec2")
    response: Dict[str, List[Dict[str, str]]] = ec2_client.describe_regions(
        Filters=[{"Name": "opt-in-status", "Values": ["opt-in-not-required", "opted-in"]}]
    )
    return tuple(region_info["RegionName"] for region_info in response["Regions"])
@dataclass(frozen=True)
class ScanUnit:
    """Immutable description of one (account, region, service) scan task,
    including the temporary credentials needed to perform it."""
    graph_name: str
    graph_version: str
    account_id: str
    region_name: str
    service: str
    # STS temporary credentials for the region-scoped session.
    access_key: str
    secret_key: str
    token: str
    # Resource spec classes to scan within this unit.
    resource_spec_classes: Tuple[Type[AWSResourceSpec], ...]
class AccountScanner:
    """An AccountScanner scans a set of accounts using an AccountScanPlan to define scan
    parameters

    Args:
        account_scan_plan: AccountScanPlan describing scan targets
        artifact_writer: ArtifactWriter for writing out artifacts
        max_svc_scan_threads: max number of scan-unit worker threads
        preferred_account_scan_regions: regions preferred for account-granularity scans
        scan_sub_accounts: if True, also scan org-level resource specs
        graph_name: name of graph
        graph_version: version string for graph
    """

    def __init__(
        self,
        account_scan_plan: AccountScanPlan,
        artifact_writer: ArtifactWriter,
        max_svc_scan_threads: int,
        preferred_account_scan_regions: Tuple[str, ...],
        scan_sub_accounts: bool,
        graph_name: str = GRAPH_NAME,
        graph_version: str = GRAPH_VERSION,
    ) -> None:
        self.account_scan_plan = account_scan_plan
        self.artifact_writer = artifact_writer
        self.graph_name = graph_name
        self.graph_version = graph_version
        self.max_threads = max_svc_scan_threads
        self.preferred_account_scan_regions = preferred_account_scan_regions
        self.resource_spec_classes = RESOURCE_SPEC_CLASSES + INFRA_RESOURCE_SPEC_CLASSES
        if scan_sub_accounts:
            self.resource_spec_classes += ORG_RESOURCE_SPEC_CLASSES

    def scan(self) -> List[Dict[str, Any]]:
        """Scan all planned accounts and write one JSON artifact per account.

        Returns:
            list of per-account summary dicts with keys
            "account_id", "output_artifact", "errors", "api_call_stats".
        """
        logger = Logger()
        scan_result_dicts = []
        now = int(time.time())
        # Errors raised before any scan unit could be scheduled, per account.
        prescan_account_ids_errors: DefaultDict[str, List[str]] = defaultdict(list)
        futures = []
        # Phase 1: fan out (account, region, service) scan units to workers.
        # Accounts/regions/services are shuffled to spread API load.
        with ThreadPoolExecutor(max_workers=self.max_threads) as executor:
            shuffled_account_ids = random.sample(
                self.account_scan_plan.account_ids, k=len(self.account_scan_plan.account_ids)
            )
            for account_id in shuffled_account_ids:
                with logger.bind(account_id=account_id):
                    logger.info(event=AWSLogEvents.ScanAWSAccountStart)
                    try:
                        session = self.account_scan_plan.accessor.get_session(account_id=account_id)
                        # sanity check
                        sts_client = session.client("sts")
                        sts_account_id = sts_client.get_caller_identity()["Account"]
                        if sts_account_id != account_id:
                            raise ValueError(
                                f"BUG: sts detected account_id {sts_account_id} != {account_id}"
                            )
                        if self.account_scan_plan.regions:
                            scan_regions = tuple(self.account_scan_plan.regions)
                        else:
                            scan_regions = get_all_enabled_regions(session=session)
                        account_gran_scan_region = random.choice(
                            self.preferred_account_scan_regions
                        )
                        # build a dict of regions -> services -> List[AWSResourceSpec]
                        regions_services_resource_spec_classes: DefaultDict[
                            str, DefaultDict[str, List[Type[AWSResourceSpec]]]
                        ] = defaultdict(lambda: defaultdict(list))
                        resource_spec_class: Type[AWSResourceSpec]
                        for resource_spec_class in self.resource_spec_classes:
                            client_name = resource_spec_class.get_client_name()
                            if resource_spec_class.scan_granularity == ScanGranularity.ACCOUNT:
                                # Account-granularity specs are scanned in a single region.
                                if resource_spec_class.region_whitelist:
                                    account_resource_scan_region = resource_spec_class.region_whitelist[
                                        0
                                    ]
                                else:
                                    account_resource_scan_region = account_gran_scan_region
                                regions_services_resource_spec_classes[
                                    account_resource_scan_region
                                ][client_name].append(resource_spec_class)
                            elif resource_spec_class.scan_granularity == ScanGranularity.REGION:
                                # Region-granularity specs are scanned in every applicable region.
                                if resource_spec_class.region_whitelist:
                                    resource_scan_regions = tuple(
                                        region
                                        for region in scan_regions
                                        if region in resource_spec_class.region_whitelist
                                    )
                                    if not resource_scan_regions:
                                        resource_scan_regions = resource_spec_class.region_whitelist
                                else:
                                    resource_scan_regions = scan_regions
                                for region in resource_scan_regions:
                                    regions_services_resource_spec_classes[region][
                                        client_name
                                    ].append(resource_spec_class)
                            else:
                                raise NotImplementedError(
                                    f"ScanGranularity {resource_spec_class.scan_granularity} unimplemented"
                                )
                        # Build and submit ScanUnits
                        shuffed_regions_services_resource_spec_classes = random.sample(
                            regions_services_resource_spec_classes.items(),
                            len(regions_services_resource_spec_classes),
                        )
                        for (
                            region,
                            services_resource_spec_classes,
                        ) in shuffed_regions_services_resource_spec_classes:
                            region_session = self.account_scan_plan.accessor.get_session(
                                account_id=account_id, region_name=region
                            )
                            region_creds = region_session.get_credentials()
                            shuffled_services_resource_spec_classes = random.sample(
                                services_resource_spec_classes.items(),
                                len(services_resource_spec_classes),
                            )
                            for (
                                service,
                                svc_resource_spec_classes,
                            ) in shuffled_services_resource_spec_classes:
                                # Specs marked parallel_scan each get their own
                                # unit; the rest are bundled into one serial unit.
                                parallel_svc_resource_spec_classes = [
                                    svc_resource_spec_class
                                    for svc_resource_spec_class in svc_resource_spec_classes
                                    if svc_resource_spec_class.parallel_scan
                                ]
                                serial_svc_resource_spec_classes = [
                                    svc_resource_spec_class
                                    for svc_resource_spec_class in svc_resource_spec_classes
                                    if not svc_resource_spec_class.parallel_scan
                                ]
                                for (
                                    parallel_svc_resource_spec_class
                                ) in parallel_svc_resource_spec_classes:
                                    parallel_future = schedule_scan(
                                        executor=executor,
                                        graph_name=self.graph_name,
                                        graph_version=self.graph_version,
                                        account_id=account_id,
                                        region_name=region,
                                        service=service,
                                        access_key=region_creds.access_key,
                                        secret_key=region_creds.secret_key,
                                        token=region_creds.token,
                                        resource_spec_classes=(parallel_svc_resource_spec_class,),
                                    )
                                    futures.append(parallel_future)
                                serial_future = schedule_scan(
                                    executor=executor,
                                    graph_name=self.graph_name,
                                    graph_version=self.graph_version,
                                    account_id=account_id,
                                    region_name=region,
                                    service=service,
                                    access_key=region_creds.access_key,
                                    secret_key=region_creds.secret_key,
                                    token=region_creds.token,
                                    resource_spec_classes=tuple(serial_svc_resource_spec_classes),
                                )
                                futures.append(serial_future)
                    except Exception as ex:
                        error_str = str(ex)
                        trace_back = traceback.format_exc()
                        logger.error(
                            event=AWSLogEvents.ScanAWSAccountError,
                            error=error_str,
                            trace_back=trace_back,
                        )
                        prescan_account_ids_errors[account_id].append(f"{error_str}\n{trace_back}")
        # Phase 2: collect per-unit graph set dicts grouped by account.
        account_ids_graph_set_dicts: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
        for future in as_completed(futures):
            account_id, graph_set_dict = future.result()
            account_ids_graph_set_dicts[account_id].append(graph_set_dict)
        # first make sure no account id appears both in account_ids_graph_set_dicts
        # and prescan_account_ids_errors - this should never happen
        doubled_accounts = set(account_ids_graph_set_dicts.keys()).intersection(
            set(prescan_account_ids_errors.keys())
        )
        if doubled_accounts:
            raise Exception(
                (
                    f"BUG: Account(s) {doubled_accounts} in both "
                    "account_ids_graph_set_dicts and prescan_account_ids_errors."
                )
            )
        # graph prescan error accounts
        for account_id, errors in prescan_account_ids_errors.items():
            with logger.bind(account_id=account_id):
                unscanned_account_resource = UnscannedAccountResourceSpec.create_resource(
                    account_id=account_id, errors=errors
                )
                account_graph_set = GraphSet(
                    name=self.graph_name,
                    version=self.graph_version,
                    start_time=now,
                    end_time=now,
                    resources=[unscanned_account_resource],
                    errors=errors,
                    stats=MultilevelCounter(),
                )
                account_graph_set.validate()
                output_artifact = self.artifact_writer.write_json(
                    name=account_id, data=account_graph_set.to_dict()
                )
                logger.info(event=AWSLogEvents.ScanAWSAccountEnd)
                api_call_stats = account_graph_set.stats.to_dict()
                scan_result_dicts.append(
                    {
                        "account_id": account_id,
                        "output_artifact": output_artifact,
                        "errors": errors,
                        "api_call_stats": api_call_stats,
                    }
                )
        # graph rest
        for account_id, graph_set_dicts in account_ids_graph_set_dicts.items():
            with logger.bind(account_id=account_id):
                # if there are any errors whatsoever we generate an empty graph with
                # errors only
                errors = []
                for graph_set_dict in graph_set_dicts:
                    errors += graph_set_dict["errors"]
                if errors:
                    unscanned_account_resource = UnscannedAccountResourceSpec.create_resource(
                        account_id=account_id, errors=errors
                    )
                    account_graph_set = GraphSet(
                        name=self.graph_name,
                        version=self.graph_version,
                        start_time=now,
                        end_time=now,
                        resources=[unscanned_account_resource],
                        errors=errors,
                        stats=MultilevelCounter(),  # ENHANCEMENT: could technically get partial stats.
                    )
                    account_graph_set.validate()
                else:
                    # Merge all per-unit graph sets into one per-account set.
                    account_graph_set = GraphSet(
                        name=self.graph_name,
                        version=self.graph_version,
                        start_time=now,
                        end_time=now,
                        resources=[],
                        errors=[],
                        stats=MultilevelCounter(),
                    )
                    for graph_set_dict in graph_set_dicts:
                        graph_set = GraphSet.from_dict(graph_set_dict)
                        account_graph_set.merge(graph_set)
                output_artifact = self.artifact_writer.write_json(
                    name=account_id, data=account_graph_set.to_dict()
                )
                logger.info(event=AWSLogEvents.ScanAWSAccountEnd)
                api_call_stats = account_graph_set.stats.to_dict()
                scan_result_dicts.append(
                    {
                        "account_id": account_id,
                        "output_artifact": output_artifact,
                        "errors": errors,
                        "api_call_stats": api_call_stats,
                    }
                )
        return scan_result_dicts
def scan_scan_unit(scan_unit: ScanUnit) -> Tuple[str, Dict[str, Any]]:
    """Execute one ScanUnit and return (account_id, graph set dict).

    Any exception from the scan is captured into the returned graph set's
    errors list rather than propagated, so one failing unit does not abort
    the overall account scan.
    """
    logger = Logger()
    with logger.bind(
        account_id=scan_unit.account_id,
        region=scan_unit.region_name,
        service=scan_unit.service,
        resource_classes=sorted(
            [
                resource_spec_class.__name__
                for resource_spec_class in scan_unit.resource_spec_classes
            ]
        ),
    ):
        start_t = time.time()
        logger.info(event=AWSLogEvents.ScanAWSAccountServiceStart)
        # Build a session from the unit's temporary STS credentials.
        session = boto3.Session(
            aws_access_key_id=scan_unit.access_key,
            aws_secret_access_key=scan_unit.secret_key,
            aws_session_token=scan_unit.token,
            region_name=scan_unit.region_name,
        )
        scan_accessor = AWSAccessor(
            session=session, account_id=scan_unit.account_id, region_name=scan_unit.region_name
        )
        graph_spec = GraphSpec(
            name=scan_unit.graph_name,
            version=scan_unit.graph_version,
            resource_spec_classes=scan_unit.resource_spec_classes,
            scan_accessor=scan_accessor,
        )
        start_time = int(time.time())
        resources: List[Resource] = []
        errors = []
        try:
            resources = graph_spec.scan()
        except Exception as ex:
            # Record the failure (with traceback) instead of raising.
            error_str = str(ex)
            trace_back = traceback.format_exc()
            logger.error(
                event=AWSLogEvents.ScanAWSAccountError, error=error_str, trace_back=trace_back
            )
            error = f"{str(ex)}\n{trace_back}"
            errors.append(error)
        end_time = int(time.time())
        graph_set = GraphSet(
            name=scan_unit.graph_name,
            version=scan_unit.graph_version,
            start_time=start_time,
            end_time=end_time,
            resources=resources,
            errors=errors,
            stats=scan_accessor.api_call_stats,
        )
        end_t = time.time()
        elapsed_sec = end_t - start_t
        logger.info(event=AWSLogEvents.ScanAWSAccountServiceEnd, elapsed_sec=elapsed_sec)
        return (scan_unit.account_id, graph_set.to_dict())
def schedule_scan(
    executor: ThreadPoolExecutor,
    graph_name: str,
    graph_version: str,
    account_id: str,
    region_name: str,
    service: str,
    access_key: str,
    secret_key: str,
    token: str,
    resource_spec_classes: Tuple[Type[AWSResourceSpec], ...],
) -> Future:
    """Package the arguments into a ScanUnit, submit it to *executor* for
    scanning via :func:`scan_scan_unit`, and return the resulting Future."""
    unit = ScanUnit(
        graph_name=graph_name,
        graph_version=graph_version,
        account_id=account_id,
        region_name=region_name,
        service=service,
        access_key=access_key,
        secret_key=secret_key,
        token=token,
        resource_spec_classes=resource_spec_classes,
    )
    return executor.submit(lambda: scan_scan_unit(scan_unit=unit))
| StarcoderdataPython |
166030 | import SimpleHTTPServer
import SocketServer
import BaseHTTPServer
import ssl
import os
import sys
# Python 2 script: serve the current directory over HTTPS (port 4443) when
# invoked as "python server.py https", otherwise over plain HTTP (port 8000).
if len(sys.argv) > 1 and sys.argv[1] == 'https':
    # Expect server.pem (cert + key) next to this script.
    exec_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
    https = BaseHTTPServer.HTTPServer(('', 4443), SimpleHTTPServer.SimpleHTTPRequestHandler)
    https.socket = ssl.wrap_socket (https.socket, certfile=(exec_dir + os.sep + 'server.pem'), server_side=True)
    https.serve_forever()
else:
    PORT = 8000
    Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
    httpd = SocketServer.TCPServer(("", PORT), Handler)
    print "serving at port", PORT
    httpd.serve_forever()
| StarcoderdataPython |
1663256 | # coding: utf-8
import webbrowser
from operator import itemgetter
from .config import feedback, gconfig
from .symbol_format import completion_to_suggest
from .util import catch, Pretty
class ProtocolHandler(object):
    """Mixin for common behavior of handling ENSIME protocol responses.

    Actual handler implementations are abstract and should be implemented by a
    subclass. Requires facilities of an ``EnsimeClient``.
    """

    def __init__(self):
        self.handlers = {}
        self.register_responses_handlers()

    def register_responses_handlers(self):
        """Register handlers for responses from the server.

        A handler must accept only one parameter: `payload`.
        """
        # NOTE(review): buffer_typechecks, buffer_typechecks_and_display,
        # handle_typecheck_complete, handle_debug_output/break/backtrace,
        # and apply_refactor are not declared on this class — presumably
        # provided by the consuming EnsimeClient subclass; verify.
        self.handlers["SymbolInfo"] = self.handle_symbol_info
        self.handlers["IndexerReadyEvent"] = self.handle_indexer_ready
        self.handlers["AnalyzerReadyEvent"] = self.handle_analyzer_ready
        self.handlers["NewScalaNotesEvent"] = self.buffer_typechecks
        self.handlers["NewJavaNotesEvent"] = self.buffer_typechecks_and_display
        self.handlers["BasicTypeInfo"] = self.show_type
        self.handlers["ArrowTypeInfo"] = self.show_type
        self.handlers["FullTypeCheckCompleteEvent"] = self.handle_typecheck_complete
        self.handlers["StringResponse"] = self.handle_string_response
        self.handlers["CompletionInfoList"] = self.handle_completion_info_list
        self.handlers["TypeInspectInfo"] = self.handle_type_inspect
        self.handlers["SymbolSearchResults"] = self.handle_symbol_search
        self.handlers["SourcePositions"] = self.handle_source_positions
        self.handlers["DebugOutputEvent"] = self.handle_debug_output
        self.handlers["DebugBreakEvent"] = self.handle_debug_break
        self.handlers["DebugBacktrace"] = self.handle_debug_backtrace
        self.handlers["DebugVmError"] = self.handle_debug_vm_error
        self.handlers["RefactorDiffEffect"] = self.apply_refactor
        self.handlers["ImportSuggestions"] = self.handle_import_suggestions
        self.handlers["PackageInfo"] = self.handle_package_info
        self.handlers["FalseResponse"] = self.handle_false_response

    def handle_incoming_response(self, call_id, payload):
        """Get a registered handler for a given response and execute it."""
        self.log.debug('handle_incoming_response: in [typehint: %s, call ID: %s]',
                       payload['typehint'], call_id)  # We already log the full JSON response

        typehint = payload["typehint"]
        handler = self.handlers.get(typehint)

        def feature_not_supported(m):
            # Called when a registered handler raises NotImplementedError.
            msg = feedback["handler_not_implemented"]
            self.editor.raw_message(msg.format(typehint, self.launcher.ensime_version))

        if handler:
            with catch(NotImplementedError, feature_not_supported):
                handler(call_id, payload)
        else:
            self.log.warning('Response has not been handled: %s', Pretty(payload))

    # Abstract handlers: concrete protocol-version subclasses must implement.
    def handle_indexer_ready(self, call_id, payload):
        raise NotImplementedError()

    def handle_analyzer_ready(self, call_id, payload):
        raise NotImplementedError()

    def handle_debug_vm_error(self, call_id, payload):
        raise NotImplementedError()

    def handle_import_suggestions(self, call_id, payload):
        raise NotImplementedError()

    def handle_package_info(self, call_id, payload):
        raise NotImplementedError()

    def handle_symbol_search(self, call_id, payload):
        raise NotImplementedError()

    def handle_symbol_info(self, call_id, payload):
        raise NotImplementedError()

    def handle_string_response(self, call_id, payload):
        raise NotImplementedError()

    def handle_completion_info_list(self, call_id, payload):
        raise NotImplementedError()

    def handle_type_inspect(self, call_id, payload):
        raise NotImplementedError()

    def show_type(self, call_id, payload):
        raise NotImplementedError()

    def handle_source_positions(self, call_id, payload):
        raise NotImplementedError()

    def handle_false_response(self, call_id, payload):
        raise NotImplementedError()
class ProtocolHandlerV1(ProtocolHandler):
"""Implements response handlers for the v1 ENSIME Jerky protocol."""
    def handle_indexer_ready(self, call_id, payload):
        # Indexer finished: surface the canned "indexer_ready" message.
        self.editor.message("indexer_ready")
    def handle_analyzer_ready(self, call_id, payload):
        # Analyzer finished: surface the canned "analyzer_ready" message.
        self.editor.message("analyzer_ready")
    def handle_debug_vm_error(self, call_id, payload):
        # Debugger VM error: details are only available in the log file.
        self.editor.raw_message('Error. Check ensime-vim log for details.')
def handle_false_response(self, call_id, payload):
call_options = self.call_options.get(call_id)
false_msg = call_options.get('false_resp_msg') if call_options else None
if false_msg:
self.editor.raw_message(false_msg)
else:
self.editor.message('false_response')
def handle_import_suggestions(self, call_id, payload):
imports = list()
for suggestions in payload['symLists']:
for suggestion in suggestions:
imports.append(suggestion['name'].replace('$', '.'))
imports = list(sorted(set(imports)))
if not imports:
self.editor.raw_message('No import suggestions found.')
return
choice = self.editor.menu('Select class to import:', imports)
if choice:
self.add_import(choice)
    def handle_package_info(self, call_id, payload):
        """Render a package's member tree into a dedicated side buffer."""
        package = payload["fullName"]

        # Recursively append one line per member, indenting by nesting level;
        # recursion stops below depth 4.
        def add(member, indentLevel):
            indent = " " * indentLevel
            t = member["declAs"]["typehint"] if member["typehint"] == "BasicTypeInfo" else ""
            line = "{}{}: {}".format(indent, t, member["name"])
            self.editor.append(line)
            if indentLevel < 4:
                for m in member["members"]:
                    add(m, indentLevel + 1)

        # Create a new buffer 45 columns wide
        opts = {'buftype': 'nofile', 'bufhidden': 'wipe', 'buflisted': False,
                'filetype': 'package_info', 'swapfile': False}
        self.editor.split_window('package_info', vertical=True, size=45, bufopts=opts)
        self.editor.append(str(package))
        for member in payload["members"]:
            add(member, 1)
def handle_symbol_search(self, call_id, payload):
"""Handler for symbol search results"""
self.log.debug('handle_symbol_search: in %s', Pretty(payload))
syms = payload["syms"]
qfList = []
for sym in syms:
p = sym.get("pos")
if p:
item = self.editor.to_quickfix_item(str(p["file"]),
p["line"],
str(sym["name"]),
"info")
qfList.append(item)
self.editor.write_quickfix_list(qfList, "Symbol Search")
def handle_symbol_info(self, call_id, payload):
"""Handler for response `SymbolInfo`."""
with catch(KeyError, lambda e: self.editor.message("unknown_symbol")):
decl_pos = payload["declPos"]
f = decl_pos.get("file")
call_options = self.call_options[call_id]
self.log.debug('handle_symbol_info: call_options %s', call_options)
display = call_options.get("display")
if display and f:
self.editor.raw_message(f)
open_definition = call_options.get("open_definition")
if open_definition and f:
self.editor.clean_errors()
self.editor.doautocmd('BufLeave')
if call_options.get("split"):
vert = call_options.get("vert")
self.editor.split_window(f, vertical=vert)
else:
self.editor.edit(f)
self.editor.doautocmd('BufReadPre', 'BufRead', 'BufEnter')
self.set_position(decl_pos)
del self.call_options[call_id]
def handle_string_response(self, call_id, payload):
"""Handler for response `StringResponse`.
This is the response for the following requests:
1. `DocUriAtPointReq` or `DocUriForSymbolReq`
2. `DebugToStringReq`
"""
self.log.debug('handle_string_response: in [typehint: %s, call ID: %s]',
payload['typehint'], call_id)
# :EnDocBrowse or :EnDocUri
url = payload['text']
if not url.startswith('http'):
port = self.ensime.http_port()
url = gconfig['localhost'].format(port, url)
options = self.call_options.get(call_id)
if options and options.get('browse'):
self._browse_doc(url)
del self.call_options[call_id]
else:
# TODO: make this return value of a Vim function synchronously, how?
self.log.debug('EnDocUri %s', url)
return url
def _browse_doc(self, url):
self.log.debug('_browse_doc: %s', url)
try:
if webbrowser.open(url):
self.log.info('opened %s', url)
except webbrowser.Error:
self.log.exception('_browse_doc: webbrowser error')
self.editor.raw_message(feedback["manual_doc"].format(url))
def handle_completion_info_list(self, call_id, payload):
"""Handler for a completion response."""
self.log.debug('handle_completion_info_list: in')
# filter out completions without `typeInfo` field to avoid server bug. See #324
completions = [c for c in payload["completions"] if "typeInfo" in c]
self.suggestions = [completion_to_suggest(c) for c in completions]
self.log.debug('handle_completion_info_list: %s', Pretty(self.suggestions))
def handle_type_inspect(self, call_id, payload):
"""Handler for responses `TypeInspectInfo`."""
style = 'fullName' if self.full_types_enabled else 'name'
interfaces = payload.get("interfaces")
ts = [i["type"][style] for i in interfaces]
prefix = "( " + ", ".join(ts) + " ) => "
self.editor.raw_message(prefix + payload["type"][style])
# TODO @ktonga reuse completion suggestion formatting logic
def show_type(self, call_id, payload):
"""Show type of a variable or scala type."""
if self.full_types_enabled:
tpe = payload['fullName']
else:
tpe = payload['name']
self.log.info('Displayed type %s', tpe)
self.editor.raw_message(tpe)
class ProtocolHandlerV2(ProtocolHandlerV1):
    """Implements response handlers for the v2 ENSIME Jerky protocol."""

    def handle_source_positions(self, call_id, payload):
        """Write symbol usage positions to the quickfix list, sorted by location."""
        self.log.debug('handle_source_positions: in %s', Pretty(payload))
        options = self.call_options[call_id]
        query = options.get("word_under_cursor")

        hits = payload["positions"]
        if not hits:
            self.editor.raw_message("No usages of <{}> found".format(query))
            return

        entries = [
            self.editor.to_quickfix_item(
                str(hit["position"]["file"]),
                hit["position"]["line"],
                str(hit["preview"]) if "preview" in hit else "<no preview>",
                "info",
            )
            for hit in hits
        ]
        ordered = sorted(entries, key=itemgetter('filename', 'lnum'))
        self.editor.write_quickfix_list(ordered, "Usages of <{}>".format(query))
| StarcoderdataPython |
1783129 | <reponame>ckamtsikis/cmssw<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
#
# simple analyzer to make histos within a framework job off the super clusters in the event
# Author: <NAME>, University of Rome & INFN
#
# EDAnalyzer configuration: histograms super-cluster quantities.
# Collection labels below name the producers/collections read from the event.
egammaSimpleAnalyzer = cms.EDAnalyzer("EgammaSimpleAnalyzer",
    xMaxHist = cms.double(60.0),            # histogram upper edge
    outputFile = cms.string('egammaAnalyzer.root'),
    #
    # island clustering in endcap
    #
    islandEndcapBasicClusterProducer = cms.string('islandBasicClusters'),
    islandEndcapSuperClusterCollection = cms.string('islandEndcapSuperClusters'),
    islandBarrelBasicClusterShapes = cms.string('islandBarrelShape'),
    correctedHybridSuperClusterProducer = cms.string('correctedHybridSuperClusters'),
    islandEndcapBasicClusterCollection = cms.string('islandEndcapBasicClusters'),
    correctedIslandEndcapSuperClusterProducer = cms.string('correctedEndcapSuperClustersWithPreshower'),
    hybridSuperClusterCollection = cms.string(''),
    xMinHist = cms.double(0.0),             # histogram lower edge
    islandEndcapSuperClusterProducer = cms.string('islandSuperClusters'),
    nbinHist = cms.int32(200),              # number of histogram bins
    correctedHybridSuperClusterCollection = cms.string(''),
    #
    # island clustering in barrel
    #
    islandBarrelBasicClusterProducer = cms.string('islandBasicClusters'),
    islandEndcapBasicClusterShapes = cms.string('islandEndcapShape'),
    #
    # hybrid clustering in barrel
    #
    hybridSuperClusterProducer = cms.string('hybridSuperClusters'),
    islandBarrelBasicClusterCollection = cms.string('islandBarrelBasicClusters'),
    correctedIslandEndcapSuperClusterCollection = cms.string('')
)
| StarcoderdataPython |
97045 | """
Reversed from binary_search.
Given a item, if the item in the list, return its index.
If not in the list, return the index of the first item that is larger than the the given item
If all items in the list are less then the given item, return -1
"""
def binary_search_fuzzy(alist: list, item) -> int:
    """Locate *item* in the sorted list *alist*.

    Returns the index of *item* if present; otherwise the index of the first
    element greater than *item*, or -1 when every element is smaller.
    """
    lo, hi = 0, len(alist) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        probe = alist[mid]
        if probe == item:
            return mid
        if probe < item:
            lo = mid + 1
        else:
            hi = mid - 1
    # `lo` is the insertion point; past the end means no larger element exists.
    return lo if lo < len(alist) else -1
if __name__ == '__main__':
    a = [1, 3, 7, 9, 11, 13, 15]
    # 16 exceeds every element, so this demo prints -1.
    print(binary_search_fuzzy(a, 16))
| StarcoderdataPython |
1715185 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.webdriver.chrome.options import Options
import pandas as pd
import os
import datetime
import time
# Path to the ChromeDriver executable (expected in the working directory).
PATH = "chromedriver.exe"
driver = webdriver.Chrome(PATH)

# Universe of securities; expects columns 'code' (BSE scrip code) and 'symbol'.
df = pd.read_csv('equity40.csv')
security_list = df['code'].to_list()
def choose_date(day, month, year):
    """Select a date in the currently open jQuery datepicker widget.

    :param day: day-of-month link text, e.g. "1"
    :param month: month name exactly as shown in the picker, e.g. "Jan"
    :param year: four-digit year string, e.g. "2011"
    """
    year_ = Select(driver.find_element_by_class_name("ui-datepicker-year"))
    year_.select_by_visible_text(year)
    month_ = Select(driver.find_element_by_class_name("ui-datepicker-month"))
    month_.select_by_visible_text(month)
    day_ = driver.find_element_by_link_text(day)
    day_.click()
def download_data(company_name, from_day, from_month, from_year, to_day, to_month, to_year):
    """Download daily historical price data for one security from the BSE site.

    Navigates the stock-price-history page, searches for *company_name*,
    fills the from/to date pickers, submits, and clicks the CSV download.
    """
    driver.get("https://www.bseindia.com/markets/equity/EQReports/StockPrcHistori.aspx")
    search = driver.find_element_by_name("ctl00$ContentPlaceHolder1$smartSearch")
    search.send_keys(company_name)
    search.send_keys(Keys.RETURN)
    # Give the autocomplete/page a moment to settle before using the pickers.
    time.sleep(1)
    from_input = driver.find_element_by_name("ctl00$ContentPlaceHolder1$txtFromDate")
    from_input.click()
    choose_date(from_day, from_month, from_year)
    to_input = driver.find_element_by_name("ctl00$ContentPlaceHolder1$txtToDate")
    to_input.click()
    choose_date(to_day, to_month, to_year)
    submit_btn = driver.find_element_by_name("ctl00$ContentPlaceHolder1$btnSubmit")
    submit_btn.click()
    download_btn = driver.find_element_by_id("ContentPlaceHolder1_btnDownload1")
    download_btn.click()
def download_monthly_data(company_name, from_month, from_year):
    """Download monthly historical price data for one security from the BSE site.

    Switches the page to 'Monthly' mode, searches *company_name*, selects the
    starting month/year, submits, and clicks the CSV download.
    """
    driver.get("https://www.bseindia.com/markets/equity/EQReports/StockPrcHistori.aspx")
    month_active = driver.find_element_by_id("ContentPlaceHolder1_rdbMonthly")
    month_active.click()
    search = driver.find_element_by_name("ctl00$ContentPlaceHolder1$smartSearch")
    search.send_keys(company_name)
    search.send_keys(Keys.RETURN)
    # Give the page a moment to settle before driving the dropdowns.
    time.sleep(1)
    select_month = Select(driver.find_element_by_id("ContentPlaceHolder1_cmbMonthly"))
    select_month.select_by_visible_text(from_month)
    select_year = Select(driver.find_element_by_id("ContentPlaceHolder1_cmbMYear"))
    select_year.select_by_visible_text(from_year)
    submit_btn = driver.find_element_by_name("ctl00$ContentPlaceHolder1$btnSubmit")
    submit_btn.click()
    time.sleep(1)
    download_btn = driver.find_element_by_id("ContentPlaceHolder1_btnDownload1")
    download_btn.click()
def rename_files(security, i):
    """Move the downloaded daily CSV for *security* into BSE/daily/,
    renamed to the matching symbol from row *i* of the ``df`` table.

    On failure, waits briefly and prints the source path so the batch
    run can continue with the remaining files.
    """
    src = "C:/Users/kdivy/Downloads/" + str(security) + ".csv"
    try:
        os.rename(src, "BSE/daily/" + df['symbol'][i] + ".csv")
    # Was a bare `except:`; Exception keeps the best-effort behaviour while
    # no longer swallowing KeyboardInterrupt/SystemExit.
    except Exception:
        time.sleep(1)
        print(src)
def rename_monthly_files(security, i):
    """Move the downloaded monthly CSV for *security* into BSE/monthly/,
    renamed to the matching symbol from row *i* of the ``df`` table.

    On failure, waits briefly and prints the source path so the batch
    run can continue with the remaining files.
    """
    src = "C:/Users/kdivy/Downloads/" + str(security) + ".csv"
    try:
        os.rename(src, "BSE/monthly/" + df['symbol'][i] + ".csv")
    # Was a bare `except:`; Exception keeps the best-effort behaviour while
    # no longer swallowing KeyboardInterrupt/SystemExit.
    except Exception:
        time.sleep(1)
        print(src)
# Download monthly data for all 40 securities, throttled to one request/second.
for i in range(0, 40):
    # download_data(security_list[i], "1", "Jan", "2011", "1", "Jan", "2021")
    download_monthly_data(security_list[i], "Jan", "2011")
    time.sleep(1)

# Move the downloaded CSVs from the browser download folder into BSE/monthly/.
for i in range(0, 40):
    # rename_files(security_list[i], i)
    rename_monthly_files(security_list[i], i)
1603820 | """
Class for parsing and converting the discord flavor of Markdown.
Part of the Gabby Gums Discord Logger.
"""
import logging
from typing import TYPE_CHECKING, Optional, Dict, List, Union, Tuple, NamedTuple, Match
import regex as re
from jinja2 import escape
log = logging.getLogger(__name__)
class DiscordMarkdown:
    """Converts Discord-flavoured Markdown to HTML via regex substitution.

    The entry point is :meth:`markdown`; the other classmethods each handle
    one construct (code blocks, spoilers, bold, links, emoji, ...) and are
    applied in a fixed order that mirrors Discord's own parsing precedence.
    """

    # TODO: Consider optimising by not using lazy quantifiers: https://www.rexegg.com/regex-quantifiers.html
    codeblock_pattern = re.compile(r"(?P<stag>```)(?:(?P<lang>[a-zA-Z0-9-]+?)\n+)?\n*(?P<content>[\s\S]+?)\n*(?P<etag>```)")  # Multiline.
    inlinecodeblock_pattern = re.compile(r"(?<!\\)(`)(?P<content>[^`]*?[^`])(\1)(?!`)")
    strikethrough_pattern = re.compile(r"(?<!\\)~~(?P<content>.+?)(?<!\\)~~(?!_)")  # Singleline.
    spoiler_pattern = re.compile(r"(?<!\\)\|\|(?P<content>.+?)(?<!\\)\|\|")  # Singleline.
    bold_pattern = re.compile(r"(?<!\\)\*\*(?P<content>.+?)(?<!\\)\*\*")  # Singleline.
    underline_pattern = re.compile(r"(?<!\\)__(?P<content>.+?)(?<!\\)__")  # Singleline.
    italics_pattern = re.compile(r"(?:(?<!\\)\*(?P<s_content>.+?)(?<!\\)\*)|(?:(?<!\\)_(?P<u_content>.+?)(?<!\\)_)")
    # Group 1: ">>>" quotes rest of message; group 2: "> " quotes one line.
    blockQuote_pattern = re.compile(r"^(?: *>>> ([\s\S]*))|^(?: *> ([^\n]*\n*))", flags=re.MULTILINE)
    symbols_pattern = re.compile(r"(?P<content>[^a-zA-Z0-9\s])")
    escaped_symbols_pattern = re.compile(r"\\(?P<content>[^a-zA-Z0-9\s])")
    suppresed_embed_link_pattern = re.compile(r"<(?P<content>http[s]?:\/\/\S+?)>")
    web_link_pattern = re.compile(r"\[(.+)\]\([^\n\S]*?(http[s]?:\/\/[\S]+?)[^\n\S]*?\)|(http[s]?:\/\/[\S]+)")  # TODO: Consider optimising by having normal links match first.
    nitro_emote_pattern = re.compile(r"<(?P<animated>a)?:(?P<name>[0-9a-zA-Z_]{2,32}):(?P<id>[0-9]{15,21})>")
    womboji_pattern = re.compile(r"([a-zA-Z0-9!-;=?-~\s]*)?\s*<a?:[0-9a-zA-Z_]{2,32}:[0-9]{15,21}>\s*([a-zA-Z0-9!-;=?-~\s]*)?")  # http://www.asciitable.com/

    @classmethod
    def escape_symbols_repl(cls, m: Match) -> str:
        """Substitution helper: prefix the matched symbol with a backslash."""
        content = m.group('content')
        return "\\"+content

    @classmethod
    def escape_symbols(cls, _input: str) -> str:
        """Adds an extra escape char to every escapable character. Used for code blocks so the escape characters will remain at the end."""
        output = cls.symbols_pattern.sub(cls.escape_symbols_repl, _input)
        return output

    @classmethod
    def remove_escaped_symbol_repl(cls, m: Match) -> str:
        """Substitution helper: return the symbol without its backslash."""
        content = m.group('content')
        return content

    @classmethod
    def remove_escaped_symbol(cls, _input: str) -> str:
        """Removes the escape characters."""
        output = cls.escaped_symbols_pattern.sub(cls.remove_escaped_symbol_repl, _input)
        return output

    @classmethod
    def codeblock_repl(cls, m: Match) -> str:
        """Substitution helper: wrap a ``` block in a <div>, noting the language."""
        e_tag = "</div>"
        if m.group("lang") is not None:
            s_tag = f'<div class="pre pre--multiline language-{m.group("lang")}">'
        else:
            s_tag = '<div class="pre pre--multiline nohighlight">'
        # Clean up the content
        content = m.group('content')
        content = cls.escape_symbols(content)
        replacement = f"{s_tag}{content}{e_tag}"
        return replacement

    @classmethod
    def codeblock(cls, _input: str) -> str:
        """Convert ```fenced``` code blocks to HTML."""
        output = cls.codeblock_pattern.sub(cls.codeblock_repl, _input)
        return output

    @classmethod
    def inline_codeblock_repl(cls, m: Match):
        """Substitution helper: wrap `inline code` in an inline <span>."""
        s_tag = '<span class="pre pre--inline">'
        e_tag = '</span>'
        # Clean up the content
        content = m.group('content')  # Markup(match.group('content')).striptags()
        content = cls.escape_symbols(content)
        replacement = f"{s_tag}{content}{e_tag}"
        return replacement

    @classmethod
    def inline_codeblock(cls, _input: str) -> str:
        """Convert `inline code` spans to HTML."""
        output = cls.inlinecodeblock_pattern.sub(cls.inline_codeblock_repl, _input)
        return output

    # region foldpls
    @classmethod
    def spoiler(cls, _input: str) -> str:
        """Convert ||spoiler|| markup to HTML."""
        s_tag = '<span class="spoiler">'
        e_tag = "</span>"
        repl = r"{}\g<content>{}".format(s_tag, e_tag)
        output = cls.spoiler_pattern.sub(repl, _input)
        return output

    @classmethod
    def bold(cls, _input: str) -> str:
        """Convert **bold** markup to HTML."""
        first_tag = '<strong>'
        end_tag = "</strong>"
        repl = r"{}\g<content>{}".format(first_tag, end_tag)
        output = cls.bold_pattern.sub(repl, _input)
        return output

    @classmethod
    def underline(cls, _input: str) -> str:
        """Convert __underline__ markup to HTML."""
        first_tag = '<u>'
        end_tag = "</u>"
        repl = r"{}\g<content>{}".format(first_tag, end_tag)
        output = cls.underline_pattern.sub(repl, _input)
        return output

    @classmethod
    def italics_repl(cls, m: Match) -> Optional[str]:
        """Substitution helper for both *star* and _underscore_ italics."""
        s_tag = '<em>'
        e_tag = "</em>"
        if m.group("s_content") is not None:
            replacement = f"{s_tag}{m.group('s_content')}{e_tag}"
        elif m.group("u_content") is not None:
            replacement = f"{s_tag}{m.group('u_content')}{e_tag}"
        else:
            log.warning("No content match in italics_repl")
            replacement = None
        return replacement

    @classmethod
    def italics(cls, _input: str) -> str:
        """Convert *italic* / _italic_ markup to HTML."""
        output = cls.italics_pattern.sub(cls.italics_repl, _input)
        return output

    @classmethod
    def strikethrough(cls, _input: str) -> str:
        """Convert ~~strikethrough~~ markup to HTML."""
        first_tag = "<s>"
        end_tag = "</s>"
        repl = r"{}\g<content>{}".format(first_tag, end_tag)
        output = cls.strikethrough_pattern.sub(repl, _input)
        return output
    # endregion

    @classmethod
    def blockquote_repl(cls, m: Match) -> str:
        """Substitution helper: wrap a `>`/`>>>` quote in a quote <div>."""
        s_tag = '<div class="quote">'
        e_tag = "</div>"
        if m.group(1) is not None:  # Triple
            replacement = f"{s_tag}{m.group(1)}{e_tag}"
            return replacement
        elif m.group(2) is not None:  # Single
            content = m.group(2).replace('\n', '')  # Get the content and strip the newline
            replacement = f"{s_tag}{content}{e_tag}"
            return replacement
        else:
            pass

    @classmethod
    def blockquote(cls, _input: str) -> str:
        """Convert > / >>> block quotes to HTML."""
        output = cls.blockQuote_pattern.sub(cls.blockquote_repl, _input)
        return output

    @classmethod
    def remove_suppressed_embed_arrows(cls, _input: str) -> str:
        """Drop the <...> wrapper Discord uses to suppress link embeds."""
        repl = r"\g<content>"
        output = cls.suppresed_embed_link_pattern.sub(repl, _input)
        return output

    @classmethod
    def linkify_repl(cls, m: Match) -> str:
        """Substitution helper: build an <a> for bare or [text](url) links."""
        s_tag = '<a href="'
        m_tag = '">'
        e_tag = "</a>"
        if m.group(3) is not None:  # Normal Web Link
            replacement = f"{s_tag}{m.group(3)}{m_tag}{m.group(3)}{e_tag}"
            return replacement
        elif m.group(2) is not None:  # Inline link
            replacement = f"{s_tag}{m.group(2)}{m_tag}{m.group(1)}{e_tag}"
            return replacement
        else:
            log.warning("No linkify_repl match???")

    @classmethod
    def linkify(cls, _input: str) -> str:
        """Convert URLs and [text](url) links to HTML anchors."""
        output = cls.web_link_pattern.sub(cls.linkify_repl, _input)
        return output

    @classmethod
    def emojify_repl(cls, m: Match) -> str:
        """Substitution helper: render a custom emote as a small <img>."""
        # animated, name, id
        s_tag = '<img class="emoji" alt="'
        m1_tag = '" title="'
        m2_tag = '" src="'
        e_tag = '">'
        if m.group('animated'):
            # animated emoji
            emoji_url = f"https://cdn.discordapp.com/emojis/{m.group('id')}.gif"
        else:
            emoji_url = f"https://cdn.discordapp.com/emojis/{m.group('id')}.png"
        replacement = f"{s_tag}{m.group('name')}{m1_tag}{m.group('name')}{m2_tag}{emoji_url}{e_tag}"
        return replacement

    @classmethod
    def wombojify_repl(cls, m: Match) -> str:
        """Substitution helper: render a custom emote as a large <img>."""
        s_tag = '<img class="emoji emoji--large" alt="'
        m1_tag = '" title="'
        m2_tag = '" src="'
        e_tag = '">'
        if m.group('animated'):
            # animated emoji
            emoji_url = f"https://cdn.discordapp.com/emojis/{m.group('id')}.gif"
        else:
            emoji_url = f"https://cdn.discordapp.com/emojis/{m.group('id')}.png"
        replacement = f"{s_tag}{m.group('name')}{m1_tag}{m.group('name')}{m2_tag}{emoji_url}{e_tag}"
        return replacement

    @classmethod
    def emojify(cls, _input: str, original_txt: str) -> str:
        """Replace custom emote codes with <img> tags.

        Emotes are rendered large ("womboji") only when the original message
        contained nothing but emotes and whitespace, matching Discord.
        """
        womboji = True
        # check if we need big or small emoji:
        womboji_matchs: List[Tuple[str, str]] = cls.womboji_pattern.findall(original_txt)
        for match in womboji_matchs:
            if match[0] != '' or match[1] != '':
                womboji = False
                break
        if womboji:
            output = cls.nitro_emote_pattern.sub(cls.wombojify_repl, _input)
        else:
            output = cls.nitro_emote_pattern.sub(cls.emojify_repl, _input)
        return output

    @classmethod
    def markdown(cls, _input: str) -> str:
        """Convert a full message from Discord Markdown to HTML.

        The substitution order matters: code blocks are handled first (their
        contents must not be re-parsed) and escape removal must come last.
        """
        output = _input
        # First ensure the input is "safe"
        output = escape(_input)
        # CODE BLOCKS MUST BE FIRST
        output = cls.codeblock(output)  # Codeblock MUST be before inline codeblocks
        output = cls.inline_codeblock(output)  # inline Codeblock MUST be next
        output = cls.remove_suppressed_embed_arrows(output)
        output = cls.blockquote(output)
        output = cls.spoiler(output)
        output = cls.strikethrough(output)
        output = cls.bold(output)
        output = cls.underline(output)
        output = cls.italics(output)
        output = cls.linkify(output)
        output = cls.emojify(output, _input)
        # UNESCAPING MUST BE LAST
        output = cls.remove_escaped_symbol(output)
        return output
# Shared module-level instance; presumably imported by rendering code — TODO confirm.
markdown = DiscordMarkdown()
| StarcoderdataPython |
# Quest script: advance a first-job character (2300) to the next job (2310).
# NOTE(review): indentation reconstructed — assumes the quest is completed only
# on a successful advance, while dispose() always runs; confirm against the
# original script.
if sm.getChr().getJob() == 2300:
    sm.jobAdvance(2310)
    sm.addSP(2)  # grant skill points for the new job
    sm.completeQuest(parentID)
sm.dispose()  # release the script manager
| StarcoderdataPython |
100535 | import numpy as np
from sklearn import model_selection
import typing as t
from copy import copy
from ..mltypes import RandomState
from ..data.dataset import Dataset
class DataSplit:
    """Strategy interface that yields (train, test) Dataset pairs."""

    def get_splits(self, dataset: Dataset) -> t.Generator[t.Tuple[Dataset, Dataset], None, None]:
        """Yield (train, test) Dataset pairs; implemented by subclasses."""
        raise NotImplementedError
class TrainTestSplit(DataSplit):
    """Single random train/test partition of a Dataset's ids."""

    def __init__(self,
                 train_size: t.Union[float, int] = 0.7,
                 random_state: RandomState = None,
                 shuffle: bool = True
                 ):
        # Stored verbatim and forwarded to sklearn's train_test_split.
        self.train_size = train_size
        self.random_state = random_state
        self.shuffle = shuffle

    def get_splits(self, dataset: Dataset) -> t.Generator[t.Tuple[Dataset, Dataset], None, None]:
        """Yield exactly one (train, test) pair of shallow Dataset copies."""
        train_ids, test_ids = model_selection.train_test_split(
            dataset.ids,
            train_size=self.train_size,
            random_state=self.random_state,
            shuffle=self.shuffle,
        )
        subsets = []
        for ids in (train_ids, test_ids):
            subset = copy(dataset)
            subset.ids = ids
            subsets.append(subset)
        yield subsets[0], subsets[1]
| StarcoderdataPython |
161758 | <gh_stars>1-10
""" This file is create and managed by <NAME>
----------------------------------------------
It can be use only for education purpose
"""
# List modification demo: lists may hold mixed types.
mix_list = [1, 'Programmer', 5.0, True]
print(mix_list)
# Lists are mutable: items can be re-assigned in place.
mix_list[0] = 2
print(mix_list)
# Adding an item to the end of the list.
mix_list.append('Python')
print(mix_list)
# Shortcut for appending: in-place concatenation.
mix_list += ['Solo']
print(mix_list)
# Inserting an item at a chosen position.
mix_list.insert(1, 'I am')
print(mix_list)
# Deleting a list item by index.
del mix_list[0]
print(mix_list)
| StarcoderdataPython |
180698 | <filename>challenges/Azurad/poller/for-release/machine.py
#!/usr/bin/env python
from generator.actions import Actions
import numpy as np
import random
import struct
# Number of value slots ("registers") the service exposes.
MAX_ITEMS = 1000

# Big-endian opcodes of the challenge binary's command protocol.
INSERT = 0x2064
RETR = 0xA097
ADD = 0x2C56
SUB = 0x973D
MUL = 0xE925
SUM = 0x71B8
PROD = 0x1510
FLAG = 0x4141
RAND = 0xC4C5
QUIT = 0xA80B
# Opcodes that must never be emitted as plain echo payloads.
RESERVED = [INSERT, RETR, ADD, SUB, MUL, SUM, PROD, FLAG, RAND, QUIT]
def random_r():
    """Pick a register index: rarely (1 in 11) from the full range, else small."""
    upper = MAX_ITEMS - 1 if random.randint(0, 10) == 0 else 20
    return random.randint(0, upper)
class TemplateGenerator(Actions):
    """Poller that mirrors the CB's register machine.

    ``self.state['r']`` shadows the service's register file (index -> uint32)
    and ``self.state['rand']`` shadows its PRNG (state, magic-page index), so
    every command's expected effect can be computed locally and verified.
    """

    def start(self):
        """Initialise mirrored state and consume the service banner."""
        #self.delay(100)
        self.state['r'] = {}
        self.state['rand'] = (np.uint32(0x4347c000), 0)
        self.read(length=8, expect=struct.pack('>II', 0xdeadbeef, 0x4347c000))

    def menu(self):
        """No-op node used only for state-machine sequencing."""
        pass

    def insert(self):
        """Store a random uint32 in a random register."""
        r = random_r()
        value = np.uint32(random.randint(0, 0xFFFFFFFF))
        self.state['r'][r] = value
        self.write(struct.pack('>HII', INSERT, r, value))

    def retr(self):
        """Read back one known register and verify its value."""
        r = random.choice(self.state['r'].keys())
        self.write(struct.pack('>HI', RETR, r))
        self.read(length=4, expect=struct.pack('>I', self.state['r'][r]))

    def retr_all(self):
        """Read back and verify every register we have written."""
        for r in self.state['r'].keys():
            self.write(struct.pack('>HI', RETR, r))
            self.read(length=4, expect=struct.pack('>I', self.state['r'][r]))

    def add(self):
        """r[dst] = r[a] + r[b] (uint32 wraparound via numpy)."""
        dst = random_r()
        a = random.choice(self.state['r'].keys())
        b = random.choice(self.state['r'].keys())
        self.state['r'][dst] = self.state['r'][a] + self.state['r'][b]
        self.write(struct.pack('>HIII', ADD, a, b, dst))

    def sub(self):
        """r[dst] = r[a] - r[b] (uint32 wraparound via numpy)."""
        dst = random_r()
        a = random.choice(self.state['r'].keys())
        b = random.choice(self.state['r'].keys())
        self.state['r'][dst] = self.state['r'][a] - self.state['r'][b]
        self.write(struct.pack('>HIII', SUB, a, b, dst))

    def mul(self):
        """r[dst] = r[a] * r[b] (uint32 wraparound via numpy)."""
        dst = random_r()
        a = random.choice(self.state['r'].keys())
        b = random.choice(self.state['r'].keys())
        self.state['r'][dst] = self.state['r'][a] * self.state['r'][b]
        self.write(struct.pack('>HIII', MUL, a, b, dst))

    def sum(self):
        """Sum a contiguous run of populated registers into a new register."""
        start = random.choice(self.state['r'].keys())
        end = start
        x = 0
        # Walk forward while consecutive indices are populated.
        while end in self.state['r']:
            x += self.state['r'][end]
            end += 1
        r = random_r()
        self.state['r'][r] = np.uint32(x)
        self.write(struct.pack('>HIII', SUM, start, end, r))

    def prod(self):
        """Multiply a contiguous run of populated registers into a new register."""
        start = random.choice(self.state['r'].keys())
        end = start
        x = 1
        while end in self.state['r']:
            x *= self.state['r'][end]
            end += 1
        r = random_r()
        self.state['r'][r] = np.uint32(x)
        self.write(struct.pack('>HIII', PROD, start, end, r))

    def flag(self):
        """Exercise the password-gated flag-page read.

        Half the time sends a wrong password (no further arguments expected);
        otherwise reads a flag-page word into a mirrored register.
        """
        pw = struct.unpack('<I', self.magic_page[0:4])[0]
        if self.chance(0.5):
            pw = random.randint(0, 0xffffffff)
            self.write(struct.pack('>HI', FLAG, pw))
            return
        r = random_r()
        i = random.randint(0, 0xffffffff)
        # Index wraps within the 4096-byte magic page (0x3ff word mask).
        val = struct.unpack('<I', self.magic_page[(i & 0x3ff)*4 : (i & 0x3ff)*4 + 4])[0]
        self.state['r'][r] = np.uint32(val)
        self.write(struct.pack('>HIII', FLAG, pw, r, i))

    def rand(self):
        """Step the mirrored magic-page-seeded PRNG and store its output."""
        state, idx = self.state['rand']
        state ^= struct.unpack('<H', self.magic_page[idx*2:idx*2+2])[0]
        state ^= struct.unpack('<H', self.magic_page[idx*2+4:idx*2+6])[0]
        idx = (idx + 4) & 0x3ff
        state *= 1234567
        state = np.uint32(state)
        self.state['rand'] = (state, idx)
        r = random_r()
        self.write(struct.pack('>HI', RAND, r))
        self.state['r'][r] = state

    def echo(self):
        """Send a non-opcode 16-bit value and expect it echoed back as 32 bits."""
        value = None
        while value is None:
            value = random.randint(0, 0xFFFF)
            if value in RESERVED:
                value = None
        self.write(struct.pack('>H', value))
        self.read(length=4, expect=struct.pack('>I', value))

    def quit(self):
        """Verify all registers one last time, then terminate the session."""
        self.retr_all()
        self.write(struct.pack('>H', QUIT))
| StarcoderdataPython |
3250800 | from typing import Any, Union
from bot.event import Event, EventType
from pydantic.utils import deep_update
def event(
    event_data: Union[str, dict[str, Any]],
    event_type: EventType = EventType.NEW_MESSAGE,
) -> Event:
    """Build a test Event, overlaying *event_data* onto default chat fields.

    A bare string is shorthand for a message with that text; a dict is
    deep-merged over the defaults.
    """
    base = {
        "chat": {"chatId": "test"},
        "from": "<EMAIL>",
        "msgId": 999,
    }
    if isinstance(event_data, str):
        # for simple cases
        base["text"] = event_data
    else:
        base = deep_update(base, event_data)
    return Event(event_type, base)
def part(type: str, **payload: Any) -> dict[str, Any]:
    """Build a message-part dict with the given type and payload fields."""
    return dict(type=type, payload=payload)
def mention(user_id: int) -> dict[str, Any]:
    """Build a mention message part for the given user id."""
    return part("mention", userId=user_id)
def odesli_response() -> dict[str, Any]:
    """Return a canned Odesli (song.link) API payload for use in tests."""
    return {
        "entityUniqueId": "ITUNES_SONG::1443109064",
        "userCountry": "US",
        "pageUrl": "https://song.link/us/i/1443109064",
        "entitiesByUniqueId": {
            "ITUNES_SONG::1443109064": {
                "id": "1443109064",
                "type": "song",
                "title": "Kitchen",
                "artistName": "<NAME>",
                "thumbnailUrl": "https://is4-ssl.mzstatic.com/image/thumb/Music118/v4/ac/2c/60/ac2c60ad-14c3-a8b2-d962-dc08de2da546/source/512x512bb.jpg",
                "thumbnailWidth": 512,
                "thumbnailHeight": 512,
                "apiProvider": "itunes",
                "platforms": ["appleMusic", "itunes"],
            },
        },
        "linksByPlatform": {
            "appleMusic": {
                "url": "https://music.apple.com/us/album/kitchen/1443108737?i=1443109064&uo=4&app=music&ls=1&at=1000lHKX",
                "nativeAppUriMobile": "music://music.apple.com/us/album/kitchen/1443108737?i=1443109064&uo=4&app=music&ls=1&at=1000lHKX",
                "nativeAppUriDesktop": "itms://music.apple.com/us/album/kitchen/1443108737?i=1443109064&uo=4&app=music&ls=1&at=1000lHKX",
                "entityUniqueId": "ITUNES_SONG::1443109064",
            },
            "spotify": {
                "url": "https://open.spotify.com/track/0Jcij1eWd5bDMU5iPbxe2i",
                "nativeAppUriDesktop": "spotify:track:0Jcij1eWd5bDMU5iPbxe2i",
                "entityUniqueId": "SPOTIFY_SONG::0Jcij1eWd5bDMU5iPbxe2i",
            },
            "youtube": {
                "url": "https://www.youtube.com/watch?v=w3LJ2bDvDJs",
                "entityUniqueId": "YOUTUBE_VIDEO::w3LJ2bDvDJs",
            },
        },
    }
}
| StarcoderdataPython |
1685919 | <filename>improver_tests/nbhood/recursive_filter/test_RecursiveFilter.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the nbhood.RecursiveFilter plugin."""
import unittest
from datetime import timedelta
import iris
import numpy as np
from iris.cube import Cube
from iris.tests import IrisTest
from improver.nbhood.recursive_filter import RecursiveFilter
from improver.synthetic_data.set_up_test_cubes import (
add_coordinate,
set_up_variable_cube,
)
from improver.utilities.cube_manipulation import enforce_coordinate_ordering
from improver.utilities.pad_spatial import pad_cube_with_halo
from improver.utilities.warnings_handler import ManageWarnings
def _mean_points(points):
"""Create an array of the mean of adjacent points in original array"""
return np.array((points[:-1] + points[1:]) / 2, dtype=np.float32)
class Test_RecursiveFilter(IrisTest):
    """Test class for the RecursiveFilter tests, setting up cubes.

    Builds a 1x5x5 data cube plus a family of x/y smoothing-coefficient
    cubes: a valid pair, an alternative (halved-y) pair, and deliberately
    broken variants (wrong name, wrong shape, wrong coordinate points)
    used to exercise validation failures.
    """

    def setUp(self):
        """Create test cubes."""
        self.iterations = 1

        # Generate data cube with dimensions 1 x 5 x 5
        data = np.array(
            [
                [
                    [0.00, 0.00, 0.10, 0.00, 0.00],
                    [0.00, 0.00, 0.25, 0.00, 0.00],
                    [0.10, 0.25, 0.50, 0.25, 0.10],
                    [0.00, 0.00, 0.25, 0.00, 0.00],
                    [0.00, 0.00, 0.10, 0.00, 0.00],
                ]
            ],
            dtype=np.float32,
        )
        self.cube = set_up_variable_cube(
            data, name="precipitation_amount", units="kg m^-2 s^-1"
        )
        self.x_name = "smoothing_coefficient_x"
        self.y_name = "smoothing_coefficient_y"

        # Generate smoothing_coefficients_cubes with correct dimensions 5 x 4;
        # coefficient cubes sit on cell edges, hence the midpoint coordinates.
        smoothing_coefficients_cube_x = set_up_variable_cube(
            np.full((5, 4), 0.5, dtype=np.float32), name=self.x_name
        )
        mean_x_points = _mean_points(
            smoothing_coefficients_cube_x.coord(axis="y").points
        )
        smoothing_coefficients_cube_x.coord(axis="x").points = mean_x_points
        smoothing_coefficients_cube_y = set_up_variable_cube(
            np.full((4, 5), 0.5, dtype=np.float32), name=self.y_name
        )
        mean_y_points = _mean_points(
            smoothing_coefficients_cube_y.coord(axis="x").points
        )
        smoothing_coefficients_cube_y.coord(axis="y").points = mean_y_points
        self.smoothing_coefficients = [
            smoothing_coefficients_cube_x,
            smoothing_coefficients_cube_y,
        ]

        # Generate an alternative y smoothing_coefficients_cube with correct dimensions 5 x 4
        smoothing_coefficients_cube_y_half = smoothing_coefficients_cube_y * 0.5
        smoothing_coefficients_cube_y_half.rename(self.y_name)
        self.smoothing_coefficients_alternative = [
            smoothing_coefficients_cube_x,
            smoothing_coefficients_cube_y_half,
        ]

        # Generate smoothing_coefficients_cube with incorrect name
        smoothing_coefficients_wrong_name = smoothing_coefficients_cube_x.copy()
        smoothing_coefficients_wrong_name.rename("air_temperature")
        self.smoothing_coefficients_wrong_name = [
            smoothing_coefficients_wrong_name,
            smoothing_coefficients_cube_y,
        ]

        # Generate smoothing_coefficients_cubes with incorrect dimensions 6 x 6
        smoothing_coefficients_cube_wrong_x = set_up_variable_cube(
            np.full((6, 6), 0.5, dtype=np.float32), name=self.x_name
        )
        smoothing_coefficients_cube_wrong_y = set_up_variable_cube(
            np.full((6, 6), 0.5, dtype=np.float32), name=self.y_name
        )
        self.smoothing_coefficients_wrong_dimensions = [
            smoothing_coefficients_cube_wrong_x,
            smoothing_coefficients_cube_wrong_y,
        ]

        # Generate smoothing_coefficients_cubes with incorrect coordinate values
        smoothing_coefficients_cube_wrong_x_points = (
            smoothing_coefficients_cube_x.copy()
        )
        smoothing_coefficients_cube_wrong_x_points.coord(axis="x").points = (
            smoothing_coefficients_cube_wrong_x_points.coord(axis="x").points + 10
        )
        smoothing_coefficients_cube_wrong_y_points = (
            smoothing_coefficients_cube_y.copy()
        )
        smoothing_coefficients_cube_wrong_y_points.coord(axis="y").points = (
            smoothing_coefficients_cube_wrong_y_points.coord(axis="y").points + 10
        )
        self.smoothing_coefficients_wrong_points = [
            smoothing_coefficients_cube_wrong_x_points,
            smoothing_coefficients_cube_wrong_y_points,
        ]
class Test__init__(Test_RecursiveFilter):
    """Test plugin initialisation."""

    def test_basic(self):
        """Test using the default arguments."""
        result = RecursiveFilter()
        self.assertIsNone(result.iterations)
        self.assertEqual(result.edge_width, 15)

    def test_iterations(self):
        """Test when iterations value less than unity is given (invalid)."""
        iterations = 0
        msg = "Invalid number of iterations: must be >= 1: 0"
        with self.assertRaisesRegex(ValueError, msg):
            RecursiveFilter(
                iterations=iterations, edge_width=1,
            )

    @ManageWarnings(record=True)
    def test_iterations_warn(self, warning_list=None):
        """Test when the iteration value is more than 3 it warns."""
        iterations = 5
        warning_msg = (
            "More than two iterations degrades the conservation"
            "of probability assumption."
        )
        RecursiveFilter(iterations=iterations)
        # Check both that a UserWarning was raised and that its text matches.
        self.assertTrue(any(item.category == UserWarning for item in warning_list))
        self.assertTrue(any(warning_msg in str(item) for item in warning_list))
class Test__validate_coefficients(Test_RecursiveFilter):
    """Test the _validate_coefficients method"""

    def test_return_order(self):
        """Test that the coefficients cubes are returned in x, y order."""
        x, y = RecursiveFilter()._validate_coefficients(
            self.cube, self.smoothing_coefficients
        )
        self.assertEqual(x.name(), self.x_name)
        self.assertEqual(y.name(), self.y_name)
        # Order must be normalised even when the inputs arrive reversed.
        x, y = RecursiveFilter()._validate_coefficients(
            self.cube, self.smoothing_coefficients[::-1]
        )
        self.assertEqual(x.name(), self.x_name)
        self.assertEqual(y.name(), self.y_name)

    def test_smoothing_coefficients_wrong_name(self):
        """Test that an error is raised if the smoothing_coefficients_cube has
        an incorrect name"""
        msg = (
            "The smoothing coefficient cube name air_temperature does not "
            "match the expected name smoothing_coefficient_x"
        )
        with self.assertRaisesRegex(ValueError, msg):
            RecursiveFilter(edge_width=1)._validate_coefficients(
                self.cube, self.smoothing_coefficients_wrong_name
            )

    def test_smoothing_coefficients_mismatched_x_dimension(self):
        """Test that an error is raised if the x smoothing_coefficients_cube is
        of an incorrect shape compared to the data cube."""
        msg = (
            "The smoothing coefficients x dimension does not have the "
            "expected length or values"
        )
        with self.assertRaisesRegex(ValueError, msg):
            RecursiveFilter(edge_width=1)._validate_coefficients(
                self.cube, self.smoothing_coefficients_wrong_dimensions
            )

    def test_smoothing_coefficients_mismatched_x_points(self):
        """Test that an error is raised if the x smoothing_coefficients_cube
        has mismatched coordinate points compared to the data cube."""
        msg = (
            "The smoothing coefficients x dimension does not have the "
            "expected length or values"
        )
        with self.assertRaisesRegex(ValueError, msg):
            RecursiveFilter(edge_width=1)._validate_coefficients(
                self.cube, self.smoothing_coefficients_wrong_points
            )

    def test_smoothing_coefficients_exceed_max(self):
        """Test that an error is raised if any smoothing coefficient value
        exceeds the allowed maximum of 0.5."""
        self.smoothing_coefficients[0].data += 1.0
        msg = (
            "All smoothing_coefficient values must be less than 0.5. "
            "A large smoothing_coefficient value leads to poor "
            "conservation of probabilities"
        )
        with self.assertRaisesRegex(ValueError, msg):
            RecursiveFilter(edge_width=1)._validate_coefficients(
                self.cube, self.smoothing_coefficients
            )
class Test__pad_coefficients(Test_RecursiveFilter):
    """Test the _pad_coefficients method"""

    def test_padding_default(self):
        """Test that the returned smoothing_coefficients array is padded as
        expected with the default edge_width.
        Using default edge_width of 15 cells, which is doubled and applied to both
        sides of the array, so array should be padded with 15 * 4 extra rows/columns.
        """
        # Base coefficient grids are (5, 4) / (4, 5); 60 cells of padding on
        # each dimension gives (65, 64) / (64, 65).
        expected_shape_x = (65, 64)
        expected_shape_y = (64, 65)
        expected_result_x = np.full(expected_shape_x, 0.5)
        expected_result_y = np.full(expected_shape_y, 0.5)
        result_x, result_y = RecursiveFilter()._pad_coefficients(
            *self.smoothing_coefficients
        )
        self.assertIsInstance(result_x.data, np.ndarray)
        self.assertIsInstance(result_y.data, np.ndarray)
        self.assertArrayEqual(result_x.data, expected_result_x)
        self.assertArrayEqual(result_y.data, expected_result_y)
        self.assertEqual(result_x.shape, expected_shape_x)
        self.assertEqual(result_y.shape, expected_shape_y)

    def test_padding_set_edge_width(self):
        """Test that the returned smoothing_coefficients arrays are padded as
        expected with a set edge_width.
        Using an edge_width of 1 cell, which is doubled and applied to both
        sides of the array, so array should be padded with 1 * 4 extra rows/columns.
        """
        expected_shape_x = (9, 8)
        expected_shape_y = (8, 9)
        expected_result_x = np.full(expected_shape_x, 0.5)
        expected_result_y = np.full(expected_shape_y, 0.5)
        result_x, result_y = RecursiveFilter(edge_width=1)._pad_coefficients(
            *self.smoothing_coefficients
        )
        self.assertArrayEqual(result_x.data, expected_result_x)
        self.assertArrayEqual(result_y.data, expected_result_y)
        self.assertEqual(result_x.shape, expected_shape_x)
        self.assertEqual(result_y.shape, expected_shape_y)

    def test_padding_non_constant_values(self):
        """Test that the returned smoothing_coefficients array contains the
        expected values when padded symmetrically with non-constant smoothing
        coefficients.
        Using an edge_width of 1 cell, which is doubled and applied to both
        sides of the array, so array should be padded with 1 * 4 extra rows/columns.
        """
        expected_shape = (9, 8)
        expected_result = np.full(expected_shape, 0.5)
        # Symmetric padding mirrors the modified corner value into a 2x2 patch.
        expected_result[1:3, 1:3] = 0.25
        self.smoothing_coefficients[0].data[0, 0] = 0.25
        result, _ = RecursiveFilter(edge_width=1)._pad_coefficients(
            *self.smoothing_coefficients
        )
        self.assertArrayEqual(result.data, expected_result)
        self.assertEqual(result.shape, expected_shape)
class Test__recurse_forward(Test_RecursiveFilter):
    """Test the _recurse_forward method"""

    def test_first_axis(self):
        """Test that the returned _recurse_forward array has the expected
        type and result."""
        # Expected values after one forward pass along axis 0 (rows).
        expected_result = np.array(
            [
                [0.0000, 0.00000, 0.100000, 0.00000, 0.0000],
                [0.0000, 0.00000, 0.175000, 0.00000, 0.0000],
                [0.0500, 0.12500, 0.337500, 0.12500, 0.0500],
                [0.0250, 0.06250, 0.293750, 0.06250, 0.0250],
                [0.0125, 0.03125, 0.196875, 0.03125, 0.0125],
            ]
        )
        result = RecursiveFilter(edge_width=1)._recurse_forward(
            self.cube.data[0, :], self.smoothing_coefficients[1].data, 0
        )
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayAlmostEqual(result, expected_result)

    def test_second_axis(self):
        """Test that the returned _recurse_forward array has the expected
        type and result."""
        # Expected values after one forward pass along axis 1 (columns).
        expected_result = np.array(
            [
                [0.0, 0.000, 0.0500, 0.02500, 0.012500],
                [0.0, 0.000, 0.1250, 0.06250, 0.031250],
                [0.1, 0.175, 0.3375, 0.29375, 0.196875],
                [0.0, 0.000, 0.1250, 0.06250, 0.031250],
                [0.0, 0.000, 0.0500, 0.02500, 0.012500],
            ]
        )
        result = RecursiveFilter(edge_width=1)._recurse_forward(
            self.cube.data[0, :], self.smoothing_coefficients[0].data, 1
        )
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayAlmostEqual(result, expected_result)
class Test__recurse_backward(Test_RecursiveFilter):
    """Test the _recurse_backward method"""

    def test_first_axis(self):
        """Test that the returned _recurse_backward array has the expected
        type and result."""
        # Mirror image of the forward-pass expectation along axis 0.
        expected_result = np.array(
            [
                [0.0125, 0.03125, 0.196875, 0.03125, 0.0125],
                [0.0250, 0.06250, 0.293750, 0.06250, 0.0250],
                [0.0500, 0.12500, 0.337500, 0.12500, 0.0500],
                [0.0000, 0.00000, 0.175000, 0.00000, 0.0000],
                [0.0000, 0.00000, 0.100000, 0.00000, 0.0000],
            ]
        )
        result = RecursiveFilter(edge_width=1)._recurse_backward(
            self.cube.data[0, :], self.smoothing_coefficients[1].data, 0
        )
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayAlmostEqual(result, expected_result)

    def test_second_axis(self):
        """Test that the returned _recurse_backward array has the expected
        type and result."""
        # Mirror image of the forward-pass expectation along axis 1.
        expected_result = np.array(
            [
                [0.012500, 0.02500, 0.0500, 0.000, 0.0],
                [0.031250, 0.06250, 0.1250, 0.000, 0.0],
                [0.196875, 0.29375, 0.3375, 0.175, 0.1],
                [0.031250, 0.06250, 0.1250, 0.000, 0.0],
                [0.012500, 0.02500, 0.0500, 0.000, 0.0],
            ]
        )
        result = RecursiveFilter(edge_width=1)._recurse_backward(
            self.cube.data[0, :], self.smoothing_coefficients[0].data, 1
        )
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayAlmostEqual(result, expected_result)
class Test__run_recursion(Test_RecursiveFilter):
    """Test the _run_recursion method"""

    def test_return_type(self):
        """Test that the _run_recursion method returns an iris.cube.Cube."""
        edge_width = 1
        cube = iris.util.squeeze(self.cube)
        smoothing_coefficients_x, smoothing_coefficients_y = RecursiveFilter(
            edge_width=edge_width
        )._pad_coefficients(*self.smoothing_coefficients)
        # Halo width matches the padding applied to the coefficients.
        padded_cube = pad_cube_with_halo(cube, 2 * edge_width, 2 * edge_width)
        result = RecursiveFilter(edge_width=1)._run_recursion(
            padded_cube,
            smoothing_coefficients_x,
            smoothing_coefficients_y,
            self.iterations,
        )
        self.assertIsInstance(result, Cube)

    def test_result_basic(self):
        """Test that the _run_recursion method returns the expected value."""
        edge_width = 1
        cube = iris.util.squeeze(self.cube)
        smoothing_coefficients_x, smoothing_coefficients_y = RecursiveFilter(
            edge_width=edge_width
        )._pad_coefficients(*self.smoothing_coefficients)
        padded_cube = pad_cube_with_halo(cube, 2 * edge_width, 2 * edge_width)
        result = RecursiveFilter(edge_width=edge_width)._run_recursion(
            padded_cube,
            smoothing_coefficients_x,
            smoothing_coefficients_y,
            self.iterations,
        )
        # Spot-check the padded grid's central cell.
        expected_result = 0.12302627
        self.assertAlmostEqual(result.data[4][4], expected_result)

    def test_different_smoothing_coefficients(self):
        """Test that the _run_recursion method returns expected values when
        smoothing_coefficient values are different in the x and y directions"""
        edge_width = 1
        cube = iris.util.squeeze(self.cube)
        smoothing_coefficients_x, smoothing_coefficients_y = RecursiveFilter(
            edge_width=edge_width
        )._pad_coefficients(*self.smoothing_coefficients_alternative)
        padded_cube = pad_cube_with_halo(cube, 2 * edge_width, 2 * edge_width)
        result = RecursiveFilter(edge_width=edge_width)._run_recursion(
            padded_cube, smoothing_coefficients_x, smoothing_coefficients_y, 1
        )
        # slice back down to the source grid - easier to visualise!
        unpadded_result = result.data[2:-2, 2:-2]
        expected_result = np.array(
            [
                [0.01320939, 0.02454378, 0.04346254, 0.02469828, 0.01359563],
                [0.03405095, 0.06060188, 0.09870366, 0.06100013, 0.03504659],
                [0.0845406, 0.13908109, 0.18816182, 0.14006987, 0.08701254],
                [0.03405397, 0.06060749, 0.09871361, 0.06100579, 0.03504971],
                [0.01322224, 0.02456765, 0.04350482, 0.0247223, 0.01360886],
            ],
            dtype=np.float32,
        )
        self.assertArrayAlmostEqual(unpadded_result, expected_result)
class Test_process(Test_RecursiveFilter):
    """Test the process method."""

    # Test output from plugin returns expected values
    def test_return_type_and_shape(self):
        """Test that the RecursiveFilter plugin returns an iris.cube.Cube of
        the expected shape."""
        # Output data array should have same dimensions as input data array
        expected_shape = (1, 5, 5)
        plugin = RecursiveFilter(iterations=self.iterations,)
        result = plugin(self.cube, smoothing_coefficients=self.smoothing_coefficients,)
        self.assertIsInstance(result, Cube)
        # Note: the original asserted the shape twice; the duplicate line
        # has been removed.
        self.assertEqual(result.shape, expected_shape)

    def test_smoothing_coefficient_cubes(self):
        """Test that the RecursiveFilter plugin returns the correct data."""
        plugin = RecursiveFilter(iterations=self.iterations,)
        result = plugin(self.cube, smoothing_coefficients=self.smoothing_coefficients,)
        expected = 0.14994797
        self.assertAlmostEqual(result.data[0][2][2], expected)

    def test_smoothing_coefficient_cubes_masked_data(self):
        """Test that the RecursiveFilter plugin returns the correct data
        when a masked data cube.
        """
        plugin = RecursiveFilter(iterations=self.iterations,)
        mask = np.zeros(self.cube.data.shape)
        mask[0][3][2] = 1
        self.cube.data = np.ma.MaskedArray(self.cube.data, mask=mask)
        result = plugin(self.cube, smoothing_coefficients=self.smoothing_coefficients)
        expected = 0.184375
        self.assertAlmostEqual(result.data[0][2][2], expected)
        # The input mask must be carried through to the output unchanged.
        self.assertArrayEqual(result.data.mask, mask)

    def test_coordinate_reordering_with_different_smoothing_coefficients(self):
        """Test that x and y smoothing_coefficients still apply to the right
        coordinate when the input cube spatial dimensions are (x, y) not
        (y, x)"""
        enforce_coordinate_ordering(self.cube, ["realization", "longitude", "latitude"])
        plugin = RecursiveFilter(iterations=self.iterations,)
        result = plugin(
            self.cube, smoothing_coefficients=self.smoothing_coefficients_alternative
        )
        expected_result = np.array(
            [
                [0.02554158, 0.05397786, 0.1312837, 0.05397786, 0.02554158],
                [0.03596632, 0.07334216, 0.1668669, 0.07334216, 0.03596632],
                [0.05850913, 0.11031596, 0.21073693, 0.11031596, 0.05850913],
                [0.03596632, 0.07334216, 0.1668669, 0.07334216, 0.03596632],
                [0.02554158, 0.05397786, 0.1312837, 0.05397786, 0.02554158],
            ],
            dtype=np.float32,
        )
        self.assertSequenceEqual(
            [x.name() for x in result.coords(dim_coords=True)],
            ["realization", "longitude", "latitude"],
        )
        self.assertArrayAlmostEqual(result.data[0], expected_result)

    def test_error_multiple_times_masked(self):
        """Test that the plugin raises an error when given a masked cube with
        multiple time points"""
        point = self.cube.coord("time").cell(0).point
        time_points = [point - timedelta(seconds=3600), point]
        cube = add_coordinate(self.cube, time_points, "time", is_datetime=True)
        mask = np.zeros(cube.data.shape, dtype=int)
        mask[0, 0, 2, 2] = 1
        mask[1, 0, 2, 3] = 1
        cube.data = np.ma.MaskedArray(cube.data, mask=mask)
        plugin = RecursiveFilter(iterations=self.iterations,)
        msg = "multiple time points is unsupported"
        with self.assertRaisesRegex(ValueError, msg):
            plugin(cube, smoothing_coefficients=self.smoothing_coefficients)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3210322 | # -*- coding: utf-8 -*-
from mealy.constants import ErrorAnalyzerConstants
from sklearn.metrics import accuracy_score, balanced_accuracy_score
import numpy as np
def compute_confidence_decision(primary_model_true_accuracy, primary_model_predicted_accuracy):
    """Return the error tree's fidelity and whether it is trustworthy.

    Fidelity is 1 minus the gap between the primary model's true accuracy
    and the accuracy estimated by the error tree; the decision flag is True
    when that gap is within the configured tolerance.
    """
    gap = np.abs(primary_model_true_accuracy - primary_model_predicted_accuracy)
    fidelity = 1. - gap
    decision = gap <= ErrorAnalyzerConstants.TREE_ACCURACY_TOLERANCE
    # TODO Binomial test
    return fidelity, decision
def compute_accuracy_score(y_true, y_pred):
    """Plain accuracy of the error-tree predictions (sklearn wrapper)."""
    return accuracy_score(y_true=y_true, y_pred=y_pred)
def compute_primary_model_accuracy(y):
    """Fraction of samples labelled as correct primary-model predictions."""
    total_samples = y.shape[0]
    correct = np.count_nonzero(y == ErrorAnalyzerConstants.CORRECT_PREDICTION)
    return float(correct) / total_samples
def compute_fidelity_score(y_true, y_pred):
    """Fidelity: 1 - |true primary accuracy - tree-estimated accuracy|."""
    true_accuracy = compute_primary_model_accuracy(y_true)
    estimated_accuracy = compute_primary_model_accuracy(y_pred)
    return 1. - np.abs(true_accuracy - estimated_accuracy)
def fidelity_balanced_accuracy_score(y_true, y_pred):
    """Combined score: fidelity plus balanced accuracy of the error tree."""
    fidelity = compute_fidelity_score(y_true, y_pred)
    balanced = balanced_accuracy_score(y_true, y_pred)
    return fidelity + balanced
def error_decision_tree_report(y_true, y_pred, output_format='str'):
    """Return a report showing the main Error Decision Tree metrics.

    Args:
        y_true (numpy.ndarray): Ground truth values of wrong/correct predictions of the error tree primary model.
            Expected values in [ErrorAnalyzerConstants.WRONG_PREDICTION, ErrorAnalyzerConstants.CORRECT_PREDICTION].
        y_pred (numpy.ndarray): Estimated targets as returned by the error tree. Expected values in
            [ErrorAnalyzerConstants.WRONG_PREDICTION, ErrorAnalyzerConstants.CORRECT_PREDICTION].
        output_format (string): Return format used for the report. Valid values are 'dict' or 'str'.

    Return:
        dict or str: dictionary or string report storing different metrics regarding the Error Decision Tree.

    Raises:
        ValueError: if ``output_format`` is neither 'dict' nor 'str'.
    """
    tree_accuracy_score = compute_accuracy_score(y_true, y_pred)
    tree_balanced_accuracy = balanced_accuracy_score(y_true, y_pred)
    primary_model_predicted_accuracy = compute_primary_model_accuracy(y_pred)
    primary_model_true_accuracy = compute_primary_model_accuracy(y_true)
    fidelity, confidence_decision = compute_confidence_decision(primary_model_true_accuracy,
                                                                primary_model_predicted_accuracy)

    if output_format == 'dict':
        return {
            ErrorAnalyzerConstants.TREE_ACCURACY: tree_accuracy_score,
            ErrorAnalyzerConstants.TREE_BALANCED_ACCURACY: tree_balanced_accuracy,
            ErrorAnalyzerConstants.TREE_FIDELITY: fidelity,
            ErrorAnalyzerConstants.PRIMARY_MODEL_TRUE_ACCURACY: primary_model_true_accuracy,
            ErrorAnalyzerConstants.PRIMARY_MODEL_PREDICTED_ACCURACY: primary_model_predicted_accuracy,
            ErrorAnalyzerConstants.CONFIDENCE_DECISION: confidence_decision,
        }

    if output_format == 'str':
        lines = [
            'The Error Decision Tree was trained with accuracy %.2f%% and balanced accuracy %.2f%%.'
            % (tree_accuracy_score * 100, tree_balanced_accuracy * 100),
            # Fixed: the original built "model''s" via implicit string
            # concatenation, which rendered as "models" (missing apostrophe).
            "The Decision Tree estimated the primary model's accuracy to %.2f%%."
            % (primary_model_predicted_accuracy * 100),
            # Fixed: the original format string was '%.2f.%%', which printed
            # e.g. "85.00.%" instead of "85.00%.".
            'The true accuracy of the primary model is %.2f%%.'
            % (primary_model_true_accuracy * 100),
            'The Fidelity of the error tree is %.2f%%.' % (fidelity * 100),
        ]
        if not confidence_decision:
            lines.append('Warning: the built tree might not be representative of the primary model performances.')
            lines.append('The error tree predicted model accuracy is considered too different from the true model accuracy.')
        else:
            lines.append('The error tree is considered representative of the primary model performances.')
        # Every line is newline-terminated, matching the original output shape
        # (including the trailing newline).
        return '\n'.join(lines) + '\n'

    raise ValueError("Output format should either be 'dict' or 'str'")
| StarcoderdataPython |
1659833 | <filename>Login-Pass.py<gh_stars>0
def mail():
    """Login portal: prompt for credentials and hand them to the checker."""
    print("LOGIN PORTAL")
    user_id = raw_input("Email ID : ")
    user_pass = raw_input("Password : ")
    login(user_id, user_pass)
def login(id1, pass1):
    """Validate credentials against the stored "<id1>.txt" account file.

    Typing 'exit' for either field aborts back to a restart banner; a
    mismatch (or unknown account) returns to the main menu.
    """
    import time
    # Check the 'exit' sentinel BEFORE touching the credential file: the
    # original opened "<id1>.txt" first, which raised IOError when the user
    # typed 'exit' (no such file exists).
    if id1 == 'exit' or pass1 == 'exit':
        print("\n\nExiting . . .")
        time.sleep(3)
        cls()
        print(">>> ================================ RESTART ================================")
        return
    try:
        # 'with' guarantees the handle is closed (the original never closed it).
        with open(id1 + ".txt", "r") as f:
            stored = f.readlines()
    except IOError:
        # Unknown account file: treat as a failed login instead of crashing.
        print("\nLogin Failed")
        time.sleep(13)
        cls()
        mains()
        return
    if id1 == stored[0].strip('\n') and pass1 == stored[1].strip('\n'):
        print("Login Successful")
        print("\n\nLoading . . .")
        time.sleep(3)
        access(id1)
    else:
        print("\nLogin Failed")
        time.sleep(13)
        cls()
        mains()
def cls():
    """Crude screen clear: push old output out of view with blank lines."""
    print("\n" * 100)
def access(id1):
    """Landing page shown after a successful login; offers logout."""
    print("Welcome to the PORTAL - " + id1 + " !!!")
    answer = raw_input("Logout ?(y) :")
    logout(answer)
def logout(lgt):
    """Return to the main menu only when the user confirms with 'y'/'Y'."""
    if lgt in ("y", "Y"):
        cls()
        mains()
def mains():
    """Entry point: route to account creation or the login portal."""
    print("Welcome to The PORTAL\n1. Create Account\n2. Login Portal")
    choice = raw_input("Option : ")
    # Both branches clear the screen first, so hoist the call.
    cls()
    if choice == '1':
        create_acc()
    else:
        mail()
def create_acc():
    """Register a new account.

    Writes "<email>.txt" with the email on the first line and the password
    on the second, then returns to the main menu.

    SECURITY NOTE: the password is stored in plain text; it should be
    hashed before storage in any real deployment.
    """
    import time
    print("ACCOUNT REGISTRATION FORUM")
    acc_id = raw_input("Email id : ")
    acc_pass = raw_input("Password : ")
    # 'with' closes the handle even if a write raises (the original's
    # explicit close() would be skipped on error).
    with open(acc_id + ".txt", "w+") as f:
        f.write(acc_id + "\n")
        f.write(acc_pass + "\n")
    time.sleep(3)
    mains()
| StarcoderdataPython |
159297 | <filename>feature_engine/encoding/decision_tree.py
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
from sklearn.pipeline import Pipeline
from feature_engine.encoding.base_encoder import BaseCategoricalTransformer
from feature_engine.encoding.ordinal import OrdinalEncoder
from feature_engine.discretisation import DecisionTreeDiscretiser
from feature_engine.variable_manipulation import _define_variables
class DecisionTreeEncoder(BaseCategoricalTransformer):
    """
    The DecisionTreeEncoder() encodes categorical variables with predictions
    of a decision tree model.

    The categorical variable will be first encoded into integers with the
    OrdinalEncoder(). The integers can be assigned arbitrarily to the
    categories or following the mean value of the target in each category.

    Then a decision tree will be fit using the resulting numerical variable to predict
    the target variable. Finally, the original categorical variable values will be
    replaced by the predictions of the decision tree.

    Parameters
    ----------
    encoding_method: str, default='arbitrary'
        The categorical encoding method that will be used to encode the original
        categories to numerical values.

        'ordered': the categories are numbered in ascending order according to
        the target mean value per category.

        'arbitrary' : categories are numbered arbitrarily.

    cv : int, default=3
        Desired number of cross-validation fold to be used to fit the decision
        tree.

    scoring: str, default='neg_mean_squared_error'
        Desired metric to optimise the performance for the tree. Comes from
        sklearn metrics. See the DecisionTreeRegressor or DecisionTreeClassifier
        model evaluation documentation for more options:
        https://scikit-learn.org/stable/modules/model_evaluation.html

    regression : boolean, default=True
        Indicates whether the encoder should train a regression or a classification
        decision tree.

    param_grid : dictionary, default=None
        The list of parameters over which the decision tree should be optimised
        during the grid search. The param_grid can contain any of the permitted
        parameters for Scikit-learn's DecisionTreeRegressor() or
        DecisionTreeClassifier().

        If None, then param_grid = {'max_depth': [1, 2, 3, 4]} is used at fit
        time.

    random_state : int, default=None
        The random_state to initialise the training of the decision tree. It is one
        of the parameters of the Scikit-learn's DecisionTreeRegressor() or
        DecisionTreeClassifier(). For reproducibility it is recommended to set
        the random_state to an integer.

    variables : list, default=None
        The list of categorical variables that will be encoded. If None, the
        encoder will find and select all object type variables.

    Attributes
    ----------
    encoder_ : sklearn Pipeline
        Encoder pipeline containing the ordinal encoder and decision
        tree discretiser.
    """

    def __init__(
        self,
        encoding_method="arbitrary",
        cv=3,
        scoring="neg_mean_squared_error",
        param_grid=None,
        regression=True,
        random_state=None,
        variables=None,
    ):
        # Store param_grid exactly as given: resolving the None default here
        # (as the original did) breaks the scikit-learn convention that
        # __init__ must not alter its parameters, which in turn breaks
        # get_params()/clone() round-tripping. The default is resolved in
        # fit() instead.
        self.encoding_method = encoding_method
        self.cv = cv
        self.scoring = scoring
        self.regression = regression
        self.param_grid = param_grid
        self.random_state = random_state
        self.variables = _define_variables(variables)

    def fit(self, X, y=None):
        """
        Learns the numbers that should be used to replace the categories in each
        variable.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The training input samples.
            Can be the entire dataframe, not just the categorical variables.

        y : pandas series.
            The target variable. Required to train the decision tree and for
            ordered ordinal encoding.
        """
        # check input dataframe
        X = self._check_fit_input_and_variables(X)

        # resolve the default hyper-parameter grid for the tree search
        param_grid = self.param_grid
        if param_grid is None:
            param_grid = {"max_depth": [1, 2, 3, 4]}

        # initialize categorical encoder
        cat_encoder = OrdinalEncoder(
            encoding_method=self.encoding_method, variables=self.variables
        )

        # initialize decision tree discretiser
        tree_discretiser = DecisionTreeDiscretiser(
            cv=self.cv,
            scoring=self.scoring,
            variables=self.variables,
            param_grid=param_grid,
            regression=self.regression,
            random_state=self.random_state,
        )

        # pipeline for the encoder
        self.encoder_ = Pipeline(
            [
                ("categorical_encoder", cat_encoder),
                ("tree_discretiser", tree_discretiser),
            ]
        )

        self.encoder_.fit(X, y)

        self.input_shape_ = X.shape

        return self

    def transform(self, X):
        """
        Returns the predictions of the decision tree based of the variable's original
        value.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        X_transformed : pandas dataframe of shape = [n_samples, n_features].
            Dataframe with variables encoded with decision tree predictions.
        """
        X = self._check_transform_input_and_state(X)
        X = self.encoder_.transform(X)
        return X
| StarcoderdataPython |
79033 | <gh_stars>1-10
import cv2
import math
import os
import numpy as np
from scipy import ndimage
import least_squares
class SpectrumBuilder:
def __init__(self, img, path, windows):
self.image = img
self.windows = windows
self.max_window_size = windows[-1]
self.height = img.shape[0]
self.width = img.shape[1]
self.path = path
list_images = list()
# /// <summary>
# /// Вычисление мультифрактального спектра: создание уровней и измерение их размерности
# /// </summary>
# /// <param name="layers">Множества уровня</param>
# /// <param name="singularity_bounds">Интервал сингулярности</param>
# /// <param name="singularity_step">Шаг сингулярности</param>
# /// <returns>Мультифракальный спектр изображения</returns>
def calculate_spectrum(self, layers, singularity_bounds, singularity_step):
current_layer_singularity = singularity_bounds.begin
spectrum = dict()
for layer in layers:
measure = self.__create_and_measure_layer(layer)
spectrum[current_layer_singularity] = measure
current_layer_singularity += singularity_step
return spectrum
# /// <summary>
# /// Создание изображения, соответствующего данному уровню, и его измерение
# /// </summary>
# /// <param name="layer">Множество уровня</param>
# /// <returns>Изображение слоя и его фрактальная размерность</returns>
def __create_and_measure_layer(self, layer):
new_height = self.height - self.max_window_size * 2
new_width = self.width - self.max_window_size * 2
layer_image = 255 * np.ones(shape=[new_height, new_width, 3], dtype=np.uint16)
revers_layer_image = np.zeros(shape=[new_height, new_width], dtype=np.uint16)
for point in layer.points:
layer_image[point.x, point.y] = (0, 0, 0)
revers_layer_image[point.x, point.y] = 1
self.__save_layer_image(layer, layer_image)
return self.__calculate_measure(revers_layer_image)
# /// <summary>
# /// Сохранение изображения множества уровня
# /// </summary>
# /// <param name="layer">Множество уровня</param>
# /// <param name="layer_image">Изображение множества уровня</param>
def __save_layer_image(self, layer, layer_image):
self.list_images.append(layer_image)
min_singularity = str(round(layer.singularity_bounds.begin, 2))
max_singularity = str(round(layer.singularity_bounds.end, 2))
layer_name = "layer(" + min_singularity + "-" + max_singularity + ")" + ".jpg"
abs_path = os.path.join(self.path, layer_name)
cv2.imwrite(abs_path, layer_image)
# /// <summary>
# /// Вычисление фрактальной размерности изображения
# /// </summary>
# /// <param name="revers_image">Анализируемое изображение</param>
# /// <returns>Фрактальная размерность изображения</returns>
def __calculate_measure(self, revers_img):
intensities = self.__calculate_black_windows_convolve(revers_img, self.windows)
x = np.log(self.windows)
y = np.log(intensities + 1)
return -least_squares.apply_method(list(zip(x, y)))
# /// <summary>
# /// Подсчёт числа квадратов, имеющих внутри себя хотя бы один чёрный пиксель
# /// </summary>
# /// <param name="layers_image">Исследуемая область изображения</param>
# /// <param name="windows">Список окон</param>
# /// <returns>Число квадратиков, имеющих внутри себя хотя бы один чёрный пиксель </returns>
def __calculate_black_windows_convolve(self, layers_img, windows):
black_windows_list = np.zeros(len(windows))
new_height = self.height - self.max_window_size * 2
new_width = self.width - self.max_window_size * 2
black_pixels = np.zeros((len(windows), new_height, new_width), np.float)
for i, win_size in enumerate(windows):
kernel_size = win_size * 2 - 1
kernel = [[1 if i < win_size and j < win_size else 0
for i in range(0, kernel_size)] for j in range(0, kernel_size)]
black_pixels[i] = ndimage.convolve(layers_img, kernel, mode='constant')
for k, window in enumerate(windows):
count_black_pixel = 0
for i in range(0, new_height - window, window):
for j in range(0, new_width - window, window):
if black_pixels[k][i][j]:
count_black_pixel += 1
black_windows_list[k] = count_black_pixel
return black_windows_list
| StarcoderdataPython |
3385235 | """
https://data.cms.gov/provider-data/topics/doctors-clinicians
These are the official datasets used on Medicare.gov provided by the Centers
for Medicare & Medicaid Services. These datasets give you useful information
about doctors, clinicians, and groups listed on Medicare Care Compare.
General information about doctors and clinicians in the Provider Data Catalog
and on Medicare Care Compare profile pages comes primarily from the Provider,
Enrollment, Chain, and Ownership System (PECOS) and is checked against Medicare
claims data. This information is updated twice a month.
For a clinician or group's information to appear on Care Compare, they must
have:
Current and “approved” Medicare enrollment records in PECOS
A valid physical practice location or address
A valid specialty
A National Provider Identifier (NPI) for a clinician
At least one Medicare Fee-for-Service claim within the last six months for a
clinician
At least two approved clinicians reassigning their benefits to the group
https://data.cms.gov/provider-data/archived-data/doctors-clinicians
https://wayback.archive-it.org/org-551/20160104131342/https://data.medicare.gov/data/archives/physician-compare
The Medicare Fee-For-Service Public Provider Enrollment (PPEF) dataset includes
information on providers who are actively approved to bill Medicare or have
completed the 855O at the time the data was pulled from the Provider Enrollment
and Chain Ownership System (PECOS). The release of this provider enrollment
data is not related to other provider information releases such as Physician
Compare or Data Transparency.
https://data.cms.gov/provider-characteristics/medicare-provider-supplier-enrollment/medicare-fee-for-service-public-provider-enrollment
https://data.cms.gov/resources/fee-for-service-public-provider-enrollment-methodology
https://www.nber.org/research/data/medicare-fee-service-public-provider-enrollment-data
You’re required to revalidate—or renew—your enrollment record periodically to
maintain Medicare billing privileges. In general, providers and suppliers
revalidate every five years but DMEPOS suppliers revalidate every three years.
CMS also reserves the right to request off-cycle revalidations.
https://www.cms.gov/Medicare/Provider-Enrollment-and-Certification/Revalidations
The Provider Enrollment data will be published on
https://data.cms.gov/public-provider-enrollment and will be updated on a
quarterly basis. The initial data will consist of individual and organization
provider and supplier enrollment information similar to what is on Physician
Compare; however, it will be directly from PECOS and will only be updated
through updates to enrollment information.
https://www.cms.gov/newsroom/fact-sheets/public-provider-and-supplier-enrollment-files
"""
import os
import re
import pandas as pd
import requests
from ..constants import PC_UPDATE_URL, RAW_PC_DIR
from ..utils.utils import unzip_checkfirst, wget_checkfirst
def physician_compare_get_date_updated():
    """Scrape the Physician Compare page for its "last updated" date.

    Returns the date as an ISO string (YYYY-MM-DD).
    """
    page = requests.get('https://data.medicare.gov/data/physician-compare').text
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, SyntaxWarning from 3.12). In a
    # raw string the regex engine still interprets \n as a newline.
    matches = re.findall(
        r'data was last updated on\n\s+[A-Za-z]+\s+[0-9]+,\s+[0-9]+', page)
    # Words 5+ of the match are e.g. "January 1, 2020".
    date_words = ' '.join(matches[0].split()[5:])
    return pd.to_datetime(date_words).isoformat().split('T')[0]
def physician_compare_archive_list():
    """Return the archive download URLs listed on the Physician Compare
    archives page (medicare.gov links only)."""
    stub = 'https://data.medicare.gov/data/archives/physician-compare'
    page = requests.get(stub).text
    urls = []
    for anchor in re.findall('<a href="[A-Za-z./0-9_:]+', page):
        if 'medicare.gov' in anchor:
            urls.append(anchor.split('<a href="')[1])
    return urls
def get_physician_compare_update():
    """
    Note: this renames the natural filename,
    Physician_Compare_National_Downloadable_File.csv,
    to one including the update date.
    """
    update_date = physician_compare_get_date_updated()
    to_dir = os.path.join(RAW_PC_DIR, 'Updates')
    base = 'Physician_Compare_National_Downloadable_File'
    dated_name = base + '_' + update_date + '.csv'
    update_path = os.path.join(to_dir, dated_name)
    if os.path.isfile(update_path):
        print('Already up-to-date')
    else:
        wget_checkfirst(PC_UPDATE_URL, to_dir)
        # Rename the natural download name to the dated name.
        os.rename(os.path.join(to_dir, base + '.csv'), update_path)
    return update_path
def main():
    """Fetch archived and freshly-updated Physician Compare files."""
    # 1. archived data on CMS: download every archive zip, then extract each
    # into a directory named after the zip (sans extension).
    downloaded = []
    for url in physician_compare_archive_list():
        downloaded.append(wget_checkfirst(url, RAW_PC_DIR))
    for zpath in downloaded:
        unzip_checkfirst(zpath, os.path.splitext(zpath)[0])
    # 2. check if there is new updated data on CMS
    get_physician_compare_update()
    # 3. note that some archived files were downloaded from the NBER,
    # https://data.nber.org/compare/physician/ .
    # Those files needed to be extracted on my PC before being uploaded.
    # They are now in the NBER folder, and others exist on the NBER
    # server that I did not download.
main()
| StarcoderdataPython |
174964 | <filename>teams/api/views/teams_views.py
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from teams.api.serializers.teams_serializers import TeamSerializer
from teams.models import Teams
class TeamAPIList(generics.ListAPIView):
    """GET: list every team, most recently created first (auth required)."""

    permission_classes = [IsAuthenticated]
    # Newest teams first.
    queryset = Teams.objects.all().order_by('-created_on')
    serializer_class = TeamSerializer
class CreateTeam(generics.CreateAPIView):
    """POST: create a new team (auth required)."""

    permission_classes = [IsAuthenticated]
    queryset = Teams.objects.all()
    serializer_class = TeamSerializer
class UpdateTeam(generics.UpdateAPIView):
    """PUT/PATCH: update an existing team by pk (auth required)."""

    permission_classes = [IsAuthenticated]
    queryset = Teams.objects.all()
    serializer_class = TeamSerializer
class RetrieveTeam(generics.RetrieveAPIView):
    """GET: retrieve a single team by pk (auth required)."""

    permission_classes = [IsAuthenticated]
    queryset = Teams.objects.all()
    serializer_class = TeamSerializer
class DestroyTeam(generics.DestroyAPIView):
    """DELETE: remove a team by pk (auth required)."""

    permission_classes = [IsAuthenticated]
    queryset = Teams.objects.all()
    serializer_class = TeamSerializer
| StarcoderdataPython |
169490 | # Class to store Trie(Patterns)
# It handles all cases particularly the case where a pattern Pi is a subtext of a pattern Pj for i != j
class Trie_Patterns:
    """Trie built over a list of patterns, for multi-pattern matching.

    Each inserted pattern is terminated with a sentinel '$' edge so that a
    pattern which is a prefix/substring of another still gets its own
    terminal node (handles Pi being a subtext of Pj for i != j).
    """

    def __init__(self, patterns, start, end):
        # start/end: inclusive index range of each pattern string to insert.
        self.build_trie(patterns, start, end)

    # The trie will be a dictionary of dictionaries where:
    # ... The key of the external dictionary is the node ID (integer),
    # ... The internal dictionary:
    # ...... It contains all the trie edges outgoing from the corresponding node
    # ...... Its keys are the letters on those edges
    # ...... Its values are the node IDs to which these edges lead
    # Time Complexity: O(|patterns|)
    # Space Complexity: O(|patterns|)
    def build_trie(self, patterns, start, end):
        """Build the trie from scratch; node 0 is the root."""
        self.trie = dict()
        self.trie[0] = dict()
        # Maps terminal node id -> list of pattern indices ending there.
        self.node_patterns_mapping = dict()
        self.max_node_no = 0
        for i in range(len(patterns)):
            self.insert(patterns[i], i, start, end)

    def insert(self, pattern, pattern_no, start, end):
        """Insert pattern[start:end+1] (plus the '$' sentinel) into the trie."""
        # Walk as far as the existing trie matches, then append new nodes.
        (index, node) = self.search_text(pattern, start, end)
        i = index
        while i <= (end+1):
            if i == end + 1:
                c = '$' # to handle the case where Pi is a substring of Pj for i != j
            else:
                c = pattern[i]
            self.max_node_no += 1
            self.trie[node][c] = self.max_node_no
            self.trie[self.max_node_no] = dict()
            node = self.max_node_no
            i += 1
        if not node in self.node_patterns_mapping:
            self.node_patterns_mapping[node] = []
        self.node_patterns_mapping[node].append(pattern_no)

    def search_text(self, pattern, start, end):
        """Follow pattern[start:end+1] + '$' down the trie.

        Returns (i, node): the first unmatched position and the deepest
        node reached; i == end + 2 means the full pattern plus sentinel
        matched.
        """
        if len(self.trie) == 0:
            # NOTE(review): unreachable after build_trie (node 0 always
            # exists); kept as a defensive guard.
            return (0, -1)
        node = 0
        i = start
        while i <= (end+1):
            if i == end + 1:
                c = '$' # to handle the case where Pi is a substring of Pj for i != j
            else:
                c = pattern[i]
            if c in self.trie[node]:
                node = self.trie[node][c]
                i += 1
                continue
            else:
                break
        return (i, node)

    # Prints the trie in the form of a dictionary of dictionaries
    # E.g. For the following patterns: ["AC", "T"] {0:{'A':1,'T':2},1:{'C':3}}
    def print_tree(self):
        """Print every edge as "src->dst:char", then the terminal mapping."""
        for node in self.trie:
            for c in self.trie[node]:
                print("{}->{}:{}".format(node, self.trie[node][c], c))
        print(self.node_patterns_mapping)

    # Time Complexity: O(|text| * |longest pattern|)
    def multi_pattern_matching(self, text, start, end):
        """Return indices of patterns whose full path matches text[start:end+1]."""
        if len(self.trie) == 0:
            return []
        (i, node) = self.search_text(text, start, end)
        return self.node_patterns_mapping[node] if node in self.node_patterns_mapping else []
1600458 | <gh_stars>0
# Demo script: three string-formatting techniques (raw strings, str.format
# with repeated placeholders, and f-strings).
from datetime import date

# -----------------------------------------------------------------------------
# 1.1. Raw strings with r" " (backslashes are kept literally; without the
# r-prefix, sequences like "\n" are interpreted as escapes)
print(r"C:\some\name")  # good
print("C:\some\name")  # bad

# -----------------------------------------------------------------------------
# 1.2. Formatting with repeated occurrences (a named placeholder may be
# reused any number of times with str.format)
print(
    """
Hello {name},
Welcome to {company}. Your new email is: {name}@{company}.
Regards,
{date:%Y-%m-%d}
""".format(
        name="john", company="awesomecompany", date=date.today()
    )
)

# -----------------------------------------------------------------------------
# 1.3. Formatting with f" " (Python 3.6+)
# This will probably change in a for loop or something similar
path = "src/data"
mdate = date(2019, 2, 16)
print(f"{path}/{mdate:%Y-%m-%d}.xlsx")  # Good
print(path + "/" + mdate.strftime("%Y-%m-%d") + ".xlsx")  # Not that good
print("{}/{:%Y-%m-%d}.xlsx".format(path, mdate))  # Old python
1712353 |
from .sk import ServerSocket
from .packet import Packet
import random
class Client :
    """One connected client socket on the server side."""

    def __createSocketId(self, length=7, step=3) :
        """Generate a pseudo-random numeric id string of up to *length* digits.

        Sums *step* random floats and takes the leading digits of the
        fractional part. NOTE(review): not cryptographically secure, and
        may yield fewer than *length* digits -- confirm acceptable.
        """
        random_hash = 0
        for _ in range(step) :
            random_hash += random.random()
        return str(random_hash).split(".")[1][:length]

    def __init__(self, clients, socket, addr) :
        # clients: shared list of all connected Client objects on the server.
        self.clients = clients
        self.socket = socket
        self.addr = addr
        self.id = self.__createSocketId()
        self.thread = None
        self.thread_state = True # client working for thread
        self.data = None

    def emit(self, event_name, data, id=None) :
        """Send an event packet to this client, or to the client with *id*.

        NOTE(review): the *id* parameter shadows the ``id`` builtin; kept
        because callers pass it by keyword.
        """
        send_packet = Packet(event_name, data)
        if not id == None :
            # Deliver to the first client whose id matches.
            for client in self.clients :
                if client.id == id :
                    ServerSocket.send(send_packet.encode(), client.socket)
                    break
        else :
            ServerSocket.send(send_packet.encode(), self.socket)

    def emitall(self, event_name, data) :
        """Broadcast an event to every connected client."""
        for client in self.clients :
            self.emit(event_name, data, id=client.id)

    def close(self) :
        """Close the socket and signal the worker thread to stop."""
        self.socket.close()
        self.thread_state = False
class Server(Client) :
    """Client subclass wrapping the server's own socket.

    NOTE(review): does not call Client.__init__, so attributes such as
    clients/addr/id/thread are absent here, and this ``emit`` drops the
    base class's *id* parameter -- confirm this narrowing is intended.
    """

    def __init__(self, socket) :
        self.socket = socket
        self.data = None

    def emit(self, event_name, data) :
        """Send an event packet over the server socket."""
        send_packet = Packet(event_name, data)
        ServerSocket.send(send_packet.encode(), self.socket)

    def close(self) :
        """Close the server socket."""
        self.socket.close()
8257 | import json
from btse_futures.constants import OrderType, Side, TimeInForce
class Order:
    """
    Class to represent a BTSE Order.

    Attributes
    ----------
    size : int
        order quantity or size, e.g. 1
    price : float
        price, e.g. 7000.0
    side : str
        order side, BUY or SELL
    time_in_force : str
        time the order is in force (see TimeInForce), e.g. GTC
    symbol : str
        instrument symbol, e.g. BTCPFC
    type : str
        order type: "LIMIT", "MARKET", or "OCO"
    txType : str
        transaction type
    postOnly / reduceOnly : bool
        order flags
    triggerPrice : float
        trigger price (relevant for LIMIT and OCO order types)
    stopPrice / trailValue : float, optional
        stop price / trail value
    clOrderId : str, optional
        user defined order id
    trigger : str, optional
        price source for stop/take-profit triggers: "markPrice" (default)
        or "lastPrice"

    Documentation: https://www.btse.com/apiexplorer/futures/?shell#tocs_orderformv2
    """

    def __init__(self, size: int, price: float, side: str, time_in_force: str, symbol: str, type: str, txType: str, postOnly: bool, reduceOnly: bool, triggerPrice: float, stopPrice: float = None, trailValue: float = None, clOrderId: str = None, trigger: str = None) -> None:
        # NOTE: assert-based validation disappears under `python -O`;
        # kept as-is for interface compatibility.
        assert(isinstance(size, int))
        assert(isinstance(price, float))
        assert(isinstance(side, str))
        assert(isinstance(time_in_force, str))
        assert(isinstance(symbol, str))
        assert(isinstance(type, str))
        assert(isinstance(postOnly, bool))
        assert(isinstance(reduceOnly, bool))
        assert(isinstance(triggerPrice, float))
        self.size = size
        self.price = price
        self.side = side
        self.time_in_force = time_in_force
        self.symbol = symbol
        self.type = type
        self.txType = txType
        self.postOnly = postOnly
        self.reduceOnly = reduceOnly
        self.triggerPrice = triggerPrice
        self.stopPrice = stopPrice
        self.trailValue = trailValue
        self.clOrderId = clOrderId
        self.trigger = trigger

    @property
    def quantity(self):
        """Alias for ``size``."""
        return self.size

    def to_json(self):
        """Serialize the order to a JSON string, omitting None-valued fields.

        (Bug fix: the previous version also printed the JSON to stdout as
        a debug leftover.)
        """
        return json.dumps(self.order_without_none_values())

    def order_without_none_values(self):
        """Return a dict of the order's fields with None values omitted.

        Bug fix: the previous implementation aliased ``self.__dict__`` and
        deleted keys from it, silently removing attributes from the
        instance itself. A new dict is built instead; the instance is
        left untouched.
        """
        return {key: value for key, value in self.__dict__.items() if value is not None}
class OpenOrder:
    """
    Open order endpoint response format.

    https://www.btse.com/apiexplorer/futures/#tocs_positionrespv2_1

    All attributes mirror the JSON keys of the API response, e.g.::

        {"orderType": 0, "price": 6875, "size": 4, "side": "BUY",
         "filledSize": 3, "orderID": "string", "clOrderID": "market001",
         "reduceOnly": true, "orderState": "string", ...}
    """

    def __init__(self) -> None:
        # Defaults mirror the original field-by-field initialisation; a few
        # boolean-ish API fields default to '' rather than False upstream.
        self.orderType = 0
        self.price = 0
        self.size = 0
        self.side = ''
        self.filledSize = 0
        self.orderValue = 0.0
        self.pegPriceMin = 0
        self.pegPriceMax = 0
        self.pegPriceDeviation = 0
        self.cancelDuration = 0
        self.timestamp = 0
        self.orderID = ''
        self.stealth = 0.0
        self.triggerOrder = ''
        self.triggered = ''
        self.triggerPrice = 0
        self.triggerOriginalPrice = 0
        self.triggerOrderType = 0
        self.triggerTrailingStopDeviation = 0
        self.triggerStopPrice = 0
        self.symbol = ''
        self.trailValue = 0
        self.clOrderID = ''
        self.reduceOnly = ''
        self.orderState = ''

    @staticmethod
    def from_dict(data):
        """Build an OpenOrder from an API response dict.

        Every attribute initialised in ``__init__`` is looked up by the
        same name in *data*; missing keys become None (dict.get
        semantics), matching the previous hand-written field list while
        avoiding 25 copy-pasted assignments.
        """
        open_order = OpenOrder()
        for name in list(vars(open_order)):
            setattr(open_order, name, data.get(name))
        return open_order
class OrderResponseV21:
    """
    Order Response V2.1.

    All attributes mirror the JSON keys of the API response.

    Documentation -- https://www.btse.com/apiexplorer/futures/?shell#tocs_orderrespv2_1
    """

    def __init__(self) -> None:
        self.status = 0
        self.symbol = ''
        self.orderType = 0
        self.price = 0.0
        self.side = ''
        self.size = 0
        self.orderID = ''
        self.timestamp = 0
        self.triggerPrice = 0.0
        self.trigger = ''
        self.deviation = 0.0
        self.stealth = 0.0
        self.message = ''
        self.avgFillPrice = 0.0
        self.fillSize = 0.0
        self.clOrderID = ''

    @staticmethod
    def from_dict(data):
        """Build an OrderResponseV21 from an API response dict.

        Each attribute initialised in ``__init__`` is looked up by the
        same name in *data*; missing keys become None -- identical to the
        previous field-by-field version, without the duplication.
        """
        order_response_v21 = OrderResponseV21()
        for name in list(vars(order_response_v21)):
            setattr(order_response_v21, name, data.get(name))
        return order_response_v21
| StarcoderdataPython |
22540 | <filename>convert.py<gh_stars>0
# Convert GloVe vectors (Common Crawl 42B tokens, 300d) into word2vec text
# format; glove2word2vec returns (vocabulary_count, vector_dimensions).
# NOTE(review): "cropus" in the output path looks like a typo for "corpus";
# kept as-is because it is a runtime path.
from gensim.scripts.glove2word2vec import glove2word2vec
(count, dimensions) = glove2word2vec("dataset/glove.42B.300d.txt", "dataset/cropus/42B_w2v.txt")
161214 | <reponame>deniskolosov/django-doctor-appointment
from django.shortcuts import render
from django.views.generic import CreateView
from django.contrib import messages
from datetimewidget.widgets import DateTimeWidget
from .models import Appointment
class AppointmentCreateMixin(object):
    """Mixin for appointment create views: shared form fields plus a flash
    message on successful creation.

    Subclasses must provide ``success_msg`` (attribute or property); the
    base property raises NotImplementedError.
    """
    fields = ('patient_name',
              'patient_middlename',
              'patient_surname',
              'doctor',
              'appointment_time')

    @property
    def success_msg(self):
        # Bug fix: the original did `return NonImplemented`, an undefined
        # name that raised NameError at runtime. An abstract property
        # should raise NotImplementedError instead.
        raise NotImplementedError("subclasses must define success_msg")

    def form_valid(self, form):
        """Flash the success message, then delegate to the parent view."""
        messages.success(self.request, self.success_msg)
        return super(AppointmentCreateMixin, self).form_valid(form)
class AppointmentCreateView(AppointmentCreateMixin, CreateView):
    """Create an Appointment, enforcing office hours and slot availability."""
    model = Appointment
    fields = ('patient_name',
              'patient_middlename',
              'patient_surname',
              'doctor',
              'appointment_time')
    success_msg = "Спасибо! Вы записались на прием."

    def get_form(self, form_class):
        """Attach a bootstrap date-time picker to the appointment field.

        NOTE(review): in Django >= 1.11 ``get_form`` takes ``form_class``
        as an optional keyword -- confirm against the Django version used.
        """
        form = super(AppointmentCreateView, self).get_form()
        dateTimeOptions = {
            'weekStart': '1',
            'format': 'dd/mm/yyyy HH',
            'daysOfWeekDisabled': "'0,6'",
            'minuteStep': '60',
        }
        form.fields['appointment_time'].widget = DateTimeWidget(
            options=dateTimeOptions, usel10n=True, bootstrap_version=3)
        return form

    def form_valid(self, form):
        """Reject appointments outside 09:00-18:00, on weekends, or that
        collide with an existing appointment in the following hour."""
        import datetime
        start_date = form.cleaned_data['appointment_time']
        end_date = form.cleaned_data['appointment_time'] + \
            datetime.timedelta(hours=1)
        # Office hours: 09:00 inclusive to 18:00 exclusive.
        if not datetime.time(9, 00) \
                <= start_date.time() < datetime.time(18, 00):
            form.add_error('appointment_time', 'Часы приема — 09:00-18:00')
            return self.form_invalid(form)
        # weekday(): 5 = Saturday, 6 = Sunday.
        if start_date.weekday() == 5 or start_date.weekday() == 6:
            form.add_error('appointment_time', 'Дни приема — ПН-ПТ')
            return self.form_invalid(form)
        # __range is inclusive at both ends, so a booking starting exactly
        # one hour later is also rejected -- NOTE(review): confirm intended.
        if Appointment.objects.filter(appointment_time__range=(start_date,
                                                               end_date)):
            form.add_error('appointment_time', 'К сожалению, время занято!')
            return self.form_invalid(form)
        return super(AppointmentCreateView, self).form_valid(form)

    template_name = 'appointments/appointment_form.html'
61426 | <filename>paymeuz/migrations/0002_auto_20200826_1127.py
# Generated by Django 3.1 on 2020-08-26 06:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename Transaction._id -> trans_id, add request_id/state fields,
    and alter the status field.

    NOTE(review): ``status`` is a CharField whose choices keys and default
    are integers -- Django stores CharField values as strings; confirm
    this mismatch is intended.
    """

    dependencies = [
        ('paymeuz', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='transaction',
            old_name='_id',
            new_name='trans_id',
        ),
        migrations.AddField(
            model_name='transaction',
            name='request_id',
            field=models.IntegerField(default=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='transaction',
            name='state',
            field=models.IntegerField(default=1),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='transaction',
            name='status',
            field=models.CharField(choices=[(0, 'processing'), (1, 'paid'), (2, 'failed')], default=0, max_length=10),
        ),
    ]
| StarcoderdataPython |
37397 | <filename>FaceTemplateMatching.py<gh_stars>1-10
import cv2
from threading import Thread
import datetime
import time
import sys
class FPSCounter:
    """Measure average frames-per-second over an explicit start/stop window."""

    def __init__(self):
        self._start = None
        self._end = None
        self._noFrames = 0

    def start(self):
        """Record the window's start time; returns self for chaining."""
        self._start = datetime.datetime.now()
        return self

    def stop(self):
        """Record the window's end time."""
        self._end = datetime.datetime.now()

    def update(self):
        """Count one processed frame."""
        self._noFrames += 1

    def elapsed(self):
        """Seconds between start() and stop()."""
        delta = self._end - self._start
        return delta.total_seconds()

    def fps(self):
        """Average frames per second over the measured window."""
        return self._noFrames / self.elapsed()
class FrameGrabber:
    """Continuously grab frames from a cv2.VideoCapture on a background
    thread so read() always returns the latest frame without blocking."""

    def __init__(self, src=0):
        self.vidStream = cv2.VideoCapture(src)
        # Prime with the first frame so read() is valid immediately.
        (self.grabbed, self.frame) = self.vidStream.read()
        self.stopped = False

    def start(self):
        """Start the background grab loop; returns self for chaining."""
        Thread(target=self.grabFrame, args=()).start()
        return self

    def grabFrame(self):
        # Loop until stop() is requested, keeping only the newest frame.
        while True:
            if self.stopped:
                return
            (self.grabbed, self.frame) = self.vidStream.read()

    def read(self):
        """Return the most recently grabbed frame."""
        return self.frame

    def stop(self):
        """Release the capture device and end the grab loop."""
        self.vidStream.release()
        self.stopped = True
# --- Main script: capture (or load) a grayscale face template, crop the
# face out of it, then template-match it against the live camera feed. ---
vidStream = FrameGrabber(src=0).start()
cascadeFace = cv2.CascadeClassifier('lbpcascade_frontalface.xml')

# Template image: default file, or a path supplied on the command line.
if (len(sys.argv) == 1):
    template = cv2.imread('template.png', 0)
else:
    imgPath = sys.argv[1]
    template = cv2.imread(imgPath, 0)

if template is None:
    #If no template file exists, open video stream to capture template
    # Press 'q' to freeze the current frame as the template.
    while (True):
        tempFrame = vidStream.read()
        cv2.imshow('Template Capture', tempFrame)
        template = cv2.cvtColor(tempFrame, cv2.COLOR_BGR2GRAY)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.imwrite("template.png", template)
            break

w,h = template.shape[::-1]

#Reducing the template image to crop out the face
face = cascadeFace.detectMultiScale(template, scaleFactor=1.3, minNeighbors=5, minSize=(25,25))
padding = 30  # NOTE(review): unused -- the crop below hardcodes 30/20.
for (x,y,w,h) in face:
    cv2.rectangle(template, (x,y-30), (x + w, y + h+20), (0,255,0), 2)
    cropped = template[y-30:y+h+20, x:x+w]

# NOTE(review): if no face was detected above, ``cropped`` is never bound
# and the matching loop below raises NameError -- confirm a face is
# always present in the template.
cv2.imshow('Template', template)
cv2.imshow('Cropped', cropped)
cv2.waitKey(1)

fps = FPSCounter().start()
while True:
    frame = vidStream.read()
    cv2.imshow('Frame', frame)
    gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    cv2.imshow('Gray', gray)
    # For each detected face, resize the template crop to the detection
    # size and find the best normalized cross-correlation match location.
    faceCam = cascadeFace.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5, minSize=(25,25))
    for (x,y,w,h) in faceCam:
        croppedResized = cv2.resize(cropped, (w,h), interpolation=cv2.INTER_LINEAR)
        cv2.imshow('Resized', croppedResized)
        mat = cv2.matchTemplate(gray, croppedResized, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(mat)
        top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h + 30)
        cv2.rectangle(frame, top_left, bottom_right, (0,255,0), 2)
    time.sleep(0.001)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    fps.update()

fps.stop()
print('FPS: ', fps.fps())
print('Elapsed seconds: ', fps.elapsed())
vidStream.stop()
cv2.destroyAllWindows()
| StarcoderdataPython |
59260 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from unittest import mock
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
import create_datasets
import data_utils
import predict
import trainer
def test_validated_missing_field() -> None:
    """validated() raises KeyError when a spec'd key is absent."""
    tensor_dict = {}
    values_spec = {"x": tf.TensorSpec(shape=(3,), dtype=tf.float32)}
    with pytest.raises(KeyError):
        trainer.validated(tensor_dict, values_spec)
def test_validated_incompatible_type() -> None:
    """validated() raises TypeError when the tensor dtype mismatches the spec."""
    tensor_dict = {"x": tf.constant(["a", "b", "c"])}
    values_spec = {"x": tf.TensorSpec(shape=(3,), dtype=tf.float32)}
    with pytest.raises(TypeError):
        trainer.validated(tensor_dict, values_spec)
def test_validated_incompatible_shape() -> None:
    """validated() raises ValueError when the tensor shape mismatches the spec."""
    tensor_dict = {"x": tf.constant([1.0])}
    values_spec = {"x": tf.TensorSpec(shape=(3,), dtype=tf.float32)}
    with pytest.raises(ValueError):
        trainer.validated(tensor_dict, values_spec)
def test_validated_ok() -> None:
    """validated() accepts exact shapes and specs with a None (any-size) dim."""
    tensor_dict = {"x": tf.constant([1.0, 2.0, 3.0])}
    values_spec = {"x": tf.TensorSpec(shape=(3,), dtype=tf.float32)}
    trainer.validated(tensor_dict, values_spec)

    tensor_dict = {"x": tf.constant([[1.0], [2.0], [3.0]])}
    values_spec = {"x": tf.TensorSpec(shape=(None, 1), dtype=tf.float32)}
    trainer.validated(tensor_dict, values_spec)
def test_serialize_deserialize() -> None:
    """Round-tripping every training point through serialize/deserialize
    preserves the expected input/output key sets."""
    unlabeled_data = data_utils.read_data("test_data/56980685061237.npz")
    labels = data_utils.read_labels("test_data/labels.csv")
    data = data_utils.label_data(unlabeled_data, labels)
    for training_point in data_utils.generate_training_points(data):
        serialized = trainer.serialize(training_point)
        inputs, outputs = trainer.deserialize(serialized)
        assert set(inputs.keys()) == set(trainer.INPUTS_SPEC.keys())
        assert set(outputs.keys()) == set(trainer.OUTPUTS_SPEC.keys())
@mock.patch.object(trainer, "PADDING", 2)
def test_e2e_local() -> None:
    """End-to-end: create datasets, train a model, and predict, all inside
    a temporary directory.

    PADDING is patched to 2 so the tiny test fixtures are sufficient.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        train_data_dir = os.path.join(temp_dir, "datasets", "train")
        eval_data_dir = os.path.join(temp_dir, "datasets", "eval")
        model_dir = os.path.join(temp_dir, "model")
        tensorboard_dir = os.path.join(temp_dir, "tensorboard")
        checkpoint_dir = os.path.join(temp_dir, "checkpoints")

        # Create the dataset TFRecord files.
        create_datasets.run(
            raw_data_dir="test_data",
            raw_labels_dir="test_data",
            train_data_dir=train_data_dir,
            eval_data_dir=eval_data_dir,
            train_eval_split=[80, 20],
        )
        assert os.listdir(train_data_dir), "no training files found"
        assert os.listdir(eval_data_dir), "no evaluation files found"

        # Train the model and save it.
        trainer.run(
            train_data_dir=train_data_dir,
            eval_data_dir=eval_data_dir,
            model_dir=model_dir,
            tensorboard_dir=tensorboard_dir,
            checkpoint_dir=checkpoint_dir,
            train_epochs=10,
            batch_size=32,
        )
        assert os.listdir(model_dir), "no model files found"
        assert os.listdir(tensorboard_dir), "no tensorboard files found"
        assert os.listdir(checkpoint_dir), "no checkpoint files found"

        # Load the trained model and make a prediction.
        with open("test_data/56980685061237.npz", "rb") as f:
            input_data = pd.DataFrame(np.load(f)["x"])
        predictions = predict.run(model_dir, input_data.to_dict("list"))

        # Check that we get non-empty predictions.
        assert "is_fishing" in predictions
        assert len(predictions["is_fishing"]) > 0
| StarcoderdataPython |
42579 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 15:06:45 2019
@author: garci
"""
import matplotlib.pyplot as plt
import numpy as np
import csv
import xlwings as xw
import pandas
import os
'''MAKE X-Y PLOTS WITH 2-COLUMN FILES
<NAME>, 2019 '''
'''lastRow credit: answered Sep 14 '16 at 11:39 - Stefan
https://stackoverflow.com/questions/33418119/xlwings-function-to-find-the-last-row-with-data'''
def lastRow(idx, workbook, col=1):
    """ Find the last row in the worksheet that contains data.

    idx: Specifies the worksheet to select. Starts counting from zero.

    workbook: Specifies the workbook

    col: The column in which to look for the last cell containing data
         (1-based, per xlwings convention).

    Returns the 1-based row number of that cell.
    """
    ws = workbook.sheets[idx]
    lwr_r_cell = ws.cells.last_cell  # lower right cell
    lwr_row = lwr_r_cell.row  # row of the lower right cell
    lwr_cell = ws.range((lwr_row, col))  # change to your specified column

    if lwr_cell.value is None:
        lwr_cell = lwr_cell.end('up')  # go up untill you hit a non-empty cell

    return lwr_cell.row
import time
'''MAKE AN XY PLOT FOR A SINGLE EXCEL FILE (SPECIFY FOLDER PATH AND FILE NAME)'''
def readxl(path,file,sheet='Sheet1'):
    """Plot column A (x) vs column B (y) of an Excel sheet.

    Reads the plot title from cell I1 and saves the figure to a file
    named after the current Unix timestamp.
    """
    book = xw.Book(path+file)
    x=book.sheets[sheet].range('A1:A'+str(lastRow(sheet,book))).value
    y=book.sheets[sheet].range('B1:B'+str(lastRow(sheet,book))).value
    pltitle=book.sheets[sheet].range('I1').value
#    y=(y-np.min(y))*627.509
    book.close()
    plt.figure()
    plt.plot(x,y)
#    plt.xlim(4,1.7)
    plt.title(pltitle)
#    plt.xlabel('')
#    print(end)
    plt.savefig(str(int(time.time())))
'''MAKE AN XY PLOT WITH TWO INDEPENDENT VARS (Y AND Z) FOR A SINGLE EXCEL FILE
(SPECIFY FOLDER PATH AND FILE NAME)'''
def readxl2(path,file,sheet='Sheet1'):
    """Plot two series (columns B and C) against column A from an Excel sheet.

    Cells I1/I2/I3 supply the plot title and the two series labels; the
    figure is saved as '<file>.png'.
    """
    book = xw.Book(path+file)
    x=book.sheets[sheet].range('A1:A'+str(lastRow(sheet,book))).value
    y=book.sheets[sheet].range('B1:B'+str(lastRow(sheet,book))).value
    z=book.sheets[sheet].range('C1:C'+str(lastRow(sheet,book))).value
    pltitle = book.sheets[sheet].range('I1').value
    label1 = book.sheets[sheet].range('I2').value
    label2 = book.sheets[sheet].range('I3').value
    book.close()
    plt.figure()
    plt.plot(x,y,label = label1)
    plt.plot(x,z,label = label2)
    plt.title(pltitle)
    plt.xlabel('time')
    plt.savefig(file+'.png')
    plt.legend()
def readxlCG(path,file,sheet='Sheet1'):
    """Scatter-plot two x/y datasets ("no CG" from columns B/D, "CG" from
    columns F/H) of an Excel sheet on the same axes.

    NOTE(review): data rows start at row 2 here (headers assumed in row 1),
    unlike readxl/readxl2 which start at row 1.
    """
    book = xw.Book(path+file)
    x=book.sheets[sheet].range('B2:B'+str(lastRow(sheet,book))).value
    y=book.sheets[sheet].range('D2:D'+str(lastRow(sheet,book))).value
    x2=book.sheets[sheet].range('F2:F'+str(lastRow(sheet,book))).value
    y2=book.sheets[sheet].range('H2:H'+str(lastRow(sheet,book))).value
#    pltitle = book.sheets[sheet].range('I1').value
#    label1 = book.sheets[sheet].range('I2').value
#    label2 = book.sheets[sheet].range('I3').value
    book.close()
    plt.figure()
    plt.plot(x,y,'o',label = 'no CG')
    plt.plot(x2,y2,'o',label = 'CG')
#    plt.title(pltitle)
#    plt.xlabel('time')
#    plt.savefig(file+'.png')
    plt.legend()
'MAKE XY PLOTS FOR ALL TAB DELIMITED FILES IN A FOLDER (SPECIFY FOLDER PATH)'
def writetab_bulk(path):
    """Plot every fixed-width two-column data file found under *path*.

    Walks the directory, skips files with known non-data extensions, reads
    each remaining file with pandas.read_fwf, and for every file with
    exactly two columns and more than one row saves an x-y plot named
    '<file>.png'.
    """
    # Extensions that are not raw two-column data files.
    skip_exts = ('.xlsx', '.csv', '.png', '.txt', '.xls')
    asps = []
    for root, dirs, files in os.walk(path):
        for file in files:
            # str.endswith accepts a tuple of suffixes -- replaces the
            # original chain of five separate not-endswith checks.
            if not file.endswith(skip_exts):
                asps.append(file)
    print(asps)
    index = 1
    for file in asps:
        df = pandas.read_fwf(path + file, header=None, infer_nrows=10000)
        xdim, ydim = df.shape[0], df.shape[1]
        if xdim > 1 and ydim == 2:
            df.columns = ['a', 'b']
            print(file, index)
            plt.figure()
            plt.plot(df['a'], df['b'])
            plt.title(file)
            plt.xlabel('time / ps')
            plt.savefig(file + '.png')
            index += 1
'(optional): create function with directory path (keep uncommented if unaware)'
#from proc_out_key import pathkey
#path = pathkey()
'''COMMAND SECTION (INPUT)'''
'MAKE XY PLOTS FOR ALL TAB DELIMITED FILES IN A FOLDER (SPECIFY FOLDER PATH)'
#path = r'C:\Users\***\file_folder/'
#df=writetab_bulk(path)
'''MAKE AN XY PLOT FOR A SINGLE EXCEL FILE (SPECIFY FOLDER PATH AND FILE NAME)'''
#path = r'C:\Users\***\file_folder/'
#file = '**.xlsx'
#writexl(path,file) | StarcoderdataPython |
1626327 | <reponame>cmancone/clearskies-aws<gh_stars>0
#!/usr/bin/env python3
"""Discover and run unit tests, optionally limited to a directory or file."""
import unittest
import sys, os

sys.path.append(os.path.dirname(os.path.realpath(__file__)))

# Default discovery pattern: any module name ending in "test.py".
glob = '?*test.py'
if len(sys.argv) > 1:
    path = sys.argv[1]
    # generally expecting a directory. If it is a file then find the parent directory
    if os.path.isfile(path):
        glob = os.path.basename(path)
        path = os.path.dirname(path)
    elif not os.path.isdir(path):
        raise ValueError("Cannot find file or directory named %s" % path)
    # convert to python speak (dotted package path)
    # NOTE(review): only '/' separators are handled -- Windows backslash
    # paths would not be converted.
    path = path.replace('/', '.').strip('.')
else:
    path = '.'

tests = unittest.TestLoader().discover(path, pattern=glob)
testRunner = unittest.runner.TextTestRunner()
testRunner.run(tests)
| StarcoderdataPython |
109953 | import pytest # type: ignore
from hopeit.app.config import AppConfig, AppDescriptor, \
EventDescriptor, EventType, EventPlugMode
from hopeit.server.config import ServerConfig, LoggingConfig
@pytest.fixture
def mock_plugin_config():
    """AppConfig fixture for a test plugin: one GET event plugged ON_APP,
    two env values under 'plugin', and DEBUG logging."""
    return AppConfig(
        app=AppDescriptor(name='mock_plugin', version='test'),
        env={
            'plugin': {
                'plugin_value': 'test_plugin_value',
                'custom_value': 'test_custom_value'
            }
        },
        events={
            'plugin_event': EventDescriptor(
                type=EventType.GET,
                plug_mode=EventPlugMode.ON_APP
            )
        },
        server=ServerConfig(
            logging=LoggingConfig(log_level="DEBUG", log_path="work/logs/test/")
        )
    )
| StarcoderdataPython |
108647 | <filename>test/1d_text.py
#!/usr/bin/env python
# the earlier script for ploting the result in ASCII format
# for ploting the HDF5 file, see the scripts in th 'pfm' directory
import sys
import numpy as np
import matplotlib.pyplot as plt
def get_color():
    """Yield the matplotlib color codes 'r g b c m y k', repeated four times."""
    palette = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
    for code in palette * 4:
        yield code
def main(argv):
    """Read a one-float-per-line text file and plot it as a 1-D profile.

    NOTE(review): uses Python 2 print statements -- this script requires
    a Python 2 interpreter.
    """
    if(len(argv)<2):
        print 'usage : python 1d_text.py input_file count'
        print 'example : python 1d_text.py 1.txt 100'
        return 1
    fname = argv[1]
    cnt = int(argv[2])
    ds = np.zeros(cnt)
    index = 0
    print 'input file is {0} with {1} lines \n'.format(fname,cnt)
    # Assumes the file has exactly cnt lines, one float per line.
    with open(fname) as fp:
        for line in fp:
            ds[index] = float(line)
            index += 1
    # x positions: evenly spaced over [0, 1).
    posx = np.zeros(cnt)
    for i in range(cnt):
        posx[i] = i*1.0/cnt
    acolor = get_color()  # NOTE(review): unused -- confirm intended.
    plt.plot(posx,ds)
    plt.xlabel('x')
    plt.ylabel('T')
    plt.title(fname)
    axes = plt.gca()
    axes.set_ylim([0,1.0])
    plt.show()

if __name__ == '__main__':
    main(sys.argv)
| StarcoderdataPython |
88887 | <reponame>gneumann333/jumpscaleX_core
from Jumpscale import j
class UserSessionBase(j.baseclasses.object):
    """Shared base class for user session objects (no behaviour yet)."""
    pass
class UserSessionAdmin(UserSessionBase):
    """Session for the local admin identity: always authorised."""

    def _init(self):
        self.admin = True
        self.threebot_id = None
        self.threebot_name = None
        self.threebot_circles = []
        self.kwargs = []
        # Accepted serialisation formats for responses/requests.
        self.response_type = j.data.types.get("e", default="auto,json,msgpack").clean(0)
        self.content_type = j.data.types.get("e", default="auto,json,msgpack").clean(0)
        self.threebot_client = None

    def admin_check(self):
        """Admin sessions always pass the permission check."""
        return True
class UserSession(UserSessionBase):
    """Session for a remote threebot user; admin status is derived lazily."""

    def _init(self):
        self._admin = None
        self.threebot_id = 0
        self.threebot_name = None
        self.threebot_circles = []
        self.kwargs = []
        self.response_type = j.data.types.get("e", default="auto,json,msgpack").clean(0)
        self.content_type = j.data.types.get("e", default="auto,json,msgpack").clean(0)

    @property
    def threebot_client(self):
        """Client for this session's threebot, or None when no name is set."""
        if not self.threebot_name:
            return
        return j.clients.threebot.client_get(threebot=self.threebot_id)

    @property
    def admin(self):
        """True when the session matches the local identity (by name or id)
        or a configured admin name.

        NOTE(review): a negative result is never cached (_admin stays
        None, which is falsy), so the checks re-run on every access.
        """
        if self._admin is None:
            if self.threebot_name == j.myidentities.me.tname:
                self._admin = True
            elif int(self.threebot_id) == j.myidentities.me.tid:
                self._admin = True
            elif self.threebot_name in j.myidentities.me.admins:
                self._admin = True
        return self._admin

    def admin_check(self):
        """Raise a Permission error unless the session is an admin."""
        if not self.admin:
            raise j.exceptions.Permission("only admin user can access this method")
| StarcoderdataPython |
3205231 | import main
import state
import utils
import os
from flask import Flask, render_template, request, redirect, cli
from werkzeug.utils import secure_filename
from nfc_reader import start_nfc_thread
from utils import printt
DEVENV = False
try:
# pylint: disable=import-error
import RPi.GPIO as GPIO
except:
DEVENV = True
app = Flask(__name__)
cli.show_server_banner = lambda *_: None
def init():
    """Prepare the Flask app (enable template auto-reload) before serving."""
    printt('Initializing web interface...')
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    printt('Ready!')
def run_wait():
    """Run the web server and block: port 5000 in dev, 80 on the device."""
    if DEVENV:
        app.run(host='0.0.0.0', port=5000)
    else:
        app.run(host='0.0.0.0', port=80)
@app.route('/')
def index():
    """Render the home page with NFC reader status and player state."""
    player = state.get_player()
    vm = {
        'nfc_status': state.get_nfc_status(),
        'song_name': state.get_song_name(),
        'is_playing': player.is_state(player.STATE_PLAYING),
        'is_paused': player.is_state(player.STATE_PAUSED),
        'is_stopped': player.is_state(player.STATE_STOPPED),
        'version': main.VERSION
    }
    return render_template('index.html', vm=vm)
# ACTIONS
@app.route('/actions/initnfc')
def action_initnfc():
    """Start the NFC reader thread if it is not already running."""
    if not state.get_nfc_status():
        start_nfc_thread()
    return redirect('/')
@app.route('/actions/reloadsongs')
def action_reloadsongs():
    """Re-scan the song storage, then return to the tag list."""
    player = state.get_player()
    player.reload_songs()
    return redirect('/tags')
@app.route('/actions/stop')
def action_stop():
    """Stop playback, then return home."""
    player = state.get_player()
    player.stop()
    return redirect('/')
@app.route('/actions/play')
def action_play():
    """Start/resume playback, then return home."""
    player = state.get_player()
    player.play()
    return redirect('/')
@app.route('/actions/pause')
def action_pause():
    """Pause playback, then return home."""
    player = state.get_player()
    player.pause()
    return redirect('/')
@app.route('/actions/vol')
def action_vol():
    """Set the player volume from the ``vol`` query parameter.

    A missing (None -> TypeError) or non-numeric (ValueError) parameter
    is silently ignored; the handler always redirects home.
    """
    try:
        vol = float(request.args.get('vol'))
        player = state.get_player()
        player.set_vol(vol)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except:`` which hid every possible error
        # (including programming errors in the player). Unexpected player
        # failures now surface instead of being swallowed.
        pass
    return redirect('/')
# LOGS
def _read_text(path):
    """Return the contents of *path*, or '' when the file does not exist."""
    if os.path.exists(path):
        with open(path) as f:
            return f.read()
    return ''


@app.route('/logs')
def logs():
    """Render the log page with the app log and error log contents.

    Missing log files render as empty strings.
    """
    # Duplicated read-if-exists logic factored into _read_text.
    log = _read_text('/var/log/nfcmb.log')
    err = _read_text('/var/log/nfcmb_err.log')
    return render_template('logs.html', vm={
        'log': log,
        'err': err
    })
# SETTINGS
@app.route('/settings')
def settings():
    """Render the settings page."""
    return render_template('settings.html')
@app.route('/actions/settings/update')
def settings_update():
    """Trigger a software update, then return to the settings page."""
    main.update()
    return redirect('/settings')
@app.route('/actions/settings/reboot')
def settings_reboot():
    """Reboot the device, then return to the settings page."""
    main.reboot()
    return redirect('/settings')
# TAGS
@app.route('/tags')
def tags():
    """Render the list of registered NFC tags."""
    storage = state.get_storage()
    tags = storage.get_tags()
    vm = {
        'tags': tags
    }
    return render_template('tags.html', vm=vm)
@app.route('/tags/add', methods=['GET'])
def tags_add():
    """Render the add-tag form, pre-filled with the last scanned tag uid."""
    return render_template('tags_add.html', vm={
        'error': request.args.get('error'),
        'last_tag': state.get_last_tag()
    })
@app.route('/tags/add', methods=['POST'])
def tags_add_post():
    """Handle the add-tag form: store the uploaded MP3 and register the tag.

    Redirects back to the form with ``error=1`` on an invalid upload/uid;
    otherwise redirects to the tag list.
    """
    storage = state.get_storage()
    songfile = request.files['song']
    songname = songfile.filename.replace(' ', '_')
    # Require an uploaded file, a non-empty uid, and an .mp3 extension.
    if songfile is not None \
            and request.form['uid'] is not None \
            and len(request.form['uid']) > 0 \
            and songname.lower().endswith('.mp3'):
        storage.add_song(songfile, secure_filename(songname))
    else:
        return redirect('/tags/add?error=1')
    newtag = {
        'uid': request.form['uid'],
        'name': songname
    }
    try:
        storage.add_tag(newtag)
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed
        # SystemExit/KeyboardInterrupt). Presumably guards against a
        # duplicate uid -- TODO confirm what storage.add_tag raises.
        pass
    return redirect('/tags')
@app.route('/actions/tags/play')
def tags_play():
    """Load and play the song linked to the ``uid`` query parameter.

    Unknown uids are ignored; always redirects to the tag list.
    """
    storage = state.get_storage()
    tags = storage.get_tags()
    uid = request.args.get('uid')
    tag = utils.select_tag(tags, uid)
    if tag is not None:
        player = state.get_player()
        player.load(name=storage.to_full_path(tag['name']))
        player.play()
    return redirect('/tags')
@app.route('/actions/tags/delete')
def tag_delete():
    """Delete the tag with the given ``uid``; failures are logged, not raised."""
    uid = request.args.get('uid')
    try:
        storage = state.get_storage()
        storage.remove_tag(uid)
    except Exception as e:
        printt(e)
    return redirect('/tags')
1777058 | <filename>scripts/remove_reads.py
#!/usr/bin/env python
"""Remove listed reads from a FASTQ file.

Records whose id appears in idFile are dropped; every other record is
written to stdout in FASTQ format.
"""
from Bio.SeqIO.QualityIO import FastqGeneralIterator
import sys

if len(sys.argv) != 3:
    sys.exit('\nusage: python remove_reads.py fastqFile idFile \n\
\nThis is a program to remove records from id list output is a fastq file \
\n 1. sequence file needs to be fastq \
\n 2. idfile is one id per line text file (without "@" sign in front of the id )\n')

# Index the ids to remove. A set gives O(1) membership and the original
# dict's values were never used. `with` guarantees the handle is closed.
with open(sys.argv[2], 'r') as id_handle:
    remove_ids = {line.strip("\n\t\r ") for line in id_handle}
sys.stderr.write('Indexed!\n')

# Print every record whose id is NOT in the removal set.
# (`title` instead of `id`, which shadowed the builtin; print() works on
# both Python 2 and 3, unlike the original print statement.)
with open(sys.argv[1]) as fastq_handle:
    for title, seq, qual in FastqGeneralIterator(fastq_handle):
        if title.split(' ')[0] not in remove_ids:
            print("@%s\n%s\n+\n%s" % (title, seq, qual))
| StarcoderdataPython |
4839601 | """Resources that represent both individual and collections of design workflow executions."""
import sys
from typing import Optional, Union, Iterator
from uuid import UUID
from citrine._rest.collection import Collection
from citrine._utils.functions import shadow_classes_in_module
from citrine._session import Session
import citrine.informatics.executions.design_execution
from citrine.informatics.executions import DesignExecution
from citrine.informatics.scores import Score
from citrine.resources.response import Response
shadow_classes_in_module(citrine.informatics.executions.design_execution, sys.modules[__name__])
class DesignExecutionCollection(Collection["DesignExecution"]):
    """A collection of DesignExecutions scoped to one project (and optionally
    one design workflow)."""

    # REST path for this collection; filled in from project_id / workflow_id.
    _path_template = '/projects/{project_id}/design-workflows/{workflow_id}/executions'  # noqa
    _individual_key = None
    _collection_key = 'response'
    _resource = DesignExecution

    def __init__(self,
                 project_id: UUID,
                 session: Session,
                 workflow_id: Optional[UUID] = None):
        """Store the identifiers and session used to build request paths."""
        self.project_id: UUID = project_id
        self.session: Session = session
        self.workflow_id: UUID = workflow_id

    def build(self, data: dict) -> DesignExecution:
        """Build an individual DesignExecution from a response dict, wiring in
        the session and project id so the execution can make its own calls."""
        execution = DesignExecution.build(data)
        execution._session = self.session
        execution.project_id = self.project_id
        return execution

    def trigger(self, execution_input: Score):
        """Trigger a Design Workflow execution given a score.

        Posts the serialized score to the collection path and returns the
        built execution resource.
        """
        path = self._get_path()
        data = self.session.post_resource(path, {'score': execution_input.dump()})
        self._check_experimental(data)
        return self.build(data)

    def register(self, model: DesignExecution) -> DesignExecution:
        """Cannot register an execution."""
        raise NotImplementedError("Cannot register a DesignExecution.")

    def update(self, model: DesignExecution) -> DesignExecution:
        """Cannot update an execution."""
        raise NotImplementedError("Cannot update a DesignExecution.")

    def archive(self, uid: Union[UUID, str]):
        """Unsupported: Design Workflow executions cannot be archived."""
        raise NotImplementedError(
            "Design Executions cannot be archived")

    def restore(self, uid: UUID):
        """Unsupported: Design Workflow executions cannot be restored."""
        raise NotImplementedError(
            "Design Executions cannot be restored")

    def list(self, *,
             page: Optional[int] = None,
             per_page: int = 100,
             ) -> Iterator[DesignExecution]:
        """
        Paginate over the elements of the collection.

        Leaving page and per_page as default values will yield all elements in the
        collection, paginating over all available pages.

        Parameters
        ----------
        page: int, optional
            The "page" of results to list. Default is to read all pages and yield
            all results. This option is deprecated.
        per_page: int, optional
            Max number of results to return per page. Default is 100. This parameter
            is used when making requests to the backend service. If the page parameter
            is specified it limits the maximum number of elements in the response.

        Returns
        -------
        Iterator[ResourceType]
            Resources in this collection.

        """
        return self._paginator.paginate(page_fetcher=self._fetch_page,
                                        collection_builder=self._build_collection_elements,
                                        page=page,
                                        per_page=per_page)

    def delete(self, uid: Union[UUID, str]) -> Response:
        """Design Workflow Executions cannot be deleted or archived."""
        raise NotImplementedError(
            "Design Executions cannot be deleted")
| StarcoderdataPython |
4827083 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import math
import torch
import unittest
import gpytorch
import numpy as np
from torch.autograd import Variable
from gpytorch.utils import approx_equal
from gpytorch.lazy import NonLazyVariable
class PyTorchCompatibleTestCase(unittest.TestCase):
    """TestCase base with a scalar comparison that works on both PyTorch 0.3
    (Variable wrappers, indexing) and PyTorch 0.4 (``.item()``)."""

    # Writing a separate function for compatibility with PyTorch 0.3 and PyTorch 0.4
    def assert_scalar_almost_equal(self, scalar1, scalar2, **kwargs):
        """Assert two scalar-like values (numbers, tensors or Variables) are
        almost equal; ``**kwargs`` are forwarded to ``assertAlmostEqual``."""
        # PyTorch 0.3 - make everything tensors
        if isinstance(scalar1, Variable):
            scalar1 = scalar1.data
        if isinstance(scalar2, Variable):
            scalar2 = scalar2.data
        if not torch.is_tensor(scalar1):
            scalar1 = torch.Tensor([scalar1])
        if not torch.is_tensor(scalar2):
            scalar2 = torch.Tensor([scalar2])
        # PyTorch 0.4
        if hasattr(scalar1, 'item'):
            self.assertAlmostEqual(scalar1.item(), scalar2.item(), **kwargs)
        # PyTorch 0.3
        else:
            self.assertAlmostEqual(scalar1[0], scalar2[0], **kwargs)
class TestMatmulNonBatch(PyTorchCompatibleTestCase):
    """Checks that NonLazyVariable.matmul matches dense matmul, forward and
    backward, for a single 3x3 matrix against a vector and a 3x4 matrix."""

    def setUp(self):
        mat = torch.Tensor([
            [3, -1, 0],
            [-1, 3, 0],
            [0, 0, 3],
        ])
        vec = torch.randn(3)
        vecs = torch.randn(3, 4)
        # *_clone variables hold the same data so dense-vs-lazy gradients can
        # be compared independently.
        self.mat_var = Variable(mat, requires_grad=True)
        self.mat_var_clone = Variable(mat, requires_grad=True)
        self.vec_var = Variable(vec, requires_grad=True)
        self.vec_var_clone = Variable(vec, requires_grad=True)
        self.vecs_var = Variable(vecs, requires_grad=True)
        self.vecs_var_clone = Variable(vecs, requires_grad=True)

    def test_matmul_vec(self):
        # Forward
        res = NonLazyVariable(self.mat_var).matmul(self.vec_var)
        actual = self.mat_var_clone.matmul(self.vec_var_clone)
        self.assertTrue(approx_equal(res, actual))
        # Backward
        # torch.Tensor(3) is uninitialized memory, but the same buffer is fed
        # to both backward passes, so the gradient comparison is still valid.
        grad_output = torch.Tensor(3)
        res.backward(gradient=grad_output)
        actual.backward(gradient=grad_output)
        self.assertTrue(approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data))
        self.assertTrue(approx_equal(self.vec_var_clone.grad.data, self.vec_var.grad.data))

    def test_matmul_multiple_vecs(self):
        # Forward
        res = NonLazyVariable(self.mat_var).matmul(self.vecs_var)
        actual = self.mat_var_clone.matmul(self.vecs_var_clone)
        self.assertTrue(approx_equal(res, actual))
        # Backward
        grad_output = torch.Tensor(3, 4)
        res.backward(gradient=grad_output)
        actual.backward(gradient=grad_output)
        self.assertTrue(approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data))
        self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))
class TestMatmulBatch(PyTorchCompatibleTestCase):
    """Same matmul check as TestMatmulNonBatch, but over a batch of two 3x3
    matrices multiplied by a (2, 3, 4) right-hand side."""

    def setUp(self):
        mats = torch.Tensor([
            [
                [3, -1, 0],
                [-1, 3, 0],
                [0, 0, 3],
            ], [
                [10, -2, 1],
                [-2, 10, 0],
                [1, 0, 10],
            ]
        ])
        vecs = torch.randn(2, 3, 4)
        self.mats_var = Variable(mats, requires_grad=True)
        self.mats_var_clone = Variable(mats, requires_grad=True)
        self.vecs_var = Variable(vecs, requires_grad=True)
        self.vecs_var_clone = Variable(vecs, requires_grad=True)

    def test_matmul_multiple_vecs(self):
        # Forward
        res = NonLazyVariable(self.mats_var).matmul(self.vecs_var)
        actual = self.mats_var_clone.matmul(self.vecs_var_clone)
        self.assertTrue(approx_equal(res, actual))
        # Backward
        grad_output = torch.Tensor(2, 3, 4)
        res.backward(gradient=grad_output)
        actual.backward(gradient=grad_output)
        self.assertTrue(approx_equal(self.mats_var_clone.grad.data, self.mats_var.grad.data))
        self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))
class TestInvMatmulNonBatch(unittest.TestCase):
    """Checks NonLazyVariable.inv_matmul against explicit inverse().matmul().

    NOTE(review): this class subclasses unittest.TestCase directly while its
    siblings use PyTorchCompatibleTestCase; harmless here because the compat
    helper is unused, but consider unifying the base class.
    """

    def setUp(self):
        mat = torch.Tensor([
            [3, -1, 0],
            [-1, 3, 0],
            [0, 0, 3],
        ])
        vec = torch.randn(3)
        vecs = torch.randn(3, 4)
        self.mat_var = Variable(mat, requires_grad=True)
        self.mat_var_clone = Variable(mat, requires_grad=True)
        self.vec_var = Variable(vec, requires_grad=True)
        self.vec_var_clone = Variable(vec, requires_grad=True)
        self.vecs_var = Variable(vecs, requires_grad=True)
        self.vecs_var_clone = Variable(vecs, requires_grad=True)

    def test_inv_matmul_vec(self):
        # Forward
        res = NonLazyVariable(self.mat_var).inv_matmul(self.vec_var)
        actual = self.mat_var_clone.inverse().matmul(self.vec_var_clone)
        self.assertTrue(approx_equal(res, actual))
        # Backward
        grad_output = torch.randn(3)
        res.backward(gradient=grad_output)
        actual.backward(gradient=grad_output)
        self.assertTrue(approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data))
        self.assertTrue(approx_equal(self.vec_var_clone.grad.data, self.vec_var.grad.data))

    def test_inv_matmul_multiple_vecs(self):
        # Forward
        res = NonLazyVariable(self.mat_var).inv_matmul(self.vecs_var)
        actual = self.mat_var_clone.inverse().matmul(self.vecs_var_clone)
        self.assertTrue(approx_equal(res, actual))
        # Backward
        grad_output = torch.randn(3, 4)
        res.backward(gradient=grad_output)
        actual.backward(gradient=grad_output)
        self.assertTrue(approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data))
        self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))
class TestInvMatmulBatch(PyTorchCompatibleTestCase):
    """Batched inv_matmul check: two 3x3 systems solved against a (2, 3, 4)
    right-hand side, compared to per-matrix explicit inverses."""

    def setUp(self):
        mats = torch.Tensor([
            [
                [3, -1, 0],
                [-1, 3, 0],
                [0, 0, 3],
            ], [
                [10, -2, 1],
                [-2, 10, 0],
                [1, 0, 10],
            ]
        ])
        vecs = torch.randn(2, 3, 4)
        self.mats_var = Variable(mats, requires_grad=True)
        self.mats_var_clone = Variable(mats, requires_grad=True)
        self.vecs_var = Variable(vecs, requires_grad=True)
        self.vecs_var_clone = Variable(vecs, requires_grad=True)

    def test_inv_matmul_multiple_vecs(self):
        # Forward
        res = NonLazyVariable(self.mats_var).inv_matmul(self.vecs_var)
        # Expected result: invert each batch element separately, then matmul.
        actual = torch.cat([
            self.mats_var_clone[0].inverse().unsqueeze(0),
            self.mats_var_clone[1].inverse().unsqueeze(0),
        ]).matmul(self.vecs_var_clone)
        self.assertTrue(approx_equal(res, actual))
        # Backward
        grad_output = torch.randn(2, 3, 4)
        res.backward(gradient=grad_output)
        actual.backward(gradient=grad_output)
        self.assertTrue(approx_equal(self.mats_var_clone.grad.data, self.mats_var.grad.data))
        self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))
class TestInvQuadLogDetNonBatch(PyTorchCompatibleTestCase):
    """Checks inv_quad / log_det of a NonLazyVariable against values computed
    with explicit inverses and numpy determinants.

    Tolerances are loose (places=1, epsilon=1e-1) because the log-det path is
    evaluated under gpytorch.settings.num_trace_samples, i.e. with a
    sample-based estimate rather than an exact computation.
    """

    def setUp(self):
        # Pin the RNG unless UNLOCK_SEED is set, so the sampled estimates are
        # reproducible; tearDown restores the previous RNG state.
        if os.getenv('UNLOCK_SEED') is None or os.getenv('UNLOCK_SEED').lower() == 'false':
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(1)
        mat = torch.Tensor([
            [3, -1, 0],
            [-1, 3, 0],
            [0, 0, 3],
        ])
        vec = torch.randn(3)
        vecs = torch.randn(3, 4)
        self.mat_var = Variable(mat, requires_grad=True)
        self.vec_var = Variable(vec, requires_grad=True)
        self.vecs_var = Variable(vecs, requires_grad=True)
        self.mat_var_clone = Variable(mat, requires_grad=True)
        self.vec_var_clone = Variable(vec, requires_grad=True)
        self.vecs_var_clone = Variable(vecs, requires_grad=True)
        # Reference log-determinant computed exactly via numpy.
        self.log_det = math.log(np.linalg.det(mat.numpy()))

    def tearDown(self):
        if hasattr(self, 'rng_state'):
            torch.set_rng_state(self.rng_state)

    def test_inv_quad_log_det_vector(self):
        # Forward pass: inv_quad(v) should equal v^T A^{-1} v.
        actual_inv_quad = (
            self.mat_var_clone.
            inverse().
            matmul(self.vec_var_clone).
            mul(self.vec_var_clone).
            sum()
        )
        with gpytorch.settings.num_trace_samples(1000):
            nlv = NonLazyVariable(self.mat_var)
            res_inv_quad, res_log_det = nlv.inv_quad_log_det(inv_quad_rhs=self.vec_var, log_det=True)
        self.assert_scalar_almost_equal(res_inv_quad, actual_inv_quad, places=1)
        self.assert_scalar_almost_equal(res_log_det, self.log_det, places=1)
        # Backward
        inv_quad_grad_output = torch.Tensor([3])
        log_det_grad_output = torch.Tensor([4])
        actual_inv_quad.backward(gradient=inv_quad_grad_output)
        # d(log det A)/dA = A^{-1}; fold that contribution into the clone's
        # gradient by hand before comparing.
        self.mat_var_clone.grad.data.add_(self.mat_var_clone.data.inverse() * log_det_grad_output)
        res_inv_quad.backward(gradient=inv_quad_grad_output, retain_graph=True)
        res_log_det.backward(gradient=log_det_grad_output)
        self.assertTrue(approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1))
        self.assertTrue(approx_equal(self.vec_var_clone.grad.data, self.vec_var.grad.data))

    def test_inv_quad_only_vector(self):
        # Forward pass
        res = NonLazyVariable(self.mat_var).inv_quad(self.vec_var)
        actual = self.mat_var_clone.inverse().matmul(self.vec_var_clone).mul(self.vec_var_clone).sum()
        self.assert_scalar_almost_equal(res, actual, places=1)
        # Backward
        inv_quad_grad_output = torch.randn(1)
        actual.backward(gradient=inv_quad_grad_output)
        res.backward(gradient=inv_quad_grad_output)
        self.assertTrue(approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1))
        self.assertTrue(approx_equal(self.vec_var_clone.grad.data, self.vec_var.grad.data))

    def test_inv_quad_log_det_many_vectors(self):
        # Forward pass
        actual_inv_quad = (
            self.mat_var_clone.
            inverse().
            matmul(self.vecs_var_clone).
            mul(self.vecs_var_clone).
            sum()
        )
        with gpytorch.settings.num_trace_samples(1000):
            nlv = NonLazyVariable(self.mat_var)
            res_inv_quad, res_log_det = nlv.inv_quad_log_det(inv_quad_rhs=self.vecs_var, log_det=True)
        self.assert_scalar_almost_equal(res_inv_quad, actual_inv_quad, places=1)
        self.assert_scalar_almost_equal(res_log_det, self.log_det, places=1)
        # Backward
        inv_quad_grad_output = torch.Tensor([3])
        log_det_grad_output = torch.Tensor([4])
        actual_inv_quad.backward(gradient=inv_quad_grad_output)
        self.mat_var_clone.grad.data.add_(self.mat_var_clone.data.inverse() * log_det_grad_output)
        res_inv_quad.backward(gradient=inv_quad_grad_output, retain_graph=True)
        res_log_det.backward(gradient=log_det_grad_output)
        self.assertTrue(approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1))
        self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))

    def test_inv_quad_only_many_vectors(self):
        # Forward pass
        res = NonLazyVariable(self.mat_var).inv_quad(self.vecs_var)
        actual = self.mat_var_clone.inverse().matmul(self.vecs_var_clone).mul(self.vecs_var_clone).sum()
        self.assert_scalar_almost_equal(res, actual, places=1)
        # Backward
        inv_quad_grad_output = torch.randn(1)
        actual.backward(gradient=inv_quad_grad_output)
        res.backward(gradient=inv_quad_grad_output)
        self.assertTrue(approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1))
        self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))

    def test_log_det_only(self):
        # Forward pass
        with gpytorch.settings.num_trace_samples(1000):
            res = NonLazyVariable(self.mat_var).log_det()
        self.assert_scalar_almost_equal(res, self.log_det, places=1)
        # Backward
        grad_output = torch.Tensor([3])
        actual_mat_grad = self.mat_var_clone.data.inverse().mul(grad_output)
        res.backward(gradient=grad_output)
        self.assertTrue(approx_equal(actual_mat_grad, self.mat_var.grad.data, epsilon=1e-1))
class TestInvQuadLogDetBatch(PyTorchCompatibleTestCase):
    """Batched version of TestInvQuadLogDetNonBatch: inv_quad and log_det are
    checked per batch element against explicit inverses and numpy dets."""

    def setUp(self):
        # Pin the RNG unless UNLOCK_SEED is set (restored in tearDown).
        if os.getenv('UNLOCK_SEED') is None or os.getenv('UNLOCK_SEED').lower() == 'false':
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(1)
        mats = torch.Tensor([
            [
                [3, -1, 0],
                [-1, 3, 0],
                [0, 0, 3],
            ], [
                [10, -2, 1],
                [-2, 10, 0],
                [1, 0, 10],
            ]
        ])
        vecs = torch.randn(2, 3, 4)
        self.mats_var = Variable(mats, requires_grad=True)
        self.vecs_var = Variable(vecs, requires_grad=True)
        self.mats_var_clone = Variable(mats, requires_grad=True)
        self.vecs_var_clone = Variable(vecs, requires_grad=True)
        # Exact reference log-determinants, one per batch element.
        self.log_dets = torch.Tensor([
            math.log(np.linalg.det(mats[0].numpy())),
            math.log(np.linalg.det(mats[1].numpy())),
        ])

    def tearDown(self):
        if hasattr(self, 'rng_state'):
            torch.set_rng_state(self.rng_state)

    def test_inv_quad_log_det_many_vectors(self):
        # Forward pass: per-batch v^T A^{-1} v reduced over the last two dims.
        actual_inv_quad = torch.cat([
            self.mats_var_clone[0].inverse().unsqueeze(0),
            self.mats_var_clone[1].inverse().unsqueeze(0),
        ]).matmul(self.vecs_var_clone).mul(self.vecs_var_clone).sum(2).sum(1)
        with gpytorch.settings.num_trace_samples(1000):
            nlv = NonLazyVariable(self.mats_var)
            res_inv_quad, res_log_det = nlv.inv_quad_log_det(inv_quad_rhs=self.vecs_var, log_det=True)
        for i in range(self.mats_var.size(0)):
            self.assert_scalar_almost_equal(res_inv_quad.data[i], actual_inv_quad.data[i], places=1)
            self.assert_scalar_almost_equal(res_log_det.data[i], self.log_dets[i], places=1)
        # Backward
        inv_quad_grad_output = torch.Tensor([3, 4])
        log_det_grad_output = torch.Tensor([4, 2])
        actual_inv_quad.backward(gradient=inv_quad_grad_output)
        # d(log det A_i)/dA_i = A_i^{-1}; add that contribution by hand.
        mat_log_det_grad = torch.cat([
            self.mats_var_clone[0].data.inverse().mul(log_det_grad_output[0]).unsqueeze(0),
            self.mats_var_clone[1].data.inverse().mul(log_det_grad_output[1]).unsqueeze(0),
        ])
        self.mats_var_clone.grad.data.add_(mat_log_det_grad)
        res_inv_quad.backward(gradient=inv_quad_grad_output, retain_graph=True)
        res_log_det.backward(gradient=log_det_grad_output)
        self.assertTrue(approx_equal(self.mats_var_clone.grad.data, self.mats_var.grad.data, epsilon=1e-1))
        self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))

    def test_inv_quad_only_many_vectors(self):
        # Forward pass
        res = NonLazyVariable(self.mats_var).inv_quad(self.vecs_var)
        actual = torch.cat([
            self.mats_var_clone[0].inverse().unsqueeze(0),
            self.mats_var_clone[1].inverse().unsqueeze(0),
        ]).matmul(self.vecs_var_clone).mul(self.vecs_var_clone).sum(2).sum(1)
        for i in range(self.mats_var.size(0)):
            self.assert_scalar_almost_equal(res.data[i], actual.data[i], places=1)
        # Backward
        inv_quad_grad_output = torch.randn(2)
        actual.backward(gradient=inv_quad_grad_output)
        res.backward(gradient=inv_quad_grad_output)
        self.assertTrue(approx_equal(self.mats_var_clone.grad.data, self.mats_var.grad.data, epsilon=1e-1))
        self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))

    def test_log_det_only(self):
        # Forward pass
        with gpytorch.settings.num_trace_samples(1000):
            res = NonLazyVariable(self.mats_var).log_det()
        for i in range(self.mats_var.size(0)):
            self.assert_scalar_almost_equal(res.data[i], self.log_dets[i], places=1)
        # Backward
        grad_output = torch.Tensor([3, 4])
        actual_mat_grad = torch.cat([
            self.mats_var_clone[0].data.inverse().mul(grad_output[0]).unsqueeze(0),
            self.mats_var_clone[1].data.inverse().mul(grad_output[1]).unsqueeze(0),
        ])
        res.backward(gradient=grad_output)
        self.assertTrue(approx_equal(actual_mat_grad, self.mats_var.grad.data, epsilon=1e-1))
class TestRootDecomposition(PyTorchCompatibleTestCase):
    """Checks that root_decomposition reproduces the matrix (R R^T = A) and
    that root_inv_decomposition reproduces its inverse, forward and backward."""

    def setUp(self):
        # Pin the RNG unless UNLOCK_SEED is set (restored in tearDown).
        if os.getenv('UNLOCK_SEED') is None or os.getenv('UNLOCK_SEED').lower() == 'false':
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(0)
        # A fixed symmetric positive-definite 5x5 matrix.
        mat = torch.Tensor([
            [5.0212, 0.5504, -0.1810, 1.5414, 2.9611],
            [0.5504, 2.8000, 1.9944, 0.6208, -0.8902],
            [-0.1810, 1.9944, 3.0505, 1.0790, -1.1774],
            [1.5414, 0.6208, 1.0790, 2.9430, 0.4170],
            [2.9611, -0.8902, -1.1774, 0.4170, 3.3208],
        ])
        self.mat_var = Variable(mat, requires_grad=True)
        self.mat_var_clone = Variable(mat, requires_grad=True)

    def tearDown(self):
        if hasattr(self, 'rng_state'):
            torch.set_rng_state(self.rng_state)

    def test_root_decomposition(self):
        # Forward: R R^T should reconstruct the original matrix.
        root = NonLazyVariable(self.mat_var).root_decomposition()
        res = root.matmul(root.transpose(-1, -2))
        self.assertTrue(approx_equal(res.data, self.mat_var.data))
        # Backward: gradients of trace(R R^T) vs trace(A) must agree.
        res.trace().backward()
        self.mat_var_clone.trace().backward()
        self.assertTrue(approx_equal(self.mat_var.grad.data, self.mat_var_clone.grad.data))

    def test_root_inv_decomposition(self):
        # Forward: R R^T should reconstruct the inverse matrix.
        root = NonLazyVariable(self.mat_var).root_inv_decomposition()
        res = root.matmul(root.transpose(-1, -2))
        actual = self.mat_var_clone.inverse()
        self.assertTrue(approx_equal(res.data, actual.data))
        # Backward
        res.trace().backward()
        actual.trace().backward()
        self.assertTrue(approx_equal(self.mat_var.grad.data, self.mat_var_clone.grad.data))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3239311 | <reponame>cammelworks/CammelBot
# coding: utf-8
from slackbot.bot import respond_to
from slackbot.bot import listen_to
from slackbot.bot import default_reply
import re
from datetime import datetime
import locale
# Adopt the environment's default character-type locale.
locale.setlocale(locale.LC_CTYPE, '')
@respond_to('time')
def now(message):
    """Reply with the current local date and time."""
    message.reply(datetime.now().strftime("%Y/%m/%d %H時%M分%S秒です"))
@respond_to('今何時')
def now_time(message):
    """Reply with the current local date and time (Japanese trigger)."""
    message.reply(datetime.now().strftime("%Y/%m/%d %H時%M分%S秒です"))
@respond_to('MTG', re.IGNORECASE)
def mtg(message):
    """Post the standing 18:30 MTG reminder to the channel."""
    reminder = "<!channel> リマインド\n本日18:30からMTGです〜\n`本日のアジェンダ`はスレッドで\n報告等あるひとはお願いしま〜す:カニちゃん:"
    message.send(reminder)
@listen_to('いる?')
def here(message):
    """Confirm the bot is listening."""
    message.reply("ここだよ!!メェェ〜")
@default_reply()
def default(message):
    """Echo back any message no other handler matched."""
    message.reply(message.body['text'])
1627674 | <gh_stars>1-10
from distutils.version import StrictVersion as SV
import unittest
import minecraft
class VersionTest(unittest.TestCase):
    """Sanity checks on the version constants exported by the minecraft package."""

    def test_module_version_is_a_valid_pep_386_strict_version(self):
        # StrictVersion raises ValueError on a malformed version string.
        SV(minecraft.__version__)

    def test_minecraft_version_is_a_valid_pep_386_strict_version(self):
        SV(minecraft.MINECRAFT_VERSION)

    def test_protocol_version_is_an_int(self):
        # assertIsInstance replaces `assertTrue(type(x) is int)`: it is the
        # idiomatic type check and reports the actual type on failure.
        self.assertIsInstance(minecraft.PROTOCOL_VERSION, int)
| StarcoderdataPython |
3201221 | <gh_stars>1-10
# import webtest
# import logging
# import unittest
# from google.appengine.ext import testbed
# from protorpc.remote import protojson
# import endpoints
#
# from conference import ConferenceApi
# from models import ConferenceForm
# from models import ConferenceForms
# from models import ConferenceQueryForm
# from models import ConferenceQueryForms
# def init_stubs(tb):
# tb.init_urlfetch_stub()
# tb.init_app_identity_stub()
# tb.init_blobstore_stub()
# tb.init_capability_stub()
# tb.init_channel_stub()
# tb.init_datastore_v3_stub()
# tb.init_files_stub()
# tb.init_mail_stub()
# tb.init_memcache_stub()
# tb.init_taskqueue_stub()
# tb.init_user_stub()
# tb.init_xmpp_stub()
# return tb
#
#
# class AppTest(unittest.TestCase):
# def setUp(self):
# logging.getLogger().setLevel(logging.DEBUG)
#
# tb = testbed.Testbed()
# tb.setup_env(current_version_id='testbed.version')
# tb.activate()
# self.testbed = init_stubs(tb)
#
#
# def tearDown(self):
# self.testbed.deactivate()
#
#
#
# def test_endpoint_testApi(self):
# application = endpoints.api_server([ConferenceApi], restricted=False)
#
# testapp = webtest.TestApp(application)
#
# # # # # Test init query (empty) # # # #
# req = ConferenceQueryForms()
# response = testapp.post('/_ah/spi/' + ConferenceApi.__name__ + '.' + ConferenceApi.conferenceQuery.__name__,
# protojson.encode_message(req),
# content_type='application/json')
#
# res = protojson.decode_message(ConferenceForms, response.body)
# self.assertEqual(res.items, [])
#
# # # # # Insert Item into Conference # # # #
# req = ConferenceForm(name='Hey')
# response = testapp.post('/_ah/spi/' + ConferenceApi.__name__ + '.' + ConferenceApi.conferenceCreate.__name__,
# protojson.encode_message(req),
# content_type='application/json')
# res = protojson.decode_message(ConferenceForms, response.body)
# self.assertEqual(len(res.item), 1)
#
#
#
#
# if __name__ == '__main__':
# unittest.main()
# from google.appengine.ext import testbed
# import webtest
# import endpoints
#
#
# def init_stubs(tb):
# tb.init_all_stubs()
#
# def setUp(self):
# tb = testbed.Testbed()
# tb.setup_env(current_version_id='testbed.version') #needed because endpoints expects a . in this value
# tb.activate()
# tb.init_all_stubs()
# self.testbed = tb
#
# def tearDown(self):
# self.testbed.deactivate()
#
# def test_endpoint_insert(self):
# app = endpoints.api_server([TestEndpoint], restricted=False)
# testapp = webtest.TestApp(app)
# msg = {...} # a dict representing the message object expected by insert
# # To be serialised to JSON by webtest
# resp = testapp.post_json('/_ah/spi/TestEndpoint.insert', msg)
#
# self.assertEqual(resp.json, {'expected': 'json response msg as dict'})
| StarcoderdataPython |
152064 | import cv2
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
# K-means step1
def k_means_step1(img, Class=5):
    """Assign every pixel of ``img`` to its nearest of ``Class`` randomly
    chosen seed pixels (the assignment step of k-means, iteration 0).

    Parameters
    ----------
    img : np.ndarray
        H x W x C float image.
    Class : int
        Number of clusters; seed pixels are drawn without replacement.

    Returns
    -------
    np.ndarray
        H x W uint8 label image, labels scaled by 50 for visualization.
    """
    H, W, C = img.shape
    # Fixed seed keeps the randomly chosen cluster centers reproducible.
    np.random.seed(0)
    pixels = np.reshape(img, (H * W, -1))
    # Pick `Class` distinct pixels as the initial cluster centers.
    idx = np.random.choice(np.arange(H * W), Class, replace=False)
    Cs = pixels[idx].copy()
    # Vectorized nearest-center assignment: an (H*W, Class) distance matrix
    # replaces the original per-pixel Python loop (and the stray debug
    # print of the centers has been removed).
    dists = np.sqrt(np.sum((pixels[:, None, :] - Cs[None, :, :]) ** 2, axis=2))
    clss = np.argmin(dists, axis=1)
    # Scale labels by 50 so the classes are visible as gray levels.
    out = (np.reshape(clss, (H, W)) * 50).astype(np.uint8)
    return out
# read image (cv2.imread returns None when the file is missing -- the
# .astype call would then raise AttributeError)
img = cv2.imread("../imori.jpg").astype(np.float32)
# K-means step 1: assignment to randomly chosen initial centers
# (the original comment said "step2", but k_means_step1 is what is called)
out = k_means_step1(img)
# cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| StarcoderdataPython |
1711320 | #!/usr/local/bin/python3
"""This program asks a user to guess a number up to 5 attempts."""
numguesses = 0
secret = 12
guess = 0
while numguesses < 5 and guess != secret:
guess = (int(input("Guess a number:")))
if guess < secret:
print("Guess higher")
elif guess > secret:
print("Guess lower")
numguesses += 1
if numguesses <= 5 and guess == secret:
print("Correct! Well done, the number was", secret)
else:
print("Sorry, the number was", secret) | StarcoderdataPython |
118651 | <reponame>callat-qcd/lattedb
"""Admin view for correlation functions
"""
from espressodb.base.admin import register_admins
register_admins("lattedb.correlator")
| StarcoderdataPython |
4817982 | <reponame>lycantropos/reprit
from typing import (Any as _Any,
Callable as _Callable)
from .core.hints import Domain as _Domain
# Callable that turns a single argument value into its string form.
ArgumentSerializer = _Callable[[_Any], str]
# Callable that, given an object and a field name, looks up the field's value.
FieldSeeker = _Callable[[_Domain, str], _Any]
| StarcoderdataPython |
105151 | <filename>collatorx/common/ledger.py
from datetime import datetime
from uuid import uuid4
class Ledger():
    """Minimal ledger identity: a random UUID plus its creation timestamp."""

    LEDGER_TYPE = "default"

    def __init__(self):
        self._id = uuid4()
        # BUG FIX: `datetime` is the class (imported via
        # `from datetime import datetime`), so the original
        # `datetime.datetime.today()` raised AttributeError.
        self._dob = datetime.today()

    @property
    def ledger_id(self):
        """The ledger's UUID, rendered as a string."""
        return str(self._id)

    @property
    def genesis_date(self):
        """The creation timestamp in ISO-8601 format."""
        return self._dob.isoformat()
4810095 | import platform
import re
import threading
from time import sleep
from urllib3 import make_headers, ProxyManager
from common import DATA_DIR
import subprocess, os
class ProxyPool:
    """Maintains a list of HTTP proxies and continuously health-checks them.

    Proxies accumulate a penalty (``credit_record``) each time they fail a
    check and are benched for a growing number of rounds (``waiting_round``);
    currently healthy proxies are published through ``available_proxy``.
    """

    def __init__(self, proxy_list_file):
        self.credit_record = {}
        self.waiting_round = {}
        self.proxy_list = self.__read_proxy_list(proxy_list_file)
        self.available_proxy = set()

    def __read_proxy_list(self, file_path):
        """Load one proxy URL per line and zero-initialise its book-keeping."""
        with open(file_path) as fin:
            for line in fin:
                proxy_url = line.strip("\n\t\r ")
                self.credit_record[proxy_url] = 0
                self.waiting_round[proxy_url] = 0
        return self.credit_record.keys()

    def is_alive_proxy(self, proxy):
        """Return True if the proxy host answers one ping AND relays HTTP."""
        host = self.get_ip(proxy)
        if platform.system() == "Windows":
            command = "ping {} -n 1".format(host)
        else:
            command = "ping {} -c 1".format(host)
        # BUG FIX: the original passed an argv list together with shell=True,
        # which on POSIX executes a bare "ping" with no arguments (so the
        # check always failed). With a list argv, shell=False is correct and
        # also avoids shell interpretation of the file-provided host string.
        proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
        # communicate() both waits and drains stdout (wait() alone can
        # deadlock if the pipe fills).
        proc.communicate()
        isUpBool = False
        if proc.returncode == 0:
            if self.can_get_response("http://www.example.org", timeout=10, proxy=proxy):
                isUpBool = True
        return isUpBool

    def can_get_response(self, link, timeout, proxy):
        """Best-effort GET of ``link`` through ``proxy``; True on a 2xx."""
        try:
            header = "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7"
            headers = make_headers(user_agent=header)
            http = ProxyManager(proxy, headers=headers)
            response = http.request("GET", link, timeout=timeout)
            status_code = response.status
            if str(status_code).startswith("2"):
                return True
            else:
                return False
        except Exception:
            # Any network/proxy failure just means "not usable right now".
            return False

    def get_ip(self, url):
        """Extract the dotted-quad IPv4 address embedded in ``url``."""
        ip_pattern = r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b'
        return re.search(ip_pattern, url).group(0)

    def update_proxy_list(self, interval=10):
        """Endless health-check loop; run via :meth:`run` in a thread."""
        while True:
            for proxy in self.proxy_list:
                penalty_degree = self.credit_record[proxy]
                remain_waiting = self.waiting_round[proxy]
                # A benched proxy just burns down its waiting rounds.
                if remain_waiting > 0:
                    self.waiting_round[proxy] -= 1
                    continue
                is_live_flag = False
                try:
                    is_live_flag = self.is_alive_proxy(proxy)
                except Exception as e:
                    print(e)
                if is_live_flag:
                    # Reward: shrink the penalty and publish the proxy.
                    if penalty_degree > 0:
                        self.credit_record[proxy] -= 1
                    self.available_proxy.add(proxy)
                else:
                    # Penalise: bench for more rounds (capped at 100) and
                    # withdraw the proxy from the available set.
                    self.credit_record[proxy] += 1
                    self.waiting_round[proxy] = min(100, remain_waiting + self.credit_record[proxy])
                    if proxy in self.available_proxy:
                        self.available_proxy.remove(proxy)
            sleep(interval)

    def run(self):
        """Start the health-check loop in a background thread."""
        t = threading.Thread(target=self.update_proxy_list, )
        t.start()
if __name__ == "__main__":
proxy_path = os.path.join(DATA_DIR, "proxy_list.txt")
proxy_pool = ProxyPool(proxy_path)
proxy_pool.run()
| StarcoderdataPython |
3363013 | from __future__ import division
from logging import getLogger
logger = getLogger('game_object')
from math import sqrt
import collections
import game
from Box2D import b2
from vector import Vector
class GameObject(object):
def __init__(self, name="", world=None, shape='box', size=(0.5, 0.5), location=None, position=(2, 2), facing=0, mass=1, fixed=True, solid=True, sound=None, use_sound=None, destroy_sound=None, destructable=True, collide_sound=None, head_relative=False, *args, **kwargs):
super(GameObject, self).__init__(*args, **kwargs)
self.world = world
self.world.add_object(self)
self.name = name
self.shape = shape
self.size = tuple(size)
self.location = location
self.contents = []
self.fixed = fixed
self.solid = solid
self.head_relative = head_relative
self.body = None
self.mass = mass
if location is None:
self.create_body(position=position)
self.create_fixture()
self.last_played_times = collections.defaultdict(int)
self.sound_source = None
if sound is not None:
sound = self.play_sound(sound, looping=True)
self.sound = sound
self.destroy_sound = destroy_sound
self.use_sound = use_sound
self.collide_sound = collide_sound
self.position = position
self.facing = facing
self.destructable = destructable
def create_body(self, position=None):
size = self.size[0] / 2, self.size[1] / 2
if position is None:
position = (0, 0)
self.body = self.world.world.CreateStaticBody(userData=self, position=position)
def create_fixture(self):
size = self.size[0] / 2, self.size[1] / 2
density=1
friction=1.0
restitution=0.0
if self.shape == 'circle':
self.fixture = self.body.CreateCircleFixture(radius=size[0], density=density, friction=friction, restitution=restitution)
elif self.shape == 'box':
self.fixture= self.body.CreatePolygonFixture(box=size, density=density, friction=friction, restitution=restitution)
self.fixture.sensor = not self.solid
self.body.mass = self.mass
@property
def position(self):
if self.location is not None:
return self.location.position
return Vector(*self.body.position)
@position.setter
def position(self, position):
self.body.position = tuple(position)
if self.sound_source is not None:
self.set_sound_position()
@property
def velocity(self):
return Vector(*self.body.linearVelocity)
@velocity.setter
def velocity(self, velocity):
self.body.linearVelocity = velocity
def set_sound_position(self):
if self.head_relative:
self.sound_source.head_relative.value = self.head_relative
self.sound_source.position = (0, 0, 0)
return
position = self.position
self.sound_source.position.value = position[0], position[1], 0.0
@property
def facing(self):
if self.location is not None:
return self.location.facing
return self.body.angle
@facing.setter
def facing(self, facing):
self.body.angle = facing
def destroy(self):
[self.remove(i) for i in self.contents]
if self.destroy_sound is not None:
self.play_async_after(0.0, self.destroy_sound, *self.position)
if game.player.radar.tracking is self:
game.player.radar.stop_tracking()
game.player.radar.tracking = None
if self.location is not None:
self.location.remove_item(self)
self.world.destroy(self)
def handle_collision(self, other):
logger.debug("collision: %s %s %s" % (self.position, self, other))
def use(self, user):
if self.use_sound is not None:
self.play_sound(self.use_sound)
def can_use(self):
return False
def take_damage(self, amount):
if self.destructable:
self.destroy()
def destroy_body(self):
game.world.remove_body_from_world(self.body)
self.body = None
def hold(self, other):
if other.body is not None:
other.destroy_body()
self.contents.append(other)
other.location = self
def remove(self, other):
self.contents.remove(other)
other.location = self.location
game.world.create_body_next_tick(other, position=self.position)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
def play_async_after(self, delay, *args, **kwargs):
game.clock.schedule_once(lambda dt,: game.sound_manager.play_async(*args, **kwargs), delay)
def tick(self):
    """Per-frame update: keep the audio source's position and occlusion current."""
    if self.sound_source is None:
        return
    self.set_sound_position()
    self.update_audio_occlusion()
def update_audio_occlusion(self):
    """Attenuate this object's sound based on obstacles toward the player.

    Held objects simply inherit their holder's occlusion.  World objects
    count the bodies between themselves and the player: 0.15 occlusion per
    obstacle, capped at 1.0.  Out-of-range or co-located objects are left
    unchanged.
    """
    if self.location is not None:
        self.sound_source.occlusion.value = self.location.sound_source.occlusion.value
        return
    pos = self.position
    playerpos = game.player.position
    if pos == playerpos:
        return
    dx = pos[0] - playerpos[0]
    dy = pos[1] - playerpos[1]
    distance = sqrt(dx ** 2 + dy ** 2)
    if distance > game.MAX_AUDIO_DISTANCE:
        return
    obstacle_count = game.world.count_objects_between(pos, playerpos)
    self.sound_source.occlusion.value = min(1.0, 0.15 * obstacle_count)
def play_sound(self, sound, *args, **kwargs):
    """Play *sound* positioned at this object, creating the source lazily.

    NOTE(review): extra positional *args are accepted but never forwarded —
    only **kwargs reach play().  Confirm whether callers (e.g.
    only_play_every) rely on positional arguments being dropped.
    """
    if self.sound_source is None:
        self.setup_sound()
    return game.sound_manager.play(sound, source=self.sound_source, **kwargs)
def setup_sound(self):
    """Create and initialise this object's audio source."""
    source = game.sound_manager.create_source()
    source.head_relative.value = self.head_relative
    self.sound_source = source
    self.set_sound_position()
def only_play_every(self, delay, sound, *args, **kwargs):
    """Rate-limit *sound*: only play if *delay* seconds passed since its last play."""
    now = game.clock.time()
    if now - self.last_played_times[sound] < delay:
        return
    self.play_sound(sound, *args, **kwargs)
    # Re-read the clock so the recorded time includes the play call itself.
    self.last_played_times[sound] = game.clock.time()
| StarcoderdataPython |
1768637 | """
Client wrapper for Google App Engine memcache API
https://cloud.google.com/appengine/docs/standard/python/memcache/
"""
from vishnu.backend.client import Base
from vishnu.backend.client import PickleableSession
from google.appengine.api import memcache
import pickle
# Memcache namespace isolating vishnu session keys from other cache users.
NAMESPACE = "vishnu"
class Client(Base):
    """
    Session-backend client backed by the App Engine memcache service.
    """

    def __init__(self, sid):
        super(Client, self).__init__(sid)
        self._record = None

    def load(self):
        """Fetch the pickled session record from memcache.

        Populates the local fields and returns True on a cache hit; returns
        False on a miss.  A no-op (returning True) once already loaded.
        """
        if self._loaded:
            return True
        cached = memcache.get(self._sid, namespace=NAMESPACE)
        if cached is None:
            return False
        record = pickle.loads(cached)
        self._record = record
        self._loaded = True
        self._expires = record.expires
        self._last_accessed = record.last_accessed
        self._data = record.data
        return True

    def clear(self):
        """Reset local state and evict this session from memcache."""
        super(Client, self).clear()
        if self._sid:
            memcache.delete(self._sid, namespace=NAMESPACE)

    def save(self, sync_only=False):
        """Write the current session state back to memcache.

        TODO: implement sync_only (currently a full write is always done).
        """
        self._record = PickleableSession(
            self._expires,
            self._last_accessed,
            self._data,
        )
        memcache.set(self._sid, pickle.dumps(self._record), namespace=NAMESPACE)
| StarcoderdataPython |
4815893 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-17 21:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines the gender choice field on
    # apparel_product with men/women/unisex options.

    dependencies = [
        ('main', '0010_auto_20180216_0946'),
    ]

    operations = [
        migrations.AlterField(
            model_name='apparel_product',
            name='product_gender_type',
            field=models.CharField(choices=[('men', 'Male'), ('women', 'Female'), ('unisex', 'Unisex')], max_length=256, verbose_name='Gender'),
        ),
    ]
| StarcoderdataPython |
3321385 | import numpy as np
from dgpy.domain import BoundaryCondition
class AnalyticSolution:
    """Imposes an analytic solution on a boundary, Dirichlet or Neumann style."""

    def __init__(self, boundary_condition_type, solution):
        supported = (BoundaryCondition.DIRICHLET, BoundaryCondition.NEUMANN)
        assert boundary_condition_type in supported
        self.boundary_condition_type = boundary_condition_type
        self.solution = solution

    def _is_dirichlet(self):
        # Small helper so both operators share one branch condition.
        return self.boundary_condition_type == BoundaryCondition.DIRICHLET

    def nonlinear(self, u, n_dot_flux, face):
        """Exterior (u, n.F) pair for the nonlinear operator on *face*."""
        if self._is_dirichlet():
            return self.solution(face.inertial_coords), n_dot_flux
        return u, face.normal_dot(self.solution(face.inertial_coords))

    def linear(self, u_correction, n_dot_flux_correction, face):
        """Exterior corrections for the linearised operator.

        The imposed field's correction is zero on the constrained component.
        """
        if self._is_dirichlet():
            return np.zeros(u_correction.shape), n_dot_flux_correction
        return u_correction, np.zeros(n_dot_flux_correction.shape)
| StarcoderdataPython |
4808879 | <reponame>paulsbrookes/subcipher
import numpy as np
from key import Key
import itertools
# Alphabet ordered by descending character frequency in English text (space
# first); a character's index in this string is its canonical position used
# by the dictionary metrics below.
default_alpha = ' etaoinshrdlcumwfgypbvkjxqz'
def remove_duplicates(values):
    """Return the sorted, de-duplicated arrays from *values*.

    Arrays are compared via their list representation; the result is a new
    list of numpy arrays in sorted order.
    """
    as_lists = sorted(value.tolist() for value in values)
    unique = [key for key, _ in itertools.groupby(as_lists)]
    return [np.array(entry) for entry in unique]
def proliferation_generator(number):
    """Build a proliferator that expands keys by cycling them *number* times.

    The returned callable maps a list of Key objects to the de-duplicated
    list of keys produced by cycling each input key.

    Review fixes: removed the unused ``dt = np.dtype(object)`` local and
    stopped shadowing the builtin ``list`` with a loop variable.
    """
    def proliferator(input_keys):
        maps = []
        for source_key in input_keys:
            for cycled in source_key.array_cycle(number):
                maps.append(cycled.map)
        return [Key(m) for m in remove_duplicates(maps)]
    return proliferator
def key_proliferation(input_keys, number):
    """Cycle every key *number* times and return the de-duplicated results.

    Review fixes: removed the unused ``dt = np.dtype(object)`` local and
    stopped shadowing the builtin ``list`` with a loop variable.
    """
    maps = []
    for source_key in input_keys:
        for cycled in source_key.array_cycle(number):
            maps.append(cycled.map)
    return [Key(m) for m in remove_duplicates(maps)]
def key_proliferation_swap(input_keys):
    """Expand keys via array_swap() and return the de-duplicated results.

    Review fixes: removed the unused ``dt = np.dtype(object)`` local and
    stopped shadowing the builtin ``list`` with a loop variable.
    """
    maps = []
    for source_key in input_keys:
        for swapped in source_key.array_swap():
            maps.append(swapped.map)
    return [Key(m) for m in remove_duplicates(maps)]
def key_proliferation3(input_keys):
    """Expand keys via array_cycle() (default count) and de-duplicate.

    Review fixes: removed the unused ``dt = np.dtype(object)`` local and
    stopped shadowing the builtin ``list`` with a loop variable.
    """
    maps = []
    for source_key in input_keys:
        for cycled in source_key.array_cycle():
            maps.append(cycled.map)
    return [Key(m) for m in remove_duplicates(maps)]
def best_keys(key_list, encrypted_message, natural_sample, number_returned=10):
    """Rank keys by metric_function and return the best *number_returned*.

    Keys with the smallest metric value come first.
    """
    scores = [
        metric_function(encrypted_message.map(key), natural_sample)
        for key in key_list
    ]
    order = np.argsort(scores)
    return [key_list[i] for i in order[:number_returned]]
def best_keys_fast(key_list, encrypted_message, natural_sample, number_returned=10):
    """Rank keys by metric_function_fast and return the best *number_returned*.

    Off-by-one fix: the old clamp reduced an oversized number_returned to
    ``len(ranking) - 1``, silently dropping one candidate.  Slicing below
    already copes with number_returned exceeding the list length, so the
    clamp is removed.
    """
    scores = []
    for key in key_list:
        attempt = encrypted_message.map(key)
        scores.append(metric_function_fast(attempt, natural_sample))
    ranking = np.argsort(scores)
    return [key_list[i] for i in ranking[:number_returned]]
def metric_function(decryption_attempt, natural_sample):
    """Score an attempt against natural triplet rates (smaller is better)."""
    decryption_attempt.triplet_frequencies()
    # Epsilon avoids division by zero for exactly matching rates.
    diff = np.absolute(natural_sample.rates[1] - decryption_attempt.rates[1]) + 1e-12
    return -np.sum(1 / diff)
def metric_function2(decryption_attempt, natural_sample):
    """Score an attempt against natural quadruplet rates (smaller is better)."""
    decryption_attempt.quadruplet_frequencies()
    # Epsilon avoids division by zero for exactly matching rates.
    diff = np.absolute(natural_sample.rates[2] - decryption_attempt.rates[2]) + 1e-16
    return -np.sum(1 / diff)
def pair_metric(decryption_attempt, natural_sample):
    """Score an attempt against natural pair rates (smaller is better)."""
    decryption_attempt.group_frequencies(2)
    # Epsilon avoids division by zero for exactly matching rates.
    diff = np.absolute(natural_sample.rates[0] - decryption_attempt.rates[0]) + 1e-6
    return -np.sum(1 / diff)
def closeness(map):
    """Sum of absolute displacements of each entry from its own index."""
    offsets = np.abs(np.asarray(map) - np.arange(len(map)))
    return np.sum(offsets)
def triplet_dictionary_metric(decryption_attempt, natural_sample, alpha=default_alpha):
    """Penalise triplets in the attempt that are rare in the natural sample."""
    decryption_attempt.triplet_frequency_dictionary()
    total = 0
    for group in decryption_attempt.rate_dictionary:
        indices = tuple(alpha.find(ch) for ch in group[:3])
        total += 1 / (natural_sample.triplet_rates[indices] + 1e-12)
    return total
def quadruplet_dictionary_metric(decryption_attempt, natural_sample, alpha=default_alpha):
    """Count the attempt's quadruplets that never occur in the natural sample."""
    decryption_attempt.quadruplet_frequency_dictionary()
    total = 0
    for group in decryption_attempt.rate_dictionary:
        indices = tuple(alpha.find(ch) for ch in group[:4])
        if natural_sample.rates[2][indices] == 0:
            total += 1
    return total
def dict_metric_generator(number, epsilon=5e-6, alpha=default_alpha):
    """Build a metric over *number*-grams, smoothing rare rates with *epsilon*."""
    def dict_metric(decryption_attempt, natural_sample):
        decryption_attempt.frequency_dictionary(number)
        total = 0
        for group in decryption_attempt.rate_dictionary:
            indices = tuple(alpha.find(ch) for ch in group[:number])
            total += 1 / (natural_sample.rates[number - 2][indices] + epsilon)
        return total
    return dict_metric
| StarcoderdataPython |
1688240 | # lesson 44 Threading
# multiple tasks at one time
import threading
from queue import Queue
import time
## a lock per shared variable or shared function
# Serialises console output so lines from worker threads don't interleave.
print_lock = threading.Lock()
def exampleJob(worker):
    # Simulate one second of work, then report which thread handled the item.
    time.sleep(1.0)
    with print_lock:
        print(threading.current_thread().name, worker)
## assigning a worker to a thread
## this is the job
## assigning a worker to a thread
## this is the job
def threader():
    # Worker loop: pull items off the shared queue forever and process them.
    # Runs in daemon threads, so it dies when the main program exits.
    while True:
        worker = q.get()
        exampleJob(worker)
        q.task_done()
## =======
# Shared work queue: the main thread enqueues jobs, daemon workers consume them.
q = Queue()

## the number of workers is 10
## each thread has one worker
for x in range(10):
    # sending a function to a thread
    t = threading.Thread(target=threader)
    # Daemon threads are killed automatically when the main thread exits.
    t.daemon = True
    t.start()

start = time.time()

## creating 20 jobs
for worker in range(20):
    q.put(worker)

## block until every queued job has been marked done via task_done()
q.join()

print('Entire job took: ', time.time() - start)
| StarcoderdataPython |
37721 | #---- Python VM startup for LISTENERLISTENER_3_from_1 ---
import SSL_listener
# Endpoint and key material for the incoming (listening) side.
incomingIP="localhost"
incomingPort=10031
incomingPrivateKeyFile="server.key"
incomingPublicKeyFile="server.crt"
# Outgoing side constants are declared but not referenced by the function
# below; port 00000 appears to be a placeholder.
outgoingIP="localhost"
outgoingPort=00000
outgoingPublicKeyFile="server.crt"
def startLISTENER_3_from_1():
    """Start an SSL listener bound to the incoming endpoint for this VNF."""
    # NOTE(review): the meaning of the trailing "" argument is not visible
    # here — confirm against SSL_listener's constructor signature.
    incoming_ssl_EncryptionVNF= SSL_listener.SSL_listener(incomingIP, incomingPort, incomingPrivateKeyFile, incomingPublicKeyFile,"" )
#-------
| StarcoderdataPython |
1674660 | <gh_stars>0
from typing import Tuple
from pandas import DataFrame
from pyspark.sql import DataFrame as SparkDF
from pyspark.sql.types import ArrayType, DoubleType, LongType, StringType, StructField, StructType
from dagster import Out, Output, op
# Spark schema for one Hacker News item (story or comment).  Numeric fields
# that can be absent are declared as doubles so nulls survive.
HN_ACTION_SCHEMA = StructType(
    [
        StructField("id", LongType()),
        StructField("parent", DoubleType()),
        StructField("time", LongType()),
        StructField("type", StringType()),
        StructField("by", StringType()),
        StructField("text", StringType()),
        StructField("kids", ArrayType(LongType())),
        StructField("score", DoubleType()),
        StructField("title", StringType()),
        StructField("descendants", DoubleType()),
        StructField("url", StringType()),
    ]
)

# Flat column-name list, used to build pandas frames from raw API rows.
ACTION_FIELD_NAMES = [field.name for field in HN_ACTION_SCHEMA.fields]
@op(
    out={
        "items": Out(
            io_manager_key="parquet_io_manager",
            metadata={"partitioned": True},
        )
    },
    required_resource_keys={"hn_client"},
)
def download_items(context, id_range: Tuple[int, int]) -> Output[DataFrame]:
    """
    Downloads all of the items for the id range passed in as input and creates a DataFrame with
    all the entries.

    The range is half-open: start_id inclusive, end_id exclusive.  Items the
    client returns as None are dropped from the frame but counted in the
    output metadata.
    """
    start_id, end_id = id_range
    context.log.info(f"Downloading range {start_id} up to {end_id}: {end_id - start_id} items.")

    rows = []
    for item_id in range(start_id, end_id):
        rows.append(context.resources.hn_client.fetch_item_by_id(item_id))
        if len(rows) % 100 == 0:
            # Periodic progress logging for long ranges.
            context.log.info(f"Downloaded {len(rows)} items!")

    # Drop failed fetches (None) and duplicate ids before building the output.
    non_none_rows = [row for row in rows if row is not None]
    result = DataFrame(non_none_rows, columns=ACTION_FIELD_NAMES).drop_duplicates(subset=["id"])
    # "by" is the HN API's author field; rename to the warehouse convention.
    result.rename(columns={"by": "user_id"}, inplace=True)
    return Output(
        result,
        "items",
        metadata={"Non-empty items": len(non_none_rows), "Empty items": rows.count(None)},
    )
@op(
    out=Out(
        io_manager_key="warehouse_io_manager",
        metadata={"table": "hackernews.comments", "partitioned": True},
    ),
    description="Creates a dataset of all items that are comments",
)
def build_comments(context, items: SparkDF) -> SparkDF:
    """Filter the items frame down to rows whose type column is "comment"."""
    context.log.info(str(items.schema))
    is_comment = items["type"] == "comment"
    return items.where(is_comment)
@op(
    out=Out(
        io_manager_key="warehouse_io_manager",
        metadata={"table": "hackernews.stories", "partitioned": True},
    ),
    description="Creates a dataset of all items that are stories",
)
def build_stories(context, items: SparkDF) -> SparkDF:
    """Filter the items frame down to rows whose type column is "story"."""
    context.log.info(str(items.schema))
    is_story = items["type"] == "story"
    return items.where(is_story)
| StarcoderdataPython |
4812789 | <filename>testing/ros_debug/workspace/dev_ws/src/ros_debug_nodes/ros_debug_nodes/broadcast_node_details.py
import rclpy
import socket
import random
from rclpy.node import Node
from std_msgs.msg import String
class NodeNetworkingPublisher(Node):
    """Debug node: publishes a heartbeat String once per second on a
    host-and-random-suffix topic, and logs every topic it can see."""

    def __init__(self):
        super().__init__('node_network_publisher')
        # '-' is replaced because it would be invalid in the topic name built
        # from the hostname below.
        self.hostname = socket.gethostname().replace('-', '_')
        self.random_name = f'name_{random.randint(10000,99999)}'
        self.publisher_ = self.create_publisher(String, self.get_publisher_name(''), 10)
        timer_period=1.0
        self.timer = self.create_timer(timer_period, self.timer_callback)
        # Monotonic message counter embedded in each heartbeat.
        self.i = 0

    def get_publisher_name(self, prefix:str) -> str:
        # e.g. '/hmy_host/name_12345' when prefix is '' (the only caller).
        return f'{prefix}/h{self.hostname}/{self.random_name}'

    def timer_callback(self):
        # Snapshot the visible topic graph, publish one heartbeat, log, count.
        topics = [t[0] for t in self.get_topic_names_and_types()]
        msg=String()
        msg.data = f'{self.hostname}, {self.i}: Message from {self.get_name()}'
        self.publisher_.publish(msg)
        self.get_logger().info(f'({self.hostname}) sees topics: {",".join(topics)}')
        self.i += 1
def main(args=None):
    # Standard rclpy lifecycle: init, spin the node until shutdown, tear down.
    rclpy.init(args=args)
    nnp = NodeNetworkingPublisher()
    rclpy.spin(nnp)
    nnp.destroy_node()
    rclpy.shutdown()
main()
| StarcoderdataPython |
8115 | from collections import deque
def solution(N, bus_stop):
    """Multi-source BFS distance from every cell of an N x N grid to the
    nearest bus stop.

    Args:
        N: grid side length.
        bus_stop: list of 1-indexed [row, col] stop coordinates.

    Returns:
        N x N list of lists with the minimal number of 4-directional steps
        from each cell to its nearest stop.
    """
    # Any value larger than the maximum reachable distance works as the
    # "unvisited" sentinel.  The original hard-coded 1300, which silently
    # breaks for N > 651 (the max distance is 2*(N-1)); 2*N generalises it.
    unvisited = 2 * N
    answer = [[unvisited] * N for _ in range(N)]
    stops = [(x - 1, y - 1) for x, y in bus_stop]
    queue = deque(stops)
    for x, y in stops:
        answer[x][y] = 0
    while queue:
        x, y = queue.popleft()
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y + 1), (x, y - 1)):
            if 0 <= nx < N and 0 <= ny < N and answer[nx][ny] > answer[x][y]:
                answer[nx][ny] = answer[x][y] + 1
                queue.append((nx, ny))
    return answer
if __name__ == '__main__':
    # Smoke test with the sample grid.
    print(solution(
        3, [[1,2],[3,3]],
    ))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.