id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6578100 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-10-23 19:40
from __future__ import unicode_literals
from django.db import migrations
def copy_repo(apps, schema_editor):
    """Backfill PlanRepository rows from each Plan's legacy ``repo`` FK."""
    plan_model = apps.get_model('plan', 'Plan')
    plan_repo_model = apps.get_model('plan', 'PlanRepository')
    for current_plan in plan_model.objects.all():
        plan_repo_model.objects.create(plan=current_plan, repo=current_plan.repo)
class Migration(migrations.Migration):
    """Data migration: copy each Plan's legacy repo into the PlanRepository table."""

    # Must run after the migration that created the PlanRepository through-model.
    dependencies = [
        ('plan', '0009_plan_repos_m2m'),
    ]

    # NOTE(review): no reverse function is supplied, so this migration is
    # irreversible as written — confirm that is intended.
    operations = [
        migrations.RunPython(copy_repo),
    ]
| StarcoderdataPython |
1749895 | <filename>rassh/datatypes/response_with_code.py
class ResponseWithCode(object):
    """A response to an HTTP request, paired with an HTTP response code."""

    def __init__(self, response: object, code: int):
        """Store *response* and the integer form of *code*.

        Raises:
            ValueError: if *code* cannot be converted to an integer.
        """
        try:
            int_code = int(code)
        except (TypeError, ValueError):
            # BUG FIX: int('abc') raises ValueError, not TypeError, so string
            # garbage previously escaped with an inconsistent error message.
            # Catch both so callers always see the same ValueError.
            raise ValueError("Code must be an integer")
        self.__response = response
        self.__code = int_code

    def get_response(self):
        """Return the stored response body."""
        return self.__response

    def get_code(self) -> int:
        """Return the HTTP status code as an int."""
        return self.__code
| StarcoderdataPython |
5005022 | <reponame>praisetompane/3_programming
def timeConversion(s):
    """Convert a 12-hour clock string like '07:05:45PM' to 24-hour 'hh:mm:ss'."""
    hh, mm, rest = s.split(':')
    ss, meridiem = rest[:2], rest[2:]
    if meridiem == 'AM':
        # Midnight hour becomes 00; every other AM hour is unchanged.
        hh = '00' if hh == '12' else hh
    else:
        # Noon stays 12; other PM hours are shifted forward by 12.
        hh = '12' if hh == '12' else str(int(hh) + 12)
    return f'{hh}:{mm}:{ss}'


print(timeConversion('06:40:03AM'))
9759852 | <reponame>canadiyaman/thetask
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
User = get_user_model()
class CustomUserCreationForm(UserCreationForm):
    """User sign-up form that applies the site's fade-in CSS class to every field."""

    class Meta:
        model = User
        fields = ['username']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tag each rendered widget with the styling class used by the template.
        for bound_field in self.visible_fields():
            bound_field.field.widget.attrs['class'] = 'fadeIn second'
| StarcoderdataPython |
4828321 | <reponame>odoku/Hyena
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from datetime import date, datetime
from dateutil.tz import tzoffset
import pytest
from scrapbook import Content, Element
from scrapbook.filters import (
Bool,
CleanText,
Contains,
DateTime,
Equals,
Fetch,
FilterDict,
Join,
Map,
Normalize,
Partial,
RenameKey,
Replace,
take_first,
through,
)
from scrapbook.parsers import All
class TestMap(object):
    """Tests for the Map filter: chains callables, feeding each the prior result."""

    def test_with_int(self, mocker):
        # Scalar input: fn2 receives fn1's return value, and the last
        # function's return value is the overall result.
        fn1 = mocker.Mock(name='fn1', return_value=1)
        fn2 = mocker.Mock(name='fn2', return_value=2)
        result = Map(fn1, fn2)(0)
        fn1.assert_called_once_with(0)
        fn2.assert_called_once_with(1)
        assert 2 == result

    def test_with_dict(self, mocker):
        # Dict input: the chain is applied to each value; keys are untouched.
        fn1 = mocker.Mock(name='fn1', side_effect=lambda v: v * 2)
        fn2 = mocker.Mock(name='fn2', side_effect=lambda v: v * 10)
        result = Map(fn1, fn2)({'AAA': 1, 'BBB': 2})
        fn1.assert_has_calls([mocker.call(1), mocker.call(2)], any_order=True)
        fn2.assert_has_calls([mocker.call(2), mocker.call(4)], any_order=True)
        assert {'AAA': 20, 'BBB': 40} == result

    def test_with_list(self, mocker):
        # List input: element-wise application, order preserved in the result.
        fn1 = mocker.Mock(name='fn1', side_effect=lambda v: v * 2)
        fn2 = mocker.Mock(name='fn2', side_effect=lambda v: v * 10)
        result = Map(fn1, fn2)([1, 2])
        fn1.assert_has_calls([mocker.call(1), mocker.call(2)], any_order=True)
        fn2.assert_has_calls([mocker.call(2), mocker.call(4)], any_order=True)
        assert [20, 40] == result

    def test_with_none(self, mocker):
        # None short-circuits: no filter in the chain is invoked at all.
        fn1 = mocker.Mock(name='fn1', side_effect=lambda v: v)
        fn2 = mocker.Mock(name='fn2', side_effect=lambda v: v)
        result = Map(fn1, fn2)(None)
        fn1.assert_not_called()
        fn2.assert_not_called()
        assert result is None

    def test_on_element(self, mocker):
        # A string entry ('fn2') is resolved to a method on the owning Element.
        class El(Element):
            def fn2(self, value):
                pass
        fn1 = mocker.Mock(name='fn1', side_effect=lambda v: v * 2)
        fn2 = mocker.patch.object(target=El, attribute='fn2', side_effect=lambda v: v * 3)
        element = El(xpath='//p/text()', parser=All(), filter=Map(fn1, 'fn2'))
        result = element.parse(u'<p>a</p><p>b</p>')
        fn1.assert_has_calls([mocker.call('a'), mocker.call('b')], any_order=True)
        fn2.assert_has_calls([mocker.call('aa'), mocker.call('bb'), ], any_order=True)
        assert ['aaaaaa', 'bbbbbb'] == result

    def test_on_content(self, mocker):
        # A string entry is also resolved against the enclosing Content subclass.
        fn1 = mocker.Mock(name='fn1', side_effect=lambda v: v * 2)
        class C(Content):
            field = Element(xpath='//p/text()', parser=All(), filter=Map(fn1, 'fn2'))
            def fn2(self, value):
                pass
        fn2 = mocker.patch.object(target=C, attribute='fn2', side_effect=lambda v: v * 3)
        c = C(xpath='')
        result = c.parse(u'<p>a</p><p>b</p>')
        fn1.assert_has_calls([mocker.call('a'), mocker.call('b')], any_order=True)
        fn2.assert_has_calls([mocker.call('aa'), mocker.call('bb'), ], any_order=True)
        assert ['aaaaaa', 'bbbbbb'] == result['field']
class TestThrough(object):
    """through() is the identity filter: every value passes unchanged."""

    def test_(self):
        sample = 100
        assert through(sample) == sample

    def test_with_none(self):
        assert through(None) is None
class TestTakeFirst(object):
    """take_first() picks the first meaningful element from a sequence."""

    def test_(self):
        assert take_first([1, 2, 3, 4]) == 1

    def test_with_list_include_empty_value(self):
        # None and '' are skipped, but other falsy values such as 0 are kept.
        assert take_first([None, '', 0, 1]) == 0

    def test_with_none(self):
        assert take_first(None) is None
class TestCleanText(object):
    """Tests for CleanText: strips tags and surrounding whitespace."""

    # NOTE(review): the ('&', '&') case looks like an HTML-entity test whose
    # source text may have been unescaped during extraction — confirm upstream.
    @pytest.mark.parametrize(['text', 'result'], [
        (' aaa ', 'aaa'),
        ('<p>aaa</p>', 'aaa'),
        ('&', '&'),
        ('aa bb', 'aa bb'),
        ('<p> aaa & bbb </p>', 'aaa & bbb'),
        ('a\nb', 'a\nb'),
        ('', None),
        (None, None),
    ])
    def test_(self, text, result):
        assert result == CleanText()(text)

    def test_with_empty_value(self):
        # empty_value replaces '' results instead of the default None.
        assert 'empty' == CleanText(empty_value='empty')('')

    @pytest.mark.parametrize(['text', 'result'], [
        ('a\nb', 'a b'),
        ('a\rb', 'a b'),
        ('a\n\rb', 'a b'),
        ('a\r\nb', 'a b'),
    ])
    def test_with_remove_line_breaks(self, text, result):
        # Any CR/LF combination collapses to a single space.
        assert result == CleanText(remove_line_breaks=True)(text)
class TestEquals(object):
    """Equals matches only on exact equality; None never matches."""

    def test_(self):
        target = 'AAA'
        assert Equals(target)('AAA')
        assert not Equals(target)('AAABBBCCC')
        assert not Equals(target)(None)
class TestContains(object):
    """Contains checks substring membership; None is treated as no match."""

    def test_(self):
        haystack = 'AAABBBCCC'
        assert Contains('BBB')(haystack)
        assert not Contains('DDD')(haystack)
        assert not Contains('AAA')(None)
class TestFetch(object):
    """Tests for Fetch: regex extraction whose result shape depends on groups."""

    def test_fetch(self):
        # No groups: the first whole match is returned as a string.
        pattern = r'\d+'
        result = Fetch(pattern)('10, 20, 30')
        assert '10' == result

    def test_fetch_with_group(self):
        # Unnamed groups: the first match is returned as a tuple of groups.
        pattern = r'(\d+), (\d+), (\d+)'
        result = Fetch(pattern)('10, 20, 30')
        assert ('10', '20', '30') == result

    def test_fetch_with_labeled_group(self):
        # Named groups: the first match is returned as a group dict.
        pattern = r'(?P<type>\w+): (?P<count>\d+)'
        result = Fetch(pattern)('Cat: 10, Dog: 20')
        assert {'type': 'Cat', 'count': '10'} == result

    def test_fetch_with_none(self):
        # None input passes through unfiltered.
        pattern = r'(?P<type>\w+): (?P<count>\d+)'
        result = Fetch(pattern)(None)
        assert result is None

    def test_fetch_all(self):
        # all=True collects every match, findall-style.
        pattern = r'\d+'
        result = Fetch(pattern, all=True)('10, 20, 30')
        assert ['10', '20', '30'] == result

    def test_fetch_all_with_group(self):
        # all=True + unnamed groups: list of tuples.
        pattern = r'(\d+), (\d+), (\d+)'
        result = Fetch(pattern, all=True)('10, 20, 30')
        assert [('10', '20', '30')] == result

    def test_fetch_all_with_labeled_group(self):
        # all=True + named groups: list of group dicts.
        pattern = r'(?P<type>\w+): (?P<count>\d+)'
        result = Fetch(pattern, all=True)('Cat: 10, Dog: 20')
        assert [
            {'type': 'Cat', 'count': '10'},
            {'type': 'Dog', 'count': '20'},
        ] == result

    def test_fetch_all_with_none(self):
        # None passes through even in findall mode.
        pattern = r'(?P<type>\w+): (?P<count>\d+)'
        result = Fetch(pattern, all=True)(None)
        assert result is None
class TestReplace(object):
    """Replace substitutes every regex match; None passes through untouched."""

    def test_(self):
        squash_runs_of_a = Replace(r'A+', 'B')
        assert squash_runs_of_a('AAAAAABBBAAAA') == 'BBBBB'

    def test_with_none(self):
        assert Replace(r'A+', 'B')(None) is None
class TestJoin(object):
    """Join concatenates list items with a separator; None passes through."""

    def test_(self):
        joined = Join(',')(['A', 'B', 'C'])
        assert joined == 'A,B,C'

    def test_with_none(self):
        assert Join(',')(None) is None
class TestNormalize(object):
    """Normalize folds full-width characters into their ASCII equivalents."""

    def test_(self):
        # Full-width digits, letters and symbols collapse to plain ASCII.
        assert Normalize()(u'12AB&%') == '12AB&%'

    def test_with_none(self):
        assert Normalize()(None) is None
class TestRenameKey(object):
    """RenameKey maps dict keys to new names; None passes through."""

    def test_(self):
        renamer = RenameKey({'AAA': 'XXX', 'BBB': 'YYY'})
        assert renamer({'AAA': '10', 'BBB': '20'}) == {'XXX': '10', 'YYY': '20'}

    def test_with_none(self):
        renamer = RenameKey({'AAA': 'XXX', 'BBB': 'YYY'})
        assert renamer(None) is None
class TestFilterDict(object):
    """FilterDict keeps (or, with ignore=True, drops) the listed keys."""

    def test_(self):
        assert FilterDict(['AAA'])({'AAA': '10', 'BBB': '20'}) == {'AAA': '10'}

    def test_with_ignore(self):
        # ignore=True inverts the filter: listed keys are removed instead.
        filtered = FilterDict(['AAA'], ignore=True)({'AAA': '10', 'BBB': '20'})
        assert filtered == {'BBB': '20'}

    def test_with_none(self):
        assert FilterDict(['AAA'])(None) is None
class TestPartial(object):
    """Partial pre-binds positional/keyword arguments around the piped value."""

    def test_with_args(self):
        def add(a, b, c):
            return a + b + c
        # Bound positionals come first; the filtered value fills the next slot.
        assert Partial(add, args=(10, 20))(30) == 60

    def test_with_kwargs(self):
        def add(a, b):
            return a + b
        assert Partial(add, kwargs={'b': 10})(5) == 15

    def test_with_arg_name(self):
        def add(a, b, c):
            return a + b + c
        # arg_name routes the filtered value into a specific keyword argument.
        assert Partial(add, kwargs={'a': 10, 'c': 30}, arg_name='b')(20) == 60
class TestDateTime(object):
    """Tests for DateTime: parses strings into datetime/date objects."""

    @pytest.mark.parametrize(['value', 'result'], [
        ('2001', datetime(2001, 1, 1)),
        ('2001-02', datetime(2001, 2, 1)),
        ('2001-02-03', datetime(2001, 2, 3)),
        ('2001-02-03 04:05:06', datetime(2001, 2, 3, 4, 5, 6)),
        ('2001-02-03T04:05:06+09:00', datetime(2001, 2, 3, 4, 5, 6, 0, tzoffset(None, 3600 * 9))),
    ])
    def test_(self, value, result):
        # Default mode infers the format, including ISO-8601 with a UTC offset;
        # missing components default to the start of the period.
        dt = DateTime()(value)
        assert dt == result

    @pytest.mark.parametrize(['value', 'format', 'result'], [
        ('2001', '%Y', datetime(2001, 1, 1)),
        ('02 2001', '%m %Y', datetime(2001, 2, 1)),
    ])
    def test_with_format(self, value, format, result):
        # An explicit strptime-style format overrides inference.
        dt = DateTime(format=format)(value)
        assert dt == result

    def test_with_truncate_time(self):
        # truncate_time drops the time portion, yielding a date.
        dt = DateTime(truncate_time=True)('2001-02-03 04:05:06')
        assert dt == date(2001, 2, 3)

    def test_with_truncate_timezone(self):
        # truncate_timezone strips tzinfo, yielding a naive datetime.
        dt = DateTime(truncate_timezone=True)('2001-02-03T04:05:06+09:00')
        assert dt.tzinfo is None
class TestBool(object):
    """Bool parses truthy strings; the accepted true-values are configurable."""

    @pytest.mark.parametrize(['value', 'result'], [
        ('true', True),
        ('false', False),
    ])
    def test_(self, value, result):
        assert Bool()(value) == result

    @pytest.mark.parametrize(['value', 'result'], [
        ('OK', True),
        ('ok', True),
        ('true', False),
        ('ng', False),
    ])
    def test_with_true_values(self, value, result):
        # Explicit true-values replace the defaults entirely, so the
        # previously-truthy 'true' no longer matches here.
        assert Bool('OK', 'ok')(value) == result
| StarcoderdataPython |
4913205 | import re
from marshmallow import pre_load
from ma import ma
from models.user import UserModel
class NonASCIIError(Exception):
    """Raised when a credential field contains non-ASCII characters."""
    # Exception.__init__ already stores and renders the message; the previous
    # override that only forwarded to super() was redundant.
class LengthTooShortError(Exception):
    """Raised when a credential field is shorter than the allowed minimum."""
    # Exception.__init__ already stores and renders the message; the previous
    # override that only forwarded to super() was redundant.
class LengthTooLongError(Exception):
    """Raised when a credential field exceeds the allowed maximum length."""
    # Exception.__init__ already stores and renders the message; the previous
    # override that only forwarded to super() was redundant.
class RequiredError(Exception):
    """Raised when a mandatory field is missing from the request payload."""

    def __init__(self, message="required user_id and password"):
        # Default message covers the common case; callers may override it.
        super().__init__(message)
class GetUserSchema(ma.SQLAlchemyAutoSchema):
    """Deserializes user-lookup payloads; requires user_id and password."""

    class Meta:
        model = UserModel       # fields are auto-derived from the SQLAlchemy model
        load_instance = True    # load() returns a UserModel instance, not a dict

    @pre_load
    def _pre_load(self, data, **kwargs):
        """Reject the payload early when either credential field is missing/empty."""
        if not data.get('user_id') or not data.get("password"):
            raise RequiredError()
        return data
class PatchUserSchema(ma.SQLAlchemyAutoSchema):
    """Deserializes user-update payloads.

    Requires user_id and password, plus at least one updatable field
    (nickname or comment).
    """

    class Meta:
        model = UserModel       # fields are auto-derived from the SQLAlchemy model
        load_instance = True    # load() returns a UserModel instance, not a dict

    @pre_load
    def _pre_load(self, data, **kwargs):
        """Validate credentials and presence of at least one field to patch."""
        if not data.get('user_id') or not data.get("password"):
            raise RequiredError()
        if not data.get("nickname") and not data.get("comment"):
            raise RequiredError(message="required nickname or comment")
        return data
class CloseSchema(ma.SQLAlchemyAutoSchema):
    """Deserializes account-close payloads; requires user_id and password.

    NOTE(review): structurally identical to GetUserSchema — presumably kept
    separate so the two endpoints can diverge independently; confirm.
    """

    class Meta:
        model = UserModel       # fields are auto-derived from the SQLAlchemy model
        load_instance = True    # load() returns a UserModel instance, not a dict

    @pre_load
    def _pre_load(self, data, **kwargs):
        """Reject the payload early when either credential field is missing/empty."""
        if not data.get('user_id') or not data.get("password"):
            raise RequiredError()
        return data
class SignupSchema(ma.SQLAlchemyAutoSchema):
    """Deserializes sign-up payloads, validating user_id and password.

    Both credentials are stripped of whitespace, must be 6-20 characters
    long after stripping, and must be ASCII-only.  nickname defaults to
    user_id when absent.
    """

    class Meta:
        model = UserModel       # fields are auto-derived from the SQLAlchemy model
        load_instance = True    # load() returns a UserModel instance, not a dict

    @staticmethod
    def _clean_credential(data, key):
        """Strip whitespace from data[key] in place and validate it.

        Raises RequiredError when the key is absent or empty,
        LengthTooShortError / LengthTooLongError when the stripped value is
        outside 6-20 characters, and NonASCIIError for non-ASCII content.
        """
        if not data.get(key):
            raise RequiredError()
        data[key] = re.sub(r'\s', '', data[key])
        if len(data[key]) < 6:
            raise LengthTooShortError("invalid %s: too short" % key)
        elif len(data[key]) > 20:
            raise LengthTooLongError("invalid %s: too long" % key)
        try:
            data[key].encode('ascii')
        except UnicodeEncodeError:
            raise NonASCIIError("invalid %s: Non-ASCII character" % key)

    @pre_load
    def _pre_load(self, data, **kwargs):
        """Validate credentials (password first, matching prior behavior)."""
        # Previously the identical validation block was duplicated verbatim
        # for password and user_id; it is now factored into _clean_credential.
        self._clean_credential(data, 'password')
        self._clean_credential(data, 'user_id')
        if not data.get('nickname'):
            data['nickname'] = data['user_id']
        return data
| StarcoderdataPython |
11243298 | import json
from pbx_gs_python_utils.utils.Lambdas_Helpers import slack_message
from pbx_gs_python_utils.utils.Misc import Misc
def run(event, context):
    """AWS Lambda handler: echo the incoming event to Slack, return it as JSON."""
    team_id = 'T7F3AUXGV'
    channel = 'DDKUZTK6X'
    text = "in API Gateway test..."
    # Pretty-print the raw API Gateway event into a green ("good") attachment.
    attachments = [{'text': "{0}".format(Misc.json_format(event)), 'color': 'good'}]
    slack_message(text, attachments, channel, team_id)
    body = Misc.json_format({'text': text})
    # NOTE(review): 209 is a non-standard HTTP status code — confirm intentional.
    return {
        'headers': {'Content-Type': 'application/json'},
        "statusCode": 209,
        "body": body,
    }
206013 |
import clr
# Load the Siemens TIA Portal Openness API assembly via pythonnet.
# NOTE(review): the path mixes escaped and unescaped backslashes ('\P' happens
# not to be a Python escape sequence, so it works) — a raw string would be safer.
clr.AddReference('C:\\Program Files\\Siemens\\Automation\\Portal V15_1\PublicAPI\\V15.1\\Siemens.Engineering.dll')
from System.IO import DirectoryInfo
import Siemens.Engineering as tia

# Target location and name for the new TIA Portal project.
project_path = DirectoryInfo ('C:\\Jonas\\TIA')
project_name = 'PythonTest'

# Launch TIA Portal with its user interface visible.
print ('Starting TIA with UI')
mytia = tia.TiaPortal(tia.TiaPortalMode.WithUserInterface)

# Create a fresh project at the configured path.
print ('Creating project')
myproject = mytia.Projects.Create(project_path, project_name)

# Add two PLC stations; each MLFB string is an order number plus firmware version.
print ('Creating station 1')
station1_mlfb = 'OrderNumber:6ES7 515-2AM01-0AB0/V2.6'
station1 = myproject.Devices.CreateWithItem(station1_mlfb, 'station1', 'station1')
print ('Creating station 2')
station2_mlfb = 'OrderNumber:6ES7 518-4AP00-0AB0/V2.6'
station2 = myproject.Devices.CreateWithItem(station2_mlfb, 'station2', 'station2')

# Keep the portal open until the user confirms shutdown.
print ("Press any key to quit")
input()
quit()
| StarcoderdataPython |
1906704 | <gh_stars>0
"""
DBA 1337_TECH, AUSTIN TEXAS © MAY 2021
Proof of Concept code, No liabilities or warranties expressed or implied.
"""
from django.conf import settings
from django.contrib.auth import get_user
from django.shortcuts import redirect
def custom_login_required(view):
    """Decorator: run *view* only for authenticated users, else redirect to login.

    The *view* argument must be a function.  Anonymous users are redirected
    to settings.LOGIN_URL with a ``?next=`` parameter pointing back to the
    originally requested path.
    """
    def new_view(request, *args, **kwargs):
        user = get_user(request)
        # NOTE(review): is_authenticated is a property on modern Django;
        # calling it as a method only works on old releases — confirm the
        # Django version this targets.
        if user.is_authenticated():
            return view(request, *args, **kwargs)
        else:
            url = '{}?next={}'.format(settings.LOGIN_URL, request.path)
            return redirect(url)
    # BUG FIX: the decorator previously fell off the end and returned None,
    # replacing every decorated view with None.
    return new_view
# TODO: ADD IN ZERO KNOWLEDGE AUTHENTICATION_WZK Implementation
# My Idea to make this authentication work is using an && between django's
# Built in authentication and my own ZKA_wzk implementation. That way it
# would take both to fail catastrophically in order for a user to be compromised
| StarcoderdataPython |
1864925 | # import time
# import pdb
# import threading
# import logging
# from multiprocessing import Pool, Process
# import pytest
# from utils.utils import *
# from common.constants import *
# COMPACT_TIMEOUT = 180
# field_name = default_float_vec_field_name
# binary_field_name = default_binary_vec_field_name
# default_single_query = {
# "bool": {
# "must": [
# {"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type":"L2",
# "params": {"nprobe": 10}}}}
# ]
# }
# }
# default_binary_single_query = {
# "bool": {
# "must": [
# {"vector": {binary_field_name: {"topk": 10, "query": gen_binary_vectors(1, default_dim),
# "metric_type":"JACCARD", "params": {"nprobe": 10}}}}
# ]
# }
# }
# default_query, default_query_vecs = gen_query_vectors(binary_field_name, default_binary_entities, 1, 2)
#
#
# def ip_query():
# query = copy.deepcopy(default_single_query)
# query["bool"]["must"][0]["vector"][field_name].update({"metric_type": "IP"})
# return query
#
#
# class TestCompactBase:
# """
# ******************************************************************
# The following cases are used to test `compact` function
# ******************************************************************
# """
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_compact_collection_name_None(self, connect, collection):
# '''
# target: compact collection where collection name is None
# method: compact with the collection_name: None
# expected: exception raised
# '''
# collection_name = None
# with pytest.raises(Exception) as e:
# status = connect.compact(collection_name)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_compact_collection_name_not_existed(self, connect, collection):
# '''
# target: compact collection not existed
# method: compact with a random collection_name, which is not in db
# expected: exception raised
# '''
# collection_name = gen_unique_str("not_existed")
# with pytest.raises(Exception) as e:
# status = connect.compact(collection_name)
#
# @pytest.fixture(
# scope="function",
# params=gen_invalid_strs()
# )
# def get_collection_name(self, request):
# yield request.param
#
# @pytest.fixture(
# scope="function",
# params=gen_invalid_ints()
# )
# def get_threshold(self, request):
# yield request.param
#
# @pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_compact_collection_name_invalid(self, connect, get_collection_name):
# '''
# target: compact collection with invalid name
# method: compact with invalid collection_name
# expected: exception raised
# '''
# collection_name = get_collection_name
# with pytest.raises(Exception) as e:
# status = connect.compact(collection_name)
# # assert not status.OK()
#
# @pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_compact_threshold_invalid(self, connect, collection, get_threshold):
# '''
# target: compact collection with invalid name
# method: compact with invalid threshold
# expected: exception raised
# '''
# threshold = get_threshold
# if threshold != None:
# with pytest.raises(Exception) as e:
# status = connect.compact(collection, threshold)
#
# @pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_add_entity_and_compact(self, connect, collection):
# '''
# target: test add entity and compact
# method: add entity and compact collection
# expected: data_size before and after Compact
# '''
# # vector = gen_single_vector(dim)
# ids = connect.bulk_insert(collection, default_entity)
# assert len(ids) == 1
# connect.flush([collection])
# # get collection info before compact
# info = connect.get_collection_stats(collection)
# logging.getLogger().info(info)
# size_before = info["partitions"][0]["segments"][0]["data_size"]
# status = connect.compact(collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(collection)
# size_after = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_before == size_after)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_insert_and_compact(self, connect, collection):
# '''
# target: test add entities and compact
# method: add entities and compact collection
# expected: data_size before and after Compact
# '''
# ids = connect.bulk_insert(collection, default_entities)
# connect.flush([collection])
# # get collection info before compact
# info = connect.get_collection_stats(collection)
# # assert status.OK()
# size_before = info["partitions"][0]["segments"][0]["data_size"]
# status = connect.compact(collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(collection)
# # assert status.OK()
# size_after = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_before == size_after)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_insert_delete_part_and_compact(self, connect, collection):
# '''
# target: test add entities, delete part of them and compact
# method: add entities, delete a few and compact collection
# expected: status ok, data size maybe is smaller after compact
# '''
# ids = connect.bulk_insert(collection, default_entities)
# assert len(ids) == default_nb
# connect.flush([collection])
# delete_ids = [ids[0], ids[-1]]
# status = connect.delete_entity_by_id(collection, delete_ids)
# assert status.OK()
# connect.flush([collection])
# # get collection info before compact
# info = connect.get_collection_stats(collection)
# logging.getLogger().info(info["partitions"])
# size_before = info["partitions"][0]["data_size"]
# logging.getLogger().info(size_before)
# status = connect.compact(collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(collection)
# logging.getLogger().info(info["partitions"])
# size_after = info["partitions"][0]["data_size"]
# logging.getLogger().info(size_after)
# assert(size_before >= size_after)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_insert_delete_part_and_compact_threshold(self, connect, collection):
# '''
# target: test add entities, delete part of them and compact
# method: add entities, delete a few and compact collection
# expected: status ok, data size maybe is smaller after compact
# '''
# ids = connect.bulk_insert(collection, default_entities)
# assert len(ids) == default_nb
# connect.flush([collection])
# delete_ids = [ids[0], ids[-1]]
# status = connect.delete_entity_by_id(collection, delete_ids)
# assert status.OK()
# connect.flush([collection])
# # get collection info before compact
# info = connect.get_collection_stats(collection)
# logging.getLogger().info(info["partitions"])
# size_before = info["partitions"][0]["data_size"]
# logging.getLogger().info(size_before)
# status = connect.compact(collection, 0.1)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(collection)
# logging.getLogger().info(info["partitions"])
# size_after = info["partitions"][0]["data_size"]
# logging.getLogger().info(size_after)
# assert(size_before >= size_after)
#
# @pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_insert_delete_all_and_compact(self, connect, collection):
# '''
# target: test add entities, delete them and compact
# method: add entities, delete all and compact collection
# expected: status ok, no data size in collection info because collection is empty
# '''
# ids = connect.bulk_insert(collection, default_entities)
# assert len(ids) == default_nb
# connect.flush([collection])
# status = connect.delete_entity_by_id(collection, ids)
# assert status.OK()
# connect.flush([collection])
# # get collection info before compact
# info = connect.get_collection_stats(collection)
# status = connect.compact(collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(collection)
# logging.getLogger().info(info["partitions"])
# assert not info["partitions"][0]["segments"]
#
# # TODO: enable
# @pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_insert_partition_delete_half_and_compact(self, connect, collection):
# '''
# target: test add entities into partition, delete them and compact
# method: add entities, delete half of entities in partition and compact collection
# expected: status ok, data_size less than the older version
# '''
# connect.create_partition(collection, default_tag)
# assert connect.has_partition(collection, default_tag)
# ids = connect.bulk_insert(collection, default_entities, partition_name=default_tag)
# connect.flush([collection])
# info = connect.get_collection_stats(collection)
# logging.getLogger().info(info["partitions"])
# delete_ids = ids[:default_nb//2]
# status = connect.delete_entity_by_id(collection, delete_ids)
# assert status.OK()
# connect.flush([collection])
# # get collection info before compact
# info = connect.get_collection_stats(collection)
# logging.getLogger().info(info["partitions"])
# status = connect.compact(collection)
# assert status.OK()
# # get collection info after compact
# info_after = connect.get_collection_stats(collection)
# logging.getLogger().info(info_after["partitions"])
# assert info["partitions"][1]["segments"][0]["data_size"] >= info_after["partitions"][1]["segments"][0]["data_size"]
#
# @pytest.fixture(
# scope="function",
# params=gen_simple_index()
# )
# def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "GPU":
# if not request.param["index_type"] not in ivf():
# pytest.skip("Only support index_type: idmap/ivf")
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("CPU not support index_type: ivf_sq8h")
# return request.param
#
# @pytest.mark.tags(CaseLabel.L2)
# def test_compact_after_index_created(self, connect, collection, get_simple_index):
# '''
# target: test compact collection after index created
# method: add entities, create index, delete part of entities and compact
# expected: status ok, index description no change, data size smaller after compact
# '''
# count = 10
# ids = connect.bulk_insert(collection, default_entities)
# connect.flush([collection])
# connect.create_index(collection, field_name, get_simple_index)
# connect.flush([collection])
# # get collection info before compact
# info = connect.get_collection_stats(collection)
# size_before = info["partitions"][0]["segments"][0]["data_size"]
# delete_ids = ids[:default_nb//2]
# status = connect.delete_entity_by_id(collection, delete_ids)
# assert status.OK()
# connect.flush([collection])
# status = connect.compact(collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(collection)
# size_after = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_before >= size_after)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_add_entity_and_compact_twice(self, connect, collection):
# '''
# target: test add entity and compact twice
# method: add entity and compact collection twice
# expected: status ok, data size no change
# '''
# ids = connect.bulk_insert(collection, default_entity)
# connect.flush([collection])
# # get collection info before compact
# info = connect.get_collection_stats(collection)
# size_before = info["partitions"][0]["segments"][0]["data_size"]
# status = connect.compact(collection)
# assert status.OK()
# connect.flush([collection])
# # get collection info after compact
# info = connect.get_collection_stats(collection)
# size_after = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_before == size_after)
# status = connect.compact(collection)
# assert status.OK()
# # get collection info after compact twice
# info = connect.get_collection_stats(collection)
# size_after_twice = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_after == size_after_twice)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_insert_delete_part_and_compact_twice(self, connect, collection):
# '''
# target: test add entities, delete part of them and compact twice
# method: add entities, delete part and compact collection twice
# expected: status ok, data size smaller after first compact, no change after second
# '''
# ids = connect.bulk_insert(collection, default_entities)
# connect.flush([collection])
# delete_ids = [ids[0], ids[-1]]
# status = connect.delete_entity_by_id(collection, delete_ids)
# assert status.OK()
# connect.flush([collection])
# # get collection info before compact
# info = connect.get_collection_stats(collection)
# size_before = info["partitions"][0]["data_size"]
# status = connect.compact(collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(collection)
# size_after = info["partitions"][0]["data_size"]
# assert(size_before >= size_after)
# status = connect.compact(collection)
# assert status.OK()
# # get collection info after compact twice
# info = connect.get_collection_stats(collection)
# size_after_twice = info["partitions"][0]["data_size"]
# assert(size_after == size_after_twice)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_compact_multi_collections(self, connect):
# '''
# target: test compact works or not with multiple collections
# method: create 50 collections, add entities into them and compact in turn
# expected: status ok
# '''
# nb = 100
# num_collections = 20
# entities = gen_entities(nb)
# collection_list = []
# for i in range(num_collections):
# collection_name = gen_unique_str("test_compact_multi_collection_%d" % i)
# collection_list.append(collection_name)
# connect.create_collection(collection_name, default_fields)
# for i in range(num_collections):
# ids = connect.bulk_insert(collection_list[i], entities)
# connect.delete_entity_by_id(collection_list[i], ids[:nb//2])
# status = connect.compact(collection_list[i])
# assert status.OK()
# connect.drop_collection(collection_list[i])
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_add_entity_after_compact(self, connect, collection):
# '''
# target: test add entity after compact
# method: after compact operation, add entity
# expected: status ok, entity added
# '''
# ids = connect.bulk_insert(collection, default_entities)
# assert len(ids) == default_nb
# connect.flush([collection])
# # get collection info before compact
# info = connect.get_collection_stats(collection)
# size_before = info["partitions"][0]["segments"][0]["data_size"]
# status = connect.compact(collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(collection)
# size_after = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_before == size_after)
# ids = connect.bulk_insert(collection, default_entity)
# connect.flush([collection])
# res = connect.count_entities(collection)
# assert res == default_nb+1
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_index_creation_after_compact(self, connect, collection, get_simple_index):
# '''
# target: test index creation after compact
# method: after compact operation, create index
# expected: status ok, index description no change
# '''
# ids = connect.bulk_insert(collection, default_entities)
# connect.flush([collection])
# status = connect.delete_entity_by_id(collection, ids[:10])
# assert status.OK()
# connect.flush([collection])
# status = connect.compact(collection)
# assert status.OK()
# status = connect.create_index(collection, field_name, get_simple_index)
# assert status.OK()
# # status, result = connect.get_index_info(collection)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_delete_entities_after_compact(self, connect, collection):
# '''
# target: test delete entities after compact
# method: after compact operation, delete entities
# expected: status ok, entities deleted
# '''
# ids = connect.bulk_insert(collection, default_entities)
# assert len(ids) == default_nb
# connect.flush([collection])
# status = connect.compact(collection)
# assert status.OK()
# connect.flush([collection])
# status = connect.delete_entity_by_id(collection, ids)
# assert status.OK()
# connect.flush([collection])
# assert connect.count_entities(collection) == 0
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_search_after_compact(self, connect, collection):
# '''
# target: test search after compact
# method: after compact operation, search vector
# expected: status ok
# '''
# ids = connect.bulk_insert(collection, default_entities)
# assert len(ids) == default_nb
# connect.flush([collection])
# status = connect.compact(collection)
# assert status.OK()
# query = copy.deepcopy(default_single_query)
# query["bool"]["must"][0]["vector"][field_name]["query"] = [default_entity[-1]["values"][0],
# default_entities[-1]["values"][0],
# default_entities[-1]["values"][-1]]
# res = connect.search(collection, query)
# logging.getLogger().debug(res)
# assert len(res) == len(query["bool"]["must"][0]["vector"][field_name]["query"])
# assert res[0]._distances[0] > epsilon
# assert res[1]._distances[0] < epsilon
# assert res[2]._distances[0] < epsilon
#
#
# class TestCompactBinary:
# """
# ******************************************************************
# The following cases are used to test `compact` function
# ******************************************************************
# """
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_add_entity_and_compact(self, connect, binary_collection):
# '''
# target: test add binary vector and compact
# method: add vector and compact collection
# expected: status ok, vector added
# '''
# ids = connect.bulk_insert(binary_collection, default_binary_entity)
# assert len(ids) == 1
# connect.flush([binary_collection])
# # get collection info before compact
# info = connect.get_collection_stats(binary_collection)
# size_before = info["partitions"][0]["segments"][0]["data_size"]
# status = connect.compact(binary_collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(binary_collection)
# size_after = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_before == size_after)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_insert_and_compact(self, connect, binary_collection):
# '''
# target: test add entities with binary vector and compact
# method: add entities and compact collection
# expected: status ok, entities added
# '''
# ids = connect.bulk_insert(binary_collection, default_binary_entities)
# assert len(ids) == default_nb
# connect.flush([binary_collection])
# # get collection info before compact
# info = connect.get_collection_stats(binary_collection)
# size_before = info["partitions"][0]["segments"][0]["data_size"]
# status = connect.compact(binary_collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(binary_collection)
# size_after = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_before == size_after)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_insert_delete_part_and_compact(self, connect, binary_collection):
# '''
# target: test add entities, delete part of them and compact
# method: add entities, delete a few and compact collection
# expected: status ok, data size is smaller after compact
# '''
# ids = connect.bulk_insert(binary_collection, default_binary_entities)
# assert len(ids) == default_nb
# connect.flush([binary_collection])
# delete_ids = [ids[0], ids[-1]]
# status = connect.delete_entity_by_id(binary_collection, delete_ids)
# assert status.OK()
# connect.flush([binary_collection])
# # get collection info before compact
# info = connect.get_collection_stats(binary_collection)
# logging.getLogger().info(info["partitions"])
# size_before = info["partitions"][0]["data_size"]
# logging.getLogger().info(size_before)
# status = connect.compact(binary_collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(binary_collection)
# logging.getLogger().info(info["partitions"])
# size_after = info["partitions"][0]["data_size"]
# logging.getLogger().info(size_after)
# assert(size_before >= size_after)
#
# @pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_insert_delete_all_and_compact(self, connect, binary_collection):
# '''
# target: test add entities, delete them and compact
# method: add entities, delete all and compact collection
# expected: status ok, no data size in collection info because collection is empty
# '''
# ids = connect.bulk_insert(binary_collection, default_binary_entities)
# assert len(ids) == default_nb
# connect.flush([binary_collection])
# status = connect.delete_entity_by_id(binary_collection, ids)
# assert status.OK()
# connect.flush([binary_collection])
# # get collection info before compact
# info = connect.get_collection_stats(binary_collection)
# status = connect.compact(binary_collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(binary_collection)
# assert status.OK()
# logging.getLogger().info(info["partitions"])
# assert not info["partitions"][0]["segments"]
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_add_entity_and_compact_twice(self, connect, binary_collection):
# '''
# target: test add entity and compact twice
# method: add entity and compact collection twice
# expected: status ok
# '''
# ids = connect.bulk_insert(binary_collection, default_binary_entity)
# assert len(ids) == 1
# connect.flush([binary_collection])
# # get collection info before compact
# info = connect.get_collection_stats(binary_collection)
# size_before = info["partitions"][0]["segments"][0]["data_size"]
# status = connect.compact(binary_collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(binary_collection)
# size_after = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_before == size_after)
# status = connect.compact(binary_collection)
# assert status.OK()
# # get collection info after compact twice
# info = connect.get_collection_stats(binary_collection)
# size_after_twice = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_after == size_after_twice)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_insert_delete_part_and_compact_twice(self, connect, binary_collection):
# '''
# target: test add entities, delete part of them and compact twice
# method: add entities, delete part and compact collection twice
# expected: status ok, data size smaller after first compact, no change after second
# '''
# ids = connect.bulk_insert(binary_collection, default_binary_entities)
# assert len(ids) == default_nb
# connect.flush([binary_collection])
# delete_ids = [ids[0], ids[-1]]
# status = connect.delete_entity_by_id(binary_collection, delete_ids)
# assert status.OK()
# connect.flush([binary_collection])
# # get collection info before compact
# info = connect.get_collection_stats(binary_collection)
# size_before = info["partitions"][0]["data_size"]
# status = connect.compact(binary_collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(binary_collection)
# size_after = info["partitions"][0]["data_size"]
# assert(size_before >= size_after)
# status = connect.compact(binary_collection)
# assert status.OK()
# # get collection info after compact twice
# info = connect.get_collection_stats(binary_collection)
# size_after_twice = info["partitions"][0]["data_size"]
# assert(size_after == size_after_twice)
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_compact_multi_collections(self, connect):
# '''
# target: test compact works or not with multiple collections
# method: create 10 collections, add entities into them and compact in turn
# expected: status ok
# '''
# nq = 100
# num_collections = 10
# tmp, entities = gen_binary_entities(nq)
# collection_list = []
# for i in range(num_collections):
# collection_name = gen_unique_str("test_compact_multi_collection_%d" % i)
# collection_list.append(collection_name)
# connect.create_collection(collection_name, default_binary_fields)
# for i in range(num_collections):
# ids = connect.bulk_insert(collection_list[i], entities)
# assert len(ids) == nq
# status = connect.delete_entity_by_id(collection_list[i], [ids[0], ids[-1]])
# assert status.OK()
# connect.flush([collection_list[i]])
# status = connect.compact(collection_list[i])
# assert status.OK()
# status = connect.drop_collection(collection_list[i])
# assert status.OK()
#
# @pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_add_entity_after_compact(self, connect, binary_collection):
# '''
# target: test add entity after compact
# method: after compact operation, add entity
# expected: status ok, entity added
# '''
# ids = connect.bulk_insert(binary_collection, default_binary_entities)
# connect.flush([binary_collection])
# # get collection info before compact
# info = connect.get_collection_stats(binary_collection)
# size_before = info["partitions"][0]["segments"][0]["data_size"]
# status = connect.compact(binary_collection)
# assert status.OK()
# # get collection info after compact
# info = connect.get_collection_stats(binary_collection)
# size_after = info["partitions"][0]["segments"][0]["data_size"]
# assert(size_before == size_after)
# ids = connect.bulk_insert(binary_collection, default_binary_entity)
# connect.flush([binary_collection])
# res = connect.count_entities(binary_collection)
# assert res == default_nb + 1
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_delete_entities_after_compact(self, connect, binary_collection):
# '''
# target: test delete entities after compact
# method: after compact operation, delete entities
# expected: status ok, entities deleted
# '''
# ids = connect.bulk_insert(binary_collection, default_binary_entities)
# connect.flush([binary_collection])
# status = connect.compact(binary_collection)
# assert status.OK()
# connect.flush([binary_collection])
# status = connect.delete_entity_by_id(binary_collection, ids)
# assert status.OK()
# connect.flush([binary_collection])
# res = connect.count_entities(binary_collection)
# assert res == 0
#
# @pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_search_after_compact(self, connect, binary_collection):
# '''
# target: test search after compact
# method: after compact operation, search vector
# expected: status ok
# '''
# ids = connect.bulk_insert(binary_collection, default_binary_entities)
# assert len(ids) == default_nb
# connect.flush([binary_collection])
# status = connect.compact(binary_collection)
# assert status.OK()
# query_vecs = [default_raw_binary_vectors[0]]
# distance = jaccard(query_vecs[0], default_raw_binary_vectors[0])
# query = copy.deepcopy(default_binary_single_query)
# query["bool"]["must"][0]["vector"][binary_field_name]["query"] = [default_binary_entities[-1]["values"][0],
# default_binary_entities[-1]["values"][-1]]
#
# res = connect.search(binary_collection, query)
# assert abs(res[0]._distances[0]-distance) <= epsilon
#
# @pytest.mark.timeout(COMPACT_TIMEOUT)
# def test_search_after_compact_ip(self, connect, collection):
# '''
# target: test search after compact
# method: after compact operation, search vector
# expected: status ok
# '''
# ids = connect.bulk_insert(collection, default_entities)
# assert len(ids) == default_nb
# connect.flush([collection])
# status = connect.compact(collection)
# query = ip_query()
# query["bool"]["must"][0]["vector"][field_name]["query"] = [default_entity[-1]["values"][0],
# default_entities[-1]["values"][0],
# default_entities[-1]["values"][-1]]
# res = connect.search(collection, query)
# logging.getLogger().info(res)
# assert len(res) == len(query["bool"]["must"][0]["vector"][field_name]["query"])
# assert res[0]._distances[0] < 1 - epsilon
# assert res[1]._distances[0] > 1 - epsilon
# assert res[2]._distances[0] > 1 - epsilon
| StarcoderdataPython |
6572745 | from collections import deque
class Node:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, val):
        self.data = val
        self.left = None
        self.right = None
class Solution:
    def merge(self, result, array, reverse=False):
        """Drain the deque ``array`` into ``result`` as node values.

        :param result: list that collected values are appended to
        :param array: deque of tree nodes; emptied by this call
        :param reverse: False -> take nodes from the left (FIFO),
            True -> take from the right, reversing the level order
        :return: ``result`` (same list, for convenience)
        """
        if not reverse:
            while len(array):
                node = array.popleft()
                result.append(node.data)
        else:
            while len(array):
                node = array.pop()
                result.append(node.data)
        return result

    def zigZagTraversal(self, root):
        """Return node values in zig-zag (spiral) level order.

        Even levels are read left-to-right, odd levels right-to-left.

        :param root: root node of the tree, or None for an empty tree
        :return: list of node values in zig-zag order
        """
        # BUG FIX: the original crashed with AttributeError on an empty tree.
        if root is None:
            return []
        outerQueue = deque([root])
        result = []
        level = 0
        while len(outerQueue):
            # Copy the current level so we can both emit it and expand it.
            innerQueue = outerQueue.copy()
            if level % 2 == 0:
                result = self.merge(result, outerQueue)
            else:
                result = self.merge(result, outerQueue, True)
            # Queue up the next level's nodes in left-to-right order.
            while len(innerQueue):
                node = innerQueue.popleft()
                if node.left:
                    outerQueue.append(node.left)
                if node.right:
                    outerQueue.append(node.right)
            level += 1
        return result
| StarcoderdataPython |
8019584 | <gh_stars>1-10
# Generated by Django 3.0.4 on 2020-03-19 13:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add per-language ``name_*`` columns (de/en/fr/it) to Category."""

    dependencies = [
        ('business', '0014_auto_20200317_1403'),
    ]

    # One nullable CharField per supported language code, in the same
    # order as the original hand-written AddField operations.
    operations = [
        migrations.AddField(
            model_name='category',
            name=f'name_{language_code}',
            field=models.CharField(max_length=255, null=True),
        )
        for language_code in ('de', 'en', 'fr', 'it')
    ]
| StarcoderdataPython |
6656069 | <gh_stars>0
import connect
from getpass import getpass
import mysql.connector
def test_transfer():
    """Interactively connect to a MySQL database and check that the
    Titles table contains exactly 443308 transferred rows."""
    print('Enter information about the database with the Titles table, that you would like to check: ')
    db_host = input("Host: ")
    db_user = input("User: ")
    db_password = getpass()
    db_name = input("Database: ")

    connection = mysql.connector.connect(
        host=db_host, user=db_user, passwd=db_password, database=db_name)
    cursor = connection.cursor()
    cursor.execute("SELECT COUNT(*) FROM Titles")
    (number_of_rows,) = cursor.fetchone()
    connection.close()

    assert number_of_rows == 443308
# def test_transfer():
# number_of_inserted_rows = connect.transfer()
# assert number_of_inserted_rows == 443308
| StarcoderdataPython |
12845152 | # coding=utf-8
from .misc import AbstractAttribTracer
from .transformer import AbstractDataTransformer
from .translator import AbstractTranslator
from .translator_hub import AbstractTranslatorsHub
| StarcoderdataPython |
1806253 | import click
from sceptre.context import SceptreContext
from sceptre.cli.helpers import catch_exceptions
from sceptre.plan.plan import SceptrePlan
from sceptre.cli.helpers import stack_status_exit_code
@click.command(
    name="diff", short_help="Creates a diff between local and CloudFormation templates.")
@click.argument("path")
@click.pass_context
@catch_exceptions
def diff_command(ctx, path):
    """Build a SceptreContext for *path*, run the diff plan, and exit
    with a status code derived from the per-stack responses."""
    shared = ctx.obj
    sceptre_context = SceptreContext(
        command_path=path,
        project_path=shared.get("project_path"),
        user_variables=shared.get("user_variables"),
        options=shared.get("options"),
        ignore_dependencies=shared.get("ignore_dependencies"),
    )
    diff_responses = SceptrePlan(sceptre_context).diff()
    exit(stack_status_exit_code(diff_responses.values()))
| StarcoderdataPython |
9759711 | <reponame>katarinabrdnik/analiza-podatkov
import requests
import os.path
import re
import orodja
# Chart dimensions on rateyourmusic: 125 pages x 40 albums = 5000 entries.
STEVILO_STRANI = 125
STEVILO_ALBUMOV_NA_STRAN = 40
# One chart-entry block: from its position <div> to the lazy-loaded link container.
vzorec_bloka = re.compile(
    r'<div id="pos\d\d?\d?\d?"'
    r'.*?'
    r'class="linkfire_container lazyload">',
    flags=re.DOTALL
)
# Core album fields: chart position, internal album id, title and artist.
vzorec_albuma = re.compile(
    r'<div class="topcharts_position">(?P<mesto>\d+)<span class="topcharts_position_desktop">.*?'
    r'<div class="topcharts_item_title"><a href=".*?" '
    r'class="release" title="\[Album(?P<id>\d+)\]">(?P<naslov>.*?)</a></div>.*?'
    r'class="artist">(?P<izvajalec>.*?)</a></div>',
    flags=re.DOTALL
)
# Release date; the pattern relies on the year being the last 4 digits before a newline.
vzorec_datuma_izdaje = re.compile(
    r'<div class="topcharts_item_releasedate">(?P<datum_izdaje>.*?\d\d\d\d)\n'
)
vzorec_povprecna_ocena = re.compile(
    r'<span class="topcharts_stat topcharts_avg_rating_stat">(?P<povprecna_ocena>\d\.\d\d)</span>'
)
# Counts may carry a thousands separator, e.g. "12,345".
vzorec_stevila_ocen = re.compile(
    r'<span class="topcharts_stat topcharts_ratings_stat">(?P<stevilo_ocen>\d?\d?,?\d\d\d)</span>'
)
vzorec_stevila_kritik = re.compile(
    r'<span class="topcharts_stat topcharts_reviews_stat">(?P<stevilo_kritik>\d?\d?,?\d?\d)</span>'
)
# Primary and secondary genre links inside a chart entry.
vzorec_zanrov = re.compile(
    r'<a class="genre topcharts_item_genres" href="/genre/.*?/">(?P<zanr>.*?)</a>,?\s?</span>'
)
vzorec_sekundarnih_zanrov = re.compile(
    r'<a class="genre topcharts_item_secondarygenres" href="/genre/.*?/">(?P<sekundarni_zanr>.*?)</a>,?\s?</span>'
)
# NOTE(review): the capture group below is empty, so every match yields '' --
# it was probably meant to be (?P<oznaka>.*?); confirm against the live page HTML.
vzorec_oznake = re.compile(
    r'<span class="topcharts_item_descriptors">(?P<oznaka>),?\s?</span>'
)
# Browser-like User-Agent so the site does not reject the scraper outright.
headers = {'User-Agent': 'My User Agent 1.0'}
def ime_datoteke(st_strani):
    """Return the cached HTML file name for chart page number *st_strani*."""
    return "najpopularnejsi-albumi-{}.html".format(st_strani)
#for st_strani in range(1, 126):
# if os.path.isfile("/analiza-podatkov/pobrani_html/najpopularnejsi-albumi-{st_strani}.html") == False:
# url = (
# f"https://rateyourmusic.com/charts/popular/album/all-time/exc:live,archival/{st_strani}/#results"
# )
# print(f"Zajemam {url}")
# response = requests.get(url, headers=headers)
# vsebina = response.text
# with open(ime_datoteke(st_strani), 'w') as datoteka:
# datoteka.write(vsebina)
# Sanity check: count how many entries the album pattern finds across all pages.
najdeni_albumi = 0
# This loop was used to verify that the pattern finds enough data.
for stran in range(1, STEVILO_STRANI + 1):
    # NOTE(review): 'count' is assigned but never used in this loop.
    count = STEVILO_ALBUMOV_NA_STRAN
    datoteka = f'najpopularnejsi-albumi/najpopularnejsi-albumi-{stran}.html'
    vsebina = orodja.vsebina_datoteke(datoteka)
    for zadetek in re.finditer(vzorec_albuma, vsebina):
        najdeni_albumi += 1
    #print(najdeni_albumi)
#print(najdeni_albumi)
# it found 5000 blocks, epic!
def izloci_zanre(niz):
    """Collect every primary genre name matched by ``vzorec_zanrov`` in *niz*."""
    return [zadetek.group('zanr') for zadetek in vzorec_zanrov.finditer(niz)]
def izloci_sekundarne_zanre(niz):
    """Collect every secondary genre name matched in *niz*."""
    return [zadetek.group('sekundarni_zanr')
            for zadetek in vzorec_sekundarnih_zanrov.finditer(niz)]
def izloci_oznake(niz):
    """Collect every descriptor tag matched by ``vzorec_oznake`` in *niz*."""
    return [zadetek.group('oznaka') for zadetek in vzorec_oznake.finditer(niz)]
def izloci_podatke_albuma(blok):
    """Parse one chart-entry HTML block into an album dict.

    Missing optional fields are stored as None so every album dict exposes
    the same keys (the downstream CSV writer lists a fixed field set).

    :param blok: HTML of a single chart entry
    :return: dict with keys mesto, id, naslov, izvajalec, 'datum izdaje',
        'povprecna ocena', 'stevilo ocen', 'stevilo kritik', zanri,
        'sekundarni zanri', oznake
    """
    album = vzorec_albuma.search(blok).groupdict()
    album['mesto'] = int(album['mesto'])
    album['id'] = int(album['id'])
    # 'naslov' and 'izvajalec' are already populated by groupdict().

    # BUG FIX: the original `else: None` was a no-op expression, leaving the
    # key absent for albums without a release date.
    datum_izdaje = vzorec_datuma_izdaje.search(blok)
    album['datum izdaje'] = datum_izdaje['datum_izdaje'] if datum_izdaje else None

    povprecna_ocena = vzorec_povprecna_ocena.search(blok)
    album['povprecna ocena'] = povprecna_ocena['povprecna_ocena'] if povprecna_ocena else None

    # Counts may carry a thousands separator, e.g. "1,234".
    stevilo_ocen = vzorec_stevila_ocen.search(blok)
    album['stevilo ocen'] = stevilo_ocen['stevilo_ocen'].replace(',', '') if stevilo_ocen else None

    string_kritik = str(vzorec_stevila_kritik.search(blok)['stevilo_kritik'])
    album['stevilo kritik'] = int(string_kritik.replace(',', ''))

    zanri = izloci_zanre(blok)
    album['zanri'] = ', '.join(zanri) if zanri else None

    sekundarni_zanri = izloci_sekundarne_zanre(blok)
    album['sekundarni zanri'] = ', '.join(sekundarni_zanri) if sekundarni_zanri else None

    oznake = izloci_oznake(blok)
    album['oznake'] = ', '.join(oznake) if oznake else None

    return album
def albumi_na_strani(stran):
    """Yield a parsed album dict for every entry block on chart page *stran*."""
    pot = f'najpopularnejsi-albumi/najpopularnejsi-albumi-{stran}.html'
    html = orodja.vsebina_datoteke(pot)
    for ujemanje in vzorec_bloka.finditer(html):
        yield izloci_podatke_albuma(ujemanje.group(0))  # whole matched block
# Collect all 5000 albums, order them by chart position, and export.
albumi = []
for stran in range(1, 126):
    for album in albumi_na_strani(stran):
        albumi.append(album)
albumi.sort(key=lambda album: album['mesto'])
orodja.zapisi_json(albumi, 'obdelani-podatki/albumi.json')
# CSV column order must match the keys produced by izloci_podatke_albuma.
orodja.zapisi_csv(
    albumi,
    ['mesto', 'id', 'naslov', 'izvajalec', 'datum izdaje', 'povprecna ocena', 'stevilo ocen',
     'stevilo kritik', 'zanri', 'sekundarni zanri', 'oznake'],
    'obdelani-podatki/albumi.csv'
)
6470951 | import random
from flask import Flask, request
from datetime import datetime
from pymessenger import Bot
from NLP import wit_response
from tabulate import tabulate
import pandas
app = Flask("Schedule Bot")
ACCESS_TOKEN = ""  # Facebook Page access token -- must be filled in before running
bot = Bot(ACCESS_TOKEN)
VERIFY_TOKEN = ""  # token echoed back during the GET webhook verification handshake
# Lookup tables mapping wit.ai datetime output onto the timetable CSV grid.
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
times = ['08', '09', '10', '11', '12', '13', '14', '15']
@app.route('/', methods=['GET'])
def verify():
    """Handle the Facebook Messenger webhook verification handshake (GET)."""
    mode = request.args.get("hub.mode")
    challenge = request.args.get("hub.challenge")
    if mode == "subscribe" and challenge:
        # Reject the handshake when the shared token does not match.
        if request.args.get("hub.verify_token") != VERIFY_TOKEN:
            return "Verification token mismatch", 403
        return challenge, 200
    return "BUILD SUCCEEDED", 200
@app.route('/', methods=['POST'])
def webhook():
    """Receive Messenger webhook events and reply to each message.

    For every messaging event: acknowledge attachments, run the text through
    wit.ai to get an (entity, value) pair, and answer according to the entity.
    """
    print(request.data)
    data = request.get_json()
    if data['object'] == "page":
        entries = data['entry']
        for entry in entries:
            messaging = entry['messaging']
            for messaging_event in messaging:
                sender_id = messaging_event['sender']['id']
                if messaging_event.get('message'):
                    message_text = messaging_event['message'].get('text')
                    # Non-text attachments get a canned acknowledgement first.
                    if messaging_event['message'].get('attachments'):
                        response_sent_nontext = get_attachments()
                        send_message(sender_id, response_sent_nontext)
                    response = None
                    # wit.ai NLP: best (entity, value) pair for the message text.
                    entity, value = wit_response(message_text)
                    if entity == 'developer':
                        response = "<NAME> created me :)"
                    if entity == 'S1':
                        response = "Cool! :D \n Enter time :)"
                    if entity == 'timetable':
                        df = pandas.read_csv('timetable.csv')
                        response = "Here is your time table :D\n\n" + tabulate(df, tablefmt="grid")
                    if entity == 'user_greetings':
                        response = "Welcome to Schedule Chatbot! :D\nPlease enter your section :)"
                    if entity == 'datetime':
                        # wit returns ISO timestamps like 2020-01-01T08:00:00.000+05:30
                        dt = "{0}".format(str(value))
                        u = datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S.000+05:30')
                        v = u.strftime('%A %H:%M %Y-%m-%d').split()
                        index_of_day = days.index(v[0])
                        x = v[1][0:2]  # hour component, e.g. '08'
                        if x in times:
                            # +1 skips the day-name column in the timetable CSV.
                            index_of_time = times.index(x) + 1
                            df = pandas.read_csv('s1.csv')
                            response = "You have " + df.loc[index_of_day][index_of_time] + " :)"
                        else:
                            response = "You don't have any class at that time!"
                    if response == None:
                        response = "I have no idea what you are saying. I'm still learning :)"
                    bot.send_text_message(sender_id, response)
    return "ok", 200
def get_attachments():
    """Canned reply used whenever a message contains non-text attachments."""
    reply = "I've no idea what to do with it :("
    return reply
def send_message(sender_id, response):
    """Send *response* as a text message to the Messenger user *sender_id*."""
    # sends user the text message provided via input response parameter
    bot.send_text_message(sender_id, response)
    return "success"
if __name__ == "__main__":
    # Run the Flask development server; the reloader restarts on code changes.
    app.run(port=8000, use_reloader=True)
| StarcoderdataPython |
1854850 | #!/usr/bin/env python3
"""
This document is created by magic at 2018/8/17
"""
def binary_search(values, target):
    """Search for *target* in an ascending-sorted sequence.

    :param values: sequence sorted in ascending order
    :param target: value to locate
    :return: index of *target* if present, otherwise False
    """
    left, right = 0, len(values) - 1
    while left <= right:
        # Floor division avoids the int(float) round-trip of the original.
        mid = (left + right) // 2
        if target < values[mid]:
            right = mid - 1
        elif target > values[mid]:
            left = mid + 1
        else:
            return mid
    return False
if __name__ == '__main__':
    # Self-test: found values return their index, missing values return False.
    v = [1, 2, 3, 5, 7, 8]
    assert binary_search(v, 2) == 1
    assert binary_search(v, 5) == 3
    assert binary_search(v, 4) is False
| StarcoderdataPython |
3581001 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
from django.conf import settings
class Migration(migrations.Migration):
    """Create the NewEvent / NewEventInstance emarsys event models."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('django_emarsys', '0001_initial'),
    ]

    operations = [
        # Log record for a single triggered event send attempt.
        migrations.CreateModel(
            name='NewEventInstance',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('event_name', models.CharField(max_length=1024)),
                ('recipient_email', models.CharField(max_length=1024)),
                ('context', jsonfield.fields.JSONField(null=True)),
                ('data', jsonfield.fields.JSONField()),
                ('when', models.DateTimeField(auto_now_add=True)),
                ('source', models.CharField(max_length=1024, choices=[('automatic', 'automatic'), ('manual', 'manual')])),
                ('result', models.CharField(max_length=1024, blank=True)),
                ('result_code', models.CharField(max_length=1024, blank=True)),
                ('state', models.CharField(default='sending', max_length=1024, choices=[('sending', 'sending'), ('error', 'error'), ('success', 'success')])),
                ('emarsys_id', models.IntegerField(null=True, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Mapping from a local event name to its emarsys event id.
        migrations.CreateModel(
            name='NewEvent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=1024)),
                ('emarsys_id', models.IntegerField()),
            ],
            options={
                'ordering': ['name'],
                'permissions': [('can_trigger_event', 'Can trigger emarsys events.')],
            },
            bases=(models.Model,),
        ),
    ]
| StarcoderdataPython |
1703874 | <gh_stars>0
# Copyright (c) 2019 <NAME>
# https://github.com/grzracz
# Files available under MIT license
import sys # (sys.argv)
import socket # (socket.gethostbyname())
import subprocess # for using ipconfig (subnet mask) and ping
# tries to find ip from domain
def viable_ip(domain_name):
    """Return True if *domain_name* resolves to an IP address.

    BUG FIX: the original used ``finally: return viable`` with no except
    clause, which silently swallowed *every* exception (including
    KeyboardInterrupt).  Resolution failures raise socket.gaierror, a
    subclass of OSError, so we catch exactly that family.
    """
    try:
        socket.gethostbyname(domain_name)
    except OSError:
        return False
    return True
# converts ip address to decimal integer
def ip_to_int(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    total = 0
    for octet in ip.split('.'):
        # Base-256 positional accumulation, most significant octet first.
        total = total * 256 + int(octet)
    return total
# converts decimal ip to binary ip
def ip_dec_to_bin(ip):
    """Render each octet of a dotted-quad IP as a zero-padded 8-bit binary string."""
    return '.'.join('{0:08b}'.format(int(octet)) for octet in ip.split('.'))
# converts binary ip to decimal
def ip_bin_to_dec(ip):
    """Convert a dotted binary IP (four 8-bit groups) back to decimal form."""
    return '.'.join(str(int(group, 2)) for group in ip.split('.'))
# performs logical and on two ip addresses
def logical_and(ip1, ip2):
    """Bitwise AND of two dotted-quad IPs (e.g. IP AND mask -> network address).

    Equivalent to the original bit-string round-trip: ANDing each pair of
    octets as integers is the same as ANDing the 8-bit binary renderings.
    """
    paired = zip(ip1.split('.'), ip2.split('.'))
    return '.'.join(str(int(a) & int(b)) for a, b in paired)
# performs logical or on two ip addresses
def logical_or(ip1, ip2):
    """Bitwise OR of two dotted-quad IPs (e.g. network OR ~mask -> broadcast).

    Per-octet integer OR is equivalent to the original's 8-bit string OR.
    """
    paired = zip(ip1.split('.'), ip2.split('.'))
    return '.'.join(str(int(a) | int(b)) for a, b in paired)
# performs logical not on an ip address
def logical_not(ip):
    """Bitwise NOT of a dotted-quad IP (used to invert a subnet mask).

    Inverting 8 bits is the same as subtracting the octet from 255.
    """
    return '.'.join(str(255 - int(octet)) for octet in ip.split('.'))
# converts cidr to ip address
# converts cidr to ip address
def cidr_to_ip(cidr):
    """Expand a CIDR prefix length ('/24', '24' or 24) to a dotted subnet mask."""
    if isinstance(cidr, str):
        cidr = int(cidr.replace('/', ''))
    # Top `cidr` bits set, truncated to 32 bits (cidr == 0 yields 0).
    mask = (0xFFFFFFFF << (32 - cidr)) & 0xFFFFFFFF
    return '.'.join(str((mask >> shift) & 0xFF) for shift in (24, 16, 8, 0))
# converts mask to CIDR and combines with ip
def get_address_from_system():
    """Return this machine's address as 'ip/CIDR' (CIDR = count of 1-bits in the mask)."""
    return get_ip_from_system() + "/" + str(ip_dec_to_bin(get_mask_from_system()).count('1')) # ip/cidr
# gets subnet mask from system
def get_mask_from_system():
    """Scrape the local subnet mask from ``ipconfig`` output.

    NOTE(review): Windows-only -- relies on the ``ipconfig`` command and on
    the mask being printed on the line immediately after the IPv4 address.
    """
    ip = socket.gethostbyname((socket.gethostname()))
    proc = subprocess.Popen('ipconfig', stdout=subprocess.PIPE)
    # Scan output until the line containing this host's IP address.
    while True:
        line = proc.stdout.readline()
        if str(ip).encode() in line:
            break
    # The mask follows on the next line, after the last ':'.
    mask = str(proc.stdout.readline()).rstrip().split(":")[-1].replace(' ', '') # extracting subnet mask
    return mask[:-5] # removing \r and \n
# gets ip from system
def get_ip_from_system():
    """Resolve this machine's hostname to its primary IPv4 address."""
    return socket.gethostbyname((socket.gethostname()))
# checks if ip is correct
def correct_ip_address(ip):
    """Validate an address of the form 'a.b.c.d/n'.

    Each octet must be in 0-255 and the CIDR suffix in 0-32.

    BUG FIX: the original crashed with ValueError on malformed inputs such
    as '1/2.3.4.5' or '1.2.3.4/8/9' (int() on substrings containing '/');
    this version returns False for any malformed input instead.

    :param ip: candidate address string
    :return: True if the address is well-formed, False otherwise
    """
    # Only digits, dots and a slash may appear at all.
    if any(ch not in '0123456789./' for ch in ip):
        return False
    # Exactly one mask separator.
    if ip.count('/') != 1:
        return False
    host_part, mask_part = ip.split('/')
    octets = host_part.split('.')
    if len(octets) != 4:
        return False
    for octet in octets:
        # isdigit() rejects empty octets ('..', leading/trailing dots).
        if not octet.isdigit() or int(octet) > 255:
            return False
    # isdigit() rejects an empty mask and any stray dots after the slash.
    if not mask_part.isdigit():
        return False
    return int(mask_part) <= 32
# returns network address
def network_address(address):
    """AND the IP part of 'ip/cidr' with its expanded mask to get the network address."""
    return logical_and(address[:address.find('/')], cidr_to_ip(address[address.find('/'):])) # logical_and(ip, mask)
# returns network class based on its first octave
# returns network class based on its first octave
def network_class(ip):
    """Map the first octet of *ip* to its historical address-class label."""
    first_octet = int(ip.split('.')[0])
    if first_octet < 128:
        return "A (very big)"
    if first_octet < 192:
        return "B (medium size)"
    if first_octet < 224:
        return "C (small)"
    if first_octet < 240:
        return "D (for group transmission)"
    return "E (reserved for IETF)"
# returns if address is public or private
# returns if address is public or private
def is_private(address):
    """Return True if *address* ('ip/cidr') lies in an RFC 1918 private range.

    BUG FIX: the original fell off the end (implicitly returning None) for
    172.x addresses whose second octet was outside 16-31; this version
    always returns a bool.

    :param address: address string of the form 'a.b.c.d/n'
    :return: True for 10.0.0.0/8, 172.16.0.0/12 and 192.168.0.0/16 ranges
    """
    octets = address.split('/')[0].split('.')
    first, second = int(octets[0]), int(octets[1])
    if first == 10:
        return True
    if first == 172:
        return 16 <= second <= 31
    return first == 192 and second == 168
# returns network mask from address
def network_mask(address):
    """Extract the '/n' suffix of *address* and expand it to a dotted mask."""
    return cidr_to_ip(address[address.find('/'):])
# returns network broadcast address
def network_broadcast_address(address):
    """Broadcast address = network address OR the inverted subnet mask."""
    return logical_or(network_address(address), logical_not(network_mask(address)))
# returns first host address
# returns first host address
def first_host_address(address):
    """Return the network address with its last octet incremented by one."""
    net = network_address(address)
    prefix, last_octet = net.rsplit('.', 1)
    return prefix + '.' + str(int(last_octet) + 1)
# returns last host address
# returns last host address
def last_host_address(address):
    """Return the broadcast address with its last octet decremented by one."""
    broadcast = network_broadcast_address(address)
    prefix, last_octet = broadcast.rsplit('.', 1)
    return prefix + '.' + str(int(last_octet) - 1)
# returns max number of hosts
def max_host_number(address):
    """Usable host count: value of all host bits (broadcast AND ~mask) minus one."""
    return ip_to_int(logical_and(network_broadcast_address(address), logical_not(network_mask(address)))) - 1
# pings ip address and prints/saves output
# pings ip address and prints/saves output
def ping(ip, file_name):
    """Ping *ip* five times, echoing every output line and appending it to
    the already-open *file_name* handle.

    NOTE(review): '-n 5' is the Windows ping count flag (Linux uses '-c').
    """
    ping_process = subprocess.Popen(["ping", "-n", "5", ip], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while True:
        line = ping_process.stdout.readline()
        file_name.write(line[:-1].decode())
        print(line[:-2].decode())
        if line == b'':  # empty read means EOF: the ping process finished
            break
# main
# --- argument handling: optional 'ip/cidr' expected as argv[1] ---
if len(sys.argv) == 1:
    print("No parameters given, using current computer address...")
    _address = get_address_from_system()
elif len(sys.argv) == 2:
    _address = sys.argv[1]
else:
    _address = sys.argv[1]
    sys.stderr.write("INPUT ERROR: Too many parameters, using the first one...\n")
# Fall back to the local machine's address if the given one is malformed.
if not correct_ip_address(_address):
    sys.stderr.write("INPUT ERROR: Incorrect address, using current computer address...\n")
    _address = get_address_from_system()
    if not correct_ip_address(_address):
        sys.exit("FATAL ERROR: Unable to get current computer address, quitting...")
else:
    print("IP Address and Subnet Mask are correct.")
# --- derive and print all network characteristics ---
print("\nIP Address:", _address[:_address.find('/')] + ", binary:", ip_dec_to_bin(_address[:_address.find('/')]))
print("Subnet Mask (CIDR):", _address[_address.find('/'):])
print("Data:")
network_address_value = network_address(_address)
print("Network address:", network_address_value + ", binary:", ip_dec_to_bin(network_address_value))
print("Network class:", network_class(_address))
print("Network type:", "private" if is_private(_address) else "public")
network_mask_value = network_mask(_address)
print("Subnet Mask:", network_mask_value + ", binary:", ip_dec_to_bin(network_mask_value))
network_broadcast_address_value = network_broadcast_address(_address)
print("Broadcast address:", network_broadcast_address_value + ", binary:",
      ip_dec_to_bin(network_broadcast_address_value))
first_host_address_value = first_host_address(_address)
print("First host address:", first_host_address_value + ", binary:", ip_dec_to_bin(first_host_address_value))
last_host_address_value = last_host_address(_address)
print("Last host address:", last_host_address_value + ", binary:", ip_dec_to_bin(last_host_address_value))
max_host_number_value = max_host_number(_address)
print("Max number of hosts:", max_host_number_value)
# --- persist the same report to '<ip>-<cidr>-info.txt' ---
name = _address[:_address.find('/')] + "-" + _address[_address.find('/') + 1:] + "-info.txt"
file = open(name, 'w+')
print("\nSaving values to a text file (" + name + ")...")
file.write("IP Address: " + _address[:_address.find('/')] + " (" + ip_dec_to_bin(_address[:_address.find('/')]) + ")\n")
file.write("Subnet Mask (CIDR): " + _address[_address.find('/'):] + "\n\n")
file.write("Network address: " + network_address_value + " (" + ip_dec_to_bin(network_address_value) + ")\n")
file.write("Network class: " + network_class(_address) + '\n')
file.write("Network type: " + ("private" if is_private(_address) else "public") + '\n')
file.write("Subnet Mask: " + network_mask_value + " (" + ip_dec_to_bin(network_mask_value) + ")\n")
file.write("Broadcast address: " + network_broadcast_address_value + " (" +
           ip_dec_to_bin(network_broadcast_address_value) + ")\n")
file.write("First host address: " + first_host_address_value + " (" + ip_dec_to_bin(first_host_address_value) + ")\n")
file.write("Last host address: " + last_host_address_value + " (" + ip_dec_to_bin(last_host_address_value) + ")\n")
file.write("Max number of hosts: " + str(max_host_number_value) + '\n')
# --- optional interactive pinging of the target and any extra CLI domains ---
ip_addr = _address[:_address.find('/')]
# Network and broadcast addresses are not hosts, so they are never pinged.
if ip_addr != network_address(_address) and ip_addr != network_broadcast_address(_address):
    if is_private(_address):
        if network_address(_address) != network_address(get_address_from_system()):
            print("This host is in a different private network. Unable to ping.")
        else:
            print("This address is in your local network.")
            user_input = input("Do you want to ping it? Y/N: ")
            if user_input == 'Y' or user_input == 'y':
                ping(ip_addr, file)
    else:
        print("This host is public.")
        user_input = input("Do you want to ping it? Y/N: ")
        if user_input == 'Y' or user_input == 'y':
            ping(ip_addr, file)
# Any additional command-line parameters are treated as candidate domains.
for _x in range(1, len(sys.argv)):
    if sys.argv[_x] == ip_addr:
        continue
    if viable_ip(sys.argv[_x]):
        print("Parameter " + sys.argv[_x] + " is a pingable domain.")
        user_input = input("Do you want to ping it? Y/N: ")
        if user_input == 'Y' or user_input == 'y':
            file.write("\nPinging " + sys.argv[_x] + ":")
            ping(socket.gethostbyname(sys.argv[_x]), file)
file.close()
| StarcoderdataPython |
1635789 | """
Core implementation of :mod:`facet.simulation.partition`
"""
import logging
import math
import operator as op
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, Iterable, Optional, Sequence, Tuple, TypeVar
import numpy as np
import pandas as pd
from pytools.api import AllTracker, inheritdoc
from pytools.fit import FittableMixin
log = logging.getLogger(__name__)
__all__ = [
"Partitioner",
"RangePartitioner",
"ContinuousRangePartitioner",
"IntegerRangePartitioner",
"CategoryPartitioner",
]
#
# Type variables
#
T_Self = TypeVar("T_Self")
T_Values = TypeVar("T_Values")
T_Values_Numeric = TypeVar("T_Values_Numeric", int, float)
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# Class definitions
#
class Partitioner(
    FittableMixin[Iterable[T_Values]], Generic[T_Values], metaclass=ABCMeta
):
    """
    Abstract base class of all partitioners.
    """
    # Upper limit used when the caller does not pass ``max_partitions``.
    DEFAULT_MAX_PARTITIONS = 20
    def __init__(self, max_partitions: Optional[int] = None) -> None:
        """
        :param max_partitions: the maximum number of partitions to generate; must
            be at least 2 (default: {DEFAULT_MAX_PARTITIONS})
        """
        if max_partitions is None:
            self._max_partitions = Partitioner.DEFAULT_MAX_PARTITIONS
        elif max_partitions < 2:
            raise ValueError(f"arg max_partitions={max_partitions} must be at least 2")
        else:
            self._max_partitions = max_partitions
    # Substitute the actual default into the docstring so rendered API docs
    # show the real value instead of the "{DEFAULT_MAX_PARTITIONS}" placeholder.
    __init__.__doc__ = __init__.__doc__.replace(
        "{DEFAULT_MAX_PARTITIONS}", repr(DEFAULT_MAX_PARTITIONS)
    )
    @property
    def max_partitions(self) -> int:
        """
        The maximum number of partitions to be generated by this partitioner.
        """
        return self._max_partitions
    @property
    @abstractmethod
    def partitions_(self) -> Sequence[T_Values]:
        """
        Return central values of the partitions.
        Requires that this partitioner has been fitted with a set of observed values.
        :return: a sequence of central values for each partition
        """
    @property
    @abstractmethod
    def frequencies_(self) -> Sequence[int]:
        """
        Return the count of observed elements in each partition.
        :return: a sequence of value counts for each partition
        """
    @property
    @abstractmethod
    def is_categorical(self) -> bool:
        """
        ``True`` if this partitioner handles categorical values, ``False`` otherwise.
        """
    @abstractmethod
    def fit(self: T_Self, values: Iterable[T_Values], **fit_params: Any) -> T_Self:
        """
        Calculate the partitioning for the given observed values.
        :param values: a sequence of observed values as the empirical basis for
            calculating the partitions
        :param fit_params: optional fitting parameters
        :return: ``self``
        """
@inheritdoc(match="[see superclass]")
class RangePartitioner(
    Partitioner[T_Values_Numeric], Generic[T_Values_Numeric], metaclass=ABCMeta
):
    """
    Abstract base class for numerical partitioners.
    """

    def __init__(
        self,
        max_partitions: Optional[int] = None,
        lower_bound: Optional[T_Values_Numeric] = None,
        upper_bound: Optional[T_Values_Numeric] = None,
    ) -> None:
        """
        :param max_partitions: the maximum number of partitions to make
            (default: 20); should be at least 2
        :param lower_bound: the lower bound of the elements in the partition
        :param upper_bound: the upper bound of the elements in the partition
        """
        super().__init__(max_partitions)
        if (
            lower_bound is not None
            and upper_bound is not None
            and lower_bound > upper_bound
        ):
            raise ValueError(
                f"arg lower_bound > arg upper_bound: [{lower_bound}, {upper_bound})"
            )
        self._lower_bound = lower_bound
        self._upper_bound = upper_bound
        # the following attributes are computed by fit()
        self._step: Optional[T_Values_Numeric] = None
        self._frequencies: Optional[Sequence[int]] = None
        self._partitions: Optional[Sequence[T_Values_Numeric]] = None
        self._partition_bounds: Optional[
            Sequence[Tuple[T_Values_Numeric, T_Values_Numeric]]
        ] = None

    @property
    def lower_bound(self) -> T_Values_Numeric:
        """
        The lower bound of the partitioning.
        ``Null`` if no explicit lower bound is set.
        """
        return self._lower_bound

    @property
    def upper_bound(self) -> T_Values_Numeric:
        """
        The upper bound of the partitioning.
        ``Null`` if no explicit upper bound is set.
        """
        return self._upper_bound

    @property
    def is_categorical(self) -> bool:
        """
        ``False``
        """
        return False

    @property
    def partitions_(self) -> Sequence[T_Values_Numeric]:
        """[see superclass]"""
        self._ensure_fitted()
        return self._partitions

    @property
    def partition_bounds_(self) -> Sequence[Tuple[T_Values_Numeric, T_Values_Numeric]]:
        """
        Return the endpoints of the intervals that delineate each partitions.
        :return: sequence of tuples (x, y) for every partition, where x is the
            inclusive lower bound of a partition range, and y is the exclusive upper
            bound of a partition range
        """
        self._ensure_fitted()
        return self._partition_bounds

    @property
    def partition_width_(self) -> T_Values_Numeric:
        """
        The width of each partition.
        """
        self._ensure_fitted()
        return self._step

    @property
    def frequencies_(self) -> Sequence[int]:
        """[see superclass]"""
        self._ensure_fitted()
        return self._frequencies

    # noinspection PyMissingOrEmptyDocstring
    def fit(
        self: T_Self,
        values: Iterable[T_Values_Numeric],
        **fit_params: Any,
    ) -> T_Self:
        """[see superclass]"""
        self: RangePartitioner  # support type hinting in PyCharm

        # ensure arg values is an array
        if not isinstance(values, np.ndarray):
            if isinstance(values, pd.Series):
                values = values.values
            else:
                if not isinstance(values, Sequence):
                    # BUG FIX: materialize arbitrary iterables with list();
                    # the original kept the iterator, and np.array(<iterator>)
                    # produces a useless 0-d object array
                    try:
                        values = list(values)
                    except TypeError:
                        raise TypeError("arg values must be iterable")
                values = np.array(values)

        lower_bound = self._lower_bound
        upper_bound = self._upper_bound

        # derive missing bounds from the observed data, clipping outliers
        # beyond 1.5 * IQR (Tukey fences)
        if lower_bound is None or upper_bound is None:
            q3q1 = np.nanquantile(values, q=[0.75, 0.25])
            inlier_range = op.sub(*q3q1) * 1.5  # iqr * 1.5
            if lower_bound is None:
                lower_bound = values[values >= q3q1[1] - inlier_range].min()
            if upper_bound is None:
                upper_bound = values[values <= q3q1[0] + inlier_range].max()

        assert upper_bound >= lower_bound

        # calculate the step count based on the maximum number of partitions,
        # rounded to the next-largest rounded value ending in 1, 2, or 5
        self._step = step = self._step_size(lower_bound, upper_bound)

        # calculate centre values of the first and last partition;
        # both are rounded to multiples of the step size
        first_partition = math.floor((lower_bound + step / 2) / step) * step
        last_partition = math.ceil((upper_bound - step / 2) / step) * step
        n_partitions = int(round((last_partition - first_partition) / self._step)) + 1

        self._partitions = partitions = np.round(
            first_partition + np.arange(n_partitions) * self._step,
            # round to the nearest power of 10 of the step variable
            int(-np.floor(np.log10(self._step))),
        ).tolist()

        center_offset_left = self._partition_center_offset
        center_offset_right = self._step - center_offset_left
        self._partition_bounds = [
            (center - center_offset_left, center + center_offset_right)
            for center in partitions
        ]

        # calculate the number of elements in each partitions
        # create the bins, starting with the lower bound of the first partition
        partition_bins = (first_partition - step / 2) + np.arange(
            n_partitions + 1
        ) * step
        partition_indices = np.digitize(values, bins=partition_bins)
        # frequency counts will include left and right outliers, hence n_partitions + 2
        # and we exclude the first and last element of the result
        frequencies = np.bincount(partition_indices, minlength=n_partitions + 2)[1:-1]
        self._frequencies = frequencies

        return self

    @property
    def is_fitted(self) -> bool:
        """[see superclass]"""
        return self._frequencies is not None

    @staticmethod
    def _ceil_step(step: float):
        """
        Round the step size (arbitrary float) to a human-readable number like 0.5, 1, 2.
        :param step: the step size to round by
        :return: the nearest greater or equal step size in the series
            (..., 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, ...)
        """
        if step <= 0:
            raise ValueError("arg step must be positive")
        return min(10 ** math.ceil(math.log10(step * m)) / m for m in [1, 2, 5])

    @staticmethod
    @abstractmethod
    def _step_size(
        lower_bound: T_Values_Numeric, upper_bound: T_Values_Numeric
    ) -> T_Values_Numeric:
        # Compute the step size (interval length) used in the partitions
        pass

    @property
    @abstractmethod
    def _partition_center_offset(self) -> T_Values_Numeric:
        # Offset between center and endpoints of an interval
        pass
class ContinuousRangePartitioner(RangePartitioner[float]):
    """
    Partition floating-point values into adjacent intervals of equal length.
    The interval length is derived from :attr:`.max_partitions`,
    :attr:`.lower_bound`, and :attr:`.upper_bound`, and is always a power of
    10, or a power of 10 multiplied by 2 or 5 (e.g. 0.1, 0.2, 0.5, 1.0, 2.0,
    5.0, ...), so boundaries stay easy to interpret. The resulting intervals
    are laid out such that :attr:`lower_bound` falls inside the first interval
    and :attr:`upper_bound` inside the last.
    """

    def _step_size(self, lower_bound: float, upper_bound: float) -> float:
        # raw interval length before rounding up to a "human-friendly" step
        raw_step = (upper_bound - lower_bound) / (self.max_partitions - 1)
        return RangePartitioner._ceil_step(raw_step)

    @property
    def _partition_center_offset(self) -> float:
        # a partition's center lies exactly half a step from either endpoint
        return self._step / 2
class IntegerRangePartitioner(RangePartitioner[int]):
    """
    Partition integer values into adjacent intervals of equal length.
    The interval length is derived from :attr:`.max_partitions`,
    :attr:`.lower_bound`, and :attr:`.upper_bound`; it is always an integer
    that is a power of 10, or a power of 10 multiplied by 2 or 5 (e.g. 1, 2,
    5, 10, 20, 50, ...), so boundaries stay easy to interpret. The resulting
    intervals are laid out such that :attr:`lower_bound` falls inside the
    first interval and :attr:`upper_bound` inside the last.
    """

    def _step_size(self, lower_bound: int, upper_bound: int) -> int:
        # round up to a "human-friendly" integer step, but never below 1
        raw_step = (upper_bound - lower_bound) / (self.max_partitions - 1)
        return max(1, int(RangePartitioner._ceil_step(raw_step)))

    @property
    def _partition_center_offset(self) -> int:
        # integer partitions use floor division for the half-step offset
        return self._step // 2
@inheritdoc(match="[see superclass]")
class CategoryPartitioner(Partitioner[T_Values]):
    """
    Partition categorical values.
    Create one partition each per unique value, considering only the
    :attr:`.max_partitions` most frequent values.
    """

    def __init__(self, max_partitions: Optional[int] = None) -> None:
        """[see superclass]"""
        super().__init__(max_partitions=max_partitions)
        # computed by fit()
        self._frequencies = None
        self._partitions = None

    @property
    def is_fitted(self) -> bool:
        """[see superclass]"""
        return self._frequencies is not None

    @property
    def is_categorical(self) -> bool:
        """
        ``True``
        """
        return True

    @property
    def partitions_(self) -> Sequence[T_Values]:
        """[see superclass]"""
        self._ensure_fitted()
        return self._partitions

    @property
    def frequencies_(self) -> Sequence[int]:
        """[see superclass]"""
        self._ensure_fitted()
        return self._frequencies

    # noinspection PyMissingOrEmptyDocstring
    def fit(self: T_Self, values: Iterable[T_Values], **fit_params: Any) -> T_Self:
        """[see superclass]"""
        self: CategoryPartitioner  # support type hinting in PyCharm

        # ensure arg values is a pandas series
        if not isinstance(values, pd.Series):
            if not isinstance(values, (np.ndarray, Sequence)):
                # BUG FIX: materialize arbitrary iterables with list(); the
                # original kept the iterator, and pd.Series cannot be
                # constructed from a plain iterator
                try:
                    values = list(values)
                except TypeError:
                    raise TypeError("arg values must be iterable")
            values = pd.Series(data=values)

        # one partition per unique value, most frequent values first,
        # truncated to the max_partitions most frequent categories
        value_counts = values.value_counts(ascending=False)
        max_partitions = self.max_partitions
        self._partitions = value_counts.index.values[:max_partitions]
        self._frequencies = value_counts.values[:max_partitions]

        return self
__tracker.validate()
| StarcoderdataPython |
1888192 | from configya import YAMLConfig
from cosmogrb.utils.package_utils import get_path_of_user_dir
# Default configuration tree; persisted to the user's cosmogrb_config.yml
# on first run and merged with any existing file on subsequent runs.
structure = {}
structure["logging"] = dict(level="INFO")
structure["multiprocess"] = dict(n_grb_workers=6, n_universe_workers=6)
structure["gbm"] = {}
structure["gbm"]["orbit"] = dict(default_time=0, use_random_time=True)
class CosmogrbConfig(YAMLConfig):
    """YAML-backed user configuration for cosmogrb.

    Stored as ``cosmogrb_config.yml`` inside the package's user directory;
    missing keys are filled in from the default ``structure`` above.
    """
    def __init__(self):
        super(CosmogrbConfig, self).__init__(
            structure=structure,
            config_path=get_path_of_user_dir(),
            config_name="cosmogrb_config.yml",
        )
# module-level singleton; import this instead of instantiating CosmogrbConfig
cosmogrb_config = CosmogrbConfig()
__all__ = ["cosmogrb_config"]
| StarcoderdataPython |
3467757 | """
Helper to get nameservers information and resolving domains.
"""
import dns
import dns.message
import dns.rdataclass
import dns.rdatatype
import dns.query
import dns.resolver
class DNSClient:
    """Thin wrapper around dnspython's stub resolver.

    Resolves names against a configurable set of upstream nameservers
    (Google public DNS by default).
    """

    def __init__(self, nameservers=None, port=53):
        """
        :param nameservers: optional list of upstream servers; any
            "localhost" entry is normalized to "127.0.0.1"
        :param port: DNS port of the upstream servers
        """
        # BUG FIX: operate on our own copy instead of mutating the caller's
        # list (the original popped/appended on the `nameservers` argument,
        # which modified the caller's list in place and raised
        # AttributeError for immutable sequences such as tuples).
        servers = list(nameservers) if nameservers else ["8.8.8.8", "8.8.4.4"]
        if "localhost" in servers:
            # replace in place so the resolver priority order is preserved
            servers[servers.index("localhost")] = "127.0.0.1"
        self.nameservers = servers
        self.resolver = dns.resolver.Resolver(configure=False)
        self.resolver.nameservers = self.nameservers
        self.resolver.port = port

    def get_nameservers(self, domain="threefoldtoken.org"):
        """Return the NS record targets of *domain* as a list of strings."""
        answer = self.resolver.query(domain, "NS")
        res = []
        for rr in answer:
            res.append(rr.target.to_text())
        return res

    def get_namerecords(self, url="www.threefoldtoken.org"):
        """
        return ip addr for a full name
        """
        answer = self.resolver.query(url, "A")
        res = []
        for rr in answer:
            res.append(rr.address)
        return res

    def is_free(self, domain, domain_type="A"):
        """Return True when *domain* does not resolve to a record of *domain_type*."""
        try:
            self.query(domain, domain_type)
        except Exception:
            # BUG FIX: narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt / SystemExit
            return True
        return False

    def query(self, *args, **kwargs):
        """Pass all arguments through to the underlying resolver's query()."""
        return self.resolver.query(*args, **kwargs)
def export_module_as():
    """Return a DNSClient with default settings.

    NOTE(review): presumably a module-export hook for a plugin framework —
    confirm against the importing side.
    """
    return DNSClient()
| StarcoderdataPython |
4876251 | <filename>matdolbook/board/models.py
from django.db import models
from matdolbook.users import models as user_models
class TimeStampModel(models.Model):
    """Abstract base model adding creation/modification timestamps."""
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now = True)  # refreshed on every save
    class Meta:
        abstract = True  # no table of its own; inherited by concrete models
class Content(TimeStampModel):
    """A free-form text post created by a user."""
    #file = models.ImageField(null= True)
    text = models.TextField()
    creator = models.ForeignKey(user_models.User, on_delete=models.CASCADE, related_name='contents')

    def __str__(self):
        # Truncate long texts so admin listings stay readable.
        # (Replaces a lambda that was re-created on every call — PEP 8 E731.)
        preview = self.text[:25] + '...' if len(self.text) > 25 else self.text
        return "{} - by {}".format(preview, self.creator.username)

    @property
    def comment_count(self):
        """Number of comments attached to this content."""
        return self.comments.all().count()

    @property
    def like_count(self):
        """Number of likes attached to this content."""
        return self.likes.all().count()

    class Meta:
        ordering = ['-created_at']  # newest first
class Book(models.Model):
    """A book that users can post about and mark as interesting."""
    title = models.CharField(max_length = 30)
    author = models.CharField(max_length = 30)
    #content = models.ForeignKey(ContentsToBook , on_delete =models.CASCADE, related_name='contents')
    @property
    def interest_count(self):
        """Number of users who marked this book as interesting."""
        return self.interests.all().count()
    @property
    def content_count(self):
        """Number of posts written about this book."""
        return self.contentsAboutbook.all().count()
    def __str__(self):
        return "title - {} , author - {}".format(self.title, self.author)
class ContentToBook(TimeStampModel):
    """A user's text post about a specific book."""
    text = models.TextField()
    creator = models.ForeignKey(user_models.User, on_delete=models.CASCADE, related_name='contentsTobook')
    bookinfo = models.ForeignKey(Book, on_delete=models.CASCADE, related_name='contentsAboutbook')

    def __str__(self):
        # Truncate long texts so admin listings stay readable.
        # (Replaces a lambda that was re-created on every call — PEP 8 E731.)
        preview = self.text[:25] + '...' if len(self.text) > 25 else self.text
        return "{} - by {} BOOK: {}".format(preview, self.creator.username, self.bookinfo)

    @property
    def comment_count(self):
        """Number of comments attached to this post."""
        return self.commentsBook.all().count()

    @property
    def like_count(self):
        """Number of likes attached to this post."""
        return self.likesBook.all().count()

    class Meta:
        ordering = ['-created_at']  # newest first
class Comment(TimeStampModel):
    """A comment on either a Content post or a ContentToBook post.

    Exactly one of ``content`` / ``contentsToBook`` is expected to be set
    (both are nullable) — NOTE(review): not enforced at the DB level.
    """
    message = models.TextField()
    creator = models.ForeignKey(user_models.User, on_delete = models.CASCADE)
    content = models.ForeignKey(Content, on_delete= models.CASCADE, related_name= 'comments', null =True , blank= True)
    contentsToBook =models.ForeignKey(ContentToBook, on_delete= models.CASCADE, related_name = 'commentsBook', null = True, blank= True)
    #book = models.ForeignKey(Book, on_delete = models.CASCADE, related_name ='comments')
    def __str__(self):
        return self.message
    @property
    def like_count(self):
        """Number of likes attached to this comment."""
        return self.likes.all().count()
    class Meta:
        ordering = ['-created_at']
#공감
class LikeToContent(models.Model):
    """A user's like on either a Content post or a ContentToBook post."""
    creator = models.ForeignKey(user_models.User, on_delete=models.CASCADE, related_name='my_list')
    content = models.ForeignKey(Content, on_delete=models.CASCADE, related_name='likes', null=True, blank=True)
    contentsToBook = models.ForeignKey(ContentToBook, on_delete=models.CASCADE, related_name='likesBook', null=True, blank=True)

    def __str__(self):
        # BUG FIX: the original format string had two placeholders but only
        # one argument, so __str__ raised IndexError whenever it was rendered.
        return "Creator - {} Content - {}".format(
            self.creator.username, self.content or self.contentsToBook
        )
class LikeToComment(models.Model):
    """A user's like on a comment."""
    creator = models.ForeignKey(user_models.User, on_delete = models.CASCADE)
    comment = models.ForeignKey(Comment, on_delete = models.CASCADE , related_name ='likes')
    def __str__(self):
        return "Creator - {} , Content - {}".format(self.creator.username, self.comment.message)
#담기
class AddCart(TimeStampModel):
    """A book post saved ("added to cart") by a user."""
    creator = models.ForeignKey(user_models.User, on_delete = models.CASCADE)
    content = models.ForeignKey(ContentToBook, on_delete = models.CASCADE ,related_name= 'addcarts')
    def __str__(self):
        return "Creator - {} , Content - {}".format(self.creator.username, self.content.text)
    class Meta:
        ordering = ['-created_at']
#관심책
class InterestToBook(models.Model):
    """Marks a book as interesting to a user (a "favorite" relation)."""
    creator = models.ForeignKey(user_models.User ,on_delete = models.CASCADE)
    book = models.ForeignKey(Book, on_delete= models.CASCADE, related_name= 'interests')
    def __str__(self):
        return "{} interests {}".format(self.creator, self.book)
1837004 | <reponame>velikaBeba/khinsider-downloader
#!/bin/python3
from bs4 import BeautifulSoup
import concurrent.futures
import requests
import argparse
import os
import shutil
# ---- command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('link', help='link to the album on khinsider')
parser.add_argument('-d', '--delete',
                    dest='delete_dir', action='store_true',
                    help='delete album folder if it already exists')
parser.add_argument('-m', '--mp3',
                    dest='mp3', action='store_true',
                    help='downloads mp3s (default is flac)')
parser.add_argument('-e', '--enumerate',
                    dest='ordered', action='store_true',
                    help='number all the songs (format "## - name")')
parser.add_argument('-o', '--output-dir',
                    dest='output_dir', default='./',
                    help='directory to output to')
args = parser.parse_args()

# ---- fetch the album page and create the output folder ----------------------
album_page = BeautifulSoup(requests.get(args.link).content, features="lxml")
album_name = album_page.find('p', {'align': 'left'}).find('b').text
# BUG FIX: os.path.join instead of string concatenation, so an --output-dir
# given without a trailing slash no longer produces a mangled path.
album_dir = os.path.join(args.output_dir, album_name)
try:
    os.mkdir(album_dir)
except FileExistsError:
    if args.delete_dir:
        choice = 'y'
    else:
        choice = input("Folder {} already exists. Delete it? (y/n): "
                       .format(album_name))
    if choice == 'y':
        shutil.rmtree(album_dir)
        os.mkdir(album_dir)
    else:
        exit()
# downloads below write relative paths, so work inside the album folder
os.chdir(album_dir)

# ---- download album cover ---------------------------------------------------
cover = album_page.find_all(
    'a', {"target": "_blank"}, href=True)[-1].get('href')
with open('cover.jpg', 'wb') as out:
    out.write(requests.get(cover).content)

# ---- collect per-track page links and optional "NN - " name prefixes --------
website = 'https://downloads.khinsider.com'
links = [website + link.find('a', href=True).get('href')
         for link in album_page
         .find_all('td', {'class': 'playlistDownloadSong'})]
names = ["{:02} - ".format(i + 1) if args.ordered else ""
         for i in range(len(links))]

# ---- choose audio format (fall back to mp3 when no FLAC is offered) ---------
header = album_page.find('tr', {'id': 'songlist_header'}).text
audio_format = 1
file_extension = '.flac'
if args.mp3 or 'FLAC' not in header:
    audio_format = 0
    file_extension = '.mp3'
def download(name, link):
    """Download a single track from its khinsider song page.

    ``name`` is an optional "NN - " prefix; the actual title is scraped from
    the song page. Relies on the module-level ``audio_format`` index and
    ``file_extension`` chosen above, and on the CWD being the album folder.
    """
    page = BeautifulSoup(requests.get(link).content, features="lxml")
    # the styled anchors are the per-format download links; index selects mp3/flac
    link = page.find_all('a', {"style" : "color: #21363f;"},href=True)\
        [audio_format].get('href')
    # last left-aligned paragraph carries the "Title: ..." line
    name += page.find_all('p', {"align" : "left"})[-1]\
        .text\
        .splitlines()[-1]\
        .split(": ", 1)[-1]\
        + file_extension
    with open(name, 'wb') as output_file:
        data = requests.get(link).content
        output_file.write(data)
# fan out the downloads across worker threads (purely I/O bound)
with concurrent.futures.ThreadPoolExecutor() as executor:
    executor.map(download, names, links)
| StarcoderdataPython |
1761009 | <reponame>brunocorbetta/exerciciocursoemvideo
from random import randint
from time import sleep
def sorteia(list):
    """Append five random ints in [0, 100] to *list*, printing each one.

    NOTE(review): the parameter name shadows the built-in ``list``; renaming
    it would change the call signature, so it is kept but flagged here.
    The sleep() calls only pace the console output.
    """
    print('Sorteando 5 valores da lista ', end='')
    for c in range(0, 5):
        n = randint(0, 100)
        list.append(n)
        sleep(0.5)
        print(f'{n}', end=' ')
    print('Pronto')
def somapar(lista):
    """Print the sum of the even values found in *lista*."""
    soma = sum(valor for valor in lista if valor % 2 == 0)
    print(f'Somando os valores pares da {lista}, temos {soma}')
# build a random list, then report the sum of its even values
numeros = []
sorteia(numeros)
somapar(numeros)
| StarcoderdataPython |
4886268 | <filename>save_sim/save_df_old.py<gh_stars>0
#!/bin/sh /cvmfs/icecube.opensciencegrid.org/py2-v1/icetray-start
#METAPROJECT /data/user/jbourbeau/metaprojects/icerec/V05-00-00/build
from __future__ import division
import numpy as np
import pandas as pd
import time
import glob
import argparse
import os
from collections import defaultdict
import composition.support_functions.simfunctions as simfunctions
import composition.support_functions.paths as paths
from composition.support_functions.checkdir import checkdir
# from ShowerLLH_scripts.analysis.zfix import zfix
if __name__ == "__main__":
    # Setup global path names
    mypaths = paths.Paths()
    checkdir(mypaths.comp_data_dir)

    p = argparse.ArgumentParser(
        description='Runs extra modules over a given fileList')
    p.add_argument('-o', '--outfile', dest='outfile',
                   help='Output file')
    args = p.parse_args()

    # column name -> list of values, accumulated across all input files
    dataframe_dict = defaultdict(list)

    # Get simulation information
    t_sim = time.time()
    print('Loading simulation information...')
    file_list = sorted(glob.glob(mypaths.comp_data_dir +
                                 '/IT73_sim/files/sim_????.hdf5'))
    value_keys = ['IceTopMaxSignal',
                  'IceTopMaxSignalInEdge',
                  'IceTopMaxSignalString',
                  'IceTopNeighbourMaxSignal',
                  'InIce_charge',
                  'NChannels',
                  'max_charge_frac',
                  'NStations',
                  'StationDensity',
                  'IceTop_FractionContainment',
                  'InIce_FractionContainment',
                  'LineFit_InIce_FractionContainment']
    for f in file_list:
        print('\tWorking on {}'.format(f))
        sim_dict = {}
        store = pd.HDFStore(f)
        for key in value_keys:
            sim_dict[key] = store.select(key).value
        # Get MCPrimary information
        for key in ['x', 'y', 'energy', 'zenith', 'azimuth', 'type']:
            sim_dict['MC_{}'.format(key)] = store.select('MCPrimary')[key]
        # Get s125
        sim_dict['s125'] = store.select('LaputopParams')['s125']
        # Get ShowerPlane zenith reconstruction
        sim_dict['ShowerPlane_zenith'] = store.select('ShowerPlane').zenith
        # Add simulation set number and corresponding composition
        sim_num = os.path.splitext(f)[0].split('_')[-1]
        sim_dict['sim'] = np.array([sim_num] * len(store.select('MCPrimary')))
        sim_dict['MC_comp'] = np.array(
            [simfunctions.sim2comp(sim_num)] * len(store.select('MCPrimary')))
        store.close()
        for key in sim_dict.keys():
            dataframe_dict[key] += sim_dict[key].tolist()
    print('Time taken: {}'.format(time.time() - t_sim))
    # BUG FIX: per-file time previously divided by a hard-coded 4;
    # divide by the actual number of files (guarding against an empty glob)
    print('Time per file: {}\n'.format(
        (time.time() - t_sim) / max(len(file_list), 1)))

    # Get ShowerLLH reconstruction information
    t_LLH = time.time()
    print('Loading ShowerLLH reconstructions...')
    file_list = sorted(glob.glob(mypaths.llh_dir +
                                 '/IT73_sim/files/SimLLH_????_logdist.hdf5'))
    for f in file_list:
        print('\tWorking on {}'.format(f))
        LLH_dict = {}
        store = pd.HDFStore(f)
        # Get most-likely composition (per event: proton vs iron hypothesis)
        proton_maxLLH = store.select('ShowerLLHParams_proton').maxLLH
        iron_maxLLH = store.select('ShowerLLHParams_iron').maxLLH
        LLH_array = np.array([proton_maxLLH, iron_maxLLH]).T
        maxLLH_index = np.argmax(LLH_array, axis=1)
        showerLLH_proton = store.select('ShowerLLH_proton')
        showerLLH_iron = store.select('ShowerLLH_iron')
        LLH_dict['reco_exists'] = showerLLH_proton.exists.astype(bool)
        # Get ML energy
        energy_choices = [showerLLH_proton.energy.values,
                          showerLLH_iron.energy.values]
        LLH_dict['reco_energy'] = np.choose(maxLLH_index, energy_choices)
        # Get ML core position
        x_choices = [showerLLH_proton.x, showerLLH_iron.x]
        LLH_dict['reco_x'] = np.choose(maxLLH_index, x_choices)
        y_choices = [showerLLH_proton.y, showerLLH_iron.y]
        LLH_dict['reco_y'] = np.choose(maxLLH_index, y_choices)
        # Get ML core radius
        r_choices = [np.sqrt(showerLLH_proton.x**2 + showerLLH_proton.y**2),
                     np.sqrt(showerLLH_iron.x**2 + showerLLH_iron.y**2)]
        LLH_dict['reco_radius'] = np.choose(maxLLH_index, r_choices)
        # Get ML zenith
        zenith_choices = [showerLLH_proton.zenith, showerLLH_iron.zenith]
        LLH_dict['reco_zenith'] = np.choose(maxLLH_index, zenith_choices)
        # Get ShowerLLH containment information
        # NOTE(review): these two keys are overwritten by the hybrid values
        # below, so this first pass is dead work — likely the keys were meant
        # to differ; behavior kept as-is pending confirmation.
        IT_containment_choices = [store.select('ShowerLLH_IceTop_containment_proton').value,
                                  store.select('ShowerLLH_IceTop_containment_iron').value]
        LLH_dict['reco_IT_containment'] = np.choose(
            maxLLH_index, IT_containment_choices)
        InIce_containment_choices = [store.select('ShowerLLH_InIce_containment_proton').value,
                                     store.select('ShowerLLH_InIce_containment_iron').value]
        LLH_dict['reco_InIce_containment'] = np.choose(
            maxLLH_index, InIce_containment_choices)
        # Get ShowerLLH+lap hybrid containment information
        IT_containment_choices = [store.select('LLH-lap_IceTop_containment_proton').value,
                                  store.select('LLH-lap_IceTop_containment_iron').value]
        LLH_dict['reco_IT_containment'] = np.choose(
            maxLLH_index, IT_containment_choices)
        InIce_containment_choices = [store.select('LLH-lap_InIce_containment_proton').value,
                                     store.select('LLH-lap_InIce_containment_iron').value]
        LLH_dict['reco_InIce_containment'] = np.choose(
            maxLLH_index, InIce_containment_choices)
        # LLH_dict['reco_energy'] = 10**(np.log10(LLH_dict['reco_energy'])-zfix(np.pi-LLH_dict['reco_zenith']))
        store.close()
        for key in LLH_dict.keys():
            dataframe_dict[key] += LLH_dict[key].tolist()
    print('Time taken: {}'.format(time.time() - t_LLH))
    # BUG FIX: same hard-coded divisor of 4 replaced by the real file count
    print('Time per file: {}'.format(
        (time.time() - t_LLH) / max(len(file_list), 1)))

    # Convert value lists to arrays (faster than using np.append?)
    for key in dataframe_dict.keys():
        dataframe_dict[key] = np.asarray(dataframe_dict[key])
    df = pd.DataFrame.from_dict(dataframe_dict)
    df.to_hdf('{}/IT73_sim/sim_dataframe.hdf5'.format(mypaths.comp_data_dir),
              'dataframe', mode='w')
| StarcoderdataPython |
12827955 | from unittest import mock
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.tracking import MlflowClient
from mlflow.tracking.default_experiment.databricks_notebook_experiment_provider import (
DatabricksNotebookExperimentProvider,
DatabricksRepoNotebookExperimentProvider,
)
from mlflow.utils.mlflow_tags import MLFLOW_EXPERIMENT_SOURCE_TYPE, MLFLOW_EXPERIMENT_SOURCE_ID
def test_databricks_notebook_default_experiment_in_context():
    """in_context() should simply mirror the is-in-notebook check."""
    with mock.patch("mlflow.utils.databricks_utils.is_in_databricks_notebook") as in_notebook_mock:
        assert DatabricksNotebookExperimentProvider().in_context() == in_notebook_mock.return_value
def test_databricks_notebook_default_experiment_id():
    """get_experiment_id() should return the current notebook's id verbatim."""
    with mock.patch("mlflow.utils.databricks_utils.get_notebook_id") as patch_notebook_id:
        assert (
            DatabricksNotebookExperimentProvider().get_experiment_id()
            == patch_notebook_id.return_value
        )
def test_databricks_repo_notebook_default_experiment_in_context():
    """in_context() should be True inside a repo notebook and False outside."""
    with mock.patch(
        "mlflow.utils.databricks_utils.is_in_databricks_repo_notebook"
    ) as in_repo_notebook_mock:
        in_repo_notebook_mock.return_value = True
        assert DatabricksRepoNotebookExperimentProvider().in_context()
    with mock.patch(
        "mlflow.utils.databricks_utils.is_in_databricks_repo_notebook"
    ) as not_in_repo_notebook_mock:
        not_in_repo_notebook_mock.return_value = False
        assert not DatabricksRepoNotebookExperimentProvider().in_context()
def test_databricks_repo_notebook_default_experiment_gets_id_by_request():
    """A repo notebook should create a tagged experiment and use its id."""
    with mock.patch(
        "mlflow.utils.databricks_utils.get_notebook_id"
    ) as notebook_id_mock, mock.patch(
        "mlflow.utils.databricks_utils.get_notebook_path"
    ) as notebook_path_mock, mock.patch.object(
        MlflowClient, "create_experiment"
    ) as create_experiment_mock:
        notebook_id_mock.return_value = 1234
        notebook_path_mock.return_value = "/Repos/path"
        create_experiment_mock.return_value = "experiment_id"
        returned_id = DatabricksRepoNotebookExperimentProvider().get_experiment_id()
        assert returned_id == "experiment_id"
        # experiment must be created under the notebook path with source tags
        tags = {MLFLOW_EXPERIMENT_SOURCE_TYPE: "REPO_NOTEBOOK", MLFLOW_EXPERIMENT_SOURCE_ID: 1234}
        create_experiment_mock.assert_called_once_with("/Repos/path", None, tags)
def test_databricks_repo_notebook_default_experiment_uses_fallback_notebook_id():
    """If experiment creation is rejected, fall back to the raw notebook id."""
    with mock.patch(
        "mlflow.utils.databricks_utils.get_notebook_id"
    ) as notebook_id_mock, mock.patch(
        "mlflow.utils.databricks_utils.get_notebook_path"
    ) as notebook_path_mock, mock.patch.object(
        MlflowClient, "create_experiment"
    ) as create_experiment_mock:
        # reset the class-level cache so this test is order-independent
        DatabricksRepoNotebookExperimentProvider._resolved_repo_notebook_experiment_id = None
        notebook_id_mock.return_value = 1234
        notebook_path_mock.return_value = "/Repos/path"
        create_experiment_mock.side_effect = MlflowException(
            message="not enabled", error_code=INVALID_PARAMETER_VALUE
        )
        returned_id = DatabricksRepoNotebookExperimentProvider().get_experiment_id()
        assert returned_id == 1234
| StarcoderdataPython |
1859448 | from bidding.models import Bid
class BiddingInteractions:
    """Use-case layer for bid CRUD operations.

    Persists bids through the injected database repository and publishes
    accepted bids to the injected pub/sub repository.
    """

    def __init__(self, **repositories):
        """
        :param repositories: must contain ``database_repository`` and
            ``pubsub_repository``
        """
        self._database_repository = repositories["database_repository"]
        self._pubsub_repository = repositories["pubsub_repository"]

    def _publish_if_accepted(self, bid):
        # Shared by create/update: notify downstream consumers of accepted bids.
        if bid["bid_accepted"]:
            self._pubsub_repository.push(
                self._pubsub_repository.OFFER_ACCEPTED_TOPIC,
                bid,
            )

    def create_bid(self, data):
        """Persist a new bid from *data* and return it as a dict."""
        bid = Bid(
            item_id=data["item_id"],
            user_id=data["user_id"],
            price=data["price"],
            status=data["status"],
            bid_accepted=data["bid_accepted"],
            price_accepted=data["price_accepted"],
        )
        bid = self._database_repository.create_bid(bid).to_dict()
        self._publish_if_accepted(bid)
        return bid

    def get_bid(self, data):
        """Fetch the bid whose id is *data* (coerced to int) as a dict."""
        return self._database_repository.get_bid(int(data)).to_dict()

    def update_bid(self, id, data):
        """Update bid *id* with the fields in *data* and return it as a dict."""
        bid = self._database_repository.update_bid(
            int(id),
            data["price"],
            data["status"],
            data["bid_accepted"],
            data["price_accepted"],
        ).to_dict()
        self._publish_if_accepted(bid)
        return bid

    def delete_bid(self, d_id):
        """Delete bid *d_id* (coerced to int); returns the repository result."""
        return self._database_repository.delete_bid(int(d_id))
| StarcoderdataPython |
1853184 | <filename>c3lingo/admin.py
from django.contrib import admin
from .models import Language, Conference, Room, Talk, Translation, Translator, TranslatorSpeaks, Booth, Shift, ShiftAssignment
# Register all c3lingo models with the default admin site so they can be
# managed through the Django admin. A loop replaces ten repetitive calls.
for _model in (
    Language,
    Conference,
    Room,
    Talk,
    Translation,
    Translator,
    TranslatorSpeaks,
    Booth,
    Shift,
    ShiftAssignment,
):
    admin.site.register(_model)
6534708 | <reponame>nodeus/radioboss-telegram-bot
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Телеграм бот для связи c RadioBoss
# работает на hyperadio.retroscene.org -> @hyperadio_bot
from __future__ import print_function, unicode_literals
import logging
import os
import sys
import requests
import xmltodict
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import sqlite3
import datetime
from sqlite3 import Error
import configtb
__version__ = '0.0.1' # remember to bump the version on release
TOKEN = configtb.token # our bot's Telegram token
URL = configtb.URL # Telegram API URL
#PROXY_URL = 'socks5://172.16.31.10:1080' # a SOCKS proxy can be configured here
RB_PASS = configtb.rbPas # RadioBoss API password
RB_PORT = configtb.rbPort # RadioBoss port
ALBUM_ART_PATH = 'INSERT-HERE-PATH-TO-ALBUM-ARTWORK-FILE' # Example 'd:\\MYRADIO\\ALBUMART\\artwork.png' path to the album-art image file that RadioBoss exports (Albumart)
######################## bot message texts ##############################
TEXT_HELP = """
Send me some commands:
/np — Get info about current playing track
/like — Add current track to playlist on request
/plus — Raise current track rating
/minus — Drop current track rating
/dl — Download current track
/dln — Download track by number in current playlist
Example: «/dln 1» or «/dln 25 100»
/art — Download album art for current track
/last — Get info about 5 last played tracks
/time — Get timetable
/help — This help
The delay for commands processing can be up to 10 seconds, so be patient, please. Do not spam me!
Also I can convert some chiptunes, so upload it to me ;)
"""
# start message
TEXT_START = """
Hi! I am a RadioBoss bot from github.com/nodeus/radioboss-telegram-bot/ (ver {:s})
{:s}
""".format(__version__, TEXT_HELP)
# timetable text
TEXT_TIMETABLE = """
We broadcast 24 hours a day with some special music blocks:
08.00 - 08.30 msk XM tracked music
09.00 - 10.00 msk BitJam podcast
10.00 - 10.30 msk ZX Spectrum music
15.00 - 16.00 msk DEMOVIBES
17.00 - 17.30 msk ZX Spectrum music
18.00 - 18.30 msk XM tracked music
20.00 - 20.30 msk ZX Spectrum music
21.00 - 23.00 msk Music on your request
"""
# "now playing" message template (filled from the dict built by get_np())
NOWPLAYNG_TPL = """
github.com/nodeus/radioboss-telegram-bot/
Now playing: {t_casttitle!s}
Duration: {t_duration!s}. Play position: {mins!s} min {secs!s} sec
Next track: {nt_artist!s} — {nt_title!s} ({nt_duration!s})
Last played: {nt_lastplayed!s}
Current listeners: {t_listeners!s}
"""
# "track requested" message template
TRACK_REQUEST_TPL = """
\U00002764 Thanks {user_name}.
Track «{t_casttitle}» added to playlist on request.
Listen to this track from 21 to 23 msk this evening.
"""
# "track info" message template (filled from raw '@'-prefixed XML attributes)
TRACK_INFO_TPL = """
Time (msk+2): {@LASTPLAYED}
Track: {@ARTIST} - {@TITLE} - {@ALBUM}
Playlist item №{playlist_pos}
"""
# "rating changed" message template
RATE_TEXT_TPL = """
\U0001F44D Thanks {user_name}.
You {rate_str} the rating for «{t_casttitle}» track.
Current rating: {tag_rating} \U00002197
"""
# Enable logging: write to mylog.log and mirror every record to stdout.
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.INFO, filename='mylog.log')
logging.root.addHandler(logging.StreamHandler(sys.stdout))
logger = logging.getLogger(__name__)
def radio_query(**kwargs):
    """Send a command to the RadioBoss HTTP API and return the raw response.

    Keyword arguments become query-string parameters; the API password is
    appended automatically.  Raises requests.RequestException on network
    failures or timeout.
    """
    # command for the RadioBoss API
    params = dict(kwargs)
    params['pass'] = RB_PASS
    # A timeout keeps the bot from hanging forever when RadioBoss is
    # unreachable (requests.get blocks indefinitely without one).
    response = requests.get('http://hyperadio.ru:' + RB_PORT + '/',
                            params=params, timeout=30)
    logger.info('Request to radioboss API — %s: %s', kwargs.get('action'), response.status_code)
    return response
def get_username(update, context):
    """Return a display name for the user who sent *update*.

    Prefers the Telegram @username; falls back to first/last name.
    Telegram users frequently have no last_name (it is None), which made
    the original string concatenation raise TypeError — missing parts are
    now skipped.
    """
    user_name = update.message.from_user['username']
    if user_name is None:
        first = update.message.from_user['first_name'] or ''
        last = update.message.from_user['last_name'] or ''
        user_name = (first + ' ' + last).strip()
    return user_name
def _track_fields(prefix, track):
    """Flatten one <TRACK> XML element into ``{prefix+name: value}`` pairs.

    Covers the attribute set shared by the current/previous/next tracks
    (the '@LISTENERS' attribute exists only on the current track and is
    handled separately in get_np).
    """
    attr_map = {
        'artist': '@ARTIST', 'title': '@TITLE', 'album': '@ALBUM',
        'year': '@YEAR', 'genre': '@GENRE', 'comment': '@COMMENT',
        'filename': '@FILENAME', 'duration': '@DURATION',
        'playcount': '@PLAYCOUNT', 'lastplayed': '@LASTPLAYED',
        'intro': '@INTRO', 'outro': '@OUTRO', 'language': '@LANGUAGE',
        'f1': '@F1', 'f2': '@F2', 'f3': '@F3', 'f4': '@F4', 'f5': '@F5',
        'casttitle': '@ITEMTITLE',
    }
    return {prefix + name: track[attr] for name, attr in attr_map.items()}

def get_np():
    """Query RadioBoss ('playbackinfo') and return playback state as a dict.

    Keys are prefixed 't_' (current track), 'pt_' (previous), 'nt_' (next),
    plus playback/streaming fields — same keys as the original hand-written
    mapping, built via _track_fields to avoid ~90 lines of duplication.
    """
    r = radio_query(action='playbackinfo')
    info = xmltodict.parse(r.content)['Info']
    cur_track = info['CurrentTrack']['TRACK']
    playback = info['Playback']
    streaming = info['Streaming']
    result = {}
    result.update(_track_fields('t_', cur_track))
    # only the current track carries a listener count attribute
    result['t_listeners'] = cur_track['@LISTENERS']
    result.update(_track_fields('pt_', info['PrevTrack']['TRACK']))
    result.update(_track_fields('nt_', info['NextTrack']['TRACK']))
    result.update({
        'play_pos': playback['@pos'],
        'play_len': playback['@len'],
        'play_state': playback['@state'],
        'playlist_pos': playback['@playlistpos'],
        'play_streams': playback['@streams'],
        'listeners': streaming['@listeners'],
    })
    return result
def nowplay_string(nowpl):
    """Build the reply text for /np from a playbackinfo dict."""
    total_seconds = int(nowpl['play_pos']) // 1000
    mins, secs = divmod(total_seconds, 60)
    return NOWPLAYNG_TPL.format(mins=mins, secs=secs, **nowpl)
def request_song(user_name):
    """Queue the currently playing track in RadioBoss' request playlist."""
    current_file = get_np()['t_filename']
    radio_query(action='songrequest', filename=current_file, message=user_name)
    return None
def start(update, context):
    """Handle /start: greet the user and log the first contact."""
    update.message.reply_text(TEXT_START)
    logger.info('--- %s start interaction with bot ---',
                get_username(update, context))
def helpme(update, context):
    """Handle /help: send the command reference."""
    update.message.reply_text(TEXT_HELP)
    logger.info('%s request help', get_username(update, context))
def dl_track(update, context):
    """Handle /dl: upload the currently playing track as an audio message."""
    # TODO: keep a db of already-uploaded files and resend by telegram file id
    nowpl = get_np()
    title = str(nowpl['t_casttitle'])
    filename = nowpl['t_filename']
    context.bot.send_chat_action(chat_id=update.message.chat_id, action=telegram.ChatAction.UPLOAD_DOCUMENT)
    # 'with' guarantees the handle is closed even if the upload fails
    # (the original open() leaked the file descriptor).
    with open(filename, 'rb') as audio_file:
        context.bot.send_audio(timeout=120, caption=title, chat_id=update.message.chat_id, audio=audio_file)
    user_name = get_username(update, context)
    logger.info('%s download %s', user_name, filename)
def dl_number(update, context):
    """Handle /dln: upload tracks by their position in the current playlist.

    Accepts several numbers after the command ("/dln 1 2 3"); commas and
    surrounding blanks are tolerated.
    """
    # TODO: keep a db of already-uploaded files and resend by telegram file id
    user_name = get_username(update, context)
    if not context.args:
        update.message.reply_text('Please, type track numbers after command.\nExample: «/dln 1 2 3»')
        logger.info('%s use /dln command without args.', user_name)
        return
    for raw_arg in context.args:
        # str.strip returns a new string; the original discarded the result,
        # so arguments like "25," were silently rejected.
        track_number = raw_arg.strip(", ")
        if track_number.isdigit():
            response = radio_query(action='trackinfo', pos=track_number)
            try:
                trinfo = xmltodict.parse(response.content)
                track = trinfo['Info']['Track']['TRACK']
                file_name = track['@FILENAME']
                track_title = track['@ARTIST'] + ' — ' + track['@TITLE']
                context.bot.send_chat_action(chat_id=update.message.chat_id, action=telegram.ChatAction.UPLOAD_DOCUMENT)
                # close the handle even if the upload fails (was leaked before)
                with open(file_name, 'rb') as doc_file:
                    context.bot.send_document(timeout=120, filename=file_name, caption=track_title,
                                              chat_id=update.message.chat_id, document=doc_file)
                logger.info('%s download track №%s file: %s', user_name, track_number, file_name)
            except Exception:
                # the original logged with mismatched %-args and referenced
                # file_name, which may be unbound when parsing failed
                logger.exception('Wrong track number %s', track_number)
                update.message.reply_text('Wrong track number {!s}. Please try again.'.format(track_number))
        else:
            update.message.reply_text('{!s} — isn`t number of track i know...'.format(track_number))
            logger.info('%s type wrong track number — %s', user_name, track_number)
def dl_art(update, context):
    """Handle /art: send the current track's album art if the file exists."""
    user_name = get_username(update, context)
    nowpl = get_np()
    if os.path.exists(ALBUM_ART_PATH):
        # close the image handle after the upload (was leaked before)
        with open(ALBUM_ART_PATH, 'rb') as art_file:
            context.bot.send_photo(chat_id=update.message.chat_id, photo=art_file)
        logger.info('%s download %s album art.', user_name, nowpl['t_filename'])
    else:
        update.message.reply_text('Sorry, no album art for this track.')
        logger.info('%s request %s album art, but it is not found.', user_name, nowpl['t_filename'])
def np(update, context):
    """Handle /np: send the now-playing summary from RadioBoss."""
    playback = get_np()
    update.message.reply_text(nowplay_string(playback))
    logger.info('%s request Nowplay for %s',
                get_username(update, context), playback['t_casttitle'])
def like(update, context):
    """Handle /like: request the current track and confirm in the chat."""
    requester = get_username(update, context)
    playback = get_np()
    request_song(requester)
    update.message.reply_text(TRACK_REQUEST_TPL.format(user_name=requester, **playback))
    logger.info('%s liked %s', requester, playback['t_casttitle'])
def timetable(update, context):
    """Handle /time: send the broadcast timetable."""
    update.message.reply_text(TEXT_TIMETABLE)
    logger.info('%s request timetable', get_username(update, context))
def last(update, context):
    """Handle /last: report up to five previously played playlist items."""
    user_name = get_username(update, context)
    playback = get_np()
    current_pos = int(playback['playlist_pos'])
    # Walk backwards from the current playlist position, at most 5 entries.
    for offset in range(min(current_pos, 5)):
        response = radio_query(action='trackinfo', pos=str(current_pos - offset))
        track_info = xmltodict.parse(response.content)['Info']['Track']['TRACK']
        update.message.reply_text(TRACK_INFO_TPL.format(playlist_pos=current_pos - offset, **track_info))
    logger.info('%s request last played', user_name)
    update.message.reply_text('Nowplay: ' + playback['t_casttitle'] + '\nPlaylist item №: ' + playback['playlist_pos'])
def error(update, context):
    """Log dispatcher errors and notify the user when a chat is available."""
    logger.warning('Update "%s" caused error "%s"', update, context.error)
    # In python-telegram-bot, 'update' is None for errors raised outside a
    # message context (and message can be None for non-message updates);
    # replying unconditionally raised a secondary AttributeError.
    if update is not None and update.message is not None:
        update.message.reply_text('Ooops, something went wrong. Sorry...')
# sqlite database connectivity probe
def sql_connection():
    """Try to open rating.db, log the outcome, and close the connection."""
    con = None
    try:
        con = sqlite3.connect('rating.db')
        print ("Connection is established")
        logger.info('Connection is established')
    except Error as exc:
        # report the actual error instance (printing the Error *class*,
        # as before, carried no information)
        print(exc)
        logger.info(exc)
    finally:
        # only close when connect() succeeded — the original referenced an
        # unbound 'con' here and raised NameError on connection failure
        if con is not None:
            con.close()
            logger.info('connection is closed')
def sql_insert(con, entities):
    """Insert one vote row: (user id, user name, track path, vote date)."""
    con.cursor().execute(
        'INSERT INTO rating (userid, username, ratedtrack, ratedate) VALUES(?,?,?,?)',
        entities,
    )
    con.commit()
def sql_fetch(con, user_id, rated_track):
    """Return (ratedate,) for a user's vote on a track, or None if absent."""
    query = 'SELECT ratedate FROM rating WHERE userid = :uid AND ratedtrack = :rtrack'
    params = {'uid': user_id, 'rtrack': rated_track}
    return con.cursor().execute(query, params).fetchone()
def change_rating(update, context):
    """Apply context.direction (+1 or -1) to the current track's rating tag.

    Reads the tag via the RadioBoss 'readtag' API, clamps the rating to
    [0, 10], writes it back with 'writetag', and records the vote in
    rating.db so each user can vote only once per track.
    """
    try:
        # ask the hyperadio server for current playback info
        nowpl = get_np()
        # name of the requesting user
        user_name = get_username(update, context)
        # id of the requesting user
        user_id = update.message.from_user['id']
        # today's date (stored alongside the vote)
        rate_date = datetime.date.today()
        tagxml = radio_query(action='readtag', fn=nowpl['t_filename'])
        tagdoc = xmltodict.parse(tagxml.content)
        file = tagdoc['TagInfo']['File']
        taginfo = {'tag_filename': file['@FN'],
            'tag_duration': file['@Duration'],
            'tag_artist': file['@Artist'],
            'tag_title': file['@Title'],
            'tag_album': file['@Album'],
            'tag_year': file['@Year'],
            'tag_genre': file['@Genre'],
            'tag_comment': file['@Comment'],
            'tag_bpm': file['@BPM'],
            'tag_rating': file['@Rating'],
            'tag_playcount': file['@Playcount'],
            'tag_lastplayed': file['@LastPlayed']}
        # full path of the requested file
        rated_track = taginfo['tag_filename']
        # current rating of the requested file
        rating = int(taginfo['tag_rating'])
        if context.direction == 1 and rating == 10:
            update.message.reply_text('This track has the highest rating — 10.')
            return
        elif context.direction == -1 and rating == 0:
            update.message.reply_text('This track has the lowest rating — 0.')
            return
        # connect to the votes database
        # NOTE(review): 'con' is never closed, and sql_connection() below only
        # opens and closes a second, unused connection — confirm intent.
        con = sqlite3.connect('rating.db')
        sql_connection()
        # look up a row matching the current user id and file name:
        # None means no previous vote, a date means the user already voted
        get_date = sql_fetch(con,user_id,rated_track)
        if get_date is None:
            # clamp the new rating to the valid range [0, 10]
            rating = max(min(rating + context.direction, 10), 0)
            taginfo['tag_rating'] = str(rating)
            rate_str = 'increased' if context.direction == 1 else 'dropped'
            update.message.reply_text(RATE_TEXT_TPL.format(user_name=user_name, rate_str=rate_str, tag_rating=rating, **nowpl))
            # write the new rating back into the parsed XML and push it
            file['@Rating'] = str(rating)
            newxml = xmltodict.unparse(tagdoc)
            radio_query(action='writetag', fn=taginfo['tag_filename'], data=newxml)
            logger.info('%s %s the rating for %s — %s to %s', user_name, rate_str, taginfo['tag_artist'], taginfo['tag_title'], rating)
            # row to be written to the database
            entities = (user_id, user_name, rated_track, rate_date)
            # store the vote
            sql_insert(con,entities)
            return
        else:
            update.message.reply_text('Sorry, you can not vote for this track twice...\nRating for «' + taginfo['tag_artist'] + ' – ' + taginfo['tag_title'] + '» has been changed by you at: ' + get_date[0])
            logger.info('%s tried to voting twice for %s – %s.', user_name, taginfo['tag_artist'], taginfo['tag_title'] )
            return
    except Exception as e:
        logger.exception(e)
def ratingplus(update, context):
    """Handle /plus: bump the current track's rating up by one step."""
    context.direction = +1
    return change_rating(update, context)
def ratingminus(update, context):
    """Handle /minus: drop the current track's rating down by one step."""
    context.direction = -1
    return change_rating(update, context)
def main():
    """Wire up all command handlers and start long polling."""
    # uncomment the block below if a proxy is needed
    #if PROXY_URL:
    #    request_kwargs = {'proxy_url': PROXY_URL}
    #else:
    # NOTE(review): request_kwargs is not passed to Updater below — confirm intent.
    request_kwargs = {}
    updater = Updater(configtb.token, use_context=True)
    dp = updater.dispatcher
    # commands handled by the bot
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("help", helpme))
    dp.add_handler(CommandHandler("like", like))
    dp.add_handler(CommandHandler("plus", ratingplus))
    dp.add_handler(CommandHandler("minus", ratingminus))
    dp.add_handler(CommandHandler("np", np))
    dp.add_handler(CommandHandler("dl", dl_track))
    dp.add_handler(CommandHandler("dln", dl_number, pass_args=True))
    dp.add_handler(CommandHandler("art", dl_art))
    dp.add_handler(CommandHandler("last", last))
    dp.add_handler(CommandHandler("time", timetable))
    # error logging
    dp.add_error_handler(error)
    # start the bot
    updater.start_polling(poll_interval=2.0, timeout=10000)
    updater.idle()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
from datetime import timedelta
import t
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import logging
from gunicorn.config import Config
from gunicorn.instrument.statsd import Statsd
class TestException(Exception): pass
class MockSocket(object):
    """Stand-in for a UDP socket that records every datagram sent."""

    def __init__(self, failp):
        # failp=True makes every send() raise, for failure-path tests
        self.failp = failp
        # sent messages, kept for later inspection by the tests
        self.msgs = []

    def send(self, msg):
        if not self.failp:
            self.msgs.append(msg)
            return
        raise TestException("Should not interrupt the logger")

    def reset(self):
        self.msgs = []
class MockResponse(object):
    """Minimal HTTP-response stand-in exposing only the status line."""

    def __init__(self, status):
        self.status = status
def test_statsd_fail():
    """A failing UDP socket must never break any log level."""
    statsd_logger = Statsd(Config())
    statsd_logger.sock = MockSocket(True)
    for log in (statsd_logger.info, statsd_logger.debug,
                statsd_logger.critical, statsd_logger.error,
                statsd_logger.warning, statsd_logger.exception):
        log("No impact on logging")
def test_instrument():
    """Statsd logger emits both log records and statsd datagrams."""
    statsd_logger = Statsd(Config())
    # Capture emitted log records in a string buffer.
    captured = StringIO()
    statsd_logger.error_log.addHandler(logging.StreamHandler(captured))
    statsd_logger.sock = MockSocket(False)

    # A regular message is logged and produces a gauge datagram.
    statsd_logger.info("Blah", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666})
    t.eq(statsd_logger.sock.msgs[0], "gunicorn.test:666|g")
    t.eq(captured.getvalue(), "Blah\n")
    statsd_logger.sock.reset()

    # An empty message sends metrics only; the log stays unchanged.
    statsd_logger.info("", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666})
    t.eq(statsd_logger.sock.msgs[0], "gunicorn.test:666|g")
    t.eq(captured.getvalue(), "Blah\n")
    statsd_logger.sock.reset()

    # Debug level supports metrics as well.
    statsd_logger.debug("", extra={"mtype": "gauge", "metric": "gunicorn.debug", "value": 667})
    t.eq(statsd_logger.sock.msgs[0], "gunicorn.debug:667|g")
    t.eq(captured.getvalue(), "Blah\n")
    statsd_logger.sock.reset()

    # Critical messages bump a counter.
    statsd_logger.critical("Boom")
    t.eq(statsd_logger.sock.msgs[0], "gunicorn.log.critical:1|c|@1.0")
    statsd_logger.sock.reset()

    # Access logging emits duration, request count and status counters.
    statsd_logger.access(MockResponse("200 OK"), None, {}, timedelta(seconds=7))
    t.eq(statsd_logger.sock.msgs[0], "gunicorn.request.duration:7000.0|ms")
    t.eq(statsd_logger.sock.msgs[1], "gunicorn.requests:1|c|@1.0")
    t.eq(statsd_logger.sock.msgs[2], "gunicorn.request.status.200:1|c|@1.0")
def test_prefix():
    """statsd_prefix is prepended to every metric name."""
    cfg = Config()
    cfg.set("statsd_prefix", "test.")
    statsd_logger = Statsd(cfg)
    statsd_logger.sock = MockSocket(False)
    statsd_logger.info("Blah", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666})
    t.eq(statsd_logger.sock.msgs[0], "test.gunicorn.test:666|g")
def test_prefix_no_dot():
    """A prefix without a trailing dot gets one added."""
    cfg = Config()
    cfg.set("statsd_prefix", "test")
    statsd_logger = Statsd(cfg)
    statsd_logger.sock = MockSocket(False)
    statsd_logger.info("Blah", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666})
    t.eq(statsd_logger.sock.msgs[0], "test.gunicorn.test:666|g")
def test_prefix_multiple_dots():
    """Extra trailing dots on the prefix are collapsed to one."""
    cfg = Config()
    cfg.set("statsd_prefix", "test...")
    statsd_logger = Statsd(cfg)
    statsd_logger.sock = MockSocket(False)
    statsd_logger.info("Blah", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666})
    t.eq(statsd_logger.sock.msgs[0], "test.gunicorn.test:666|g")
def test_prefix_nested():
    """Dotted (nested) prefixes are preserved as-is."""
    cfg = Config()
    cfg.set("statsd_prefix", "test.asdf.")
    statsd_logger = Statsd(cfg)
    statsd_logger.sock = MockSocket(False)
    statsd_logger.info("Blah", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666})
    t.eq(statsd_logger.sock.msgs[0], "test.asdf.gunicorn.test:666|g")
| StarcoderdataPython |
"""Challenge is to create a basic calculator that will add any two numbers together incl decimals"""
# Read two numbers (decimals allowed) and print their sum.
first = input("Enter your first number: ")
second = input("Enter your second number: ")
print(float(first) + float(second))
| StarcoderdataPython |
6604639 | <reponame>SkanderGar/QuantMacro
import numpy as np
from scipy.stats import norm
from numpy import vectorize
@vectorize
def U1(C, C_):
    """Quadratic utility around the bliss point C_; -inf for C <= 0."""
    if C > 0:
        return -0.5 * (C - C_) ** 2
    return -np.inf
@vectorize
def U2(C, S):
    """CRRA utility with risk aversion S; -inf for C <= 0."""
    if C > 0:
        return (C ** (1 - S) - 1) / (1 - S)
    return -np.inf
class agent1:
    """Heterogeneous-agent (Aiyagari-style) model with a two-state income
    Markov chain, value-function iteration on an asset grid, an endogenous
    stationary distribution, and a bisection-like search for the interest
    rate that clears the capital market.
    """
    def __init__(self, N_a, Mu_y = 1, sig_y=0.5, gamma_y=0.7, T=45, N_s=2, order = 7, delta = 0.015, theta=0.68, rho = 0.06, Sig = 5, C_ = 1 , U2 = 1, B=0):
        # N_a: number of asset grid points; N_s: number of income states;
        # order: number of Chebyshev basis functions; B=0 allows borrowing.
        self.theta = theta
        self.delta = delta
        self.order = order
        self.T = T
        self.gamma_y = gamma_y
        self.sig_y = sig_y
        self.beta = 1/(1+rho)
        self.Sig = Sig
        self.C_ = C_
        # NOTE(review): the U2 parameter is ignored — self.U2 is always 1,
        # so update_chi always uses the CRRA utility; confirm intent.
        self.U2 = 1
        self.N_s = N_s
        self.N_a = N_a
        self.B = B
        # income transition matrix and income grid from Tauchen-style discretization
        self.Tr, self.Y_grid_s = self.markov_Tr(self.N_s, Mu_y = Mu_y, Sig_y = self.sig_y, gamma = self.gamma_y)
        # transition rows tiled over the asset grid (low / high current state)
        self.Tr_l = self.Tr[:,0]
        self.Tr_l = np.tile(self.Tr_l, (N_a,1))
        self.Tr_h = self.Tr[:,1]
        self.Tr_h = np.tile(self.Tr_h, (N_a,1))
        # Chebyshev polynomials built via the recurrence T_n = 2x T_{n-1} - T_{n-2}
        func = []
        Phi1 = np.vectorize(lambda x: 1)
        Phi2 = np.vectorize(lambda x: x)
        func.append(Phi1)
        func.append(Phi2)
        if self.order>= 2:
            for i in range(2,self.order):
                f = np.vectorize(lambda x, n=i: 2*func[n-1](x)*x - func[n-2](x))
                func.append(f)
        self.func = func
    def markov_Tr(self, N_s, Mu_y = 1, Sig_y = 0.5, gamma=0.7, m=1):
        """Discretize the AR(1) income process into an N_s-state Markov chain
        (Tauchen method); returns (transition matrix, income grid)."""
        rho = gamma
        Sig_eps = Sig_y*((1 -rho**2)**(1/2))
        max_y = Mu_y + m*Sig_y
        min_y = Mu_y - m*Sig_y
        Y_grid = np.linspace(min_y, max_y, N_s)
        Mu = Mu_y*(1-rho)
        w = np.abs(max_y-min_y)/(N_s-1)
        Tr = np.zeros((N_s,N_s))
        if Sig_y == 0:
            # degenerate (no-risk) case: identity transition
            Tr = np.eye(N_s)
        else:
            for i in range(N_s):
                for j in range(1,N_s-1):
                    Tr[i,j] = norm.cdf((Y_grid[j] - Mu -rho*Y_grid[i] + w/2)/Sig_eps ) - norm.cdf((Y_grid[j] - Mu -rho*Y_grid[i]-w/2)/Sig_eps )
                Tr[i,0] = norm.cdf((Y_grid[0] - Mu -rho*Y_grid[i]+w/2)/Sig_eps )
                Tr[i,N_s-1] = 1 - norm.cdf((Y_grid[N_s-1] - Mu -rho*Y_grid[i]-w/2)/Sig_eps)
        return Tr, Y_grid
    def select_node(self, num, grid):
        """Pick `num` roughly evenly spaced values from `grid` (endpoints included)."""
        n = len(grid)
        element = (n-1)/(num-1)
        values = []
        for i in range(num):
            index = int(np.ceil(element*i))
            value = grid[index]
            values.append(value)
        return values
    def cheby_interp(self, x, f_x, nodes=20):
        """Least-squares Chebyshev coefficients fitting f_x on x,
        using `nodes` subsampled collocation points; x is mapped to [-1, 1]."""
        cheb_x = self.select_node(nodes, x)
        cheb_f_x = self.select_node(nodes, f_x)
        max_x = max(cheb_x)
        min_x = min(cheb_x)
        PHI = []
        for i in range(len(self.func)):
            phi = self.func[i](2*(cheb_x-min_x)/(max_x-min_x) - 1)
            PHI.append(phi)
        PHI = np.array(PHI).T
        # normal equations: theta = (PHI'PHI)^{-1} PHI' f
        theta = np.linalg.inv(PHI.T@PHI)@PHI.T@cheb_f_x
        return theta
    def T_endo(self, ga_all):#ga_all need to contain both low and high state
        """Build the endogenous transition matrix over (asset, income) pairs
        implied by the asset policy indices in ga_all (one column per state)."""
        n,c = ga_all.shape
        Tr = self.Tr
        PHI = []
        PI = []
        o = np.zeros((c,c))
        O = np.zeros((n,n))
        PI = np.zeros((c*n,c*n))
        One = np.ones((n,n))
        Mat = []
        # exogenous income-transition part, expanded to the full state space
        for i in range(c):
            for j in range(c):
                mat = o.copy()
                mat[i,j]=1
                Mat.append(mat)
                pi = np.kron(mat,One*Tr[i,j])
                PI = PI+pi
        PI = PI.T
        PHI = []
        # indicator matrices: row j has a 1 in the column of the chosen asset
        for i in range(c):
            pos = ga_all[:,i]
            phi = O.copy()
            for j in range(n):
                phi[j,pos[j]]=1
            PHI.append(phi)
        Endo = np.zeros((c*n,c*n))
        k = 0
        for j in range(c): # because it needs to be PHI0.T twice then PHI1.T
            for i in range(c):
                endo = np.kron(Mat[k],PHI[j].T)
                Endo = Endo + endo
                k=k+1
        # elementwise product combines exogenous and endogenous transitions
        Tendo = PI*Endo
        return Tendo
    def Inv_dist(self, ga_all, Tol=10**(-3)):
        """Stationary distribution of T_endo(ga_all) by power iteration."""
        Tendo = self.T_endo(ga_all)
        Pold = np.ones(len(Tendo))/len(Tendo)
        err = 1
        while err>Tol:
            Pnew = Tendo@Pold
            err = np.linalg.norm(Pnew-Pold)/np.linalg.norm(Pold)
            Pold = Pnew
        return Pold
    def update_chi(self, C, V):
        """Return flow utility plus discounted expected continuation value
        for every (state, asset-choice) pair; V has one column per income state."""
        Vl = V[:,0]
        Vh = V[:,1]
        E_Vl = self.Tr_l[:,0]*Vl + self.Tr_l[:,1]*Vh
        E_Vh = self.Tr_h[:,0]*Vl + self.Tr_h[:,1]*Vh
        E_V = np.vstack((E_Vl, E_Vh))
        # V is a matrix
        if self.U2 == 1:
            Chi = U2(C, self.Sig) + self.beta*np.tile(E_V, (len(self.grid_a),1))
        else:
            # NOTE(review): U1's second argument is the bliss point C_, but
            # self.Sig is passed here — possibly should be self.C_; confirm.
            Chi = U1(C, self.Sig) + self.beta*np.tile(E_V, (len(self.grid_a),1))
        return Chi
    def update_V(self, Vold, C, ret = 0):
        """One Bellman step: maximize Chi over asset choices.

        Returns the updated value matrix; with ret=1 also the asset policy,
        consumption policy, and argmax index matrix.
        """
        Chi = self.update_chi(C, Vold)
        argm_pos = np.argmax(Chi, axis=1)
        V_new = []
        ga = []
        gc = []
        for i, idx in enumerate(list(argm_pos)):
            v = Chi[i,idx]
            g1 = self.mesh_ap[i,idx]
            g2 = C[i,idx]
            V_new.append(v)
            ga.append(g1)
            gc.append(g2)
        V_new = np.array(V_new)
        V_new = np.reshape(V_new, (len(self.grid_a),len(self.Y_grid_s)))
        ga = np.array(ga)
        ga = np.reshape(ga, (len(self.grid_a),len(self.Y_grid_s)))
        gc = np.array(gc)
        gc = np.reshape(gc, (len(self.grid_a),len(self.Y_grid_s)))
        if ret == 1:
            pos_resh = np.reshape(argm_pos,(len(self.grid_a),len(self.Y_grid_s)))
            return V_new, ga, gc, pos_resh
        elif ret == 0:
            return V_new
    def problem(self, start = None, Tol = 10**(-6), ret2 = 0):
        """Value-function iteration until sup-norm convergence.

        Returns (policy indices, asset policy) by default, or
        (V, asset policy, consumption policy) when ret2 != 0.
        """
        if start == None:
            V_start = np.zeros((len(self.grid_a), len(self.Y_grid_s)))
        else:
            V_start = start
        err = 1
        j = 0
        while err>Tol:
            V_new = self.update_V(V_start, self.C)
            err = np.max(np.abs(V_start - V_new))
            V_start = V_new
            if j%100==0:
                # progress report every 100 iterations
                print(' iteration value', j)
                print(' error value', err)
            j = j+1
        V_new, ga, gc, pos = self.update_V(V_start, self.C, ret = 1)
        if ret2 == 0:
            return pos, ga
        else:
            return V_new, ga, gc
    def Interest_update(self, num_r = 10, r_min=0.001, r_max=0.05, maxiter=20, Tol = 0.01, pas = 0.2): #r_min can't be 0 because of the lower bound
        """Search for the market-clearing interest rate by damped bisection;
        returns a dict with the equilibrium value function, policies,
        capital, smoothed asset distributions, interest rate and excess demand.
        """
        #r_grid = np.linspace(r_min, r_max, num_r)
        #Old_pos = np.ceil(num_r/2)
        r_up = r_max
        r_down = r_min
        r_old = (r_up+r_down)/2
        ### do something like when sign changes stop
        #Comp = 0
        j = 0
        while True:
            if j >maxiter:
                print('############# Warning ! ################')
                print('##### Maximum number of iterations #####')
                print('############# Warning ! ################')
                V_new, ga, gc = self.problem(ret2=1)
                break
            ######## when I redefine r I need to also redefine the variables that are
            ## dependent
            r = r_old
            self.r = r
            # asset-grid bounds implied by the natural borrowing limit
            max_a = self.Y_grid_s[-1]/self.r
            if self.B==0:
                min_a = -(self.Y_grid_s[0]/self.r)*0.98
            else:
                min_a = 0
            self.K_d = ((1-self.theta)/self.r)**(1/self.theta)#because inelastic supply of L_s
            self.w = self.theta*(self.K_d)**(1-self.theta)
            self.grid_a = np.linspace(min_a, max_a, self.N_a)
            self.Y_grid = np.tile(self.Y_grid_s, (len(self.grid_a),1)).T
            O = np.ones((len(self.Y_grid_s),len(self.grid_a)))
            self.grid_a.shape = len(self.grid_a),1
            self.mesh_a = np.kron(self.grid_a,O)
            self.mesh_Y = np.tile(self.Y_grid, (len(self.grid_a),1))
            self.grid_a.shape = len(self.grid_a),
            self.mesh_ap = np.tile(self.grid_a, (len(self.mesh_Y),1))
            # consumption implied by each (asset, income, asset-choice) triple
            self.C = self.mesh_a*(1+self.r-self.delta) + self.w*self.mesh_Y - self.mesh_ap
            #########
            argm_pos, ga = self.problem()
            dist = self.Inv_dist(argm_pos)
            n, c = argm_pos.shape###
            ga_endo = np.reshape(ga.T,(n*c,))##after checking reshape I decided to transpose
            # aggregate asset supply minus capital demand
            Excess = dist@ga_endo
            Excess = Excess - self.K_d # for market clearing
            if np.abs(Excess)<Tol:
                V_new, ga, gc = self.problem(ret2=1)
                break
            # damped update: move r toward the bracketing bound (pas = damping)
            if Excess>=0:
                r_new = pas*r_old + (1-pas)*r_down
                r_up = r_old
            elif Excess<0:
                r_new = pas*r_old + (1-pas)*r_up
                r_down = r_old
            r_old = r_new
            print('iteration:',j)
            print('Excess:',Excess)
            print('pos:',self.r)
            j = j+1
        # smooth each state's stationary distribution with the Chebyshev fit,
        # shift to be non-negative and normalize by its maximum
        dist_l = dist[:self.N_a]
        theta_l = self.cheby_interp(self.grid_a, dist_l)
        dist_h = dist[self.N_a:]
        theta_h = self.cheby_interp(self.grid_a, dist_h)
        interp_dist_l = np.vectorize(lambda x: sum(theta_l[i]*self.func[i](2*(x-min_a)/(max_a-min_a) - 1) for i in range(len(self.func))))
        interp_dist_h = np.vectorize(lambda x: sum(theta_h[i]*self.func[i](2*(x-min_a)/(max_a-min_a) - 1) for i in range(len(self.func))))
        dist_s_l = interp_dist_l(self.grid_a)
        dist_s_l = dist_s_l + np.abs(min(dist_s_l))
        dist_s_l = dist_s_l/np.max(dist_s_l)
        dist_s_h = interp_dist_h(self.grid_a)
        dist_s_h = dist_s_h + np.abs(min(dist_s_h))
        dist_s_h = dist_s_h/np.max(dist_s_h)
        Structure = {}
        Structure['V'] = V_new
        Structure['ga'] = ga
        Structure['gc'] = gc
        Structure['Capital'] = self.K_d
        Structure['Saving Rate'] = self.K_d/self.K_d**(1-self.theta)
        Structure['smoothed_dist_l'] = dist_s_l
        Structure['smoothed_dist_h'] = dist_s_h
        Structure['interest'] = r
        Structure['Excess'] = Excess
        return Structure
| StarcoderdataPython |
import torch
from torch_sparse import SparseTensor
from torch_geometric.nn import FiLMConv
def test_film_conv():
    """Exercise FiLMConv on dense edge_index and SparseTensor inputs,
    with and without relation types, for both homogeneous and bipartite
    node features, checking TorchScript (jittable) parity each time."""
    x1 = torch.randn(4, 4)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]])
    edge_type = torch.tensor([0, 1, 1, 0, 0, 1])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, value=edge_type, sparse_sizes=(4, 4))
    # single relation, homogeneous features
    conv = FiLMConv(4, 32)
    assert conv.__repr__() == 'FiLMConv(4, 32, num_relations=1)'
    out1 = conv(x1, edge_index)
    assert out1.size() == (4, 32)
    # sparse adjacency (edge values dropped) must match the dense result
    assert conv(x1, adj.t().set_value(None)).tolist() == out1.tolist()
    t = '(Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index).tolist() == out1.tolist()
    t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, adj.t().set_value(None)).tolist() == out1.tolist()
    # two relations, homogeneous features
    conv = FiLMConv(4, 32, num_relations=2)
    assert conv.__repr__() == 'FiLMConv(4, 32, num_relations=2)'
    out1 = conv(x1, edge_index, edge_type)
    assert out1.size() == (4, 32)
    assert conv(x1, adj.t()).tolist() == out1.tolist()
    t = '(Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index, edge_type).tolist() == out1.tolist()
    t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, adj.t()).tolist() == out1.tolist()
    # bipartite case: 4 source nodes, 2 destination nodes
    adj = adj.sparse_resize((4, 2))
    conv = FiLMConv((4, 16), 32)
    assert conv.__repr__() == 'FiLMConv((4, 16), 32, num_relations=1)'
    out1 = conv((x1, x2), edge_index)
    assert out1.size() == (2, 32)
    assert conv((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist()
    t = '(PairTensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), edge_index).tolist() == out1.tolist()
    t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist()
    # bipartite case with two relations
    conv = FiLMConv((4, 16), 32, num_relations=2)
    assert conv.__repr__() == 'FiLMConv((4, 16), 32, num_relations=2)'
    out1 = conv((x1, x2), edge_index, edge_type)
    assert out1.size() == (2, 32)
    assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
    t = '(PairTensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), edge_index, edge_type).tolist() == out1.tolist()
    t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
| StarcoderdataPython |
<filename>src/gamma_5_gev_normal/job.py
#!/usr/bin/env python
import sys, string, os, re ;
# This script depends on Gleam package
# It must be run whenever the generation
# process or the digis format change.
# The result is used by CalRecon/src/test/validate.py.
#=================================================
# globals
#=================================================
# directory this script was launched from; restored after each os.chdir below
original_dir = os.getcwd()
# matches CMT version tags such as "v7r3" (not used elsewhere in this script)
release_expr = re.compile('^v[0-9]+r[0-9]+')
#=================================================
# the function below establishes the path
# to the root directory of a given client package
#=================================================
def root_dir(package) :
    """Return the root directory of a CMT client package, or exit(1).

    Parses 'cmt show packages' output: each line is <name> <version> <path>.
    Version 'v1' means an unversioned layout (<path>/<name>); otherwise the
    version is a directory level (<path>/<name>/<version>).
    NOTE: this file is Python 2 (print statements).
    """
    # ask cmt
    packages_pipe = os.popen('cd '+original_dir+' ; cmt show packages')
    for line in packages_pipe :
        tokens = line.split()
        if tokens[0] == package :
            if tokens[1] == 'v1' :
                packages_pipe.close()
                return os.path.join(tokens[2],tokens[0])
            else :
                packages_pipe.close()
                return os.path.join(tokens[2],tokens[0],tokens[1])
    packages_pipe.close()
    # not found
    print 'PREPARATION ERROR: package',package,'NOT FOUND'
    sys.exit(1)
#=================================================
# build a package test application
#=================================================
def build_application_test(package) :
    """Build test_<package>.exe with CMT/make from the package's cmt dir.

    Exits with status 1 on build failure. Only the POSIX path is automated;
    the Windows branch is intentionally commented out (see note below).
    """
    os.chdir(os.path.join(root_dir(package),'cmt'))
    build_command = 'cmt bro -local cmt config'
    if os.name == 'posix':
        build_command += ' ; cmt bro -local make'
        build_command += ' ; make test'
        if os.system(build_command) != 0 :
            print 'VALIDATION ERROR: test_'+package+'.exe BUILD FAILED'
            sys.exit(1)
    # David: Windows nmake compilation fails for some reason,
    # so one will need to compile interactively with MRvcmt
    # before launching this validation script
    #
    # elif os.name == 'nt':
    #     build_command += ' & cmt bro -local nmake /f nmake'
    #     build_command += ' & nmake /f nmake test'
    #     if os.system(build_command) != 0 :
    #         print 'VALIDATION ERROR: test_'+package+'.exe BUILD FAILED'
    #         sys.exit(1)
    os.chdir(original_dir)
#=================================================
# all things to be done for a given set of options
# prerequisite: the current dir is something
# like <project>/<package>/<version>/cmt
#=================================================
def run_job(setup_package,binary_package,options) :
    """Run one test executable under the CMT runtime environment.

    Sources the setup script of *setup_package*, runs
    test_<binary_package>.exe with <options>.txt as its argument and tees
    the output into <options>.log next to the original working directory.
    Exits the whole script with status 1 if the executable fails.
    """
    # change directory
    os.chdir(os.path.join(root_dir(setup_package),'cmt'))
    # file names
    exe_name = os.path.join(root_dir(binary_package),os.environ['CMTCONFIG'],'test_'+binary_package+'.exe')
    opt_name = os.path.join(original_dir,options+'.txt')
    log_name = os.path.join(original_dir,options+'.log')
    # command (NOTE(review): when os.name is neither posix nor nt,
    # exe_command would be unbound -- confirm supported platforms)
    if os.name == 'posix':
        exe_command = '. setup.sh ; '+exe_name+' '+opt_name
    if os.name == 'nt':
        exe_command = 'call setup.bat & '+exe_name+' '+opt_name
    # prepare the log file (Python 2 builtin ``file``)
    log_file = file(log_name,'w')
    log_pipe = os.popen(exe_command)
    for line in log_pipe :
        log_file.write(line)
    log_file.close()
    # popen().close() returns None on success, an exit status otherwise
    if log_pipe.close() != None :
        print 'PREPARATION ERROR: '+binary_package+' '+options+' EXECUTION FAILED'
        sys.exit(1)
    # back to original dir
    os.chdir(original_dir)
#=================================
# job
#=================================
build_application_test('Gleam')
run_job('Gleam','Gleam','jobOptions')
| StarcoderdataPython |
1872839 | <gh_stars>0
"""A collection of utility methods used by CCBB at UCSD
This project aggregates a collection of python-language utility methods
developed to support the work of the Center for Computational Biology
and Bioinformatics at the University of California at San Diego.
"""
# Much of the content of this file is copied from the
# setup.py of the (open-source) PyPA sample project at
# https://github.com/pypa/sampleproject/blob/master/setup.py
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='ccbb_pyutils',

    # Versions should comply with PEP440.
    version='0.3.6',

    description='A collection of utility methods used by CCBB at UCSD',
    long_description=long_description,

    # The project's main homepage.
    url="https://github.com/ucsd-ccbb/ccbb-ucsd-pyutils",

    # Author details
    author='The Center for Computational Biology and Bioinformatics',
    author_email='<EMAIL>',

    # Choose your license
    license='MIT',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here.
        # FIX: trove classifiers require ' :: ' separators exactly; the
        # previous 'Programming Language:: Python:: 3:: Only' is not a
        # valid classifier and is rejected by PyPI on upload.
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],

    # What does your project relate to?
    keywords='development',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed.
    install_requires=['jupyter', 'matplotlib', 'multiqc', 'natsort',
                      'nbformat', 'nbparameterise', 'notebook', 'pandas'],

    # List additional groups of dependencies here (e.g. development
    # dependencies), installable with e.g. ``pip install -e .[dev,test]``.
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    }
)
| StarcoderdataPython |
9712867 | """This module contains classes that convert restrictions to manageable objects."""
import yaml
from odfuzz.exceptions import RestrictionsError
from odfuzz.constants import EXCLUDE, INCLUDE, DRAFT_OBJECTS, QUERY_OPTIONS, FORBID_OPTION, VALUE
class RestrictionsGroup:
    """A wrapper that holds a reference for all types of restrictions."""

    def __init__(self, restrictions_file):
        self._restrictions_file = restrictions_file
        self._forbidden_options = []
        self._option_restrictions = {}
        parsed = self._parse_restrictions() if self._restrictions_file else {}
        self._init_restrictions(parsed)

    def _parse_restrictions(self):
        """Load the YAML restrictions file, wrapping any failure."""
        try:
            with open(self._restrictions_file) as stream:
                return yaml.safe_load(stream)
        except (EnvironmentError, yaml.YAMLError) as error:
            raise RestrictionsError('An exception was raised while parsing the restrictions file \'{}\': {}'
                                    .format(self._restrictions_file, error))

    def _init_restrictions(self, restrictions_dict):
        """Build one QueryRestrictions object per query option."""
        excludes = restrictions_dict.get(EXCLUDE, {})
        includes = restrictions_dict.get(INCLUDE, {})
        for option in QUERY_OPTIONS:
            self._option_restrictions[option] = QueryRestrictions(
                excludes.get(option, {}), includes.get(option, {}))
        self._forbidden_options = excludes.get(FORBID_OPTION, [])
        self._init_draft_objects(includes)
        self._init_value_objects(includes)

    def _init_draft_objects(self, include_restr):
        """Register include-only restrictions for draft objects."""
        self._option_restrictions[DRAFT_OBJECTS] = QueryRestrictions(
            {}, include_restr.get(DRAFT_OBJECTS, {}))

    def _init_value_objects(self, include_restr):
        """Register include-only restrictions for $value."""
        self._option_restrictions[VALUE] = QueryRestrictions(
            {}, include_restr.get(VALUE, {}))

    def add_exclude_restriction(self, value, restriction_key):
        """Propagate an exclude restriction into every query option."""
        for restriction in self.option_restrictions():
            restriction.add_exclude_restriction(value, restriction_key)

    def option_restrictions(self):
        return self._option_restrictions.values()

    def forbidden_options(self):
        return self._forbidden_options

    def get(self, option_name):
        return self._option_restrictions.get(option_name)
class QueryRestrictions:
    """A set of restrictions applied to a query option."""

    def __init__(self, exclude_restr, include_restr):
        self._exclude = exclude_restr
        self._include = include_restr

    @property
    def include(self):
        """Dict of include restrictions for this option."""
        return self._include

    @property
    def exclude(self):
        """Dict of exclude restrictions for this option."""
        return self._exclude

    def add_exclude_restriction(self, value, restriction_key):
        """Add *value* under *restriction_key*, de-duplicating entries."""
        values = self._exclude.get(restriction_key)
        if values is None:
            values = []
        values.append(value)
        # Store a de-duplicated copy (order is not significant).
        self._exclude[restriction_key] = list(set(values))
| StarcoderdataPython |
113689 | <filename>reveries/common/maya_shader_export/ramp.py<gh_stars>1-10
# Maps Maya ramp ``interpolation`` attribute values to basis names.
# NOTE(review): only value 1 ('linear') is mapped; get_basis_name() raises
# KeyError for other interpolation modes -- confirm intended coverage.
BASIS_MAPPING = {
    1: 'linear'
}


class RampSampler(object):
    """Samples the colorEntryList keys of a Maya ramp node.

    Reads every position/color entry of ``<node_name>.colorEntryList`` via
    the OpenMaya API and the node's ``interpolation`` attribute via cmds.
    """

    def __init__(self, node_name):
        import maya.cmds as cmds
        import maya.api.OpenMaya as om

        self.key_number = None   # number of colorEntryList elements
        self.keys_list = []      # key positions (floats)
        self.color_list = []     # key colors (float3 tuples)
        self.basis_value = None  # raw value of the interpolation attribute
        self.basis_name = ''     # NOTE(review): never populated; see get_basis_name()

        # Resolve the dependency node for the given name.
        node = om.MGlobal.getSelectionListByName(node_name).getDependNode(0)
        depfn = om.MFnDependencyNode(node)
        compound_plug = depfn.findPlug("colorEntryList", False)
        for idx in range(compound_plug.numElements()):
            index_plug = compound_plug.elementByPhysicalIndex(idx)
            # child(0) is the position, child(1) the color of each entry.
            pos_handle = index_plug.child(0).asMDataHandle()
            color_handle = index_plug.child(1).asMDataHandle()
            # print idx, pos_handle.asFloat(), ":", color_handle.asFloat3()
            self.keys_list.append(pos_handle.asFloat())
            self.color_list.append(color_handle.asFloat3())
        self.key_number = compound_plug.numElements()
        self.basis_value = cmds.getAttr("{}.interpolation".format(node_name))

    def get_key_number(self):
        """Return the number of ramp keys."""
        return self.key_number

    def get_keys_list(self):
        """Return the list of key positions."""
        return self.keys_list

    def get_color_list(self):
        """Return the list of key colors."""
        return self.color_list

    def get_basis_value(self):
        """Return the raw interpolation attribute value."""
        return self.basis_value

    def get_basis_name(self):
        """Return the basis name for the interpolation value (see BASIS_MAPPING)."""
        return BASIS_MAPPING[self.basis_value]
| StarcoderdataPython |
6655363 | #!/usr/bin/python3
import Circle
import configparser
import math
import random
import sys
# does a test circle intersect with a list of circles, with a given 'cushion'
def intersect(test, circles, cushion):
    """Return True if *test* intersects any circle in *circles*.

    Args:
        test: candidate circle; must expose ``intersect(other, cushion)``
            (see Circle.Circle).
        circles: iterable of circles to test against.
        cushion: minimum separation, forwarded to ``Circle.intersect``.
    """
    # any() short-circuits on the first hit, matching the original
    # early-return loop.
    return any(test.intersect(circle, cushion) for circle in circles)
# check we have a config file and an output file
# check we have a config file and an output file
if len(sys.argv) != 3:
    print("Incorrect number of command line parameters.")
    print("Usage:")
    print(" %s config_file output_file" % (sys.argv[0]))
    exit(0)

Config = configparser.ConfigParser()
Config.read(sys.argv[1])

# get the information from the cfg file
NUM_CIRCLES = Config.getint("circles", "NUM_CIRCLES")
RADIUS = Config.getfloat("circles", "RADIUS")
RADIUS_PLUSMINUS = Config.getfloat("circles", "RADIUS_PLUSMINUS")
SEED_STRING = Config.get("variables", "SEED_STRING")
DELTA = RADIUS/100  # step size for the drop simulation
CUSHION = Config.getfloat("variables", "CUSHION")
MAX_RIGHT = Config.getfloat("variables", "MAX_RIGHT")

# set up variables using config data
baseRadius = RADIUS - (RADIUS_PLUSMINUS/2)
headNum = int(2 * (MAX_RIGHT / RADIUS))  # how many recent circles to collision-test

# set up other variables needed
currentMaxHeight = 0
circles = []

# lets do this -- drop NUM_CIRCLES circles one by one and let each settle.
random.seed(SEED_STRING)
for i in range(NUM_CIRCLES):
    # X and Y might change, so they are test values
    # radius stays the same
    radius = baseRadius + (RADIUS_PLUSMINUS * random.random())
    testX = radius + (MAX_RIGHT - 2*RADIUS)*random.random()
    # start well above the current pile so the drop always begins in free space
    testY = (currentMaxHeight + (RADIUS+RADIUS_PLUSMINUS))*3
    # find a subset of circles to test intersections against
    # because we prepend new circles, these will always be the most recent
    # and therefore highest circles. As new circles start from the top and
    # move down, we don't need to check if the test circle intersects with
    # circles at the bottom
    circlesSubset = circles[0:headNum]
    cantDrop = False
    while cantDrop is False:
        # fall straight down while the space below is free
        while (intersect(Circle.Circle(testX, testY-DELTA, radius), circlesSubset, CUSHION) is False) and testY-DELTA > radius:
            testY -= DELTA
        cantDrop = True
        # try sliding left
        if (intersect(Circle.Circle(testX-DELTA, testY, radius), circlesSubset, CUSHION) is False) and testX-DELTA > radius:
            testX -= DELTA
            cantDrop = False
            continue
        # try sliding down-right in a larger step
        if (intersect(Circle.Circle(testX+25*DELTA, testY-DELTA, radius), circlesSubset, CUSHION) is False) and testX+25*DELTA < MAX_RIGHT and testY-DELTA > radius:
            testX += 25*DELTA
            testY -= DELTA
            cantDrop = False
            continue
        if testY - radius <= DELTA:
            break
    # insert at the start, therefore all the circles at the 'top'
    # are at the head of the list, so we can use that to just test
    # intersections against the 'highest' circles
    circles.insert(0, Circle.Circle(testX, testY, radius))
    # set a new currentMaxHeight so we're start higher than the highest circle
    if(testY > currentMaxHeight):
        currentMaxHeight = testY
    print("%d / %d" % (i + 1, NUM_CIRCLES), end="\r", flush=True)
print("")

# Emit the settled circles as a POV-Ray scene fragment.
f = open(sys.argv[2], "w")
f.write("include \"pigment_function.inc\"\n")
f.write("\n")
for circle in circles:
    f.write("object {")
    f.write(" sphere { <%f, %f, 0> %f }" % (circle._x, circle._y, circle._r))
    f.write(" texture { finish { ambient 1 }")
    f.write(" pigment { color <")
    f.write(" pigment_function(%f, %f, 0).red," % (circle._x, circle._y))
    f.write(" pigment_function(%f, %f, 0).green," % (circle._x, circle._y))
    f.write(" pigment_function(%f, %f, 0).blue" % (circle._x, circle._y))
    f.write("> } } }\n")
f.close()
| StarcoderdataPython |
255002 | #!/usr/bin/env python3
import sys
def ints(itr):
    """Convert every element of *itr* to int, preserving order."""
    return list(map(int, itr))
# Advent-of-Code-style solver: reads movement instructions (N/S/E/W/L/R/F +
# amount) from the file named in argv[1] and prints the final Manhattan
# distance from the origin.
with open(sys.argv[1], "r") as f:
    lines = [l for l in f.read().split("\n") if l]

# NOTE(review): ilist/imap and total/result/other are unused leftover
# scaffolding from the puzzle template.
ilist = []
imap = {}

total = 0
result = 0
other = 0

while True:
    x = 0
    y = 0
    rot = 90  # heading in degrees; 90 == east
    for l in lines:
        a = l[0]          # action letter
        v = int(l[1:])    # amount
        if a == "N":
            y += v
        elif a == "S":
            y -= v
        elif a == "E":
            x += v
        elif a == "W":
            x -= v
        elif a == "L":
            rot -= v
        elif a == "R":
            rot += v
        elif a == "F":
            # move forward along the current heading
            if rot == 0:
                y += v
            elif rot == 90:
                x += v
            elif rot == 180:
                y -= v
            elif rot == 270:
                x -= v
            else:
                print("BAD ROT", rot)
        else:
            print("BAD ACT")
        rot = rot % 360  # normalise heading after every instruction
        print('rot', rot)
    print(x, y)
    print(abs(x) + abs(y))
    break  # single pass; the while is a leftover wrapper

print(f"Total: {total}")
print(f"Result: {result}")
print(f"Other: {other}")
| StarcoderdataPython |
5063464 | from .interfaces.icustompropertymanager import ICustomPropertyManager
from .enums.enum_types import CustomInfoType
from .enums.enum_options import CustomPropertyAddOption
from .enums.enum_results import (
CustomInfoAddResult,
CustomInfoDeleteResult,
CustomInfoGetResult,
)
class CustomPropertyManager(ICustomPropertyManager):
    """Wraps the raw ICustomPropertyManager interface with enum-typed results.

    Converts between human-readable names and the integer codes used by the
    underlying API (presumably a CAD automation API -- the raw calls
    ``get_all3``/``_add3``/``_delete2`` are inherited; confirm against
    ICustomPropertyManager).
    """

    def __init__(self, parent, config_name):
        super().__init__(parent, config_name)

    def get_all(self):
        """Gets all the custom properties for the current active configuration
        Returns:
        List of Tuples: A list of tuples; each containing the following:
        1. Property Name,
        2. Property Type,
        3. Property Value,
        4. Resolved - A result code,
        5. Property Link
        """
        # get_all3 returns five out-parameters; each exposes its payload
        # through a .value attribute.
        arg1, arg2, arg3, arg4, arg5 = self.get_all3()
        return list(
            zip(
                arg5.value,
                CustomInfoType(arg4.value),
                arg3.value,
                CustomInfoGetResult(arg2.value),
                arg1.value,
            )
        )

    def add(self, field_name, field_type, field_value, overwrite_existing):
        """Add a custom property, translating names to API enum codes.

        *field_type* and *overwrite_existing* are looked up by name (spaces
        become underscores, case-insensitive) in CustomInfoType and
        CustomPropertyAddOption respectively.
        """
        _field_type = CustomInfoType[field_type.upper().replace(" ", "_")].value
        _overwrite_existing = CustomPropertyAddOption[
            overwrite_existing.upper().replace(" ", "_")
        ].value
        retval = self._add3(
            field_name, _field_type, field_value, _overwrite_existing
        )
        return CustomInfoAddResult(retval)

    def delete(self, field_name):
        """Delete a custom property and return the typed result code."""
        retval = self._delete2(field_name)
        return CustomInfoDeleteResult(retval)

    def get(self):
        # TODO: not implemented yet.
        pass
| StarcoderdataPython |
3264472 | #-*- coding:utf-8 -*-
class DAG:
    """Directed acyclic word-segmentation graph of a sentence.

    ``self.dag`` maps each start index to the list of candidate end indices
    such that ``sentence[st:ed]`` is a word found in the dictionary (a Trie
    exposing ``search`` and ``get_freq``).  A start index with no dictionary
    match falls back to the single-character edge ``st + 1`` so the graph
    always stays connected.
    """

    def __init__(self, sentence, Trie):
        # self.dag = {start_index: [end_index1, end_index2, ...], ...}
        self.dag = {}
        self.dict = Trie
        self.build(sentence)

    def build(self, sentence: str):
        """Build the DAG for the given sentence.

        Args:
            sentence (str): sentence to segment.
        """
        N = len(sentence)
        for st in range(N):
            ed_list = []
            # Grow the fragment one character at a time and keep every end
            # position whose fragment is a known word with nonzero
            # frequency.  (The original loop carried an unreachable
            # ``if ed > N + 1: break`` guard -- removed as dead code.)
            for ed in range(st + 1, N + 1):
                frag = sentence[st:ed]
                if self.dict.search(frag) and self.dict.get_freq(frag):
                    ed_list.append(ed)
            if not ed_list:
                # No dictionary hit: fall back to the single-character edge.
                ed_list.append(st + 1)
            self.dag[st] = ed_list

    def items(self):
        """Iterate over (start_index, end_indices) pairs."""
        return self.dag.items()

    def get(self, key: int, default=None):
        """Return the end-index list of the given start index.

        Args:
            key (int): start index into the sentence.  (The original
                annotation said ``str``, but the keys stored by build()
                are integers.)
            default: value returned when *key* has no entry.

        Returns:
            list: candidate end indices for *key*, or *default*.
        """
        return self.dag.get(key, default)
9727227 | # -*- coding: utf-8 -*-
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import os
import unittest
from tempfile import mkdtemp
from textwrap import dedent
from shutil import rmtree
import sys
sys.modules['kmip'] = mock.Mock()
sys.modules['kmip.pie'] = mock.Mock()
sys.modules['kmip.pie.client'] = mock.Mock()
from swift.common.middleware.crypto.kmip_keymaster import KmipKeyMaster
class MockProxyKmipClient(object):
    """Stand-in for ``kmip.pie.client.ProxyKmipClient`` used by the tests.

    Always returns the *secret* it was constructed with and records the
    last uid requested via get().  Supports the context-manager protocol
    because the code under test uses ``with client:``.
    """

    def __init__(self, secret):
        self.secret = secret
        self.uid = None  # last uid passed to get(); inspected by the tests

    def get(self, uid):
        """Return the canned secret, remembering the requested *uid*."""
        self.uid = uid
        return self.secret

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing to clean up for the mock.
        pass
def create_secret(algorithm_name, length, value):
    """Build a mock KMIP secret exposing algorithm, length and raw value."""
    # MagicMock treats ``name`` specially in its constructor, so the
    # algorithm name must be assigned after creation.
    algo = mock.MagicMock()
    algo.name = algorithm_name
    return mock.MagicMock(cryptographic_algorithm=algo,
                          cryptographic_length=length,
                          value=value)
def create_mock_client(secret, calls):
    """Return a factory producing MockProxyKmipClient objects.

    Each invocation of the returned callable appends a record of its
    arguments and the created client to *calls*, so tests can assert how
    the ProxyKmipClient constructor was invoked.
    """
    def mock_client(*args, **kwargs):
        client = MockProxyKmipClient(secret)
        calls.append({'args': args, 'kwargs': kwargs, 'client': client})
        return client
    return mock_client
class TestKmipKeymaster(unittest.TestCase):
    """Unit tests for swift's KmipKeyMaster middleware construction.

    The real ProxyKmipClient is patched with a recording mock so each test
    can verify both the root secret obtained and the config arguments the
    keymaster passed to the KMIP client.
    """

    def setUp(self):
        self.tempdir = mkdtemp()

    def tearDown(self):
        rmtree(self.tempdir)

    def test_config_in_filter_section(self):
        """Keymaster configured directly in the proxy-server filter section."""
        conf = {'__file__': '/etc/swift/proxy-server.conf',
                '__name__': 'filter:kmip_keymaster',
                'key_id': '1234'}
        secret = create_secret('AES', 256, b'x' * 32)
        calls = []
        klass = 'swift.common.middleware.crypto.kmip_keymaster.ProxyKmipClient'
        with mock.patch(klass, create_mock_client(secret, calls)):
            km = KmipKeyMaster(None, conf)
        self.assertEqual(secret.value, km.root_secret)
        self.assertIsNone(km.keymaster_config_path)
        self.assertEqual({'config_file': '/etc/swift/proxy-server.conf',
                          'config': 'filter:kmip_keymaster'},
                         calls[0]['kwargs'])
        self.assertEqual('1234', calls[0]['client'].uid)

    def test_config_in_separate_file(self):
        """Keymaster options delegated to a standalone config file."""
        km_conf = """
        [kmip_keymaster]
        key_id = 4321
        """
        km_config_file = os.path.join(self.tempdir, 'km.conf')
        # NOTE(review): writing a str to a file opened in 'wb' only works on
        # Python 2 -- confirm this suite is not expected to run under py3.
        with open(km_config_file, 'wb') as fd:
            fd.write(dedent(km_conf))
        conf = {'__file__': '/etc/swift/proxy-server.conf',
                '__name__': 'filter:kmip_keymaster',
                'keymaster_config_path': km_config_file}
        secret = create_secret('AES', 256, b'x' * 32)
        calls = []
        klass = 'swift.common.middleware.crypto.kmip_keymaster.ProxyKmipClient'
        with mock.patch(klass, create_mock_client(secret, calls)):
            km = KmipKeyMaster(None, conf)
        self.assertEqual(secret.value, km.root_secret)
        self.assertEqual(km_config_file, km.keymaster_config_path)
        self.assertEqual({'config_file': km_config_file,
                          'config': 'kmip_keymaster'},
                         calls[0]['kwargs'])
        self.assertEqual('4321', calls[0]['client'].uid)

    def test_proxy_server_conf_dir(self):
        """A conf *directory* must be rejected unless a real file is given."""
        proxy_server_conf_dir = os.path.join(self.tempdir, 'proxy_server.d')
        os.mkdir(proxy_server_conf_dir)
        # KmipClient can't read conf from a dir, so check that is caught early
        conf = {'__file__': proxy_server_conf_dir,
                '__name__': 'filter:kmip_keymaster',
                'key_id': '789'}
        with self.assertRaises(ValueError) as cm:
            KmipKeyMaster(None, conf)
        self.assertIn('config cannot be read from conf dir', str(cm.exception))
        # ...but a conf file in a conf dir could point back to itself for the
        # KmipClient config
        km_config_file = os.path.join(proxy_server_conf_dir, '40.conf')
        km_conf = """
        [filter:kmip_keymaster]
        keymaster_config_file = %s
        [kmip_keymaster]
        key_id = 789
        """ % km_config_file
        with open(km_config_file, 'wb') as fd:
            fd.write(dedent(km_conf))
        conf = {'__file__': proxy_server_conf_dir,
                '__name__': 'filter:kmip_keymaster',
                'keymaster_config_path': km_config_file}
        secret = create_secret('AES', 256, b'x' * 32)
        calls = []
        klass = 'swift.common.middleware.crypto.kmip_keymaster.ProxyKmipClient'
        with mock.patch(klass, create_mock_client(secret, calls)):
            km = KmipKeyMaster(None, conf)
        self.assertEqual(secret.value, km.root_secret)
        self.assertEqual(km_config_file, km.keymaster_config_path)
        self.assertEqual({'config_file': km_config_file,
                          'config': 'kmip_keymaster'},
                         calls[0]['kwargs'])
        self.assertEqual('789', calls[0]['client'].uid)

    def test_bad_key_length(self):
        """A 128-bit key must be rejected -- AES-256 is required."""
        conf = {'__file__': '/etc/swift/proxy-server.conf',
                '__name__': 'filter:kmip_keymaster',
                'key_id': '1234'}
        secret = create_secret('AES', 128, b'x' * 16)
        calls = []
        klass = 'swift.common.middleware.crypto.kmip_keymaster.ProxyKmipClient'
        with mock.patch(klass, create_mock_client(secret, calls)):
            with self.assertRaises(ValueError) as cm:
                KmipKeyMaster(None, conf)
        self.assertIn('Expected an AES-256 key', str(cm.exception))
        self.assertEqual({'config_file': '/etc/swift/proxy-server.conf',
                          'config': 'filter:kmip_keymaster'},
                         calls[0]['kwargs'])
        self.assertEqual('1234', calls[0]['client'].uid)

    def test_bad_key_algorithm(self):
        """A non-AES algorithm must be rejected even at the right length."""
        conf = {'__file__': '/etc/swift/proxy-server.conf',
                '__name__': 'filter:kmip_keymaster',
                'key_id': '1234'}
        secret = create_secret('notAES', 256, b'x' * 32)
        calls = []
        klass = 'swift.common.middleware.crypto.kmip_keymaster.ProxyKmipClient'
        with mock.patch(klass, create_mock_client(secret, calls)):
            with self.assertRaises(ValueError) as cm:
                KmipKeyMaster(None, conf)
        self.assertIn('Expected an AES-256 key', str(cm.exception))
        self.assertEqual({'config_file': '/etc/swift/proxy-server.conf',
                          'config': 'filter:kmip_keymaster'},
                         calls[0]['kwargs'])
        self.assertEqual('1234', calls[0]['client'].uid)

    def test_missing_key_id(self):
        """Construction without the mandatory key_id option must fail."""
        conf = {'__file__': '/etc/swift/proxy-server.conf',
                '__name__': 'filter:kmip_keymaster'}
        with self.assertRaises(ValueError) as cm:
            KmipKeyMaster(None, conf)
        self.assertIn('key_id option is required', str(cm.exception))
| StarcoderdataPython |
35933 | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Login Form
"""
from zope.authentication.interfaces import IUnauthenticatedPrincipal
class LoginForm(object):
    """Mix-in class implementing the login form logic."""

    context = None
    request = None
    unauthenticated = None
    camefrom = None

    def __call__(self):
        """Render the login form, or redirect after a successful login."""
        request = self.request
        self.unauthenticated = IUnauthenticatedPrincipal.providedBy(
            request.principal)
        came = request.get('camefrom')
        if isinstance(came, list):
            # Since Python 2.6, a parameter supplied more than once arrives
            # as a list; keep only the first occurrence.
            came = came[0]
        self.camefrom = came
        if not self.unauthenticated and 'SUBMIT' in request:
            # The submit authenticated successfully: bounce back to the
            # page the user came from (or the current container).
            request.response.redirect(came or '.')
            return ''
        # Render the login template.
        return self.index()
| StarcoderdataPython |
264684 | <filename>eclipse-mosquitto/test/broker/02-subpub-qos2-bad-puback-1.py<gh_stars>1-10
#!/usr/bin/env python3
# Test what the broker does if receiving a PUBACK in response to a QoS 2 PUBLISH.
from mosq_test_helper import *
def helper(port, proto_ver):
    """Publish one QoS 2 message so the subscriber under test receives it.

    Performs the full QoS 2 sender-side handshake
    (PUBLISH -> PUBREC -> PUBREL -> PUBCOMP) on a throwaway connection.
    """
    connect_packet = mosq_test.gen_connect("helper", keepalive=60, proto_ver=proto_ver)
    connack_packet = mosq_test.gen_connack(rc=0, proto_ver=proto_ver)

    mid = 1
    publish1s_packet = mosq_test.gen_publish("subpub/qos2", qos=2, mid=mid, payload="message", proto_ver=proto_ver)
    pubrec1s_packet = mosq_test.gen_pubrec(mid, proto_ver=proto_ver)
    pubrel1s_packet = mosq_test.gen_pubrel(mid, proto_ver=proto_ver)
    pubcomp1s_packet = mosq_test.gen_pubcomp(mid, proto_ver=proto_ver)

    sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
    mosq_test.do_send_receive(sock, publish1s_packet, pubrec1s_packet, "pubrec 1s")
    mosq_test.do_send_receive(sock, pubrel1s_packet, pubcomp1s_packet, "pubcomp 1s")
    sock.close()
def do_test(proto_ver):
    """Check broker behaviour when a PUBACK answers a QoS 2 PUBLISH.

    A subscriber receives a QoS 2 message but wrongly replies with PUBACK;
    the expected broker reaction is to drop the connection (rc stays 0 on
    an empty read or ECONNRESET, otherwise the test fails and exits).
    """
    rc = 1
    keepalive = 60
    connect_packet = mosq_test.gen_connect("subpub-qos2-test", keepalive=keepalive, proto_ver=proto_ver)
    connack_packet = mosq_test.gen_connack(rc=0, proto_ver=proto_ver)

    mid = 1
    subscribe_packet = mosq_test.gen_subscribe(mid, "subpub/qos2", 2, proto_ver=proto_ver)
    suback_packet = mosq_test.gen_suback(mid, 2, proto_ver=proto_ver)

    mid = 1
    publish1r_packet = mosq_test.gen_publish("subpub/qos2", qos=2, mid=mid, payload="message", proto_ver=proto_ver)
    puback1r_packet = mosq_test.gen_puback(mid, proto_ver=proto_ver)

    pingreq_packet = mosq_test.gen_pingreq()
    pingresp_packet = mosq_test.gen_pingresp()

    port = mosq_test.get_port()
    broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)

    try:
        sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
        mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback")

        # Publish the QoS 2 message from a second, well-behaved client.
        helper(port, proto_ver)

        mosq_test.expect_packet(sock, "publish 1r", publish1r_packet)
        # Deliberately wrong: a QoS 2 PUBLISH must be answered with PUBREC.
        sock.send(puback1r_packet)
        sock.send(pingreq_packet)
        p = sock.recv(len(pingresp_packet))
        if len(p) == 0:
            # Broker closed the connection -- expected behaviour.
            rc = 0
        sock.close()
    except socket.error as e:
        if e.errno == errno.ECONNRESET:
            # Connection has been closed by peer, this is the expected behaviour
            rc = 0
    except mosq_test.TestError:
        pass
    finally:
        broker.terminate()
        broker.wait()
        (stdo, stde) = broker.communicate()
        if rc:
            print(stde.decode('utf-8'))
            print("proto_ver=%d" % (proto_ver))
            exit(rc)

do_test(proto_ver=4)
do_test(proto_ver=5)
exit(0)
6578674 | """
Script to calculate Freight Reliability Metric per ODOT Guidance.
By <NAME>, Metro, <EMAIL>
NOTE: SCRIPT RELIES ON PANDAS v.0.23.0 OR GREATER!
Usage:
>>>python lottr_truck.py
"""
import os
import pandas as pd
import numpy as np
import datetime as dt
def calc_freight_reliability(df_rel):
    """
    Calculates TTTR (Truck Travel Time Reliability), AKA freight reliability.
    Args: df_rel, a pandas dataframe with 'interstate', 'miles' and 'tttr'
          columns.
    Returns: df_rel, a pandas dataframe with a new 'weighted_ttr' column
             populated for interstate rows.
             tttr_index, the mileage-weighted freight reliability index of
             the whole interstate system.
    """
    # FIX: the original assigned 'weighted_ttr' onto a .loc slice (a copy),
    # so the column never reached the returned dataframe despite the
    # docstring; assigning through .loc on df_rel itself does.
    interstate = df_rel['interstate'] == 1
    # Total length of the interstate system.
    total_miles = df_rel.loc[interstate, 'miles'].sum()
    # Mileage-weighted tttr for trucks.
    df_rel.loc[interstate, 'weighted_ttr'] = (
        df_rel.loc[interstate, 'miles'] * df_rel.loc[interstate, 'tttr'])
    tttr_index = df_rel.loc[interstate, 'weighted_ttr'].sum() / total_miles
    return df_rel, tttr_index
def calc_ttr(df_ttr):
    """Calculates travel time reliability.
    Args: df_ttr, a pandas dataframe with 'pct_truck', 'dir_aadt' and
          'miles' columns.
    Returns: df_ttr, the same dataframe with new 'VOLt' and 'ttr' columns.
    """
    # Working vehicle occupancy assumption for trucks.
    truck_occupancy = 1
    # Annualised directional truck volume.
    annual_volume = df_ttr['pct_truck'] * df_ttr['dir_aadt'] * 365
    df_ttr['VOLt'] = annual_volume
    df_ttr['ttr'] = df_ttr['miles'] * annual_volume * truck_occupancy
    return df_ttr
def AADT_splits(df_spl):
    """Calculates AADT by truck vehicle type.
    Args: df_spl, a pandas dataframe with 'aadt', 'faciltype' and
          'aadt_combi' columns.
    Returns: df_spl, with new columns:
        dir_aadt: directional aadt (total aadt split across directions)
        pct_truck: combination-truck share of directional aadt
    """
    directional = (df_spl['aadt'] / df_spl['faciltype']).round()
    df_spl['dir_aadt'] = directional
    df_spl['pct_truck'] = df_spl['aadt_combi'] / directional
    return df_spl
def get_max_ttr(df_max):
    """Returns the maximum tttr calculated per TMC.
    Args: df_max, a pandas dataframe with 'tmc_code' and 'tttr' columns.
    Returns: a dataframe with one row per TMC holding its max tttr value.
    """
    return df_max.groupby('tmc_code', as_index=False).agg({'tttr': 'max'})
def calc_lottr(df_lottr):
    """Calculates LOTTR (Level of Travel Time Reliability) per FHWA metrics.
    Args: df_lottr, a pandas dataframe with 'tmc_code' and
          'travel_time_seconds' columns.
    Returns: a dataframe per TMC with columns:
        95_pct_tt, 95th percentile travel time.
        50_pct_tt, 50th percentile travel time.
        tttr, the truck travel time reliability ratio (95th / 50th).
    """
    def percentile_of(q):
        # Aggregator computing the q-th percentile of a group.
        return lambda values: np.percentile(values, q)

    # Duplicate the travel time column so each copy can be aggregated with
    # a different percentile.
    df_lottr['95_pct_tt'] = df_lottr['travel_time_seconds']
    df_lottr['50_pct_tt'] = df_lottr['travel_time_seconds']
    grouped = df_lottr.groupby('tmc_code', as_index=False).agg(
        {'95_pct_tt': percentile_of(95), '50_pct_tt': percentile_of(50)})
    grouped['tttr'] = grouped['95_pct_tt'] / grouped['50_pct_tt']
    return grouped
def agg_travel_times(df_tt, days):
    """Aggregates truck travel time reliability values per time-of-day bin.

    Args:
        df_tt: pandas dataframe with 'tmc_code', 'travel_time_seconds' and
            a datetime 'measurement_tstamp' column.
        days: 'MF' for weekday binning (AM peak / midday / PM peak /
            overnight) or 'SATSUN' for weekend binning (day / overnight).
    Returns:
        df_ttr_all_times, a dataframe with one calc_lottr() result per
        time-of-day bin stacked vertically for easy group-by processing.
    Raises:
        ValueError: if *days* is neither 'MF' nor 'SATSUN' (previously
            this fell through to an obscure NameError).
    """
    def between_hours(hours):
        # Rows whose timestamp falls in any of the given clock hours.
        return df_tt[df_tt['measurement_tstamp'].dt.hour.isin(list(hours))]

    # 8pm-7am, wrapping around midnight.
    overnight = list(range(20, 24)) + list(range(0, 7))

    # NOTE: the original also built an unused df_tmc dataframe of distinct
    # TMC codes; that dead code has been removed.
    if days == 'MF':
        bins = [between_hours(range(6, 10)),    # AM peak
                between_hours(range(10, 16)),   # midday
                between_hours(range(16, 20)),   # PM peak
                between_hours(overnight)]       # overnight
    elif days == 'SATSUN':
        bins = [between_hours(range(6, 20)),    # daytime
                between_hours(overnight)]       # overnight
    else:
        raise ValueError("days must be 'MF' or 'SATSUN', got {!r}".format(days))

    df_ttr_all_times = pd.DataFrame()
    for df_bin in bins:
        df_ttr_all_times = pd.concat(
            [df_ttr_all_times, calc_lottr(df_bin)], sort=False)
    return df_ttr_all_times
def main():
    """Main script to calculate TTTR.

    Pipeline: load quarterly NPMRDS truck CSVs, filter to Metro TMCs,
    compute per-bin travel time reliability, join TMC metadata and
    interstate flags, then derive and print the freight reliability index.
    Writes the enriched dataframe to lottr_truck_out.csv.
    """
    startTime = dt.datetime.now()
    print('Script started at {0}'.format(startTime))
    pd.set_option('display.max_rows', None)
    # Hard-coded network-drive layout of the source data.
    drive_path = 'H:/map21/perfMeasures/phed/data/original_data/'
    quarters = ['2017Q0']
    #quarters = ['2017Q0', '2017Q1', '2017Q2', '2017Q3', '2017Q4']
    folder_end = '_TriCounty_Metro_15-min'
    file_end = '_NPMRDS (Trucks).csv'

    df = pd.DataFrame()  # Empty dataframe

    for q in quarters:
        filename = q + folder_end + file_end
        path = q + folder_end
        full_path = path + '/' + filename
        print("Loading {0} data...".format(q))
        df_temp = pd.read_csv(
            os.path.join(
                os.path.dirname(__file__), drive_path + full_path))
        df = pd.concat([df, df_temp], sort=False)

    df = df.dropna()

    # Filter by timestamps
    # NOTE(review): .format(q) below has no placeholder -- harmless leftover.
    print("Filtering timestamps...".format(q))
    df['measurement_tstamp'] = pd.to_datetime(df['measurement_tstamp'])
    df['hour'] = df['measurement_tstamp'].dt.hour

    wd = 'H:/map21/perfMeasures/phed/data/'
    # Join/filter on relevant Metro TMCs
    print("Join/filter on Metro TMCs...")
    df_urban = pd.read_csv(
        os.path.join(os.path.dirname(__file__), wd + 'metro_tmc_092618.csv'))
    df = pd.merge(df, df_urban, how='right', left_on=df['tmc_code'],
                  right_on=df_urban['Tmc'])
    # Drop the synthetic merge key pandas adds when merging on Series.
    df = df.drop('key_0', axis=1)
    #print(df.shape, df['travel_time_seconds'].sum())

    # Apply calculation functions
    print("Applying calculation functions...")
    # Separate weekend and weekday dataframes for processing
    df_mf = df[df['measurement_tstamp'].dt.weekday.isin([0, 1, 2, 3, 4])]
    df_sat_sun = df[df['measurement_tstamp'].dt.weekday.isin([5, 6])]
    df_mf = agg_travel_times(df_mf, 'MF')
    df_sat_sun = agg_travel_times(df_sat_sun, 'SATSUN')

    # Combine weekend, weekday dataset
    df = pd.concat([df_mf, df_sat_sun], sort=False)
    df = get_max_ttr(df)

    # Join TMC Metadata
    print("Join TMC Metadata...")
    df_meta = pd.read_csv(
        os.path.join(
            os.path.dirname(__file__),
            wd +
            'TMC_Identification_NPMRDS (Trucks and passenger vehicles).csv'),
        usecols=['tmc', 'miles', 'faciltype', 'aadt', 'aadt_singl',
                 'aadt_combi'])
    df = pd.merge(df, df_meta, left_on=df['tmc_code'],
                  right_on=df_meta['tmc'], how='inner')
    # ###########This is necessary in pandas > v.0.22.0 ####
    df = df.drop('key_0', axis=1)
    ########################################################

    # Join Interstate values
    df_interstate = pd.read_csv(
        os.path.join(os.path.dirname(__file__), wd + 'interstate_tmc_092618.csv'))
    df = pd.merge(df, df_interstate, left_on='tmc_code', right_on='Tmc',
                  how='inner')

    df = AADT_splits(df)
    df = calc_ttr(df)
    df, reliability_index = calc_freight_reliability(df)
    print(reliability_index)
    df.to_csv('lottr_truck_out.csv')

    endTime = dt.datetime.now()
    print("Script finished in {0}.".format(endTime - startTime))

if __name__ == '__main__':
    main()
| StarcoderdataPython |
6631091 | import cv2
import random
# from threading import Timer
class Splatter:
    """A randomly-coloured paint-splatter overlay within a bounding box.

    Loads one of eight splatter-N.png stencils, resizes it to the given
    box, recolours the opaque pixels and tracks an opacity used for fading.
    """

    def __init__(self, topleft, bottomright, color=None):
        # Pick one of the eight stencil images at random.
        imgnum = str(random.randint(1,8))
        # -1 keeps the alpha channel (loads as BGRA).
        self.outline = cv2.imread(str('splatter-'+imgnum+'.png'), -1)
        self.outline = cv2.resize(self.outline, (bottomright[0]-topleft[0], bottomright[1]-topleft[1]), interpolation = cv2.INTER_AREA)
        # NOTE(review): cvtColor returns a new array; this result is
        # discarded, so the conversion currently has no effect -- confirm.
        cv2.cvtColor(self.outline, cv2.COLOR_BGRA2RGBA) #remember to try to convert frame to RGBA also
        if color == None:
            # Random colour when the caller did not pick one.
            self.color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        else:
            self.color = color
        # Paint opaque stencil pixels with the colour, transparent ones black.
        self.outline[:, :, 0:3][self.outline[:, :, 3] != 0] = self.color
        self.outline[:, :, 0:3][self.outline[:, :, 3] == 0] = (0, 0, 0)
        self.opacity = 1
        self.topleft = topleft
        self.bottomright = bottomright

    def fade(self):
        """Reduce the splatter's opacity by one step, clamped at 0."""
        #self.outline[self.outline[:, :, 3] >= 4] -= 4
        if self.opacity > 0:
            self.opacity -= 0.1
        if self.opacity < 0:
            self.opacity = 0
| StarcoderdataPython |
1723209 | <filename>output/models/ms_data/regex/re_s15_xsd/__init__.py<gh_stars>1-10
from output.models.ms_data.regex.re_s15_xsd.re_s15 import (
Regex,
Doc,
)
# Public API of this package: names re-exported from re_s15.
__all__ = [
    "Regex",
    "Doc",
]
| StarcoderdataPython |
6674048 | import numpy as np
from finitewave.core.fibrosis import FibrosisPattern
class ScarRect2DPattern(FibrosisPattern):
    """Axis-aligned rectangular scar region for a 2-D mesh.

    Cells inside ``[x1:x2, y1:y2]`` are marked with the value 2 (scar
    tissue); all other cells are left untouched.
    """

    def __init__(self, x1, x2, y1, y2):
        # Rectangle bounds, half-open exactly like numpy slicing.
        self.x1 = x1
        self.x2 = x2
        self.y1 = y1
        self.y2 = y2

    def generate(self, size, mesh=None):
        """Return *mesh* (or a fresh zero array of shape *size*) with the scar painted in."""
        target = np.zeros(size) if mesh is None else mesh
        target[self.x1:self.x2, self.y1:self.y2] = 2
        return target
| StarcoderdataPython |
6404932 | <gh_stars>10-100
'''Contains DSrule classes for DateSense package.'''
from .DStoken import DStoken
# Rules are where the real fun happens with date format detection. Please
# feel free to implement your own! The only strictly necessary component
# of a DSrule class is that it has an apply(self, options) method
# where options is a DSoptions object.
class DSDelimiterRule(object):
    """Scores directive possibilities by their adjacency to delimiter tokens.

    Wherever one of the rule's delimiters is a possibility for a token, the
    rule's directives are rewarded (posscore) at the neighbouring tokens and
    penalised (negscore) at every other token.
    """

    def __init__(self, directives, delimiters, posscore=0, negscore=0):
        """Create the rule.

        :param directives: directive string(s) the rule applies to, e.g. ('%H', '%M').
        :param delimiters: delimiter string(s), e.g. ':'.
        :param posscore: added to matching directives adjacent to a delimiter.
        :param negscore: added to matching directives not adjacent to one.
        """
        self.directives = directives
        self.delimiters = delimiters
        self.posscore = posscore
        self.negscore = negscore

    def apply(self, options):
        """Adjust possibility scores on *options* according to delimiter adjacency."""
        neighbours = []
        token_count = len(options.allowed)
        # Collect every token list sitting directly next to a possible delimiter.
        for delimiter in self.delimiters:
            for index, toklist in enumerate(options.allowed):
                if not DStoken.get_token_with_text(toklist, delimiter):
                    continue
                if index > 0 and options.allowed[index - 1] not in neighbours:
                    neighbours.append(options.allowed[index - 1])
                if index < token_count - 1 and options.allowed[index + 1] not in neighbours:
                    neighbours.append(options.allowed[index + 1])
        # Reward the neighbours of a delimiter, penalise everything else.
        for toklist in options.allowed:
            adjustment = self.posscore if toklist in neighbours else self.negscore
            if adjustment:
                for tok in toklist:
                    if tok.text in self.directives:
                        tok.score += adjustment
class DSLikelyRangeRule(object):
    """Scores numeric directives by whether the observed values fit a likely range."""

    def __init__(self, directives, likelyrange, posscore=0, negscore=0):
        """Create the rule.

        :param directives: directive string(s) the rule applies to, e.g. '%S'.
        :param likelyrange: indexable (min, max) pair; the range is inclusive.
        :param posscore: added when every observed value lies inside the range.
        :param negscore: added when any observed value falls outside it.
        """
        self.directives = directives
        self.likelyrange = likelyrange
        self.posscore = posscore
        self.negscore = negscore

    def apply(self, options):
        """Adjust possibility scores on *options* using the observed numeric ranges."""
        low = self.likelyrange[0]
        high = self.likelyrange[1]
        for index, toklist in enumerate(options.allowed):
            for tok in toklist:
                # Only numeric possibilities named by this rule are affected.
                if tok.kind != DStoken.KIND_NUMBER or tok.text not in self.directives:
                    continue
                observed = options.numranges[index]
                if observed[0] >= low and observed[1] <= high:
                    tok.score += self.posscore  # every encountered value in range
                else:
                    tok.score += self.negscore  # at least one value out of range
class DSPatternRule(object):
    """Scores tokens that appear in a common sequence, e.g. ('%m','/','%d','/',('%y','%Y')).

    Tokens forming a complete instance of the sequence are rewarded; directive
    possibilities that belong to the sequence but were never part of a found
    instance are penalised.
    """

    def __init__(self, sequence, maxdistance=1, minmatchscore=0, posscore=0, negscore=0):
        """Create the rule.

        :param sequence: ordered token possibilities, e.g. ('%H', ':', '%M').
        :param maxdistance: how many unrelated (wildcard) tokens may sit
            between consecutive sequence members.
        :param minmatchscore: minimum score a directive needs to count as a
            sequence member (decorator tokens always count, at any score).
        :param posscore: added to the possibilities of each completed instance.
        :param negscore: added to sequence directives found outside any instance.
        """
        self.sequence = sequence
        self.maxdistance = maxdistance
        self.minmatchscore = minmatchscore
        self.posscore = posscore
        self.negscore = negscore

    def apply(self, options):
        """Adjust possibility scores on *options* by matching the pattern sequence."""
        onarg = 0                   # index of the sequence element expected next
        counter = 0                 # tokens passed since the last sequence member
        ordered_toks = []           # members of every completed instance
        ordered_toks_current = []   # members of the instance currently being matched
        for toklist in options.allowed:
            # Abandon a partial match once the gap exceeds maxdistance.
            if ordered_toks_current:
                counter += 1
                if counter > self.maxdistance:
                    onarg = 0
                    counter = 0
                    ordered_toks_current = []
            foundtok = 0
            for tok in toklist:
                if (tok.score >= self.minmatchscore or tok.is_decorator()) and tok.text in self.sequence[onarg]:
                    ordered_toks_current.append(tok)
                    foundtok += 1
            if foundtok:
                onarg += 1
                counter = 0
                if onarg == len(self.sequence):
                    # Completed one full instance of the pattern.
                    onarg = 0
                    ordered_toks.extend(ordered_toks_current)
                    # BUG FIX: start the next match from scratch.  The
                    # original kept the finished instance in
                    # ordered_toks_current, so every later completion
                    # re-extended (and re-scored) the earlier instance's
                    # tokens.
                    ordered_toks_current = []
        # Positive reinforcement: members of completed instances.
        if self.posscore:
            for tok in ordered_toks:
                tok.score += self.posscore
        # Negative reinforcement: sequence directives never matched.
        if self.negscore:
            for toklist in options.allowed:
                for tok in toklist:
                    if tok.is_decorator():
                        continue
                    for matchtext in self.sequence:
                        if tok.text in matchtext and tok not in ordered_toks:
                            tok.score += self.negscore
class DSMutExclusionRule(object):
    """Marks a set of directives as mutually exclusive (e.g. '%H' vs '%I').

    The highest-scoring occurrence among the listed directives wins: that
    directive is rewarded everywhere it appears and the others are penalised.
    """

    def __init__(self, directives, posscore=0, negscore=0):
        """Create the rule.

        :param directives: mutually exclusive directive strings, e.g. ('%H', '%I').
        :param posscore: added to the winning directive's possibilities.
        :param negscore: added to the losing directives' possibilities.
        """
        self.directives = directives
        self.posscore = posscore
        self.negscore = negscore

    def apply(self, options):
        """Adjust possibility scores on *options*, keeping only the strongest directive."""
        # Highest-scoring token found so far for each directive.
        # BUG FIX: initialise once with one slot per directive.  The original
        # appended a fresh None for every (token, directive) pair, growing the
        # list without bound; only the stray Nones' skipping kept it working.
        matchedtoks = [None] * len(self.directives)
        for toklist in options.allowed:
            for tok in toklist:
                for i, matchtext in enumerate(self.directives):
                    if tok.text in matchtext:
                        if matchedtoks[i] is None or tok.score > matchedtoks[i].score:
                            matchedtoks[i] = tok
        # Determine which directive holds the overall highest-scoring token
        # (ties go to the lowest-index directive).
        highest_tok = None
        highest_index = 0
        for i, tok in enumerate(matchedtoks):
            if tok is not None and (highest_tok is None or tok.score > highest_tok.score):
                highest_tok = tok
                highest_index = i
        if highest_tok is None:
            return
        # Reward the winner's possibilities, penalise the rest.
        for toklist in options.allowed:
            for tok in toklist:
                for i, matchtext in enumerate(self.directives):
                    if tok.text in matchtext:
                        tok.score += self.posscore if i == highest_index else self.negscore
| StarcoderdataPython |
8076475 | <reponame>hahnah/uncrowded-cafe-backend
import os
import requests
import flask
def place_details(request):
    """Look up opening hours and a photo reference for a Google Place.

    Args:
        request (flask.Request): HTTP request carrying ``place_id`` either as
            a query parameter or in a JSON body.

    Returns:
        JSON ``{'status': 'SUCCESS', 'result': {...}}`` on success, or
        ``{'status': 'FAILURE', 'search_result': []}`` when the place id or
        the API key is missing.
    """
    request_json = request.get_json()
    place_id = (
        request.args.get('place_id') if request.args and 'place_id' in request.args else
        request_json['place_id'] if request_json and 'place_id' in request_json else
        None
    )
    if place_id is None:
        return flask.jsonify({'status': 'FAILURE', 'search_result': []})

    api_key = os.environ.get('API_KEY')
    if api_key is None:
        return flask.jsonify({'status': 'FAILURE', 'search_result': []})

    BASE_URL = 'https://maps.googleapis.com/maps/api/place/'
    DETAIL_URL = BASE_URL + 'details/json?placeid={}&fields=opening_hours,photos&key={}'
    request_url = DETAIL_URL.format(place_id, api_key)
    result = requests.get(request_url).json().get('result', {})

    # ROBUSTNESS: the Places API omits 'opening_hours' and 'photos' for some
    # places; fall back to None instead of raising KeyError/IndexError.
    open_now = result.get('opening_hours', {}).get('open_now')
    photos = result.get('photos') or []
    photo_reference = photos[0].get('photo_reference') if photos else None

    return flask.jsonify({
        'status': 'SUCCESS',
        'result': {
            'open_now': open_now,
            'photo_reference': photo_reference,
        },
    })
8058313 | import _sk_fail; _sk_fail._("sre_constants")
| StarcoderdataPython |
3362188 | <gh_stars>1-10
from jedi._compatibility import unicode
from jedi.inference.compiled.value import CompiledObject, CompiledName, \
CompiledObjectFilter, CompiledValueName, create_from_access_path
from jedi.inference.base_value import ValueWrapper, LazyValueWrapper
def builtin_from_name(inference_state, string):
    """Resolve the builtin named *string* to its single inferred value."""
    builtins_module = inference_state.builtins_module
    if string in ('None', 'True', 'False'):
        # The keyword-like singletons live on the non-stub builtins module.
        module_value, = builtins_module.non_stub_value_set
        name_filter = next(module_value.get_filters())
    else:
        name_filter = next(builtins_module.get_filters())
    found_name, = name_filter.get(string)
    found_value, = found_name.infer()
    return found_value
class CompiledValue(LazyValueWrapper):
    """Wraps a compiled object, lazily exposing the matching builtin instance.

    A fixed set of "raw" operations is delegated straight to the underlying
    compiled object; every other attribute goes through the lazily created
    builtin instance produced by ``_get_wrapped_value``.
    """
    def __init__(self, compiled_obj):
        self.inference_state = compiled_obj.inference_state
        self._compiled_obj = compiled_obj
    def __getattribute__(self, name):
        # Delegate compiled-only operations directly to the compiled object
        # instead of the wrapped builtin instance.
        if name in ('get_safe_value', 'execute_operation', 'access_handle',
                    'negate', 'py__bool__', 'is_compiled'):
            return getattr(self._compiled_obj, name)
        return super(CompiledValue, self).__getattribute__(name)
    def _get_wrapped_value(self):
        # Instantiate the builtin with the same name as the compiled object
        # (e.g. an int compiled object wraps an executed builtin ``int``).
        instance, = builtin_from_name(
            self.inference_state, self._compiled_obj.name.string_name).execute_with_values()
        return instance
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._compiled_obj)
def create_simple_object(inference_state, obj):
    """Wrap a plain Python literal in a CompiledValue.

    Only types that are easily picklable across Python versions are accepted.
    """
    assert type(obj) in (int, float, str, bytes, unicode, slice, complex, bool), obj
    access_path = inference_state.compiled_subprocess.create_simple_object(obj)
    compiled_obj = create_from_access_path(inference_state, access_path)
    return CompiledValue(compiled_obj)
def get_string_value_set(inference_state):
    """Return the value set produced by instantiating the builtin ``str``."""
    str_builtin = builtin_from_name(inference_state, u'str')
    return str_builtin.execute_with_values()
def load_module(inference_state, dotted_name, **kwargs):
    """Load a compiled module by dotted name; returns None when unavailable."""
    # HACK: some tensorflow builtins cannot be loaded and retrying on every
    # request is extremely slow, so the whole namespace is skipped up front.
    if dotted_name.startswith('tensorflow.'):
        return None
    access_path = inference_state.compiled_subprocess.load_module(
        dotted_name=dotted_name, **kwargs)
    return None if access_path is None else create_from_access_path(inference_state, access_path)
| StarcoderdataPython |
3341623 | from django.apps import AppConfig
class CsoneConfig(AppConfig):
    """Django application configuration for the ``csone`` app."""
    name = 'csone'
| StarcoderdataPython |
1709155 | <gh_stars>0
# URL of the first photo in the canned response below (handy for assertions).
FIRST_PHOTO_URL = "https://mars.nasa.gov/msl-raw-images/proj/msl/redops/ods/surface/sol/03418/opgs/edr/ncam/NLB_700915024EDR_F0933240CCAM03417M_.JPG"
# Canned NASA Mars Rover Photos API response fixture: three NAVCAM photos
# taken by the Curiosity rover on sol 3418 (earth date 2022-03-18).
TEST_RESP = {"photos":
             [
                 {
                     "id": 948763,
                     "sol": 3418,
                     "camera": {
                         "id": 26,
                         "name": "NAVCAM",
                         "rover_id": 5,
                         "full_name": "Navigation Camera"
                     },
                     "img_src": FIRST_PHOTO_URL,
                     "earth_date": "2022-03-18",
                     "rover": {
                         "id": 5,
                         "name": "Curiosity",
                         "landing_date": "2012-08-06",
                         "launch_date": "2011-11-26",
                         "status": "active"
                     }
                 },
                 {
                     "id": 948764,
                     "sol": 3418,
                     "camera": {
                         "id": 26,
                         "name": "NAVCAM",
                         "rover_id": 5,
                         "full_name": "Navigation Camera"
                     },
                     "img_src": "https://mars.nasa.gov/msl-raw-images/proj/msl/redops/ods/surface/sol/03418/opgs/edr/ncam/NRB_700922659EDR_F0933240NCAM00560M_.JPG",
                     "earth_date": "2022-03-18",
                     "rover": {
                         "id": 5,
                         "name": "Curiosity",
                         "landing_date": "2012-08-06",
                         "launch_date": "2011-11-26",
                         "status": "active"
                     }
                 },
                 {
                     "id": 948765,
                     "sol": 3418,
                     "camera": {
                         "id": 26,
                         "name": "NAVCAM",
                         "rover_id": 5,
                         "full_name": "Navigation Camera"
                     },
                     "img_src": "https://mars.nasa.gov/msl-raw-images/proj/msl/redops/ods/surface/sol/03418/opgs/edr/ncam/NRB_700922621EDR_F0933240NCAM00560M_.JPG",
                     "earth_date": "2022-03-18",
                     "rover": {
                         "id": 5,
                         "name": "Curiosity",
                         "landing_date": "2012-08-06",
                         "launch_date": "2011-11-26",
                         "status": "active"
                     }
                 }
             ]
             }
| StarcoderdataPython |
6659314 | <filename>src/models/example/model_example.py
# coding: utf-8
from pydantic import BaseModel, validator
from typing import List, Optional
from .args import ExampleArgs
from .. import validators
from ..model_type_enum import ModelTypeEnum
from ...lib.enums import AppTypeEnum
class ModelExample(BaseModel):
    """Pydantic schema describing an 'example' model entry."""
    id: str
    version: str
    type: ModelTypeEnum
    app: AppTypeEnum
    name: str
    args: ExampleArgs
    methods: List[str]
    # Shared validator: rejects null/empty strings for ``name``.
    _str_null_empty = validator('name', allow_reuse=True)(
        validators.str_null_empty)
    @validator('id')
    def validate_id(cls, value: str) -> str:
        """Require the fixed id 'ooouno-ex'; raise ValueError otherwise."""
        if value == 'ooouno-ex':
            return value
        raise ValueError(
            f"root/id/ must be 'ooouno-ex'. Current value: {value}")
| StarcoderdataPython |
1678379 | import os
import time
from contextlib import contextmanager
from django_webtest import WebTest
from evap.evaluation.tests.tools import WebTestWith200Check
from evap.staff.tools import ImportType, generate_import_filename
def helper_enter_staff_mode(webtest):
    """Put the given WebTest app's session into staff mode by stamping a start time."""
    # This is a bit complicated in WebTest
    # See https://github.com/django-webtest/django-webtest/issues/68#issuecomment-350244293
    webtest.app.set_cookie("sessionid", "initial")
    session = webtest.app.session
    session["staff_mode_start_time"] = time.time()
    session.save()
    # Re-point the cookie at the saved session so follow-up requests see it.
    webtest.app.set_cookie("sessionid", session.session_key)
webtest.app.set_cookie("sessionid", session.session_key)
def helper_exit_staff_mode(webtest):
    """Remove the staff-mode marker from the given WebTest app's session."""
    # This is a bit complicated in WebTest
    # See https://github.com/django-webtest/django-webtest/issues/68#issuecomment-350244293
    webtest.app.set_cookie("sessionid", "initial")
    session = webtest.app.session
    if "staff_mode_start_time" in session:
        del session["staff_mode_start_time"]
    session.save()
    # Re-point the cookie at the saved session so follow-up requests see it.
    webtest.app.set_cookie("sessionid", session.session_key)
@contextmanager
def run_in_staff_mode(webtest):
    """Enable staff mode for *webtest* for the duration of the ``with`` block.

    ROBUSTNESS: staff mode is now disabled in a ``finally`` clause, so a test
    body that raises no longer leaks staff mode into subsequent requests.
    """
    helper_enter_staff_mode(webtest)
    try:
        yield
    finally:
        helper_exit_staff_mode(webtest)
class WebTestStaffMode(WebTest):
    """WebTest base class whose setUp starts every test in staff mode."""
    def setUp(self):
        helper_enter_staff_mode(self)
class WebTestStaffModeWith200Check(WebTestWith200Check):
    """Staff-mode variant of the shared 200-status-check test base class."""
    def setUp(self):
        helper_enter_staff_mode(self)
def helper_delete_all_import_files(user_id):
    """Delete every staged import file belonging to *user_id*, ignoring missing ones."""
    for import_type in ImportType:
        path = generate_import_filename(user_id, import_type)
        try:
            os.remove(path)
        except FileNotFoundError:
            pass
# For some form fields, like a <select> which can be configured to create new options,
# setting the value directly would be rejected by Webtest,
# as it would check whether all values are included in the options.
# To circumvent this, set the options beforehand with this helper.
def helper_set_dynamic_choices_field_value(field, value):
    """Pre-populate a dynamic-choices form field's options so WebTest accepts *value*.

    WebTest rejects values missing from a ``<select>``'s options, so for
    fields configured to create new options we install the options first.
    """
    field.options = [(entry, False, entry) for entry in value]
    field.value = value
| StarcoderdataPython |
242635 | <filename>src/txt2xls/function/builtin/unite_function.py
# coding=utf-8
"""
"""
__author__ = 'Alisue <<EMAIL>>'
import os
def default_unite_function(data):
    """Derive the united output filename from *data*.

    ``data[0]`` holds the filename of the data; any "middle" extensions are
    dropped so that only the first stem and the final extension remain.

    >>> # [<filename>] is mimicking `data`
    >>> default_unite_function(['./foo/foo.bar.hoge.piyo'])
    './foo/foo.piyo'
    >>> default_unite_function(['./foo/foo.piyo'])
    './foo/foo.piyo'
    >>> default_unite_function(['./foo/foo'])
    './foo/foo'
    """
    directory, basename = os.path.split(data[0])
    stem, extension = os.path.splitext(basename)
    # Keep only the part before the first dot, discarding middle extensions.
    stem = stem.split('.', 1)[0]
    return os.path.join(directory, stem + extension)
# Expose the function as the module-level __call__ hook.
__call__ = default_unite_function
if __name__ == '__main__':
import doctest; doctest.testmod()
| StarcoderdataPython |
5011685 | from . import Model
from api.validator import UserValidator
users = []
class UserModel(Model):
    """Model wrapping a user dict plus sign-up logic over the in-memory ``users`` list."""

    def __init__(self, user=None, is_admin=0):
        super(UserModel, self).__init__(item=user, list_of_items=users)
        # Remains 0 for a default (non-admin) user.
        self.isAdmin = is_admin

    def user_is_admin(self):
        """Return True when the admin flag is non-zero."""
        # IDIOM: direct boolean expression instead of if/return True/return False.
        return self.isAdmin != 0

    def user_sign_up(self):
        """Validate ``self.item`` and append the created user to ``users``.

        Returns the new user's first name on success, 'User Exists' when the
        email is already registered, or 'Invalid Data Check The Fields' when
        validation fails.
        """
        admin_status = self.user_is_admin()
        # Generate a unique id for the new user.
        user_id = super(UserModel, self).generate_id()
        # Returns the validated user dict, or the string 'Invalid'.
        validated_user = UserValidator(self.item).all_checks()
        if validated_user == 'Invalid':
            return 'Invalid Data Check The Fields'
        # Reject duplicate registrations by email.
        for user in users:
            if user['email'] == validated_user['email']:
                return 'User Exists'
        created_user = {
            "id": user_id,
            "firstname": validated_user['firstname'],
            "lastname": validated_user['lastname'],
            "othername": validated_user['othername'],
            "email": validated_user['email'],
            "phoneNumber": validated_user['phoneNumber'],
            "passportUrl": validated_user['passportUrl'],
            # BUG FIX: the source contained a corrupted placeholder here;
            # restore the lookup from the validated user dict.
            "password": validated_user['password'],
            "isAdmin": admin_status
        }
        users.append(created_user)
        return created_user['firstname']
| StarcoderdataPython |
11365674 | <gh_stars>1-10
#########################################
# Custom Large Font for HD44780 Displays
#########################################
class HD44780_Large_Font:
    """Renders oversized (2-row) characters on an HD44780 character LCD.

    Eight custom 5x8 glyph fragments (CGRAM slots 0-7) combined with
    character code 255 (the all-pixels-on block in the standard HD44780
    character ROM -- confirm for non-A00 ROM variants) build big 'A', 'U',
    '*' and the digits 0-9.  Each big glyph spans display rows 0-1 and up to
    three columns.
    """
    def __init__(self, lcd):
        """Register the fragment glyphs on *lcd* (needs create_char/write/cursor_pos)."""
        self.lcd = lcd
        # Corner segments (lt/rt/ll/lr) all map to the solid-block ROM char.
        self.lt = 255
        # Asterisk left-top fragment: diagonal stroke in the lower half.
        ast_lt_template = (
            0b00000,
            0b00000,
            0b00000,
            0b00000,
            0b11100,
            0b01110,
            0b00111,
            0b00011
        )
        self.ast_lt = 0
        self.lcd.create_char(self.ast_lt, ast_lt_template)
        # Upper bar: top three pixel rows filled.
        ub_template = (
            0b11111,
            0b11111,
            0b11111,
            0b00000,
            0b00000,
            0b00000,
            0b00000,
            0b00000
        )
        self.ub = 1
        self.lcd.create_char(self.ub, ub_template)
        # Asterisk right-top fragment.
        ast_rt_template = (
            0b00000,
            0b00000,
            0b00000,
            0b00000,
            0b00111,
            0b01110,
            0b11100,
            0b11000
        )
        self.ast_rt = 2
        self.lcd.create_char(self.ast_rt, ast_rt_template)
        # Asterisk left-bottom fragment.
        ast_lb_template = (
            0b00111,
            0b01110,
            0b11100,
            0b11000,
            0b00000,
            0b00000,
            0b00000,
            0b00000
        )
        self.ast_lb = 3
        self.lcd.create_char(self.ast_lb, ast_lb_template)
        self.rt = 255
        self.ll = 255
        # Lower bar: bottom three pixel rows filled.
        lb_template = (
            0b00000,
            0b00000,
            0b00000,
            0b00000,
            0b00000,
            0b11111,
            0b11111,
            0b11111
        )
        self.lb = 4
        self.lcd.create_char(self.lb, lb_template)
        self.lr = 255
        # Asterisk right-bottom fragment.
        ast_rb_template = (
            0b11100,
            0b01110,
            0b00111,
            0b00011,
            0b00000,
            0b00000,
            0b00000,
            0b00000
        )
        self.ast_rb = 5
        self.lcd.create_char(self.ast_rb, ast_rb_template)
        # Upper + middle bar (top three rows and bottom two rows).
        umb_template = (
            0b11111,
            0b11111,
            0b11111,
            0b00000,
            0b00000,
            0b00000,
            0b11111,
            0b11111
        )
        self.umb = 6
        self.lcd.create_char(self.umb, umb_template)
        # Lower + middle bar (single top row and bottom three rows).
        lmb_template = (
            0b11111,
            0b00000,
            0b00000,
            0b00000,
            0b00000,
            0b11111,
            0b11111,
            0b11111
        )
        self.lmb = 7
        self.lcd.create_char(self.lmb, lmb_template)
    def write_string(self, text):
        """Draw *text* in big glyphs; supports 'A', 'U', '*' and digits 0-9.

        Each glyph cell is 4 columns wide (3 glyph columns + 1 gap) starting
        at column 0 on rows 0-1; unsupported characters only advance the
        cursor.
        """
        x = 0
        space = 4
        for letter in text:
            self.lcd.cursor_pos = (0, x)
            if letter == 'A':
                self.print_A(x)
            elif letter == 'U':
                self.print_U(x)
            elif letter == '*':
                self.print_asterisk(x)
            elif letter == '1':
                self.print_1(x)
            elif letter == '2':
                self.print_2(x)
            elif letter == '3':
                self.print_3(x)
            elif letter == '4':
                self.print_4(x)
            elif letter == '5':
                self.print_5(x)
            elif letter == '6':
                self.print_6(x)
            elif letter == '7':
                self.print_7(x)
            elif letter == '8':
                self.print_8(x)
            elif letter == '9':
                self.print_9(x)
            elif letter == '0':
                self.print_0(x)
            x += space
    def print_A(self, x):
        """Draw a large 'A' at column *x* (cursor already on row 0)."""
        self.lcd.write(self.lt)
        self.lcd.write(self.umb)
        self.lcd.write(self.rt)
        self.lcd.cursor_pos = (1, x)
        self.lcd.write(255)
        self.lcd.write(254)
        self.lcd.write(255)
    def print_U(self, x):
        """Draw a large 'U' at column *x*."""
        self.lcd.write(255)
        self.lcd.write(254)
        self.lcd.write(255)
        self.lcd.cursor_pos = (1, x)
        self.lcd.write(self.ll)
        self.lcd.write(self.lb)
        self.lcd.write(self.lr)
    def print_asterisk(self, x):
        """Draw a large '*' at column *x* using the four diagonal fragments."""
        self.lcd.write(self.ast_lt)
        self.lcd.write(255)
        self.lcd.write(self.ast_rt)
        self.lcd.cursor_pos = (1, x)
        self.lcd.write(self.ast_lb)
        self.lcd.write(255)
        self.lcd.write(self.ast_rb)
    def print_1(self, x):
        """Draw a large '1' at column *x* (only 2 columns wide)."""
        self.lcd.write(self.ub)
        self.lcd.write(self.rt)
        y = x + 1
        self.lcd.cursor_pos = (1, y)
        self.lcd.write(255)
    def print_2(self, x):
        """Draw a large '2' at column *x*."""
        self.lcd.write(self.umb)
        self.lcd.write(self.umb)
        self.lcd.write(self.rt)
        self.lcd.cursor_pos = (1, x)
        self.lcd.write(self.ll)
        self.lcd.write(self.lmb)
        self.lcd.write(self.lmb)
    def print_3(self, x):
        """Draw a large '3' at column *x*."""
        self.lcd.write(self.umb)
        self.lcd.write(self.umb)
        self.lcd.write(self.rt)
        self.lcd.cursor_pos = (1, x)
        self.lcd.write(self.lmb)
        self.lcd.write(self.lmb)
        self.lcd.write(self.lr)
    def print_4(self, x):
        """Draw a large '4' at column *x*."""
        self.lcd.write(self.ll)
        self.lcd.write(self.lb)
        self.lcd.write(self.rt)
        y = x + 2
        self.lcd.cursor_pos = (1, y)
        self.lcd.write(255)
    def print_5(self, x):
        """Draw a large '5' at column *x*."""
        self.lcd.write(255)
        self.lcd.write(self.umb)
        self.lcd.write(self.umb)
        self.lcd.cursor_pos = (1, x)
        self.lcd.write(self.lmb)
        self.lcd.write(self.lmb)
        self.lcd.write(self.lr)
    def print_6(self, x):
        """Draw a large '6' at column *x*."""
        self.lcd.write(self.lt)
        self.lcd.write(self.umb)
        self.lcd.write(self.umb)
        self.lcd.cursor_pos = (1, x)
        self.lcd.write(self.ll)
        self.lcd.write(self.umb)
        self.lcd.write(self.lr)
    def print_7(self, x):
        """Draw a large '7' at column *x*."""
        self.lcd.write(self.ub)
        self.lcd.write(self.ub)
        self.lcd.write(self.rt)
        y = x + 1
        self.lcd.cursor_pos = (1, y)
        self.lcd.write(self.lt)
    def print_8(self, x):
        """Draw a large '8' at column *x*."""
        self.lcd.write(self.lt)
        self.lcd.write(self.umb)
        self.lcd.write(self.rt)
        self.lcd.cursor_pos = (1, x)
        self.lcd.write(self.ll)
        self.lcd.write(self.lmb)
        self.lcd.write(self.lr)
    def print_9(self, x):
        """Draw a large '9' at column *x*."""
        self.lcd.write(self.lt)
        self.lcd.write(self.umb)
        self.lcd.write(self.rt)
        y = x + 2
        self.lcd.cursor_pos = (1, y)
        self.lcd.write(255)
    def print_0(self, x):
        """Draw a large '0' at column *x*."""
        self.lcd.write(self.lt)
        self.lcd.write(self.ub)
        self.lcd.write(self.rt)
        self.lcd.cursor_pos = (1, x)
        self.lcd.write(self.ll)
        self.lcd.write(self.lb)
        self.lcd.write(self.lr)
4878688 | <reponame>aliced187/alice-and-charlie
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list
from floodsystem.flood import stations_level_over_threshold
def run():
    """Print every station whose relative water level exceeds 0.8."""
    stations = build_station_list()
    # Refresh the latest level reading for all stations.
    update_water_levels(stations)
    for entry in stations_level_over_threshold(stations, 0.8):
        print(*entry)
if __name__ == "__main__":
print("*** Task 2B: CUED Part IA Flood Warning System ***")
run() | StarcoderdataPython |
4928323 | <reponame>fengli12321/FLMusicServer<gh_stars>1-10
# Generated by Django 2.1.5 on 2019-01-24 16:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Music.lyric optional (blank/null) with verbose metadata."""
    dependencies = [
        ('musics', '0002_music_lyric'),
    ]
    operations = [
        migrations.AlterField(
            model_name='music',
            name='lyric',
            field=models.FileField(blank=True, help_text='歌词', null=True, upload_to='musics/lyrics', verbose_name='歌词'),
        ),
    ]
| StarcoderdataPython |
212365 | <filename>src/pycode.py
import sys
import time
import threading
from threading import Thread
def add(a, b):
    """Print and return the sum of *a* and *b* as a string.

    Integer arithmetic is used when both operands parse as ints, float
    arithmetic otherwise.  Raises ValueError for non-numeric input.
    """
    try:
        result = str(int(a) + int(b))
    except ValueError:
        # At least one operand is not an int literal; fall back to floats.
        result = str(float(a) + float(b))
    print(result)
    sys.stdout.flush()  # flush so a parent process reading our stdout sees it immediately
    return result
def sub(a, b):
    """Print and return the difference a - b as a string.

    Integer arithmetic is used when both operands parse as ints, float
    arithmetic otherwise.  Raises ValueError for non-numeric input.
    """
    try:
        result = str(int(a) - int(b))
    except ValueError:
        # At least one operand is not an int literal; fall back to floats.
        result = str(float(a) - float(b))
    print(result)
    sys.stdout.flush()  # flush so a parent process reading our stdout sees it immediately
    return result
def mul(a, b):
    """Print and return the product a * b as a string.

    Integer arithmetic is used when both operands parse as ints, float
    arithmetic otherwise.  Raises ValueError for non-numeric input.
    """
    try:
        result = str(int(a) * int(b))
    except ValueError:
        # At least one operand is not an int literal; fall back to floats.
        result = str(float(a) * float(b))
    print(result)
    sys.stdout.flush()  # flush so a parent process reading our stdout sees it immediately
    return result
def div(a, b):
    """Print and return the quotient a / b (float division) as a string.

    Raises ValueError for non-numeric input and ZeroDivisionError when b is 0.
    """
    result = str(float(a) / float(b))
    print(result)
    sys.stdout.flush()  # flush so a parent process reading our stdout sees it immediately
    return result
def getInput():
    """Dispatch "<command> <op1> <op2>" lines read from stdin, forever.

    BUG FIX: the original used Thread(target=add(operand1, operand2)), which
    ran the operation synchronously and handed its return value to Thread,
    so the started threads did nothing.  It also recursed once per input
    line, which would eventually overflow the call stack; the recursion is
    replaced with a loop.
    """
    operations = {'add': add, 'sub': sub, 'mul': mul, 'div': div}
    while True:
        message = raw_input()
        messageArray = message.split(' ')
        command = messageArray[0]
        operand1 = messageArray[1]
        operand2 = messageArray[2]
        operation = operations.get(command)
        if operation is not None:
            Thread(target=operation, args=(operand1, operand2)).start()
if __name__ == '__main__':
    # Start the stdin dispatch loop on a background thread.
    Thread(target = getInput).start()
6660164 | #!/usr/bin/python3
class Statistic:
    """Running min/max/sum/count accumulator over sampled values."""

    def __init__(self):
        self.clear()

    def clear(self):
        """Reset the accumulator to its empty state."""
        self.min = None
        self.max = None
        self.sum = 0
        self.cnt = 0

    def sample(self, value):
        """Fold one *value* into the running statistics."""
        if self.min is None or value < self.min:
            self.min = value
        if self.max is None or value > self.max:
            self.max = value
        self.sum += value
        self.cnt += 1

    def results(self):
        """Return [count, min, max, mean]; min/max/mean are None without samples."""
        mean = self.sum / self.cnt if self.cnt else None
        return [self.cnt, self.min, self.max, mean]
class Tag:
    """Name and source location (file, line) of a profiling tag."""
    def __init__(self):
        self.name = ""  # human-readable tag name
        self.file = ""  # source file the tag lives in
        self.line = 0   # line number within that file
def csv(file, delimeter=','):
    """Parse the text lines of *file* into lists of fields split on *delimeter*.

    Note: the parameter keeps its original (misspelled) public name.
    """
    rows = []
    for line in file:
        rows.append(line.strip().split(delimeter))
    return rows
def get_statistic(id):
    """Collect the durations recorded for interval *id* from intervals.rp.csv.

    Returns ``(durations, statistic)``: the raw list of duration samples and
    their aggregated Statistic.
    """
    statistic = Statistic()
    durations = []
    with open('intervals.rp.csv', 'r') as fintervals:
        fintervals.readline()  # skip the header row
        for row in csv(fintervals):
            if row[0] != id:
                continue
            duration = float(row[3])
            statistic.sample(duration)
            durations.append(duration)
    return durations, statistic
def get_statistics():
    """Aggregate one Statistic per interval id from intervals.rp.csv."""
    statistics = {}
    with open('intervals.rp.csv', 'r') as fintervals:
        fintervals.readline()  # skip the header row
        for row in csv(fintervals):
            statistic = statistics.setdefault(row[0], Statistic())
            statistic.sample(float(row[3]))
    return statistics
def get_tags():
    """Load every tag's name/file/line from tags.rp.csv, keyed by id."""
    tags = {}
    with open('tags.rp.csv', 'r') as ftags:
        ftags.readline()  # skip the header row
        for row in csv(ftags):
            tag = tags.setdefault(row[0], Tag())
            tag.name = row[1]
            tag.file = row[2]
            tag.line = int(row[3])
    return tags
def overview():
    """Print a summary table (count/min/max/avg duration) for every interval id."""
    statistics = get_statistics()
    tags = get_tags()
    print("-" * 85)
    print("%5s %32s %10s %10s %10s %10s" % ("id", "name", "cnt", "min", "max", "avg"))
    print("-" * 85)
    for id in sorted(statistics):
        tag = tags[id]
        # stats[0:4] is (cnt, min, max, avg); ids read from the file always
        # have cnt >= 1, so the float conversions below never see None.
        stats = statistics[id].results()
        print("%5s %32s %10u %10.2f %10.2f %10.2f" % (id, tag.name, *stats[0:4]))
    print("-" * 85)
def interval(id):
    """Print the summary row for one interval *id*, then every raw duration."""
    # ids in the CSV are strings, so stringify to accept int ids from argparse.
    durations, statistic = get_statistic(str(id))
    tags = get_tags()
    tag = tags[str(id)]
    stats = statistic.results()
    print("-" * 85)
    print("%5s %32s %10s %10s %10s %10s" % ("id", "name", "cnt", "min", "max", "avg"))
    print("-" * 85)
    print("%5s %32s %10u %10.2f %10.2f %10.2f" % (id, tag.name, *stats[0:4]))
    print("-" * 85)
    print()
    print("%s" % ("iterations"))
    print()
    for duration in durations:
        print("%.2f" % (duration))
    print()
# Parse the command line with argparse; on any failure fall back to a manual
# sys.argv parser.
# NOTE(review): the bare `except` also catches SystemExit raised by argparse
# (e.g. for --help or bad arguments), routing those into fallback mode --
# confirm that is intended.
try:
    import argparse
    parser = argparse.ArgumentParser(description='Rapid profile analyzer.')
    parser.add_argument('--id', '-i', type=int, help='interval ID to query', default=None)
    args = parser.parse_args()
    if args.id is None:
        overview()
    else:
        interval(args.id)
except:
    print('FALLBACK MODE\n')
    import sys
    if (len(sys.argv) > 1):
        key = sys.argv[1]
        if key == '-i':
            if len(sys.argv) > 2:
                id = sys.argv[2]
                interval(id)
            else:
                print('-i required ID argument')
        else:
            # Treat a single positional argument as the interval id.
            try:
                id = sys.argv[1]
                interval(id)
            except:
                print('unrecognized argument argument')
    else:
        overview()
| StarcoderdataPython |
42803 | <gh_stars>1-10
import bitstring
# Bech32 spits out array of 5-bit values. Shim here.
def u5_to_bitarray(arr):
    """Pack a sequence of 5-bit integers (bech32 words) into one BitArray."""
    ret = bitstring.BitArray()
    for a in arr:
        ret += bitstring.pack("uint:5", a)
    return ret
# Map of classical and witness address prefixes: bech32 HRP -> base58
# version-byte pair ('bc' = Bitcoin mainnet, 'tb' = testnet).
base58_prefix_map = {
    'bc' : (0, 5),
    'tb' : (111, 196)
}
def bitarray_to_u5(barr):
    """Split a bit array into a list of 5-bit unsigned integers."""
    assert barr.len % 5 == 0
    stream = bitstring.ConstBitStream(barr)
    # Reading the stream in fixed 5-bit chunks until it is exhausted.
    return [stream.read(5).uint for _ in range(barr.len // 5)]
| StarcoderdataPython |
199949 | <filename>stubs.min/Autodesk/Revit/DB/__init___parts/ViewDisplaySketchyLines.py
class ViewDisplaySketchyLines(object,IDisposable):
    """ Represents the settings for sketchy lines. """
    # Auto-generated .NET API stub (Revit DB, IronPython); bodies are
    # intentionally empty placeholders for the native implementation.
    def Dispose(self):
        """ Dispose(self: ViewDisplaySketchyLines) """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: ViewDisplaySketchyLines,disposing: bool) """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self,*args):
        """ __repr__(self: object) -> str """
        pass
    # Properties below use placeholder lambdas; the trailing bare strings are
    # the generated property documentation.
    EnableSketchyLines=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """True to enable sketchy lines visibility. False to disable it.
    Get: EnableSketchyLines(self: ViewDisplaySketchyLines) -> bool
    Set: EnableSketchyLines(self: ViewDisplaySketchyLines)=value
    """
    Extension=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """The extension scale value. Controls the magnitude of line's extension.
    Values between 0 and 10.
    Get: Extension(self: ViewDisplaySketchyLines) -> int
    Set: Extension(self: ViewDisplaySketchyLines)=value
    """
    IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.
    Get: IsValidObject(self: ViewDisplaySketchyLines) -> bool
    """
    Jitter=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """The jitter defines jitteriness of the line.
    Values between 0 and 10.
    Get: Jitter(self: ViewDisplaySketchyLines) -> int
    Set: Jitter(self: ViewDisplaySketchyLines)=value
    """
| StarcoderdataPython |
1865129 | <filename>lettersmith/path.py
from urllib.parse import urlparse, urljoin
from pathlib import Path, PurePath
from os import sep, listdir, path, walk
import re
# Characters considered unsafe/ugly in URLs; stripped when slugifying.
_STRANGE_CHARS = "[](){}<>:^&%$#@!'\"|*~`,"
# Regex character class matching any single "strange" character.
STRANGE_CHAR_PATTERN = "[{}]".format(re.escape(_STRANGE_CHARS))
def space_to_dash(text):
    """Collapse each run of whitespace in `text` into a single dash."""
    # Raw string avoids the invalid "\s" escape warning (SyntaxWarning on 3.12+).
    return re.sub(r"\s+", "-", text)
def remove_strange_chars(text):
    """Drop characters that have no business being in a URL."""
    return re.compile(STRANGE_CHAR_PATTERN).sub("", text)
def to_slug(text):
    """Normalize arbitrary text into a lowercase, URL-friendly slug."""
    slug = str(text).strip().lower()
    slug = remove_strange_chars(slug)
    return space_to_dash(slug)
def to_title(pathlike):
    """Derive a title from a path's stem, dropping a leading "_" draft marker."""
    stem = PurePath(pathlike).stem
    if stem.startswith("_"):
        return stem[1:]
    return stem
def is_file_like(pathlike):
    """A path is file-like when it carries an extension (ends in `xxx.xxx`)."""
    return bool(PurePath(pathlike).suffix)
def ensure_trailing_slash(pathlike):
    """Append "/" to directory-like paths; leave files and slashed paths alone."""
    text = str(pathlike)
    if is_file_like(text) or text.endswith("/"):
        return text
    return text + "/"
def is_local_url(url):
    """A URL is local when it has no scheme (no "http:", "mailto:", ...)."""
    return not urlparse(url).scheme
def qualify_url(pathlike, base="/"):
    """
    Prefix a local URL with `base`. URLs that already start with `base`
    or carry a scheme pass through unchanged.
    """
    url = str(pathlike)
    if url.startswith(base) or not is_local_url(url):
        return url
    return urljoin(base, url)
def remove_base_slash(any_path):
    """Strip a single leading "/" from a path, if present."""
    if any_path.startswith("/"):
        return any_path[1:]
    return any_path
def to_nice_path(ugly_pathlike):
    """
    Turn an ugly path into a "nice" one that ends with an index file, so it
    can be referenced `like/this/` instead of `like/This.html`:

        some/File.md  ->  some/file/index.md

    Paths already named "index" pass through untouched.
    """
    source = PurePath(ugly_pathlike)
    # Index pages are already nice.
    if source.stem == "index":
        return source
    nested = PurePath(source.parent, source.stem, "index" + source.suffix)
    # Slugify, then convert the slug string back into a path object.
    return PurePath(to_slug(nested))
def to_url(pathlike, base="/"):
    """
    Turn a nice path into a URL: drop the trailing `index.html` and
    qualify the result with `base`.

        some/file/index.html  ->  /some/file/
    """
    nice = PurePath(to_slug(pathlike))
    if nice.name == "index.html":
        nice = ensure_trailing_slash(nice.parent)
    return qualify_url(nice, base=base)
def is_draft(pathlike):
    """A draft is any file whose name starts with "_"."""
    filename = PurePath(pathlike).name
    return filename.startswith("_")
def should_pub(pathlike, build_drafts=False):
    """
    Ergonomic filter: publish everything when `build_drafts` is set,
    otherwise skip drafts.
    """
    if build_drafts:
        return True
    return not is_draft(pathlike)
def is_dotfile(pathlike):
    """A dotfile is any file whose name begins with "."."""
    return PurePath(pathlike).name[:1] == "."
def is_config_file(pathlike):
    """Check whether the path names the lettersmith config file."""
    filename = PurePath(pathlike).name
    return filename == "lettersmith.yaml"
def is_doc_file(pathlike):
    """A doc path is file-like and neither a dotfile nor the config file."""
    if not is_file_like(pathlike):
        return False
    if is_dotfile(pathlike) or is_config_file(pathlike):
        return False
    return True
def is_index(pathlike):
    """True when the path's stem is "index" (any extension)."""
    stem = PurePath(pathlike).stem
    return stem == 'index'
def tld(pathlike):
    """
    Name of the top-level directory in this path, or "" when the path
    has no directory component.
    """
    parts = PurePath(pathlike).parts
    if len(parts) > 1:
        return parts[0]
    return ''
def read_dir(some_path):
    """Directory portion of a path: dirname for files, unchanged otherwise."""
    if is_file_like(some_path):
        return path.dirname(some_path)
    return some_path
def is_sibling(path_a, path_b):
    """
    True when both paths share a parent and path_b is not an index file:

        siblings:     foo/bar/baz.html ~ foo/bar/bing.html
        not siblings: foo/bar/baz.html ~ foo/bar/boing/index.html
    """
    same_parent = PurePath(path_a).parent == PurePath(path_b).parent
    return same_parent and not is_index(path_b)
def has_ext(pathlike, extensions):
    """True when the path's suffix is one of `extensions`."""
    suffix = PurePath(pathlike).suffix
    return suffix in extensions
def glob_all(pathlike, globs):
    """
    Yield every path under `pathlike` matching any glob pattern in `globs`.
    Results are produced lazily, pattern by pattern.
    """
    root = Path(pathlike)
    for pattern in globs:
        yield from root.glob(pattern)
6407160 | <gh_stars>1-10
#! /usr/bin/env python3
from pathlib import Path
from typing import List
import ros_metrics_reporter.coverage.run_lcov as run_lcov
class CoverageAll:
    """Builds the aggregated lcov HTML coverage report covering all packages."""

    def __init__(
        self, output_dir: Path, base_dir: Path, lcovrc: Path, exclude: List[str]
    ):
        # `exclude` holds patterns filtered out of the lcov report.
        self.__output_lcov_dir = output_dir / "all"
        self.__base_dir = base_dir.absolute()
        self.__lcovrc = lcovrc
        self.__exclude = exclude

    def __generate_html_report(self, output_dir: Path, coverage_info_dir_name: str):
        """Filter the merged coverage info and render it as HTML into output_dir."""
        info_file = self.__base_dir / coverage_info_dir_name / "total_coverage.info"
        # Nothing to do when the test run produced no merged coverage data.
        if not info_file.exists():
            return
        if not output_dir.exists():
            output_dir.mkdir(parents=True)
        filtered = run_lcov.filter_report(
            coverage_info_path=info_file,
            base_dir=self.__base_dir,
            output_dir=output_dir,
            lcovrc=self.__lcovrc,
            exclude=self.__exclude,
        )
        run_lcov.generate_html_report(
            coverage_info_path=filtered,
            base_dir=self.__base_dir,
            output_dir=output_dir,
            lcovrc=self.__lcovrc,
        )

    def generate_html_report(self, test_label: str = ""):
        """
        Generate the all-packages HTML report. A non-empty `test_label`
        reads from 'lcov.<label>' and writes under a matching subdirectory;
        otherwise the plain 'lcov' directory is used.
        """
        print("Generating Coverage report for all packages...")
        if test_label:
            self.__generate_html_report(
                self.__output_lcov_dir / test_label, "lcov." + test_label
            )
        else:
            self.__generate_html_report(self.__output_lcov_dir, "lcov")
| StarcoderdataPython |
143942 | <reponame>mm40/pudb
import collections
import pytest # noqa: F401
from pudb.py3compat import builtins
from pudb.settings import load_breakpoints, save_breakpoints
def test_load_breakpoints(mocker):
    """load_breakpoints parses saved 'b file:line' entries into tuples."""
    # One list of lines per saved-breakpoints file that readlines() returns.
    fake_data = ["b /home/user/test.py:41"], ["b /home/user/test.py:50"]
    mock_open = mocker.mock_open()
    mock_open.return_value.readlines.side_effect = fake_data
    mocker.patch.object(builtins, "open", mock_open)
    # Resolve the module path and treat every breakpoint as valid.
    mocker.patch("pudb.settings.lookup_module",
                 mocker.Mock(return_value="/home/user/test.py"))
    mocker.patch("pudb.settings.get_breakpoint_invalid_reason",
                 mocker.Mock(return_value=None))
    result = load_breakpoints()
    # Tuples are (file, line, temporary, condition, funcname).
    expected = [("/home/user/test.py", 41, False, None, None),
                ("/home/user/test.py", 50, False, None, None)]
    assert result == expected
def test_save_breakpoints(mocker):
    """save_breakpoints writes to the configured saved-breakpoints file."""
    # Minimal stand-in for pudb's breakpoint objects.
    MockBP = collections.namedtuple("MockBreakpoint", "file line cond")
    mock_breakpoints = [MockBP("/home/user/test.py", 41, None),
                        MockBP("/home/user/test.py", 50, None)]
    mocker.patch("pudb.settings.get_breakpoints_file_name",
                 mocker.Mock(return_value="saved-breakpoints"))
    mock_open = mocker.mock_open()
    mocker.patch.object(builtins, "open", mock_open)
    save_breakpoints(mock_breakpoints)
    # The file must be opened for writing exactly at the configured path.
    mock_open.assert_called_with("saved-breakpoints", "w")
| StarcoderdataPython |
5050894 | import scrapy
from scrapy.utils.project import get_project_settings
from acaSpider.items import AcaspiderItem
import logging
import re
import datetime
from acaSpider.proxyDownloader import getProxy
class ACMSpider(scrapy.Spider):
    """Scrapy spider that pages through ACM Digital Library search results."""
    name = "ACM_Spider"
    allowed_domains = ["dl.acm.org"]
    start_urls = get_project_settings().get('ACM_URL')

    def __init__(self):
        super(ACMSpider, self).__init__()
        # Pagination state plus bookkeeping for periodic proxy refreshes.
        self.startPage = 0
        self.pageSize = 20
        self.startTime = get_project_settings().get('START_TIME')
        self.proxyUpdateDelay = get_project_settings().get('PROXY_UPDATE_DELAY')
        getProxy().main()

    def parse(self, response):
        # One AcaspiderItem per result page; each field is a parallel list
        # with one entry per search result on the page.
        item = AcaspiderItem()
        print('爬取第', self.startPage, '页')
        results_num = response.xpath('//span[@class="hitsLength"]/text()').extract()[0].replace(',', '')
        subjects = response.xpath('//ul[@class="rlist--inline facet__list--applied"]/li/span/text()').extract()[0]
        # Rebind `response` to the list of result nodes.
        response = response.xpath('//li[@class="search__item issue-item-container"]')
        item['title'] = []
        item['authors'] = []
        item['year'] = []
        item['typex'] = []
        item['subjects'] = []
        item['url'] = []
        item['abstract'] = []
        item['citation'] = []
        for res in response:
            # Best-effort extraction: each field falls back to ' ' when the
            # node is missing, keeping the parallel lists aligned.
            try:
                item['title'].append(self.remove_html(res.xpath('.//span[@class="hlFld-Title"]/a/text()').extract()[0]))
            except:
                item['title'].append(' ')
            try:
                item['authors'].append(self.merge_authors(res.xpath('.//ul[@aria-label="authors"]/li/a/span/text()').extract()))
            except:
                item['authors'].append(' ')
            try:
                item['year'].append(self.remove4year(self.remove_html(res.xpath('.//span[@class="dot-separator"]').extract()[0])))
            except:
                item['year'].append(' ')
            try:
                item['typex'].append(res.xpath('.//span[@class="epub-section__title"]/text()').extract()[0])
            except:
                item['typex'].append(' ')
            try:
                item['url'].append(res.xpath('.//a[@class="issue-item__doi dot-separator"]/text()').extract()[0])
            except:
                item['url'].append(' ')
            try:
                item['abstract'].append(self.remove_html(res.xpath('.//div[contains(@class, "issue-item__abstract")]/p').extract()[0]))
            except:
                item['abstract'].append(' ')
            try:
                item['citation'].append(res.xpath('.//span[@class="citation"]/span/text()').extract()[0])
            except:
                item['citation'].append(' ')
            item['subjects'].append(subjects)
        yield item
        logging.warning('$ ACM_Spider已爬取:' + str((self.startPage + 1) * self.pageSize))
        # Refresh the proxy pool once the configured delay has elapsed.
        # NOTE(review): timedelta.seconds wraps every 24h; total_seconds()
        # may have been intended — confirm.
        if (datetime.datetime.now() - self.startTime).seconds > self.proxyUpdateDelay:
            getProxy().main()
            print('已爬取:', (self.startPage + 1) * self.pageSize)
            logging.warning('$ ACM_Spider runs getProxy')
        # Follow the next results page. NOTE: `startPage < 1` limits the
        # crawl to the first two pages.
        if (self.startPage + 1) * self.pageSize < int(results_num) and self.startPage < 1:
            self.startPage += 1
            next_url = self.start_urls[0] + '&startPage=' + str(self.startPage) + '&pageSize=' + str(self.pageSize)
            yield scrapy.Request(
                next_url,
                callback=self.parse,
            )

    def remove_html(self, string):
        # Strip tags, then remove newlines and every space from the fragment.
        pattern = re.compile(r'<[^>]+>')
        return (re.sub(pattern, '', string).replace('\n', '').replace(' ', '')).strip()

    def remove4year(self, string):
        # Keep only the portion before the first ", " separator.
        return string.split(', ')[0]

    def merge_authors(self, au_list):
        # Join author names with commas (no trailing comma).
        au_str = ''
        for i in au_list:
            au_str += i + ','
        return au_str.strip(',')

    '''
    def parse(self, response):
        item = AcaspiderItem()
        print('爬取第', self.startPage, '页')
        results_num = response.xpath('//span[@class="hitsLength"]/text()').extract()[0].replace(',', '')
        item['title'] = list(map(self.remove_html, response.xpath('//span[@class="hlFld-Title"]/a/text()').extract()))
        item['authors'] = list(map(self.remove_html, response.xpath('//ul[@aria-label="authors"]').extract()))
        item['year'] = list(map(self.remove4year, list(map(self.remove_html, response.xpath('//span[@class="dot-separator"]').extract()))))
        item['typex'] = response.xpath('//span[@class="epub-section__title"]/text()').extract()
        item['subjects'] = response.xpath('//ul[@class="rlist--inline facet__list--applied"]/li/span/text()').extract() * len(item['title'])
        item['url'] = response.xpath('//a[@class="issue-item__doi dot-separator"]/text()').extract()
        item['abstract'] = list(map(self.remove_html, response.xpath('//div[@class="issue-item__abstract truncate-text trunc-done"]/p').extract()))
        item['citation'] = response.xpath('//span[@class="citation"]/span/text()').extract() # 动态变化
        yield item
        logging.warning('$ ACM_Spider已爬取:' + str((self.startPage + 1) * self.pageSize))
        if (datetime.datetime.now() - self.startTime).seconds > self.proxyUpdateDelay:
            getProxy().main()
            print('已爬取:', (self.startPage + 1) * self.pageSize)
            logging.warning('$ ACM_Spider runs getProxy')
        if (self.startPage + 1) * self.pageSize < int(results_num) and self.startPage < 1:
            self.startPage += 1
            next_url = self.start_urls[0] + '&startPage=' + str(self.startPage) + '&pageSize=' + str(self.pageSize)
            yield scrapy.Request(
                next_url,
                callback=self.parse,
            )
    '''
| StarcoderdataPython |
6429388 | <filename>projecteuler/6.py
#project euler problem 6
#author : itsjaysuthar
def sq(n):
    """Return n squared."""
    return n ** 2
# Project Euler 6: difference between the square of the sum and the
# sum of the squares of the first 100 natural numbers.
# Manual accumulation loops replaced with the builtin sum().
sum_of_squares = sum(sq(i) for i in range(1, 101))
square_of_sum = sq(sum(range(1, 101)))
print(square_of_sum - sum_of_squares)
| StarcoderdataPython |
6613772 |
import os
from flask import redirect
from get_ip import get_ip
from scan import load_services, check_local_services, check_network_machines
from dbmodel import Users, Properties, Services, app, db
from components.base.content_endpoints import debug, root, logout, login, scan_network, get_machines_service
from components.base.content_endpoints import manage_page_users, manage_page_services, loged_user
from components.base.content_endpoints import get_user_access, get_services, get_data, init_db, delete_entry
from components.base.content_endpoints import edit_entry, delete_entry
# from components.temp_monitor.api import get_temps, measure, clean, temp_api
@app.route('/debug')
def route_debug():
    # Thin wrapper delegating to the content-endpoint handler.
    return debug()
@app.route('/')
def route_root():
    # Landing page, delegated to the content-endpoint handler.
    return root()
@app.route('/scan/<string:username>')
@app.route('/scan')
def route_scan(username: str = 'guest'):
    # Network scan; 'guest' is used when no username segment is supplied.
    return scan_network(username)
@app.route('/services')
def return_active_services():
    # Report services running on the local machine, backed by the shared db.
    return check_local_services(db)
@app.route('/login', methods=['GET', 'POST'])
@app.route('/register', methods=['GET', 'POST'])
def route_login():
    # Both login and registration are handled by the same endpoint.
    return login()
@app.route('/logout')
def route_logout():
    # Delegate session teardown to the content-endpoint handler.
    return logout()
# TODO: check user access before redirecting to a service.
@app.route('/<string:service>')
def redirect_service(service: str):
    # Catch-all route: treat the first path segment as a service name.
    return get_machines_service(service)
@app.route('/manage', methods=['GET', 'POST'])
@app.route('/manage/users', methods=['GET', 'POST'])
def route_manage_users():
    # /manage defaults to the user-management page.
    return manage_page_users()
@app.route('/manage/services', methods=['GET', 'POST'])
def route_manage_sevices():
    # NOTE(review): function name has a typo ("sevices"); kept as-is since
    # url_for() callers may reference it.
    return manage_page_services()
@app.route('/delete/<string:table>/<int:id>', methods=['GET', 'POST'])
def route_delete_entry(table, id):
    # Delete the row `id` from `table`; delegated to the shared handler.
    return delete_entry(table, id)
@app.route('/edit/<string:table>/<int:id>', methods=['GET', 'POST'])
def route_edit_entry(table, id):
    # Edit the row `id` of `table`; delegated to the shared handler.
    return edit_entry(table, id)
#TODO see how to rediect port 9998 to this route
## Temp Monitor
# @app.route('/temps')
# def route_temps():
# return get_temps()
# @app.route('/measure')
# def route_measure():
# return measure()
# @app.route('/clean')
# def route_temps():
# return temps()
if __name__ == '__main__':
    db.create_all()
    init_db()
    # Compile SCSS once at startup. NOTE(review): os.popen does not wait for
    # completion or check the exit status — consider subprocess.run.
    os.popen("sass static/scss/style.scss:static/css/style.css")
    session = {}  # NOTE(review): unused local; Flask sessions are per-request.
    app.run(debug=True, host=get_ip(), port=2357)
3456946 | <reponame>idrisr/chessocrnb
from setuptools import setup, find_packages
# Packaging metadata for the chessocr package (chess board OCR).
setup(
    name='chessocr',
    version='0.0.1',
    description='chess board finder',
    author='idrisr',
    author_email='<EMAIL>',
    keywords=['chess', 'ocr'],
    # entry_points={'console_scripts': ['kaggle = kaggle.cli:main']},
    install_requires=[
        'fastai',
    ],
    packages=find_packages(),
    license='Apache 2.0')
| StarcoderdataPython |
9776753 | <reponame>Kartones/PythonAssorted
from doublex import *
from expects import *
from doublex_expects import *
from player import *
# for constants only
from character import *
# BDD spec (presumably mamba-style: description/context/it/before.each come
# from the spec runner — confirm) using doublex Stubs/Spies and expects
# matchers. `self` is the shared example context.
with description("Player"):
    with before.each:
        # Fresh stubbed character at position 0, level 1, attack range 2.
        with Stub() as self.position:
            self.position.current_position().returns(0)
        with Stub() as self.character:
            self.character.level = 1
            self.character.position = self.position
            self.character.attack_range().returns(2)
        self.player = Player(character=self.character)
    with context("starting elements"):
        with it("has a starting character"):
            expect(self.player.character).not_to(equal(None))
        with it("has no starting factions"):
            expect(self.player.factions).to(equal([]))
    with context("actions"):
        with it("can heal himself"):
            character = Spy()
            player = Player(character=character)
            player.heal(target=player, amount=100)
            expect(character.heal).to(have_been_called_with(100))
        with it("can receive damage"):
            character = Spy()
            player = Player(character=character)
            player.receive_damage(100)
            expect(character.receive_damage).to(have_been_called_with(100))
        with it("can damage other player"):
            with Stub() as another_player_position:
                another_player_position.current_position().returns(0)
            with Spy() as another_player_character:
                another_player_character.level = 1
                another_player_character.position = another_player_position
            another_player = Player(character=another_player_character)
            self.player.attack(another_player, 200)
            expect(another_player_character.receive_damage).to(have_been_called_with(200))
        with it("can join factions"):
            self.player.join_faction("a_faction")
            expect(self.player.factions).to(equal(["a_faction"]))
            self.player.join_faction("another_faction")
            expect(self.player.factions).to(equal(["a_faction", "another_faction"]))
        with it("can leave factions"):
            self.player.join_faction("a_faction")
            self.player.join_faction("another_faction")
            self.player.leave_faction("a_faction")
            expect(self.player.factions).to(equal(["another_faction"]))
        with it("can move to another position"):
            position = Spy()
            with Stub() as character:
                character.position = position
            player = Player(character=character)
            player.move(5)
            expect(position.move).to(have_been_called_with(5))
    with context("alliances"):
        with it("is not ally of a player without faction"):
            self.player.join_faction("a_faction")
            another_player = Player(character=Stub())
            expect(self.player.is_ally_of(another_player)).to(be_false)
        with it("is ally with a player of same faction"):
            self.player.join_faction("a_faction")
            another_player = Player(character=Stub())
            another_player.join_faction("a_faction")
            expect(self.player.is_ally_of(another_player)).to(be_true)
        with it("is not ally with a player of different faction"):
            self.player.join_faction("a_faction")
            another_player = Player(character=Stub())
            another_player.join_faction("another_faction")
            expect(self.player.is_ally_of(another_player)).to(be_false)
    with context("healing restrictions"):
        with it("can heal an ally"):
            self.player.join_faction("a_faction")
            another_player_character = Spy()
            another_player = Player(character=another_player_character)
            another_player.join_faction("a_faction")
            self.player.heal(target=another_player, amount=100)
            expect(another_player_character.heal).to(have_been_called_with(100))
        with it("cannot heal a non-ally"):
            self.player.join_faction("a_faction")
            another_player_character = Spy()
            another_player = Player(character=another_player_character)
            another_player.join_faction("another_faction")
            self.player.heal(target=another_player, amount=100)
            expect(another_player_character.heal).not_to(have_been_called)
    with context("damage restrictions and modifiers"):
        with it("cannot damage himself"):
            character = Spy()
            player = Player(character=character)
            player.attack(player, 100)
            expect(character.receive_damage).not_to(have_been_called)
        with it("If the target is 5 or more levels above the player, the damage applied will be reduced by 50%"):
            with Stub() as another_player_position:
                another_player_position.current_position().returns(0)
            with Spy() as another_player_character:
                another_player_character.level = 6
                another_player_character.position = another_player_position
            another_player = Player(character=another_player_character)
            self.player.attack(another_player, 100)
            expect(another_player_character.receive_damage).to(have_been_called_with(50))
        with it("If the target is 5 or more levels below the player, the damage applied will be boosted by 50%"):
            with Stub() as position:
                position.current_position().returns(0)
            with Stub() as character:
                character.level = 6
                character.position = position
                character.attack_range().returns(2)
            player = Player(character=character)
            with Stub() as another_player_position:
                another_player_position.current_position().returns(0)
            with Spy() as another_player_character:
                another_player_character.level = 1
                another_player_character.position = another_player_position
            another_player = Player(character=another_player_character)
            player.attack(another_player, 100)
            expect(another_player_character.receive_damage).to(have_been_called_with(150))
        with it("doesn't does damage if target is not in range"):
            with Stub() as position:
                position.current_position().returns(0)
            with Stub() as character:
                character.level = 1
                character.position = position
                character.attack_range().returns(2)
            player = Player(character=character)
            with Stub() as another_player_position:
                another_player_position.current_position().returns(5)
            with Spy() as another_player_character:
                another_player_character.level = 1
                another_player_character.position = another_player_position
            another_player = Player(character=another_player_character)
            player.attack(another_player, 100)
            expect(another_player_character.receive_damage).not_to(have_been_called)
        with it("doesn't does damage to faction allies"):
            self.player.join_faction("a_faction")
            with Stub() as another_player_position:
                another_player_position.current_position().returns(0)
            with Spy() as another_player_character:
                another_player_character.level = 1
                another_player_character.position = another_player_position
            another_player = Player(character=another_player_character)
            another_player.join_faction("a_faction")
            self.player.attack(another_player, 200)
            expect(another_player_character.receive_damage).not_to(have_been_called)
        with it("does does damage to other faction players"):
            self.player.join_faction("a_faction")
            with Stub() as another_player_position:
                another_player_position.current_position().returns(0)
            with Spy() as another_player_character:
                another_player_character.level = 1
                another_player_character.position = another_player_position
            another_player = Player(character=another_player_character)
            another_player.join_faction("another_faction")
            self.player.attack(another_player, 200)
            expect(another_player_character.receive_damage).to(have_been_called_with(200))
| StarcoderdataPython |
3280190 | <reponame>mathieurodic/hamsterdb
#
# Copyright (C) 2005-2015 <NAME> (<EMAIL>).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
# set the library path, otherwise hamsterdb.so/.dll is not found
import os
import sys
import distutils.util
# Platform-specific build suffix, e.g. ".linux-x86_64-2.7", used to locate
# the in-tree build directory of the extension module.
p = distutils.util.get_platform()
ps = ".%s-%s" % (p, sys.version[0:3])
sys.path.insert(0, os.path.join('build', 'lib' + ps))
sys.path.insert(1, os.path.join('..', 'build', 'lib' + ps))
import hamsterdb
class EnvironmentTestCase(unittest.TestCase):
    """Tests for the hamsterdb Environment API bindings.

    NOTE: Python 2 syntax throughout (old-style `except E, (a, b)` clauses
    and `0644` octal literals) — do not run under Python 3 without porting.
    """
    def remove_file(self, fname):
        # Helper: delete a leftover database file from a previous run.
        if os.path.isfile(fname):
            os.remove(fname)
    def testCreate(self):
        env = hamsterdb.env()
        self.remove_file("test.db")
        env.create("test.db")
        env.close()
        assert(os.path.isfile("test.db"))
        env.create("test.db", 0)
        env.close()
        env.create("test.db", 0, 0644)
        env.close()
        assert(os.path.isfile("test.db"))
    def testCreateExtended(self):
        # Create with extended parameter tuples ((param, value), terminator).
        env = hamsterdb.env()
        env.create("test.db", 0, 0644, \
            ((hamsterdb.HAM_PARAM_CACHESIZE, 20), (0, 0)))
        env.close()
    def testCreateExtendedNegative(self):
        # Malformed parameter tuples must raise TypeError.
        self.remove_file("test.db")
        env = hamsterdb.env()
        try:
            env.create("test.db", 0, 0644, ((1, 2, 3)))
        except TypeError:
            pass
        try:
            env.create("test.db", 0, 0644, (1, 2, 3))
        except TypeError:
            pass
        try:
            env.create("test.db", 0, 0644, (("1", 2)))
        except TypeError:
            pass
        try:
            env.create("test.db", 0, 0644, ((1, None)))
        except TypeError:
            pass
        try:
            env.create("test.db", 0, 0644, ((1, "None")))
        except TypeError:
            pass
    def testCreateInMemory(self):
        # In-memory environments accept "" or None and create no file.
        self.remove_file("test.db")
        env = hamsterdb.env()
        env.create("", hamsterdb.HAM_IN_MEMORY)
        env.close()
        env.create(None, hamsterdb.HAM_IN_MEMORY)
        env.close()
        assert(os.path.isfile("test.db") == False)
    def testCreateNegative(self):
        env = hamsterdb.env()
        try:
            env.create("test.db", 0, 0644, "asdf")
        except TypeError:
            pass
        try:
            env.create("test.db", 9999)
        except hamsterdb.error, (errno, strerror):
            assert hamsterdb.HAM_INV_PARAMETER == errno
    def testOpenNegative(self):
        self.remove_file("test.db")
        env = hamsterdb.env()
        try:
            env.open("test.db", 0, "asdf")
        except TypeError:
            pass
        try:
            env.open("test.db", hamsterdb.HAM_IN_MEMORY)
        except hamsterdb.error, (errno, strerror):
            assert hamsterdb.HAM_INV_PARAMETER == errno
    def testOpenExtended(self):
        env = hamsterdb.env()
        # TODO if i remove (0,0), a TypeError exception is thrown
        try:
            env.open("test.db", 0, \
                ((hamsterdb.HAM_PARAM_CACHESIZE, 20), (0, 0)))
            env.close()
        except hamsterdb.error, (errno, strerror):
            assert hamsterdb.HAM_FILE_NOT_FOUND == errno
    def testOpenExtendedNegative(self):
        # Malformed parameter tuples on open must raise TypeError.
        env = hamsterdb.env()
        try:
            env.open("test.db", 0, ((1, 2, 3)))
        except TypeError:
            pass
        try:
            env.open("test.db", 0, (1, 2, 3))
        except TypeError:
            pass
        try:
            env.open("test.db", 0, (("1", 2)))
        except TypeError:
            pass
        try:
            env.open("test.db", 0, ((1, None)))
        except TypeError:
            pass
        try:
            env.open("test.db", 0, ((1, "None")))
        except TypeError:
            pass
    def testCreateDb(self):
        # Databases created in an environment can be re-opened by id.
        env = hamsterdb.env()
        env.create("test.db")
        db = env.create_db(3)
        db.close()
        db = env.open_db(3)
        db.close()
        db = env.create_db(4)
        db.close()
        db = env.open_db(4)
        db.close()
        db = env.create_db(5)
        db.close()
        db = env.open_db(5)
        db.close()
        env.close()
    def testCreateDbParam(self):
        env = hamsterdb.env()
        env.create("test.db")
        db = env.create_db(3, hamsterdb.HAM_RECORD_NUMBER64)
        db.close()
        db = env.open_db(3)
        db.close()
        db = env.create_db(4, 0, ((hamsterdb.HAM_PARAM_KEYSIZE, 20), (0,0)))
        db.close()
        db = env.open_db(4)
        db.close()
        env.close()
    def testCreateDbNestedClose(self):
        # Closing the env before the db handle must be safe.
        env = hamsterdb.env()
        env.create("test.db")
        db = env.create_db(3)
        env.close()
        db.close()
    def testCreateDbNegative(self):
        env = hamsterdb.env()
        env.create("test.db")
        try:
            db = env.create_db(0)
            db.close()
        except hamsterdb.error, (errno, message):
            assert hamsterdb.HAM_INV_PARAMETER == errno
        try:
            db = env.create_db()
            db.close()
        except TypeError:
            pass
        env.close()
    def testOpenDbNegative(self):
        env = hamsterdb.env()
        env.create("test.db")
        db = env.create_db(1)
        db.close()
        try:
            db = env.open_db(5)
        except hamsterdb.error, (errno, message):
            assert hamsterdb.HAM_DATABASE_NOT_FOUND == errno
        try:
            db = env.open_db()
            db.close()
        except TypeError:
            pass
        env.close()
    def testRenameDb(self):
        env = hamsterdb.env()
        env.create("test.db")
        db = env.create_db(1)
        db.close()
        env.rename_db(1, 2)
        db = env.open_db(2)
        db.close()
        env.close()
    def testRenameDbNegative(self):
        env = hamsterdb.env()
        env.create("test.db")
        try:
            env.rename_db(1, 2)
        except hamsterdb.error, (errno, message):
            assert hamsterdb.HAM_DATABASE_NOT_FOUND == errno
        try:
            env.rename_db(1, 2, 3)
        except TypeError:
            pass
        try:
            env.rename_db()
        except TypeError:
            pass
        env.close()
    def testEraseDb(self):
        env = hamsterdb.env()
        env.create("test.db")
        db = env.create_db(1)
        db.close()
        env.erase_db(1)
        try:
            db = env.open_db(1)
        except hamsterdb.error, (errno, message):
            assert hamsterdb.HAM_DATABASE_NOT_FOUND == errno
        env.close()
    def testEraseDbNegative(self):
        env = hamsterdb.env()
        env.create("test.db")
        try:
            env.erase_db(1)
        except hamsterdb.error, (errno, message):
            assert hamsterdb.HAM_DATABASE_NOT_FOUND == errno
        try:
            env.erase_db()
        except TypeError:
            pass
        try:
            env.erase_db(3,4,5)
        except TypeError:
            pass
        env.close()
    def testGetDatabaseNames(self):
        # Database id list grows as databases are created.
        env = hamsterdb.env()
        env.create("test.db")
        n = env.get_database_names()
        assert n == ()
        db = env.create_db(1)
        db.close()
        n = env.get_database_names()
        assert n == (1,)
        db = env.create_db(2)
        db.close()
        n = env.get_database_names()
        assert n == (1, 2,)
        db = env.create_db(3)
        db.close()
        n = env.get_database_names()
        assert n == (1, 2, 3,)
        env.close()
    def testGetDatabaseNamesNegative(self):
        env = hamsterdb.env()
        env.create("test.db")
        try:
            n = env.get_database_names(4)
        except TypeError:
            pass
        env.close()
    def testFlush(self):
        env = hamsterdb.env()
        env.create("test.db")
        env.flush()
# Run the test suite only when executed as a script; the unguarded call
# previously ran (and sys.exit'ed) on any import of this module.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
11251756 | <filename>translate.py<gh_stars>0
#!/usr/bin/env python3
# Import sys first, outside the try: the except handler calls sys.exit(),
# which previously raised NameError if argparse/pandas failed to import
# before sys was bound.
import sys

try:
    import argparse
    import pandas as pd
except ImportError as e:
    sys.exit("Error: " + str(e) + "\nPlease install this module and retry.\n")
'''Basic setup stuff
Take 2 arguments:
infile : Input CSV file to be parsed
outfile : Output CSV file to be generated'''
parser = argparse.ArgumentParser()
parser.add_argument("--infile", help="Input csv file to be parsed", type=str)
parser.add_argument("--outfile", help="Output csv file to be generated", type=str)
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
'''The data that will be extracted has to be inserted into the output file at the end of this:
"## Costs"
"CostTitle","Date","Odo","CostTypeID","Notes","Cost","flag","idR","read","RemindOdo","RemindDate","isTemplate","RepeatOdo","RepeatMonths","isIncome","UniqueId"
"Fastag","2020-12-10 11:07","0","7","","35.0","0","0","1","0","2011-01-01","0","0","0","0","239"
"Fastag","2020-12-11 12:30","0","7","","75.0","0","0","1","0","2011-01-01","0","0","0","0","240"
"Car Wash","2020-11-30 09:30","20","4","","0.0","0","0","1","0","2011-01-01","0","0","0","0","241"
"## FavStations"
'''
# Read and store file contents for future insertion
outfile_text = []
with open(outfile, 'r') as f:
for line in f:
outfile_text.append(line)
# Column default values
cost_title = "Fastag"
odo = 0
cost_type_id = 7
flag = 0
idr = 0
read = 1
remind_odo = 0
remind_date = '2011-01-01'
is_template = 0
repeat_odo = 0
repeat_months = 0
is_income = 0
# We need to get the value of UniqueId to continue in sequence
start_id = int(outfile_text[outfile_text.index('"## FavStations"\n') - 1][-5:-2]) + 1
'''The input file is expected to be of type:
"Date","Activity","Source/Destination","Wallet Txn ID","Comment","Debit","Credit","Transaction Breakup","Status"
"25/12/2021 23:20:47","Paid for order","Toll Fastag Order #892115852","38071755187","","100","","","SUCCESS"
"25/12/2021 17:29:46","Paid for order","Toll Fastag Order #891524834","38064372089","","200","","","SUCCESS"
We're interested in the columns: Date, Source/Destination, Wallet Txn ID, and Debit'''
try:
df = pd.read_csv(infile, usecols = ['Date', 'Source/Destination', 'Wallet Txn ID', 'Debit'])
except FileNotFoundError as e:
print("Error: Infile not found at location specified")
sys.exit(e)
'''
df is now a Pandas dataframe that needs the following massaging:
Date column must be stripped of seconds and converted to timestamp
Source/Destination and Wallet Txn ID should be merged into a single column titled Notes
Debit column must be renamed to Cost'''
# Remove seconds and convert everything to timezone format
df['Date'] = pd.to_datetime(df['Date'], format='%d/%m/%Y %H:%M:%S')
df['Date'] = df['Date'].dt.strftime('%Y-%m-%d %H:%M')
# Concatenate columns and remove old ones
df["Notes"] = df["Source/Destination"].map(str) + " " + df["Wallet Txn ID"].map(str)
df.drop(columns=['Source/Destination', 'Wallet Txn ID'], inplace=True)
# Rename Debit to Cost
df = df.rename({'Debit':'Cost'}, axis='columns')
'''The output file is expected to be of type:
"CostTitle","Date" ,"Odo","CostTypeID","Notes" ,"Cost" ,"flag","idR","read","RemindOdo","RemindDate","isTemplate","RepeatOdo","RepeatMonths","isIncome","UniqueId"
"Fastag" ,"2021-12-25 23:20","0" ,"7" ,"Toll Fastag Order #892115852 38071755187","100.0","0" ,"0" ,"1" ,"0" ,"2011-01-01","0" ,"0" ,"0" ,"0" ,"29"
"Fastag" ,"2021-12-25 17:29","0" ,"7" ,"Toll Fastag Order #891524834 38064372089","200.0","0" ,"0" ,"1" ,"0" ,"2011-01-01","0" ,"0" ,"0" ,"0" ,"29"
Add the required columns
'''
# Move Notes to the right location
df.insert(loc=1, column='Notes', value=df.pop('Notes'))
# Insert columns with the values
df.insert(loc=0, column='CostTitle', value=cost_title)
df.insert(loc=2, column='Odo', value=odo)
df.insert(loc=3, column='CostTypeID', value=cost_type_id)
df.insert(loc=6, column='flag', value=flag)
df.insert(loc=7, column='idR', value=idr)
df.insert(loc=8, column='read', value=read)
df.insert(loc=9, column='RemindOdo', value=remind_odo)
df['RemindDate'] = pd.to_datetime(remind_date)
df.insert(loc=11, column='isTemplate', value=is_template)
df.insert(loc=12, column='RepeatOdo', value=repeat_odo)
df.insert(loc=13, column='RepeatMonths', value=repeat_months)
df.insert(loc=14, column='isIncome', value=is_income)
df.insert(loc=15, column='UniqueId', value=range(start_id, start_id + len(df)))
# Sort df by date
df = df.sort_values(by='Date')
# Create a list out of the dataframe
data = df.astype(str).values.flatten().tolist()
# List needs to be formatted with "", commas and newlines appropriately
newlines = [val for val in range(15, len(data), 16)]
for index in range(len(data)):
data[index] = '"' + data[index] + '"'
if index in newlines:
data[index] += '\n'
else:
data[index] += ','
# Now insert this into original text
outfile_text[outfile_text.index('"## FavStations"\n'):outfile_text.index('"## FavStations"\n')] = data
# Write to file
try:
with open(outfile, "w") as f:
f.write(''.join([val for val in outfile_text]))
except Exception as e:
sys.exit(e)
| StarcoderdataPython |
231460 | <reponame>skepickle/roll20-discord-bot
#import time
#import shutil
#import re
import os
import sys
import getopt
import discord
from discord.ext import commands
import asyncio
import roll20bridge
#import roll20sheet
import json
if __name__ != "__main__":
print("ERROR: bot.py must be executed as the top-level code.")
sys.exit(1)
# Options parsing
# TODO These options and configurations need to move into a manager class at some point
# Runtime configuration, seeded from environment variables and then
# overridden by CLI flags below. (Indentation appears stripped by extraction.)
discord_token = None
handout_url = None
handout_key = None
chrome_path = None
config = {
'command_prefix': '!',
'global_bot_admins': [],
'guilds': {}
}
"""
config = {
'command_prefix': char,
'global_bot_admins': [ str ],
'guilds': {
'__str:server_id__': {
'name': str,
'adminRole': str,
'gamemasterRole': str,
'playerRole': str,
'bridgeURL': str,
'bridgeKey': str,
'bridgeTimestamp': time and date,
'characters': Roll20Character[]
}
},
'players': {
'__str:user_id__': {
'guilds': [],
'characters': []
}
}
}
"""
if ('DISCORD_TOKEN' in os.environ):
discord_token = os.environ['DISCORD_TOKEN']
if ('CHROMEDRIVER_PATH' in os.environ):
chrome_path = os.environ['CHROMEDRIVER_PATH']
# TODO The following settings will be moved from ENVIRONMENT variables to stored(db?) configurations
if ('GLOBAL_BOT_ADMINS' in os.environ):
config['global_bot_admins'] = os.environ['GLOBAL_BOT_ADMINS'].split(':')
if ('ROLL20_JOURNAL' in os.environ):
handout_url = os.environ['ROLL20_JOURNAL']
if ('ROLL20_KEY' in os.environ):
handout_key = os.environ['ROLL20_KEY']
# CLI flags (-t/--token, -c/--chrome) override the environment variables above.
try:
opts, args = getopt.getopt(sys.argv[1:], "ht:c:", ["token=", "chrome="])
except getopt.GetoptError:
print('bot.py -t <Discord Token> -c <ChromeDriver Path>')
sys.exit(1)
for opt, arg in opts:
if opt == "-h":
print('bot.py -t <Discord Token> -c <ChromeDriver Path>')
sys.exit(1)
elif opt in ("-t", "--token"):
discord_token = arg
elif opt in ("-c", "--chrome"):
chrome_path = arg
# The bot instance; command handlers below register themselves via decorators.
bot = commands.Bot(command_prefix=config['command_prefix'], description="Roll20Bot provides access to select character sheets in Roll20 games", pm_help=True)
#print(bot.__dict__)
@bot.event
async def on_ready():
    """Log the bot identity and seed the in-memory guild config on connect."""
    print('Logged in as')
    print(bot.user.name)
    print(bot.user.id)
    print('------')
    # Record every guild the bot is already a member of.
    for guild in bot.guilds:
        print(" " + guild.name + ", " + str(guild.id))
        config['guilds'][guild.id] = {
            'name': guild.name
        }
    print('------')
@bot.event
async def on_guild_join(guild):
    """Register a newly joined guild in the in-memory config (idempotent)."""
    if guild.id not in config['guilds']:
        config['guilds'][guild.id] = {
            'name': guild.name,
            'adminsRole': '',
            'usersRole': ''
        }
    return
@bot.event
async def on_guild_remove(guild):
    """Drop a departed guild from the in-memory config (no-op if unknown)."""
    if guild.id in config['guilds']:
        config['guilds'].pop(guild.id, None)
    return
@bot.event
async def on_message(message):
    """Global message hook: ignore other bots, then hand off to command processing."""
    # Never react to bot-authored messages (avoids feedback loops).
    if message.author.bot:
        return
    # (Historical inline experiments for !test/!json/!sleep were commented out
    # here; that functionality now lives in the dedicated commands below.)
    await bot.process_commands(message)
@bot.command(name='characters')
async def _discordbot_characters(ctx):
    """Placeholder: will list a player's Roll20 characters (not yet implemented)."""
    pass
@bot.command(name='sleep')
async def _discordbot_sleep(ctx):
    """Debug command: wait one second, then confirm in-channel."""
    await asyncio.sleep(1)
    await ctx.channel.send('Done sleeping')
@bot.command(name='json')
async def _discordbot_json(ctx):
    """Fetch the Roll20 bridge handout and show one character's attribute names."""
    tmp = await ctx.channel.send('Retrieving Roll20 JSON {} ...'.format(handout_url))
    varJSON = roll20bridge.load_handout(chrome_path, handout_url, handout_key)
    if varJSON is None:
        await tmp.edit(content='Could not load Roll20 bridge handout at {}'.format(handout_url))
        return
    # NOTE(review): player and character names are hard-coded — debug aid only.
    # The [0:2000] slice keeps the reply within Discord's message-size cap.
    await tmp.edit(content='**attributes:**\n{}'.format(', '.join(varJSON['siliceous#5311']['Chirk Chorster']['attributes'].keys()))[0:2000])
####################
# Global Bot Administration
####################
# Global bot admins are defined at deployment-time of the bot, and cannot be modified live.
def is_global_bot_admin(ctx):
    """Return True when the message author is in the deployment-time global admin list."""
    return str(ctx.message.author) in config['global_bot_admins']
@bot.group(name='global', hidden=True, description='The global group of commands allow for administration of Roll20Bot globally')
async def _discordbot_global(ctx):
    """Entry point for the !global command group (private-message + global admin only)."""
    if ctx.guild is not None:
        await ctx.channel.send('The **global** configuration command-group must be initiated from a private-message, not a guild channel.')
    if not is_global_bot_admin(ctx):
        return
@_discordbot_global.command(name='test', description='DESCRIPTION BLAH BLAH', brief='print env vars', help='Print out server-side environment variables')
async def _discordbot_global_test(ctx, arg_1='1', arg_2='2'):
    """Debug command: dump server-side environment variables (PM + global admin only)."""
    if ctx.guild is not None:
        return
    if not is_global_bot_admin(ctx):
        return
    tmp = await ctx.channel.send('Calculating messages...')
    # (Dead message-counting experiment removed; its `counter` was never used.)
    await tmp.edit(content='{}'.format(os.environ))
@_discordbot_global.command(name='guilds', brief='List guilds using this bot', description='List guilds that are currently have Roll20Bot added.', help='This command does not accept any arguments.')
async def _discordbot_global_guilds(ctx):
    """Reply with the guilds currently tracked in config (PM + global admin only)."""
    if ctx.guild is not None:
        return
    if not is_global_bot_admin(ctx):
        return
    if not config['guilds']:
        s = 'There are no Discord guilds configured.'
    else:
        s = "The following Discord guilds are configured:\n"
        for key, value in config['guilds'].items():
            s += " " + str(key) + " => " + value['name'] + "\n"
    await ctx.channel.send(s)
####################
# Guild Bot Administration
####################
# Guild owners should always be able to modify these configurations
# If a role is defined for administrators, then the members of that role will also be able to modify guild configs
def is_guild_admin(ctx):
    """Return True when the invoking user may administer this guild's bot config.

    Only meaningful from a guild channel; currently only the guild owner
    qualifies. TODO: also honour a guild-configured admin role.
    """
    if ctx.guild is None:
        return False
    return ctx.message.author == ctx.guild.owner
@bot.group(name='guild', hidden=True)
async def _discordbot_guild(ctx):
    """Entry point for the !guild command group (guild channel + guild admin only)."""
    if ctx.guild is None:
        await ctx.channel.send('The **guild** configuration command-group must be initiated from a guild channel, not a private-message.')
    if not is_guild_admin(ctx):
        return
    if ctx.invoked_subcommand is None:
        await ctx.channel.send('Print !guild usage here.')
@_discordbot_guild.command(name='bridge')
async def _discordbot_guild_bridge(ctx, url=None, key=None):
    """Show or set this guild's Roll20 bridge handout URL/key.

    With no arguments, report the current configuration; otherwise store
    whichever of ``url``/``key`` was supplied. Guild channel + guild admin only.
    """
    if ctx.guild is None:
        return
    if not is_guild_admin(ctx):
        return
    guild_cfg = config['guilds'][ctx.guild.id]
    if url is None and key is None:
        s = 'Current guild bridge configuration:\n'
        s += '- url: '
        if 'bridgeURL' in guild_cfg:
            s += guild_cfg['bridgeURL']
        else:
            s += 'UNDEFINED'
        s += '\n- key: '
        if 'bridgeKey' in guild_cfg:
            s += guild_cfg['bridgeKey']
        else:
            s += 'UNDEFINED'
        await ctx.channel.send(s)
        return
    if url is not None:
        guild_cfg['bridgeURL'] = url
    if key is not None:
        guild_cfg['bridgeKey'] = key
####################
# Run the Bot
####################
# Blocking call: connects to Discord and dispatches events until shutdown.
# Requires a valid token from DISCORD_TOKEN or the -t/--token flag.
bot.run(discord_token)
| StarcoderdataPython |
8142554 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
# Attach the user-facing docstring for oneflow.arange at import time.
# NOTE(review): the docstring's original indentation appears to have been
# stripped by extraction; text kept byte-identical.
add_docstr(
oneflow.arange,
"""
oneflow.arange(start: int = 0, end, step: int = 1, dtype: Optional[oneflow._oneflow_internal.dtype] = None, device: Optional[Union[oneflow._oneflow_internal.device, str]] = None, placement: Optional[oneflow._oneflow_internal.placement] = None, sbp: Optional[Union[oneflow._oneflow_internal.sbp.sbp, List[oneflow._oneflow_internal.sbp.sbp]]] = None, requires_grad: bool = False)
Returns a 1-D tensor of size :math:`\\left\\lfloor \\frac{\\text{end} - \\text{start}}{\\text{step}} \\right\\rfloor + 1`
with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
the gap between two values in the tensor.
.. math::
\\text{out}_{i+1} = \\text{out}_i + \\text{step}.
Args:
start (int): the starting value for the set of points. Default: ``0``.
end (int): the ending value for the set of points
step (int): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
dtype(flow.dtype, optional): If `dtype` is not given, infer the `dtype` from the other input arguments. If any of start, end, or step are floating-point, the `dtype` is inferred to be the floating-point data type. Otherwise, the `dtype` is inferred to be `flow.int64`.
device(flow.device, optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor.
requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: `False`.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> y = flow.arange(0, 5)
>>> y
tensor([0, 1, 2, 3, 4], dtype=oneflow.int64)
""",
)
| StarcoderdataPython |
6622513 | from geopy.geocoders import Nominatim
geolocator = Nominatim(user_agent="wazeyes")
endereco=input("Digite um endereco com número e cidade. ")
resultado = str(geolocator.geocode(endereco)).split(",")
if resultado[0]!='None':
print("Endereço completo.: ", resultado)
print("Bairro............: ", resultado[1])
print("Cidade............: ", resultado[2])
print("Regiao............: ", resultado[3])
| StarcoderdataPython |
11259146 | <gh_stars>1-10
import numpy as np
import pymysql as pms
# PyMySQL-backed DAO for the prioritized-replay PRIORITY table.
# NOTE(review): SQL is built with str.format()/%-interpolation (injection-prone
# if table names or values ever come from untrusted input), credentials are
# hard-coded, and bare `except:` clauses swallow all errors and return None.
# Source indentation appears stripped by extraction; code kept byte-identical.
class oraDataFrame(object):
def __init__(self):
self.db = self.connectSQL()
self.cursor = self.db.cursor()
def connectSQL(self):
db = pms.Connect("localhost", "root", "Zhang715", "BCW")
return db
# Create the transition table; AUTO_INCREMENT ids start at 1.
def createPriorityTable(self, tableName):
sql = "CREATE TABLE {0}(\
ID INT PRIMARY KEY NOT NULL AUTO_INCREMENT,\
STATE_ SMALLINT(5),\
ACTION_ SMALLINT(5),\
REWARD_ SMALLINT(5),\
STATE_NEXT SMALLINT(5),\
PRIORITY FLOAT,\
TIME_STEP INT,\
IDX INT)AUTO_INCREMENT=1".format(tableName)
try:
self.cursor.execute(sql)
self.db.commit()
#print("Already Create TABLE PRIORITY")
except:
print("CANNOT CREATE TABLE PRIORITY")
self.db.rollback()
def insert(self, transition, priority, tableName):
"""
Insert transitions and priority
:param transition: a list of transitions
:param priority: a list of priorities
:param tableName:
:return:
"""
try:
for m, p in zip(transition,priority):
s = int(m[0])
a = int(m[1])
r = int(m[2])
s_ = int(m[3])
T = int(m[4])
idx = int(m[5])
insert = "INSERT INTO %s(\
STATE_,\
ACTION_,\
REWARD_,\
STATE_NEXT,\
PRIORITY,\
TIME_STEP,\
IDX)\
VALUES ('%d','%d','%d','%d','%d','%d','%d')" % (tableName, s, a, r, s_,p,T,idx)
try:
self.cursor.execute(insert)
#print("Already insert {0}".format(m))
except:
print("CANNOT INSERT {0}".format(m))
self.db.rollback()
self.db.commit()
except:
self.db.rollback()
# Delete a single row by primary-key ID.
def remove(self, tablename, id):
sql = "DELETE FROM {0} WHERE ID={1}".format(tablename, id)
try:
self.cursor.execute(sql)
self.db.commit()
except:
self.db.rollback()
print("Cannot delete...")
# Delete rows matching a (TIME_STEP, IDX) pair.
def remove_time_idx(self, tablename, time, idx):
sql = "DELETE FROM {0} WHERE TIME_STEP={1} AND IDX={2}".format(tablename, time, idx)
try:
self.cursor.execute(sql)
self.db.commit()
except:
self.db.rollback()
print("Cannot delete...")
# Overwrite an existing row (by ID) with a new transition and priority.
def cover(self, tablename, id, transition, priority):
s = int(transition[0])
a = int(transition[1])
r = int(transition[2])
s_ = int(transition[3])
T = int(transition[4])
idx = int(transition[5])
sql = "UPDATE {0} \
SET STATE_ = {1},\
ACTION_ = {2},\
REWARD_ = {3},\
STATE_NEXT = {4}, \
PRIORITY = {5}, \
TIME_STEP = {6},\
IDX = {7} WHERE ID = {8}".format(tablename, s, a, r, s_, priority, T, idx, id)
try:
self.cursor.execute(sql)
self.db.commit()
# print("Already Update PRIORITY of {0} transition".format(ID))
except:
self.db.rollback()
print("Update Failed!")
def updatePriority(self, priority, ID, tableName):
update = "UPDATE {0} SET PRIORITY = {1} WHERE ID = {2}".format(tableName, priority, ID)
try:
self.cursor.execute(update)
self.db.commit()
#print("Already Update PRIORITY of {0} transition".format(ID))
except:
self.db.rollback()
print("Update Failed!")
def updateTid(self, ID, time, idx, tablename):
update = "UPDATE {0} SET TIME_STEP = {1}, IDX={2} WHERE ID = {3}".format(tablename, time, idx, ID)
try:
self.cursor.execute(update)
self.db.commit()
# print("Already Update PRIORITY of {0} transition".format(ID))
except:
self.db.rollback()
print("Update Failed!")
# Aggregate helpers; each returns a scalar, or None on a swallowed error.
def sumPriority(self, tablename):
sql = "SELECT SUM(PRIORITY) FROM {0}".format(tablename)
try:
self.cursor.execute(sql)
self.db.commit()
priority_sum = self.cursor.fetchone()
return priority_sum[0]
except:
self.db.rollback()
def maxPriority(self, tablename):
sql = "SELECT MAX(PRIORITY) FROM {0}".format(tablename)
try:
self.cursor.execute(sql)
self.db.commit()
priority_max = self.cursor.fetchone()
return priority_max[0]
except:
self.db.rollback()
# Returns all (ID, PRIORITY) pairs.
def get_all_priority(self, tablename):
sql = "SELECT ID, PRIORITY FROM {0}".format(tablename)
try:
self.cursor.execute(sql)
self.db.commit()
priorities = self.cursor.fetchall()
return priorities
except:
self.db.rollback()
def get_row_number(self, tablename):
sql = "SELECT COUNT(*) FROM {0}".format(tablename)
try:
self.cursor.execute(sql)
self.db.commit()
rows = self.cursor.fetchone()
return rows[0]
except:
self.db.rollback()
# Returns the 6-field transition (STATE_, ACTION_, REWARD_, STATE_NEXT, TIME_STEP, IDX) for an ID.
def extract_transition(self, tablename, id):
sql = "SELECT STATE_, ACTION_, REWARD_, STATE_NEXT, TIME_STEP, IDX FROM {0} WHERE ID = {1}".format(tablename, id)
try:
self.cursor.execute(sql)
self.db.commit()
transition = self.cursor.fetchall()
return transition[0]
except:
self.db.rollback()
def min_idx(self, tablename):
sql = "SELECT MIN(ID) FROM {0}".format(tablename)
try:
self.cursor.execute(sql)
self.db.commit()
min_idx = self.cursor.fetchone()
return min_idx[0]
except:
self.db.rollback()
# Sum-tree over the DB-stored priorities: rebuilt on demand from the PRIORITY
# table, used for proportional sampling in prioritized experience replay.
# Source indentation appears stripped by extraction; code kept byte-identical.
class easySumTree(object):
def __init__(self):
self.tableName = 'PRIORITY'
self.db = oraDataFrame()
self.create_data_frame()
self.capacity = 0
self.tree = None
self.idframe = None
def create_data_frame(self):
self.db.createPriorityTable(self.tableName)
def add(self, p, transition, id=None):
"""
:param p:
:param transition:
:param id: when the memory is full, the new income transition will be cover the old transition starting from
first row
:return:
"""
if id is None:
self.db.insert(transition=transition, priority=p, tableName=self.tableName)
else:
self.db.cover(transition=transition,priority=p,id=id, tablename=self.tableName)
def remove(self, id):
self.db.remove(self.tableName, id)
def remove_tid(self, time, idx):
self.db.remove_time_idx(self.tableName,time,idx)
def update(self, id, priority):
self.db.updatePriority(priority=priority, ID=id, tableName=self.tableName)
def update_tid(self, id, time, idx):
self.db.updateTid(id, time, idx, self.tableName)
def max_priority(self):
return self.db.maxPriority(self.tableName)
# Rebuild the flat-array sum tree from the current table contents.
# Leaves live at indices [capacity-1, 2*capacity-1); idframe maps leaf -> DB ID.
def construct_tree(self):
self.capacity = self.db.get_row_number(self.tableName)
self.idframe = np.zeros(self.capacity, dtype=object)
self.tree = np.zeros(2 * self.capacity - 1)
priorities = self.db.get_all_priority(self.tableName)
for i in range(self.capacity):
id, p = priorities[i]
self.idframe[i] = id
tree_idx = i + self.capacity - 1
self.update_tree(tree_idx, p)
# Set a leaf's priority and propagate the delta up to the root (index 0).
def update_tree(self, tree_idx, p):
change = p - self.tree[tree_idx]
self.tree[tree_idx] = p
while tree_idx != 0:
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += change
def get_leaf(self, v):
"""
Tree structure and array storage:
Tree index:
0 -> storing priority sum
/ \
1 2
/ \ / \
3 4 5 6 -> storing priority for transitions
Array type for storing:
[0,1,2,3,4,5,6]
"""
parent_idx = 0
while True: # the while loop is faster than the method in the reference code
cl_idx = 2 * parent_idx + 1 # this leaf's left and right kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
id_idx = leaf_idx - self.capacity + 1
id = self.idframe[id_idx]
transition = self.db.extract_transition(self.tableName, id)
return id, self.tree[leaf_idx], transition
# Root of the tree holds the total priority mass; False if tree not built.
def total_p(self):
if self.tree is not None:
return self.tree[0]
else:
print("Tree is not built...")
return False
def clean_tree(self):
self.tree = None
self.idframe = None
self.capacity = None
# Prioritized experience replay memory on top of easySumTree (DB-backed).
# Source indentation appears stripped by extraction; code kept byte-identical.
class pER_Memory(object):
def __init__(self, max_capacity):
self.max_capacity = max_capacity
self.capacity = 0
self.tree = easySumTree()
self.epsilon = 0.01 # small amount to avoid zero priority
self.alpha = 0.6 # [0~1] convert the importance of TD error to priority
self.beta = 0.4 # importance-sampling, from initial value increasing to 1
self.beta_increment_per_sampling = 0.001
self.abs_err_upper = 1. # clipped abs error
self.data_pointer = 0
# New transitions enter with the current max priority; once full, old rows
# are overwritten in insertion order starting from the smallest ID.
def store(self, transition):
max_p = self.tree.max_priority()
if max_p is None:
max_p = self.abs_err_upper
if self.capacity < self.max_capacity:
self.tree.add([max_p], [transition]) # set the max p for new p
self.data_pointer += 1
self.capacity += 1
else:
# overlap the old transitions
self.tree.db.cover(self.tree.tableName, self.data_pointer, transition, max_p)
self.data_pointer += 1
if self.data_pointer >= self.max_capacity:
self.data_pointer = self.tree.db.min_idx(self.tree.tableName)
# Proportional sampling of n transitions with importance-sampling weights.
# NOTE(review): b_memory is allocated with 4 columns but extract_transition
# returns 6 fields — `b_memory[i, :] = data` looks shape-mismatched; confirm.
def sample(self, n):
# First, construct the sum tree
self.tree.construct_tree()
# Initial batch index, batch memory, IS weights
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), np.empty((n, 4)), np.empty((n, 1))
pri_seg = self.tree.total_p() / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p() # for later calculate ISweight
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p()
ISWeights[i, 0] = np.power(prob / min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
# clean the tree
self.tree.clean_tree()
return b_idx, b_memory, ISWeights
# Convert new TD errors into clipped priorities and write them back.
def batch_update(self, id, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(id, ps):
self.tree.update(ti, p)
def enlarge(self, k):
self.max_capacity += k
self.data_pointer = self.capacity # reset the data pointer back
def shrink(self, k, removal_id):
for id in removal_id:
self.tree.remove(id)
self.max_capacity -= k
self.capacity -= k
self.data_pointer -= k
# Manual smoke tests. gen_t(k) builds k synthetic 4-field transitions with
# priorities 0..k-1; the triple-quoted string blocks below are disabled test
# snippets kept for reference. Indentation appears stripped by extraction.
if __name__ == '__main__':
def gen_t(k):
transitions = []
priorities = []
if k == 0:
transition = np.hstack((0, 0, 0, 0))
transitions.append(transition)
priorities.append(k)
for i in range(k):
s = 1 + i
a = 2 + i
r = 3 + i
s_ = 4 + i
transition = np.hstack((s, a, r, s_))
transitions.append(transition)
priorities.append(i)
return transitions, priorities
#oraDataFrame -- all pass
#db = oraDataFrame()
#db.createPriorityTable('test')
"""
transition,ps = gen_t(20)
db.insertMemory(transition, ps,'test')
print(db.sumPriority('test'))
priorities = db.get_all_priority('test')
ID, p = priorities[0]
print(len(priorities))
print(ID)
print(p)
print(db.get_capacity('test'))
#db.updatePriority(6,2,'test')
# print()
"""
"""
transition, ps = gen_t(20)
st = easySumTree()
st.add(ps, transition)
st.construct_tree()
print(st.get_leaf(5))
st.clean_tree()
for i in range(4):
st.remove(i)
st.construct_tree()
print(st.get_leaf(5))
idx, batch_memory, transition = st.get_leaf(3)
print()
"""
"""
-- Pass
db = oraDataFrame()
t = db.extract_transition('PRIORITY', 3)
print()
"""
"""
transition, ps = gen_t(2)
db = oraDataFrame()
#db.remove('PRIORITY', 1)
db.insert(transition,ps,'PRIORITY')
"""
"""
db = oraDataFrame()
#db.createPriorityTable('test')
#print(db.maxPriority('test'))
transition, ps = gen_t(0)
db.insert(transition, ps, 'test')
"""
"""
db = oraDataFrame()
#transition, ps = gen_t(0)
#db.cover('PRIORITY', 4, transition[0], ps[0])
print(db.min_idx('PRIORITY'))
print()
"""
"""
memory = pER_Memory(30)
transition, ps = gen_t(35)
for t in transition:
memory.store(t) # pass
tree_idx, batch_memory, ISWeights = memory.sample(5)
memory.enlarge(5)
t_, p = gen_t(5)
for tt in t_:
memory.store(tt)
tt_, p_ = gen_t(1)
for ttt in tt_:
memory.store(ttt)
print()
"""
# Live statement: requires a running MySQL instance with the BCW schema.
db = oraDataFrame()
db.updatePriority(0.12,23,'PRIORITY') | StarcoderdataPython |
1826329 | from mycloud.drive.drive_client import DriveClient, NO_ENTRY, ROOT_ENTRY, EntryStats, EntryType
from mycloud.drive.exceptions import DriveNotFoundException, DriveFailedToDeleteException
from mycloud.drive.fs_drive_client import FsDriveClient
from mycloud.drive.common import ls_files_recursively
| StarcoderdataPython |
9693120 | <reponame>bmmalone/as-auto-sklearn<gh_stars>1-10
#! /usr/bin/env python3
import argparse
import itertools
import os
import pandas as pd
import misc.automl_utils as automl_utils
import misc.parallel as parallel
import as_asl.as_asl_command_line_utils as clu
import as_asl.as_asl_filenames as filenames
import as_asl.as_asl_utils as as_asl_utils
from as_asl.as_asl_ensemble import ASaslScheduler
from as_asl.validate import Validator
import misc.pandas_utils as pd_utils
import misc.utils as utils
import logging
import misc.logging_utils as logging_utils
logger = logging.getLogger(__name__)
# Evaluate one (scenario, use_random_forests) pair: validate the saved
# scheduler on the training set, then refit/validate across the CV folds in
# args.folds, returning a flat dict of normalized PAR1/PAR10/solved stats.
# NOTE(review): the missing-scheduler early return contains only "scenario",
# so those rows become NaN columns in the summary DataFrame — presumably
# intentional; confirm. Indentation appears stripped by extraction.
def get_stats_summary(scenario_use_random_forests, args, config):
scenario, use_random_forests = scenario_use_random_forests
msg = "Loading the scenario"
logger.info(msg)
scenario = automl_utils.load_scenario(scenario, return_name=False)
msg = "Loading scheduler"
logger.info(msg)
model_type = "asl.scheduler"
if use_random_forests:
model_type = "rf.scheduler"
scheduler_filename = filenames.get_model_filename(
config['base_path'],
model_type,
scenario=scenario.scenario,
note=config.get('note')
)
if not os.path.exists(scheduler_filename):
msg = "Could not find scheduler: {}".format(scheduler_filename)
logger.warning(msg)
ret = {
"scenario": scenario.scenario
}
return ret
scheduler = ASaslScheduler.load(scheduler_filename)
msg = "Creating schedules for training set"
logger.info(msg)
schedules = scheduler.create_schedules(scenario)
msg = "Stats on training set"
logger.info(msg)
validator = Validator()
training_stats = validator.validate(
schedules=schedules,
test_scenario=scenario,
show=False
)
# Normalize the PAR scores by the total number of instances.
training_stats.total = training_stats.timeouts + training_stats.solved
training_stats.oracle_par1 = training_stats.oracle_par1 / training_stats.total
training_stats.par10 = training_stats.par10 / training_stats.total
training_stats.par1 = training_stats.par1 / training_stats.total
total_oracle_par1 = 0.0
total_par1 = 0.0
total_par10 = 0.0
total_timeouts = 0
total_solved = 0
# Cross-validation: refit on each fold's training split, score its test split.
for fold in args.folds:
msg = "*** Fold {} ***".format(fold)
logger.info(msg)
testing, training = scenario.get_split(fold)
msg = "Refitting the model"
logger.info(msg)
scheduler = scheduler.refit(training)
msg = "Creating schedules for the test set"
logger.info(msg)
schedules = scheduler.create_schedules(testing)
validator = Validator()
stat = validator.validate(
schedules=schedules,
test_scenario=testing,
show=False
)
total_oracle_par1 += stat.oracle_par1
total_par1 += stat.par1
total_par10 += stat.par10
total_timeouts += stat.timeouts
total_solved += stat.solved
total = total_timeouts + total_solved
total_oracle_par1 = total_oracle_par1 / total
total_par10 = total_par10 / total
total_par1 = total_par1 / total
ret = {
"scenario": scenario.scenario,
"training_oracle_par1": training_stats.oracle_par1,
"training_par1": training_stats.par1,
"training_par10": training_stats.par10,
"training_timeouts": training_stats.timeouts,
"training_solved": training_stats.solved,
"total_oracle_par1": total_oracle_par1,
"total_par1": total_par1,
"total_par10": total_par10,
"total_timeouts": total_timeouts,
"total_solved": total_solved,
}
return ret
# CLI entry point: parse options, fan get_stats_summary out over every
# scenario directory in parallel, and write the combined stats CSV to `out`.
# Indentation appears stripped by extraction; code kept byte-identical.
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Summarize the evaluation metrics for all scenarios")
clu.add_config(parser)
parser.add_argument('out')
clu.add_cv_options(parser)
clu.add_num_cpus(parser)
automl_utils.add_blas_options(parser)
logging_utils.add_logging_options(parser)
args = parser.parse_args()
logging_utils.update_logging(args)
# see which folds to run
if len(args.folds) == 0:
args.folds = list(range(1,11))
clu.validate_folds_options(args)
required_keys = ['base_path', 'training_scenarios_path']
config = as_asl_utils.load_config(args.config, required_keys)
# Re-exec with BLAS thread limits if requested; the child does the real work.
if automl_utils.spawn_for_blas(args):
return
scenarios = utils.list_subdirs(config['training_scenarios_path'])
use_random_forests = [False] #, True]
it = itertools.product(scenarios, use_random_forests)
all_stats = parallel.apply_parallel_iter(
it,
args.num_cpus,
get_stats_summary,
args,
config
)
msg = "Combining statistics"
logger.info(msg)
all_stats_df = pd.DataFrame(all_stats)
pd_utils.write_df(
all_stats_df,
args.out,
create_path=True,
do_not_compress=True,
index=False
)
if __name__ == '__main__':
main()
| StarcoderdataPython |
4898690 | import numpy as np
import smdebug
import torch
import torch.nn as nn
import torchvision
from smdebug import modes
from torchvision import models
# list of ordered tensor names
activation_outputs = [
#'relu_ReLU_output_0',
"layer1.0.relu_0_output_0",
"layer1.1.relu_0_output_0",
"layer2.0.relu_0_output_0",
"layer2.1.relu_0_output_0",
"layer3.0.relu_0_output_0",
"layer3.1.relu_0_output_0",
"layer4.0.relu_0_output_0",
"layer4.1.relu_0_output_0",
]
gradients = [
#'gradient/relu_ReLU_output',
"gradient/layer1.0.relu_ReLU_output",
"gradient/layer1.1.relu_ReLU_output",
"gradient/layer2.0.relu_ReLU_output",
"gradient/layer2.1.relu_ReLU_output",
"gradient/layer3.0.relu_ReLU_output",
"gradient/layer3.1.relu_ReLU_output",
"gradient/layer4.0.relu_ReLU_output",
"gradient/layer4.1.relu_ReLU_output",
]
# function to prune layers
def prune(model, filters_list, trial, step):
    """Shrink a ResNet in place by removing the given convolution filters.

    Args:
        model: torch ResNet whose Conv2d/BatchNorm2d/Linear layers are rewritten in place.
        filters_list: iterable of ``(layer_name, channel, _)`` triples naming the
            output channels (filters) to drop, grouped per layer below.
        trial: smdebug trial used to read the recorded weight/bias/statistics tensors.
        step: training step at which the tensors are read (TRAIN mode).

    Returns:
        The same ``model`` object, with pruned layer shapes and weights.
    """
    # dict that has a list of filters to be pruned per layer
    filters_dict = {}
    for layer_name, channel, _ in filters_list:
        if layer_name not in filters_dict:
            filters_dict[layer_name] = []
        filters_dict[layer_name].append(channel)
    # NOTE(review): `counter` and `exclude` are never used below — confirm dead.
    counter = 0
    in_channels_dense = 0
    # Filters removed from the most recent pruned layer; consumed when adjusting
    # the *next* layer's input channels, then reset to None.
    exclude_filters = None
    in_channels = 3
    exclude = False
    # iterate over layers in the ResNet model
    for named_module in model.named_modules():
        layer_name = named_module[0]
        layer = named_module[1]
        # check if current layer is a convolutional layer
        if isinstance(layer, torch.nn.modules.conv.Conv2d):
            # remember the output channels of non-pruned convolution (needed for pruning first fc layer)
            in_channels_dense = layer.out_channels
            # create key to find right weights/bias/filters for the corresponding layer
            weight_name = "ResNet_" + layer_name + ".weight"
            # get weight values from last available training step
            weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
            # we need to adjust the number of input channels,
            # if previous covolution has been pruned
            # print( "current:", layer.in_channels, "previous", in_channels, layer_name, weight_name)
            if "conv1" in layer_name or "conv2" in layer_name:
                if layer.in_channels != in_channels:
                    layer.in_channels = in_channels
                    # conv weights are (filter, channel, kh, kw): drop pruned input channels (axis 1)
                    weight = np.delete(weight, exclude_filters, axis=1)
                    exclude_filters = None
            # if current layer is in the list of filters to be pruned
            if "conv1" in layer_name:
                # NOTE(review): str.strip("conv1") removes any of the characters
                # c/o/n/v/1 from both ends — presumably meant to isolate the
                # block id (e.g. "layer1.0."); confirm it behaves as intended.
                layer_id = layer_name.strip("conv1")
                for key in filters_dict:
                    if len(layer_id) > 0 and layer_id in key:
                        print(
                            "Reduce output channels for conv layer",
                            layer_id,
                            "from",
                            layer.out_channels,
                            "to",
                            layer.out_channels - len(filters_dict[key]),
                        )
                        # set new output channels
                        layer.out_channels = layer.out_channels - len(filters_dict[key])
                        # remove corresponding filters from weights and bias
                        # convolution weights have dimension: filter x channel x kernel x kernel
                        exclude_filters = filters_dict[key]
                        weight = np.delete(weight, exclude_filters, axis=0)
                        break
            # remember new size of output channels, because we need to prune subsequent convolution
            in_channels = layer.out_channels
            # set pruned weight and bias
            layer.weight.data = torch.from_numpy(weight)
        if isinstance(layer, torch.nn.modules.batchnorm.BatchNorm2d):
            # get weight values from last available training step
            weight_name = "ResNet_" + layer_name + ".weight"
            weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
            # get bias values from last available training step
            bias_name = "ResNet_" + layer_name + ".bias"
            bias = trial.tensor(bias_name).value(step, mode=modes.TRAIN)
            # get running_mean values from last available training step
            mean_name = layer_name + ".running_mean_output_0"
            mean = trial.tensor(mean_name).value(step, mode=modes.TRAIN)
            # get running_var values from last available training step
            var_name = layer_name + ".running_var_output_0"
            var = trial.tensor(var_name).value(step, mode=modes.TRAIN)
            # if current layer is in the list of filters to be pruned
            if "bn1" in layer_name:
                layer_id = layer_name.strip("bn1")
                for key in filters_dict:
                    if len(layer_id) > 0 and layer_id in key:
                        print(
                            "Reduce bn layer",
                            layer_id,
                            "from",
                            weight.shape[0],
                            "to",
                            weight.shape[0] - len(filters_dict[key]),
                        )
                        # remove corresponding filters from weights and bias
                        # convolution weights have dimension: filter x channel x kernel x kernel
                        exclude_filters = filters_dict[key]
                        weight = np.delete(weight, exclude_filters, axis=0)
                        bias = np.delete(bias, exclude_filters, axis=0)
                        mean = np.delete(mean, exclude_filters, axis=0)
                        var = np.delete(var, exclude_filters, axis=0)
                        break
            # set pruned weight and bias
            layer.weight.data = torch.from_numpy(weight)
            layer.bias.data = torch.from_numpy(bias)
            layer.running_mean.data = torch.from_numpy(mean)
            layer.running_var.data = torch.from_numpy(var)
            layer.num_features = weight.shape[0]
            in_channels = weight.shape[0]
        if isinstance(layer, torch.nn.modules.linear.Linear):
            # get weight values from last available training step
            weight_name = "ResNet_" + layer_name + ".weight"
            weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
            # get bias values from last available training step
            bias_name = "ResNet_" + layer_name + ".bias"
            bias = trial.tensor(bias_name).value(step, mode=modes.TRAIN)
            # prune first fc layer
            if exclude_filters is not None:
                # in_channels_dense is the number of output channels of last non-pruned convolution layer
                params = int(layer.in_features / in_channels_dense)
                # prune weights of first fc layer
                indexes = []
                for i in exclude_filters:
                    indexes.extend(np.arange(i * params, (i + 1) * params))
                    # NOTE(review): clamps the last slice to the weight width when a
                    # pruned filter index maps past the fc input — confirm intent.
                    if indexes[-1] > weight.shape[1]:
                        indexes.extend(np.arange(weight.shape[1] - params, weight.shape[1]))
                weight = np.delete(weight, indexes, axis=1)
                print(
                    "Reduce weights for first linear layer from",
                    layer.in_features,
                    "to",
                    weight.shape[1],
                )
                # set new in_features
                layer.in_features = weight.shape[1]
                exclude_filters = None
            # set weights
            layer.weight.data = torch.from_numpy(weight)
            # set bias
            layer.bias.data = torch.from_numpy(bias)
    return model
| StarcoderdataPython |
6445146 | """
Tests for query_schedule module.
"""
from os import sys, path
import unittest
import pytz
import datetime
import logging
import random
from api_etl.settings import __BASE_DIR__
from api_etl.extract_schedule import build_stop_times_ext_df
from api_etl.query_schedule import trip_scheduled_departure_time
logger = logging.getLogger(__name__)
class TestSchedulesModuleFunctions(unittest.TestCase):
    """Routine tests for the query_schedule module (requires the schedule DB)."""
    def test_trip_scheduled_departures_time(self):
        """
        Test function trip_scheduled_departure_time, that queries rdb and returns scheduled_departure_time given a trip_id and a station_id.
        Takes two arguments: trip_id, station
        Must accept both 7 and 8 digits stations
        trip_id: DUASN124705F01001-1_408049
        station: 8727605 // 8727605*
        departure_time '04:06:00' or False
        Test scenario:
        get random example for stop_times_ext dataframe
        check if we get same results from sql queries and from dataframe
        """
        # take 10 random elements from dataframe
        df = build_stop_times_ext_df()
        random_indexes = random.sample(range(0, len(df) - 1), 10)
        for random_index in random_indexes:
            trip_id = df.iloc[random_index]["trip_id"]
            station_id = df.iloc[random_index]["station_id"]
            departure_time = df.iloc[random_index]["departure_time"]
            # Check that query returns the same as what is written in csv file
            result1 = trip_scheduled_departure_time(trip_id, station_id)
            self.assertEqual(result1, departure_time)
            # False trip_id should return False
            result2 = trip_scheduled_departure_time(
                "false_trip_id", station_id)
            self.assertFalse(result2)
            # False station_id should return False
            result3 = trip_scheduled_departure_time(
                trip_id, "false_station_id")
            self.assertFalse(result3)
    # NOTE(review): disabled test kept verbatim as a string literal below.
    """
    def test_rdb_get_departure_times_of_day_json_list(self):
        paris_tz = pytz.timezone('Europe/Paris')
        today_paris = paris_tz.localize(datetime.datetime.now())
        today_paris_str = today_paris.strftime("%Y%m%d")
        json_list = rdb_get_departure_times_of_day_json_list(
            today_paris_str)
        # Test if all fields are present
        necessary_fields = ["scheduled_departure_day",
            "scheduled_departure_time", "trip_id", "station_id", "train_num"]
        json_keys_list = list(map(lambda x: list(x.keys()), json_list))
        for json_item_keys in json_keys_list:
            keys_all_exist = all(
                key in json_item_keys for key in necessary_fields)
            self.assertTrue(keys_all_exist)
        # Test if scheduled departure day is really on given day
    """
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
6562663 | <filename>frontends/pytorch/python/torch_mlir/torchscript/e2e_test/configs/native_torch.py<gh_stars>0
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import copy
from typing import Any
import torch
from torch_mlir.torchscript.e2e_test.framework import TestConfig, Trace, TraceItem
class NativeTorchTestConfig(TestConfig):
    """TestConfig that just runs the torch.nn.Module without compiling."""
    def __init__(self):
        super().__init__()
    def compile(self, program: torch.nn.Module) -> torch.nn.Module:
        # "Compilation" is the identity for the native-Torch reference config.
        return program
    def run(self, artifact: torch.nn.Module, trace: Trace) -> Trace:
        # Deepcopy the module so that a stateful program does not mutate the
        # original "compiled" artifact across runs (resolves the old TODO; the
        # module-level `copy` import existed for exactly this purpose).
        artifact = copy.deepcopy(artifact)
        result: Trace = []
        for item in trace:
            # Replay each traced call against the native module.
            outputs = getattr(artifact, item.symbol)(*item.inputs)
            if isinstance(outputs, torch.Tensor):
                # Normalize a bare tensor to a single-element output list.
                outputs = [outputs]
            result.append(
                TraceItem(symbol=item.symbol,
                          inputs=item.inputs,
                          outputs=outputs))
        return result
| StarcoderdataPython |
42109 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit test for status"""
import sys
import unittest
from os.path import dirname, realpath
from pm.status import Y, Conserved, PM, NA
class RoutineTest(unittest.TestCase):
    """Routine tests for the ordering and string conversion of pm.status objects."""
    def test_pm_status_gt_order(self):
        """Status should have a right order when doing gt-comparison"""
        # Chained comparisons short-circuit pairwise: each adjacent pair must be >.
        r = Y() > Conserved(aa_pm=0) > Conserved(nt_pm=8, aa_pm=0) \
            > PM(aa_pm=0) > PM(aa_pm=5, nt_pm=10) > NA() > NA(gaps=1)
        self.assertTrue(r)
        # A known aa_pm must rank above an unknown (None) one within NA.
        self.assertTrue(NA(aa_pm=9999999) > NA(aa_pm=None))
    def test_pm_status_lt_order(self):
        """Status should have a right order when doing lt-comparison"""
        r = NA() < PM() < Conserved(aa_pm=0) < Y()
        self.assertTrue(r)
    def test_pm_status_le_order(self):
        """Status should give right value when doing le-comparison"""
        r = (Y() <= Y()) and (Conserved(aa_pm=0) <= Conserved(aa_pm=0)) \
            and (PM() <= PM()) and (NA() <= NA())
        self.assertTrue(r)
    def test_pm_status_ge_order(self):
        """Status should give right value when doing ge-comparison"""
        r = (Y() >= Y()) and (Conserved(aa_pm=0) >= Conserved(aa_pm=0)) \
            and (PM() >= PM()) and (NA() >= NA())
        self.assertTrue(r)
    def test_pm_status_eq_order(self):
        """Status should give right value when doing eq-comparison"""
        r = (Y() == Y()) and (Conserved(aa_pm=0) == Conserved(aa_pm=0)) \
            and (PM() == PM()) and (NA() == NA())
        self.assertTrue(r)
    def test_pm_status_ne_order(self):
        """Status should give right value when doing ne-comparison"""
        r = NA() != PM() != Conserved(aa_pm=0) != Y()
        self.assertTrue(r)
    def test_convert_pm_status_to_string(self):
        """Convert status object to string"""
        input_pairs = ((Y(), 'Y'),
                       (Conserved(aa_pm=0), 'Conserved'),
                       (PM(), 'PM'),
                       (NA(), 'NA'))
        for status, str_status in input_pairs:
            self.assertEqual(str(status), str_status)
    def test_pm_status_orderablity(self):
        """pm.status should be orderable with gaps-removed but still consistent stdseq"""
        self.assertTrue(PM(stdseq='ATGATT', nt_pm=1) > NA(stdseq='ATG-ATT', gaps=1, nt_pm=1))
class ErrorTest(unittest.TestCase):
    """Error-path tests for pm.status comparisons."""
    def test_raise_TypeError1(self):
        """Comparing status operands with inconsistent stdseq values must raise TypeError."""
        with self.assertRaises(TypeError):
            # Chained comparison: the first incompatible pair raises.
            mismatch = (Y(stdseq='atg')
                        > Conserved(stdseq='tga', aa_pm=0)
                        > PM(stdseq='aaa')
                        > NA(stdseq='tgg'))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1652184 | from src.log import set_logging
from src import buienradar
import logging
from datetime import datetime
from src import export
def run():
    """Fetch the latest Buienradar data and export both datasets."""
    actual_readings, forecast_readings = buienradar.get_data()
    export.export_datasets(actual_readings, forecast_readings)
if __name__ == "__main__":
set_logging()
logging.info(f"started script at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
run()
| StarcoderdataPython |
1679352 | <reponame>wangdehuaj/jyrm<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: us-ascii -*-
import wx
import string
from options import AdvancedOptions
from message import Message
try:
from opentumblr.tumblr import Api
except ImportError:
from tumblr import Api
class Link(wx.Panel):
    """wx panel for composing and posting a Tumblr "link" post (Python 2 / wxPython)."""
    def __init__(self, parent, id):
        wx.Panel.__init__(self, parent, id)
        self.parent = parent
        # Shared Tumblr API handle owned by the parent window.
        self.api = self.parent.api
        # Advanced options (filled in by the AdvancedOptions dialog).
        self.tags = None
        self.date = None
        self.private = 0
        self.p_main = wx.Panel(self, -1)
        self.s_link_staticbox = wx.StaticBox(self.p_main, -1, "")
        self.b_options = wx.Button(self.p_main, -1, "Advanced options")
        self.l_addlink = wx.StaticText(self.p_main, -1, "Add a Link")
        self.l_name = wx.StaticText(self.p_main, -1, "Name (optional)")
        self.tc_name = wx.TextCtrl(self.p_main, -1, "")
        self.l_urllink = wx.StaticText(self.p_main, -1, "URL")
        self.tc_urllink = wx.TextCtrl(self.p_main, -1, "")
        self.l_description = wx.StaticText(self.p_main, -1, "Description (optional)")
        self.tc_description = wx.TextCtrl(self.p_main, -1, "", style=wx.TE_MULTILINE)
        self.b_create = wx.Button(self.p_main, -1, "Create post")
        self.b_cancel = wx.Button(self.p_main, -1, "Cancel")
        self.Bind(wx.EVT_BUTTON, self.AdvancedOptions, id = self.b_options.GetId())
        self.Bind(wx.EVT_BUTTON, self.OnCreateLink, id = self.b_create.GetId())
        self.Bind(wx.EVT_BUTTON, self.OnCancel, id = self.b_cancel.GetId())
        self.__set_properties()
        self.__do_layout()
    def __set_properties(self):
        # Fonts and colors for the form widgets.
        self.l_addlink.SetFont(wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
        self.l_name.SetFont(wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
        self.tc_name.SetBackgroundColour(wx.Colour(255, 255, 255))
        self.l_urllink.SetFont(wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Lucida Grande"))
        self.l_description.SetFont(wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Lucida Grande"))
        #self.tc_description.SetMinSize((245, 203))
        self.tc_description.SetBackgroundColour(wx.Colour(255, 255, 255))
        self.p_main.SetBackgroundColour(wx.Colour(255, 255, 255))
    def __do_layout(self):
        # Vertical form layout with the create/cancel buttons side by side.
        s_main = wx.BoxSizer(wx.VERTICAL)
        s_link = wx.StaticBoxSizer(self.s_link_staticbox, wx.VERTICAL)
        s_buttons = wx.BoxSizer(wx.HORIZONTAL)
        s_link.Add(self.b_options, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 2)
        s_link.Add(self.l_addlink, 0, wx.ALL, 2)
        s_link.Add(self.l_name, 0, wx.ALL, 2)
        s_link.Add(self.tc_name, 0, wx.ALL|wx.EXPAND, 2)
        s_link.Add(self.l_urllink, 0, wx.ALL, 2)
        s_link.Add(self.tc_urllink, 0, wx.ALL|wx.EXPAND, 2)
        s_link.Add(self.l_description, 0, wx.ALL, 2)
        s_link.Add(self.tc_description, 1, wx.ALL|wx.EXPAND, 2)
        s_buttons.Add(self.b_create, 1, wx.LEFT|wx.EXPAND, 2)
        s_buttons.Add(self.b_cancel, 1, wx.LEFT|wx.EXPAND, 2)
        s_link.Add(s_buttons, 0, wx.ALL|wx.EXPAND, 2)
        self.p_main.SetSizer(s_link)
        s_main.Add(self.p_main, 1, wx.ALL|wx.EXPAND, 10)
        self.SetSizer(s_main)
        s_main.Fit(self)
    def AdvancedOptions(self, evt):
        """Open the advanced-options dialog and record tags/date/visibility."""
        self.options = AdvancedOptions(self, -1)
        self.options.Center()
        if self.options.ShowModal() == wx.ID_OK:
            self.tags = self.options.tc_tag.GetValue().encode('utf-8')
            # Tumblr expects comma-separated tags.
            self.tags = string.replace(self.tags,' ', ',')
            self.date = self.options.tc_date.GetValue().encode('utf-8')
            if self.options.cb_publishing.GetValue() == 'private':
                self.private = 1
            else:
                self.private = 0
            if self.options.cb_publishing.GetValue() == 'add to queue':
                self.date = 'on.2'
        self.options.Destroy()
    def OnCreateLink(self, evt):
        """Validate the form and submit the link post via the Tumblr API."""
        self.name = self.tc_name.GetValue().encode('utf-8')
        self.urllink = self.tc_urllink.GetValue()
        self.description = self.tc_description.GetValue().encode('utf-8')
        if self.urllink:
            self.api = Api(self.api.name, self.api.email, self.api.password, self.private, self.date, self.tags)
            # NOTE(review): bare except silently swallows any API failure and
            # only prints a message — consider narrowing and surfacing errors.
            try:
                self.post = self.api.write_link(self.name,self.urllink,self.description)
            except:
                print "posteado en el blog primario"
            self.OnCancel(self)
        else:
            Message('URL is required')
    def OnCancel(self, evt):
        """Cancel and close the link-post panel."""
        self.parent.SetPanel(None)
| StarcoderdataPython |
8066099 | <gh_stars>1-10
import setuptools

# Use the README as the long description shown on PyPI.
# Read it with an explicit encoding: without it, open() falls back to the
# locale's preferred encoding and the build breaks on non-UTF-8 systems.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()

setuptools.setup(
    name='python-digitalocean-ssh',
    version='0.0.8',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Combine DO droplets with your ssh configuration',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/hytromo/digital-ocean-to-ssh-config',
    packages=setuptools.find_packages(),
    install_requires=[
        'python-digitalocean',
    ],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
)
| StarcoderdataPython |
11328513 | <filename>pystacia/api/func.py
# coding: utf-8
# pystacia/api/func.py
# Copyright (C) 2011-2012 by <NAME>
# This module is part of Pystacia and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from threading import Lock
from six import b as bytes_, text_type
from pystacia.util import memoized
from pystacia.compat import pypy
from pystacia.api.metadata import data as metadata
@memoized
def get_c_method(api_type, method, throw=True):
    """Resolve and annotate the C function for (api_type, method).

    Looks up the symbol name via the metadata table, attaches argtypes/restype
    to the ctypes function, and returns ``(method_name, c_method)``.
    With ``throw=False``, returns ``False`` when the DLL lacks the symbol.
    Memoized, so annotation happens once per (api_type, method, throw).
    """
    type_data = metadata[api_type]
    method_name = type_data['format'](method)
    if not throw and not hasattr(get_dll(False), method_name):
        return False
    c_method = getattr(get_dll(False), method_name)
    msg = formattable('Annoting {0}')
    logger.debug(msg.format(method_name))
    method_data = type_data['symbols'][method]
    argtypes = method_data[0]
    # Types with an implicit first argument (e.g. the wand handle) declare it
    # under the 'arg' key; prepend it to the per-method argtypes.
    if 'arg' in type_data:
        argtypes = (type_data['arg'],) + argtypes
    c_method.argtypes = argtypes
    # Per-method result type (second tuple element) overrides the type default.
    restype = type_data.get('result', None)
    if len(method_data) == 2:
        restype = method_data[1]
    c_method.restype = restype
    return method_name, c_method
if pypy:
__lock = Lock()
def handle_result(result, restype, args, argtypes):
    """Convert a raw ctypes result to a Python value, raising on C-level errors.

    A falsy MagickBoolean result signals failure: the wand's exception text is
    fetched, raised as PystaciaException, and its C buffer is released.
    """
    if restype == c_char_p:
        result = native_str(result)
    if restype in (c_uint, c_ssize_t, c_size_t):
        result = int(result)
    elif restype == enum and not jython:
        result = result.value
    elif restype == MagickBoolean and not result:
        exc_type = ExceptionType()
        # Pick the exception accessor from the wand type of the first argument.
        # NOTE(review): if argtypes[0] is neither MagickWand_p nor PixelWand_p,
        # `klass` is unbound and the next line raises NameError — confirm all
        # MagickBoolean calls take one of these two wand types.
        if argtypes[0] == MagickWand_p:
            klass = 'magick'
        elif argtypes[0] == PixelWand_p:
            klass = 'pixel'
        description = c_call(klass, 'get_exception', args[0], byref(exc_type))
        try:
            raise PystaciaException(native_str(string_at(description)))
        finally:
            # Always free the C-allocated description buffer.
            c_call('magick_', 'relinquish_memory', description)
    return result
def prepare_args(c_method, obj, args):
    """Coerce Python-level arguments into ctypes-compatible call values.

    Returns a ``(keep, call_args, should_lock)`` triple: ``keep`` retains the
    coerced Python objects so the GC cannot collect them during the C call,
    ``call_args`` holds the values actually passed to the C function (with
    Resources unwrapped to their raw handles), and ``should_lock`` is True
    when a raw ``c_char_p`` string pointer is involved.
    """
    retained = []
    call_args = []
    should_lock = False
    # A Resource receiver becomes the implicit first argument.
    if isinstance(obj, Resource):
        args = (obj,) + args
    for value, arg_type in zip(args, c_method.argtypes):
        if arg_type == c_char_p:
            should_lock = True
            if isinstance(value, text_type):
                value = bytes_(value)
        elif arg_type in (c_size_t, c_ssize_t, c_uint):
            value = int(value)
        elif arg_type == PixelWand_p and not isinstance(value, PixelWand_p):
            value = color_cast(value)
        retained.append(value)
        call_args.append(value.resource if isinstance(value, Resource) else value)
    return retained, call_args, should_lock
def c_call(obj, method, *args, **kw):
    """Dispatch a typed call into the ImageMagick C library.

    ``obj`` is either a Resource (its ``_api_type`` selects the symbol table
    and the object becomes the implicit first argument) or an api-type string.
    Pass ``__init=False`` to skip ensuring the DLL is loaded.
    """
    if hasattr(obj.__class__, '_api_type'):
        api_type = obj.__class__._api_type
    else:
        api_type = obj
    msg = formattable('Translating method {0}.{1}')
    logger.debug(msg.format(api_type, method))
    method_name, c_method = get_c_method(api_type, method)
    try:
        init = kw.pop('__init')
    except KeyError:
        init = True
    if init:
        get_dll()
    # if objects are casted here and then
    # there is only their resource passed
    # there is a risk that GC will collect
    # them and __del__ will be called between
    # driving Imagick to SIGSEGV
    # lets keep references to them
    keep_, args_, should_lock = prepare_args(c_method, obj, args)
    msg = formattable('Calling {0}')
    logger.debug(msg.format(method_name))
    # PyPy: serialize calls involving raw string pointers (see module __lock).
    # NOTE(review): if c_method raises, the lock is never released — consider
    # a try/finally; confirm whether c_method can raise here.
    if pypy and should_lock:
        __lock.acquire()
    result = c_method(*args_)
    if pypy and should_lock:
        __lock.release()
    # References kept only for the duration of the C call; drop them now.
    del keep_
    return handle_result(
        result, c_method.restype, args_, c_method.argtypes)
from pystacia.util import PystaciaException
from pystacia.compat import native_str, formattable, jython
from pystacia.api import get_dll, logger
from pystacia.api.type import (
MagickWand_p, PixelWand_p, MagickBoolean, ExceptionType, enum)
from pystacia.api.compat import (
c_char_p, c_size_t, c_uint, string_at, c_ssize_t, byref)
from pystacia.common import Resource
from pystacia.color import cast as color_cast
| StarcoderdataPython |
6562854 | from . import Project
from .nodejs import NodejsProject
from ..util import needs_update
from ..tmux import Tmux
class LaravelProject(Project):
    """Project driver for Laravel apps (artisan), optionally delegating
    build/test/lint to an embedded Node.js project when package.json exists."""
    description = 'Laravel'
    def __init__(self, cwd):
        super().__init__(cwd)
        # Front-end assets are managed through npm when package.json is present.
        if self.exists('package.json'):
            self.npm = NodejsProject(cwd)
        else:
            self.npm = None
        self.can_build = self.npm and self.npm.can_build
        self.can_test = (self.exists('tests') or
                         (self.npm and self.npm.can_test))
        self.can_lint = self.npm and self.npm.can_lint
    @classmethod
    def find(cls):
        # A Laravel project root is identified by its artisan script.
        return cls.find_containing('artisan')
    def ensure_deps(self):
        """Run `composer install` when composer.json is newer than the vendor dir."""
        # Fix: Composer installs into 'vendor', not 'vender'; the old typo made
        # the freshness check look at a directory that never exists.
        if needs_update(self.path('composer.json'), self.path('vendor')):
            self.cmd('composer install')
    def run(self, env):
        """Serve the app; with an npm runner, run both in tmux panes."""
        if self.npm and self.npm.can_run:
            tmux = Tmux(self.cwd)
            with tmux.pane():
                self.ensure_deps()
                self.cmd('php artisan serve')
            with tmux.pane():
                self.npm.run(env)
            tmux.run()
        else:
            self.ensure_deps()
            self.cmd('php artisan serve')
    def build(self, env):
        """Delegate to npm when it can build, else to the base implementation."""
        if self.npm and self.npm.can_build:
            self.npm.build(env)
        else:
            super().build(env)
    def test(self):
        """Run phpunit (if tests/ exists) and/or the npm tests."""
        if self.exists('tests'):
            self.ensure_deps()
            self.cmd('./vendor/bin/phpunit')
        if self.npm and self.npm.can_test:
            self.npm.test()
        if not self.can_test:
            super().test()
    def lint(self, fix):
        """Delegate linting to npm when available."""
        if self.npm and self.npm.can_lint:
            self.npm.lint(fix)
        else:
            super().lint(fix)
| StarcoderdataPython |
209852 | <filename>pushservice/src/Services/SMTP.py
from __future__ import print_function
from __future__ import absolute_import
#######################################################################
#
# Push Service for Enigma-2
# Coded by betonme (c) 2012 <<EMAIL>ank(at)gmail.com>
# Support: http://www.i-have-a-dreambox.com/wbb2/thread.php?threadid=167779
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#######################################################################
# Config
from Components.config import config, NoSave, ConfigText, ConfigNumber, ConfigYesNo, ConfigPassword
# Plugin internal
from Plugins.Extensions.PushService.__init__ import _
from Plugins.Extensions.PushService.ServiceBase import ServiceBase
# Plugin specific
from .mail.mail import Message, sendmail
# Constants
MAIL_HEADER_TEMPLATE = _("{box:s} {name:s}: {plugin:s}: {subject:s}")
MAIL_BODY_TEMPLATE = _("{body:s}\n\n") \
+ _("Provided by Dreambox Plugin {name:s} {version:s} - {plugin:s}\n") \
+ _("C 2012 by betonme @ IHAD\n") \
+ _("Support {support:s}\n") \
+ _("Donate {donate:s}")
class SMTP(ServiceBase):
    """PushService backend that delivers notifications by e-mail over SMTP."""
    ForceSingleInstance = False
    def __init__(self):
        # Is called on instance creation
        ServiceBase.__init__(self)
        # Active Twisted connectors, kept so cancel() can abort in-flight sends.
        self.connectors = []
        # Default configuration
        self.setOption('smtpserver', NoSave(ConfigText(default="smtp.server.com", fixed_size=False)), _("SMTP Server"))
        self.setOption('smtpport', NoSave(ConfigNumber(default=587)), _("SMTP Port"))
        self.setOption('smtpssl', NoSave(ConfigYesNo(default=True)), _("SMTP SSL"))
        self.setOption('smtptls', NoSave(ConfigYesNo(default=True)), _("SMTP TLS"))
        self.setOption('timeout', NoSave(ConfigNumber(default=30)), _("Timeout"))
        self.setOption('username', NoSave(ConfigText(default="user", fixed_size=False)), _("User name"))
        self.setOption('password', NoSave(ConfigPassword(default="password")), _("Password"))
        self.setOption('mailfrom', NoSave(ConfigText(default="<EMAIL>", fixed_size=False)), _("Mail from"))
        self.setOption('mailto', NoSave(ConfigText(fixed_size=False)), _("Mail to or leave empty (From will be used)"))
    def push(self, callback, errback, pluginname, subject, body="", attachments=None):
        """Send one notification mail asynchronously.

        Fix: `attachments` defaulted to a mutable `[]`; use None instead
        (equivalent behavior — the falsy check below skips attaching).
        """
        from Plugins.Extensions.PushService.plugin import NAME, VERSION, SUPPORT, DONATE
        # Set SMTP parameters
        mailconf = {}
        mailconf["host"] = self.getValue('smtpserver')
        mailconf["port"] = self.getValue('smtpport')
        mailconf["username"] = self.getValue('username')
        mailconf["password"] = self.getValue('password')
        mailconf["ssl"] = self.getValue('smtpssl')
        mailconf["tls"] = self.getValue('smtptls')
        mailconf["timeout"] = self.getValue('timeout')
        # Create message object
        from_addr = self.getValue('mailfrom')
        to_addrs = [self.getValue('mailto') or from_addr]
        # Prepare message; an empty body falls back to the subject text.
        if body == "":
            body = subject
        subject = MAIL_HEADER_TEMPLATE.format(**{'box': config.pushservice.boxname.value, 'name': NAME, 'plugin': pluginname, 'subject': subject})
        body = MAIL_BODY_TEMPLATE.format(**{'body': str(body), 'name': NAME, 'version': VERSION, 'plugin': pluginname, 'support': SUPPORT, 'donate': DONATE})
        message = Message(from_addr, to_addrs, subject, body) #TODO change mime="text/plain", charset="utf-8")
        if attachments:
            for attachment in attachments:
                message.attach(attachment) #TODO change mime=None, charset=None, content=None):
        # Send message
        print(_("PushService PushMail: Sending message: %s") % subject)
        deferred, connector = sendmail(mailconf, message)
        # Add callbacks
        deferred.addCallback(callback)
        deferred.addErrback(errback)
        self.connectors.append(connector)
    def cancel(self):
        """Abort any in-flight sends by disconnecting their connectors."""
        # Cancel push
        if self.connectors:
            for connector in self.connectors:
                connector.disconnect()
| StarcoderdataPython |
3291838 | <reponame>OpenSCAP/oval-graph
import json
import subprocess
import time
from ..test_tools import TestTools
from .command_constants import ARF_TO_JSON, COMMAND_START, TEST_ARF_XML_PATH
def run_commad_and_save_output_to_file(parameters):
    """Run `parameters` as a subprocess, capture stdout to a temp .json file, return its path."""
    output_path = str(TestTools.get_random_path_in_tmp()) + '.json'
    with open(output_path, 'w+') as stream:
        subprocess.check_call(parameters, stdout=stream)
    return output_path
def test_command_arf_to_json():
    """Capture the arf-to-json output and compare it with the expected JSON."""
    path = str(TestTools.get_random_path_in_tmp()) + '.json'
    out = subprocess.check_output(ARF_TO_JSON)
    with open(path, "w+") as data:
        # write() the decoded payload in one call; the old writelines() on a
        # str iterated it character by character.
        data.write(out.decode('utf-8'))
    TestTools.compare_results_json(path)
def test_command_arf_to_json_is_tty():
    """The command must still emit valid JSON when stdout is redirected to a file."""
    output_path = run_commad_and_save_output_to_file(ARF_TO_JSON)
    TestTools.compare_results_json(output_path)
def test_command_parameter_all():
    """`arf-to-json --all` with a match-everything pattern must report every rule."""
    command = [*COMMAND_START, "arf-to-json", "--all", TEST_ARF_XML_PATH, "."]
    output_path = run_commad_and_save_output_to_file(command)
    with open(output_path, "r") as stream:
        rules = json.load(stream)
    assert len(rules) == 184
def test_command_parameter_all_and_show_failed_rules():
    """With --show-failed-rules, only the single failing package rule is reported."""
    command = [
        *COMMAND_START,
        'arf-to-json',
        '--all',
        '--show-failed-rules',
        TEST_ARF_XML_PATH,
        r'_package_\w+_removed',
    ]
    output_path = run_commad_and_save_output_to_file(command)
    with open(output_path, "r") as stream:
        rules = json.load(stream)
    assert len(rules) == 1
def test_command_with_parameter_out():
    """Re-running with -o pointing at an existing file must overwrite it with 4 rules."""
    command = [
        *COMMAND_START,
        'arf-to-json',
        '--all',
        TEST_ARF_XML_PATH,
        r'_package_\w+_removed',
    ]
    output_path = run_commad_and_save_output_to_file(command)
    # Brief pause between the first run and the -o rerun over the same file.
    time.sleep(5)
    command.append('-o' + output_path)
    subprocess.check_call(command)
    with open(output_path, "r") as stream:
        rules = json.load(stream)
    assert len(rules) == 4
| StarcoderdataPython |
6494198 | <reponame>Argmaster/pygerber
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from pygerber.drawing_state import DrawingState
from pygerber.constants import Unit
from pygerber.tokens import token as tkn
from .validator import Validator
INCH_TO_MM_RATIO = 25.4
class Coordinate(Validator):
    """Validator that parses a Gerber coordinate string and normalizes it to millimeters."""

    def __init__(self) -> None:
        super().__init__(default=None)

    def __call__(self, token: tkn.Token, state: DrawingState, value: str) -> Any:
        parsed = self.parse(state, value)
        if parsed is None:
            return None
        return self.ensure_mm(state, parsed)

    def parse(self, state: DrawingState, value: str) -> Any:
        # Missing coordinates stay None; others go through the state's parser.
        return None if value is None else state.parse_co(value)

    def ensure_mm(self, state: tkn.Token, value: float):
        # Values in inches are converted; millimeter values pass through.
        return value * INCH_TO_MM_RATIO if state.unit == Unit.INCHES else value
class UnitFloat(Coordinate):
    """Optional float validator: converts inch values to millimeters, or yields a default."""

    def __init__(self, default: float = None) -> None:
        self.default = default

    def __call__(self, token: tkn.Token, state: DrawingState, value: str) -> Any:
        if value is None:
            return self.default
        return self.ensure_mm(state, float(value))
| StarcoderdataPython |
4950129 | <reponame>jaredleekatzman/sagemaker-python-sdk
# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import print_function, absolute_import
import os
import sagemaker
from sagemaker import job, model, utils
from sagemaker.amazon import amazon_estimator
def prepare_framework(estimator, s3_operations):
    """Prepare S3 operations (specify where to upload source_dir) and environment variables
    related to framework.
    Args:
        estimator (sagemaker.estimator.Estimator): The framework estimator to get information from and update.
        s3_operations (dict): The dict to specify s3 operations (upload source_dir).
    """
    bucket = estimator.code_location if estimator.code_location else estimator.sagemaker_session._default_bucket
    key = '{}/source/sourcedir.tar.gz'.format(estimator._current_job_name)
    script = os.path.basename(estimator.entry_point)
    if estimator.source_dir and estimator.source_dir.lower().startswith('s3://'):
        # Source already lives in S3; use it as-is, no upload needed.
        code_dir = estimator.source_dir
    else:
        # Local source: schedule an upload of the (tarred) source dir to S3.
        code_dir = 's3://{}/{}'.format(bucket, key)
        s3_operations['S3Upload'] = [{
            'Path': estimator.source_dir or script,
            'Bucket': bucket,
            'Key': key,
            'Tar': True
        }]
    # Record the script location and runtime settings as SageMaker hyperparameters.
    estimator._hyperparameters[model.DIR_PARAM_NAME] = code_dir
    estimator._hyperparameters[model.SCRIPT_PARAM_NAME] = script
    estimator._hyperparameters[model.CLOUDWATCH_METRICS_PARAM_NAME] = estimator.enable_cloudwatch_metrics
    estimator._hyperparameters[model.CONTAINER_LOG_LEVEL_PARAM_NAME] = estimator.container_log_level
    estimator._hyperparameters[model.JOB_NAME_PARAM_NAME] = estimator._current_job_name
    estimator._hyperparameters[model.SAGEMAKER_REGION_PARAM_NAME] = estimator.sagemaker_session.boto_region_name
def prepare_amazon_algorithm_estimator(estimator, inputs, mini_batch_size=None):
    """Set up an Amazon-algorithm estimator, deriving the required `feature_dim`
    hyperparameter from the training data.

    Args:
        estimator (sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase):
            An estimator for a built-in Amazon algorithm to get information from and update.
        inputs: The training data: either a single
            :class:`~sagemaker.amazon.amazon_estimator.RecordSet`, or a list of
            them (one per channel) whose ``train`` channel provides ``feature_dim``.
        mini_batch_size (int): Mini-batch size to set on the estimator (may be None).
    """
    if isinstance(inputs, list):
        # First RecordSet on the 'train' channel (if any) supplies feature_dim.
        train_records = (record for record in inputs
                         if isinstance(record, amazon_estimator.RecordSet)
                         and record.channel == 'train')
        train_record = next(train_records, None)
        if train_record is not None:
            estimator.feature_dim = train_record.feature_dim
    elif isinstance(inputs, amazon_estimator.RecordSet):
        estimator.feature_dim = inputs.feature_dim
    else:
        raise TypeError('Training data must be represented in RecordSet or list of RecordSets')
    estimator.mini_batch_size = mini_batch_size
def training_base_config(estimator, inputs=None, job_name=None, mini_batch_size=None):
    """Export Airflow base training config from an estimator

    Args:
        estimator (sagemaker.estimator.EstimatorBase):
            The estimator to export training config from. Can be a BYO estimator,
            Framework estimator or Amazon algorithm estimator.
        inputs: Information about the training data. Please refer to the ``fit()`` method of
            the associated estimator, as this can take any of the following forms:

            * (str) - The S3 location where training data is saved.
            * (dict[str, str] or dict[str, sagemaker.session.s3_input]) - If using multiple channels for
                training data, you can specify a dict mapping channel names
                to strings or :func:`~sagemaker.session.s3_input` objects.
            * (sagemaker.session.s3_input) - Channel configuration for S3 data sources that can provide
                additional information about the training dataset. See :func:`sagemaker.session.s3_input`
                for full details.
            * (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of
                Amazon :class:~`Record` objects serialized and stored in S3.
                For use with an estimator for an Amazon algorithm.
            * (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of
                :class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is
                a different channel of training data.
        job_name (str): Specify a training job name if needed.
        mini_batch_size (int): Specify this argument only when estimator is a built-in estimator of an
            Amazon algorithm. For other estimators, batch size should be specified in the estimator.

    Returns (dict):
        Training config that can be directly used by SageMakerTrainingOperator in Airflow.
    """
    # NOTE(review): mutates `estimator` in place (_current_job_name, output_path,
    # and -- via the prepare_* helpers -- hyperparameters) as a side effect.
    default_bucket = estimator.sagemaker_session.default_bucket()
    s3_operations = {}

    # Job name: caller-supplied, or derived from base job name / training image.
    if job_name is not None:
        estimator._current_job_name = job_name
    else:
        base_name = estimator.base_job_name or utils.base_name_from_image(estimator.train_image())
        estimator._current_job_name = utils.airflow_name_from_base(base_name)

    # Default the output location to the session's default bucket.
    if estimator.output_path is None:
        estimator.output_path = 's3://{}/'.format(default_bucket)

    # Framework estimators need source code staged to S3 (recorded in
    # s3_operations); Amazon-algorithm estimators need feature_dim /
    # mini_batch_size derived from the inputs.
    if isinstance(estimator, sagemaker.estimator.Framework):
        prepare_framework(estimator, s3_operations)
    elif isinstance(estimator, amazon_estimator.AmazonAlgorithmEstimatorBase):
        prepare_amazon_algorithm_estimator(estimator, inputs, mini_batch_size)

    job_config = job._Job._load_config(inputs, estimator, expand_role=False, validate_uri=False)

    train_config = {
        'AlgorithmSpecification': {
            'TrainingImage': estimator.train_image(),
            'TrainingInputMode': estimator.input_mode
        },
        'OutputDataConfig': job_config['output_config'],
        'StoppingCondition': job_config['stop_condition'],
        'ResourceConfig': job_config['resource_config'],
        'RoleArn': job_config['role'],
    }

    # Optional sections are only attached when present.
    if job_config['input_config'] is not None:
        train_config['InputDataConfig'] = job_config['input_config']

    if job_config['vpc_config'] is not None:
        train_config['VpcConfig'] = job_config['vpc_config']

    if estimator.hyperparameters() is not None:
        # SageMaker expects hyperparameter keys and values as strings.
        hyperparameters = {str(k): str(v) for (k, v) in estimator.hyperparameters().items()}
        if hyperparameters and len(hyperparameters) > 0:
            train_config['HyperParameters'] = hyperparameters

    # Uploads (e.g. Framework source dir) for the Airflow operator to perform
    # before starting the training job.
    if s3_operations:
        train_config['S3Operations'] = s3_operations

    return train_config
def training_config(estimator, inputs=None, job_name=None, mini_batch_size=None):
    """Export a complete Airflow training config from an estimator.

    Builds on :func:`training_base_config`, then attaches the job name chosen
    during that call and the estimator's tags, if any.

    Args:
        estimator (sagemaker.estimator.EstimatorBase): The estimator to export
            training config from (BYO, Framework, or Amazon-algorithm).
        inputs: Information about the training data; accepts the same forms as
            the estimator's ``fit()`` method (S3 URI string, dict of channels,
            ``s3_input``, RecordSet, or list of RecordSets).
        job_name (str): Specify a training job name if needed.
        mini_batch_size (int): Only for built-in Amazon-algorithm estimators;
            other estimators carry their batch size themselves.

    Returns (dict):
        Training config that can be directly used by SageMakerTrainingOperator
        in Airflow.
    """
    config = training_base_config(estimator, inputs, job_name, mini_batch_size)
    # The base export leaves the job unnamed in the config; the name was
    # recorded on the estimator, so surface it here.
    config['TrainingJobName'] = estimator._current_job_name

    tags = estimator.tags
    if tags is not None:
        config['Tags'] = tags

    return config
def tuning_config(tuner, inputs, job_name=None):
    """Export Airflow tuning config from an estimator

    Args:
        tuner (sagemaker.tuner.HyperparameterTuner): The tuner to export tuning config from.
        inputs: Information about the training data. Please refer to the ``fit()`` method of
            the associated estimator in the tuner, as this can take any of the following forms:

            * (str) - The S3 location where training data is saved.
            * (dict[str, str] or dict[str, sagemaker.session.s3_input]) - If using multiple channels for
                training data, you can specify a dict mapping channel names
                to strings or :func:`~sagemaker.session.s3_input` objects.
            * (sagemaker.session.s3_input) - Channel configuration for S3 data sources that can provide
                additional information about the training dataset. See :func:`sagemaker.session.s3_input`
                for full details.
            * (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of
                Amazon :class:~`Record` objects serialized and stored in S3.
                For use with an estimator for an Amazon algorithm.
            * (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of
                :class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is
                a different channel of training data.
        job_name (str): Specify a tuning job name if needed.

    Returns (dict):
        Tuning config that can be directly used by SageMakerTuningOperator in Airflow.
    """
    # Build the per-training-job definition from the underlying estimator.
    # HyperParameters and S3Operations are lifted out of it and re-homed at
    # the tuning level below.
    train_config = training_base_config(tuner.estimator, inputs)
    hyperparameters = train_config.pop('HyperParameters', None)
    s3_operations = train_config.pop('S3Operations', None)

    if hyperparameters and len(hyperparameters) > 0:
        tuner.static_hyperparameters = \
            {utils.to_str(k): utils.to_str(v) for (k, v) in hyperparameters.items()}
    # NOTE(review): when the estimator exports no hyperparameters,
    # tuner.static_hyperparameters is never assigned here; the .pop() loop
    # below then relies on the attribute already existing on the tuner --
    # confirm upstream guarantees this.

    if job_name is not None:
        tuner._current_job_name = job_name
    else:
        base_name = tuner.base_tuning_job_name or utils.base_name_from_image(tuner.estimator.train_image())
        tuner._current_job_name = utils.airflow_name_from_base(base_name, tuner.TUNING_JOB_NAME_MAX_LENGTH, True)

    # Hyperparameters being tuned must not also be passed as static values.
    for hyperparameter_name in tuner._hyperparameter_ranges.keys():
        tuner.static_hyperparameters.pop(hyperparameter_name, None)
    train_config['StaticHyperParameters'] = tuner.static_hyperparameters

    tune_config = {
        'HyperParameterTuningJobName': tuner._current_job_name,
        'HyperParameterTuningJobConfig': {
            'Strategy': tuner.strategy,
            'HyperParameterTuningJobObjective': {
                'Type': tuner.objective_type,
                'MetricName': tuner.objective_metric_name,
            },
            'ResourceLimits': {
                'MaxNumberOfTrainingJobs': tuner.max_jobs,
                'MaxParallelTrainingJobs': tuner.max_parallel_jobs,
            },
            'ParameterRanges': tuner.hyperparameter_ranges(),
        },
        'TrainingJobDefinition': train_config
    }

    # Optional sections are only attached when present.
    if tuner.metric_definitions is not None:
        tune_config['TrainingJobDefinition']['AlgorithmSpecification']['MetricDefinitions'] = \
            tuner.metric_definitions

    if tuner.tags is not None:
        tune_config['Tags'] = tuner.tags

    if s3_operations is not None:
        tune_config['S3Operations'] = s3_operations

    return tune_config
| StarcoderdataPython |
311028 | <filename>suburbs.py
import json
import requests
def getSuburbs():
    """Return the unique suburb names within ``radius`` km of the configured suburb.

    Reads ``settings.json`` for ``suburb``, ``postcode``, ``state``
    (abbreviated) and ``radius`` (km), resolves the suburb's coordinates via
    postcodeapi.com.au, then queries for all suburbs within the radius.

    Returns:
        list[str]: De-duplicated suburb names (order not guaranteed).

    Raises:
        ValueError: If the suburb/postcode/state combination does not identify
            exactly one suburb.
    """
    with open("settings.json") as fh:
        settings = json.load(fh)

    headers = {'Accept': 'application/json'}

    # Look up the configured suburb. Passing `params` lets requests URL-encode
    # the values (suburb names may contain spaces), which the previous naive
    # string concatenation did not handle.
    response = requests.get(
        "https://v0.postcodeapi.com.au/suburbs",
        params={
            "name": settings["suburb"],
            "postcode": settings["postcode"],
            "state": settings["state"],
        },
        headers=headers,
    )
    suburb = response.json()
    if len(suburb) != 1:
        # Raise instead of quit() so callers can handle the failure; quit()
        # would terminate the whole interpreter from inside a library function.
        raise ValueError(
            "Check Suburb data in Settings. Must include Suburb Name, "
            "Postcode and State (Abrv)"
        )

    latitude = suburb[0]["latitude"]
    longitude = suburb[0]["longitude"]
    radius_metres = int(settings["radius"] * 1000)  # API expects metres

    response = requests.get(
        "https://v0.postcodeapi.com.au/radius",
        params={
            "latitude": latitude,
            "longitude": longitude,
            "distance": radius_metres,
        },
        headers=headers,
    )

    # De-duplicate: the radius search can return a suburb once per postcode.
    return list({entry["name"] for entry in response.json()})
| StarcoderdataPython |
3425194 | <reponame>ViaTechSystems/encrypted-model-fields
from __future__ import unicode_literals
from django.test import TestCase
from django.core.exceptions import ImproperlyConfigured
import cryptography.fernet
from . import fields
class TestSettings(TestCase):
    """Verify that ``fields.get_crypter`` validates FIELD_ENCRYPTION_KEY settings."""

    def setUp(self):
        # Two independent, well-formed Fernet keys for single- and multi-key cases.
        self.key1 = cryptography.fernet.Fernet.generate_key()
        self.key2 = cryptography.fernet.Fernet.generate_key()

    def test_settings(self):
        # A single key is accepted.
        with self.settings(FIELD_ENCRYPTION_KEY=self.key1):
            fields.get_crypter()

    def test_settings_tuple(self):
        # A tuple of keys is accepted (key-rotation support).
        with self.settings(FIELD_ENCRYPTION_KEY=(self.key1, self.key2,)):
            fields.get_crypter()

    def test_settings_list(self):
        # A list of keys is accepted as well.
        with self.settings(FIELD_ENCRYPTION_KEY=[self.key1, self.key2]):
            fields.get_crypter()

    def test_settings_empty(self):
        # Every "no key configured" variant must be rejected.
        for empty_value in (None, '', [], tuple()):
            with self.settings(FIELD_ENCRYPTION_KEY=empty_value):
                self.assertRaises(ImproperlyConfigured, fields.get_crypter)

    def test_settings_bad(self):
        # Truncated keys are invalid, alone or anywhere inside a collection.
        truncated = self.key1[:5]
        for bad_value in (truncated, (truncated, self.key2,), [truncated, self.key2[:5]]):
            with self.settings(FIELD_ENCRYPTION_KEY=bad_value):
                self.assertRaises(ImproperlyConfigured, fields.get_crypter)
| StarcoderdataPython |
6649648 | <reponame>mgrover1/intake-esm-datastore
import os
import click
import numpy as np
import requests
from core import Builder, extract_attr_with_regex, get_asset_list, reverse_filename_format
def cmip6_parser(filepath):
    """Parse a CMIP6 file path into its DRS attributes.

    Attributes are recovered from the file name (variable, table, source,
    experiment, member, grid label and optional time range) and from the
    directory layout (activity, institution, grid label, version).

    References
        CMIP6 DRS: http://goo.gl/v1drZl
        Controlled Vocabularies (CVs) for use in CMIP6: https://github.com/WCRP-CMIP/CMIP6_CVs

    Directory structure =
        <mip_era>/<activity_id>/<institution_id>/<source_id>/<experiment_id>/
        <member_id>/<table_id>/<variable_id>/<grid_label>/<version>

    File name = <variable_id>_<table_id>_<source_id>_<experiment_id>_<member_id>_<grid_label>[_<time_range>].nc
    For time-invariant fields the time_range segment is omitted.

    Example when there is no sub-experiment: tas_Amon_GFDL-CM4_historical_r1i1p1f1_gn_196001-199912.nc
    Example with a sub-experiment: pr_day_CNRM-CM6-1_dcppA-hindcast_s1960-r2i1p1f1_gn_198001-198412.nc
    """
    name_templates = [
        '{variable_id}_{table_id}_{source_id}_{experiment_id}_{member_id}_{grid_label}_{time_range}.nc',
        '{variable_id}_{table_id}_{source_id}_{experiment_id}_{member_id}_{grid_label}.nc',
    ]
    attrs = reverse_filename_format(os.path.basename(filepath), templates=name_templates)
    try:
        parent = os.path.dirname(filepath).strip('/')
        # Path components preceding the <source_id> directory end with
        # [..., activity_id, institution_id].
        leading = parent.split(f"/{attrs['source_id']}/")[0].strip('/').split('/')
        attrs['grid_label'] = parent.split(f"/{attrs['variable_id']}/")[1].strip('/').split('/')[0]
        attrs['activity_id'] = leading[-2]
        attrs['institution_id'] = leading[-1]
        attrs['version'] = extract_attr_with_regex(parent, regex=r'v\d{4}\d{2}\d{2}|v\d{1}') or 'v0'
        attrs['path'] = filepath
        member = attrs['member_id']
        if member.startswith('s'):
            # Sub-experiment member ids look like 's1960-r2i1p1f1': the leading
            # segment carries the DCPP initialization year.
            pieces = member.split('-')
            attrs['dcpp_init_year'] = float(pieces[0][1:])
            attrs['member_id'] = pieces[-1]
        else:
            attrs['dcpp_init_year'] = np.nan
    except Exception:
        # Best effort: malformed paths yield whatever was parsed so far.
        pass
    return attrs
def cmip5_parser(filepath):
    """Extract attributes of a file using information from CMIP5 DRS.

    Notes
    -----
    Reference:
    - CMIP5 DRS: https://pcmdi.llnl.gov/mips/cmip5/docs/cmip5_data_reference_syntax.pdf?id=27
    """
    # Attributes encoded in the directory path rather than the file name.
    freq_regex = r'/3hr/|/6hr/|/day/|/fx/|/mon/|/monClim/|/subhr/|/yr/'
    realm_regex = r'aerosol|atmos|land|landIce|ocean|ocnBgchem|seaIce'
    version_regex = r'v\d{4}\d{2}\d{2}|v\d{1}'

    file_basename = os.path.basename(filepath)

    # The gridspec template matches files without a temporal subset.
    filename_template = '{variable}_{mip_table}_{model}_{experiment}_{ensemble_member}_{temporal_subset}.nc'
    gridspec_template = '{variable}_{mip_table}_{model}_{experiment}_{ensemble_member}.nc'
    templates = [filename_template, gridspec_template]
    fileparts = reverse_filename_format(file_basename, templates)

    frequency = extract_attr_with_regex(filepath, regex=freq_regex, strip_chars='/')
    realm = extract_attr_with_regex(filepath, regex=realm_regex)
    version = extract_attr_with_regex(filepath, regex=version_regex) or 'v0'  # 'v0' when no version dir found
    fileparts['frequency'] = frequency
    fileparts['modeling_realm'] = realm
    fileparts['version'] = version
    fileparts['path'] = filepath

    try:
        # Directory layout implied by the indices used below:
        # .../<product_id>/<institute>/<model>/<experiment>/...
        part1, part2 = os.path.dirname(filepath).split(fileparts['experiment'])
        part1 = part1.strip('/').split('/')
        fileparts['institute'] = part1[-2]
        fileparts['product_id'] = part1[-3]
    except Exception:
        # Best effort: leave institute/product_id unset on unexpected layouts.
        pass

    return fileparts
def _pick_latest_version(df):
import itertools
print(f'Dataframe size before picking latest version: {len(df)}')
grpby = list(set(df.columns.tolist()) - {'path', 'version', 'dcpp_init_year'})
grouped = df.groupby(grpby)
def _pick_latest_v(group):
idx = []
if group.version.nunique() > 1:
idx = group.sort_values(by=['version'], ascending=False).index[1:].values.tolist()
return idx
print('Getting latest version...\n')
idx_to_remove = grouped.apply(_pick_latest_v).tolist()
idx_to_remove = list(itertools.chain(*idx_to_remove))
df = df.drop(index=idx_to_remove)
print(f'Dataframe size after picking latest version: {len(df)}')
print('\nDone....\n')
return df
def build_cmip(
    root_path,
    cmip_version,
    depth=4,
    columns=None,
    exclude_patterns=['*/files/*', '*/latest/*'],
    pick_latest_version=False,
):
    """Build a catalog table of CMIP5/CMIP6 assets found under *root_path*.

    Args:
        root_path (str): Root path of the CMIP project output.
        cmip_version (int or str): CMIP phase, 5 or 6; selects the parser and
            the default column set.
        depth (int): Recursion depth used when walking *root_path* for assets.
        columns (list, optional): Catalog columns; defaults to the standard
            DRS attribute set for the chosen CMIP version.
        exclude_patterns (list): Glob patterns of paths to skip ('files' and
            'latest' directories duplicate the versioned directories).
        pick_latest_version (bool): When True, keep only the newest version of
            each dataset (see ``_pick_latest_version``).

    Returns:
        The catalog table sorted by ``path`` (DataFrame-like: it supports
        ``sort_values``/``groupby`` -- presumably a pandas DataFrame produced
        by ``Builder``; confirm against ``core.Builder``).
    """
    # NOTE(review): exclude_patterns uses a mutable default list; safe only as
    # long as neither callers nor Builder mutate it.
    parsers = {'6': cmip6_parser, '5': cmip5_parser}
    cmip_columns = {
        '6': [
            'activity_id',
            'institution_id',
            'source_id',
            'experiment_id',
            'member_id',
            'table_id',
            'variable_id',
            'grid_label',
            'dcpp_init_year',
            'version',
            'time_range',
            'path',
        ],
        '5': [
            'product_id',
            'institute',
            'model',
            'experiment',
            'frequency',
            'modeling_realm',
            'mip_table',
            'ensemble_member',
            'variable',
            'temporal_subset',
            'version',
            'path',
        ],
    }

    filelist = get_asset_list(root_path, depth=depth)
    cmip_version = str(cmip_version)
    if columns is None:
        columns = cmip_columns[cmip_version]
    b = Builder(columns, exclude_patterns)
    df = b(filelist, parsers[cmip_version])

    if cmip_version == '6':
        # Some entries are invalid: Don't conform to the CMIP6 Data Reference Syntax.
        # Filter against the official activity_id controlled vocabulary.
        cmip6_activity_id_url = (
            'https://raw.githubusercontent.com/WCRP-CMIP/CMIP6_CVs/master/CMIP6_activity_id.json'
        )
        resp = requests.get(cmip6_activity_id_url)
        activity_ids = list(resp.json()['activity_id'].keys())
        # invalids = df[~df.activity_id.isin(activity_ids)]
        df = df[df.activity_id.isin(activity_ids)]

    if pick_latest_version:
        df = _pick_latest_version(df)

    return df.sort_values(by=['path'])
@click.command()
@click.option('--root-path', type=click.Path(exists=True), help='Root path of the CMIP project output.')
@click.option(
    '-d',
    '--depth',
    default=4,
    type=int,
    show_default=True,
    help='Recursion depth. Recursively walk root_path to a specified depth',
)
@click.option(
    '--pick-latest-version',
    default=False,
    is_flag=True,
    show_default=True,
    help='Whether to only catalog lastest version of data assets or keep all versions',
)
@click.option('-v', '--cmip-version', type=int, help='CMIP phase (e.g. 5 for CMIP5 or 6 for CMIP6)')
@click.option('--csv-filepath', type=str, help='File path to use when saving the built catalog')
# NOTE(review): 'lastest' typo in the user-facing help text above -- left
# unchanged here since option help strings are runtime output.
def cli(root_path, depth, pick_latest_version, cmip_version, csv_filepath):
    # Command-line entry point: build a CMIP5/CMIP6 catalog with build_cmip()
    # and save it as a gzipped CSV. (Kept as a comment rather than a
    # docstring: click would surface a docstring as --help text.)
    if cmip_version not in set([5, 6]):
        raise ValueError(f'cmip_version = {cmip_version} is not valid. Valid options include: 5 and 6.')

    if csv_filepath is None:
        raise ValueError("Please provide csv-filepath. e.g.: './cmip5.csv.gz'")

    df = build_cmip(root_path, cmip_version, depth=depth, pick_latest_version=pick_latest_version)
    df.to_csv(csv_filepath, compression='gzip', index=False)


if __name__ == '__main__':
    cli()  # click injects arguments from sys.argv
| StarcoderdataPython |
4858273 | import unittest
import numpy as np
import torch
from Utility.Torch import Glimpses
class testView(unittest.TestCase):
    """Smoke tests for Glimpses.reshape argument handling."""

    def testBasic(self):
        """reshape should accept int, list, and tensor shape specifications."""
        source = torch.randn([20, 30, 10])
        # Each call only needs to run without raising.
        Glimpses.reshape(source, 10, [5, 2])
        Glimpses.reshape(source, [30, 10], [50, 6])
        Glimpses.reshape(source, torch.tensor([30, 10]), torch.tensor([50, 6]))
        Glimpses.reshape(source, torch.tensor([30, 10]), torch.tensor([50, 6], dtype=torch.int32))
class testLocal(unittest.TestCase):
    """Behavioral tests for Glimpses.local (kernel/stride/dilation windowing).

    Each test compares Glimpses.local against a hand-computed expected tensor.
    """

    def testAsLayer(self):
        """
        Test if a simple layer works.
        """
        # Perform direct logic test: kernel/stride/dilation of 1 should just
        # add a trailing window dimension of size 1.
        tensor = torch.arange(30)
        kernel, stride, dilation = 1, 1, 1
        final = tensor.unsqueeze(-1)

        test = Glimpses.local(tensor, kernel, stride, dilation)
        test = torch.all(test == final)

        self.assertTrue(test, "Logical failure: results did not match manual calculation")

    def testKernel(self):
        """
        Test if a straightforward local kernel, as used in a convolution, works
        """
        # Perform kernel compile and logical test: size-2 windows sliding by 1.
        tensor = torch.tensor([0, 1, 2, 3, 4, 5])
        final = torch.tensor([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]])
        kernel, stride, dilation = 2, 1, 1

        test = Glimpses.local(tensor, kernel, stride, dilation)
        test = torch.all(test == final)

        self.assertTrue(test, "Logical failure: Kernels not equal")

    def testStriding(self):
        """
        Test if a strided kernel, as used in a convolution, works
        """
        # Perform striding compile and logical test: stride 2 keeps every
        # other position.
        tensor = torch.tensor([0, 1, 2, 3, 4, 5])
        final = torch.tensor([[0], [2], [4]])
        kernel, stride, dilation = 1, 2, 1

        test = Glimpses.local(tensor, kernel, stride, dilation)
        test = torch.all(test == final)

        self.assertTrue(test, "Logical failure: striding did not match")

    def testDilation(self):
        """
        Test if a combination of dilated kernels works.
        """
        # Perform dilation test: window elements are spaced `dilation` apart.
        tensor = torch.tensor([0, 1, 2, 3, 4, 5])
        final = torch.tensor([[0, 2], [1, 3], [2, 4], [3, 5]])
        final2 = torch.tensor([[0, 2, 4], [1, 3, 5]])
        final3 = torch.tensor([[0, 3], [1, 4], [2, 5]])
        kernel1, stride1, dilation1 = 2, 1, 2
        kernel2, stride2, dilation2 = 3, 1, 2
        kernel3, stride3, dilation3 = 2, 1, 3

        test = Glimpses.local(tensor, kernel1, stride1, dilation1)
        test2 = Glimpses.local(tensor, kernel2, stride2, dilation2)
        test3 = Glimpses.local(tensor, kernel3, stride3, dilation3)

        test = torch.all(final == test)
        test2 = torch.all(final2 == test2)
        test3 = torch.all(final3 == test3)

        self.assertTrue(test, "Logical failure: dilation with kernel did not match")
        self.assertTrue(test2, "Logical failure: dilation with kernel did not match")
        self.assertTrue(test3, "Logical failure: dilation with kernel did not match")

    def testRearranged(self):
        """
        Test if a tensor currently being viewed, such as produced by swapdims, works
        """
        # make tensor
        tensor = torch.arange(20)
        tensor = tensor.view((2, 10))  # This is what the final buffer should be viewed with respect to
        tensor = tensor.swapdims(-1, -2).clone()  # Now a new tensor with a new data buffer
        tensor = tensor.swapdims(-1, -2)  # Buffer is being viewed by stridings. This could fuck things up

        # Declare kernel, striding, final
        kernel, striding, dilation = 2, 2, 2

        # Make expected final
        final = []
        final.append([[0, 2], [2, 4], [4, 6], [6, 8]])
        final.append([[10, 12], [12, 14], [14, 16], [16, 18]])
        final = torch.tensor(final)

        # test
        test = Glimpses.local(tensor, kernel, striding, dilation)
        test = torch.all(final == test)
        self.assertTrue(test, "Logical failure: buffer issues")

    def testWhenSliced(self):
        """
        Test if a tensor which is a view through a slice works."""
        # make tensor
        tensor = torch.arange(20)
        tensor = tensor.view((2, 10))  # This is what the final buffer should be viewed with respect to
        tensor = tensor[:, 2:6]

        # Declare kernel, striding, final
        kernel, striding, dilation = 2, 2, 2

        # Make expected final
        final = []
        final.append([[2, 4]])
        final.append([[12, 14]])
        final = torch.tensor(final)

        # test
        test = Glimpses.local(tensor, kernel, striding, dilation)
        test = torch.all(final == test)
        self.assertTrue(test, "Logical failure: buffer issues")

        tensor[..., 0] = 30
        # NOTE(review): at this point `test` is the scalar boolean result of
        # the torch.all above, NOT the local view -- so `final != test`
        # compares the expected tensor against a scalar bool and this
        # assertion is effectively vacuous. The intent was presumably to
        # re-fetch Glimpses.local(...) (or keep the raw view) and check that
        # the in-place write propagated; confirm and fix upstream.
        test = torch.all(final != test)
        self.assertTrue(test, "Logical failure: sync issues")

    def test_Striding2(self):
        # Window count check only: 16 positions with matching kernel/stride.
        test_tensor = torch.randn([10, 16])
        output = Glimpses.local(test_tensor, 2, 2, 1)
        self.assertTrue(output.shape[-2] == 8)
        output = Glimpses.local(test_tensor, 4, 4, 1)
        self.assertTrue(output.shape[-2] == 4)
class testDilocal(unittest.TestCase):
    """Tests for Glimpses.dilocal: local windows taken at several dilation
    rates simultaneously."""

    def test_basic(self):
        """ Tests whether an uncomplicated, unchanging case works. This means stride, kernel is 1"""
        tensor = torch.Tensor([[1, 2, 3, 4], [5, 6, 7, 8]])
        outcome = Glimpses.dilocal(tensor, 1, 1, [1, 2])
        # Shape presumably (rows, n_dilations, positions, kernel) -- confirm
        # against Glimpses.dilocal.
        self.assertTrue(np.array_equal(outcome.shape, [2, 2, 4, 1]))

    def testDilation(self):
        """
        Test if a combination of dilated kernels works.
        """
        # Setup constants
        tensor = torch.tensor([0, 1, 2, 3, 4, 5])
        stride = 1
        kernel = 3
        dilation = [1, 2, 3]

        # State expected result: one windowed view per dilation rate,
        # zero-padded at the boundaries.
        final = []
        final.append(torch.tensor([[0, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]))
        final.append(torch.tensor([[0, 0, 2], [0, 1, 3], [0, 2, 4], [1, 3, 5], [2, 4, 0], [3, 5, 0]]))
        final.append(torch.tensor([[0, 0, 3], [0, 1, 4], [0, 2, 5], [0, 3, 0], [1, 4, 0], [2, 5, 0]]))
        final = torch.stack(final)

        # Perform test
        test = Glimpses.dilocal(tensor, kernel, stride, dilation)
        self.assertTrue(np.array_equal(test, final))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.