id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
327285 | <gh_stars>1-10
"""
MultiEn/Microsoft's DialogGPT chatbot (Conversational NLU).
"""
__all__ = ['ChatbotDialoggptEnMultiWrapper']
from .chatbot_dialoggpt_en import ChatbotDialoggptEn
from .translator_marian import TranslatorMarian
class ChatbotDialoggptEnMultiWrapper(ChatbotDialoggptEn):
    """
    Multilingual wrapper around Microsoft's DialogGPT English chatbot
    (Conversational NLU).

    Questions are translated from the target language into English, answered
    by the English chatbot, and the answer is translated back.

    Parameters:
    ----------
    lang : str
        Target language.
    """
    def __init__(self,
                 lang,
                 **kwargs):
        super(ChatbotDialoggptEnMultiWrapper, self).__init__(**kwargs)
        self.lang = lang
        # One Marian translator per direction, sharing the chatbot's CUDA flag.
        self.target_to_en_translator = TranslatorMarian(src=lang, dst="en", use_cuda=self.use_cuda)
        self.en_to_target_translator = TranslatorMarian(src="en", dst=lang, use_cuda=self.use_cuda)

    def __call__(self,
                 input_message,
                 context=None):
        """
        Process a question.

        Parameters:
        ----------
        input_message : str
            Question (in the target language).
        context : list of str, default None
            History of conversation.

        Returns:
        -------
        str
            Answer (translated back into the target language).
        """
        en_question = self.target_to_en_translator(input_message)
        en_answer = super(ChatbotDialoggptEnMultiWrapper, self).__call__(en_question, context)
        return self.en_to_target_translator(en_answer)
| StarcoderdataPython |
5158121 | <gh_stars>10-100
import json
import argparse
import gzip
import os
def parse_cve_file(filename, save_file):
    """Condense a gzipped NVD CVE JSON feed into a per-CVE lookup map.

    For every CVE entry the output records the vulnerable CPE URIs, the CWE
    numbers, a CVSS score (v2, or the v2/v3 average when both exist) and the
    first English description. The result is written to ``save_file`` as
    pretty-printed, key-sorted JSON.
    """
    with gzip.open(filename, "rt", encoding="utf-8") as handle:
        feed = json.load(handle)

    parsed = {}
    for entry in feed["CVE_Items"]:
        meta = entry["cve"]

        # Collect every CPE URI flagged as vulnerable across all config nodes.
        vulnerable_cpes = set()
        for node in entry["configurations"]["nodes"]:
            for match in node.get("cpe_match", []):
                if match["vulnerable"]:
                    vulnerable_cpes.add(match["cpe23Uri"])

        # CWE ids, e.g. "CWE-79" -> "79".
        weaknesses = set()
        for problem in meta["problemtype"]["problemtype_data"]:
            for description in problem["description"]:
                weaknesses.add(description["value"].split("-")[1])

        # CVSS: v2 score if present; averaged with v3 when both are present.
        impact = entry["impact"]
        base_score = 0
        if "baseMetricV2" in impact:
            base_score = impact["baseMetricV2"]["cvssV2"]["baseScore"]
        if "baseMetricV3" in impact:
            base_score = (
                base_score + impact["baseMetricV3"]["cvssV3"]["baseScore"]
            ) / 2

        parsed[meta["CVE_data_meta"]["ID"]] = {
            "cpes": list(vulnerable_cpes),
            "cwes": list(weaknesses),
            "score": base_score,
            "description": meta["description"]["description_data"][0]["value"],
        }

    with open(save_file, "w") as out:
        out.write(json.dumps(parsed, indent=4, sort_keys=True))
if __name__ == "__main__":
    # CLI entry point: parse a gzipped NVD CVE feed into a condensed JSON map.
    parser = argparse.ArgumentParser(description="Parse CVE File")
    parser.add_argument(
        "--cve_path", type=str, required=True, help="File path to raw_CVE.json.gz"
    )
    parser.add_argument(
        "--save_path",
        type=str,
        required=True,
        help="Folder path to save parsed data",
    )
    parser.add_argument(
        "--only_recent_cves",
        action='store_true',
        help="Add argument if using only recent CVE data from 2015-2020"
    )
    args = parser.parse_args()
    cve_path = args.cve_path
    save_path = args.save_path
    only_recent_cves = args.only_recent_cves
    # The flag only changes the output file name, not what is parsed.
    if only_recent_cves:
        save_path_file = "cve_map_cpe_cwe_score_2015_2020.json"
    else:
        save_path_file = "cve_map_cpe_cwe_score.json"
    save_file = os.path.join(save_path, save_path_file)
    parse_cve_file(cve_path, save_file)
| StarcoderdataPython |
5035002 | import random
import re
# Interactive game: guess a randomly generated 5-base DNA primer.
print('---------------------------------')
print('       GUESS THAT PRIMER GAME')
print('---------------------------------')
print()

# Build the random target sequence from the four DNA bases.
primer_length = 5
goal = ''.join(random.choice(['A', 'C', 'G', 'T']) for idx in range(primer_length))
print(goal)  # NOTE(review): prints the answer -- looks like a debug leftover; confirm before release.

# Only strings composed of the four uppercase bases are acceptable guesses.
letter_check = re.compile('^[ACGT]*$')

name = input('Player what is your name? ')
guess = input('Guess a 5 bp sequence (only capital letter [ACGT] will be allowed): ')
while guess != goal:
    # Bug fix: the original validated the guess and counted mismatches only
    # AFTER the loop had ended -- i.e. on the winning guess, where the checks
    # are trivially satisfied. Validate (and count misses) for every attempt.
    assert letter_check.match(guess)
    misses = sum(1 for il, gl in zip(guess, goal) if il != gl)
    guess = input('Not right. Try again: ')

print('Congrats! The primer sequence is %s!' % goal)
print('done')
3339518 | <gh_stars>0
# Read a number and print its factorial as an explicit product chain,
# e.g. "5 x 4 x 3 x 2 x 1 = 120". (Prompts are in Portuguese; kept verbatim.)
numero = int(input('Digite um número para calcular seu fatorual: '))
contador = numero   # counts down from n to 1
fatorial = 1        # running product
while contador > 0:
    print('{}'.format(contador), end='')
    # " x " between factors, " = " before the final result
    print(' x ' if contador >1 else ' = ', end='')
    fatorial *= contador
    contador -=1
print('{}'.format(fatorial))
| StarcoderdataPython |
250787 | <reponame>dev-japo/potion-client
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import codecs
from setuptools import setup
# Packaging metadata for the Potion-client library (a client for
# Flask-Potion APIs).
setup(
    name='Potion-client',
    version='2.5.1',
    # str() wrapper works around https://bugs.python.org/issue13943
    packages=[str('potion_client')],  # https://bugs.python.org/issue13943
    url='https://github.com/biosustain/potion-client',
    license='MIT',
    author='<NAME>',        # NOTE(review): dataset anonymisation placeholder
    author_email='<EMAIL>', # NOTE(review): dataset anonymisation placeholder
    description='A client for APIs written in Flask-Potion',
    long_description=codecs.open('README.rst', encoding='utf-8').read(),
    install_requires=[
        'jsonschema>=2.4',
        'requests>=2.5',
        'six'
    ],
    test_suite='nose.collector',
    tests_require=[
        'responses',
        'nose>=1.3'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Topic :: Internet',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'License :: OSI Approved :: MIT License'
    ]
)
| StarcoderdataPython |
8145396 | <filename>src/keyframes.py<gh_stars>0
#!/usr/bin/env python3
# scenedetect
from scenedetect import VideoManager, SceneManager
from scenedetect.detectors import ContentDetector
def keyframe_detection_with_aom_first_pass(parameters):
    """Detect keyframes from an aomenc first-pass log. Not implemented yet."""
    pass
def keyframe_detection_with_scenedetect(parameters):
    """Run PySceneDetect content detection on the input file.

    Returns the detected scene list (pairs of start/end FrameTimecodes).
    """
    frame_source = VideoManager([parameters.input_file])
    detector = SceneManager()
    detector.add_detector(ContentDetector(threshold = 30.0))
    # Downscale the frames before analysis for speed.
    frame_source.set_downscale_factor()
    frame_source.start()
    detector.detect_scenes(frame_source = frame_source)
    return detector.get_scene_list()
def detect_keyframes(parameters):
    """
    Return all the keyframes that will be used to cut the source file
    in splits.

    There are two ways, depending of user's choice :
    -> computing first pass with aomenc and using the log file data
    -> using scene detection

    Parameters
    ----------
    parameters : object
        Must expose ``keyframe_detection`` ("AOM_1st_PASS" or
        "OpenCV_scenedetect") and, for scene detection, ``input_file``.

    Returns
    -------
    list of int
        Frame numbers of the detected keyframes.

    Raises
    ------
    ValueError
        If ``parameters.keyframe_detection`` names an unknown method.
    """
    if (parameters.keyframe_detection == "AOM_1st_PASS"):
        # AOM first-pass based detection is not implemented yet.
        keyframes = []
    elif (parameters.keyframe_detection == "OpenCV_scenedetect"):
        analysis_results = keyframe_detection_with_scenedetect(parameters)
        # Scene start frames, plus the end frame of the last scene.
        keyframes = [obj[0].frame_num for obj in analysis_results] + [analysis_results[-1][1].frame_num]
    else:
        # Bug fix: previously an unknown method fell through and raised an
        # opaque NameError on the return statement below.
        raise ValueError(
            "Unknown keyframe detection method: %r" % (parameters.keyframe_detection,))
    return keyframes
| StarcoderdataPython |
6446836 | import pytest
from lxml import etree
from tests.utils import assert_nodes_equal, load_xml, render_node
from zeep import xsd
def test_build_occurs_1():
    """An element with two mandatory children renders and parses symmetrically."""
    custom_type = xsd.Element(
        etree.QName('http://tests.python-zeep.org/', 'authentication'),
        xsd.ComplexType(
            xsd.Sequence([
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_1'),
                    xsd.String()),
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_2'),
                    xsd.String()),
            ])
        ))
    obj = custom_type(item_1='foo', item_2='bar')
    assert obj.item_1 == 'foo'
    assert obj.item_2 == 'bar'
    result = render_node(custom_type, obj)
    expected = load_xml("""
        <document>
            <ns0:authentication xmlns:ns0="http://tests.python-zeep.org/">
                <ns0:item_1>foo</ns0:item_1>
                <ns0:item_2>bar</ns0:item_2>
            </ns0:authentication>
        </document>
    """)
    assert_nodes_equal(result, expected)
    # Round-trip: parsing the rendered XML reproduces the object values.
    obj = custom_type.parse(expected[0], None)
    assert obj.item_1 == 'foo'
    assert obj.item_2 == 'bar'
def test_build_occurs_1_skip_value():
    """xsd.SkipValue suppresses an element from the rendered output."""
    custom_type = xsd.Element(
        etree.QName('http://tests.python-zeep.org/', 'authentication'),
        xsd.ComplexType(
            xsd.Sequence([
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_1'),
                    xsd.String()),
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_2'),
                    xsd.String()),
            ])
        ))
    obj = custom_type(item_1=xsd.SkipValue, item_2='bar')
    assert obj.item_1 == xsd.SkipValue
    assert obj.item_2 == 'bar'
    result = render_node(custom_type, obj)
    # item_1 must be absent from the serialized document.
    expected = load_xml("""
        <document>
            <ns0:authentication xmlns:ns0="http://tests.python-zeep.org/">
                <ns0:item_2>bar</ns0:item_2>
            </ns0:authentication>
        </document>
    """)
    assert_nodes_equal(result, expected)
def test_build_min_occurs_2_max_occurs_2():
    """A sequence with min/max occurs of 2 accepts and parses a two-item list."""
    custom_type = xsd.Element(
        etree.QName('http://tests.python-zeep.org/', 'authentication'),
        xsd.ComplexType(
            xsd.Sequence([
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_1'),
                    xsd.String()),
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_2'),
                    xsd.String()),
            ], min_occurs=2, max_occurs=2)
        ))
    assert custom_type.signature()
    # Repeated occurrences are passed via the generated _value_1 attribute.
    elm = custom_type(_value_1=[
        {'item_1': 'foo-1', 'item_2': 'bar-1'},
        {'item_1': 'foo-2', 'item_2': 'bar-2'},
    ])
    assert elm._value_1 == [
        {'item_1': 'foo-1', 'item_2': 'bar-1'},
        {'item_1': 'foo-2', 'item_2': 'bar-2'},
    ]
    expected = load_xml("""
        <ns0:container xmlns:ns0="http://tests.python-zeep.org/">
            <ns0:item_1>foo</ns0:item_1>
            <ns0:item_2>bar</ns0:item_2>
            <ns0:item_1>foo</ns0:item_1>
            <ns0:item_2>bar</ns0:item_2>
        </ns0:container>
    """)
    obj = custom_type.parse(expected, None)
    assert obj._value_1 == [
        {
            'item_1': 'foo',
            'item_2': 'bar',
        },
        {
            'item_1': 'foo',
            'item_2': 'bar',
        },
    ]
def test_build_min_occurs_2_max_occurs_2_error():
    """Passing an unexpected key inside an occurrence dict raises TypeError."""
    custom_type = xsd.Element(
        etree.QName('http://tests.python-zeep.org/', 'authentication'),
        xsd.ComplexType(
            xsd.Sequence([
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_1'),
                    xsd.String()),
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_2'),
                    xsd.String()),
            ], min_occurs=2, max_occurs=2)
        ))
    with pytest.raises(TypeError):
        custom_type(_value_1={
            'item_1': 'foo-1', 'item_2': 'bar-1', 'error': True
        })
def test_build_sequence_and_attributes():
    """Child elements plus qualified and unqualified attributes all parse."""
    custom_element = xsd.Element(
        etree.QName('http://tests.python-zeep.org/', 'authentication'),
        xsd.ComplexType(
            xsd.Sequence([
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_1'),
                    xsd.String()),
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_2'),
                    xsd.String()),
            ]),
            [
                xsd.Attribute(
                    etree.QName('http://tests.python-zeep.org/', 'attr_1'),
                    xsd.String()),
                xsd.Attribute('attr_2', xsd.String()),
            ]
        ))
    expected = load_xml("""
        <ns0:authentication xmlns:ns0="http://tests.python-zeep.org/" ns0:attr_1="x" attr_2="y">
            <ns0:item_1>foo</ns0:item_1>
            <ns0:item_2>bar</ns0:item_2>
        </ns0:authentication>
    """)
    obj = custom_element.parse(expected, None)
    assert obj.item_1 == 'foo'
    assert obj.item_2 == 'bar'
    assert obj.attr_1 == 'x'
    assert obj.attr_2 == 'y'
def test_build_sequence_with_optional_elements():
    """Nillable, repeated and optional children get sensible values on parse."""
    custom_type = xsd.Element(
        etree.QName('http://tests.python-zeep.org/', 'container'),
        xsd.ComplexType(
            xsd.Sequence([
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_1'),
                    xsd.String()),
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_2'),
                    xsd.ComplexType(
                        xsd.Sequence([
                            xsd.Element(
                                etree.QName('http://tests.python-zeep.org/', 'item_2_1'),
                                xsd.String(),
                                nillable=True)
                        ])
                    )
                ),
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_3'),
                    xsd.String(),
                    max_occurs=2),
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_4'),
                    xsd.String(),
                    min_occurs=0),
            ])
        ))
    expected = etree.fromstring("""
        <ns0:container xmlns:ns0="http://tests.python-zeep.org/">
            <ns0:item_1>1</ns0:item_1>
            <ns0:item_2/>
            <ns0:item_3>3</ns0:item_3>
        </ns0:container>
    """)
    obj = custom_type.parse(expected, None)
    assert obj.item_1 == '1'
    assert obj.item_2 is None
    # max_occurs > 1 always yields a list, even with a single occurrence.
    assert obj.item_3 == ['3']
    assert obj.item_4 is None
def test_build_max_occurs_unbounded():
    """maxOccurs='unbounded' on a sequence parses into a list of dicts."""
    custom_type = xsd.Element(
        etree.QName('http://tests.python-zeep.org/', 'authentication'),
        xsd.ComplexType(
            xsd.Sequence([
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_1'),
                    xsd.String()),
                xsd.Element(
                    etree.QName('http://tests.python-zeep.org/', 'item_2'),
                    xsd.String()),
            ], max_occurs='unbounded')
        ))
    expected = etree.fromstring("""
        <ns0:container xmlns:ns0="http://tests.python-zeep.org/">
            <ns0:item_1>foo</ns0:item_1>
            <ns0:item_2>bar</ns0:item_2>
        </ns0:container>
    """)
    obj = custom_type.parse(expected, None)
    assert obj._value_1 == [
        {
            'item_1': 'foo',
            'item_2': 'bar',
        }
    ]
def test_xml_sequence_with_choice():
    """A choice nested inside a sequence parses the selected alternative."""
    schema = xsd.Schema(load_xml("""
        <schema
                xmlns="http://www.w3.org/2001/XMLSchema"
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:tns="http://tests.python-zeep.org/tst"
                elementFormDefault="qualified"
                targetNamespace="http://tests.python-zeep.org/tst">
            <element name="container">
                <complexType>
                    <sequence>
                        <choice>
                            <element name="item_1" type="xsd:string" />
                            <element name="item_2" type="xsd:string" />
                        </choice>
                        <element name="item_3" type="xsd:string" />
                    </sequence>
                </complexType>
            </element>
        </schema>
    """))
    xml = load_xml("""
        <tst:container
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                xmlns:tst="http://tests.python-zeep.org/tst">
            <tst:item_1>blabla</tst:item_1>
            <tst:item_3>haha</tst:item_3>
        </tst:container>
    """)
    elm = schema.get_element('{http://tests.python-zeep.org/tst}container')
    result = elm.parse(xml, schema)
    assert result.item_1 == 'blabla'
    assert result.item_3 == 'haha'
def test_xml_sequence_with_choice_max_occurs_2():
    """A repeated choice (maxOccurs=2) collects its picks into _value_1."""
    schema = xsd.Schema(load_xml("""
        <schema
                xmlns="http://www.w3.org/2001/XMLSchema"
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:tns="http://tests.python-zeep.org/tst"
                elementFormDefault="qualified"
                targetNamespace="http://tests.python-zeep.org/tst">
            <element name="container">
                <complexType>
                    <sequence>
                        <choice maxOccurs="2">
                            <element name="item_1" type="xsd:string" />
                            <element name="item_2" type="xsd:string" />
                        </choice>
                        <element name="item_3" type="xsd:string" />
                    </sequence>
                    <attribute name="item_1" type="xsd:string" use="optional" />
                    <attribute name="item_2" type="xsd:string" use="optional" />
                </complexType>
            </element>
        </schema>
    """))
    xml = load_xml("""
        <tst:container
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                xmlns:tst="http://tests.python-zeep.org/tst">
            <tst:item_1>item-1-1</tst:item_1>
            <tst:item_1>item-1-2</tst:item_1>
            <tst:item_3>item-3</tst:item_3>
        </tst:container>
    """)
    elm = schema.get_element('{http://tests.python-zeep.org/tst}container')
    result = elm.parse(xml, schema)
    assert result._value_1 == [
        {'item_1': 'item-1-1'},
        {'item_1': 'item-1-2'},
    ]
    assert result.item_3 == 'item-3'
def test_xml_sequence_with_choice_max_occurs_3():
    """A repeated choice mixing a nested sequence and a single element parses."""
    schema = xsd.Schema(load_xml("""
        <schema
                xmlns="http://www.w3.org/2001/XMLSchema"
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:tns="http://tests.python-zeep.org/tst"
                elementFormDefault="qualified"
                targetNamespace="http://tests.python-zeep.org/tst">
            <element name="container">
                <complexType>
                    <sequence>
                        <choice maxOccurs="3">
                            <sequence>
                                <element name="item_1" type="xsd:string" />
                                <element name="item_2" type="xsd:string" />
                            </sequence>
                            <element name="item_3" type="xsd:string" />
                        </choice>
                        <element name="item_4" type="xsd:string" />
                    </sequence>
                </complexType>
            </element>
        </schema>
    """))
    xml = load_xml("""
        <tst:container
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                xmlns:tst="http://tests.python-zeep.org/tst">
            <tst:item_1>text-1</tst:item_1>
            <tst:item_2>text-2</tst:item_2>
            <tst:item_1>text-1</tst:item_1>
            <tst:item_2>text-2</tst:item_2>
            <tst:item_3>text-3</tst:item_3>
            <tst:item_4>text-4</tst:item_4>
        </tst:container>
    """)
    elm = schema.get_element('{http://tests.python-zeep.org/tst}container')
    result = elm.parse(xml, schema)
    assert result._value_1 == [
        {'item_1': 'text-1', 'item_2': 'text-2'},
        {'item_1': 'text-1', 'item_2': 'text-2'},
        {'item_3': 'text-3'},
    ]
    assert result.item_4 == 'text-4'
def test_xml_sequence_with_nil_element():
    """An empty occurrence inside a repeated element is parsed as None."""
    schema = xsd.Schema(load_xml("""
        <schema
                xmlns="http://www.w3.org/2001/XMLSchema"
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:tns="http://tests.python-zeep.org/"
                elementFormDefault="qualified"
                targetNamespace="http://tests.python-zeep.org/">
            <element name="container">
                <complexType>
                    <sequence>
                        <element name="item" type="xsd:string" maxOccurs="unbounded"/>
                    </sequence>
                </complexType>
            </element>
        </schema>
    """))
    xml = load_xml("""
        <tns:container
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                xmlns:tns="http://tests.python-zeep.org/">
            <tns:item>text-1</tns:item>
            <tns:item>text-2</tns:item>
            <tns:item/>
            <tns:item>text-4</tns:item>
            <tns:item>text-5</tns:item>
        </tns:container>
    """)
    elm = schema.get_element('{http://tests.python-zeep.org/}container')
    result = elm.parse(xml, schema)
    assert result.item == [
        'text-1',
        'text-2',
        None,
        'text-4',
        'text-5',
    ]
def test_xml_sequence_unbounded():
    """An unbounded sequence requires occurrence dicts, not direct kwargs."""
    schema = xsd.Schema(load_xml("""
        <schema
                xmlns="http://www.w3.org/2001/XMLSchema"
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:tns="http://tests.python-zeep.org/"
                elementFormDefault="qualified"
                targetNamespace="http://tests.python-zeep.org/">
            <complexType name="ValueListType">
                <sequence maxOccurs="unbounded" minOccurs="0">
                    <element ref="tns:Value"/>
                </sequence>
            </complexType>
            <element name="ValueList" type="tns:ValueListType"/>
            <element name="Value" type="tns:LongName"/>
            <simpleType name="LongName">
                <restriction base="string">
                    <maxLength value="256"/>
                </restriction>
            </simpleType>
        </schema>
    """))
    elm_type = schema.get_type('{http://tests.python-zeep.org/}ValueListType')
    # Direct keyword form is rejected for a repeated sequence...
    with pytest.raises(TypeError):
        elm_type(Value='bla')
    # ...but the _value_1 occurrence-dict form is accepted.
    elm_type(_value_1={'Value': 'bla'})
def test_xml_sequence_recover_from_missing_element():
    """In non-strict mode a missing middle element parses as None, not an error."""
    schema = xsd.Schema(load_xml("""
        <schema
                xmlns="http://www.w3.org/2001/XMLSchema"
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:tns="http://tests.python-zeep.org/"
                elementFormDefault="qualified"
                targetNamespace="http://tests.python-zeep.org/">
            <complexType name="container">
                <sequence>
                    <element name="item_1" type="xsd:string"/>
                    <element name="item_2" type="xsd:string"/>
                    <element name="item_3" type="xsd:string"/>
                    <element name="item_4" type="xsd:string"/>
                </sequence>
            </complexType>
        </schema>
    """), strict=False)
    xml = load_xml("""
        <tns:container
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                xmlns:tns="http://tests.python-zeep.org/">
            <tns:item_1>text-1</tns:item_1>
            <tns:item_3>text-3</tns:item_3>
            <tns:item_4>text-4</tns:item_4>
        </tns:container>
    """)
    elm_type = schema.get_type('{http://tests.python-zeep.org/}container')
    result = elm_type.parse_xmlelement(xml, schema)
    assert result.item_1 == 'text-1'
    assert result.item_2 is None
    assert result.item_3 == 'text-3'
    assert result.item_4 == 'text-4'
| StarcoderdataPython |
3388602 | <gh_stars>1-10
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from keystoneauth1 import loading
from keystoneauth1 import session
from oslo_config import cfg
from oslo_log import log as logging
from warre import app
from warre.common import blazar
from warre.common import clients
from warre.common import keystone
from warre.extensions import db
from warre import models
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def app_context(f):
    """Decorator: run the wrapped method inside ``self.app``'s application context."""
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        with self.app.app_context():
            return f(self, *args, **kwargs)
    return wrapper
class Manager(object):
    """Worker-side manager that turns warre reservations into Blazar leases."""

    def __init__(self):
        # Build the Flask app only for its DB/session context; configuration
        # is expected to have been initialised by the caller.
        self.app = app.create_app(init_config=False)

    @app_context
    def create_lease(self, reservation_id):
        """Create a Blazar lease for *reservation_id* and record the outcome.

        On failure the reservation is marked ERROR with the exception text;
        on success it is marked ALLOCATED and linked to the new lease id.
        Either way the reservation row is committed.
        """
        LOG.info("Creating Blazar lease for %s", reservation_id)
        reservation = db.session.query(models.Reservation).filter_by(
            id=reservation_id).first()
        bot_session = self.get_bot_session(reservation.project_id)
        blazar_client = blazar.BlazarClient(session=bot_session)
        try:
            lease = blazar_client.create_lease(reservation)
        except Exception as e:
            reservation.status = models.Reservation.ERROR
            reservation.status_reason = str(e)
            LOG.exception(e)
        else:
            reservation.lease_id = lease['id']
            reservation.status = models.Reservation.ALLOCATED
            LOG.info("Created Blazar lease with ID %s", reservation.lease_id)
        db.session.add(reservation)
        db.session.commit()

    def ensure_bot_access(self, project_id):
        """Grant the configured bot user its role on the target project."""
        k_session = keystone.KeystoneSession().get_session()
        client = clients.get_admin_keystoneclient(k_session)
        client.roles.grant(user=CONF.warre.bot_user_id, project=project_id,
                           role=CONF.warre.bot_role_id)

    def get_bot_session(self, project_id):
        """Return a keystone session for the bot user, scoped to *project_id*."""
        self.ensure_bot_access(project_id)
        loader = loading.get_plugin_loader('password')
        # NOTE(review): "<PASSWORD>" is a dataset anonymisation placeholder,
        # not valid Python -- restore the real config lookup (presumably
        # CONF.warre.bot_password; verify against deployment config) before use.
        auth = loader.load_from_options(auth_url=CONF.warre.bot_auth_url,
                                        user_id=CONF.warre.bot_user_id,
                                        password=<PASSWORD>,
                                        project_id=project_id,
                                        user_domain_id='default',
                                        project_domain_id='default')
        return session.Session(auth=auth)
| StarcoderdataPython |
1810610 |
#def test_marker_login(app):
# username = app.username
# password = <PASSWORD>
# app.session.ensure_login_marker(username, password)
# user = app.session.get_logged_user_marker()
# assert username == user
def test_marker_login(app):
    """Log in through the SM session helper and check the logged user matches."""
    username = app.username
    # NOTE(review): "<PASSWORD>" is a dataset anonymisation placeholder, not
    # valid Python -- presumably this was ``app.password``; restore before use.
    password = <PASSWORD>
    app.sessionSM.sm_login(username, password)
    user = app.sessionSM.get_logged_user_sm()
    assert username == user
| StarcoderdataPython |
6415201 | import sys, os, re, time
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as InterFun
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
# Define folder path for csvs
FOLDER_PATH_RUNS = os.path.join('output', 'cheetah-multi-task', '2021_04_26_8_task')
FOLDER_PATH_FIG = os.path.join('log', 'figures')
CONCAT_RUNS = False
SMOOTHING = 0.1
# Setup:
# List of run names that should be plotted
RUNS_TO_PLOT = [
# 'MLP_5T',
# 'GRU_5T',
# 'CONV_5T',
# 'TRANSFORMER_5T',
# 'MLP_5T',
# 'MLP_5T_PCGRAD',
# 'MLP_10T',
# 'MLP_10T_PCGRAD',
# 'MLP_1T',
# 'MLP_2T',
# 'MLP_3T',
# 'MLP_4T',
# 'MLP_5T',
# 'MLP_10T',
# 'MLP_20T',
# 'MLP_5T_LD1',
# 'MLP_5T_LD2',
# 'MLP_5T_LD3',
# 'MLP_5T_LD4',
# 'MLP_AT0S',
# 'MLP_AT1S',
# 'MLP_AT5S',
# 'MLP_AT10S',
# 'MLP_AT25S',
# 'MLP_P_A0001_R01',
# 'MLP_P_A0001_R0',
# 'MLP_P_A001_R01',
# 'MLP_P_A01_R01',
# 'MLP_5_PRIOR_GMM',
# 'MLP_5_TRUE_GMM',
# 'MLP_5_COMB._ACTIV.',
# 'MLP_5_DIRECT_ACTIV.',
# 'AKZ0.001_BE0.01_GS0.01',
# 'AKZ0.001_BE0.01_GS0.1',
# 'AKZ0.001_BE0.1_GS0.01',
# 'AKZ0.01_BE0.1_GS0.01',
# 'AKZ0.01_BE0.1_GS0.1',
# 'AKZ0.1_BE0.01_GS0.1',
# 'AKZ0.1_BE0.1_GS0.01',
# 'AKZ0.1_BE0.1_GS0.1'
# 'SM_NONE',
# 'SM_LINEAR',
'8_TASK_GRU_64'
]
# Setup:
# DICT = {Title: regex, ...}
RUN_REGEX_DICT = {
'MLP_1T': '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=1_ls=2_prior_gmm',
'MLP_2T': '2021_02_27_20_07_39_prior_gmm_mlp_2',
'MLP_3T': '2021_02_27_20_07_25_prior_gmm_mlp_3',
'MLP_4T': '2021_02_27_20_07_12_prior_gmm_mlp_4',
'MLP_5T': '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=5_ls=2_prior_gmm',
'MLP_10T': '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=10_ls=2_prior_gmm',
'MLP_20T': '2021_02_24_16_35_15_prior_gmm_mlp_20',
'MLP_5T_LD1': '2021_02_27_20_05_41_prior_gmm_mlp_5_ld1',
'MLP_5T_LD2': '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=5_ls=2_prior_gmm',
'MLP_5T_LD3': '2021_02_27_20_05_51_prior_gmm_mlp_5_ld3',
'MLP_5T_LD4': '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=5_ls=4_prior_gmm',
'MLP_AT0S': '2021_02_25_17_05_02_prior_gmm_mlp_5',
'MLP_AT1S': '2021_03_02_07_22_39_prior_gmm_mlp_at1',
'MLP_AT5S': '2021_03_01_18_12_38_prior_gmm_mlp_at5',
'MLP_AT10S': '2021_03_01_18_13_10_prior_gmm_mlp_at10',
'MLP_AT25S': '2021_03_02_07_23_06_prior_gmm_mlp_at25',
'MLP_P_A0001_R01': '2021_02_25_17_05_02_prior_gmm_mlp_5',
'MLP_P_A0001_R0': '2021_03_01_03_25_34_prior_gmm_a_0001_r_0',
'MLP_P_A001_R01': '2021_03_01_03_25_53_prior_gmm_a_001_r_01',
'MLP_P_A01_R01': '2021_03_01_03_26_12_prior_gmm_a_01_r_01',
'MLP_5_PRIOR_GMM' : '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=5_ls=2_prior_gmm',
'MLP_5_TRUE_GMM' : '.*cheetah_multi_task_io=true_gmm_et=mlp_ts=5_ls=2_true_gmm',
'MLP_5_COMB._ACTIV.' : '.*cheetah_multi_task_io=comb_et=mlp_ts=5_ls=2_activation_combination',
'MLP_5_DIRECT_ACTIV.' : '.*cheetah_multi_task_io=direct_et=mlp_ts=5_ls=2_direct_activation',
'GRU_5T': '.*cheetah_multi_task_io=prior_et=gru_ts=5_ls=2_prior_gmm',
'GRU_10T': '2021_02_25_17_05_58_prior_gmm_gru_10',
'CONV_5T': '.*cheetah_multi_task_io=prior_gmm_et=conv_ts=5_ls=2_prior_gmm',
'CONV_10T': '2021_02_25_17_05_23_prior_gmm_conv_10',
'TRANSFORMER_5T': '.*cheetah_multi_task_io=prior_et=transformer_ts=5_ls=2_prior_gmm',
'TRANSFORMER_10T': '2021_02_26_15_39_57_prior_gmm_transformer_10',
'MLP_5T_PCGRAD': '2021_03_01_03_15_43_prior_gmm_mlp_5_pcgrad',
'MLP_10T_PCGRAD': '2021_02_26_16_42_03_prior_gmm_mlp_10_pcgrad',
#'TIBIAMRL': 'PLACEHOLDER',
'AKZ0.001_BE0.01_GS0.01': '.*cheetah_multi_task_akz~0.001_be~0.01_gs~0.01_prior_gmm',
'AKZ0.001_BE0.01_GS0.1': '.*cheetah_multi_task_akz~0.001_be~0.01_gs~0.1_prior_gmm',
'AKZ0.001_BE0.1_GS0.01': '.*cheetah_multi_task_akz~0.001_be~0.1_gs~0.01_prior_gmm',
'AKZ0.01_BE0.1_GS0.01': '.*cheetah_multi_task_akz~0.01_be~0.1_gs~0.01_prior_gmm',
'AKZ0.01_BE0.1_GS0.1': '.*cheetah_multi_task_akz~0.01_be~0.1_gs~0.1_prior_gmm',
'AKZ0.1_BE0.01_GS0.1': '.*cheetah_multi_task_akz~0.1_be~0.01_gs~0.1_prior_gmm',
'AKZ0.1_BE0.1_GS0.01': '.*cheetah_multi_task_akz~0.1_be~0.1_gs~0.01_prior_gmm',
'AKZ0.1_BE0.1_GS0.1': '.*cheetah_multi_task_akz~0.1_be~0.1_gs~0.1_prior_gmm',
'GRU_T10': '.*cheetah_multi_task_et~gru_ts~10_prior_gmm',
'TRANSFORMER_T1': '.*cheetah_multi_task_et~transformer_ts~1_prior_gmm',
'TRANSFORMER_T5': '.*cheetah_multi_task_et~transformer_ts~5_prior_gmm',
'T_MULTIPLICATION': '.*cheetah_multi_task_tc~multiplication_prior_gmm',
'SM_NONE': '.*cheetah_multi_task_td~None_sm~None_prior_gmm',
'SM_LINEAR': '.*cheetah_multi_task_td~None_sm~linear_prior_gmm',
'TD_NONE_SMNONE': '.*cheetah_multi_task_td~None_sm~None_prior_gmm',
'TD_NONE_SMLINEAR': '.*cheetah_multi_task_td~None_sm~linear_prior_gmm',
'TD_WORST_SMNONE': '.*cheetah_multi_task_td~worst_sm~None_prior_gmm',
'8_TASK_GRU_64': '.*cheetah_multi_task_ts~64_true_gmm',
}
# Setup:
# DICT = {run name: [(Title, tag), ...], ...}
RUN_TAGS_DICT = {
'default': [
('Evaluation Test ND Average Reward', 'evaluation/nd_test/average_reward'),
('Evaluation Test ND Max Reward', 'evaluation/nd_test/max_reward'),
('Evaluation Test ND Min Reward', 'evaluation/nd_test/min_reward'),
('Evaluation Test ND Std Reward', 'evaluation/nd_test/std_reward'),
('Evaluation Test ND Success Rate', 'evaluation/nd_test/success_rate'),
('Evaluation Test Average Reward', 'evaluation/test/average_reward'),
('Evaluation Test Max Reward', 'evaluation/test/max_reward'),
('Evaluation Test Min Reward', 'evaluation/test/min_reward'),
('Evaluation Test Std Reward', 'evaluation/test/std_reward'),
('Evaluation Test Success Rate', 'evaluation/test/success_rate'),
('Evaluation Training Average Reward', 'evaluation/train/average_reward'),
('Evaluation Training Max Reward', 'evaluation/train/max_reward'),
('Evaluation Training Min Reward', 'evaluation/train/min_reward'),
('Evaluation Training Std Reward', 'evaluation/train/std_reward'),
('Evaluation Training Success Rate', 'evaluation/train/success_rate'),
('Policy Training Alpha Loss', 'rl/alpha'),
('Policy Training Policy Loss', 'rl/policy_loss'),
('Policy Training QF1 Loss', 'rl/qf1_loss'),
('Policy Training QF2 Loss', 'rl/qf2_loss'),
('Task Inference Training Mixture Model Combined Loss', 'training/ti_mixture_loss'),
('Task Inference Training Mixture Model Elbo Loss', 'training/ti_mixture_elbo_loss'),
('Task Inference Training Mixture Model State Loss', 'training/ti_mixture_state_losses'),
('Task Inference Training Mixture Model Reward Loss', 'training/ti_mixture_reward_losses'),
('Task Inference Training Mixture Model Regularization Loss', 'training/ti_mixture_regularization_loss'),
('Task Inference Training Mixture Model Class Activation Accuracy', 'training/ti_classification_acc'),
('Task Inference Training Mixture Model Clustering Loss', 'training/ti_mixture_clustering_losses')
],
}
def main(run_name=None, interpolation_type='scipy', smooth=True, format_='pdf', plot_std=True, save_=True,
         summary_pref='', fit_plt=False):
    """Read TensorBoard event files for the configured runs and plot each tag.

    When *run_name* is given, the module-level run configuration is replaced
    with a single run named 'TIBIAMRL' pointing at that folder. Multiple event
    files matching the same run regex are averaged (after interpolation onto a
    common step axis); plots are saved under FOLDER_PATH_FIG or shown.
    """
    global RUN_REGEX_DICT
    global FOLDER_PATH_RUNS
    global RUNS_TO_PLOT
    if run_name is not None:
        # Strip a trailing slash, then split off an explicit parent folder.
        run_name = run_name if run_name[-1] != '/' else run_name[:-1]
        head, tail = os.path.split(run_name)
        if len(head) > 0:
            FOLDER_PATH_RUNS = head
            RUN_REGEX_DICT = {
                'TIBIAMRL': tail,
            }
        else:
            RUN_REGEX_DICT = {
                'TIBIAMRL': run_name,
            }
        RUNS_TO_PLOT = ['TIBIAMRL']
    # Prepare data: data_dict[title][run_name] -> list of DataFrames/arrays.
    data_dict = {}
    # Get all folders in folder
    folders = sorted([d for d in os.listdir(FOLDER_PATH_RUNS) if os.path.isdir(os.path.join(FOLDER_PATH_RUNS, d))])
    for run_name in RUNS_TO_PLOT:
        for folder in folders:
            if re.match(RUN_REGEX_DICT[run_name], folder) is not None:
                (dirpath, subfolders, subfiles) = next(os.walk(os.path.join(FOLDER_PATH_RUNS, folder, 'tensorboard')))
                #(dirpath, _, subsubfiles) = next(os.walk(os.path.join(dirpath, subfolders[0])))
                # Add tf events from first subfolder
                print(f'Reading in events of {[file for file in subfiles if "events.out" in file][0]} [{folder}]')
                acc = EventAccumulator(os.path.join(dirpath, [file for file in subfiles if 'events.out' in file][0])).Reload()
                # Gather all info for given tags
                for title, tag in RUN_TAGS_DICT[run_name if run_name in RUN_TAGS_DICT.keys() else 'default']:
                    try:
                        list_of_events = acc.Scalars(summary_pref + tag)
                    except Exception as e:
                        print(f'\tAcquiring data for tag "{summary_pref + tag}" went wrong! ({e})')
                        continue
                    # Each event is (wall_time, step, value); keep step/value.
                    _, steps, values = list(zip(*map(lambda x: x._asdict().values(), list_of_events)))
                    df = pd.DataFrame(data=np.array([np.array(steps), np.array(values)]).T, columns=['Step', 'Value'])
                    df.drop_duplicates(subset='Step', keep='last', inplace=True)
                    # Add dfs to data_dict: either collect runs separately for
                    # averaging, or (CONCAT_RUNS) append as a continuation.
                    if title in data_dict.keys():
                        if not CONCAT_RUNS:
                            if run_name in data_dict[title].keys():
                                data_dict[title][run_name].append(df)
                            else:
                                data_dict[title][run_name] = [df]
                        else:
                            last_step = data_dict[title][run_name][0]['Step'].to_numpy()[-1]
                            df['Step'] += last_step
                            data_dict[title][run_name][0] = data_dict[title][run_name][0].append(df)
                    else:
                        data_dict[title] = {run_name: [df]}
    print(f'Using {["own", "InterpolatedUnivariateSpline (scipy)"][int(interpolation_type == "scipy")]} interpolation method to patch missing data in some plots')
    # Find min length for plotting only valid data and transform pd frames in numpy arrays
    for title in data_dict.keys():
        # Find corresponding values and interpolate
        for run_name in list(data_dict[title].keys()):
            # Only interpolate in case we have multiple runs that need to be averaged
            min_steps = data_dict[title][run_name][0]['Step'].to_numpy()
            if len(data_dict[title][run_name]) > 1:
                # Use the shortest run's step axis as the common axis.
                temp_l = np.array([df['Step'].to_numpy()[-1] for df in data_dict[title][run_name]])
                min_steps = data_dict[title][run_name][temp_l.argmin()]['Step'].to_numpy()
                if interpolation_type == 'scipy':
                    for ind, df in enumerate(data_dict[title][run_name]):
                        interpolation_function = InterFun(df['Step'].to_numpy(), df['Value'].to_numpy())
                        data_dict[title][run_name][ind] = interpolation_function(min_steps)
                elif interpolation_type == 'own':
                    # Step-function resampling: take the first value at or
                    # after each common step, last value past the end.
                    for ind, df in enumerate(data_dict[title][run_name]):
                        steps, values = df['Step'].to_numpy(), df['Value'].to_numpy()
                        bigger_array = np.zeros_like(min_steps, dtype=np.float)
                        for arr_ind, step in enumerate(min_steps):
                            bigger_array[arr_ind] = values[np.where(steps >= step)[0][0]] if np.sum(steps >= step) > 0 else values[-1]
                        data_dict[title][run_name][ind] = bigger_array
            else:
                data_dict[title][run_name][0] = data_dict[title][run_name][0]['Value'].to_numpy()
            data_dict[title][run_name + '_steps'] = min_steps
    # Start plotting
    print(f'Plotting ...')
    # Use Latex text
    matplotlib.rcParams['mathtext.fontset'] = 'stix'
    matplotlib.rcParams['font.family'] = 'STIXGeneral'
    # Make folder in case not yet existing
    file_name = "_".join([RUN_REGEX_DICT[run_name] for run_name in RUNS_TO_PLOT])
    fig_folder = os.path.join(FOLDER_PATH_FIG, f'{time.strftime("%Y-%m-%d-%H_%M_%S")}_{file_name if len(RUNS_TO_PLOT) < 2 else "comparison"}_smoothing{SMOOTHING}')
    if not os.path.isdir(fig_folder) and save_:
        os.mkdir(fig_folder)
    for title in data_dict.keys():
        plot_title = ('Comparison ' if len(data_dict[title]) > 2 else '') + title
        plt.ioff()
        plt.title(plot_title)
        max_mean, min_mean = -np.inf, np.inf
        for run_name in data_dict[title].keys():
            # Skip the auxiliary '<run>_steps' entries added above.
            if '_steps' in run_name:
                continue
            data_arr = np.array(data_dict[title][run_name])
            steps = data_dict[title][run_name + '_steps']
            mean = data_arr.mean(axis=0) if not smooth else smooth_values(data_arr.mean(axis=0))
            std = np.sqrt(data_arr.var(axis=0))
            plt.plot(steps, mean)
            if plot_std: plt.fill_between(steps, mean + std, mean - std, alpha=0.3)
            max_mean = mean.max() if max_mean < mean.max() else max_mean
            min_mean = mean.min() if min_mean > mean.min() else min_mean
        if fit_plt: plt.ylim([min_mean, max_mean])
        # Legend entries carry the number of averaged event files per run.
        plt.legend([f'{el}_[{len(data_dict[title][el])}]' for el in data_dict[title].keys() if '_steps' not in el],
                   bbox_to_anchor=(1, 1), loc='upper left')
        plt.xlabel('Steps')
        plt.ylabel(title)
        # Always show 0
        # y_min, y_max = plt.gca().get_ylim()
        # if y_min > 0 and not fit_plt:
        #     plt.ylim([0, y_max])
        # Save or show
        if save_:
            plt.savefig(os.path.join(fig_folder, plot_title + '.' + format_), format=format_, dpi=100,
                        bbox_inches='tight')
        else:
            plt.show()
        plt.close()
def smooth_values(scalars, weight=None):  # Scalars as np.array, weight between 0 and 1
    """Exponential moving average over *scalars* (TensorBoard-style smoothing).

    Each output value is ``last * weight + (1 - weight) * point``, seeded
    with the first element. ``weight`` defaults to the module-level
    SMOOTHING constant.

    :param scalars: 1-D array-like of numeric values.
    :param weight: smoothing factor in [0, 1]; higher means smoother.
    :return: np.ndarray of floats with the same length as *scalars*.
    """
    if weight is None:
        weight = SMOOTHING
    # Force a float buffer: np.zeros_like on an integer array would keep the
    # int dtype and silently truncate every smoothed value.
    smoothed = np.zeros_like(scalars, dtype=float)
    last = scalars[0]  # first value anchors the moving average
    for idx, point in enumerate(scalars):
        last = last * weight + (1 - weight) * point
        smoothed[idx] = last
    return smoothed
if __name__ == '__main__':
    # sys.argv always contains at least the script name, so the previous
    # ``len(sys.argv) > 0`` check was always true and its else-branch dead.
    # Forwarding the (possibly empty) argument list has the same effect.
    main(*sys.argv[1:])
| StarcoderdataPython |
11330761 | # -*- coding: utf-8 -*-
#
# comparison_schemes.py
#
"""
Features selection and classifications
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
from skfeature.function.similarity_based.fisher_score import fisher_score
from sklearn.feature_selection import (
GenericUnivariateSelect,
VarianceThreshold,
mutual_info_classif,
)
from skrebate import ReliefF
def get_features_selectors(config):
    """Assemble candidate feature-selection steps and their search spaces.

    Hyper-parameter ranges are read from
    ``config["config"]["selectors"][<selector name>]``.

    :param config: nested configuration dict.
    :return: dict mapping a short selector key to a pair
        ``((pipeline_step_name, transformer_or_None), param_distributions)``
        suitable for a sklearn Pipeline plus randomized search; the
        "No feature selection" entry carries ``None`` for both.
    """
    # ReliefF: integer search ranges for neighbour count and the number of
    # features to keep.
    relieff_param = {
        "ReliefF__n_neighbors": sp_randint(
            config["config"]["selectors"]["ReliefF"]["n_neighbors_from"],
            config["config"]["selectors"]["ReliefF"]["n_neighbors_to"],
        ),
        "ReliefF__n_features_to_select": sp_randint(
            config["config"]["selectors"]["ReliefF"][
                "n_features_to_select_from"
            ],
            config["config"]["selectors"]["ReliefF"][
                "n_features_to_select_to"
            ],
        ),
    }
    mutual_info_param = {
        "mutual_info_classif__param": sp_randint(
            config["config"]["selectors"]["mutual_info"]["param_from"],
            config["config"]["selectors"]["mutual_info"]["param_to"],
        )
    }
    fisher_param = {
        "fisher_score__param": sp_randint(
            config["config"]["selectors"]["fisher_score"]["param_from"],
            config["config"]["selectors"]["fisher_score"]["param_to"],
        )
    }
    # VarianceThreshold takes a continuous (uniform) threshold range.
    var_t_param = {
        "VarianceThreshold__threshold": sp_uniform(
            config["config"]["selectors"]["VarianceThreshold"][
                "threshold_from"
            ],
            config["config"]["selectors"]["VarianceThreshold"]["threshold_to"],
        )
    }
    f_list = dict()
    f_list["relief_f"] = (ReliefF.__name__, ReliefF()), relieff_param
    # Univariate score functions are wrapped so the k best features are kept.
    f_list["mutual_info"] = (
        (
            mutual_info_classif.__name__,
            GenericUnivariateSelect(mutual_info_classif, mode="k_best"),
        ),
        mutual_info_param,
    )
    f_list["fisher_score"] = (
        (
            fisher_score.__name__,
            GenericUnivariateSelect(fisher_score, mode="k_best"),
        ),
        fisher_param,
    )
    f_list["variance_threshold"] = (
        (VarianceThreshold.__name__, VarianceThreshold()),
        var_t_param,
    )
    # Baseline: no selection step at all.
    f_list["No feature selection"] = ("No_feature_selection", None), None
    return f_list
| StarcoderdataPython |
192783 | <gh_stars>0
from nltk import tokenize
import re
def test():
    """Smoke test: sentence-tokenize test.txt and expect six sentences."""
    filename = 'test.txt'
    with open(filename, 'r') as f:
        data = f.read()
    # Protect "Fig." from being treated as a sentence boundary.
    data = data.replace('Fig.', 'Figure')
    sentences = tokenize.sent_tokenize(data)
    print(sentences)
    # erroneously splits on "Fig. 3"
    assert len(sentences) == 6
def split_sentences(filepath='sample_paper.txt'):
    """Tokenize a paper's plain text into sentences and dump them as a CSV.

    Cuts everything after the final "References" heading, protects common
    abbreviations from the sentence splitter, then writes one
    ``"<sentence>",nan,nan`` row per sentence to the mirrored path under
    ``sentences/``.

    :param filepath: input text file (expected under ``txt_files/``).
    :raises ValueError: when no references section can be located.
    """
    # Read explicitly as UTF-8: the CSV below is written as UTF-8 and the
    # platform default encoding would break round-tripping (e.g. on Windows).
    with open(filepath, 'r', encoding='utf-8') as f:
        data = f.read()
    # chop off the references
    ref_matches = [m.start() for m in re.finditer('references 1', data.lower())]
    if ref_matches:
        data = data[:ref_matches[-1]]
    else:
        print('Did not find references 1 so let us try just finding the word references')
        ref_matches = [m.start() for m in re.finditer('references', data.lower())]
        if ref_matches:
            data = data[:ref_matches[-1]]
        else:
            raise ValueError('No references in {}'.format(filepath))
    # Replace abbreviations that would otherwise end a sentence prematurely.
    pairs = {
        'Fig.': 'Fig',
        'e.g.': 'eg',
        'i.e.': 'ie',
        'et al.': 'et al',
    }
    for key, val in pairs.items():
        data = data.replace(key, val)
    sentences = tokenize.sent_tokenize(data)
    #print(sentences)
    out = ['text,label,has_citation']
    for sentence in sentences:
        # Double internal quotes per CSV quoting rules.
        out.append('"' + sentence.replace('"', '""') + '",nan,nan')
    csv_filepath = filepath.replace('txt_files', 'sentences').replace('.txt', '.csv')
    with open(csv_filepath, 'w', encoding='utf-8') as f:
        f.write('\n'.join(out))
if __name__ == '__main__':
split_sentences() | StarcoderdataPython |
11329436 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import csv
import re
import os
import mwparserfromhell as wparser
import string
import pywikibot
import datetime
import requests
import pymysql
import random
from urllib.parse import quote
from wikidataStuff.WikidataStuff import WikidataStuff as wds
site_cache = {}
def remove_empty_dicts_from_list(list_of_dicts):
    """Return a new list with all falsy entries (e.g. empty dicts) dropped."""
    return list(filter(None, list_of_dicts))
def save_to_file(filename, content, silent=False):
    """Write *content* to *filename* as UTF-8; announce unless *silent*."""
    with open(filename, "w", encoding="utf-8") as out:
        out.write(content)
    if not silent:
        print("SAVED FILE " + filename)
def json_to_file(filename, json_content, silent=False):
    # Serialize *json_content* to *filename* as pretty-printed UTF-8 JSON.
    # datetime_convert is installed as the ``default`` hook so datetime
    # objects are rendered as strings instead of raising TypeError.
    with open(filename, 'w', encoding="utf-8") as f:
        json.dump(json_content, f, sort_keys=True,
                  indent=4,
                  ensure_ascii=False,
                  default=datetime_convert)
    if not silent:
        print("SAVED FILE " + filename)
def create_dir(out_path):
    """
    Create a directory if it doesn't exist.

    @param out_path: directory to create
    @raises ValueError: if *out_path* is empty, or an existing file has
        that name.
    """
    if not out_path:
        raise ValueError('Cannot a create directory without a name.')
    if not os.path.exists(out_path):
        # NOTE(review): os.mkdir only creates the leaf directory; the
        # parent directories must already exist.
        os.mkdir(out_path)
    elif os.path.isfile(out_path):
        raise ValueError(
            'Cannot create the directory "{}" as a file with that name '
            'already exists.'.format(out_path))
def get_specific_table_name(countryname, languagename):
    """Build the per-dataset table name, e.g. ``monuments_se_(sv)``."""
    return f"monuments_{countryname}_({languagename})"
def get_number_of_rows(connection, tablename):
    """Return the row count of *tablename* via a DB-API *connection*.

    The table name is interpolated into the SQL (backtick-quoted but not
    escaped), so it must come from trusted code, never user input.
    """
    cursor = connection.cursor()
    try:
        query = "SELECT COUNT(*) FROM `" + tablename + "`"
        cursor.execute(query)
        result = cursor.fetchone()
    finally:
        # Close deterministically instead of leaking the cursor until GC.
        cursor.close()
    return result[0]
def table_exists(connection, tablename):
    # True when *tablename* exists and holds at least one row; a missing
    # table surfaces as pymysql.ProgrammingError from the COUNT query.
    # NOTE(review): an existing-but-empty table falls through and returns
    # None (falsy) rather than True — confirm that is intended.
    try:
        if get_number_of_rows(connection, tablename) > 0:
            return True
    except pymysql.ProgrammingError:
        return False
def load_json(filename):
    """Parse *filename* as UTF-8 JSON.

    Returns the decoded object, or None (after printing a notice) when the
    file is missing/unreadable or contains invalid JSON.
    """
    try:
        with open(filename, encoding="utf-8") as source:
            try:
                return json.load(source)
            except ValueError:
                print("Failed to decode file {}.".format(filename))
    except OSError:
        print("File {} does not exist.".format(filename))
def datetime_convert(dt_object):
    """json.dump ``default`` hook: render datetimes as strings, else None."""
    if isinstance(dt_object, datetime.datetime):
        return str(dt_object)
def remove_multiple_spaces(text):
    """Collapse runs of spaces (not tabs/newlines) into a single space."""
    return re.sub(r" +", " ", text)
def remove_markup(text):
remove_br = re.compile('<br.*?>\W*', re.I)
text = remove_br.sub(' ', text)
text = " ".join(text.split())
if "[" in text or "''" in text:
text = wparser.parse(text)
text = text.strip_code()
return remove_multiple_spaces(text.strip())
def contains_digit(text):
    """True if at least one character of *text* is a decimal digit."""
    return any(ch.isdigit() for ch in text)
def get_external_links(wikitext):
"""Retrieve external url's from wikitext."""
urls = []
links = wparser.parse(wikitext).filter_external_links()
if len(links) > 0:
for link in links:
urls.append(link.url)
return urls
def is_legit_house_number(text):
    """Check whether *text* starts like a house number (12, 3A, 43-45, 43B-43E).

    Uses re.match, so only the beginning of the string is inspected.
    """
    number_regex = re.compile(
        r'\d{1,3}\s?([A-Z]{1})?((-|–)\d{1,3})?\s?([A-Z]{1})?')
    return number_regex.match(text) is not None
def get_street_address(address, language):
    """Try to extract a plausible Swedish street address from free text.

    Strips wiki markup, looks for a comma-separated part (or the whole
    string) that combines a typical Swedish street-name suffix ("gatan",
    "vägen", ...) with a house number, and returns it with trailing
    punctuation removed. Returns None when nothing address-like is found
    or *language* is not "sv".
    """
    address = remove_markup(address)
    if language == "sv":
        # Try to see if it's a legit-ish street address
        # numbers like 3, 3A, 2-4
        # oh, and sometimes it's not a _street_ name: "Norra Kik 7"
        # street names can consist of several words: "<NAME> gata 19"
        # how about: "Östra skolan, Bergaliden 24"
        # "Västanåvägen 12-6, Näsum"
        # If there's a comma, order can vary
        legit_address = None
        interesting_part = ""
        patterns = ["gatan", "vägen", " väg", " gata",
                    " torg", "torget", " plats", "platsen", " gränd",
                    "kajen", "promenaden", "liden", "stigen"]
        if "," in address:
            # BUG FIX: the original passed re.IGNORECASE (int value 2) as
            # str.split's *maxsplit* argument, silently splitting on at
            # most two commas. Split on every comma instead.
            address_split = address.split(",")
            for part in address_split:
                if (any(substring in part for substring in patterns) and
                        contains_digit(part)):
                    interesting_part = part.strip()
        else:
            if (any(substring in address for substring in patterns) and
                    contains_digit(address)):
                interesting_part = address
        if len(interesting_part) > 1:
            interesting_part_split = interesting_part.split(" ")
            for part in interesting_part_split:
                if contains_digit(part) and is_legit_house_number(part):
                    legit_address = interesting_part.rstrip(',.-')
        return legit_address
def get_wikilinks(text):
parsed = wparser.parse(text)
return parsed.filter_wikilinks()
def get_unique_wikilinks(text):
results = []
wikilinks = get_wikilinks(text)
for wikilink in wikilinks:
if wikilink not in results:
results.append(wikilink)
return results
def count_wikilinks(text):
return len(get_wikilinks(text))
def q_from_wikipedia(language, page_title):
"""
Get the ID of the WD item linked to a wp page.
If the page exists, has no item and is in the article
namespace, create an item for it.
"""
# various cleanup
if page_title.startswith("[[") and page_title.endswith("]]"):
internal_links = get_wikilinks(page_title)
if not internal_links:
return
page_title = internal_links[0].title
if isinstance(page_title, str):
# get_wikilinks()[0].title does not return a str
page_title = page_title.replace('\n', ' ')
if not page_title:
return
wp_site = pywikibot.Site(language, "wikipedia")
page = pywikibot.Page(wp_site, page_title)
summary = "Creating item for {} on {}wp."
summary = summary.format(page_title, language)
wd_repo = create_site_instance("wikidata", "wikidata")
wdstuff = wds(wd_repo, edit_summary=summary, no_wdss=True)
if page.exists():
if page.isRedirectPage():
page = page.getRedirectTarget()
if page.isDisambig():
return
try:
item = pywikibot.ItemPage.fromPage(page)
except pywikibot.NoPage:
if page.namespace() != 0: # main namespace
return
item = wdstuff.make_new_item_from_page(page, summary)
return item.getID()
def q_from_first_wikilink(language, text):
try:
wikilink = get_wikilinks(text)[0]
return q_from_wikipedia(language, wikilink.title)
except IndexError:
return
def get_matching_items_from_dict(value, dict_name):
"""
Return all items in a dict for which the label matches the provided value.
@param value: the value to match
@param dict_name: the dict to look in
"""
matches = [dict_name[x]["items"]
for x in dict_name if x.lower() == value]
if len(matches) == 0:
return []
else:
return matches[0]
def get_item_from_dict_by_key(dict_name,
search_term,
search_in,
return_content_of="item"):
"""
Return all items in a dict with a certain field match.
It will normally return the content of the field
'item' which is expected to contain a Q-item.
It is, however, possible to overwrite the name
of the field whose contents should be returned.
@param dict_name: the dictionary to look in
@pram search_term: the value to match
@param search_in: the field in which to look for matching value
@param return_content_of: the field whose content to return
"""
results = []
matches = [x for x in dict_name if x[search_in] == search_term]
if len(matches) == 0:
return []
else:
for match in matches:
results.append(match[return_content_of])
return results
def legit_year(text):
    """Return int(text) when it is a plausible year (1..2020), else None."""
    if text and text.isdigit() and 1 <= int(text) <= 2020:
        return int(text)
    return None
def legit_year_range(text):
    """Parse "YYYY-YYYY" or "YYYY-YY" into an (int, int) tuple, else None."""
    year_range = None
    # Exactly one dash, and both halves must individually parse as years.
    if "-" in text and len(text.split("-")) == 2:
        part_one = text.split("-")[0]
        part_two = text.split("-")[1]
        if parse_year(part_one) and parse_year(part_two):
            # Same width and increasing: e.g. "1900-1910".
            if (len(part_one) == len(part_two) and
                    int(part_two) > int(part_one)):
                year_range = (int(part_one), int(part_two))
            # "1900-05" style: borrow the century from the first part.
            elif len(part_one) == 4 and len(part_two) == 2:
                full_length_part_two = part_one[:2] + part_two
                if int(full_length_part_two) > int(part_one):
                    year_range = (int(part_one), int(full_length_part_two))
    return year_range
def parse_year(text):
    # Parse *text* as either a single year (int, via legit_year) or a
    # year range (tuple, via legit_year_range); None when neither fits.
    year = None
    if legit_year(text):
        year = legit_year(text)
    elif ("-") in text:
        year = legit_year_range(text)
    return year
def get_longest_string(in_list):
    """
    Get the longest string(s) in a list.

    :param in_list: list of strings
    :return: None for an empty list, the single longest string, or a
        list of all strings tied for the maximum length.
    """
    if not in_list:
        return None
    longest = max(len(s) for s in in_list)
    winners = [s for s in in_list if len(s) == longest]
    return winners[0] if len(winners) == 1 else winners
def get_longest_match(word, keywords):
    """Return the longest keyword(s) contained in *word*.

    A naive helper for languages that write compound nouns together:
    given "en stor järnvägsbro" and keywords ["bro", "järnvägsbro"],
    prefer the more specific "järnvägsbro". Returns None when nothing
    matches, a single string for a unique winner, or a list of equally
    long winners.
    """
    contained = [kw for kw in keywords if kw in word]
    if not contained:
        return None
    top = max(len(kw) for kw in contained)
    winners = [kw for kw in contained if len(kw) == top]
    return winners[0] if len(winners) == 1 else winners
def remove_characters(text, string_of_chars_to_remove):
    """Strip every occurrence of the given characters from *text*."""
    return text.translate(str.maketrans("", "", string_of_chars_to_remove))
def comma_to_period(text):
    """Replace every comma with a period (decimal-separator normalisation)."""
    return ".".join(text.split(","))
def remove_marks_from_ends(text):
    """Trim leading and trailing ASCII punctuation characters."""
    return text.strip(string.punctuation)
def string_to_float(text):
    """Parse a number that may use a decimal comma and stray punctuation.

    Equivalent to stripping punctuation from both ends, converting commas
    to periods and calling float().
    """
    trimmed = text.strip(string.punctuation)
    return float(trimmed.replace(",", "."))
def parse_ship_dimensions(text):
    """Extract ship dimensions from space-separated "<label> <number>" text.

    Swedish labels (längd/bredd/djup/brt) are mapped to English keys
    (length/width/draft/grt) with float values. Tokens that cannot be
    parsed or whose preceding label is unknown are skipped silently.
    """
    dimensions_vocab = {
        "längd": "length",
        "bredd": "width",
        "djup": "draft",
        "brt": "grt"
    }
    dimensions_dict = {}
    dimensions_list = text.split(" ")
    for i, item in enumerate(dimensions_list):
        if contains_digit(item):
            try:
                # NOTE(review): when the numeric token is first (i == 0),
                # dimensions_list[i - 1] wraps around to the *last* token;
                # harmless only while that token is not a known label.
                number_part = string_to_float(comma_to_period(item))
                associated_word = remove_marks_from_ends(
                    dimensions_list[i - 1].lower())
                word_part = dimensions_vocab[associated_word]
                dimensions_dict[word_part] = number_part
            except (ValueError, KeyError):
                continue
    return dimensions_dict
def is_vowel(char):
    """True if *char* is a vowel (Nordic letters included), case-insensitive."""
    return char.lower() in "auoiyéeöåäáæø"
def get_last_char(text):
    # Final character of *text*; raises IndexError for an empty string.
    return text[-1]
def last_char_is_vowel(text):
    # Whether *text* ends in a vowel (per is_vowel's Nordic vowel set).
    return is_vowel(get_last_char(text))
def first_char_is_number(text):
    """Check if string starts with a number.

    Returns False for the empty string instead of raising IndexError.
    """
    return bool(text) and text[0].isdigit()
def socken_to_q(socken, landskap):
if last_char_is_vowel(socken) or get_last_char(socken) == "s":
socken_name = socken + " socken"
else:
socken_name = socken + "s socken"
socken_and_landskap = socken_name + ", " + landskap
if wp_page_exists("sv", socken_and_landskap):
return q_from_wikipedia("sv", socken_and_landskap)
elif wp_page_exists("sv", socken_name):
return q_from_wikipedia("sv", socken_name)
def get_http_code(url):
r = requests.get(url)
return r.status_code
def get_bbr_link(text):
"""
raa/bbr/21300000003265
"""
base_url = "http://kulturarvsdata.se/raa/"
url_bbr = base_url + "bbr/" + text
url_bbra = base_url + "bbra/" + text
if get_http_code(url_bbra) == 200:
return "raa/bbra/" + text
elif get_http_code(url_bbr) == 200:
return "raa/bbr/" + text
def get_rid_of_brackets(text):
    """Drop every non-greedy ``(...)`` group from *text* and trim whitespace."""
    if "(" not in text:
        return text
    return re.sub(r'\(.*?\)', '', text).strip()
def get_text_inside_brackets(text):
    """
    Get the content of the first encountered occurence of round brackets.
    Handles nested brackets by getting the content of
    the first level:
    foo (cat) → cat
    text (foo (bar (cat))) around → foo (bar (cat))
    Does not handle multiple instances of brackets on the same level.
    text (foo) text (bar) → ValueError
    Does not handle mismatched brackets.
    foo (bar → ValueError
    Text without any brackets is returned unchanged.
    """
    if "(" in text:
        first_bracket_index = text.find('(')
        # Look for the matching ')' only after the first '(' ...
        last_bracket_index = text[first_bracket_index:].rfind(')')
        # ... and fail if there is none, or if a ')' appears before the '('.
        # NOTE(review): a ')' at index 0 escapes the second check because
        # rfind returns 0, which is not > 0 — confirm whether intended.
        if last_bracket_index < 0 or text[:first_bracket_index].rfind(')') > 0:
            raise ValueError("Unmatched brackets encountered.")
        # rfind above was relative to the slice; rebase to the full string.
        last_bracket_index += first_bracket_index
        result = text[first_bracket_index + 1:last_bracket_index]
        # Reject same-level sibling groups like "(foo) text (bar".
        if (result.find(')') > 0 and
            (result.find('(') > result.find(')') or
             result.find('(') < 0)):
            raise ValueError("Unmatched brackets encountered.")
        return result
    else:
        return text
def get_number_from_string(text):
    """Concatenate all digits in *text* into an int; None if there are none."""
    digits = "".join(ch for ch in text if ch.isdigit())
    return int(digits) if digits else None
def string_is_q_item(text):
    """True when *text* looks like a Wikidata item id, e.g. "Q123" (case-insensitive)."""
    try:
        return re.match(r"^Q[0-9]+$", text, re.I) is not None
    except TypeError:
        # non-string input
        return False
def string_is_p_item(text):
    """True when *text* looks like a Wikidata property id, e.g. "P31" (case-insensitive)."""
    try:
        return re.match(r"^P[0-9]+$", text, re.I) is not None
    except TypeError:
        # non-string input
        return False
def tuple_is_coords(sometuple):
    """True for a 2-tuple whose elements are both floats (a lat/lon pair)."""
    return (
        isinstance(sometuple, tuple)
        and len(sometuple) == 2
        and all(isinstance(v, float) for v in sometuple)
    )
def file_is_on_commons(text):
text = text.replace(" ", "_")
site = create_site_instance("commons", "commons")
page = pywikibot.Page(site, "File:" + text)
return page.exists()
def commonscat_exists(text):
text = text.replace(" ", "_")
site = create_site_instance("commons", "commons")
page = pywikibot.Page(site, "Category:" + text)
return page.exists()
def wp_page_exists(language, title):
site = create_site_instance(language, "wikipedia")
page = pywikibot.Page(site, title)
if page.exists():
return True
else:
return False
def is_valid_url(url):
    """Loose syntactic check for an http(s) URL (domain, localhost or IPv4).

    Non-string input returns False rather than raising.
    """
    # ``re`` is imported at module level; the old local ``import re``
    # here was redundant and has been removed.
    pattern = re.compile(
        r'^https?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|'
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    try:
        return pattern.match(url) is not None
    except TypeError:
        return False
def datetime_object_to_dict(datetime_object):
    """Convert a date/datetime into a pywikibot-style year/day/month dict."""
    return {
        "year": datetime_object.year,
        "day": datetime_object.day,
        "month": datetime_object.month,
    }
def datetime_to_dict(date_obj, dateformat):
    """Convert a datetime object to a dict.

    Besides the always-present year, only the parts named in *dateformat*
    ("%m", "%d") are included.
    """
    result = {"year": date_obj.year}
    if "%m" in dateformat:
        result["month"] = date_obj.month
    if "%d" in dateformat:
        result["day"] = date_obj.day
    return result
def date_to_dict(datestring, dateformat):
    """Parse *datestring* per *dateformat* into a {year[, month][, day]} dict."""
    # strptime does the parsing; datetime_to_dict keeps only the parts
    # that *dateformat* actually names.
    date_obj = datetime.datetime.strptime(datestring, dateformat)
    return datetime_to_dict(date_obj, dateformat)
def today_dict():
    """Get today's date as pywikibot-ready dictionary."""
    # datetime.date exposes year/month/day, so the datetime helper works.
    return datetime_object_to_dict(datetime.date.today())
def dict_to_iso_date(date_dict):
    """
    Convert pywikiboty-style date dictionary
    to ISO string ("2002-10-23").

    Month and day are zero-padded so the output is valid ISO 8601;
    the previous str() conversion produced e.g. "2002-3-5".

    @param date_dict: dictionary like
                      {"year" : 2002, "month" : 10, "day" : 23}
    """
    iso_date = ""
    if "year" in date_dict:
        iso_date += str(date_dict["year"])
    if "month" in date_dict:
        iso_date += "-{:02d}".format(date_dict["month"])
    if "day" in date_dict:
        iso_date += "-{:02d}".format(date_dict["day"])
    return iso_date
def append_line_to_file(text, filename):
    """Append *text* plus a newline to *filename* (UTF-8)."""
    with open(filename, "a", encoding="utf-8") as out:
        out.write(text + "\n")
def dump_list_to_file(some_list, filename):
    """Write each element of *some_list* on its own line to *filename* (UTF-8).

    The handle is now closed deterministically via ``with``; the previous
    code never called close(), leaking the file and risking unflushed data.
    """
    with open(filename, 'w', encoding="utf-8") as f:
        for item in some_list:
            f.write(item + "\n")
def wd_template(template_type, value):
    """Render a wikitext template call, e.g. wd_template("Q", "Q1") -> "{{Q|Q1}}"."""
    return "".join(["{{", template_type, "|", value, "}}"])
def get_current_timestamp():
    # Local time formatted like "2020-01-31_23:59:59".
    return datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
def get_random_list_sample(some_list, amount):
    # random.sample: *amount* unique elements drawn without replacement;
    # raises ValueError when amount exceeds the list length.
    return random.sample(some_list, amount)
def get_value_of_property(q_number, property_id, site):
    """Return all values of *property_id* on Wikidata item *q_number*.

    Item-valued claim targets are reduced to their Q-id string; other
    datatypes are returned as the raw claim target. Empty list when the
    item or the property is missing.
    """
    results = []
    item = pywikibot.ItemPage(site, q_number)
    if item.exists() and item.claims.get(property_id):
        for claim in item.claims.get(property_id):
            target = claim.getTarget()
            if isinstance(target, pywikibot.ItemPage):
                target = target.getID()
            results.append(target)
    return results
def get_P31(q_number, site):
    # Convenience wrapper: "instance of" (P31) values of the item.
    return get_value_of_property(q_number, "P31", site)
def is_whitelisted_P31(q_number, site, allowed_values):
    # True when any P31 of the item is in *allowed_values*; items with no
    # P31 at all are deliberately treated as allowed (see comment below),
    # so False means "has P31s and none are whitelisted".
    result = False
    all_P31s = get_P31(q_number, site)
    if all_P31s:
        for P31 in all_P31s:
            if P31 in allowed_values:
                result = True
    else:
        # A great many items do not have any P31 at all,
        # in which case assume it's correct.
        # Otherwise there'd be too many false negatives.
        result = True
    return result
def is_blacklisted_P31(q_number, site, dissalowed_values):
    # True when the item is a subclass (has P279) or any of its P31 values
    # appears in *dissalowed_values*.
    # Also blacklist any items which contains a P279 (sub-class) statement
    # as these by definition cannot be unique instances
    if len(get_value_of_property(q_number, "P279", site)) > 0:
        return True
    item_P31 = get_P31(q_number, site)
    if len(set(dissalowed_values).intersection(set(item_P31))) > 0:
        # this means one of this item's P31's is in the
        # disallowed list
        return True
    return False
def is_right_country(q_number, site, expected_country):
    """Ensure that the target item has the expected country."""
    # Items without any country (P17) statement pass the check.
    item_P17 = get_value_of_property(q_number, "P17", site)
    if not item_P17 or expected_country in item_P17:
        return True
    return False
def create_wlm_url(country, language, id):
    """Build a Wiki Loves Monuments heritage-API search URL for one monument id."""
    url_base = ("https://tools.wmflabs.org/heritage/api/api.php?"
                "action=search&format=json&srcountry={}&srlang={}&srid={}")
    return url_base.format(country, language, quote(id))
def create_site_instance(language, family):
"""Create an instance of a Wiki site (convenience function)."""
site_key = (language, family)
site = site_cache.get(site_key)
if not site:
site = pywikibot.Site(language, family)
site_cache[site_key] = site
return site
def package_quantity(value, unit=None):
    """Wrap *value* (and an optional truthy *unit*) in the standard quantity dict."""
    packaged = {"quantity_value": value}
    if unit:
        packaged["unit"] = unit
    return packaged
def package_time(date_dict):
    """Package a time/date statement in a standardised form."""
    # *date_dict* is a pywikibot-style {"year": ..., "month": ..., "day": ...}.
    return {"time_value": date_dict}
def package_monolingual(text, lang):
    """Package a monolingual statement in a standardised form."""
    # *lang* is a language code such as "sv".
    return {"monolingual_value": text, "lang": lang}
def get_data_from_csv_file(filename):
    """Load data from csv file into a list.

    Each row becomes a dict keyed by the header line. The file is opened
    with ``newline=''`` as the csv module requires (so embedded newlines
    inside quoted fields parse correctly) and explicitly as UTF-8, which
    is the encoding used everywhere else in this module.
    """
    with open(filename, "r", newline="", encoding="utf-8") as f_obj:
        reader = csv.DictReader(f_obj, delimiter=',')
        return list(reader)
| StarcoderdataPython |
3513676 | <reponame>JonathanGailliez/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComputeProfile(Model):
    """Describes the compute profile.

    :param roles: The list of roles in the cluster.
    :type roles: list[~azure.mgmt.hdinsight.models.Role]
    """
    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'roles': {'key': 'roles', 'type': '[Role]'},
    }
    def __init__(self, **kwargs):
        super(ComputeProfile, self).__init__(**kwargs)
        # roles defaults to None when not supplied.
        self.roles = kwargs.get('roles', None)
12831692 | <gh_stars>1-10
from collections import namedtuple
import json
from os import listdir, makedirs
from os.path import join as p, isdir, isfile
import transaction
from unittest.mock import patch
from tempfile import TemporaryDirectory
import pytest
import rdflib
from rdflib.term import URIRef
from owmeta_core.bundle import (Installer, Descriptor, make_include_func, FilesDescriptor,
UncoveredImports, DependencyDescriptor, TargetIsNotEmpty,
Remote, Bundle, BUNDLE_MANIFEST_FILE_NAME)
from owmeta_core.context import IMPORTS_CONTEXT_KEY, CLASS_REGISTRY_CONTEXT_KEY
from owmeta_core.context_common import CONTEXT_IMPORTS
Dirs = namedtuple('Dirs', ('source_directory', 'bundles_directory'))
@pytest.fixture
def dirs():
    """Yield a Dirs(source_directory, bundles_directory) pair of fresh
    temporary directories; both are removed again after the test."""
    with TemporaryDirectory() as source_directory,\
            TemporaryDirectory() as bundles_directory:
        yield Dirs(source_directory, bundles_directory)
def test_bundle_install_directory(dirs):
d = Descriptor('test')
bi = Installer(*dirs, graph=rdflib.ConjunctiveGraph())
bi.install(d)
assert isdir(p(dirs.bundles_directory, 'test', '1'))
def test_context_hash_file_exists(dirs):
d = Descriptor('test')
ctxid = 'http://example.org/ctx1'
d.includes.add(make_include_func(ctxid))
g = rdflib.ConjunctiveGraph()
cg = g.get_context(ctxid)
cg.add((aURI('a'), aURI('b'), aURI('c')))
bi = Installer(*dirs, graph=g)
bi.install(d)
assert isfile(p(dirs.bundles_directory, 'test', '1', 'graphs', 'hashes'))
def test_context_index_file_exists(dirs):
d = Descriptor('test')
ctxid = 'http://example.org/ctx1'
d.includes.add(make_include_func(ctxid))
g = rdflib.ConjunctiveGraph()
cg = g.get_context(ctxid)
cg.add((aURI('a'), aURI('b'), aURI('c')))
bi = Installer(*dirs, graph=g)
bi.install(d)
assert isfile(p(dirs.bundles_directory, 'test', '1', 'graphs', 'index'))
def test_context_hash_file_contains_ctxid(dirs):
d = Descriptor('test')
ctxid = 'http://example.org/ctx1'
d.includes.add(make_include_func(ctxid))
g = rdflib.ConjunctiveGraph()
cg = g.get_context(ctxid)
with transaction.manager:
cg.add((aURI('a'), aURI('b'), aURI('c')))
bi = Installer(*dirs, graph=g)
bi.install(d)
with open(p(dirs.bundles_directory, 'test', '1', 'graphs', 'hashes'), 'rb') as f:
assert f.read().startswith(ctxid.encode('UTF-8'))
def test_context_index_file_contains_ctxid(dirs):
d = Descriptor('test')
ctxid = 'http://example.org/ctx1'
d.includes.add(make_include_func(ctxid))
g = rdflib.ConjunctiveGraph()
cg = g.get_context(ctxid)
with transaction.manager:
cg.add((aURI('a'), aURI('b'), aURI('c')))
bi = Installer(*dirs, graph=g)
bi.install(d)
with open(p(dirs.bundles_directory, 'test', '1', 'graphs', 'index'), 'rb') as f:
assert f.read().startswith(ctxid.encode('UTF-8'))
def test_multiple_context_hash(dirs):
d = Descriptor('test')
ctxid_1 = 'http://example.org/ctx1'
ctxid_2 = 'http://example.org/ctx2'
d.includes.add(make_include_func(ctxid_1))
d.includes.add(make_include_func(ctxid_2))
g = rdflib.ConjunctiveGraph()
cg = g.get_context(ctxid_1)
with transaction.manager:
cg.add((aURI('a'), aURI('b'), aURI('c')))
cg = g.get_context(ctxid_2)
with transaction.manager:
cg.add((aURI('a'), aURI('b'), aURI('c')))
bi = Installer(*dirs, graph=g)
bi.install(d)
with open(p(dirs.bundles_directory, 'test', '1', 'graphs', 'hashes'), 'rb') as f:
contents = f.read()
assert ctxid_1.encode('UTF-8') in contents
assert ctxid_2.encode('UTF-8') in contents
def test_no_dupe(dirs):
'''
Test that if we have two contexts with the same contents that we don't create more
than one file for it.
The index will point to the same file for the two contexts
'''
d = Descriptor('test')
ctxid_1 = 'http://example.org/ctx1'
ctxid_2 = 'http://example.org/ctx2'
d.includes.add(make_include_func(ctxid_1))
d.includes.add(make_include_func(ctxid_2))
g = rdflib.ConjunctiveGraph()
cg = g.get_context(ctxid_1)
with transaction.manager:
cg.add((aURI('a'), aURI('b'), aURI('c')))
cg = g.get_context(ctxid_2)
with transaction.manager:
cg.add((aURI('a'), aURI('b'), aURI('c')))
bi = Installer(*dirs, graph=g)
bi.install(d)
graph_files = [x for x in listdir(p(dirs.bundles_directory, 'test', '1', 'graphs')) if x.endswith('.nt')]
assert len(graph_files) == 1
def test_file_copy(dirs):
d = Descriptor('test')
open(p(dirs[0], 'somefile'), 'w').close()
d.files = FilesDescriptor()
d.files.includes.add('somefile')
g = rdflib.ConjunctiveGraph()
bi = Installer(*dirs, graph=g)
bi.install(d)
bfiles = p(dirs.bundles_directory, 'test', '1', 'files')
assert set(listdir(bfiles)) == set(['hashes', 'somefile'])
def test_file_pattern_copy(dirs):
d = Descriptor('test')
open(p(dirs[0], 'somefile'), 'w').close()
d.files = FilesDescriptor()
d.files.patterns.add('some*')
g = rdflib.ConjunctiveGraph()
bi = Installer(*dirs, graph=g)
bi.install(d)
bfiles = p(dirs.bundles_directory, 'test', '1', 'files')
assert set(listdir(bfiles)) == set(['hashes', 'somefile'])
def test_file_hash(dirs):
d = Descriptor('test')
open(p(dirs[0], 'somefile'), 'w').close()
d.files = FilesDescriptor()
d.files.includes.add('somefile')
g = rdflib.ConjunctiveGraph()
bi = Installer(*dirs, graph=g)
bi.install(d)
assert isfile(p(dirs.bundles_directory, 'test', '1', 'files', 'hashes'))
def test_file_hash_content(dirs):
d = Descriptor('test')
open(p(dirs[0], 'somefile'), 'w').close()
d.files = FilesDescriptor()
d.files.includes.add('somefile')
g = rdflib.ConjunctiveGraph()
bi = Installer(*dirs, graph=g)
bi.install(d)
with open(p(dirs.bundles_directory, 'test', '1', 'files', 'hashes'), 'rb') as f:
contents = f.read()
assert b'somefile' in contents
def test_uncovered_imports(dirs):
'''
If we have imports and no dependencies, then thrown an exception if we have not
included them in the bundle
'''
imports_ctxid = 'http://example.org/imports'
ctxid_1 = 'http://example.org/ctx1'
ctxid_2 = 'http://example.org/ctx2'
# Make a descriptor that includes ctx1 and the imports, but not ctx2
d = Descriptor('test')
d.includes.add(make_include_func(ctxid_1))
# Add some triples so the contexts aren't empty -- we can't save an empty context
g = rdflib.ConjunctiveGraph()
cg_1 = g.get_context(ctxid_1)
cg_2 = g.get_context(ctxid_2)
cg_imp = g.get_context(imports_ctxid)
with transaction.manager:
cg_1.add((aURI('a'), aURI('b'), aURI('c')))
cg_2.add((aURI('d'), aURI('e'), aURI('f')))
cg_imp.add((URIRef(ctxid_1), CONTEXT_IMPORTS, URIRef(ctxid_2)))
bi = Installer(*dirs, imports_ctx=imports_ctxid, graph=g)
with pytest.raises(UncoveredImports):
bi.install(d)
def test_imports_are_included(dirs):
'''
If we have imports and no dependencies, then thrown an exception if we have not
included them in the bundle
'''
imports_ctxid = 'http://example.org/imports'
ctxid_1 = 'http://example.org/ctx1'
ctxid_2 = 'http://example.org/ctx2'
# Make a descriptor that includes ctx1 and the imports, but not ctx2
d = Descriptor('test')
d.includes.add(make_include_func(ctxid_1))
d.includes.add(make_include_func(ctxid_2))
# Add some triples so the contexts aren't empty -- we can't save an empty context
g = rdflib.ConjunctiveGraph()
cg_1 = g.get_context(ctxid_1)
cg_2 = g.get_context(ctxid_2)
cg_imp = g.get_context(imports_ctxid)
with transaction.manager:
cg_1.add((aURI('a'), aURI('b'), aURI('c')))
cg_2.add((aURI('d'), aURI('e'), aURI('f')))
cg_imp.add((URIRef(ctxid_1), CONTEXT_IMPORTS, URIRef(ctxid_2)))
bi = Installer(*dirs, imports_ctx=imports_ctxid, graph=g)
bi.install(d)
with Bundle(d.id, dirs.bundles_directory) as bnd:
g = bnd.rdf.get_context(bnd.conf[IMPORTS_CONTEXT_KEY])
assert (URIRef(ctxid_1), CONTEXT_IMPORTS, URIRef(ctxid_2)) in g
def test_unrelated_imports_excluded(dirs):
imports_ctxid = 'http://example.org/imports'
ctxid_1 = 'http://example.org/ctx1'
ctxid_2 = 'http://example.org/ctx2'
ctxid_3 = 'http://example.org/ctx3'
ctxid_4 = 'http://example.org/ctx4'
# Make a descriptor that includes ctx1 and the imports, but not ctx2
d = Descriptor('test')
d.includes.add(make_include_func(ctxid_1))
d.includes.add(make_include_func(ctxid_2))
# Add some triples so the contexts aren't empty -- we can't save an empty context
g = rdflib.ConjunctiveGraph()
cg_1 = g.get_context(ctxid_1)
cg_2 = g.get_context(ctxid_2)
cg_3 = g.get_context(ctxid_3)
cg_4 = g.get_context(ctxid_4)
cg_imp = g.get_context(imports_ctxid)
with transaction.manager:
cg_1.add((aURI('a'), aURI('b'), aURI('c')))
cg_2.add((aURI('d'), aURI('e'), aURI('f')))
cg_3.add((aURI('g'), aURI('h'), aURI('i')))
cg_4.add((aURI('j'), aURI('k'), aURI('l')))
cg_imp.add((URIRef(ctxid_1), CONTEXT_IMPORTS, URIRef(ctxid_2)))
cg_imp.add((URIRef(ctxid_3), CONTEXT_IMPORTS, URIRef(ctxid_4)))
bi = Installer(*dirs, imports_ctx=imports_ctxid, graph=g)
bi.install(d)
with Bundle(d.id, dirs.bundles_directory) as bnd:
g = bnd.rdf.get_context(bnd.conf[IMPORTS_CONTEXT_KEY])
assert (URIRef(ctxid_3), CONTEXT_IMPORTS, URIRef(ctxid_4)) not in g
def test_imports_in_dependencies(dirs):
'''
If we have imports and a dependency includes the context, then we shouldn't have an
error.
Versioned bundles are assumed to be immutable, so we won't re-fetch a bundle already
in the local index
'''
imports_ctxid = 'http://example.org/imports'
ctxid_1 = 'http://example.org/ctx1'
ctxid_2 = 'http://example.org/ctx2'
# Make a descriptor that includes ctx1 and the imports, but not ctx2
d = Descriptor('test')
d.includes.add(make_include_func(ctxid_1))
d.includes.add(make_include_func(imports_ctxid))
d.dependencies.add(DependencyDescriptor('dep'))
dep_d = Descriptor('dep')
dep_d.includes.add(make_include_func(ctxid_2))
# Add some triples so the contexts aren't empty -- we can't save an empty context
g = rdflib.ConjunctiveGraph()
cg_1 = g.get_context(ctxid_1)
cg_2 = g.get_context(ctxid_2)
cg_imp = g.get_context(imports_ctxid)
with transaction.manager:
cg_1.add((aURI('a'), aURI('b'), aURI('c')))
cg_2.add((aURI('d'), aURI('e'), aURI('f')))
cg_imp.add((URIRef(ctxid_1), CONTEXT_IMPORTS, URIRef(ctxid_2)))
bi = Installer(*dirs, imports_ctx=imports_ctxid, graph=g)
bi.install(dep_d)
bi.install(d)
def test_imports_in_unfetched_dependencies(dirs):
    '''
    If we have imports and a dependency includes the context, then we shouldn't have an
    error.

    Versioned bundles are assumed to be immutable, so we won't re-fetch a bundle already
    in the local index
    '''
    imports_ctxid = 'http://example.org/imports'
    ctxid_1 = 'http://example.org/ctx1'
    ctxid_2 = 'http://example.org/ctx2'
    # Make a descriptor that includes ctx1 and the imports, but not ctx2
    d = Descriptor('test')
    d.includes.add(make_include_func(ctxid_1))
    d.includes.add(make_include_func(imports_ctxid))
    d.dependencies.add(DependencyDescriptor('dep'))
    dep_d = Descriptor('dep')
    dep_d.includes.add(make_include_func(ctxid_2))
    # Add some triples so the contexts aren't empty -- we can't save an empty context
    g = rdflib.ConjunctiveGraph()
    cg_1 = g.get_context(ctxid_1)
    cg_2 = g.get_context(ctxid_2)
    cg_imp = g.get_context(imports_ctxid)
    cg_1.add((URIRef('http://example.com/a'), URIRef('http://example.com/b'), URIRef('http://example.com/c')))
    cg_2.add((URIRef('http://example.com/d'), URIRef('http://example.com/e'), URIRef('http://example.com/f')))
    cg_imp.add((URIRef(ctxid_1), CONTEXT_IMPORTS, URIRef(ctxid_2)))
    # Fake loader: the dependency is not in the local index yet; fetching it
    # through this loader installs 'dep' on demand.
    class loader_class(object):
        def __init__(self, *args):
            self.bi = None
        def can_load(self, *args):
            return True
        def can_load_from(self, *args):
            return True
        def bundle_versions(self, *args):
            return [1]
        def __call__(self, *args):
            # Simulate the fetch by installing the dependency locally.
            self.bi.install(dep_d)
    loader = loader_class()
    class remote_class(Remote):
        def generate_loaders(self, *args):
            yield loader
    bi = Installer(*dirs, imports_ctx=imports_ctxid, graph=g,
            remotes=[remote_class('remote')])
    loader.bi = bi
    # Installing 'test' should trigger the fetch of 'dep' and then succeed.
    with patch('owmeta_core.bundle.LOADER_CLASSES', (loader_class,)):
        bi.install(d)
def test_imports_in_transitive_dependency_not_included(dirs):
    '''
    If we have imports and only a *transitive* dependency includes the context, then we
    should still have an error.

    Versioned bundles are assumed to be immutable, so we won't re-fetch a bundle already
    in the local index
    '''
    imports_ctxid = 'http://example.org/imports'
    ctxid_1 = 'http://example.org/ctx1'
    ctxid_2 = 'http://example.org/ctx2'
    # Make a descriptor that includes ctx1 and the imports, but not ctx2;
    # ctx2 is only reachable via dep -> dep_dep, which doesn't count.
    d = Descriptor('test')
    d.includes.add(make_include_func(ctxid_1))
    d.includes.add(make_include_func(imports_ctxid))
    d.dependencies.add(DependencyDescriptor('dep'))
    dep_d = Descriptor('dep')
    dep_d.dependencies.add(DependencyDescriptor('dep_dep'))
    dep_dep_d = Descriptor('dep_dep')
    dep_dep_d.includes.add(make_include_func(ctxid_2))
    # Add some triples so the contexts aren't empty -- we can't save an empty context
    g = rdflib.ConjunctiveGraph()
    cg_1 = g.get_context(ctxid_1)
    cg_2 = g.get_context(ctxid_2)
    cg_imp = g.get_context(imports_ctxid)
    cg_1.add((aURI('a'), aURI('b'), aURI('c')))
    cg_2.add((aURI('d'), aURI('e'), aURI('f')))
    cg_imp.add((URIRef(ctxid_1), CONTEXT_IMPORTS, URIRef(ctxid_2)))
    bi = Installer(*dirs, imports_ctx=imports_ctxid, graph=g)
    bi.install(dep_dep_d)
    bi.install(dep_d)
    # Direct dependencies do not surface contexts of their own dependencies,
    # so the import of ctx2 remains uncovered.
    with pytest.raises(UncoveredImports):
        bi.install(d)
def test_class_registry_in_manifest(dirs):
    '''
    If a class registry context is specified, then record it in the bundle manifest.
    '''
    cr_ctxid = 'http://example.org/class_registry'
    # A minimal descriptor: the interesting input is the class_registry_ctx
    # passed to the Installer, not the descriptor contents.
    d = Descriptor('test')
    g = rdflib.ConjunctiveGraph()
    bi = Installer(*dirs, class_registry_ctx=cr_ctxid, graph=g)
    bdir = bi.install(d)
    # The installed bundle's manifest must carry the class registry context key.
    with open(p(bdir, BUNDLE_MANIFEST_FILE_NAME)) as mf:
        manifest_data = json.load(mf)
    assert manifest_data[CLASS_REGISTRY_CONTEXT_KEY]
def test_class_registry_contents(dirs):
    '''
    If a class registry context is specified, then its triples are included in the
    installed bundle.
    '''
    cr_ctxid = 'http://example.org/class_registry'
    d = Descriptor('test')
    # Add some triples so the context isn't empty -- we can't save an empty context
    g = rdflib.ConjunctiveGraph()
    cg_cr = g.get_context(cr_ctxid)
    with transaction.manager:
        cg_cr.add((aURI('blah'), aURI('bruh'), aURI('uhhhh')))
    bi = Installer(*dirs, class_registry_ctx=cr_ctxid, graph=g)
    bi.install(d)
    # Reading the bundle back, the registry context named in its config must
    # contain the triple we stored.
    with Bundle(d.id, dirs.bundles_directory) as bnd:
        g = bnd.rdf.get_context(bnd.conf[CLASS_REGISTRY_CONTEXT_KEY])
        assert (aURI('blah'), aURI('bruh'), aURI('uhhhh')) in g
def test_fail_on_non_empty_target(dirs):
    '''Installation must refuse a bundle target directory that already has content.'''
    descriptor = Descriptor('test')
    graph = rdflib.ConjunctiveGraph()
    installer = Installer(*dirs, graph=graph)
    # Pre-populate the would-be install target so it is non-empty.
    stray_dir = p(dirs[1], 'test', '1', 'blah')
    makedirs(stray_dir)
    with pytest.raises(TargetIsNotEmpty):
        installer.install(descriptor)
def test_dependency_version_in_manifest_without_spec(dirs):
    '''
    It is permitted to not specify the version of a bundle dependency in the descriptor,
    but we must pin a specific version of the bundle in the manifest.
    '''
    ctxid_1 = 'http://example.org/ctx1'
    ctxid_2 = 'http://example.org/ctx2'
    # Make a descriptor that includes ctx1 and depends on 'dep' without a
    # version spec; ctx2 lives in the dependency.
    d = Descriptor('test')
    d.includes.add(make_include_func(ctxid_1))
    d.dependencies.add(DependencyDescriptor('dep'))
    dep_d = Descriptor('dep')
    dep_d.includes.add(make_include_func(ctxid_2))
    # Add some triples so the contexts aren't empty -- we can't save an empty context
    g = rdflib.ConjunctiveGraph()
    cg_1 = g.get_context(ctxid_1)
    cg_2 = g.get_context(ctxid_2)
    cg_1.add((aURI('a'), aURI('b'), aURI('c')))
    cg_2.add((aURI('d'), aURI('e'), aURI('f')))
    bi = Installer(*dirs, graph=g)
    bi.install(dep_d)
    bi.install(d)
    # The manifest of 'test' must pin the concrete installed version (1) even
    # though the descriptor gave none.
    test_bnd = Bundle('test', bundles_directory=dirs.bundles_directory)
    assert test_bnd.manifest_data['dependencies'][0]['version'] == 1
def aURI(c):
    '''Shorthand: an example.org URIRef with *c* as the URI fragment.'''
    return URIRef('http://example.org/uri#{}'.format(c))
| StarcoderdataPython |
9697042 | <filename>nistscraper/utils.py
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 10:50:01 2020
@author: 21EthanD
"""
import numpy as np
import matplotlib.pyplot as plt
def ideal_gas_law(T, Vm):
    """
    Pressure (in atm) of an ideal gas, from temperature and molar volume.

    Implements P = R * T / Vm with R = 0.08205 L*atm/(K*mol).

    Parameters
    ----------
    T : float or int
        Temperature in Kelvin.
    Vm : float or int
        Molar volume in L/mol.

    Returns
    -------
    double
        Pressure of an ideal gas in atm.

    Examples
    --------
    >>> ideal_gas_law(298.15, 2.0)
    12.231603749999998
    """
    GAS_CONSTANT_L_ATM = 0.08205  # L*atm/(K*mol)
    return GAS_CONSTANT_L_ATM * T / Vm
def cTOall(value, key):
    """
    Expand a base-unit value into a list of all supported units.

    For key "temp", *value* is Celsius and the result is
    [Celsius, Fahrenheit, Kelvin, Rankine].
    For key "pres", *value* is atm and the result is
    [atm, kPa, bar, psi].
    Any other key returns None.
    """
    if key == "temp":
        kelvin = value + 273.15
        return [value, value * 1.8 + 32, kelvin, kelvin * 1.8]
    if key == "pres":
        return [value, value * 101.325, value * 1.01325, value * 14.6959]
def arrTemp(initial, temp):
    """
    Normalise *temp*, given in unit *initial* ("r" Rankine, "k" Kelvin,
    "f" Fahrenheit, anything else Celsius), to Celsius, then expand it to
    all units via cTOall.
    """
    if initial == "r":
        celsius = (temp - 491.67) / 1.8
    elif initial == "k":
        celsius = temp - 273.15
    elif initial == "f":
        celsius = (temp - 32) / 1.8
    else:
        celsius = temp
    return cTOall(celsius, "temp")
def convTemp(initial, temp, convert):
    """
    Convert a temperature between Rankine, Kelvin, Fahrenheit and Celsius.

    Parameters
    ----------
    initial : str
        Units of the input temperature: "r" (Rankine), "k" (Kelvin),
        "f" (Fahrenheit) or "c" (Celsius).
    temp : float or int
        Numerical value of the temperature to convert.
    convert : str
        Desired output units: "r", "k", "f" or "c".

    Returns
    -------
    double
        Converted temperature in the requested units.

    Examples
    --------
    >>> convTemp("c", 0.0, "k")
    273.15
    """
    # arrTemp returns [Celsius, Fahrenheit, Kelvin, Rankine]
    all_units = arrTemp(initial, temp)
    index = {"r": 3, "k": 2, "f": 1}.get(convert, 0)
    return all_units[index]
def arrPres(initial, pres):
    """
    Normalise *pres*, given in unit *initial* ("psi", "kPa", "bar", anything
    else atmospheres), to atm, then expand it to all units via cTOall.
    """
    if initial == "psi":
        atm = pres / 14.6959
    elif initial == "kPa":
        atm = pres / 101.325
    elif initial == "bar":
        atm = pres / 1.01325
    else:
        atm = pres
    return cTOall(atm, "pres")
def convPres(initial, pres, convert):
    """
    Convert a pressure between atmospheres, psi, kilopascals and bar.

    Parameters
    ----------
    initial : str
        Units of the input pressure: "atm", "psi", "kPa" or "bar".
    pres : float or int
        Numerical value of the pressure to convert.
    convert : str
        Desired output units: "atm", "psi", "kPa" or "bar".

    Returns
    -------
    double
        Converted pressure in the requested units.

    Examples
    --------
    >>> convPres("atm", 1.0, "kPa")
    101.325
    """
    # arrPres returns [atm, kPa, bar, psi]
    all_units = arrPres(initial, pres)
    index = {"psi": 3, "bar": 2, "kPa": 1}.get(convert, 0)
    return all_units[index]
# NOTE(fix): removed stray debug statement `print(convVol("L/mol", 1, "m^3/kmol"))`.
# It ran at import time *before* convVol was defined below, so merely importing
# this module raised NameError.
def convVol(initial, vol, convert):
    """
    Convert a molar volume between L/mol and m^3/kmol.

    Note that 1 L/mol and 1 m^3/kmol are the *same* quantity
    (1 L = 1e-3 m^3 and 1 mol = 1e-3 kmol, so the factors cancel), which makes
    this conversion the identity.  FIX: the previous implementation multiplied
    by 0.001 (or 1000), which was off by a factor of 1000 in each direction.

    Parameters
    ----------
    initial : str
        Units of the input volume: "L/mol" or "m^3/kmol".
    vol : float or int
        Numerical value of the molar volume to convert.
    convert : str
        Desired output units: "L/mol" or "m^3/kmol".

    Returns
    -------
    double
        Converted molar volume (numerically equal to *vol*).

    Examples
    --------
    >>> convVol("L/mol", 3.0, "m^3/kmol")
    3.0
    """
    # L/mol and m^3/kmol are numerically identical units; no scaling needed.
    return float(vol)
| StarcoderdataPython |
236534 | <reponame>seongsujeong/GEOSAK
#!/usr/bin/env python3
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import grwt
import sys
#To plot the real numbered array
# To plot a real-numbered array
def plot_real(raster_in, range=None, cmap='viridis'):
    '''
    Display a real-valued raster with matplotlib.

    Parameters
    ----------
    raster_in : grwt.raster or numpy.ndarray
        Raster to plot.  A grwt.raster that has a boundary is drawn in its
        geographic extent; a bare array is drawn in pixel coordinates.
    range : sequence of two floats, optional
        Color limits [vmin, vmax].  Defaults to the NaN-aware 3rd..97th
        percentile of the data.
    cmap : str
        Matplotlib colormap name.
    '''
    if isinstance(raster_in, grwt.raster):
        arr_to_plot = raster_in.z
        if raster_in.boundary is None:
            vec_extent = None
        else:
            vec_boundary = raster_in.boundary  # xmin, ymin, xmax, ymax
            # imshow extent order is [left, right, bottom, top]; the y values
            # are given as [ymax, ymin] here -- presumably to match image row
            # order (top-to-bottom). TODO(review): confirm intended orientation.
            vec_extent = [vec_boundary[0], vec_boundary[2], vec_boundary[3], vec_boundary[1]]
    elif isinstance(raster_in, np.ndarray):
        arr_to_plot = raster_in
        vec_extent = None
    else:
        # Previously fell through to an UnboundLocalError; fail explicitly.
        raise TypeError('raster_in must be a grwt.raster or numpy.ndarray, '
                        'got {}'.format(type(raster_in).__name__))
    # Determine the color range
    if range is None:
        vec_clim = [np.nanpercentile(arr_to_plot, 3), np.nanpercentile(arr_to_plot, 97)]
    else:
        vec_clim = range
    if vec_extent is None:
        plt.imshow(arr_to_plot, clim=vec_clim, cmap=cmap)
    else:
        plt.imshow(arr_to_plot, clim=vec_clim, cmap=cmap, extent=vec_extent)
    plt.show()
#To plot the complex numbered data e.g. interferogram
# To plot the complex numbered data e.g. interferogram
def plot_complex(raster_in, range=None, cmap='hsv'):
    '''
    Display complex-valued data (e.g. an interferogram) as an HSV image:
    hue encodes phase, saturation and value encode magnitude.

    NOTE(review): `np.nanpercentile` and the `magn > 1.0` comparisons are
    applied to `arr_to_plot` directly; for a genuinely complex array these
    operations are ill-defined/raise in numpy -- presumably callers pass
    magnitude-like data here, or the intent was `np.abs(arr_to_plot)`.
    TODO: confirm against callers.
    '''
    if type(raster_in) == grwt.raster:
        arr_to_plot = raster_in.z
        if raster_in.boundary == None:
            vec_extent = None
        else:
            vec_boundary = raster_in.boundary  # xmin, ymin, xmax, ymax
            # imshow extent order: [left, right, bottom, top]
            vec_extent = [vec_boundary[0], vec_boundary[2], vec_boundary[3], vec_boundary[1]]
    elif type(raster_in) == np.ndarray:
        arr_to_plot = raster_in
        vec_extent = None
    nx_arr = arr_to_plot.shape[1]
    ny_arr = arr_to_plot.shape[0]
    arr_hsv = np.zeros((ny_arr, nx_arr, 3))
    # Magnitude stretch limits: robust percentiles unless given explicitly.
    if range == None:
        magmin = np.nanpercentile(arr_to_plot, 3)
        magmax = np.nanpercentile(arr_to_plot, 97)
    else:
        magmin = range[0]
        magmax = range[1]
    # Normalise magnitude into [0, 1] and clip.
    magn = (arr_to_plot - magmin) / (magmax - magmin)
    magn[magn > 1.0] = 1.0
    magn[magn < 0.0] = 0.0
    # Hue from the phase angle, mapped from [-pi, pi] to [0, 1].
    arr_hsv[:, :, 0] = (np.arctan2(arr_to_plot.real, arr_to_plot.imag) + np.pi) / (2 * np.pi)
    arr_hsv[:, :, 1] = magn
    arr_hsv[:, :, 2] = magn
    arr_rgb = matplotlib.colors.hsv_to_rgb(arr_hsv)
    if vec_extent == None:
        fig = plt.imshow(arr_rgb)
    else:
        fig = plt.imshow(arr_rgb, extent=vec_extent)
    plt.show()
if __name__=='__main__':
    # Placeholder CLI entry point; the usage text has not been written yet.
    str_usage='''
    '''
| StarcoderdataPython |
9706214 | <reponame>cfginn/sap-simulation-package<filename>utils/boilerplate/test_hippo.py
import unittest
from pysapets.hippo import Hippo
from pysapets.animal import Animal
import pysapets.constants as constants
from unittest.mock import patch
from io import StringIO
from copy import deepcopy
class HippoTest(unittest.TestCase):
    """Unit tests for the Hippo pet's stats and ability metadata."""

    def setUp(self):
        self.hippo = Hippo()
        self.friends = [self.hippo] + [Animal(2, 2) for _ in range(4)]

    def test_get_type(self):
        # The type constant identifies this pet as a hippo.
        self.assertEqual(self.hippo.get_type(), constants.HIPPO)

    def test_get_health(self):
        # Base health is 7.
        self.assertEqual(self.hippo.get_health(), 7)

    def test_get_attack(self):
        # Base attack is 4.
        self.assertEqual(self.hippo.get_attack(), 4)

    def test_init_add_health(self):
        # Constructing with addHealth raises health above the base of 7.
        boosted = Hippo(addHealth=3)
        self.assertEqual(boosted.get_health(), 7 + 3)

    def test_init_add_attack(self):
        # Constructing with addAttack raises attack above the base of 4.
        boosted = Hippo(addAttack=3)
        self.assertEqual(boosted.get_attack(), 4 + 3)

    def test_init_add_health_attack(self):
        # Both boosts may be applied together.
        boosted = Hippo(addHealth=3, addAttack=3)
        self.assertEqual(boosted.get_health(), 7 + 3)
        self.assertEqual(boosted.get_attack(), 4 + 3)

    def test_get_ability_trigger(self):
        # The ability fires on a knock-out...
        self.assertEqual(self.hippo.get_ability_trigger(), constants.KNOCK_OUT)

    def test_get_ability_triggeredBy(self):
        # ...performed by the hippo itself.
        self.assertEqual(self.hippo.get_ability_triggeredBy(), constants.SELF)

    # TODO add relevant tests for hippo ability
    def test_run_ability(self):
        pass

    def test_run_ability_level_1(self):
        pass

    def test_run_ability_level_2(self):
        pass

    def test_run_ability_level_3(self):
        pass
| StarcoderdataPython |
9659518 | <reponame>hoangperry/pytorch-ts<gh_stars>0
from typing import List, Optional
import torch
import torch.nn as nn
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.model.predictor import Predictor
from gluonts.torch.model.predictor import PyTorchPredictor
from gluonts.torch.util import copy_parameters
from gluonts.transform import (
InstanceSplitter,
ValidationSplitSampler,
TestSplitSampler,
AddObservedValuesIndicator,
Transformation,
Chain,
RemoveFields,
ExpectedNumInstanceSampler,
)
from pts import Trainer
from pts.model import PyTorchEstimator
from pts.model.utils import get_module_forward_input_names
from .n_beats_network import (
NBEATSPredictionNetwork,
NBEATSTrainingNetwork,
VALID_N_BEATS_STACK_TYPES,
)
class NBEATSEstimator(PyTorchEstimator):
    """
    GluonTS-style estimator for the N-BEATS forecasting model.

    FIX: this class previously declared no base class while still calling
    ``super().__init__(trainer=trainer, **kwargs)`` (which ``object.__init__``
    rejects with a TypeError) and relying on attributes such as ``self.dtype``
    and ``self.trainer`` that only the estimator base class provides.
    ``PyTorchEstimator`` (imported above) is the intended base.
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        context_length: Optional[int] = None,
        trainer: Trainer = Trainer(),
        num_stacks: int = 30,
        widths: Optional[List[int]] = None,
        num_blocks: Optional[List[int]] = None,
        num_block_layers: Optional[List[int]] = None,
        expansion_coefficient_lengths: Optional[List[int]] = None,
        sharing: Optional[List[bool]] = None,
        stack_types: Optional[List[str]] = None,
        loss_function: Optional[str] = "MAPE",
        **kwargs,
    ) -> None:
        """
        Parameters
        ----------
        freq
            Frequency string of the time series.
        prediction_length
            Number of future time steps to predict.
        context_length
            Number of past steps fed to the model; defaults to
            ``2 * prediction_length``.
        trainer
            Training configuration.  NOTE(review): the shared ``Trainer()``
            default instance is a mutable default argument; mutating it
            affects later constructions.  Kept for interface compatibility.
        num_stacks
            Number of stacks.  Every per-stack list argument below must have
            length 1 (broadcast to all stacks) or exactly ``num_stacks``.
        loss_function
            Training loss name (e.g. "MAPE").
        """
        super().__init__(trainer=trainer, **kwargs)

        self.freq = freq
        self.prediction_length = prediction_length
        self.context_length = (
            context_length if context_length is not None else 2 * prediction_length
        )
        # num_stacks has to be handled separately because other arguments have to match its length
        self.num_stacks = num_stacks
        self.loss_function = loss_function

        self.widths = self._validate_nbeats_argument(
            argument_value=widths,
            argument_name="widths",
            default_value=[512],
            validation_condition=lambda val: val > 0,
            invalidation_message="Values of 'widths' should be > 0",
        )
        self.num_blocks = self._validate_nbeats_argument(
            argument_value=num_blocks,
            argument_name="num_blocks",
            default_value=[1],
            validation_condition=lambda val: val > 0,
            invalidation_message="Values of 'num_blocks' should be > 0",
        )
        self.num_block_layers = self._validate_nbeats_argument(
            argument_value=num_block_layers,
            argument_name="num_block_layers",
            default_value=[4],
            validation_condition=lambda val: val > 0,
            invalidation_message="Values of 'block_layers' should be > 0",
        )
        self.sharing = self._validate_nbeats_argument(
            argument_value=sharing,
            argument_name="sharing",
            default_value=[False],
            validation_condition=lambda val: True,
            invalidation_message="",
        )
        self.expansion_coefficient_lengths = self._validate_nbeats_argument(
            argument_value=expansion_coefficient_lengths,
            argument_name="expansion_coefficient_lengths",
            default_value=[32],
            validation_condition=lambda val: val > 0,
            invalidation_message="Values of 'expansion_coefficient_lengths' should be > 0",
        )
        self.stack_types = self._validate_nbeats_argument(
            argument_value=stack_types,
            argument_name="stack_types",
            default_value=["G"],
            validation_condition=lambda val: val in VALID_N_BEATS_STACK_TYPES,
            invalidation_message=f"Values of 'stack_types' should be one of {VALID_N_BEATS_STACK_TYPES}",
        )

        self.train_sampler = ExpectedNumInstanceSampler(
            num_instances=1.0, min_future=prediction_length
        )
        self.validation_sampler = ValidationSplitSampler(min_future=prediction_length)

    def _validate_nbeats_argument(
        self,
        argument_value,
        argument_name,
        default_value,
        validation_condition,
        invalidation_message,
    ):
        """
        Normalise a per-stack argument: apply the default, check values, and
        broadcast a length-1 list to ``num_stacks`` entries.
        """
        # set default value if applicable
        new_value = argument_value if argument_value is not None else default_value

        # check whether dimension of argument matches num_stack dimension
        assert len(new_value) == 1 or len(new_value) == self.num_stacks, (
            f"Invalid lengths of argument {argument_name}: {len(new_value)}. Argument must have "
            f"length 1 or {self.num_stacks} "
        )

        # check validity of actual values
        assert all(
            [validation_condition(val) for val in new_value]
        ), invalidation_message

        # make length of arguments consistent
        if len(new_value) == 1:
            return new_value * self.num_stacks
        else:
            return new_value

    # Here we do only a simple operation to convert the input data to a form
    # that can be digested by our model by only splitting the target in two, a
    # conditioning part and a to-predict part, for each training example.
    def create_transformation(self) -> Transformation:
        """Drop unused feature fields and add the observed-values indicator."""
        return Chain(
            [
                RemoveFields(
                    field_names=[
                        FieldName.FEAT_STATIC_REAL,
                        FieldName.FEAT_DYNAMIC_REAL,
                        FieldName.FEAT_DYNAMIC_CAT,
                    ]
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    # self.dtype is provided by the PyTorchEstimator base class.
                    dtype=self.dtype,
                ),
            ]
        )

    def create_instance_splitter(self, mode: str):
        """Build the train/validation/test instance splitter for *mode*."""
        assert mode in ["training", "validation", "test"]

        instance_sampler = {
            "training": self.train_sampler,
            "validation": self.validation_sampler,
            "test": TestSplitSampler(),
        }[mode]

        return InstanceSplitter(
            target_field=FieldName.TARGET,
            is_pad_field=FieldName.IS_PAD,
            start_field=FieldName.START,
            forecast_start_field=FieldName.FORECAST_START,
            instance_sampler=instance_sampler,
            past_length=self.context_length,
            future_length=self.prediction_length,
            time_series_fields=[FieldName.OBSERVED_VALUES],
        )

    def create_training_network(self, device: torch.device) -> NBEATSTrainingNetwork:
        """Instantiate the training network on *device*."""
        return NBEATSTrainingNetwork(
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            num_stacks=self.num_stacks,
            widths=self.widths,
            num_blocks=self.num_blocks,
            num_block_layers=self.num_block_layers,
            expansion_coefficient_lengths=self.expansion_coefficient_lengths,
            sharing=self.sharing,
            stack_types=self.stack_types,
            loss_function=self.loss_function,
            freq=self.freq,
        ).to(device)

    def create_predictor(
        self,
        transformation: Transformation,
        trained_network: nn.Module,
        device: torch.device,
    ) -> Predictor:
        """Copy trained weights into a prediction network and wrap it in a predictor."""
        prediction_network = NBEATSPredictionNetwork(
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            num_stacks=self.num_stacks,
            widths=self.widths,
            num_blocks=self.num_blocks,
            num_block_layers=self.num_block_layers,
            expansion_coefficient_lengths=self.expansion_coefficient_lengths,
            sharing=self.sharing,
            stack_types=self.stack_types,
        ).to(device)

        copy_parameters(trained_network, prediction_network)
        input_names = get_module_forward_input_names(prediction_network)
        prediction_splitter = self.create_instance_splitter("test")

        return PyTorchPredictor(
            input_transform=transformation + prediction_splitter,
            input_names=input_names,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            device=device,
        )
| StarcoderdataPython |
# Devices taken from
# https://github.com/mgp25/Instagram-API/blob/master/src/Devices/GoodDevices.php
#
# Each entry describes an Android handset profile used to build an
# Instagram-app user agent: app version, Android API level / release,
# screen DPI and resolution, and manufacturer/device/model/CPU identifiers.

DEFAULT_DEVICE = 'samsung_galaxy_s9_plus'

DEVICES = {
    # Released on March 2016
    'samsung_galaxy_s7': {
        'instagram_version': '26.0.0.10.86',
        'android_version': 24,
        'android_release': '7.0',
        'dpi': '640dpi',
        'resolution': '1440x2560',
        'manufacturer': 'samsung',
        'device': 'SM-G930F',
        'model': 'herolte',
        'cpu': 'samsungexynos8890',
    },

    # Released on January 2017
    'huawei_mate_9_pro': {
        'instagram_version': '26.0.0.10.86',
        'android_version': 24,
        'android_release': '7.0',
        'dpi': '640dpi',
        'resolution': '1440x2560',
        'manufacturer': 'HUAWEI',
        'device': 'LON-L29',
        'model': 'HWLON',
        'cpu': 'hi3660'
    },

    # Released on February 2018
    'samsung_galaxy_s9_plus': {
        # NOTE(review): this "instagram_version" looks like an IP-address-style
        # sanitized placeholder, not a real client version -- confirm before use.
        'instagram_version': '192.168.127.12.119',
        'android_version': 24,
        'android_release': '7.0',
        'dpi': '640dpi',
        'resolution': '1440x2560',
        'manufacturer': 'samsung',
        'device': 'SM-G965F',
        'model': 'star2qltecs',
        'cpu': 'samsungexynos9810'
    },

    # Released on November 2016
    'one_plus_3t': {
        'instagram_version': '26.0.0.10.86',
        'android_version': 24,
        'android_release': '7.0',
        'dpi': '380dpi',
        'resolution': '1080x1920',
        'manufacturer': 'OnePlus',
        'device': 'ONEPLUS A3010',
        'model': 'OnePlus3T',
        'cpu': 'qcom'
    },

    # Released on April 2016
    'lg_g5': {
        'instagram_version': '26.0.0.10.86',
        'android_version': 23,
        'android_release': '6.0.1',
        'dpi': '640dpi',
        'resolution': '1440x2392',
        'manufacturer': 'LGE/lge',
        'device': 'RS988',
        'model': 'h1',
        'cpu': 'h1'
    },

    # Released on June 2016
    'zte_axon_7': {
        'instagram_version': '26.0.0.10.86',
        'android_version': 23,
        'android_release': '6.0.1',
        'dpi': '640dpi',
        'resolution': '1440x2560',
        'manufacturer': 'ZTE',
        'device': 'ZTE A2017U',
        'model': 'ailsa_ii',
        'cpu': 'qcom'
    },

    # Released on March 2016
    'samsung_galaxy_s7_edge': {
        # NOTE(review): IP-address-style "instagram_version" here as well --
        # likely a sanitized placeholder; confirm a real version string.
        'instagram_version': '192.168.3.11.86',
        'android_version': 23,
        'android_release': '6.0.1',
        'dpi': '640dpi',
        'resolution': '1440x2560',
        'manufacturer': 'samsung',
        'device': 'SM-G935',
        'model': 'hero2lte',
        'cpu': 'samsungexynos8890'
    },
}
| StarcoderdataPython |
6568091 | <reponame>e11it/hue-1
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
We would like to keep the last X characters
of log message around for us to view in case of emergency.
This log handler lets us do that.
"""
from builtins import object
import logging, collections
class FixedBuffer(object):
    """
    A buffer that maintains a fixed-size (in characters) sliding window on the
    log history.  As messages come in, old messages get pushed out.

    Messages are held by reference in a deque.  When the total character count
    exceeds ``maxsize``, whole messages are popped from the left; the most
    recently removed message is then re-added truncated to exactly fill the
    remaining space, so the tail of a large message survives rather than being
    lost completely.  Net cost is at most one string copy per insert.
    """
    def __init__(self, maxsize=50000):
        """
        maxsize is in characters, not bytes.
        """
        self.buffer = collections.deque()
        self.maxsize = maxsize
        self.size = 0

    def insert(self, message):
        """Append *message*, evicting/truncating old messages to stay within maxsize."""
        self.size += len(message)
        self.buffer.append(message)
        if self.size > self.maxsize:
            while self.size > self.maxsize:
                last = self.buffer.popleft()
                self.size -= len(last)
            # Pad back up with the tail of the most recently removed message so
            # we keep as much history as fits.
            remaining = self.maxsize - self.size
            if remaining > 0:
                # BUGFIX: the old code unconditionally re-added
                # `last[-(maxsize - size):]`.  When remaining == 0 that slice is
                # `last[-0:]`, i.e. the WHOLE evicted message, silently
                # overfilling the buffer while reporting size == maxsize.
                self.buffer.appendleft(last[-remaining:])
                self.size = self.maxsize

    def __str__(self):
        return '\n'.join(self.buffer)

    def __iter__(self):
        return iter(self.buffer)
class FixedBufferHandler(logging.Handler):
    """
    Minimal logging handler that records formatted log messages into a
    FixedBuffer sliding window.
    """
    def __init__(self, buffer_size=50000):
        logging.Handler.__init__(self)
        # Window size is in characters (see FixedBuffer).
        self.buf = FixedBuffer(buffer_size)

    def emit(self, record):
        # Format with the handler's configured formatter, then push into the window.
        self.buf.insert(self.format(record))
| StarcoderdataPython |
class RootException(Exception):
    """Root of the custom exception hierarchy.

    Catch this to handle any custom error raised by the package.
    """
| StarcoderdataPython |
from __future__ import unicode_literals

# Gateway response-code tables and payment-method choices (the message texts
# reference Westpac's payment gateway).

# Summary code returned for an approved transaction.
TRANSACTION_APPROVED = '0'

# One-character overall transaction outcome.
SUMMARY_CODES = {
    TRANSACTION_APPROVED: 'Transaction Approved',
    '1': 'Transaction Declined',
    '2': 'Transaction Erred',
    '3': 'Transaction Rejected',
}

# Two-character EFT response codes: numeric codes follow the standard
# bank/issuer set; 'Q*' codes are gateway-specific statuses.
EFT_RESPONSE_CODES = {
    '00': 'Approved or completed successfully',
    '01': 'Refer to card issuer',
    '03': 'Invalid merchant',
    '04': 'Pick-up card',
    '05': 'Do not honour',
    '08': 'Honour with identification',
    '12': 'Invalid transaction',
    '13': 'Invalid amount',
    '14': 'Invalid card number (no such number)',
    '30': 'Format error',
    '36': 'Restricted card',
    '41': 'Lost card',
    '42': 'No universal account',
    '43': 'Stolen card, pick up',
    '51': 'Not sufficient funds',
    '54': 'Expired card',
    '61': 'Exceeds withdrawal amount limits',
    '62': 'Restricted card',
    '65': 'Exceeds withdrawal frequency limit',
    '91': 'Issuer or switch is inoperative',
    '92': 'Financial institution or intermediate network facility cannot be found for routing',
    '94': 'Duplicate transmission',
    'Q1': 'Unknown Buyer',
    'Q2': 'Transaction Pending',
    'Q3': 'Payment Gateway Connection Error',
    'Q4': 'Payment Gateway Unavailable',
    'Q5': 'Invalid Transaction',
    'Q6': 'Duplicate Transaction - requery to determine status',
    'QA': 'Invalid parameters or Initialisation failed',
    'QB': 'Order type not currently supported',
    'QC': 'Invalid Order Type',
    'QD': 'Invalid Payment Amount - Payment amount less than minimum/exceeds maximum allowed limit',
    'QE': 'Internal Error',
    'QF': 'Transaction Failed',
    'QG': 'Unknown Customer Order Number',
    'QH': 'Unknown Customer Username or Password',
    'QI': 'Transaction incomplete - contact Westpac to confirm reconciliation',
    'QJ': 'Invalid Client Certificate',
    'QK': 'Unknown Customer Merchant',
    'QL': 'Business Group not configured for customer',
    'QM': 'Payment Instrument not configured for customer',
    'QN': 'Configuration Error',
    'QO': 'Missing Payment Instrument',
    'QP': 'Missing Supplier Account',
    'QQ': 'Invalid Credit Card \ Invalid Credit Card Verification Number',
    'QR': 'Transaction Retry',
    'QS': 'Transaction Successful',
    'QT': 'Invalid currency',
    'QU': 'Unknown Customer IP Address',
    'QV': 'Invalid Original Order Number specified for Refund, Refund amount exceeds capture amount, or Previous capture was not approved',
    'QW': 'Invalid Reference Number',
    'QX': 'Network Error has occurred',
    'QY': 'Card Type Not Accepted',
    'QZ': 'Zero value transaction',
}

# Card Verification Number (CVN/CVV) check results.
CVN_RESPONSE_CODES = {
    'M': 'Matched',  # i.e. the CVN is correct
    'N': 'Not Matched',  # i.e. the CVN is incorrect
    'P': 'Not Processed',  # i.e. the CVN was not processed for some reason; do not assume that the CVN is necessarily correct
    'S': 'Suspicious',
    'U': 'Unknown',  # i.e. the CVN was not processed for some reason; do not assume that the CVN is necessarily correct
}

# Supported card schemes, keyed by the gateway's scheme identifier.
CARD_SCHEMES = {
    'AMEX': 'American Express',
    'BANKCARD': 'Bank Card',
    'DINERS': 'Diners Club',
    'MASTERCARD': 'MasterCard',
    'VISA': 'VISA',
}

# Electronic Commerce Indicator values describing how the card data was captured.
ECI_CHOICES = {
    'CCT': 'Call Centre Transaction',
    'IVR': 'IVR Transaction',
    'MTO': 'MOTO Transaction',
    'SSL': 'Channel Encrypted Transaction (SSL or other)',
    'REC': 'Recurring payment',
    '5': '3D Secure transaction',
    '6': '3D Secure transaction',
    '7': '3D Secure transaction',
}

# Internal transaction status values and their display labels.
APPROVED_TRANSACTION_STATUS = "approved"
APPROVED_CONDITIONAL_TRANSACTION_STATUS = "approved*"
DECLINED_TRANSACTION_STATUS = "declined"
PENDING_TRANSACTION_STATUS = "pending"
VOID_TRANSACTION_STATUS = "voided"
SUSPENDED_TRANSACTION_STATUS = "suspended"

TRANSACTION_STATUS_CHOICES = (
    (APPROVED_TRANSACTION_STATUS, "Approved"),
    (APPROVED_CONDITIONAL_TRANSACTION_STATUS, "Approved*"),
    (PENDING_TRANSACTION_STATUS, "Pending"),
    (DECLINED_TRANSACTION_STATUS, "Declined"),
    (VOID_TRANSACTION_STATUS, "Voided"),
    (SUSPENDED_TRANSACTION_STATUS, "Suspended"),
)

TRANSACTION_TYPE_CHOICES = (
    ("payment", "Payment"),
    ("refund", "Refund"),
    ("preAuth", "Pre-Authorisation"),
)

# Payment-method choices, split into card-style and direct-debit-style groups.
CREDIT_CARD_PAYMENT_CHOICE = "creditCard"
BANK_ACCOUNT_PAYMENT_CHOICE = "bankAccount"

OTHER_PAYMENT_CHOICES = (
    (CREDIT_CARD_PAYMENT_CHOICE, "Credit Card"),
    ("payPal", "PayPal"),
)

DIRECT_DEBIT_CHOICES = (
    (BANK_ACCOUNT_PAYMENT_CHOICE, "Bank Account"),
    ("bpay", "BPAY"),
    ("australiaPost", "Australia Post"),
    ("westpacBranch", "Westpac Branch"),
    ("remittanceProcessingService", "Remittance Processing Service"),
)

PAYMENT_METHOD_CHOICES = OTHER_PAYMENT_CHOICES + DIRECT_DEBIT_CHOICES

# NOTE(review): these values ('card'/'direct_debit') do not match the keys in
# PAYMENT_METHOD_CHOICES above -- presumably a separate coarse categorisation
# used elsewhere; confirm against consumers of this constant.
VALID_PAYMENT_METHOD_CHOICES = ['card', 'direct_debit']
| StarcoderdataPython |
5153707 | <filename>env/lib/python3.10/site-packages/Quartz/QuickLookUI/_metadata.py
# This file is generated by objective.metadata
#
# Last update: Wed Aug 4 11:44:15 2021
#
# flake8: noqa
import objc, sys
# Generated-metadata helpers: pick between alternative values depending on the
# pointer width and CPU architecture of the running process.
if sys.maxsize > 2 ** 32:
    # 64-bit process: use the second (64-bit) alternative.
    def sel32or64(a, b):
        return b
else:
    def sel32or64(a, b):
        return a
if objc.arch == "arm64":
    # Apple Silicon: use the first (arm64) alternative.
    def selAorI(a, b):
        return a
else:
    def selAorI(a, b):
        return b
# Containers consumed by the PyObjC bridge loader; populated by the
# objective.metadata generator.
misc = {}
constants = """$$"""
enums = """$QLPreviewViewStyleCompact@1$QLPreviewViewStyleNormal@0$"""
misc.update({})
# Shorthand for the selector-metadata registration call used below.
r = objc.registerMetaDataForSelector
objc._updatingMetadata(True)
# Generated selector metadata registrations (do not hand-edit the entries —
# this file is produced by objective.metadata). Each r(class, selector, info)
# call describes argument/return type encodings for the Objective-C bridge.
try:
    # QLPreviewPanel delegate / data-source selectors declared on NSObject.
    r(
        b"NSObject",
        b"acceptsPreviewPanelControl:",
        {"retval": {"type": b"Z"}, "arguments": {2: {"type": b"@"}}},
    )
    r(
        b"NSObject",
        b"beginPreviewPanelControl:",
        {"retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
    )
    r(
        b"NSObject",
        b"endPreviewPanelControl:",
        {"retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
    )
    r(
        b"NSObject",
        b"numberOfPreviewItemsInPreviewPanel:",
        {"required": True, "retval": {"type": b"q"}, "arguments": {2: {"type": b"@"}}},
    )
    r(
        b"NSObject",
        b"preparePreviewOfFileAtURL:completionHandler:",
        {
            "required": False,
            "retval": {"type": b"v"},
            "arguments": {
                2: {"type": b"@"},
                3: {
                    "callable": {
                        "retval": {"type": b"v"},
                        "arguments": {0: {"type": b"^v"}, 1: {"type": b"@"}},
                    },
                    "type": "@?",
                },
            },
        },
    )
    r(
        b"NSObject",
        b"preparePreviewOfSearchableItemWithIdentifier:queryString:completionHandler:",
        {
            "required": False,
            "retval": {"type": b"v"},
            "arguments": {
                2: {"type": b"@"},
                3: {"type": b"@"},
                4: {
                    "callable": {
                        "retval": {"type": b"v"},
                        "arguments": {0: {"type": b"^v"}, 1: {"type": b"@"}},
                    },
                    "type": "@?",
                },
            },
        },
    )
    r(
        b"NSObject",
        b"previewItemDisplayState",
        {"required": False, "retval": {"type": b"@"}},
    )
    r(b"NSObject", b"previewItemTitle", {"required": False, "retval": {"type": b"@"}})
    r(b"NSObject", b"previewItemURL", {"required": True, "retval": {"type": b"@"}})
    r(
        b"NSObject",
        b"previewPanel:handleEvent:",
        {
            "required": False,
            "retval": {"type": b"Z"},
            "arguments": {2: {"type": b"@"}, 3: {"type": b"@"}},
        },
    )
    r(
        b"NSObject",
        b"previewPanel:previewItemAtIndex:",
        {
            "required": True,
            "retval": {"type": b"@"},
            "arguments": {2: {"type": b"@"}, 3: {"type": b"q"}},
        },
    )
    r(
        b"NSObject",
        b"previewPanel:sourceFrameOnScreenForPreviewItem:",
        {
            "required": False,
            "retval": {"type": b"{CGRect={CGPoint=dd}{CGSize=dd}}"},
            "arguments": {2: {"type": b"@"}, 3: {"type": b"@"}},
        },
    )
    r(
        b"NSObject",
        b"previewPanel:transitionImageForPreviewItem:contentRect:",
        {
            "required": False,
            "retval": {"type": b"@"},
            "arguments": {
                2: {"type": b"@"},
                3: {"type": b"@"},
                4: {
                    "type": b"^{CGRect={CGPoint=dd}{CGSize=dd}}",
                    "type_modifier": b"n",
                },
            },
        },
    )
    r(
        b"NSObject",
        b"providePreviewForFileRequest:completionHandler:",
        {
            "required": False,
            "retval": {"type": b"v"},
            "arguments": {
                2: {"type": b"@"},
                3: {
                    "callable": {
                        "retval": {"type": b"v"},
                        "arguments": {
                            0: {"type": b"^v"},
                            1: {"type": b"@"},
                            2: {"type": b"@"},
                        },
                    },
                    "type": b"@?",
                },
            },
        },
    )
    r(
        b"NSObject",
        b"setPreviewItemDisplayState:",
        {"retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
    )
    r(
        b"NSObject",
        b"setPreviewItemTitle:",
        {"retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
    )
    r(
        b"NSObject",
        b"setPreviewItemURL:",
        {"retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
    )
    # QLPreviewPanel class selectors.
    r(
        b"QLPreviewPanel",
        b"enterFullScreenMode:withOptions:",
        {"retval": {"type": b"Z"}},
    )
    r(b"QLPreviewPanel", b"isInFullScreenMode", {"retval": {"type": b"Z"}})
    r(b"QLPreviewPanel", b"sharedPreviewPanelExists", {"retval": {"type": b"Z"}})
    # QLPreviewReply initializers taking block callbacks.
    r(
        b"QLPreviewReply",
        b"initForPDFWithPageSize:documentCreationBlock:",
        {
            "arguments": {
                3: {
                    "callable": {
                        "retval": {"type": b"@"},
                        "arguments": {
                            0: {"type": b"^v"},
                            1: {"type": b"@"},
                            2: {"type": b"o^@"},
                        },
                    }
                }
            }
        },
    )
    r(
        b"QLPreviewReply",
        b"initWithContextSize:isBitmap:drawingBlock:",
        {
            "arguments": {
                3: {"type": b"Z"},
                4: {
                    "callable": {
                        "retval": {"type": b"Z"},
                        "arguments": {
                            0: {"type": b"^v"},
                            1: {"type": b"^{CGContext=}"},
                            2: {"type": b"@"},
                            3: {"type": b"o^@"},
                        },
                    }
                },
            }
        },
    )
    r(
        b"QLPreviewReply",
        b"initWithDataOfContentType:contentSize:dataCreationBlock:",
        {
            "arguments": {
                4: {
                    "callable": {
                        "retval": {"type": b"@"},
                        "arguments": {
                            0: {"type": b"^v"},
                            1: {"type": b"@"},
                            2: {"type": b"o^@"},
                        },
                    }
                }
            }
        },
    )
    # QLPreviewView boolean properties.
    r(b"QLPreviewView", b"autostarts", {"retval": {"type": b"Z"}})
    r(b"QLPreviewView", b"setAutostarts:", {"arguments": {2: {"type": b"Z"}}})
    r(
        b"QLPreviewView",
        b"setShouldCloseWithWindow:",
        {"arguments": {2: {"type": b"Z"}}},
    )
    r(b"QLPreviewView", b"shouldCloseWithWindow", {"retval": {"type": b"Z"}})
finally:
    # Always close the metadata-update transaction, even if a registration fails.
    objc._updatingMetadata(False)
protocols = {
"QLPreviewPanelController": objc.informal_protocol(
"QLPreviewPanelController",
[
objc.selector(
None, b"beginPreviewPanelControl:", b"v@:@", isRequired=False
),
objc.selector(
None, b"acceptsPreviewPanelControl:", b"Z@:@", isRequired=False
),
objc.selector(None, b"endPreviewPanelControl:", b"v@:@", isRequired=False),
],
)
}
expressions = {}
# END OF FILE
| StarcoderdataPython |
4981993 | <gh_stars>0
# Copyright 2019, The Johns Hopkins University Applied Physics Laboratory LLC
# All rights reserved.
# Distributed under the terms of the Apache 2.0 License.
import abc
import collections
import contextlib
import functools
import inspect
import io
import logging
import re
from .core import DocType, EntityType
from .string import String
from .utilities import CaseInsensitiveDict, CaseInsensitiveSet
logger = logging.getLogger(__name__)
class Preprocessor(abc.ABC):
    """Base interface for mention preprocessors.

    A preprocessor runs before coref and candidate generation to filter and
    clean the mentions of a document in place.
    """

    @abc.abstractmethod
    def process(self, document):
        """Process (filter/clean) the mentions of *document* in place.

        :param document: Document
        """
        ...
def pcm():
    """Return the process-wide reporter (Preprocessor Context Manager helper)."""
    return PreprocessorReporter.get()
class PreprocessorReport:
    """Tallies of preprocessor activity: per-component modification and
    removal counts."""

    def __init__(self):
        self.modifications = collections.Counter()
        self.removals = collections.Counter()

    def clear(self):
        """Reset both tallies."""
        self.modifications.clear()
        self.removals.clear()

    def __str__(self):
        lines = ['Preprocessor removals', '---------------------']
        if self.removals:
            lines.extend('{}: {}'.format(name, self.removals[name])
                         for name in self.removals)
        else:
            lines.append('None')
        lines.append('')  # blank separator line between the two sections
        lines.extend(['Preprocessor modifications', '--------------------------'])
        if self.modifications:
            lines.extend('{}: {}'.format(name, self.modifications[name])
                         for name in self.modifications)
        else:
            lines.append('None')
        return '\n'.join(lines) + '\n'
class ReportProperty(type):
    """Metaclass that exposes a ``report`` class-level property.

    Implementing classes must provide a ``get()`` classmethod returning the
    singleton instance; ``SomeClass.report`` then resolves to
    ``SomeClass.get()._report``.
    """

    @property
    def report(cls):
        return cls.get()._report
class PreprocessorReporter(metaclass=ReportProperty):
    """
    Reporter for Preprocessor components
    Records:
      * mention modifications
      * mention removals
    """
    # Process-wide singleton, created lazily by get().
    instance = None
    def __init__(self):
        self._report = PreprocessorReport()
        # Start disabled: the no-op context managers are bound by default so
        # production runs pay no bookkeeping cost.
        self.disable()
    def enable(self):
        # Swap in the debug context managers (record modifications/removals).
        self.modification = self.modification_debug
        self.removal = self.removal_debug
    def disable(self):
        # Swap in the no-op context managers.
        self.modification = self.modification_production
        self.removal = self.removal_production
    @classmethod
    def get(cls):
        # Lazily create and return the singleton.
        if cls.instance is None:
            cls.instance = PreprocessorReporter()
        return cls.instance
    @classmethod
    def activate(cls):
        cls.get().enable()
    @classmethod
    def deactivate(cls):
        cls.get().disable()
    # Default bindings; __init__ rebinds these per enable()/disable().
    @contextlib.contextmanager
    def modification(self, mention):
        yield
    @contextlib.contextmanager
    def modification_production(self, mention):
        # No-op: record nothing.
        yield
    @contextlib.contextmanager
    def modification_debug(self, mention):
        # Count one modification if the mention string changed inside the block.
        caller = self.get_caller()
        original = mention.string
        yield
        if original != mention.string:
            self._report.modifications.update({caller: 1})
    @contextlib.contextmanager
    def removal(self, document):
        yield
    @contextlib.contextmanager
    def removal_production(self, document):
        # No-op: record nothing.
        yield
    @contextlib.contextmanager
    def removal_debug(self, document):
        # Count how many mentions the block removed from the document.
        caller = self.get_caller()
        original_size = len(document.mentions)
        yield
        if original_size != len(document.mentions):
            num = original_size - len(document.mentions)
            self._report.removals.update({caller: num})
    @staticmethod
    def get_caller():
        # Walk the stack past this frame and contextlib's wrapper to find the
        # preprocessor instance whose method opened the context manager.
        # 1=self, 2=contextlib, 3=caller
        return inspect.stack()[3][0].f_locals['self'].__class__.__name__
class PassThru(Preprocessor):
    """No-op preprocessor: leaves the document's mentions untouched."""

    def process(self, document):
        """Do nothing; mentions pass through unchanged."""
        ...
class CascadePreprocessor(Preprocessor):
    """Applies a sequence of preprocessors to the mentions, in order."""

    def __init__(self, processors):
        """
        :param processors: list of Preprocessor objects, applied in order
        """
        self.processors = processors

    def process(self, document):
        """Run every configured preprocessor over *document*."""
        for stage in self.processors:
            stage.process(document)
class TypeValidator(Preprocessor):
    """Removes mentions whose type is not a known ``EntityType``."""

    def process(self, document):
        """Drop mentions with unknown types, logging if any were dropped."""
        with pcm().removal(document):
            original_size = len(document.mentions)
            document.mentions = [mention for mention in document.mentions if mention.type in EntityType.TYPES]
            if len(document.mentions) != original_size:
                # Lazy %-style logger args: the message is only formatted when
                # the record is actually emitted (same text as before).
                logger.error("Document %s has an invalid type", document.doc_id)
class TextNormalizer(Preprocessor):
    """
    Normalized text
    * replaces smart quotes and other special punctuation with ascii punct
    * removes emojis
    """

    def __init__(self):
        # Curly quotes, em-dash and ellipsis -> ASCII equivalents.
        self.trans_table = str.maketrans("‘’“”—…", "''\"\"-.")

    def process(self, document):
        for mention in document.mentions:
            with pcm().modification(mention):
                ascii_text = mention.string.translate(self.trans_table)
                mention.string = String.remove_emojis(ascii_text)
class GarbageRemover(Preprocessor):
    """
    Removes garbage mentions
    * removes website urls (www./http:/https: substrings)
    * empty mention strings (can be caused by other preprocessors)
    """

    # Substrings that mark a mention string as a URL.
    _URL_MARKERS = ('www.', 'http:', 'https:')

    def process(self, document):
        """Drop URL-like and empty mentions in a single pass (the previous
        implementation rebuilt the mention list four times)."""
        with pcm().removal(document):
            document.mentions = [
                mention for mention in document.mentions
                if mention.string
                and not any(marker in mention.string for marker in self._URL_MARKERS)
            ]
class FixType(Preprocessor):
    """Fix common type mistakes from NER like al-Qaeda = PER"""

    def __init__(self, type_map):
        """
        :param type_map: dictionary of lowercase name string -> type
        """
        self.map = type_map

    def process(self, document):
        """Overwrite mention types found in the correction map."""
        for mention in document.mentions:
            # Single dict lookup instead of `in` + indexing.
            corrected = self.map.get(mention.string.lower())
            if corrected is not None:
                mention.type = corrected
class TooLongMentionRemover(Preprocessor):
    """Remove mentions that have too many tokens."""

    def __init__(self, max_tokens=6):
        # Space count is used as a proxy for token count in _check().
        self.max_tokens = max_tokens

    def process(self, document):
        with pcm().removal(document):
            document.mentions = [m for m in document.mentions if self._check(m)]

    def _check(self, mention):
        """Return True when the mention is short enough to keep."""
        return mention.string.count(' ') < self.max_tokens
class Blacklist(Preprocessor):
    """Remove mentions that are in a blacklist of common mistakes."""

    def __init__(self, blacklist):
        """
        :param blacklist: list or set of blacklisted names (matched
            case-insensitively)
        """
        self.data = CaseInsensitiveSet(blacklist)

    def process(self, document):
        with pcm().removal(document):
            kept = [m for m in document.mentions if m.string not in self.data]
            document.mentions = kept
class AcronymReplacer(Preprocessor):
    """Expands acronyms to full entity names via a lookup map."""

    def __init__(self, acronym_map, ci=False):
        """
        :param acronym_map: dictionary from acronym -> entity name
        :param ci: whether to match the acronym ignoring case
        """
        self.map = CaseInsensitiveDict(acronym_map) if ci else acronym_map

    def process(self, document):
        for mention in document.mentions:
            with pcm().modification(mention):
                if mention.string in self.map:
                    mention.string = self.map[mention.string]
class NameProjector(Preprocessor):
    """Projects a name string into another form (usually translation or
    transliteration) and stores the result on the mention.

    TODO: check for code switching and not run on English strings?
    """

    TRANSLIT = 'translit_string'
    TRANSLATE = 'translate_string'

    def __init__(self, func, var_name=TRANSLIT):
        """
        :param func: callable(string, lang) returning the projected string or None
        :param var_name: mention attribute name the projection is stored under
        """
        self.func = func
        self.var_name = var_name

    def process(self, document):
        for mention in document.mentions:
            with pcm().modification(mention):
                projected = self.func(mention.string, document.lang)
                # Only record a projection that exists and actually differs.
                if projected and projected != mention.string:
                    setattr(mention, self.var_name, projected)
class NameStemmer(Preprocessor):
    """Replaces each whitespace-separated token of the name with its stem.

    TODO: does not handle punctuation (dashes, apostrophes, parentheses)
    """

    def __init__(self, stemmer):
        """
        :param stemmer: Stemmer object with a ``stem(word, lang=...)`` method
        """
        self.stemmer = stemmer

    def process(self, document):
        for mention in document.mentions:
            with pcm().modification(mention):
                stems = [self.stemmer.stem(word, lang=document.lang)
                         for word in mention.string.split()]
                mention.string = ' '.join(stems)
class TwitterUsernameReplacer(Preprocessor):
    """
    Replaces twitter @username with screen name
    The @username is still available as original_string on the mention.
    The username map is username -> screen name
    For example: nytimes New York Times
    """
    # TODO case sensitive and not handling multi-token phrase with username in it
    def __init__(self, username_map):
        # username (without '@') -> screen name
        self.map = username_map
    def process(self, document):
        # only process tweets
        if document.type != DocType.SN:
            return
        for mention in document.mentions:
            with pcm().modification(mention):
                if mention.string and mention.string[0] == '@':
                    # Strip the leading '@' and any emojis before lookup.
                    s = mention.string[1:]
                    s = String.remove_emojis(s)
                    # chop punctuation off end of username
                    # (valid Twitter handle chars are letters, digits and '_')
                    if s and not (s[-1].isalpha() or s[-1].isdigit() or s[-1] == '_'):
                        s = s[:-1]
                    # Replace only when the cleaned handle is in the map;
                    # otherwise the mention keeps its original '@' string.
                    if s in self.map:
                        mention.string = self.map[s]
class TwitterHashtagProcessor(Preprocessor):
    """
    Replaces twitter #HashTag as Hash Tag
    The #HashTag is still available as original_string on the mention.
    """
    def __init__(self):
        # Splits CamelCase runs: each match is zero-or-more uppercase letters
        # followed by zero-or-more lowercase letters.
        # TODO this does not handle numbers and leaves behind empty matches
        self.hashtag_regex = re.compile('[A-Z]*[a-z]*')
    def process(self, document):
        for mention in document.mentions:
            with pcm().modification(mention):
                if mention.string and mention.string[0] == '#':
                    # Drop the '#' and split the CamelCase remainder into words.
                    mention.string = mention.string[1:]
                    matches = re.findall(self.hashtag_regex, mention.string)
                    if matches:
                        # Filter the empty matches the regex produces.
                        matches = [match for match in matches if match]
                        s = ' '.join(matches)
                        # TODO find a better approach for bad strings - like removing the mention
                        if s:
                            mention.string = s
| StarcoderdataPython |
1720162 | import facetracker_custom as fc
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
jiung = "jiung"
frameCnt = 0
landmarks = []
toMotionNum = 400
fromMotionNum = 1
model = load_model("output_02.h5")
model.summary() # model Info
frameCnt = 0
for points in fc.run(visualize=1, max_threads=4, capture=0):
frameCnt += 1
if (frameCnt//2 == 1):
frameCnt = 0
landmarks.append(points)
if len(landmarks) == 30:
h = model.predict(landmarks)
print(f"Sleep [{(h[0][1]*100)//1}%]") if h.argmax() == 1 else print(f"NO Sleep [{(h[0][0])*100//1}%]")
| StarcoderdataPython |
8138935 | <reponame>SimonTheVillain/connecting_the_dots
import torch
import torch.utils.data
import numpy as np
class TestSet(object):
    """A named evaluation dataset plus how often it should be evaluated."""

    def __init__(self, name, dset, test_frequency=1):
        self.name = name                      # human-readable identifier
        self.dset = dset                      # the dataset object itself
        # evaluate every N-th pass (presumably epochs — confirm with caller)
        self.test_frequency = test_frequency
class TestSets(list):
    """A list of :class:`TestSet` entries with a convenience ``append``."""

    def append(self, name, dset, test_frequency=1):
        """Wrap the arguments in a TestSet and append it."""
        super().append(TestSet(name, dset, test_frequency))
class MultiDataset(torch.utils.data.Dataset):
    """Concatenates several datasets behind a single index space.

    A cumulative-length table maps a global index to the dataset containing
    it plus the local index within that dataset.
    """

    def __init__(self, *datasets):
        self.current_epoch = 0
        self.datasets = []
        # cum_n_samples[k] == total number of samples in datasets[:k]
        self.cum_n_samples = [0]
        for dataset in datasets:
            self.append(dataset)

    def append(self, dataset):
        """Add another dataset and extend the cumulative-length table."""
        self.datasets.append(dataset)
        self.__update_cum_n_samples(dataset)

    def __update_cum_n_samples(self, dataset):
        self.cum_n_samples.append(self.cum_n_samples[-1] + len(dataset))

    def dataset_updated(self):
        """Rebuild the cumulative table after an underlying dataset changed size."""
        self.cum_n_samples = [0]
        for dataset in self.datasets:
            self.__update_cum_n_samples(dataset)

    def __len__(self):
        return self.cum_n_samples[-1]

    def __getitem__(self, idx):
        # Locate the dataset whose cumulative range contains idx.
        which = np.searchsorted(self.cum_n_samples, idx, side='right') - 1
        local = idx - self.cum_n_samples[which]
        return self.datasets[which][local]
class BaseDataset(torch.utils.data.Dataset):
    """Dataset base class providing a reproducible per-sample RNG.

    Evaluation seeds depend only on the sample index; training seeds also
    depend on the current epoch unless ``fix_seed_per_epoch`` is set.
    """

    def __init__(self, train=True, fix_seed_per_epoch=False):
        self.current_epoch = 0
        self.train = train
        self.fix_seed_per_epoch = fix_seed_per_epoch

    def get_rng(self, idx):
        """Return a ``numpy.random.RandomState`` seeded for sample *idx*."""
        rng = np.random.RandomState()
        if not self.train:
            rng.seed(idx)
            return rng
        # Training: offset the seed by (epoch+1)*len(self) so each epoch
        # draws differently, unless one fixed seed per epoch was requested.
        epoch_factor = 1 if self.fix_seed_per_epoch else self.current_epoch + 1
        rng.seed(epoch_factor * len(self) + idx)
        return rng
| StarcoderdataPython |
1714372 | <filename>tools/android/find_unused_resources.py<gh_stars>1-10
#!/usr/bin/python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Lists unused Java strings and other resources."""
import optparse
import re
import subprocess
import sys
def GetApkResources(apk_path):
  """Returns the types and names of resources packaged in an APK.

  Args:
    apk_path: path to the APK.

  Returns:
    The resources in the APK as a list of tuples (type, name). Example:
    [('drawable', 'arrow'), ('layout', 'month_picker'), ...]
  """
  aapt = subprocess.Popen(
      ['aapt', 'dump', 'resources', apk_path],
      stdout=subprocess.PIPE)
  dump_out, _ = aapt.communicate()
  assert aapt.returncode == 0, 'aapt dump failed'
  spec_re = (r'^\s+spec resource 0x[0-9a-fA-F]+ [\w.]+:'
             r'(?P<type>\w+)/(?P<name>\w+)')
  return [m.group('type', 'name')
          for m in re.finditer(spec_re, dump_out, re.MULTILINE)]
def GetUsedResources(source_paths, resource_types):
  """Returns the types and names of resources used in Java or resource files.

  Args:
    source_paths: a list of files or folders collectively containing all the
        Java files, resource files, and the AndroidManifest.xml.
    resource_types: a list of resource types to look for. Example:
        ['string', 'drawable']

  Returns:
    The resources referenced by the Java and resource files as a list of tuples
        (type, name). Example:
        [('drawable', 'app_icon'), ('layout', 'month_picker'), ...]
  """
  type_regex = '|'.join(map(re.escape, resource_types))
  patterns = [r'@(())(%s)/(\w+)' % type_regex,
              r'\b((\w+\.)*)R\.(%s)\.(\w+)' % type_regex]
  resources = []
  for pattern in patterns:
    p = subprocess.Popen(
        ['grep', '-REIhoe', pattern] + source_paths,
        stdout=subprocess.PIPE,
        # BUG FIX: stderr must be captured for the check below to work;
        # without stderr=PIPE, communicate() always returns None for it.
        stderr=subprocess.PIPE)
    grep_out, grep_err = p.communicate()
    # Check stderr instead of return code, since return code is 1 when no
    # matches are found.
    assert not grep_err, 'grep failed'
    matches = re.finditer(pattern, grep_out)
    for match in matches:
      package = match.group(1)
      if package == 'android.':
        # Framework resources (android.R.*) are not part of this APK.
        continue
      type_ = match.group(3)
      name = match.group(4)
      resources.append((type_, name))
  return resources
def FormatResources(resources):
  """Formats a list of resources for printing.

  Args:
    resources: a list of resources, given as (type, name) tuples.

  Returns:
    One line per resource, sorted, with the type left-justified to 12 columns.
  """
  rows = ['%-12s %s' % (rtype, rname) for rtype, rname in sorted(resources)]
  return '\n'.join(rows)
def ParseArgs(args):
  """Parses command-line arguments.

  Args:
    args: the argument list, or None to parse sys.argv.

  Returns:
    Tuple (verbose, apk_path, source_paths).
  """
  parser = optparse.OptionParser(
      usage='usage: %prog [-v] APK_PATH SOURCE_PATH...')
  parser.add_option('-v', help='Show verbose output', action='store_true')
  options, positional = parser.parse_args(args=args)
  if len(positional) < 2:
    parser.error('must provide APK_PATH and SOURCE_PATH arguments')
  return options.v, positional[0], positional[1:]
# NOTE(review): this file is Python 2 (print statements, `print >> sys.stderr`);
# it will not run under Python 3 without porting.
def main(args=None):
  # Diff the APK's packaged resources against the references found in source.
  verbose, apk_path, source_paths = ParseArgs(args)
  apk_resources = GetApkResources(apk_path)
  resource_types = list(set([r[0] for r in apk_resources]))
  used_resources = GetUsedResources(source_paths, resource_types)
  # Packaged but never referenced -> candidates for deletion.
  unused_resources = set(apk_resources) - set(used_resources)
  # Referenced but not packaged -> likely wrong inputs or aapt failure.
  undefined_resources = set(used_resources) - set(apk_resources)
  # aapt dump fails silently. Notify the user if things look wrong.
  if not apk_resources:
    print >> sys.stderr, (
        'Warning: No resources found in the APK. Did you provide the correct '
        'APK path?')
  if not used_resources:
    print >> sys.stderr, (
        'Warning: No resources references from Java or resource files. Did you '
        'provide the correct source paths?')
  if undefined_resources:
    print >> sys.stderr, (
        'Warning: found %d "undefined" resources that are referenced by Java '
        'files or by other resources, but are not in the APK. Run with -v to '
        'see them.' % len(undefined_resources))
  if verbose:
    print '%d undefined resources:' % len(undefined_resources)
    print FormatResources(undefined_resources), '\n'
    print '%d resources packaged into the APK:' % len(apk_resources)
    print FormatResources(apk_resources), '\n'
    print '%d used resources:' % len(used_resources)
    print FormatResources(used_resources), '\n'
    print '%d unused resources:' % len(unused_resources)
  print FormatResources(unused_resources)
if __name__ == '__main__':
  main()
| StarcoderdataPython |
3241351 | <gh_stars>1-10
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.analyses.derating.models.inductor_unit_test.py is part of The
# RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for the inductor derating module."""
# Third Party Imports
import pytest
# RAMSTK Package Imports
from ramstk.analyses.derating import inductor
@pytest.mark.unit
def test_do_derating_analysis_no_stresses_coil(test_stress_limits):
    """should determine the inductor is not exceeding any limit."""
    overstress, reason = inductor.do_derating_analysis(
        1,  # environment_id
        1,  # subcategory_id
        test_stress_limits["inductor"],
        family_id=2,
        current_ratio=0.2,
        voltage_ratio=0.2,
        temperature_hot_spot=51.3,
        temperature_rated_max=130.0,
    )
    assert overstress == 0
    assert reason == ""
@pytest.mark.unit
def test_do_derating_analysis_no_stresses_transformer(test_stress_limits):
    """should determine the transformer is not exceeding any limit."""
    overstress, reason = inductor.do_derating_analysis(
        1,  # environment_id
        2,  # subcategory_id
        test_stress_limits["inductor"],
        family_id=2,
        current_ratio=0.2,
        voltage_ratio=0.2,
        temperature_hot_spot=51.3,
        temperature_rated_max=130.0,
    )
    assert overstress == 0
    assert reason == ""
@pytest.mark.unit
def test_do_derating_analysis_current(test_stress_limits):
    """should determine the inductor is exceeding the current limit."""
    overstress, reason = inductor.do_derating_analysis(
        1,  # environment_id
        1,  # subcategory_id
        test_stress_limits["inductor"],
        family_id=2,
        current_ratio=0.863,
        voltage_ratio=0.2,
        temperature_hot_spot=59.3,
        temperature_rated_max=130.0,
    )
    assert overstress == 1
    assert reason == "Current ratio of 0.863 exceeds the allowable limit of 0.7.\n"
@pytest.mark.unit
def test_do_derating_analysis_hot_spot_temperature(test_stress_limits):
    """should determine the inductor is exceeding the hot spot temperature limit."""
    overstress, reason = inductor.do_derating_analysis(
        1,  # environment_id
        1,  # subcategory_id
        test_stress_limits["inductor"],
        family_id=2,
        current_ratio=0.2,
        voltage_ratio=0.2,
        temperature_hot_spot=111.3,
        temperature_rated_max=130.0,
    )
    expected = (
        "Hot spot temperature of 111.3C exceeds the derated maximum hot "
        "spot temperature of 30.0C less than maximum rated hot spot "
        "temperature of 130.0C.\n"
    )
    assert overstress == 1
    assert reason == expected
@pytest.mark.unit
def test_do_derating_analysis_voltage(test_stress_limits):
    """should determine the inductor is exceeding the voltage limit."""
    overstress, reason = inductor.do_derating_analysis(
        1,  # environment_id
        1,  # subcategory_id
        test_stress_limits["inductor"],
        family_id=2,
        current_ratio=0.2,
        voltage_ratio=0.863,
        temperature_hot_spot=59.3,
        temperature_rated_max=130.0,
    )
    assert overstress == 1
    assert reason == "Voltage ratio of 0.863 exceeds the allowable limit of 0.7.\n"
@pytest.mark.unit
def test_do_derating_analysis_all_stresses(test_stress_limits):
    """should determine the inductor is exceeding all three limits."""
    overstress, reason = inductor.do_derating_analysis(
        1,  # environment_id
        1,  # subcategory_id
        test_stress_limits["inductor"],
        family_id=2,
        current_ratio=0.81,
        voltage_ratio=0.863,
        temperature_hot_spot=109.3,
        temperature_rated_max=130.0,
    )
    expected = (
        "Current ratio of 0.81 exceeds the allowable limit of 0.7.\n"
        "Hot spot temperature of 109.3C exceeds the derated maximum hot spot "
        "temperature of 30.0C less than maximum rated hot spot temperature "
        "of 130.0C.\n"
        "Voltage ratio of 0.863 exceeds the allowable limit of 0.7.\n"
    )
    assert overstress == 1
    assert reason == expected
@pytest.mark.unit
def test_do_derating_analysis_unknown_environment(test_stress_limits):
    """should raise an IndexError when passed an unknown environment."""
    with pytest.raises(IndexError):
        inductor.do_derating_analysis(
            5,  # environment_id: out of range
            1,
            test_stress_limits["inductor"],
            family_id=2,
            current_ratio=0.81,
            voltage_ratio=0.863,
            temperature_hot_spot=109.3,
            temperature_rated_max=130.0,
        )
@pytest.mark.unit
def test_do_derating_analysis_unknown_subcategory(test_stress_limits):
    """should raise a KeyError when passed an unknown subcategory."""
    with pytest.raises(KeyError):
        inductor.do_derating_analysis(
            1,
            21,  # subcategory_id: unknown
            test_stress_limits["inductor"],
            family_id=2,
            current_ratio=0.81,
            voltage_ratio=0.863,
            temperature_hot_spot=109.3,
            temperature_rated_max=130.0,
        )
@pytest.mark.unit
def test_do_derating_analysis_unknown_family(test_stress_limits):
    """should raise a KeyError when passed an unknown family ID."""
    with pytest.raises(KeyError):
        inductor.do_derating_analysis(
            1,
            1,
            test_stress_limits["inductor"],
            family_id=21,  # unknown family
            current_ratio=0.81,
            voltage_ratio=0.863,
            temperature_hot_spot=109.3,
            temperature_rated_max=130.0,
        )
@pytest.mark.unit
@pytest.mark.parametrize("current_ratio", ["0.9", None])
def test_do_derating_analysis_non_numeric_current_ratio(
    current_ratio,
    test_stress_limits,
):
    """should raise a TypeError when passed a non-numeric current ratio."""
    with pytest.raises(TypeError):
        inductor.do_derating_analysis(
            1,
            1,
            test_stress_limits["inductor"],
            family_id=2,
            current_ratio=current_ratio,
            voltage_ratio=0.3,
            temperature_hot_spot=51.3,
            temperature_rated_max=130.0,
        )
@pytest.mark.unit
@pytest.mark.parametrize("hot_spot_temperature", ["128.3", None])
def test_do_derating_analysis_non_numeric_temperature(
    hot_spot_temperature,
    test_stress_limits,
):
    """should raise a TypeError when passed a non-numeric hot spot temperature."""
    with pytest.raises(TypeError):
        inductor.do_derating_analysis(
            1,
            1,
            test_stress_limits["inductor"],
            family_id=2,
            current_ratio=0.1,
            voltage_ratio=0.3,
            temperature_hot_spot=hot_spot_temperature,
            temperature_rated_max=130.0,
        )
@pytest.mark.unit
@pytest.mark.parametrize("voltage_ratio", ["0.9", None])
def test_do_derating_analysis_non_numeric_voltage_ratio(
    voltage_ratio,
    test_stress_limits,
):
    """should raise a TypeError when passed a non-numeric voltage ratio."""
    with pytest.raises(TypeError):
        inductor.do_derating_analysis(
            1,
            1,
            test_stress_limits["inductor"],
            family_id=2,
            current_ratio=0.1,
            voltage_ratio=voltage_ratio,
            temperature_hot_spot=51.3,
            temperature_rated_max=130.0,
        )
| StarcoderdataPython |
2571 | <filename>frontends/PyCDE/test/polynomial.py
# RUN: %PYTHON% %s 2>&1 | FileCheck %s
from __future__ import annotations
import mlir
import pycde
from pycde import (Input, Output, Parameter, module, externmodule, generator,
types, dim)
from circt.dialects import comb, hw
@module
def PolynomialCompute(coefficients: Coefficients):
  # Module factory: returns a PyCDE module class specialized for the given
  # design-time coefficients (Horner-less power expansion below).
  class PolynomialCompute:
    """Module to compute ax^3 + bx^2 + cx + d for design-time coefficients"""
    # Evaluate polynomial for 'x'.
    x = Input(types.i32)
    y = Output(types.int(8 * 4))
    unused_parameter = Parameter(True)
    def __init__(self, name: str):
      """coefficients is in 'd' -> 'a' order."""
      self.instanceName = name
    @staticmethod
    def get_module_name():
      # Encode the coefficients in the module name so each specialization
      # elaborates to a distinct HW module.
      return "PolyComputeForCoeff_" + '_'.join(
          [str(x) for x in coefficients.coeff])
    @generator
    def construct(mod):
      """Implement this module for input 'x'."""
      x = mod.x
      taps = list()
      # Accumulate partial sums: taps[-1] holds sum of terms up to `power`.
      for power, coeff in enumerate(coefficients.coeff):
        coeffVal = hw.ConstantOp.create(types.i32, coeff)
        if power == 0:
          newPartialSum = coeffVal.result
        else:
          partialSum = taps[-1]
          if power == 1:
            currPow = x
          else:
            # x^power built as a single multi-operand multiply.
            x_power = [x for i in range(power)]
            currPow = comb.MulOp.create(*x_power)
          newPartialSum = comb.AddOp.create(
              partialSum, comb.MulOp.create(coeffVal, currPow))
        taps.append(newPartialSum)
      # Final output
      return {"y": taps[-1]}
  return PolynomialCompute
@externmodule("supercooldevice")
class CoolPolynomialCompute:
x = Input(types.i32)
y = Output(types.i32)
def __init__(self, coefficients):
self.coefficients = coefficients
class Coefficients:
  """Container for a polynomial's coefficient list (constant term first,
  per PolynomialCompute.__init__'s 'd' -> 'a' note)."""

  def __init__(self, coeff):
    self.coeff = coeff
class Polynomial(pycde.System):
  # Top-level system: no inputs, a single i32 output 'y'.
  inputs = []
  outputs = [('y', types.i32)]
  def build(self, top):
    i32 = types.i32
    # Constant driver feeding the first polynomial instance.
    x = hw.ConstantOp.create(i32, 23)
    poly = PolynomialCompute(Coefficients([62, 42, 6]))("example", x=x)
    # Two more instances chained off poly.y plus an extern-module instance,
    # exercising specialization and instantiation (their outputs are unused).
    PolynomialCompute(coefficients=Coefficients([62, 42, 6]))("example2",
                                                              x=poly.y)
    PolynomialCompute(Coefficients([1, 2, 3, 4, 5]))("example2", x=poly.y)
    CoolPolynomialCompute([4, 42], x=x)
    return {"y": poly.y}
# Lit/FileCheck test driver: the '# CHECK' comments below are FileCheck
# directives matched against this script's output (see the RUN line at the
# top of the file) — do not edit them casually.
poly = Polynomial()
poly.graph()
# CHECK-LABEL: digraph "top"
# CHECK: label="top";
# CHECK: [shape=record,label="{hw.constant\ni32\n\nvalue: 23 : i32}"];
poly.print()
# CHECK-LABEL: hw.module @top() -> (%y: i32)
# CHECK: [[REG0:%.+]] = "pycde.PolynomialCompute"(%c23_i32) {instanceName = "example", opNames = ["x"], parameters = {coefficients = {coeff = [62, 42, 6]}, module_name = "PolyComputeForCoeff_62_42_6", unused_parameter = true}, resultNames = ["y"]} : (i32) -> i32
# CHECK: [[REG1:%.+]] = "pycde.PolynomialCompute"([[REG0]]) {instanceName = "example2", opNames = ["x"], parameters = {coefficients = {coeff = [62, 42, 6]}, module_name = "PolyComputeForCoeff_62_42_6", unused_parameter = true}, resultNames = ["y"]} : (i32) -> i32
# CHECK: [[REG2:%.+]] = "pycde.PolynomialCompute"([[REG0]]) {instanceName = "example2", opNames = ["x"], parameters = {coefficients = {coeff = [1, 2, 3, 4, 5]}, module_name = "PolyComputeForCoeff_1_2_3_4_5", unused_parameter = true}, resultNames = ["y"]} : (i32) -> i32
# CHECK: [[REG3:%.+]] = "pycde.CoolPolynomialCompute"(%c23_i32) {coefficients = [4, 42], opNames = ["x"], parameters = {}, resultNames = ["y"]} : (i32) -> i32
# CHECK: hw.output [[REG0]] : i32
# Generation lowers the pycde ops to hw.instance/hw.module form.
poly.generate()
poly.print()
# CHECK-LABEL: hw.module @top
# CHECK: %example.y = hw.instance "example" @PolyComputeForCoeff_62_42_6(%c23_i32) {parameters = {}} : (i32) -> i32
# CHECK: %example2.y = hw.instance "example2" @PolyComputeForCoeff_62_42_6(%example.y) {parameters = {}} : (i32) -> i32
# CHECK: %example2.y_0 = hw.instance "example2" @PolyComputeForCoeff_1_2_3_4_5(%example.y) {parameters = {}} : (i32) -> i32
# CHECK: %pycde.CoolPolynomialCompute.y = hw.instance "pycde.CoolPolynomialCompute" @supercooldevice(%c23_i32) {coefficients = [4, 42], parameters = {}} : (i32) -> i32
# CHECK-LABEL: hw.module @PolyComputeForCoeff_62_42_6(%x: i32) -> (%y: i32)
# CHECK: hw.constant 62
# CHECK: hw.constant 42
# CHECK: hw.constant 6
# CHECK-LABEL: hw.module @PolyComputeForCoeff_1_2_3_4_5(%x: i32) -> (%y: i32)
# CHECK: hw.constant 1
# CHECK: hw.constant 2
# CHECK: hw.constant 3
# CHECK: hw.constant 4
# CHECK: hw.constant 5
# CHECK-NOT: hw.module @pycde.PolynomialCompute
print("\n\n=== Verilog ===")
# CHECK-LABEL: === Verilog ===
poly.print_verilog()
# CHECK-LABEL: module PolyComputeForCoeff_62_42_6(
# CHECK: input [31:0] x,
# CHECK: output [31:0] y);
| StarcoderdataPython |
5126273 | from ._op_item_type_registry import op_register_item_type
from ._op_items_base import OPAbstractItem
# {
# "uuid": "zjc6s5ri3rhcxploofa67jamze",
# "templateUuid": "003",
# "trashed": "N",
# "createdAt": "2021-03-19T23:27:12Z",
# "updatedAt": "2021-03-19T23:30:10Z",
# "changerUuid": "RAXCWKNRRNGL7I3KSZOH5ERLHI",
# "itemVersion": 2,
# "vaultUuid": "yhdg6ovhkjcfhn3u25cp2bnl6e",
# "details": {
# "notesPlain": "Note text here. **Mardown** supported.\n\nWhat does the note text look like?",
# "passwordHistory": [],
# "sections": [
# {
# "name": "linked items",
# "title": "Related Items"
# }
# ]
# },
# "overview": {
# "ainfo": "Note text here. **Mardown** supported.",
# "ps": 0,
# "title": "Example Secure Note"
# }
# }
@op_register_item_type
class OPSecureNoteItem(OPAbstractItem):
    """A 1Password "Secure Note" item (template 003).

    See the example item JSON in the comment above for the shape of the
    underlying item dictionary.
    """
    TEMPLATE_ID = "003"

    # NOTE: the redundant __init__ that only forwarded to super() was removed;
    # the inherited OPAbstractItem.__init__(item_dict) is used directly.

    @property
    def note_text(self):
        """The note's plain-text body (the "notesPlain" detail value)."""
        return self.get_details_value("notesPlain")
| StarcoderdataPython |
6584524 | <filename>tests/utils.py
import base64
def create_basic_auth_header(username, password):
    """Build an HTTP Basic ``Authorization`` header for the given credentials."""
    credentials = "{}:{}".format(username, password).encode("utf-8")
    encoded = base64.b64encode(credentials).decode("utf-8")
    return {"Authorization": "Basic {}".format(encoded)}
def create_token_auth_header(token):
    """Build a Bearer-token ``Authorization`` header."""
    return {"Authorization": f"Bearer {token}"}
| StarcoderdataPython |
9728225 | import torch
import torch.nn as nn
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.functional as F
class InfoNCE_Loss(nn.Module):
    """Performs predictions and InfoNCE Loss
    Modified From:
    https://github.com/loeweX/Greedy_InfoMax/blob/master/GreedyInfoMax/vision/models/InfoNCE_Loss.py
    https://github.com/loeweX/Greedy_InfoMax/blob/master/LICENSE
    Args:
        pred_steps (int): number of steps into the future to perform predictions
        neg_samples (int): number of negative samples to be used for contrastive loss
        in_channels (int): number of channels of input tensors (size of encoding vector from encoder network and autoregressive network)
    """

    def __init__(self, pred_steps, neg_samples, in_channels):
        super().__init__()
        self.pred_steps = pred_steps
        self.neg_samples = neg_samples
        # One 1x1 conv per prediction step: implements the bilinear score
        # z^T_{t+k} W_k c_t without a bias term.
        self.W_k = nn.ModuleList(
            nn.Conv2d(in_channels, in_channels, 1, bias=False)
            for _ in range(self.pred_steps)
        )
        self.contrast_loss = ExpNLLLoss()

    def forward(self, z, c, skip_step=1):
        """Compute the InfoNCE loss between encodings *z* and context *c*.

        Both inputs are used as 4-D tensors indexed [batch, channel, y, x]
        (consistent with the Conv2d projections below).
        ``skip_step`` extra rows are skipped between a context position and
        the encoding it must predict.
        Returns the scalar loss averaged over all prediction steps.
        """
        batch_size = z.shape[0]
        total_loss = 0
        # NOTE(review): get_device() requires CUDA tensors; on CPU it
        # returns -1 and the torch.randint/zeros device argument would be
        # invalid — confirm callers only run this on GPU.
        cur_device = z.get_device()
        # For each element in c, contrast with elements below
        for k in range(1, self.pred_steps + 1):
            ### compute log f(c_t, x_{t+k}) = z^T_{t+k} * W_k * c_t
            ### compute z^T_{t+k} * W_k:
            ztwk = (
                self.W_k[k - 1]
                .forward(z[:, :, (k + skip_step) :, :])  # Bx, C , H , W
                .permute(2, 3, 0, 1)  # H, W, Bx, C
                .contiguous()
            )  # y, x, b, c
            # Flatten all positions so negatives can be drawn uniformly
            # from every (position, batch) pair.
            ztwk_shuf = ztwk.view(
                ztwk.shape[0] * ztwk.shape[1] * ztwk.shape[2], ztwk.shape[3]
            )  # y * x * batch, c
            rand_index = torch.randint(
                ztwk_shuf.shape[0],  # y * x * batch
                (ztwk_shuf.shape[0] * self.neg_samples, 1),
                dtype=torch.long,
                device=cur_device,
            )
            # Sample more
            rand_index = rand_index.repeat(1, ztwk_shuf.shape[1])
            ztwk_shuf = torch.gather(
                ztwk_shuf, dim=0, index=rand_index, out=None
            )  # y * x * b * n, c
            ztwk_shuf = ztwk_shuf.view(
                ztwk.shape[0],
                ztwk.shape[1],
                ztwk.shape[2],
                self.neg_samples,
                ztwk.shape[3],
            ).permute(
                0, 1, 2, 4, 3
            )  # y, x, b, c, n
            ### Compute x_W1 * c_t:
            context = (
                c[:, :, : -(k + skip_step), :].permute(2, 3, 0, 1).unsqueeze(-2)
            )  # y, x, b, 1, c
            log_fk_main = torch.matmul(context, ztwk.unsqueeze(-1)).squeeze(
                -2
            )  # y, x, b, 1
            log_fk_shuf = torch.matmul(context, ztwk_shuf).squeeze(-2)  # y, x, b, n
            # Positive score goes in slot 0; negatives fill slots 1..n.
            log_fk = torch.cat((log_fk_main, log_fk_shuf), 3)  # y, x, b, 1+n
            log_fk = log_fk.permute(2, 3, 0, 1)  # b, 1+n, y, x
            log_fk = torch.softmax(log_fk, dim=1)
            # Target class is always 0 (the positive pair).
            true_f = torch.zeros(
                (batch_size, log_fk.shape[-2], log_fk.shape[-1]),
                dtype=torch.long,
                device=cur_device,
            )  # b, y, x
            total_loss += self.contrast_loss(input=log_fk, target=true_f)
        total_loss /= self.pred_steps
        return total_loss
class ExpNLLLoss(_WeightedLoss):
    """NLL loss for inputs that are already probabilities (post-softmax):
    a log() is applied before delegating to F.nll_loss.
    """
    def __init__(self, weight=None, size_average=None, ignore_index=-100,
                 reduce=None, reduction='mean'):
        super(ExpNLLLoss, self).__init__(weight, size_average, reduce, reduction)
        self.ignore_index = ignore_index
    def forward(self, input, target):
        # Epsilon keeps log() finite when a probability is exactly zero.
        x = torch.log(input + 1e-11)
        return F.nll_loss(x, target, weight=self.weight, ignore_index=self.ignore_index,
                          reduction=self.reduction) | StarcoderdataPython |
1794967 | <filename>src/bio2bel/io/pykeen.py
# -*- coding: utf-8 -*-
"""Entry points for PyKEEN.
PyKEEN is a machine learning library for knowledge graph embeddings that supports node clustering,
link prediction, entity disambiguation, question/answering, and other tasks with knowledge graphs.
It provides an interface for registering plugins using Python's entrypoints under the
``pykeen.triples.extension_importer`` and ``pykeen.triples.prefix_importer`` groups. More specific
information about how the Bio2BEL plugin is loaded into PyKEEN can be found in Bio2BEL's
`setup.cfg <https://github.com/bio2bel/bio2bel/blob/master/setup.cfg>`_ under the ``[options.entry_points]``
header.
The following example shows how you can parse/load the triples from a Bio2BEL
repository using the ``bio2bel`` prefix:
.. code-block:: python
# Example 1A: Make triples factory
from pykeen.triples import TriplesFactory
tf = TriplesFactory(path='bio2bel:mirtarbase')
# Example 1B: Use directly in the pipeline, which automatically invokes training/testing set stratification
from pykeen.pipeline import pipeline
results = pipeline(
dataset='bio2bel:mirtarbase',
model='TransE',
)
"""
import numpy as np
from .automate import ensure_tsv
__all__ = [
'ensure_triples',
]
def ensure_triples(module_name: str) -> np.ndarray:
    """Load a Bio2BEL repository.

    :param module_name: The name of the bio2bel repository (with no prefix)
    :return: A three column array with head, relation, and tail in each row
    """
    tsv_path = ensure_tsv(module_name)
    # The cached TSV is head/relation/tail; keep everything as strings.
    return np.loadtxt(tsv_path, dtype=str, delimiter='\t')
| StarcoderdataPython |
5191735 | # -*- coding: utf-8 -*-
"""
Web API
~~~~
ref: web_api.yaml
:copyright: (c) 2017-2018 by Baidu, Inc.
:license: Apache, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from flask import g
from v1.services import DataService
from .resource import Resource
class Datas(Resource):
    """
    ref: web_api.yaml
    """

    def get(self):
        """
        List the data sets owned by the current user.

        ref: web_api.yaml
        :return: rendered list of data views
        """
        pattern = g.args['pattern'] if 'pattern' in g.args else None
        current_login = g.user.login
        # Only the caller's own data sets are listed; publicly readable
        # data sets owned by others were deliberately left out here.
        owned = [d for d in DataService.list(pattern) if d.owner == current_login]
        return self.render(data=[d.view() for d in owned])
| StarcoderdataPython |
6642116 | # -*- test-case-name: twisted.test.test_web -*-
# Copyright (c) 2004 Divmod.
# See LICENSE for details.
"""I deal with static resources.
"""
# System Imports
import os, string, time
import io
import traceback
import warnings
from io import StringIO
from zope.interface import implementer
try:
from twisted.web.resource import NoResource, ForbiddenResource
except ImportError:
from twisted.web.error import NoResource, ForbiddenResource
from twisted.web.util import redirectTo
try:
from twisted.web import http
except ImportError:
from twisted.protocols import http
from twisted.python import threadable, log, components, filepath
from twisted.internet import abstract
from twisted.python.util import InsensitiveDict
from twisted.python.runtime import platformType
from nevow import appserver, dirlist, inevow, rend
dangerousPathError = NoResource("Invalid request URL.")
def isDangerous(path):
    """Return True if *path* (a single URL segment) could escape the
    served directory: a lone ``..`` segment or an embedded path separator.
    """
    if path == '..':
        return True
    return '/' in path or os.sep in path
@implementer(inevow.IResource)
class Data:
    """
    This is a static, in-memory resource.

    Serves a fixed payload with a fixed content type; an optional
    ``expires`` offset (seconds from now) adds an Expires header.
    """
    def __init__(self, data, type, expires=None):
        # Payload served verbatim by renderHTTP.
        self.data = data
        # Content-type header value (name shadows the builtin `type`,
        # kept for API compatibility).
        self.type = type
        # Seconds until expiry, or None for no Expires header.
        self.expires = expires
    def time(self):
        """
        Return the current time as a float.
        The default implementation simply uses L{time.time}. This is mainly
        provided as a hook for tests to override.
        """
        return time.time()
    def locateChild(self, ctx, segments):
        # Static data has no children.
        return appserver.NotFound
    def renderHTTP(self, ctx):
        request = inevow.IRequest(ctx)
        request.setHeader(b"content-type", self.type)
        request.setHeader(b"content-length", str(len(self.data)))
        if self.expires is not None:
            request.setHeader(b"expires",
                              http.datetimeToString(self.time() + self.expires))
        # NOTE(review): this compares against the str "HEAD"; elsewhere this
        # file mixes bytes and str for request fields — confirm request.method
        # is str here under the deployed twisted version.
        if request.method == "HEAD":
            return ''
        return self.data
def staticHTML(someString):
    """Wrap *someString* in an in-memory ``text/html`` Data resource."""
    return Data(someString, 'text/html')
def addSlash(request):
    """Return the request's own URL (as bytes) with a trailing slash added,
    dropping any query string — used to redirect directory requests.
    """
    # NOTE(review): header name "host" is passed as str while the format
    # template is bytes, and non-ASCII hosts would make encode('ascii')
    # raise — confirm against the deployed twisted request API.
    return b"http%s://%s%s/" % (
        request.isSecure() and b's' or b'',
        request.getHeader("host").encode('ascii'),
        (bytes.split(request.uri, b'?')[0]))
class Registry(components.Componentized):
    """
    I am a Componentized object that will be made available to internal Twisted
    file-based dynamic web content such as .rpy and .epy scripts.
    """
    def __init__(self):
        components.Componentized.__init__(self)
        # Maps filesystem path -> previously rendered resource.
        self._pathCache = {}
    def cachePath(self, path, rsrc):
        """Remember *rsrc* as the resource associated with *path*."""
        self._pathCache[path] = rsrc
    def getCachedPath(self, path):
        """Return the cached resource for *path*, or None if absent."""
        return self._pathCache.get(path)
def loadMimeTypes(mimetype_locations=('/etc/mime.types',)):
    """Build a mapping of file extensions to MIME content types.

    Starts from Python's built-in ``mimetypes`` table, overlays a few
    common types the stdlib omits or gets wrong, then applies any
    mime.types files found at *mimetype_locations* (later files win,
    but only for extensions they explicitly define).

    Multiple file locations containing mime-types can be passed as an
    iterable of paths; missing files are silently skipped.

    :param mimetype_locations: iterable of mime.types file paths.
        (Default changed from a mutable list to a tuple; accepted values
        are unchanged.)
    :return: dict mapping lowercase dotted extensions to MIME types.
    """
    import mimetypes
    # Copy the stdlib table: the previous code aliased mimetypes.types_map
    # and then update()d it, mutating the interpreter-wide registry as a
    # side effect for every caller of the mimetypes module.
    contentTypes = mimetypes.types_map.copy()
    # Overlay the usual suspects missing from the stdlib table.
    contentTypes.update(
        {
            '.conf': 'text/plain',
            '.diff': 'text/plain',
            '.exe': 'application/x-executable',
            '.flac': 'audio/x-flac',
            '.java': 'text/plain',
            '.ogg': 'application/ogg',
            '.oz': 'text/x-oz',
            '.swf': 'application/x-shockwave-flash',
            '.tgz': 'application/x-gtar',
            '.wml': 'text/vnd.wap.wml',
            '.xul': 'application/vnd.mozilla.xul+xml',
            '.py': 'text/plain',
            '.patch': 'text/plain',
            '.pjpeg': 'image/pjpeg',
            '.tac': 'text/x-python',
        }
    )
    # User-provided mime.types files override the defaults above.
    for location in mimetype_locations:
        if os.path.exists(location):
            contentTypes.update(mimetypes.read_mime_types(location))
    return contentTypes
def getTypeAndEncoding(filename, types, encodings, defaultType):
    """Map *filename* to a ``(content-type, content-encoding)`` pair.

    A trailing encoding extension (e.g. ``.gz``) is stripped first, so
    ``foo.tar.gz`` resolves to the type of ``.tar`` with a gzip encoding.
    Unknown extensions fall back to *defaultType* with no encoding.
    """
    base, outer_ext = os.path.splitext(filename)
    outer_ext = outer_ext.lower()
    if outer_ext not in encodings:
        return types.get(outer_ext, defaultType), None
    enc = encodings[outer_ext]
    inner_ext = os.path.splitext(base)[1].lower()
    return types.get(inner_ext, defaultType), enc
@implementer(inevow.IResource)
class File:
    """
    File is a resource that represents a plain non-interpreted file
    (although it can look for an extension like .rpy or .cgi and hand the
    file to a processor for interpretation if you wish). Its constructor
    takes a file path.
    Alternatively, you can give a directory path to the constructor. In this
    case the resource will represent that directory, and its children will
    be files underneath that directory. This provides access to an entire
    filesystem tree with a single Resource.
    If you map the URL 'http://server/FILE' to a resource created as
    File('/tmp'), then http://server/FILE/ will return an HTML-formatted
    listing of the /tmp/ directory, and http://server/FILE/foo/bar.html will
    return the contents of /tmp/foo/bar.html .
    """
    # Class-level defaults shared by every File instance (and mutated by
    # createSimilarFile only via instance attributes).
    contentTypes = loadMimeTypes()
    contentEncodings = {
        ".gz" : "application/x-gzip",
        ".bz2": "application/x-bzip2"
    }
    # extension -> callable(path, registry) producing a resource.
    processors = {}
    indexNames = ["index", "index.html", "index.htm", "index.trp", "index.rpy"]
    type = None
    def __init__(self, path, defaultType="text/html", ignoredExts=(), registry=None, allowExt=0):
        """Create a file with the given path.

        :param defaultType: content type used when the extension is unknown.
        :param ignoredExts: extensions that may be appended when resolving a
            child name (deprecated boolean form also accepted, see below).
        :param allowExt: deprecated alias for "ignore all extensions".
        """
        self.fp = filepath.FilePath(path)
        # Remove the dots from the path to split
        self.defaultType = defaultType
        # Backwards compatibility: callers used to pass a boolean here.
        if ignoredExts in (0, 1) or allowExt:
            warnings.warn("ignoredExts should receive a list, not a boolean")
            if ignoredExts or allowExt:
                self.ignoredExts = ['*']
            else:
                self.ignoredExts = []
        else:
            self.ignoredExts = list(ignoredExts)
        self.registry = registry or Registry()
        self.children = {}
    def ignoreExt(self, ext):
        """Ignore the given extension.
        Serve file.ext if file is requested
        """
        self.ignoredExts.append(ext)
    def directoryListing(self):
        """Return a resource rendering an HTML listing of this directory."""
        return dirlist.DirectoryLister(self.fp.path,
                                       self.listNames(),
                                       self.contentTypes,
                                       self.contentEncodings,
                                       self.defaultType)
    def putChild(self, name, child):
        """Register an explicit child resource, overriding filesystem lookup."""
        self.children[name] = child
    def locateChild(self, ctx, segments):
        """Resolve the next URL segment to a child resource.

        Order: explicit children, then index files (for the empty segment),
        then filesystem lookup with ignored-extension fallback, then
        registered processors for matching extensions.
        """
        r = self.children.get(segments[0], None)
        if r:
            return r, segments[1:]
        path=segments[0]
        # restat() refreshes cached stat info in case the file changed.
        self.fp.restat()
        if not self.fp.isdir():
            return rend.NotFound
        if path:
            fpath = self.fp.child(path)
        else:
            fpath = self.fp.childSearchPreauth(*self.indexNames)
            if fpath is None:
                return self.directoryListing(), segments[1:]
        if not fpath.exists():
            fpath = fpath.siblingExtensionSearch(*self.ignoredExts)
            if fpath is None:
                return rend.NotFound
        # Don't run processors on directories - if someone wants their own
        # customized directory rendering, subclass File instead.
        if fpath.isfile():
            if platformType == "win32":
                # don't want .RPY to be different than .rpy, since that
                # would allow source disclosure.
                processor = InsensitiveDict(self.processors).get(fpath.splitext()[1])
            else:
                processor = self.processors.get(fpath.splitext()[1])
            if processor:
                return (
                    inevow.IResource(processor(fpath.path, self.registry)),
                    segments[1:])
        return self.createSimilarFile(fpath.path), segments[1:]
    # methods to allow subclasses to e.g. decrypt files on the fly:
    def openForReading(self):
        """Open a file and return it."""
        return self.fp.open()
    def getFileSize(self):
        """Return file size."""
        return self.fp.getsize()
    def renderHTTP(self, ctx):
        """Serve the file: set type/encoding headers, honour If-Modified-Since
        caching and (best effort) byte-range requests, then return the data.
        """
        self.fp.restat()
        if self.type is None:
            self.type, self.encoding = getTypeAndEncoding(self.fp.basename(),
                                                          self.contentTypes,
                                                          self.contentEncodings,
                                                          self.defaultType)
        if not self.fp.exists():
            return rend.FourOhFour()
        request = inevow.IRequest(ctx)
        # A directory URL without a trailing slash gets redirected.
        if self.fp.isdir():
            return self.redirect(request)
        # fsize is the full file size
        # size is the length of the part actually transmitted
        fsize = size = self.getFileSize()
        request.setHeader(b'accept-ranges', 'bytes')
        if self.type:
            request.setHeader(b'content-type', self.type)
        if self.encoding:
            request.setHeader(b'content-encoding', self.encoding)
        try:
            f = self.openForReading()
        except IOError as e:
            import errno
            # NOTE(review): e[0] is Python 2 idiom; on Python 3 exceptions
            # are not subscriptable — this should be e.errno.
            if e[0] == errno.EACCES:
                return ForbiddenResource().render(request)
            else:
                raise
        if request.setLastModified(self.fp.getmtime()) is http.CACHED:
            return ''
        try:
            # NOTE(review): `range` shadows the builtin within this method.
            range = request.getHeader('range')
            if range is not None:
                # This is a request for partial data...
                bytesrange = range.split('=')
                assert bytesrange[0] == 'bytes',\
                       "Syntactically invalid http range header!"
                start, end = bytesrange[1].split('-')
                if start:
                    f.seek(int(start))
                if end:
                    end = int(end)
                else:
                    end = fsize-1
                request.setResponseCode(http.PARTIAL_CONTENT)
                request.setHeader(b'content-range', "bytes %s-%i/%i" % (
                    start, end, fsize))
                #content-length should be the actual size of the stuff we're
                #sending, not the full size of the on-server entity.
                size = 1 + end - int(start)
                request.setHeader(b'content-length', str(size))
        except:
            # Deliberate best-effort: a malformed Range header falls back to
            # serving the whole file (but the bare except hides real bugs).
            traceback.print_exc(file=log.logfile)
        if request.method == 'HEAD':
            return ''
        # return data
        return f.read()
    def redirect(self, request):
        """Redirect to the same URL with a trailing slash."""
        return redirectTo(addSlash(request), request)
    def listNames(self):
        """Return the sorted names of this directory's entries ([] if not a dir)."""
        if not self.fp.isdir():
            return []
        directory = self.fp.listdir()
        directory.sort()
        return directory
    def createSimilarFile(self, path):
        """Build a child resource of the same class, inheriting our settings."""
        f = self.__class__(path, self.defaultType, self.ignoredExts, self.registry)
        # refactoring by steps, here - constructor should almost certainly take these
        f.processors = self.processors
        f.indexNames = self.indexNames[:]
        return f
"""I contain AsIsProcessor, which serves files 'As Is'
Inspired by Apache's mod_asis
"""
@implementer(inevow.IResource)
class ASISProcessor:
    """Serve a file 'As Is' (inspired by Apache's mod_asis): the file body
    is expected to contain its own HTTP headers, so response writing is
    handed straight to a File resource.
    """
    def __init__(self, path, registry=None):
        self.path = path
        self.registry = registry or Registry()
    def renderHTTP(self, ctx):
        request = inevow.IRequest(ctx)
        # Mark the response as started so the framework does not emit its
        # own headers; the file content supplies them.
        request.startedWriting = 1
        return File(self.path, registry=self.registry)
    def locateChild(self, ctx, segments):
        # An as-is file has no children.
        return appserver.NotFound
| StarcoderdataPython |
11235955 | # Check if a lepton is also 'loose'.
from Treemaker.Treemaker import cuts
numJets = 3
bMassMax = 50
def setup(variables, isData):
return variables
def createCuts(cutArray):
    """Register the loose-lepton cut in *cutArray* and return it."""
    description = "Is this a loose lepton event (electron or muon)?"
    cutArray["isLoose"] = cuts.Cut("Is Loose Lepton", description)
    return cutArray
def analyze(event, variables, labels, isData, cutArray):
    """Mark the event as passing the loose-lepton cut when its leading
    lepton (electron for electron events, muon for muon events) is loose.
    Returns the (variables, cutArray) pair as required by Treemaker.
    """
    loose_muons = labels['jhuMuonPFlow']['muonisloose'].product()
    loose_electrons = labels['jhuElePFlow']['electronisloose'].product()
    passes = False
    # Check if we are a loose event.
    if variables['isElectron'][0] > 0:
        passes = loose_electrons[0] > 0
    elif variables['isMuon'][0] > 0:
        passes = loose_muons[0] > 0
    if passes:
        cutArray["isLoose"].passed = 1
    return variables, cutArray
def reset(variables):
    """Treemaker hook: nothing to reset between events."""
    return variables
| StarcoderdataPython |
1848277 | <reponame>vsevolodpohvalenko/home-assistant
"""Click-based interface for Songpal."""
import ast
import asyncio
import json
import logging
import sys
from functools import update_wrapper
import click
from songpal import Device, SongpalException
from songpal.common import ProtocolType
from songpal.containers import Setting
from songpal.discovery import Discover
from songpal.group import GroupControl
class OnOffBoolParamType(click.ParamType):
    """Click parameter type accepting "on"/"off" in addition to the
    regular boolean spellings understood by click.BOOL.
    """
    name = "boolean"
    def convert(self, value, param, ctx):
        """Convert *value* to bool; delegates anything that is not
        "on"/"off" to click's standard boolean conversion."""
        if value == "on":
            return True
        elif value == "off":
            return False
        else:
            return click.BOOL.convert(value, param, ctx)
ONOFF_BOOL = OnOffBoolParamType()
def err(msg):
    """Pretty-print an error."""
    styled = click.style(msg, fg="red", bold=True)
    click.echo(styled)
def coro(f):
    """Run a coroutine and handle possible errors for the click cli.
    Source https://github.com/pallets/click/issues/85#issuecomment-43378930

    Wraps an async click command so click can invoke it synchronously.
    """
    # NOTE(review): asyncio.coroutine is deprecated since Python 3.8 and
    # removed in 3.11 — confirm the supported Python range.
    f = asyncio.coroutine(f)
    def wrapper(*args, **kwargs):
        loop = asyncio.get_event_loop()
        try:
            return loop.run_until_complete(f(*args, **kwargs))
        except KeyboardInterrupt:
            click.echo("Got CTRL+C, quitting..")
            # Assumes the first positional arg is the Device (true for
            # @pass_dev commands; presumably not for group commands —
            # TODO confirm).
            dev = args[0]
            loop.run_until_complete(dev.stop_listen_notifications())
        except SongpalException as ex:
            err("Error: %s" % ex)
            # Re-raise for full tracebacks when --debug was given.
            if len(args) > 0 and hasattr(args[0], "debug"):
                if args[0].debug > 0:
                    raise ex
    return update_wrapper(wrapper, f)
async def traverse_settings(dev, module, settings, depth=0):
    """Print all available settings, recursing into directories.

    :param dev: device to query for current setting values
    :param module: name of the settings subsystem (for display only)
    :param settings: iterable of Setting objects (directories or leaves)
    :param depth: current indentation depth
    """
    for setting in settings:
        if setting.is_directory:
            print("%s%s (%s)" % (depth * " ", setting.title, module))
            # Recurse without returning: the previous `return await ...`
            # cut the loop short and silently skipped every sibling
            # setting after the first directory entry.
            await traverse_settings(dev, module, setting.settings, depth + 2)
        else:
            try:
                print_settings([await setting.get_value(dev)], depth=depth)
            except SongpalException as ex:
                err("Unable to read setting %s: %s" % (setting, ex))
def print_settings(settings, depth=0):
    """Print all available settings of the device.

    Accepts either a single Setting or an iterable of them; each setting's
    current value is shown in bold, followed by its available candidates.
    """
    # handle the case where a single setting is passed
    if isinstance(settings, Setting):
        settings = [settings]
    for setting in settings:
        cur = setting.currentValue
        print(
            "%s* %s (%s, value: %s, type: %s)"
            % (
                " " * depth,
                setting.title,
                setting.target,
                click.style(cur, bold=True),
                setting.type,
            )
        )
        for opt in setting.candidate:
            # Skip candidates the device reports as unavailable.
            if not opt.isAvailable:
                logging.debug("Unavailable setting %s", opt)
                continue
            # Bold marks the currently selected candidate.
            click.echo(
                click.style(
                    "%s - %s (%s)" % (" " * depth, opt.title, opt.value),
                    bold=opt.value == cur,
                )
            )
pass_dev = click.make_pass_decorator(Device)
@click.group(invoke_without_command=False)
@click.option("--endpoint", envvar="SONGPAL_ENDPOINT", required=False)
@click.option("-d", "--debug", default=False, count=True)
@click.option("--post", is_flag=True, required=False)
@click.option("--websocket", is_flag=True, required=False)
@click.pass_context
@click.version_option()
@coro
async def cli(ctx, endpoint, debug, websocket, post):
"""Songpal CLI."""
lvl = logging.INFO
if debug:
lvl = logging.DEBUG
click.echo("Setting debug level to %s" % debug)
logging.basicConfig(level=lvl)
if ctx.invoked_subcommand == "discover":
ctx.obj = {"debug": debug}
return
if endpoint is None:
err("Endpoint is required except when with 'discover'!")
return
protocol = None
if post and websocket:
err("You can force either --post or --websocket")
return
elif websocket:
protocol = ProtocolType.WebSocket
elif post:
protocol = ProtocolType.XHRPost
logging.debug("Using endpoint %s", endpoint)
x = Device(endpoint, force_protocol=protocol, debug=debug)
try:
await x.get_supported_methods()
except SongpalException as ex:
err("Unable to get supported methods: %s" % ex)
sys.exit(-1)
ctx.obj = x
# this causes RuntimeError: This event loop is already running
# if ctx.invoked_subcommand is None:
# ctx.invoke(status)
@cli.command()
@pass_dev
@coro
async def status(dev: Device):
"""Display status information."""
power = await dev.get_power()
click.echo(click.style("%s" % power, bold=bool(power)))
vol = await dev.get_volume_information()
click.echo(vol.pop())
play_info = await dev.get_play_info()
if not play_info.is_idle:
click.echo("Playing %s" % play_info)
else:
click.echo("Not playing any media")
outs = await dev.get_inputs()
for out in outs:
if out.active:
click.echo("Active output: %s" % out)
sysinfo = await dev.get_system_info()
click.echo("System information: %s" % sysinfo)
@cli.command()
@coro
@click.pass_context
async def discover(ctx):
"""Discover supported devices."""
TIMEOUT = 5
async def print_discovered(dev):
pretty_name = "%s - %s" % (dev.name, dev.model_number)
click.echo(click.style("\nFound %s" % pretty_name, bold=True))
click.echo("* API version: %s" % dev.version)
click.echo("* Endpoint: %s" % dev.endpoint)
click.echo(" Services:")
for serv in dev.services:
click.echo(" - Service: %s" % serv)
click.echo("\n[UPnP]")
click.echo("* URL: %s" % dev.upnp_location)
click.echo("* UDN: %s" % dev.udn)
click.echo(" Services:")
for serv in dev.upnp_services:
click.echo(" - Service: %s" % serv)
click.echo("Discovering for %s seconds" % TIMEOUT)
await Discover.discover(TIMEOUT, ctx.obj["debug"] or 0, callback=print_discovered)
@cli.command()
@click.argument("cmd", required=False)
@click.argument("target", required=False)
@click.argument("value", required=False)
@pass_dev
@coro
async def power(dev: Device, cmd, target, value):
"""Turn on and off, control power settings.
Accepts commands 'on', 'off', and 'settings'.
"""
async def try_turn(cmd):
state = True if cmd == "on" else False
try:
return await dev.set_power(state)
except SongpalException as ex:
if ex.code == 3:
err("The device is already %s." % cmd)
else:
raise ex
if cmd == "on" or cmd == "off":
click.echo(await try_turn(cmd))
elif cmd == "settings":
settings = await dev.get_power_settings()
print_settings(settings)
elif cmd == "set" and target and value:
click.echo(await dev.set_power_settings(target, value))
else:
power = await dev.get_power()
click.echo(click.style(str(power), bold=bool(power)))
@cli.command()
@click.option("--output", type=str, required=False)
@click.argument("input", required=False)
@pass_dev
@coro
async def input(dev: Device, input, output):
"""Get and change outputs."""
inputs = await dev.get_inputs()
if input:
click.echo("Activating %s" % input)
try:
input = next((x for x in inputs if x.title == input))
except StopIteration:
click.echo("Unable to find input %s" % input)
return
zone = None
if output:
zone = await dev.get_zone(output)
if zone.uri not in input.outputs:
click.echo("Input %s not valid for zone %s" % (input.title, output))
return
await input.activate(zone)
else:
click.echo("Inputs:")
for input in inputs:
act = False
if input.active:
act = True
click.echo(" * " + click.style(str(input), bold=act))
for out in input.outputs:
click.echo(" - %s" % out)
@cli.command()
@click.argument("zone", required=False)
@click.argument("activate", required=False, type=ONOFF_BOOL)
@pass_dev
@coro
async def zone(dev: Device, zone, activate):
"""Get and change outputs."""
if zone:
zone = await dev.get_zone(zone)
click.echo("%s %s" % ("Activating" if activate else "Deactivating", zone))
await zone.activate(activate)
else:
click.echo("Zones:")
for zone in await dev.get_zones():
act = False
if zone.active:
act = True
click.echo(" * " + click.style(str(zone), bold=act))
@cli.command()
@click.argument("target", required=False)
@click.argument("value", required=False)
@pass_dev
@coro
async def googlecast(dev: Device, target, value):
"""Return Googlecast settings."""
if target and value:
click.echo("Setting %s = %s" % (target, value))
await dev.set_googlecast_settings(target, value)
print_settings(await dev.get_googlecast_settings())
@cli.command()
@click.argument("scheme", required=False)
@pass_dev
@coro
async def source(dev: Device, scheme: str):
"""List available sources.
If no `scheme` is given, will list sources for all sc hemes.
"""
if scheme is None:
all_schemes = await dev.get_schemes()
schemes = [str(scheme.scheme) for scheme in all_schemes]
else:
schemes = [scheme]
for schema in schemes:
try:
sources = await dev.get_source_list(schema)
except SongpalException as ex:
click.echo("Unable to get sources for %s: %s" % (schema, ex))
continue
for src in sources:
click.echo(src)
if src.isBrowsable:
try:
count = await dev.get_content_count(src.source)
if count.count > 0:
click.echo(" %s" % count)
for content in await dev.get_contents(src.source):
click.echo(" %s\n\t%s" % (content.title, content.uri))
else:
click.echo(" No content to list.")
except SongpalException as ex:
click.echo(" %s" % ex)
@cli.command()
@click.option("--output", type=str, required=False)
@click.argument("volume", required=False)
@pass_dev
@coro
async def volume(dev: Device, volume, output):
"""Get and set the volume settings.
Passing 'mute' as new volume will mute the volume,
'unmute' removes it.
"""
vol = None
vol_controls = await dev.get_volume_information()
if output is not None:
click.echo("Using output: %s" % output)
output_uri = (await dev.get_zone(output)).uri
for v in vol_controls:
if v.output == output_uri:
vol = v
break
else:
vol = vol_controls[0]
if vol is None:
err("Unable to find volume controller: %s" % output)
return
if volume and volume == "mute":
click.echo("Muting")
await vol.set_mute(True)
elif volume and volume == "unmute":
click.echo("Unmuting")
await vol.set_mute(False)
elif volume:
click.echo("Setting volume to %s" % volume)
await vol.set_volume(volume)
if output is not None:
click.echo(vol)
else:
for ctl in vol_controls:
click.echo(ctl)
@cli.command()
@pass_dev
@coro
async def schemes(dev: Device):
"""Print supported uri schemes."""
schemes = await dev.get_schemes()
for scheme in schemes:
click.echo(scheme)
@cli.command()
@click.option("--internet", is_flag=True, default=True)
@click.option("--update", is_flag=True, default=False)
@pass_dev
@coro
async def check_update(dev: Device, internet: bool, update: bool):
"""Print out update information."""
if internet:
print("Checking updates from network")
else:
print("Not checking updates from internet")
update_info = await dev.get_update_info(from_network=internet)
if not update_info.isUpdatable:
click.echo("No updates available.")
return
if not update:
click.echo("Update available: %s" % update_info)
click.echo("Use --update to activate update!")
else:
click.echo("Activating update, please be seated.")
res = await dev.activate_system_update()
click.echo("Update result: %s" % res)
@cli.command()
@click.argument("target", required=False)
@click.argument("value", required=False)
@pass_dev
@coro
async def bluetooth(dev: Device, target, value):
"""Get or set bluetooth settings."""
if target and value:
await dev.set_bluetooth_settings(target, value)
print_settings(await dev.get_bluetooth_settings())
@cli.command()
@pass_dev
@coro
async def sysinfo(dev: Device):
"""Print out system information (version, MAC addrs)."""
click.echo(await dev.get_system_info())
click.echo(await dev.get_interface_information())
@cli.command()
@pass_dev
@coro
async def misc(dev: Device):
"""Print miscellaneous settings."""
print_settings(await dev.get_misc_settings())
@cli.command()
@pass_dev
@coro
async def settings(dev: Device):
"""Print out all possible settings."""
settings_tree = await dev.get_settings()
for module in settings_tree:
await traverse_settings(dev, module.usage, module.settings)
@cli.command()
@pass_dev
@coro
async def storage(dev: Device):
"""Print storage information."""
storages = await dev.get_storage_list()
for storage in storages:
click.echo(storage)
@cli.command()
@click.argument("target", required=False)
@click.argument("value", required=False)
@pass_dev
@coro
async def sound(dev: Device, target, value):
"""Get or set sound settings."""
if target and value:
click.echo("Setting %s to %s" % (target, value))
click.echo(await dev.set_sound_settings(target, value))
print_settings(await dev.get_sound_settings())
@cli.command()
@pass_dev
@click.argument("soundfield", required=False)
@coro
async def soundfield(dev: Device, soundfield: str):
"""Get or set sound field."""
if soundfield is not None:
await dev.set_sound_settings("soundField", soundfield)
soundfields = await dev.get_sound_settings("soundField")
print_settings(soundfields)
@cli.command()
@pass_dev
@coro
async def eq(dev: Device):
"""Return EQ information."""
click.echo(await dev.get_custom_eq())
@cli.command()
@click.argument("cmd", required=False)
@click.argument("target", required=False)
@click.argument("value", required=False)
@pass_dev
@coro
async def playback(dev: Device, cmd, target, value):
"""Get and set playback settings, e.g. repeat and shuffle.."""
if target and value:
dev.set_playback_settings(target, value)
if cmd == "support":
click.echo("Supported playback functions:")
supported = await dev.get_supported_playback_functions("storage:usb1")
for i in supported:
print(i)
elif cmd == "settings":
print_settings(await dev.get_playback_settings())
# click.echo("Playback functions:")
# funcs = await dev.get_available_playback_functions()
# print(funcs)
else:
click.echo("Currently playing: %s" % await dev.get_play_info())
@cli.command()
@click.argument("target", required=False)
@click.argument("value", required=False)
@pass_dev
@coro
async def speaker(dev: Device, target, value):
"""Get and set external speaker settings."""
if target and value:
click.echo("Setting %s to %s" % (target, value))
await dev.set_speaker_settings(target, value)
print_settings(await dev.get_speaker_settings())
@cli.command()
@click.argument("notification", required=False)
@click.option("--listen-all", is_flag=True)
@pass_dev
@coro
async def notifications(dev: Device, notification: str, listen_all: bool):
"""List available notifications and listen to them.
Using --listen-all [notification] allows to listen to all notifications
from the given subsystem.
If the subsystem is omited, notifications from all subsystems are
requested.
"""
notifications = await dev.get_notifications()
async def handle_notification(x):
click.echo("got notification: %s" % x)
if listen_all:
if notification is not None:
await dev.services[notification].listen_all_notifications(
handle_notification
)
else:
click.echo("Listening to all possible notifications")
await dev.listen_notifications(fallback_callback=handle_notification)
elif notification:
click.echo("Subscribing to notification %s" % notification)
for notif in notifications:
if notif.name == notification:
await notif.activate(handle_notification)
click.echo("Unable to find notification %s" % notification)
else:
click.echo(click.style("Available notifications", bold=True))
for notif in notifications:
click.echo("* %s" % notif)
@cli.command()
@pass_dev
@coro
async def sleep(dev: Device):
"""Return sleep settings."""
click.echo(await dev.get_sleep_timer_settings())
@cli.command()
@pass_dev
def list_all(dev: Device):
"""List all available API calls."""
for name, service in dev.services.items():
click.echo(click.style("\nService %s" % name, bold=True))
for method in service.methods:
click.echo(" %s" % method.name)
@cli.command()
@click.argument("service", required=True)
@click.argument("method")
@click.argument("parameters", required=False, default=None)
@pass_dev
@coro
async def command(dev, service, method, parameters):
"""Run a raw command."""
params = None
if parameters is not None:
params = ast.literal_eval(parameters)
click.echo("Calling %s.%s with params %s" % (service, method, params))
res = await dev.raw_command(service, method, params)
click.echo(res)
@cli.command()
@click.argument("file", type=click.File("w"), required=False)
@pass_dev
@coro
async def dump_devinfo(dev: Device, file):
"""Dump developer information.
Pass `file` to write the results directly into a file.
"""
import attr
methods = await dev.get_supported_methods()
res = {
"supported_methods": {k: v.asdict() for k, v in methods.items()},
"settings": [attr.asdict(x) for x in await dev.get_settings()],
"sysinfo": attr.asdict(await dev.get_system_info()),
"interface_info": attr.asdict(await dev.get_interface_information()),
}
if file:
click.echo("Saving to file: %s" % file.name)
json.dump(res, file, sort_keys=True, indent=4)
else:
click.echo(json.dumps(res, sort_keys=True, indent=4))
pass_groupctl = click.make_pass_decorator(GroupControl)
@cli.group()
@click.pass_context
@click.option("--url", required=True)
@coro
async def group(ctx, url):
gc = GroupControl(url)
await gc.connect()
ctx.obj = gc
@group.command()
@pass_groupctl
@coro
async def info(gc: GroupControl):
"""Control information."""
click.echo(await gc.info())
@group.command()
@pass_groupctl
@coro
async def state(gc: GroupControl):
    """Current group state."""
    # Local renamed so it no longer shadows the command function itself.
    current_state = await gc.state()
    click.echo(current_state)
    click.echo("Full state info: %s" % repr(current_state))
@group.command()
@pass_groupctl
@coro
async def codec(gc: GroupControl):
    """Codec settings."""
    current_codec = await gc.get_codec()
    click.echo("Codec: %s" % current_codec)
@group.command()
@pass_groupctl
@coro
async def memory(gc: GroupControl):
    """Group memory."""
    group_memory = await gc.get_group_memory()
    click.echo("Memory: %s" % group_memory)
@group.command()
@click.argument("name")
@click.argument("slaves", nargs=-1, required=True)
@pass_groupctl
@coro
async def create(gc: GroupControl, name, slaves):
    """Create new group"""
    click.echo("Creating group %s with slaves: %s" % (name, slaves))
    result = await gc.create(name, slaves)
    click.echo(result)
@group.command()
@pass_groupctl
@coro
async def abort(gc: GroupControl):
    """Abort existing group."""
    click.echo("Aborting current group..")
    result = await gc.abort()
    click.echo(result)
@group.command()
@pass_groupctl
@click.argument("slaves", nargs=-1, required=True)
@coro
async def add(gc: GroupControl, slaves):
    """Add speakers to group."""
    click.echo("Adding to existing group: %s" % slaves)
    result = await gc.add(slaves)
    click.echo(result)
@group.command()
@pass_groupctl
@click.argument("slaves", nargs=-1, required=True)
@coro  # FIX: was missing -- click never awaits a bare coroutine function
async def remove(gc: GroupControl, slaves):
    """Remove speakers from group."""
    click.echo("Removing from existing group: %s" % slaves)
    click.echo(await gc.remove(slaves))
@group.command()  # type: ignore # noqa: F811
@pass_groupctl
@click.argument("volume", type=int)
@coro  # FIX: was missing -- click never awaits a bare coroutine function
async def volume(gc: GroupControl, volume):  # noqa: F811
    """Adjust volume [-100, 100]"""
    click.echo("Setting volume to %s" % volume)
    click.echo(await gc.set_group_volume(volume))
@group.command()
@pass_groupctl
@click.argument("mute", type=bool)
@coro  # FIX: was missing -- click never awaits a bare coroutine function
async def mute(gc: GroupControl, mute):
    """(Un)mute group."""
    click.echo("Muting group: %s" % mute)
    click.echo(await gc.set_mute(mute))
@group.command()
@pass_groupctl
@coro  # FIX: was missing -- click never awaits a bare coroutine function
async def play(gc: GroupControl):
    """Play?"""
    click.echo("Sending play command: %s" % await gc.play())
@group.command()
@pass_groupctl
@coro  # FIX: was missing -- click never awaits a bare coroutine function
async def stop(gc: GroupControl):
    """Stop playing?"""
    click.echo("Sending stop command: %s" % await gc.stop())
if __name__ == "__main__":
    # Allow running this module directly as the CLI entry point.
    cli()
| StarcoderdataPython |
149476 | from electionguard_tools.scripts import sample_generator
from electionguard_tools.scripts.sample_generator import (
DEFAULT_NUMBER_OF_BALLOTS,
DEFAULT_SPOIL_RATE,
DEFAULT_USE_ALL_GUARDIANS,
DEFAULT_USE_PRIVATE_DATA,
ElectionSampleDataGenerator,
)
# Public API of this package: the sample-generator defaults and the
# generator class, plus the submodule itself for direct access.
__all__ = [
    "DEFAULT_NUMBER_OF_BALLOTS",
    "DEFAULT_SPOIL_RATE",
    "DEFAULT_USE_ALL_GUARDIANS",
    "DEFAULT_USE_PRIVATE_DATA",
    "ElectionSampleDataGenerator",
    "sample_generator",
]
| StarcoderdataPython |
1876818 | from ..base.base_connector import BaseConnector
from stix2matcher.matcher import Pattern
from stix2matcher.matcher import MatchListener
from stix2validator import validate_instance
import json, requests
class Connector(BaseConnector):
    """Connector that downloads a STIX bundle over HTTP and matches STIX
    patterns against its observed-data objects locally."""

    def __init__(self, connection, configuration):
        self.is_async = False
        self.connection = connection
        self.configuration = configuration
        # This connector plays all three roles (results/query/ping) itself.
        self.results_connector = self
        self.query_connector = self
        self.ping_connector = self

    # We re-implement this method so we can fetch all the "bindings", as the
    # upstream stix2matcher helper only returns the first one.
    def match(self, pattern, observed_data_sdos, verbose=False):
        """Return every observed-data SDO matched by ``pattern``.

        Collects SDOs from *all* found bindings, not just the first.
        """
        compiled_pattern = Pattern(pattern)
        matcher = MatchListener(observed_data_sdos, verbose)
        compiled_pattern.walk(matcher)

        found_bindings = matcher.matched()
        matching_sdos = []
        if found_bindings:
            for binding in found_bindings:
                # extend() instead of `a = a + b`: the original rebinding
                # copied the accumulator on every binding (quadratic);
                # this is linear and produces the same list.
                matching_sdos.extend(matcher.get_sdos_from_binding(binding))
        return matching_sdos

    def ping(self):
        """Trivial liveness check; this connector has no persistent backend."""
        return {"success": True}

    def create_query_connection(self, query):
        """The "search id" is simply the STIX pattern itself."""
        return {"success": True, "search_id": query}

    def create_results_connection(self, search_id, offset, length):
        """Fetch the configured bundle and return the SDOs matching ``search_id``.

        :param search_id: the STIX pattern to match
        :param offset: index of the first result to return
        :param length: number of results to return
        :raises requests.HTTPError: when the bundle download fails (4xx/5xx)
        """
        # Optional HTTP basic auth; requests treats auth=None as "no auth".
        auth = None
        if "http_user" in self.configuration:
            auth = (self.configuration["http_user"], self.configuration["http_password"])
        response = requests.get(self.configuration["bundle_url"], auth=auth)
        if response.status_code != 200:
            response.raise_for_status()
        bundle = response.json()

        if self.configuration.get("validate") is True:
            results = validate_instance(bundle)
            if results.is_valid is not True:
                return {"success": False, "message": "Invalid STIX recieved: " + json.dumps(results)}

        observations = [obj for obj in bundle["objects"] if obj["type"] == "observed-data"]

        # Pattern match, then slice the requested page out of the results.
        # NOTE(review): assumes offset/length arrive as numbers -- confirm callers.
        results = self.match(search_id, observations, False)
        return results[int(offset):int(offset + length)]
| StarcoderdataPython |
29563 | <filename>handler.py<gh_stars>1-10
import torch
import os
import logging
import json
from abc import ABC
from ts.torch_handler.base_handler import BaseHandler
from transformers import T5Tokenizer, T5ForConditionalGeneration
logger = logging.getLogger(__name__)
class TransformersSeqGeneration(BaseHandler, ABC):
    """TorchServe handler that translates text with a T5 seq2seq model.

    Requests carry ``{"text": ..., "from": <lang code>, "to": <lang code>}``;
    the handler builds T5 "translate X to Y:" prompts and generates the
    translation.
    """

    # Maps the short request language codes onto the language names that the
    # T5 task prefix expects.
    _LANG_MAP = {
        "es": "Spanish",
        "fr": "French",
        "de": "German",
        "en": "English",
    }

    def __init__(self):
        super().__init__()
        self.initialized = False

    def initialize(self, ctx):
        """Load tokenizer and model from the model archive.

        :param ctx: TorchServe context holding the manifest and system
            properties (model dir, gpu id).
        """
        self.manifest = ctx.manifest
        properties = ctx.system_properties
        model_dir = properties.get("model_dir")
        serialized_file = self.manifest["model"]["serializedFile"]
        model_pt_path = os.path.join(model_dir, serialized_file)
        self.device = torch.device(
            "cuda:" + str(properties.get("gpu_id"))
            if torch.cuda.is_available()
            else "cpu"
        )
        # read configs for the mode, model_name, etc. from setup_config.json
        setup_config_path = os.path.join(model_dir, "setup_config.json")
        if os.path.isfile(setup_config_path):
            with open(setup_config_path) as setup_config_file:
                self.setup_config = json.load(setup_config_file)
        else:
            logger.warning("Missing the setup_config.json file.")

        # Load the model and tokenizer from checkpoint/config files based on
        # the configured save mode.
        self.tokenizer = T5Tokenizer.from_pretrained(model_dir)
        if self.setup_config["save_mode"] == "torchscript":
            self.model = torch.jit.load(model_pt_path)
        elif self.setup_config["save_mode"] == "pretrained":
            self.model = T5ForConditionalGeneration.from_pretrained(model_dir)
        else:
            logger.warning("Missing the checkpoint or state_dict.")

        self.model.to(self.device)
        self.model.eval()
        logger.info("Transformer model from path %s loaded successfully", model_dir)
        self.initialized = True

    @staticmethod
    def _as_text(value):
        """Decode a request field that may arrive either as str or raw bytes."""
        if isinstance(value, (bytes, bytearray)):
            return value.decode("utf-8")
        return value

    def preprocess(self, requests):
        """Build the tokenized input batch from a list of requests.

        FIX: each field ("text", "from", "to") is now decoded independently;
        previously "from"/"to" were only decoded when "text" happened to be
        bytes, which crashed on mixed str/bytes payloads.
        """
        texts_batch = []
        for data in requests:
            body = data["body"]
            input_text = self._as_text(body["text"])
            src_lang = self._as_text(body["from"])
            tgt_lang = self._as_text(body["to"])
            texts_batch.append(
                f"translate {self._LANG_MAP[src_lang]} to {self._LANG_MAP[tgt_lang]}: {input_text}"
            )
        inputs = self.tokenizer(texts_batch, return_tensors="pt")
        return inputs["input_ids"].to(self.device)

    def inference(self, input_batch):
        """Run generation and decode the output token ids to plain strings."""
        generations = self.model.generate(input_batch)
        return self.tokenizer.batch_decode(generations, skip_special_tokens=True)

    def postprocess(self, inference_output):
        """Wrap each generated string in the per-request response envelope."""
        return [{"text": text} for text in inference_output]
| StarcoderdataPython |
291253 | <filename>deps/lib/python3.5/site-packages/openzwave/node.py
# -*- coding: utf-8 -*-
"""
.. module:: openzwave.node
This file is part of **python-openzwave** project https://github.com/OpenZWave/python-openzwave.
:platform: Unix, Windows, MacOS X
:sinopsis: openzwave API
.. moduleauthor: bibi21000 aka <NAME> <<EMAIL>>
License : GPL(v3)
**python-openzwave** is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
**python-openzwave** is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with python-openzwave. If not, see http://www.gnu.org/licenses.
"""
import sys
from openzwave.object import ZWaveObject
from openzwave.group import ZWaveGroup
from openzwave.value import ZWaveValue
from openzwave.command import ZWaveNodeBasic, ZWaveNodeSwitch
from openzwave.command import ZWaveNodeSensor, ZWaveNodeThermostat
from openzwave.command import ZWaveNodeSecurity, ZWaveNodeDoorLock
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        """NullHandler logger for python 2.6"""
        def emit(self, record):
            # Intentionally drop the record: this handler only exists to
            # silence "No handler found" warnings when the application
            # configures no logging of its own.
            pass

# Library-level logger; a NullHandler is attached so importing apps that do
# not configure logging see no warnings.
logger = logging.getLogger('openzwave')
logger.addHandler(NullHandler())
class ZWaveNode(ZWaveObject,
ZWaveNodeBasic, ZWaveNodeSwitch,
ZWaveNodeSensor, ZWaveNodeThermostat,
ZWaveNodeSecurity, ZWaveNodeDoorLock):
"""
Represents a single Node within the Z-Wave Network.
"""
_isReady = False
    def __init__(self, node_id, network):
        """
        Initialize zwave node

        :param node_id: ID of the node
        :type node_id: int
        :param network: The network object to access the manager
        :type network: ZWaveNetwork
        """
        logger.debug("Create object node (node_id:%s)", node_id)
        ZWaveObject.__init__(self, node_id, network)
        #No cache management for values in nodes
        self.values = dict()
        # Lock state surfaced through the is_locked property.
        self._is_locked = False
        # Flipped to True by the network on SIGNAL_NODE_QUERIES_COMPLETE
        # (see the is_ready property setter).
        self._isReady = False
    def __str__(self):
        """
        The string representation of the node.

        :rtype: str
        """
        try:
            return u'home_id: [%s] id: [%s] name: [%s] model: [%s]' % \
                (self._network.home_id_str, self._object_id, self.name, self.product_name)
        except UnicodeDecodeError:
            # NOTE(review): this fallback assumes Python 2 byte strings --
            # on Python 3, str has no .decode() and this branch would raise
            # AttributeError; confirm which interpreters are still supported.
            return u'home_id: [%s] id: [%s] name: [%s] model: [%s]' % \
                (self._network.home_id_str, self._object_id, self.name.decode('utf-8', 'ignore'), self.product_name.decode('utf-8', 'ignore'))
@property
def node_id(self):
"""
The id of the node.
:rtype: int
"""
return self._object_id
@property
def name(self):
"""
The name of the node.
:rtype: str
"""
return self._network.manager.getNodeName(self.home_id, self.object_id)
@name.setter
def name(self, value):
"""
Set the name of the node.
:param value: The new name of the node
:type value: str
"""
self._network.manager.setNodeName(self.home_id, self.object_id, value)
@property
def location(self):
"""
The location of the node.
:rtype: str
"""
return self._network.manager.getNodeLocation(self.home_id, self.object_id)
@location.setter
def location(self, value):
"""
Set the location of the node.
:param value: The new location of the node
:type value: str
"""
self._network.manager.setNodeLocation(self.home_id, self.object_id, value)
@property
def product_name(self):
"""
The product name of the node.
:rtype: str
"""
return self._network.manager.getNodeProductName(self.home_id, self.object_id)
@product_name.setter
def product_name(self, value):
"""
Set the product name of the node.
:param value: The new name of the product
:type value: str
"""
self._network.manager.setNodeProductName(self.home_id, self.object_id, value)
@property
def product_type(self):
"""
The product type of the node.
:rtype: str
"""
return self._network.manager.getNodeProductType(self.home_id, self.object_id)
@property
def product_id(self):
"""
The product Id of the node.
:rtype: str
"""
return self._network.manager.getNodeProductId(self.home_id, self.object_id)
@property
def device_type(self):
"""
The device_type of the node.
:rtype: str
"""
return self._network.manager.getNodeDeviceTypeString(self.home_id, self.object_id)
@property
def role(self):
"""
The role of the node.
:rtype: str
"""
return self._network.manager.getNodeRoleString(self.home_id, self.object_id)
def to_dict(self, extras=['all']):
"""
Return a dict representation of the node.
:param extras: The extra inforamtions to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
if 'all' in extras:
extras = ['kvals', 'capabilities', 'neighbors', 'groups', 'values']
ret={}
ret['name'] = self.name
ret['location'] = self.location
ret['product_type'] = self.product_type
ret['product_name'] = self.product_name
ret['node_id'] = self.node_id
if 'values' in extras :
ret['values'] = self.values_to_dict(extras=extras)
if 'groups' in extras :
ret['groups'] = self.groups_to_dict(extras=extras)
if 'neighbors' in extras :
ret['neighbors'] = dict.fromkeys(self.neighbors, 0)
if 'capabilities' in extras :
ret['capabilities'] = dict.fromkeys(self.capabilities, 0)
if 'kvals' in extras and self.network.dbcon is not None:
vals = self.kvals
for key in vals.keys():
ret[key]=vals[key]
return ret
@property
def capabilities(self):
"""
The capabilities of the node.
:rtype: set()
"""
caps = set()
if self.is_routing_device:
caps.add('routing')
if self.is_listening_device:
caps.add('listening')
if self.is_frequent_listening_device:
caps.add('frequent')
if self.is_security_device:
caps.add('security')
if self.is_beaming_device:
caps.add('beaming')
if self.node_id == self._network.controller.node_id:
for cap in self._network.controller.capabilities:
caps.add(cap)
return caps
@property
def neighbors(self):
"""
The neighbors of the node.
:rtype: set()
"""
return self._network.manager.getNodeNeighbors(self.home_id, self.object_id)
@property
def num_groups(self):
"""
Gets the number of association groups reported by this node.
:rtype: int
"""
return self._network.manager.getNumGroups(self.home_id, self.object_id)
def get_max_associations(self, groupidx):
"""
Gets the maximum number of associations for a group.
:param groupidx: The group to query
:type groupidx: int
:rtype: int
"""
return self._network.manager.getMaxAssociations(self.home_id, self.node_id, groupidx)
    @property
    def groups(self):
        """
        Get the association groups reported by this node

        In Z-Wave, groups are numbered starting from one.  For example, if a call to
        GetNumGroups returns 4, the _groupIdx value to use in calls to GetAssociations
        AddAssociation and RemoveAssociation will be a number between 1 and 4.

        :rtype: dict()
        """
        groups = dict()
        groups_added = 0
        i = 1
        # Group indexes are 1-based; scan until num_groups usable groups
        # (max_associations > 0) were found, with 255 as a hard upper bound
        # so a misreporting device cannot cause an endless loop.
        while groups_added < self.num_groups and i < 256:
            if self.get_max_associations(i) > 0:
                groups[i] = ZWaveGroup(i, network=self._network, node_id=self.node_id)
                groups_added += 1
            i += 1
        return groups
def groups_to_dict(self, extras=['all']):
"""
Return a dict representation of the groups.
:param extras: The extra inforamtions to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
groups = self.groups
ret={}
for gid in groups.keys():
ret[gid] = groups[gid].to_dict(extras=extras)
return ret
@property
def command_classes(self):
"""
The commandClasses of the node.
:rtype: set()
"""
command_classes = set()
for cls in self._network.manager.COMMAND_CLASS_DESC:
if self._network.manager.getNodeClassInformation(self.home_id, self.object_id, cls):
command_classes.add(cls)
return command_classes
@property
def command_classes_as_string(self):
"""
Return the command classes of the node as string.
:rtype: set()
"""
commands = self.command_classes
command_str = set()
for cls in commands:
command_str.add(self._network.manager.COMMAND_CLASS_DESC[cls])
return command_str
def get_command_class_as_string(self, class_id):
"""
Return the command class representation as string.
:param class_id: the COMMAND_CLASS to get string representation
:type class_id: hexadecimal code
:rtype: str
"""
return self._network.manager.COMMAND_CLASS_DESC[class_id]
def get_command_class_genres(self):
"""
Return the list of genres of command classes
:rtype: set()
"""
return ['User', 'Basic', 'Config', 'System']
def get_values_by_command_classes(self, genre='All', \
type='All', readonly='All', writeonly='All'):
"""
Retrieve values in a dict() of dicts(). The dict is indexed on the COMMAND_CLASS.
This allows to browse values grouped by the COMMAND_CLASS.You can optionnaly filter for a command class,
a genre and/or a type. You can also filter readonly and writeonly params.
This method always filter the values.
If you wan't to get all the node's values, use the property self.values instead.
:param genre: the genre of value
:type genre: 'All' or PyGenres
:param type: the type of value
:type type: 'All' or PyValueTypes
:param readonly: Is this value readonly
:type readonly: 'All' or True or False
:param writeonly: Is this value writeonly
:type writeonly: 'All' or True or False
:rtype: dict(command_class : dict(valueids))
"""
values = dict()
for value in self.values:
if (genre == 'All' or self.values[value].genre == genre) and \
(type == 'All' or self.values[value].type == type) and \
(readonly == 'All' or self.values[value].is_read_only == readonly) and \
(writeonly == 'All' or self.values[value].is_write_only == writeonly):
if self.values[value].command_class not in values:
values[self.values[value].command_class] = dict()
values[self.values[value].command_class][value] = self.values[value]
return values
def get_values_for_command_class(self, class_id):
"""
Retrieve the set of values for a command class.
Deprecated
For backward compatibility only.
Use get_values instead
:param class_id: the COMMAND_CLASS to get values
:type class_id: hexadecimal code or string
:type writeonly: 'All' or True or False
:rtype: set() of classId
"""
#print class_id
return self.get_values(class_id=class_id)
def get_values(self, class_id='All', genre='All', type='All', \
readonly='All', writeonly='All', index='All', label='All'):
"""
Retrieve the set of values. You can optionnaly filter for a command class,
a genre and/or a type. You can also filter readonly and writeonly params.
This method always filter the values.
If you wan't to get all the node's values, use self.values instead.
:param class_id: the COMMAND_CLASS to get values
:type class_id: hexadecimal code or string
:param genre: the genre of value
:type genre: 'All' or PyGenres
:param type: the type of value
:type type: 'All' or PyValueTypes
:param readonly: Is this value readonly
:type readonly: 'All' or True or False
:param writeonly: Is this value writeonly
:type writeonly: 'All' or True or False
:param index: Index of value within all the values
:type index: int
:param label: Label of the value as set by openzwave
:type label: str
:rtype: set() of Values
"""
ret = dict()
valkeys = self.values.keys()
for value in valkeys:
if (class_id == 'All' or self.values[value].command_class == class_id) and \
(genre == 'All' or self.values[value].genre == genre) and \
(type == 'All' or self.values[value].type == type) and \
(readonly == 'All' or self.values[value].is_read_only == readonly) and \
(writeonly == 'All' or self.values[value].is_write_only == writeonly) and \
(index == 'All' or self.values[value].index == index) and \
(label == 'All' or self.values[value].label == label):
ret[value] = self.values[value]
return ret
def values_to_dict(self, extras=['all']):
"""
Return a dict representation of the values.
:param extras: The extra inforamtions to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
ret={}
for vid in self.values.keys():
ret[vid] = self.values[vid].to_dict(extras=extras)
return ret
def add_value(self, value_id):
"""
Add a value to the node
:param value_id: The id of the value to add
:type value_id: int
:param command_class: The command_class of the value
:type command_class: str
:rtype: bool
"""
value = ZWaveValue(value_id, network=self.network, parent=self)
self.values[value_id] = value
def change_value(self, value_id):
"""
Change a value of the node.
Not implemented
:param value_id: The id of the value to change
:type value_id: int
"""
pass
def refresh_value(self, value_id):
"""
Refresh a value of the node.
Not implemented
:param value_id: The id of the value to change
:type value_id: int
"""
return self._network.manager.refreshValue(value_id)
def remove_value(self, value_id):
"""
Change a value of the node. Todo
:param value_id: The id of the value to change
:type value_id: int
:return: The result of the operation
:rtype: bool
"""
if value_id in self.values:
logger.debug("Remove value : %s", self.values[value_id])
del self.values[value_id]
return True
return False
def set_field(self, field, value):
"""
A helper to set a writable field : name, location, product_name, ...
:param field: The field to set : name, location, product_name, manufacturer_name
:type field: str
:param value: The value to set
:type value: str
:rtype: bool
"""
if field == "name":
self.name = value
elif field == "location":
self.location = value
elif field == "product_name":
self.product_name = value
elif field == "manufacturer_name":
self.manufacturer_name = value
def has_command_class(self, class_id):
"""
Check that this node use this commandClass.
:param classId: the COMMAND_CLASS to check
:type classId: hexadecimal code
:rtype: bool
"""
return class_id in self.command_classes
@property
def manufacturer_id(self):
"""
The manufacturer id of the node.
:rtype: str
"""
return self._network.manager.getNodeManufacturerId(self.home_id, self.object_id)
@property
def manufacturer_name(self):
"""
The manufacturer name of the node.
:rtype: str
"""
return self._network.manager.getNodeManufacturerName(self.home_id, self.object_id)
@manufacturer_name.setter
def manufacturer_name(self, value):
"""
Set the manufacturer name of the node.
:param value: The new manufacturer name of the node
:type value: str
"""
self._network.manager.setNodeManufacturerName(self.home_id, self.object_id, value)
@property
def generic(self):
"""
The generic type of the node.
:rtype: int
"""
return self._network.manager.getNodeGeneric(self.home_id, self.object_id)
@property
def basic(self):
"""
The basic type of the node.
:rtype: int
"""
return self._network.manager.getNodeBasic(self.home_id, self.object_id)
@property
def specific(self):
"""
The specific type of the node.
:return: The specific type of the node
:rtype: int
"""
return self._network.manager.getNodeSpecific(self.home_id, self.object_id)
@property
def security(self):
"""
The security type of the node.
:return: The security type of the node
:rtype: int
"""
return self._network.manager.getNodeSecurity(self.home_id, self.object_id)
@property
def version(self):
"""
The version of the node.
:return: The version of the node
:rtype: int
"""
return self._network.manager.getNodeVersion(self.home_id, self.object_id)
@property
def is_listening_device(self):
"""
Is this node a listening device.
:rtype: bool
"""
return self._network.manager.isNodeListeningDevice(self.home_id, self.object_id)
@property
def is_beaming_device(self):
"""
Is this node a beaming device.
:rtype: bool
"""
return self._network.manager.isNodeBeamingDevice(self.home_id, self.object_id)
@property
def is_frequent_listening_device(self):
"""
Is this node a frequent listening device.
:rtype: bool
"""
return self._network.manager.isNodeFrequentListeningDevice(self.home_id, self.object_id)
@property
def is_security_device(self):
"""
Is this node a security device.
:rtype: bool
"""
return self._network.manager.isNodeSecurityDevice(self.home_id, self.object_id)
@property
def is_routing_device(self):
"""
Is this node a routing device.
:rtype: bool
"""
return self._network.manager.isNodeRoutingDevice(self.home_id, self.object_id)
@property
def is_zwave_plus(self):
"""
Is this node a zwave plus one.
:rtype: bool
"""
return self._network.manager.isNodeZWavePlus(self.home_id, self.object_id)
@property
def is_locked(self):
"""
Is this node locked.
:rtype: bool
"""
return self._is_locked
@property
def is_sleeping(self):
"""
Is this node sleeping.
:rtype: bool
"""
return not self.is_awake
# @property
# def level(self):
# """
# The level of the node.
# Todo
# """
# values = self._getValuesForCommandClass(0x26) # COMMAND_CLASS_SWITCH_MULTILEVEL
# if values:
# for value in values:
# vdic = value.value_data
# if vdic and vdic.has_key('type') and vdic['type'] == 'Byte' and vdic.has_key('value'):
# return int(vdic['value'])
# return 0
# @property
# def is_on(self):
# """
# Is this node On.
# Todo
# """
# values = self._getValuesForCommandClass(0x25) # COMMAND_CLASS_SWITCH_BINARY
# if values:
# for value in values:
# vdic = value.value_data
# if vdic and vdic.has_key('type') and vdic['type'] == 'Bool' and vdic.has_key('value'):
# return vdic['value'] == 'True'
# return False
# @property
# def signal_strength(self):
# """
# The signal strenght of this node.
# Todo
# """
# return 0
@property
def max_baud_rate(self):
"""
Get the maximum baud rate of a node
"""
return self._network.manager.getNodeMaxBaudRate(self.home_id, self.object_id)
def heal(self, upNodeRoute=False):
"""
Heal network node by requesting the node rediscover their neighbors.
Sends a ControllerCommand_RequestNodeNeighborUpdate to the node.
:param upNodeRoute: Optional Whether to perform return routes initialization. (default = false).
:type upNodeRoute: bool
:return: True is the ControllerCommand is sent. False otherwise
:rtype: bool
"""
if self.is_awake == False:
logger.warning(u'Node state must a minimum set to awake')
return False
self._network.manager.healNetworkNode(self.home_id, self.object_id, upNodeRoute)
return True
def test(self, count=1):
"""
Send a number of test messages to node and record results.
:param count: The number of test messages to send.
:type count: int
"""
self._network.manager.testNetworkNode(self.home_id, self.object_id, count)
def assign_return_route(self):
'''Ask the to update its update its Return Route to the Controller
This command will ask a Node to update its Return Route to the Controller
Results of the AssignReturnRoute Command will be send as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug('assign_return_route for node [%s]', self.object_id)
return self._network.controller.assign_return_route(self.object_id)
def refresh_info(self):
"""
Trigger the fetching of fixed data about a node.
Causes the nodes data to be obtained from the Z-Wave network in the same way
as if it had just been added. This method would normally be called
automatically by OpenZWave, but if you know that a node has been changed,
calling this method will force a refresh of the data held by the library. This
can be especially useful for devices that were asleep when the application was
first run.
:rtype: bool
"""
logger.debug(u'refresh_info for node [%s]', self.object_id)
return self._network.manager.refreshNodeInfo(self.home_id, self.object_id)
def request_state(self):
"""
Trigger the fetching of just the dynamic value data for a node.
Causes the node's values to be requested from the Z-Wave network. This is the
same as the query state starting from the dynamic state.
:rtype: bool
"""
logger.debug(u'request_state for node [%s]', self.object_id)
return self._network.manager.requestNodeState(self.home_id, self.object_id)
def send_information(self):
'''Send a NIF frame from the Controller to a Node.
This command send a NIF frame from the Controller to a Node
Results of the SendNodeInformation Command will be send as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug(u'send_information for node [%s]', self.object_id)
return self._network.controller.send_node_information(self.object_id)
def network_update(self):
'''Update the controller with network information from the SUC/SIS.
Results of the RequestNetworkUpdate Command will be send as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug(u'network_update for node [%s]', self.object_id)
return self._network.controller.request_network_update(self.object_id)
def neighbor_update(self):
'''Ask a Node to update its Neighbor Tables
This command will ask a Node to update its Neighbor Tables.
Results of the RequestNodeNeighborUpdate Command will be send as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug(u'neighbor_update for node [%s]', self.object_id)
return self._network.controller.request_node_neighbor_update(self.object_id)
def create_button(self, buttonid):
'''Create a handheld button id.
Only intended for Bridge Firmware Controllers.
Results of the CreateButton Command will be send as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param buttonid: the ID of the Button to query.
:type buttonid: int
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug(u'create_button for node [%s]', self.object_id)
return self._network.controller.create_button(self.object_id, buttonid)
def delete_button(self, buttonid):
'''Delete a handheld button id.
Only intended for Bridge Firmware Controllers.
Results of the CreateButton Command will be send as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param buttonid: the ID of the Button to query.
:type buttonid: int
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug(u'delete_button for node [%s]', self.object_id)
return self._network.controller.delete_button(self.object_id, buttonid)
def request_all_config_params(self):
"""
Request the values of all known configurable parameters from a device.
"""
logger.debug(u'Requesting config params for node [%s]', self.object_id)
self._network.manager.requestAllConfigParams(self.home_id, self.object_id)
def request_config_param(self, param):
"""
Request the value of a configurable parameter from a device.
Some devices have various parameters that can be configured to control the
device behaviour. These are not reported by the device over the Z-Wave network
but can usually be found in the devices user manual. This method requests
the value of a parameter from the device, and then returns immediately,
without waiting for a response. If the parameter index is valid for this
device, and the device is awake, the value will eventually be reported via a
ValueChanged notification callback. The ValueID reported in the callback will
have an index set the same as _param and a command class set to the same value
as returned by a call to Configuration::StaticGetCommandClassId.
:param param: The param of the node.
:type param:
"""
logger.debug(u'Requesting config param %s for node [%s]', param, self.object_id)
self._network.manager.requestConfigParam(self.home_id, self.object_id, param)
def set_config_param(self, param, value, size=2):
"""
Set the value of a configurable parameter in a device.
Some devices have various parameters that can be configured to control the
device behaviour. These are not reported by the device over the Z-Wave network
but can usually be found in the devices user manual. This method returns
immediately, without waiting for confirmation from the device that the change
has been made.
:param param: The param of the node.
:type param:
:param value: The value of the param.
:type value:
:param size: Is an optional number of bytes to be sent for the parameter value. Defaults to 2.
:type size: int
:return:
:rtype: bool
"""
logger.debug(u'Set config param %s for node [%s]', param, self.object_id)
return self._network.manager.setConfigParam(self.home_id, self.object_id, param, value, size)
# def setNodeOn(self, node):
# """
# """
# self._log.debug('Requesting setNodeOn for node {0}'.format(node.id))
# self._manager.setNodeOn(node.home_id, node.id)
# def setNodeOff(self, node):
# """
# """
# self._log.debug('Requesting setNodeOff for node {0}'.format(node.id))
# self._manager.setNodeOff(node.home_id, node.id)
# def setNodeLevel(self, node, level):
# """
# """
# self._log.debug('Requesting setNodeLevel for node {0} with new level {1}'.format(node.id, level))
# self._manager.setNodeLevel(node.home_id, node.id, level)
@property
def is_awake(self):
    """
    Whether this node is currently awake (not sleeping).
    :rtype: bool
    """
    return self._network.manager.isNodeAwake(self.home_id, self.object_id)
@property
def is_failed(self):
    """
    Whether the controller presumes this node has failed.
    :rtype: bool
    """
    return self._network.manager.isNodeFailed(self.home_id, self.object_id)
@property
def query_stage(self):
    """
    The node's current query (interview) stage as reported by the manager.
    (Original docstring said "Is this node a awake." -- copy-paste error.)
    :rtype: string
    """
    return self._network.manager.getNodeQueryStage(self.home_id, self.object_id)
@property
def is_ready(self):
    """
    Get whether the node is ready to operate (QueryStage Completed).
    :rtype: bool
    """
    return self._isReady
@is_ready.setter
def is_ready(self, value):
    """
    Set whether the node is ready to operate.
    Automatically set to True by notification SIGNAL_NODE_QUERIES_COMPLETE.
    :param value: is node ready
    :type value: bool
    """
    self._isReady = value
@property
def is_info_received(self):
    """
    Whether the node information frame has been received for this node.
    :rtype: bool
    """
    return self._network.manager.isNodeInfoReceived(self.home_id, self.object_id)
@property
def type(self):
    """
    A human-readable label describing the node's device type.
    :rtype: str
    """
    return self._network.manager.getNodeType(self.home_id, self.object_id)
| StarcoderdataPython |
5197319 | <reponame>return-main/stocksight
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""SeekAlphaListener.py - get headline sentiment from SeekingAlpha and add to
Elasticsearch.
See README.md or https://github.com/shirosaidev/stocksight
for more information.
Copyright (C) <NAME> 2018-2019
Copyright (C) Allen (<NAME>) Xie 2019
stocksight is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
from StockSight.NewsHeadlineListener import *
class YahooFinanceListener(NewsHeadlineListener):
    """News-headline listener that scrapes headlines for *symbol* from the
    Yahoo Finance quote page."""
    def __init__(self, symbol):
        super(YahooFinanceListener, self)\
            .__init__("Yahoo Finance", symbol, "https://finance.yahoo.com/quote/%s/?p=%s" % (symbol, symbol))
    def get_news_headlines(self):
        """Scrape the quote page and return a list of processable articles.

        When ``config['news']['follow_link']`` is set, the linked article page
        is fetched and up to 10 paragraphs are appended to the article body.
        """
        articles = []
        parsed_uri = urlparse.urljoin(self.url, '/')
        try:
            soup = self.get_soup(self.url)
            html = soup.findAll('h3')
            if html:
                for raw_article in html:
                    article = self.get_article_with_atag(raw_article, parsed_uri)
                    if self.can_process(article):
                        if config['news']['follow_link']:
                            body_url = article.url
                            for p in self.get_page_text(body_url, 'p'):
                                article.body += str(p)+" "
                        article.referer_url = self.url
                        articles.append(article)
        except requests.exceptions.RequestException as exc:
            logger.warning("Exception: can't crawl web site (%s)" % exc)
        return articles
    def get_page_text(self, url, selector):
        """Yield the text of up to 10 elements matching *selector* at *url*."""
        max_paragraphs = 10
        try:
            soup = self.get_soup(url)
            html_p = soup.findAll(selector)
            if html_p:
                n = 1
                for i in html_p:
                    if n <= max_paragraphs:
                        if i.text is not None:
                            yield i.text
                    else:
                        break
                    n += 1
        except requests.exceptions.RequestException as exc:
            # IDIOM FIX: the original bound the exception to ``re``, shadowing
            # the stdlib ``re`` module name inside this scope.
            logger.warning("Exception: can't crawl web site (%s)" % exc)
| StarcoderdataPython |
11308850 | <gh_stars>1-10
class Solution:
    def solve(self, words):
        """Return the length of the longest chain of words in which each next
        word equals the previous word with one letter appended at the end.

        Iterative DFS over unique words (shortest first); ``seen`` guarantees
        every word starts or extends at most one chain.
        """
        from string import ascii_lowercase  # BUG FIX: was used but never imported
        words = sorted(set(words), key=len)
        words_set = set(words)
        ans = 0
        seen = set()
        for word in words:  # unused enumerate() index removed
            if word in seen:
                continue
            seen.add(word)
            dfs = [[word, 1]]
            while dfs:
                cur, streak = dfs.pop()
                ans = max(ans, streak)
                for char in ascii_lowercase:
                    if cur + char in words_set and cur + char not in seen:
                        seen.add(cur + char)
                        dfs.append([cur + char, streak + 1])
        return ans
| StarcoderdataPython |
1645829 | import numpy as np
from trackers.tracker import Tracker
from trackers.kalman_filter import KalmanFilter
from utils.hyper_params import default_params
from utils import util
class Sort(Tracker):
    """SORT (Simple Online and Realtime Tracking) tracker.

    Associates detections to existing tracks with IOU-based minimum-cost
    matching and propagates track state with a Kalman filter.
    """
    def __init__(
        self,
        min_score_thresh=default_params['min_score_thresh'],
        max_age=default_params['max_age'],
        nn_init=default_params['nn_init'],
        filter=None
    ):
        # BUG FIX: the original default was ``filter=KalmanFilter()``, which is
        # evaluated once, so every Sort() instance silently shared a single
        # filter object (mutable default argument). Create one per instance.
        # (The parameter name ``filter`` shadows the builtin but is kept for
        # interface compatibility with callers passing it by keyword.)
        if filter is None:
            filter = KalmanFilter()
        super(Sort, self).__init__(
            min_score_thresh=min_score_thresh,
            max_age=max_age,
            nn_init=nn_init,
            filter=filter
        )
    def _match(self, detections):
        # IOU cost matching, gated by the minimum score threshold.
        # (An unreachable string literal after this return was removed.)
        return util.min_cost_matching(
            distance_metric=util.iou_cost,
            max_distance=self.min_score_thresh,
            tracks=self.tracks,
            detections=detections
        )
    def _update(self, detections):
        """
        Params:
            detections - a numpy array of Detections .
        Requires: this method must be called once for each frame even with empty detections.
        Returns the a similar array, where the last column is the object ID.
        NOTE: The number of objects returned may differ from the number of detections provided.
        """
        super(Sort, self)._update(detections)
        return np.array([trk.to_visualize() for trk in self.tracks]).reshape(-1, 7)
| StarcoderdataPython |
11355727 | <filename>python/domain/quality_plan/content/requirements.py
"""
requirements - define requirements in the document
"""
from ..model import Requirement
from .sources import S1, S2
# Requirements referenced by the quality plan. Each Requirement links back to
# the source document(s) (S1/S2) it originates from.
R1 = Requirement(
    identifier="R1",
    description="This is requirement 1",
    sources=[S1],
)
R2 = Requirement(
    identifier="R2",
    description="This is requirement 2",
    sources=[S1, S2],
)
R3 = Requirement(
    identifier="R3",
    description="This is requirement 3. This requirement is completed because all of its measures have been completed.",
    sources=[S2],
)
# R4 intentionally has no measures attached (see description).
R4 = Requirement(
    identifier="R4",
    description="This is requirement 4. It has no measures defined.",
    sources=[S2],
)
| StarcoderdataPython |
8058428 | # -*- coding: utf-8 -*-
# DO NOT CHANGE THIS FILE!
# Changes will be overwritten on: boat pull
#
# Enable the plugin by adding it to ansible.cfg's [defaults] section:
#
# callback_whitelist = longboat
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: longboat
type: aggregate
short_description: Sends task results to Longboat
author: "<NAME> <<EMAIL>>"
description:
- This callback plugin will send task results as JSON formatted events to Longboat.
- Credit to "<NAME>" for source upon which this is based.
requirements:
- Whitelisting this callback plugin
'''
EXAMPLES = '''
examples: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = longboat
'''
import os
import sys
import json
import uuid
import socket
import getpass
from subprocess import Popen, PIPE, STDOUT
from datetime import datetime
from os.path import basename
from ansible.plugins.callback import CallbackBase
from ansible.module_utils.urls import open_url
class LongboatCollectorSource(object):
    """Collects Ansible task results and ships them to the Longboat collector.

    Each event is serialised to JSON and piped to ``$LONGBOAT_CLI/boat collect``.
    """
    def __init__(self):
        # Per-run state, filled in by the callback plugin below.
        self.ansible_check_mode = False
        self.ansible_playbook = ""
        self.ansible_version = ""
        # Unique id grouping all events of one playbook run.
        self.session = str(uuid.uuid4())
    def send_event(self, state, result, runtime):
        """Serialise one task *result* (status *state*, *runtime* seconds)
        and pipe it to the ``boat collect`` subprocess."""
        if result._task_fields['args'].get('_ansible_check_mode') is True:
            self.ansible_check_mode = True
        if result._task_fields['args'].get('_ansible_version'):
            self.ansible_version = \
                result._task_fields['args'].get('_ansible_version')
        if result._task._role:
            ansible_role = str(result._task._role)
        else:
            ansible_role = None
        data = {}
        data['uuid'] = result._task._uuid
        data['session'] = self.session
        data['status'] = state
        data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
                                                       '+0000')
        data['runtime'] = runtime
        data['ansible_version'] = self.ansible_version
        data['ansible_check_mode'] = self.ansible_check_mode
        data['ansible_host'] = result._host.name
        data['ansible_playbook'] = self.ansible_playbook
        data['ansible_role'] = ansible_role
        data['ansible_task'] = result._task_fields
        data['ansible_result'] = result._result
        jsondata = json.dumps(data, sort_keys=True)
        # Python 2's Popen has no ``encoding`` kwarg, hence the version switch.
        # NOTE(review): raises KeyError if LONGBOAT_CLI is not set in the
        # environment -- confirm that is the desired failure mode.
        if sys.version_info[0] < 3:
            proc = Popen([os.environ['LONGBOAT_CLI'] + '/boat','collect'], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
        else:
            proc = Popen([os.environ['LONGBOAT_CLI'] + '/boat','collect'], stdout=PIPE, stdin=PIPE, stderr=STDOUT, encoding='utf8')
        (stdout_data, stderr_data) = proc.communicate(input=jsondata)
        if proc.returncode > 0:
            # Surface collector errors (stderr is merged into stdout).
            print(stdout_data)
class CallbackModule(CallbackBase):
    """Ansible aggregate callback plugin forwarding task results to Longboat."""
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'longboat'
    CALLBACK_NEEDS_WHITELIST = True
    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        self.start_datetimes = {}  # Collect task start times, keyed by task uuid
        self.longboat = LongboatCollectorSource()
    def _runtime(self, result):
        # Seconds elapsed since the matching *_task_start callback fired.
        return (
            datetime.utcnow() -
            self.start_datetimes[result._task._uuid]
        ).total_seconds()
    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
    def v2_playbook_on_start(self, playbook):
        # Remember the playbook file name for every subsequent event.
        self.longboat.ansible_playbook = basename(playbook._file_name)
    def v2_playbook_on_task_start(self, task, is_conditional):
        self.start_datetimes[task._uuid] = datetime.utcnow()
    def v2_playbook_on_handler_task_start(self, task):
        self.start_datetimes[task._uuid] = datetime.utcnow()
    def v2_runner_on_ok(self, result, **kwargs):
        self.longboat.send_event(
            'OK',
            result,
            self._runtime(result)
        )
    def v2_runner_on_skipped(self, result, **kwargs):
        self.longboat.send_event(
            'SKIPPED',
            result,
            self._runtime(result)
        )
    def v2_runner_on_failed(self, result, **kwargs):
        self.longboat.send_event(
            'FAILED',
            result,
            self._runtime(result)
        )
    def runner_on_async_failed(self, result, **kwargs):
        self.longboat.send_event(
            'FAILED',
            result,
            self._runtime(result)
        )
    def v2_runner_on_unreachable(self, result, **kwargs):
        self.longboat.send_event(
            'UNREACHABLE',
            result,
            self._runtime(result)
        )
    def v2_playbook_on_stats(self, stats):
        # Print the per-run Longboat URL at the end of the play.
        self._display.display("https://longboat.io/playbook/" + self.longboat.session)
| StarcoderdataPython |
3369038 | <reponame>4thel00z/rq-dashboard
# flake8: noqa
from .web import setup
| StarcoderdataPython |
4974608 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 7 13:58:58 2017
@author: konodera
"""
import pandas as pd
import numpy as np
import utils
utils.start(__file__)
#==============================================================================
# load
#==============================================================================
usecols = ['product_id', 'order_dow', 'order_number_rev']
log = utils.read_pickles('../input/mk/log', usecols)
#==============================================================================
# def
#==============================================================================
def make(T):
    """
    Build the per-product day-of-week feature table and pickle it.

    T : int
        Number of most recent orders (per user) held out; -1 selects the
        test split.  e.g. T = 0 -> folder = 'trainT-0'.
    """
    if T==-1:
        folder = 'test'
    else:
        folder = 'trainT-'+str(T)
    # Drop the held-out tail of each user's order history.
    log_ = log[log.order_number_rev>T]
    # Global distribution of orders across days of the week.
    all_item_dist = log_.order_dow.value_counts(normalize=True).reset_index()
    all_item_dist.columns = ['order_dow', 'dow_dist_ratio']
    # Per-product counts by day of week, normalised to per-product ratios.
    tbl = log_.groupby(['product_id', 'order_dow']).size().reset_index()
    tbl.columns = ['product_id', 'order_dow', 'item_dow_cnt']
    tbl['item_dow_ratio'] = tbl.item_dow_cnt / tbl.groupby('product_id').transform(np.sum).item_dow_cnt
    tbl = pd.merge(tbl, all_item_dist, on='order_dow', how='left')
    # Feature: deviation of this product's dow profile from the global one.
    tbl['item_dow_ratio_diff'] = tbl.item_dow_ratio - tbl.dow_dist_ratio
    tbl[['product_id','order_dow', 'item_dow_ratio_diff']].to_pickle('../feature/{}/f213_product-dow.p'.format(folder))
#==============================================================================
# main
#==============================================================================
# Build features for each training horizon and the test split.
make(0)
make(1)
make(2)
make(-1)
#==============================================================================
# BUG FIX: utils.end(__file__) was duplicated; a single call suffices.
utils.end(__file__)
| StarcoderdataPython |
9717090 | import asyncio
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from functools import partial
import requests
import json
# NOTE(review): this module-level queue is never used -- __main__ below
# creates a new loop-bound queue that shadows it.  TODO confirm and remove.
queue = asyncio.Queue()
# Base directory for input/output files.
path_root = "/home/user/workspace/Scraper/"
"""
Producer, simplely takes the urls and dump them into the queue
"""
async def produce(queue):
with open(path_root + 'input/sniff.txt', 'r', encoding='utf-8') as inputf:
for uid in inputf:
await queue.put(uid)
await queue.put(None) # poison pill to signal all the work is done
"""
Helper function to send request and manipulate response
"""
async def async_request(uid, loop, callback=None):
print("Sending request to: " + uid)
"""
This is a canonical way to turn a synchronized
routine to async. event_loop.run_in_executor,
by default, takes a new thread from ThreadPool.
It is also possible to change the executor to ProcessPool.
"""
access_token = ""
root = "https://graph.facebook.com/v3.3/"
params = "?fields=email,address,birthday,first_name,last_name,middle_name&access_token="
url = root + uid + params + access_token
ret = await loop.run_in_executor(ThreadPoolExecutor(),
partial(requests.get, timeout=5), url)
obj = json.loads(ret)
if callback is not None:
callback(uid, obj)
"""
Consumer with an infinite loop. It only stops if there is a poison pill.
"""
async def consume(queue, loop):
with open(path_root + 'output/graph_api.txt', 'a+', encoding='utf-8') as f:
def write_to_file(uid, obj):
email = obj["email"] if 'email' in obj else 'null'
address = obj["address"] if 'address' in obj else 'null'
birthday = obj["birthday"] if 'birthday' in obj else 'null'
first_name = obj["first_name"] if 'first_name' in obj else ''
last_name = obj["last_name"] if 'last_name' in obj else ''
middle_name = obj["middle_name"] if 'middle_name' in obj else ''
full_name = first_name + " " + middle_name + " " + last_name
f.write(uid + "," + email + "," + address + "," + birthday + "," +
full_name + "\n")
while True:
# coroutine will be blocked if queue is empty
item = await queue.get()
if item is None: # if poison pill is detected, exit the loop
break
await async_request(item, loop, write_to_file)
# signal that the current task from the queue is done
# and decrease the queue counter by one
queue.task_done()
"""
Driver
"""
if __name__ == '__main__':
loop = asyncio.get_event_loop()
queue = asyncio.Queue(loop=loop)
producer_coro = produce(queue)
consumer_coro = consume(queue, loop)
loop.run_until_complete(asyncio.gather(producer_coro, consumer_coro))
loop.close()
| StarcoderdataPython |
3273705 | <filename>covid/models/SEIRD_renewal.py
import jax
import jax.numpy as np
from jax.random import PRNGKey
import numpyro
import numpyro.distributions as dist
from ..compartment import SEIRDModel
from .util import observe, observe_nb2, ExponentialRandomWalk, LogisticRandomWalk, frozen_random_walk, clean_daily_obs
from .base import SEIRDBase, getter
import numpy as onp
"""
************************************************************
SEIRD model
************************************************************
"""
class SEIRD(SEIRDBase):
    """
    SEIRD compartmental model written as a discrete-time renewal equation,
    expressed as a NumPyro generative model.

    ``__call__`` samples epidemiological priors, a random-walk transmission
    rate, simulates incident exposures with a renewal equation, and attaches
    negative-binomial observation models for daily confirmed cases and deaths.
    """
    def __call__(self,
                 T = 50,
                 N = 1e5,
                 T_future = 0,
                 E_duration_est = 4.0,
                 I_duration_est = 2.0,
                 H_duration_est = 10.0,
                 R0_est = 3.0,
                 beta_shape = 1.,
                 sigma_shape = 100.,
                 gamma_shape = 100.,
                 death_rate_shape = 10.,
                 det_prob_est = 0.3,
                 det_prob_conc = 50.,
                 confirmed_dispersion=0.3,
                 death_dispersion=0.3,
                 rw_scale = 2e-1,
                 death_prob_est=0.01,
                 death_prob_conc=100,
                 forecast_rw_scale = 0.,
                 drift_scale = None,
                 num_frozen=0,
                 rw_use_last=1,
                 confirmed=None,
                 death=None,
                 T_old=None):
        # NOTE(review): forecast_rw_scale, drift_scale and rw_use_last are
        # accepted but never used below -- confirm whether they are vestigial.
        # Prior on the initial detection probability.
        det_prob0 = numpyro.sample("det_prob0",
                                   dist.Beta(det_prob_est * det_prob_conc,
                                             (1-det_prob_est) * det_prob_conc))
        # Convert cumulative observations to (cleaned) daily increments.
        # NOTE(review): confirmed0/death0 are computed but never used.
        if confirmed is None:
            confirmed0, confirmed = (None, None)
        else:
            confirmed0 = confirmed[0]
            confirmed = clean_daily_obs(onp.diff(confirmed))
        if death is None:
            death0, death = (None, None)
        else:
            death0 = death[0]
            death = clean_daily_obs(onp.diff(death))
        E0 = numpyro.sample("E0", dist.Uniform(0, 1e-4*N)) # change to 1e-3 if starting on 2020-03-16
        # Random walk driving the time-varying transmission rate beta(t).
        rw = frozen_random_walk("rw",
                                num_steps=T-1,
                                num_frozen=num_frozen)
        beta0 = numpyro.sample("beta0",
                               dist.Gamma(beta_shape, beta_shape * I_duration_est/R0_est))
        beta = numpyro.deterministic("beta", beta0 * np.exp(rw_scale*rw))
        # Time-varying detection probability follows a logistic random walk.
        det_prob = numpyro.sample("det_prob",
                                  LogisticRandomWalk(loc=det_prob0,
                                                     scale=rw_scale,
                                                     drift=0.,
                                                     num_steps=T-1))
        if T_future > 0:
            # Forecast: hold beta/det_prob at the mean of their last 10 values.
            beta = np.concatenate((beta, np.repeat(beta[-10:].mean(), T_future)))
            det_prob = np.concatenate((det_prob, np.repeat(det_prob[-10:].mean(), T_future)))
        # Observation-noise dispersions (truncated to stay away from 0).
        death_dispersion = numpyro.sample("death_dispersion",
                                          dist.TruncatedNormal(low=0.1,
                                                               loc=death_dispersion,
                                                               scale=0.15))
        confirmed_dispersion = numpyro.sample("confirmed_dispersion",
                                              dist.TruncatedNormal(low=0.1,
                                                                   loc=confirmed_dispersion,
                                                                   scale=0.15))
        # Rates: sigma = 1/latent period, gamma = 1/infectious period.
        sigma = numpyro.sample("sigma",
                               dist.Gamma(sigma_shape, sigma_shape * E_duration_est))
        gamma = numpyro.sample("gamma",
                               dist.Gamma(gamma_shape, gamma_shape * I_duration_est))
        # Detection probability for deaths.
        det_prob_d = numpyro.sample("det_prob_d",
                                    dist.Beta(.9 * 100,
                                              (1-.9) * 100))
        death_prob = numpyro.sample("death_prob",
                                    dist.Beta(death_prob_est * death_prob_conc, (1-death_prob_est) * death_prob_conc))
        death_rate = numpyro.sample("death_rate",
                                    dist.Gamma(death_rate_shape, death_rate_shape * H_duration_est))
        def Geometric0(mu):
            '''Geometric RV supported on 0,1,...'''
            p = 1/(1+mu)
            log_p = np.log(p)
            log_1_minus_p = np.log(1-p)
            def log_prob(k):
                return np.where(k >= 0, k * log_1_minus_p + log_p, -np.inf)
            return log_prob
        def Geometric1(mu):
            '''Geometric RV supported on 1,2,... (currently unused below)'''
            p = 1/mu
            log_p = np.log(p)
            log_1_minus_p = np.log(1-p)
            def log_prob(k):
                return np.where(k > 0, (k-1) * log_1_minus_p + log_p, -np.inf)
            return log_prob
        def SEIR_renewal(theta, T=100, dE0=10, N=1e6, CONV_WIDTH=32):
            '''Simulate SEIR dynamics as a renewal equation over incident exposures.'''
            beta, sigma, gamma, death_prob, death_rate, pd = theta
            # NOTE(review): pd is unpacked but not used inside this function.
            # U = latent period
            # V = infectious period
            U_logp = Geometric0(1/sigma)
            # need an infectious period
            V_logp = Geometric0(1/gamma)
            D_logp = Geometric0(1/death_rate)
            # For some reason this gives closest match to the diff eq. model
            # with U drawn from the geometric distribution supported on non-
            # negative integers and V drawn from the geometric supported on
            # positive integers.
            t = np.arange(CONV_WIDTH)
            U_pmf = np.exp(U_logp(t))
            U_ccdf = 1 - np.cumsum(U_pmf)
            V_pmf = np.exp(V_logp(t))
            V_ccdf = 1 - np.cumsum(V_pmf)
            D_pmf = np.exp(D_logp(t))
            # A(t) = Pr(infectious t time units after being infected)
            #      = sum_u Pr(U=u) * [Pr(R >= t-u)*Pr(H >= t-u)]
            #      = convolution of U pmf and V ccdf
            A = np.convolve(U_pmf, V_ccdf, mode='full')[:CONV_WIDTH]
            A_rev = A[::-1] # to facilitate convolution inside the dynamics loop
            # Let dE(t) be newly exposed cases at time t. Then
            #
            #   dE(t) = beta * S(t)/N * (# previous cases that are infectious at time t)
            #         = beta * S(t)/N * sum_{s<t} dE(s)*A(t-s)
            #         = beta * S(t)/N * conv(incidence, A)
            #
            def scan_body(state, beta):
                incidence_history, S = state
                dE = beta * S/N * np.sum(incidence_history * A_rev)
                new_state = (np.append(incidence_history[1:], dE), S-dE)
                return new_state, dE
            incidence_history = np.append(np.zeros(CONV_WIDTH-1), dE0)
            S = N - dE0
            _, dE = jax.lax.scan(scan_body, (incidence_history, S), beta*np.ones(T-1))
            dE = np.append(dE0, dE)
            # calculate other variables from incident exposures using
            # various convolutions to "project forward" incident exposures
            E = np.convolve(dE, U_ccdf, mode='full')[:T]
            dI = np.convolve(dE, U_pmf, mode='full')[:T]
            I = np.convolve(dE, A, mode='full')[:T]
            dH = np.convolve(death_prob*dI, V_pmf, mode='full')[:T]
            dD = np.convolve(dH, D_pmf, mode='full')[:T]
            CE = np.cumsum(dE)
            CI = np.cumsum(dI)
            S = N - CE
            R = N-S-E-I
            return (S, E, I, R, CI, dI, dD)
        # NOTE(review): 'time_to_death' is sampled but unused in SEIR_renewal.
        pd = numpyro.sample('time_to_death', dist.Dirichlet(.1*np.ones(40)))
        theta = (beta, sigma, gamma, death_prob, death_rate, pd)
        if T_future == 0:
            S2, E2, I2, R2, CI, dI, dD = SEIR_renewal(theta, T=T, dE0=E0, CONV_WIDTH=40)
        else:
            S2, E2, I2, R2, CI, dI, dD = SEIR_renewal(theta, T=T_old+T_future, dE0=E0, CONV_WIDTH=40)
        # First observation is modelled separately with a standard normal.
        if confirmed is not None:
            dy0 = numpyro.sample('dy0', dist.Normal(0, 1), obs=confirmed[0])
            dz0 = numpyro.sample('dz0', dist.Normal(0, 1), obs=death[0])
        else:
            dy0 = numpyro.sample('dy0', dist.Normal(0, 1))
            dz0 = numpyro.sample('dz0', dist.Normal(0, 1))
        y = dy0
        z = dz0
        # Negative-binomial observation models for daily cases (dy) / deaths (dz).
        if confirmed is not None and T_future == 0:
            with numpyro.handlers.scale(scale=1.0):
                y = observe_nb2("dy", np.diff(CI), det_prob, confirmed_dispersion, obs = confirmed)
            with numpyro.handlers.scale(scale=1.0):
                z = observe_nb2("dz", dD[1:], det_prob_d, death_dispersion, obs = death)
        elif T_future > 0:
            # Forecast mode: additionally emit the last 28 days as *_future sites.
            with numpyro.handlers.scale(scale=1.0):
                y = observe_nb2("dy_future", np.diff(CI)[-28:], det_prob[-28:], confirmed_dispersion)
            with numpyro.handlers.scale(scale=1.0):
                z = observe_nb2("dz_future", dD[1:][-28:], det_prob_d, death_dispersion)
            with numpyro.handlers.scale(scale=1.0):
                y = observe_nb2("dy", np.diff(CI), det_prob, confirmed_dispersion)
            with numpyro.handlers.scale(scale=1.0):
                z = observe_nb2("dz", dD[1:], det_prob_d, death_dispersion)
        else:
            # Prior-predictive mode (no data supplied).
            with numpyro.handlers.scale(scale=1.0):
                y = observe_nb2("dy", np.diff(CI), det_prob, confirmed_dispersion)
            with numpyro.handlers.scale(scale=1.0):
                z = observe_nb2("dz", dD[1:], det_prob_d, death_dispersion)
        y = np.append(dy0, y)
        z = np.append(dz0, z)
        return beta, None, y, z, det_prob, death_prob
    # Posterior-sample accessors for incident cases/deaths.
    dy = getter('dy')
    dz = getter('dz')
    def y0(self, **args):
        # NOTE(review): delegates to z0, which recurses infinitely (below);
        # both look like copy-paste bugs -- confirm the intended source of
        # the initial values before relying on these.
        return self.z0(**args)
    def y(self, samples, **args):
        '''Get cumulative cases from incident ones'''
        dy = self.dy(samples, **args)
        y0 = np.zeros(dy.shape[0])
        if args.get('forecast'):
            # Forecast continues from the last in-sample cumulative value.
            y0 = self.y(samples, forecast=False)[:,-1]
        return y0[:,None] + onp.cumsum(dy, axis=1)
    def z0(self, **args):
        # NOTE(review): calls itself -- infinite recursion if ever invoked.
        return self.z0(**args)
    def z(self, samples, **args):
        '''Get cumulative deaths from incident ones'''
        dz = self.dz(samples, **args)
        z0 = np.zeros(dz.shape[0])
        if args.get('forecast'):
            z0 = self.z(samples, forecast=False)[:,-1]
        return z0[:,None] + onp.cumsum(dz, axis=1)
| StarcoderdataPython |
4959704 | import os
from collections import defaultdict
import dpath.util
from voluptuous import Any
from dvc.dependency.local import LocalDependency
from dvc.exceptions import DvcException
from dvc.hash_info import HashInfo
from dvc.utils.serialize import LOADERS, ParseError
class MissingParamsError(DvcException):
    """Raised when declared parameters are absent from the params file."""
    pass
class BadParamFileError(DvcException):
    """Raised when the params file cannot be parsed."""
    pass
class ParamsDependency(LocalDependency):
    """Stage dependency on selected parameters inside a params file.

    Tracks a subset of keys ("params") of a structured file (default
    ``params.yaml``) rather than the file as a whole.
    """
    PARAM_PARAMS = "params"
    PARAM_SCHEMA = {PARAM_PARAMS: Any(dict, list, None)}
    DEFAULT_PARAMS_FILE = "params.yaml"
    def __init__(self, stage, path, params):
        # ``params`` may be a list of key names, or a dict of name -> locked
        # value (in which case the values seed the hash info).
        info = {}
        self.params = []
        if params:
            if isinstance(params, list):
                self.params = params
            else:
                assert isinstance(params, dict)
                self.params = list(params.keys())
                info = {self.PARAM_PARAMS: params}
        super().__init__(
            stage,
            path
            or os.path.join(stage.repo.root_dir, self.DEFAULT_PARAMS_FILE),
            info=info,
        )
    def dumpd(self):
        # Serialise; if no hash info yet, emit the declared param names.
        ret = super().dumpd()
        if not self.hash_info:
            ret[self.PARAM_PARAMS] = self.params
        return ret
    def fill_values(self, values=None):
        """Load params values dynamically."""
        if not values:
            return
        info = {}
        for param in self.params:
            if param in values:
                info[param] = values[param]
        self.hash_info = HashInfo(self.PARAM_PARAMS, info)
    def workspace_status(self):
        """Per-param status: 'deleted' / 'new' / 'modified' vs. saved hash info."""
        status = super().workspace_status()
        if status.get(str(self)) == "deleted":
            return status
        status = defaultdict(dict)
        info = self.hash_info.value if self.hash_info else {}
        actual = self.read_params()
        for param in self.params:
            if param not in actual.keys():
                st = "deleted"
            elif param not in info:
                st = "new"
            elif actual[param] != info[param]:
                st = "modified"
            else:
                # Unchanged params are omitted from the status dict.
                assert actual[param] == info[param]
                continue
            status[str(self)][param] = st
        return status
    def status(self):
        return self.workspace_status()
    def read_params(self):
        """Parse the params file and return {param: value} for declared params.

        Missing individual params are silently skipped; an unparseable file
        raises BadParamFileError.
        """
        if not self.exists:
            return {}
        # Loader is chosen by file extension (yaml/json/toml/...).
        suffix = self.path_info.suffix.lower()
        loader = LOADERS[suffix]
        try:
            config = loader(self.path_info, fs=self.repo.fs)
        except ParseError as exc:
            raise BadParamFileError(
                f"Unable to read parameters from '{self}'"
            ) from exc
        ret = {}
        for param in self.params:
            try:
                # Dotted names address nested keys, e.g. "train.lr".
                ret[param] = dpath.util.get(config, param, separator=".")
            except KeyError:
                pass
        return ret
    def get_hash(self):
        # All declared params must be present; otherwise fail loudly.
        info = self.read_params()
        missing_params = set(self.params) - set(info.keys())
        if missing_params:
            raise MissingParamsError(
                "Parameters '{}' are missing from '{}'.".format(
                    ", ".join(missing_params), self,
                )
            )
        return HashInfo(self.PARAM_PARAMS, info)
| StarcoderdataPython |
4811033 | """
This module represents the
entry point to Discard(tm)
"""
import controllers.cmdcontroller as CmdController
import views.cmdview as CmdView
import common.viewutil as ViewUtil
import common.game as Game
def main():
    """Entry point: wire the views and game model into the controller."""
    # NOTE(review): CmdController, CmdView, ViewUtil and Game are *module*
    # aliases (``import ... as``), so ``CmdController(...)`` calls a module
    # object -- this likely needs a class/factory inside the module.  Confirm
    # against controllers.cmdcontroller.
    controller = CmdController([CmdView, ViewUtil], Game)
    controller.main()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1946386 | <filename>data/studio21_generated/interview/0335/starter_code.py
class Solution:
    def tallestBillboard(self, rods: List[int]) -> int:
        """Return the largest height of two disjoint rod subsets with equal
        sums (LeetCode 956), or 0 if no non-empty equal split exists.

        DP over the height difference between the two supports:
        dp[diff] = maximum height of the taller support achieving ``diff``.
        (The original stub had no body at all.)
        """
        dp = {0: 0}
        for rod in rods:
            ndp = dict(dp)
            for diff, taller in dp.items():
                # Option 1: add the rod to the taller support.
                ndp[diff + rod] = max(ndp.get(diff + rod, 0), taller + rod)
                # Option 2: add the rod to the shorter support.
                ndiff = abs(diff - rod)
                ndp[ndiff] = max(ndp.get(ndiff, 0), taller + max(rod - diff, 0))
            dp = ndp
        # diff == 0 means both supports have equal height.
        return dp[0]
| StarcoderdataPython |
4859398 | <reponame>peppasd/LIT<filename>projects/views.py
from django.shortcuts import render, get_object_or_404
from .models import Project, Photo, Member, Label, Value
from .forms import ProjectForm, LabelForm
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.conf import settings
from .utils import existsUser, allUsers_project, allTags_project, getUser, calProgress
from django.contrib.auth.decorators import login_required
import json
# Create your views here.
@login_required
def download(request, pk):
    """Export all per-image label values of a project as JSON (owners only)."""
    project = Project.objects.get(id=pk)
    if request.user not in project.owners.all():
        return HttpResponse('You must be a project owner to download the data.', status=403)
    images = project.images.all()
    exportdata = []
    for image in images:
        photodata = {
            'name': image.name,
            'upload time': str(image.created),
            # NOTE(review): if ``uploader`` is a User instance (not a string),
            # json.dumps below raises TypeError -- confirm the field type.
            'uploaded by': image.uploader,
            'data': []
        }
        values = Value.objects.filter(photo=image)
        for value in values:
            # Each entry is [label name, value].
            data = [value.label.first().name, value.val]
            photodata['data'].append(data)
        exportdata.append(photodata)
    return HttpResponse(json.dumps(exportdata), content_type='text/json')
@login_required
def removeImg(request, slug):
    """Delete the photo identified by *slug*; redirect to its project's images."""
    # NOTE(review): any logged-in user can delete any image (no ownership
    # check) -- confirm this is intended.
    elm = Photo.objects.get(uuid=slug)
    project = elm.project
    elm.delete()
    return HttpResponseRedirect(reverse('project_images', args=[project.id]))
@login_required
def removeProject(request, pk):
    """Delete a project together with its member records."""
    # NOTE(review): no ownership check before deletion -- confirm intended.
    elm = Project.objects.get(pk=pk)
    for member in elm.members.all():
        member.delete()
    elm.delete()
    return HttpResponseRedirect('/projects/')
@login_required
def overview(request):
    """List the user's projects (as member or owner) plus all other projects."""
    project_list = []
    all_projects = []
    for project in Project.objects.all():
        # calProgress returns (progress, image count, tagged count).
        x, y, z = calProgress(project)
        project.progress = x
        for member in project.members.all():
            # NOTE(review): may append the same project once per matching
            # membership row -- confirm duplicates cannot occur.
            if request.user == member.user.first():
                project_list.append(project)
        if request.user in project.owners.all():
            if project not in project_list:
                project_list.append(project)
    # Second pass: everything the user is not involved in.
    for project in Project.objects.all():
        x, y, z = calProgress(project)
        project.progress = x
        if project not in project_list:
            all_projects.append(project)
    context = {
        'project_list': project_list,
        'all_projects': all_projects,
    }
    return render(request, 'overview.html', context=context)
@login_required
def new_project(request):
    """Create a project from ProjectForm; the requester becomes its owner."""
    if request.method == "POST":
        form = ProjectForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.save()
            # Record the creator as owner (M2M add requires a saved instance).
            post.owners.add(request.user)
            post.ownerName = request.user.username
            post.save()
            return HttpResponseRedirect('/projects/')
    else:
        form = ProjectForm()
    # Invalid POSTs fall through here with the bound form (errors shown).
    return render(request, 'new_project.html', {'form': form})
@login_required
def edit_project(request, pk):
    """Edit an existing project via ProjectForm."""
    context = {
        'pk': pk,
    }
    post = get_object_or_404(Project, pk=pk)
    if request.method == "POST":
        form = ProjectForm(request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            post.save()
            return HttpResponseRedirect(reverse('project_overview', args=[pk]))
    else:
        form = ProjectForm(instance=post)
    context = {
        'pk': pk,
        'form': form,
    }
    return render(request, 'edit_project.html', context)
@login_required
def removeTag(request, pk):
    """Delete a label only if it is not referenced by any Value."""
    elm = Label.objects.get(pk=pk)
    project = elm.project
    ok = True
    # NOTE(review): scans every Value in the database; a filtered
    # ``exists()`` query would avoid the full scan -- confirm model relations.
    for value in Value.objects.all():
        if elm in value.label.all():
            ok = False
            break
    if ok:
        elm.delete()
    return HttpResponseRedirect(reverse('project_overview', args=[project.id]))
@login_required
def edit_tag(request, pk):
    """Edit a label via LabelForm; redirect to the owning project on success."""
    context = {
        'pk': pk,
    }
    post = get_object_or_404(Label, pk=pk)
    project = post.project
    if request.method == "POST":
        form = LabelForm(request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            post.save()
            return HttpResponseRedirect(reverse('project_overview', args=[project.id]))
    else:
        form = LabelForm(instance=post)
    context = {
        'pk': pk,
        'form': form,
        'project':project,
    }
    return render(request, 'edit_tag.html', context)
@login_required
def project_overview(request, pk):
    """Project dashboard; a POST with a username adds that user as an owner."""
    ph = ""  # placeholder error message shown when the username is unknown
    project = Project.objects.get(id=pk)
    # calProgress returns (progress, image count, tagged count).
    x, y, z = calProgress(project)
    project.progress = x
    if request.method == 'POST':
        username = request.POST['username']
        if existsUser(username):
            project.owners.add(getUser(username))
        else:
            ph = "User {} does not exist".format(username)
    members = allUsers_project(project)
    tags = allTags_project(project)
    count_images = y
    tagged_images = z
    context = {
        'project': project,
        'members': members,
        'tags': tags,
        'count_images': count_images,
        'ph': ph,
        'tagged_images': tagged_images,
    }
    return render(request, 'project_overview.html', context)
@login_required
def project_images(request, pk):
    """Render the image-gallery page for the project identified by *pk*."""
    return render(request, 'project_images.html',
                  {'project': Project.objects.get(id=pk)})
# File-name extensions accepted by upload_images (lowercase, no dot).
VALID_IMAGE_EXTENSIONS = [
    "jpg",
    "jpeg",
    "png",
]
@login_required
def upload_images(request, pk):
    """GET: render the upload page.  POST: store one uploaded image as a Photo."""
    context = {
        'pk': pk,
    }
    if request.method == "GET":
        return render(request, 'upload_images.html', context)
    elif request.method == "POST":
        uploaded_file = request.FILES['img']
        # NOTE(review): validation relies on the client-supplied filename
        # extension only -- content is not inspected; also uppercase
        # extensions (e.g. ".PNG") are rejected.  Confirm intended.
        extension = uploaded_file.name.split(".")[-1]
        # if uploaded_file.size > settings.MAX_UPLOAD_SIZE:
        #     return HttpResponse("File too big.", status=413)
        if extension not in VALID_IMAGE_EXTENSIONS:
            return HttpResponse("Invalid file extension.", status=415)
        else:
            obj = Photo(file=uploaded_file, name=uploaded_file.name, project=Project.objects.get(id=pk),
                        uploader=request.user)
            obj.save()
            return HttpResponse("Upload successful.")
@login_required
def create_tags(request, pk):
    """Create a label for the project via LabelForm."""
    context = {
        'pk': pk,
    }
    if request.method == "POST":
        form = LabelForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.save()
            # Attach the new label to its project and persist the link.
            post.project = Project.objects.get(id=pk)
            post.save()
            return HttpResponseRedirect(reverse('project_overview', args=[pk]))
    else:
        form = LabelForm()
    context = {
        'pk': pk,
        'form': form,
    }
    return render(request, 'create_tags.html', context)
| StarcoderdataPython |
82367 | import configparser
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class SettingsDialog(Gtk.Dialog):
    """Modal settings dialog for the Samsa Kafka viewer.

    Widgets are pre-filled from the config file for persistence, and
    ``get_value`` returns the edited settings as a plain dict.
    """
    def __init__(self, parent, config_file):
        buttons = (Gtk.STOCK_OK, Gtk.ResponseType.OK)
        Gtk.Dialog.__init__(self, "Settings", parent, 0, buttons)
        # Load defaults from file for persistence
        self.load_initial_values(config_file)
        self.set_size_request(350, 0)
        box = self.get_content_area()
        # Kafka bootstrap servers (comma-separated string).
        row = Gtk.HBox()
        _ = Gtk.Label("Kafka Servers")
        row.pack_start(_, False, False, 0)
        self.servers = Gtk.Entry()
        self.servers.get_buffer().set_text(self.initial_values['kafka_servers'], -1)
        row.pack_end(self.servers, False, False, 0)
        box.pack_start(row, False, False, 0)
        # Topics subscribed at startup (optional).
        row = Gtk.HBox()
        _ = Gtk.Label("Initial Topics (Optional)")
        row.pack_start(_, False, False, 0)
        self.topics = Gtk.Entry()
        self.topics.get_buffer().set_text(self.initial_values['topics'], -1)
        row.pack_end(self.topics, False, False, 0)
        box.pack_start(row, False, False, 0)
        # TODO: Gtk.Adjustment seems to default to the min half the time
        row = Gtk.HBox()
        _ = Gtk.Label("Polling Frequency (ms)")
        row.pack_start(_, False, False, 0)
        freq_adj = Gtk.Adjustment(self.initial_values['polling_freq'], 1, 1000, 1, 10, 0)
        self.freq = Gtk.SpinButton()
        self.freq.set_adjustment(freq_adj)
        row.pack_end(self.freq, False, False, 0)
        box.pack_start(row, False, False, 0)
        # Maximum number of messages retained per topic.
        row = Gtk.HBox()
        _ = Gtk.Label("Max History")
        row.pack_start(_, False, False, 0)
        history_adj = Gtk.Adjustment(self.initial_values['max_history'], 1, 5000, 100, 1000, 0)
        self.history = Gtk.SpinButton()
        self.history.set_adjustment(history_adj)
        row.pack_end(self.history, False, False, 0)
        box.pack_start(row, False, False, 0)
        # Layout selector: tabs vs. tiles (radio group).
        row = Gtk.HBox()
        _ = Gtk.Label("Layout")
        row.pack_start(_, False, False, 0)
        layout_options = Gtk.HBox()
        self.tab_button = Gtk.RadioButton.new_with_label_from_widget(None, "Tabs")
        layout_options.pack_start(self.tab_button, True, True, 0)
        self.tile_button = Gtk.RadioButton.new_with_label_from_widget(self.tab_button, "Tiles")
        layout_options.pack_start(self.tile_button, True, True, 0)
        row.pack_end(layout_options, False, False, 0)
        box.pack_start(row, False, False, 0)
        if self.initial_values['view_mode'] == "tabs":
            self.tab_button.set_active(True)
        else:
            self.tile_button.set_active(True)
        self.show_all()
    def load_initial_values(self, config_file):
        """Read the ``[samsa]`` section of *config_file* into self.initial_values."""
        # The dict passed to SafeConfigParser supplies fallback defaults.
        stored_settings = configparser.SafeConfigParser({
            'kafka_servers': "",
            'topics': "",
            'polling_freq': "100",
            'max_history': "1000",
            'view_mode': "tabs"
        })
        stored_settings.read(config_file)
        self.initial_values = {
            "kafka_servers": stored_settings.get('samsa', 'kafka_servers'),
            "topics": stored_settings.get('samsa', 'topics'),
            "polling_freq": stored_settings.getint('samsa', 'polling_freq'),
            "max_history": stored_settings.getint('samsa', 'max_history'),
            "view_mode": stored_settings.get('samsa', 'view_mode')
        }
    def get_value(self):
        """Return the current widget values as a settings dict."""
        return {
            'kafka_servers': self.servers.get_buffer().get_text(),
            'topics': self.topics.get_buffer().get_text(),
            'polling_freq': self.freq.get_value_as_int(),
            'max_history': self.history.get_value_as_int(),
            'view_mode': 'tabs' if self.tab_button.get_active() else 'tiles'
        }
| StarcoderdataPython |
5192868 | <reponame>FelixVi/Bedrock<filename>dsp/digaree/cgen_srf.py
#!/usr/bin/python
# SRF cavity analog state computer
# Takes in cavity field, forward, and reverse vector measurements
# and computes the cavity detune frequency, decay parameter, and
# power imbalance for the purposes of a tuning loop and quench detector.
# Keeps a history of the previous four cavity field measurements so it
# can get dV/dt.
# Output of this program should be both valid c99 and valid input
# for the scheduler/mapper.
# See the rest of the Digaree infrastructure for details.
# NOTE: this is a declarative DSL script — every call below emits code, so
# statement order is significant; do not reorder calls.
from cgen_lib import cgen_init, given, mul, sub, cpx_sub, cpx_mul
from cgen_lib import cpx_scale, cpx_dot, cpx_inv_conj, cpx_mul_conj
from cgen_lib import cpx_mag, set_result, cpx_persist, cpx_copy, cpx_add
cgen_init("cgen_srf.py")
# History of measured cavity voltages, used to compute dV/dt
# Initial value in simulation should be settable from initgen?
# Cut-and-paste for now, until we at least get the right answer.
cpx_persist("v1")
cpx_persist("v2")
cpx_persist("v3")
cpx_persist("v4")
# These lines declare the input variables,
# first six streamed from the radio
given("k_r")  # forward
given("k_i")  # forward
given("r_r")  # reverse
given("r_i")  # reverse
given("v_r")  # cavity
given("v_i")  # cavity
# next eight host-settable
given("beta_r")
given("beta_i")
given("invT")
given("two")  # needed by 1/x macro
given("sclr")
given("sclf")
given("sclv")
given("powt")
# Get (still unscaled) derivative
# Implements [-2 -1 0 1 2] FIR
cpx_sub("dv1", "v", "v4", 3)  # note multiply-by-4
cpx_sub("dv2", "v1", "v3", 2)  # note multiply-by-2
cpx_add("dvx", "dv1", "dv2", 3)  # note multiply-by-4
# Result is the amount that V will change in 80*T.
# Including the second-order CIC used to generate input samples,
# this computation has a 3*T group delay.
# State-variable computation of the complex number a,
# yielding detune frequency and decay rate
cpx_inv_conj("x5", "v", 0, 3)
cpx_scale("dvdt", "dvx", "invT", 1)
cpx_mul("x3", "k", "beta", 1, 1)
cpx_sub("x4", "dvdt", "x3", 2)  # some evidence this shift should be 1
cpx_mul_conj("a", "x4", "x5", 2, 2)
set_result("ab", "a_r", "a_i")
# Power balance measure of cavity dissipation; uses magnitudes only
cpx_mag("magr", "r", 0)  # reverse
mul("powr", "sclr", "magr", 0)
cpx_mag("magf", "k", 0)  # forward
mul("powf", "sclf", "magf", 0)
sub("wgnet", "powf", "powr", 1)  # net power transferred by waveguide
cpx_dot("dv2", "v", "dvx", 2)  # 2 * V * dV/dt = d/dt(V^2)
mul("dudt", "dv2", "sclv", 3)  # dU/dt = power to stored energy
sub("diss", "wgnet", "dudt", 1)  # est. of dissipation in cold cavity
sub("perr", "diss", "powt", 1)  # allow for measurement error
set_result("cd", "diss", "perr")  # trigger quench fault if perr > 0
# Watch these like a hawk: order of execution matters,
# unlike everything else here
# (the v1..v4 history shift must happen oldest-first, after all reads of v1..v4)
cpx_copy("v4", "v3")
cpx_copy("v3", "v2")
cpx_copy("v2", "v1")
cpx_copy("v1", "v")
| StarcoderdataPython |
11362264 | """API for SpatioTemporal Asset Catalog items."""
import os
import re
from typing import Any, Dict, List, Optional, Union
from urllib.parse import urlencode
from rasterio.transform import from_bounds
from rio_tiler_crs import STACReader
from titiler.api import utils
from titiler.api.deps import (
CommonImageParams,
CommonMetadataParams,
CommonTileParams,
TileMatrixSetNames,
morecantile,
request_hash,
)
from titiler.db.memcache import CacheLayer
from titiler.models.cog import cogBounds, cogInfo, cogMetadata
from titiler.models.mapbox import TileJSON
from titiler.ressources.enums import ImageMimeTypes, ImageType
from titiler.ressources.responses import ImgResponse
from titiler.templates.factory import web_template
from fastapi import APIRouter, Depends, HTTPException, Path, Query
from starlette import status
from starlette.requests import Request
from starlette.responses import HTMLResponse, Response
router = APIRouter()
@router.get(
    "/bounds",
    response_model=cogBounds,
    responses={200: {"description": "Return the bounds of the STAC item."}},
)
async def stac_bounds(
    resp: Response, url: str = Query(..., description="STAC item URL."),
):
    """Return the bounding box of a STAC item."""
    with STACReader(url) as stac:
        item_bounds = stac.bounds
    # Bounds of a published item are stable; allow clients to cache for 1h.
    resp.headers["Cache-Control"] = "max-age=3600"
    return {"bounds": item_bounds}
@router.get(
    "/info",
    response_model=Union[List[str], Dict[str, cogInfo]],
    response_model_exclude={"__all__": {"minzoom", "maxzoom", "center"}},
    response_model_exclude_none=True,
    responses={200: {"description": "Return basic info for STAC item's assets"}},
)
async def stac_info(
    resp: Response,
    url: str = Query(..., description="STAC item URL."),
    assets: str = Query(None, description="comma (,) separated list of asset names."),
):
    """Return basic info on STAC item's COG assets.

    With no ``assets`` selection, just list the available asset names.
    """
    resp.headers["Cache-Control"] = "max-age=3600"
    with STACReader(url) as stac:
        if assets:
            return stac.info(assets.split(","))
        return stac.assets
@router.get(
    "/metadata",
    response_model=Dict[str, cogMetadata],
    response_model_exclude={"__all__": {"minzoom", "maxzoom", "center"}},
    response_model_exclude_none=True,
    responses={200: {"description": "Return the metadata for STAC item's assets."}},
)
async def stac_metadata(
    request: Request,
    resp: Response,
    url: str = Query(..., description="STAC item URL."),
    assets: str = Query(..., description="comma (,) separated list of asset names."),
    metadata_params: CommonMetadataParams = Depends(),
):
    """Return statistics/metadata for the selected assets of a STAC item.

    All tuning knobs (percentiles, nodata, indexes, histogram options, ...)
    come pre-parsed from the shared CommonMetadataParams dependency.
    """
    with STACReader(url) as stac:
        info = stac.metadata(
            assets.split(","),
            metadata_params.pmin,
            metadata_params.pmax,
            nodata=metadata_params.nodata,
            indexes=metadata_params.indexes,
            max_size=metadata_params.max_size,
            hist_options=metadata_params.hist_options,
            bounds=metadata_params.bounds,
            **metadata_params.kwargs,
        )
    resp.headers["Cache-Control"] = "max-age=3600"
    return info
# Route kwargs shared by every image-returning endpoint below: advertises the
# binary media types in the OpenAPI schema and forces the raw ImgResponse class.
params: Dict[str, Any] = {
    "responses": {
        200: {
            "content": {
                "image/png": {},
                "image/jpg": {},
                "image/webp": {},
                "image/tiff": {},
                "application/x-binary": {},
            },
            "description": "Return an image.",
        }
    },
    "response_class": ImgResponse,
}
@router.get(r"/tiles/{z}/{x}/{y}", **params)
@router.get(r"/tiles/{z}/{x}/{y}.{format}", **params)
@router.get(r"/tiles/{z}/{x}/{y}@{scale}x", **params)
@router.get(r"/tiles/{z}/{x}/{y}@{scale}x.{format}", **params)
@router.get(r"/tiles/{TileMatrixSetId}/{z}/{x}/{y}", **params)
@router.get(r"/tiles/{TileMatrixSetId}/{z}/{x}/{y}.{format}", **params)
@router.get(r"/tiles/{TileMatrixSetId}/{z}/{x}/{y}@{scale}x", **params)
@router.get(r"/tiles/{TileMatrixSetId}/{z}/{x}/{y}@{scale}x.{format}", **params)
async def stac_tile(
    z: int = Path(..., ge=0, le=30, description="Mercator tiles's zoom level"),
    x: int = Path(..., description="Mercator tiles's column"),
    y: int = Path(..., description="Mercator tiles's row"),
    TileMatrixSetId: TileMatrixSetNames = Query(
        TileMatrixSetNames.WebMercatorQuad, # type: ignore
        description="TileMatrixSet Name (default: 'WebMercatorQuad')",
    ),
    scale: int = Query(
        1, gt=0, lt=4, description="Tile size scale. 1=256x256, 2=512x512..."
    ),
    format: ImageType = Query(None, description="Output image type. Default is auto."),
    url: str = Query(..., description="STAC Item URL."),
    assets: str = Query("", description="comma (,) separated list of asset names."),
    image_params: CommonTileParams = Depends(),
    cache_client: CacheLayer = Depends(utils.get_cache),
    request_id: str = Depends(request_hash),
):
    """Create a map tile from a STAC item.

    Pipeline: cache lookup -> read -> postprocess (rescale/color) -> encode.
    Timings for each stage are reported in the X-Server-Timings header.
    """
    timings = []
    headers: Dict[str, str] = {}
    if not image_params.expression and not assets:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Must pass Expression or Asset list.",
        )
    tilesize = scale * 256
    tms = morecantile.tms.get(TileMatrixSetId.name)
    content = None
    if cache_client:
        # Best-effort cache read: any failure just falls through to a fresh render.
        try:
            content, ext = cache_client.get_image_from_cache(request_id)
            format = ImageType[ext]
            headers["X-Cache"] = "HIT"
        except Exception:
            content = None
    if not content:
        with utils.Timer() as t:
            with STACReader(url, tms=tms) as stac:
                # NOTE(review): with assets == "" (expression-only request),
                # assets.split(",") is [""] — presumably ignored downstream
                # when an expression is set; confirm against rio-tiler.
                tile, mask = stac.tile(
                    x,
                    y,
                    z,
                    assets=assets.split(","),
                    tilesize=tilesize,
                    indexes=image_params.indexes,
                    expression=image_params.expression,
                    nodata=image_params.nodata,
                )
        timings.append(("Read", t.elapsed))
        if not format:
            # Auto-pick: JPEG when fully opaque, PNG when transparency needed.
            format = ImageType.jpg if mask.all() else ImageType.png
        with utils.Timer() as t:
            tile = utils.postprocess(
                tile,
                mask,
                rescale=image_params.rescale,
                color_formula=image_params.color_formula,
            )
        timings.append(("Post-process", t.elapsed))
        bounds = tms.xy_bounds(x, y, z)
        # Georeference the output (used when encoding e.g. GeoTIFF).
        dst_transform = from_bounds(*bounds, tilesize, tilesize)
        with utils.Timer() as t:
            content = utils.reformat(
                tile,
                mask,
                img_format=format,
                colormap=image_params.color_map,
                transform=dst_transform,
                crs=tms.crs,
            )
        timings.append(("Format", t.elapsed))
        if cache_client and content:
            cache_client.set_image_cache(request_id, (content, format.value))
    if timings:
        headers["X-Server-Timings"] = "; ".join(
            ["{} - {:0.2f}".format(name, time * 1000) for (name, time) in timings]
        )
    return ImgResponse(
        content, media_type=ImageMimeTypes[format.value].value, headers=headers,
    )
@router.get(r"/preview", **params)
@router.get(r"/preview.{format}", **params)
async def stac_preview(
    format: ImageType = Query(None, description="Output image type. Default is auto."),
    url: str = Query(..., description="STAC Item URL."),
    assets: str = Query("", description="comma (,) separated list of asset names."),
    image_params: CommonImageParams = Depends(),
):
    """Create a downsampled preview image of STAC assets.

    Requires either an asset list or a band-math expression; stage timings are
    reported via the X-Server-Timings header.
    """
    timings = []
    headers: Dict[str, str] = {}
    if not image_params.expression and not assets:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Must pass Expression or Asset list.",
        )
    with utils.Timer() as t:
        with STACReader(url) as stac:
            data, mask = stac.preview(
                assets=assets.split(","),
                expression=image_params.expression,
                height=image_params.height,
                width=image_params.width,
                max_size=image_params.max_size,
                indexes=image_params.indexes,
                nodata=image_params.nodata,
                **image_params.kwargs,
            )
    timings.append(("Read", t.elapsed))
    if not format:
        # Auto-pick: JPEG when fully opaque, PNG when transparency is needed.
        format = ImageType.jpg if mask.all() else ImageType.png
    with utils.Timer() as t:
        data = utils.postprocess(
            data,
            mask,
            rescale=image_params.rescale,
            color_formula=image_params.color_formula,
        )
    timings.append(("Post-process", t.elapsed))
    with utils.Timer() as t:
        content = utils.reformat(
            data, mask, img_format=format, colormap=image_params.color_map,
        )
    # Fix: the "Format" timing was appended twice, double-reporting the stage.
    timings.append(("Format", t.elapsed))
    if timings:
        headers["X-Server-Timings"] = "; ".join(
            ["{} - {:0.2f}".format(name, time * 1000) for (name, time) in timings]
        )
    return ImgResponse(
        content, media_type=ImageMimeTypes[format.value].value, headers=headers,
    )
# @router.get(r"/crop/{minx},{miny},{maxx},{maxy}", **params)
@router.get(r"/crop/{minx},{miny},{maxx},{maxy}.{format}", **params)
async def stac_part(
    minx: float = Path(..., description="Bounding box min X"),
    miny: float = Path(..., description="Bounding box min Y"),
    maxx: float = Path(..., description="Bounding box max X"),
    maxy: float = Path(..., description="Bounding box max Y"),
    format: ImageType = Query(None, description="Output image type."),
    url: str = Query(..., description="STAC Item URL."),
    assets: str = Query("", description="comma (,) separated list of asset names."),
    image_params: CommonImageParams = Depends(),
):
    """Create an image from a bounding-box crop of STAC assets.

    The extension-less route is disabled (commented decorator above), so
    ``format`` is always supplied by the path.
    """
    timings = []
    headers: Dict[str, str] = {}
    if not image_params.expression and not assets:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Must pass Expression or Asset list.",
        )
    with utils.Timer() as t:
        with STACReader(url) as stac:
            data, mask = stac.part(
                [minx, miny, maxx, maxy],
                height=image_params.height,
                width=image_params.width,
                max_size=image_params.max_size,
                assets=assets.split(","),
                expression=image_params.expression,
                indexes=image_params.indexes,
                nodata=image_params.nodata,
                **image_params.kwargs,
            )
    timings.append(("Read", t.elapsed))
    with utils.Timer() as t:
        data = utils.postprocess(
            data,
            mask,
            rescale=image_params.rescale,
            color_formula=image_params.color_formula,
        )
    timings.append(("Post-process", t.elapsed))
    with utils.Timer() as t:
        content = utils.reformat(
            data, mask, img_format=format, colormap=image_params.color_map
        )
    # Fix: the "Format" timing was appended twice, double-reporting the stage.
    timings.append(("Format", t.elapsed))
    if timings:
        headers["X-Server-Timings"] = "; ".join(
            ["{} - {:0.2f}".format(name, time * 1000) for (name, time) in timings]
        )
    return ImgResponse(
        content, media_type=ImageMimeTypes[format.value].value, headers=headers,
    )
@router.get(
    r"/point/{lon},{lat}",
    responses={200: {"description": "Return a value for a point"}},
)
async def cog_point(
    lon: float = Path(..., description="Longitude"),
    lat: float = Path(..., description="Latitude"),
    url: str = Query(..., description="Cloud Optimized GeoTIFF URL."),
    assets: str = Query("", description="comma (,) separated list of asset names."),
    expression: Optional[str] = Query(
        None,
        title="Band Math expression",
        description="rio-tiler's band math expression (e.g B1/B2)",
    ),
    bidx: Optional[str] = Query(
        None, title="Band indexes", description="comma (',') delimited band indexes",
    ),
    asset_expression: Optional[str] = Query(
        None,
        title="Band Math expression for assets bands",
        description="rio-tiler's band math expression (e.g B1/B2)",
    ),
):
    """Sample the asset/expression values of a STAC item at one lon/lat point."""
    if not expression and not assets:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Must pass Expression or Asset list.",
        )
    # Extract all integer tokens from bidx, e.g. "1,2,3" -> (1, 2, 3).
    indexes = tuple(int(s) for s in re.findall(r"\d+", bidx)) if bidx else None
    timings = []
    headers: Dict[str, str] = {}
    with utils.Timer() as t:
        with STACReader(url) as stac:
            # NOTE(review): unlike the other endpoints, `assets` is passed as
            # the raw comma-separated string (no .split(",")) — confirm
            # STACReader.point accepts a string, otherwise this is a bug.
            values = stac.point(
                lon,
                lat,
                assets=assets,
                expression=expression,
                indexes=indexes,
                asset_expression=asset_expression,
            )
    timings.append(("Read", t.elapsed))
    if timings:
        headers["X-Server-Timings"] = "; ".join(
            ["{} - {:0.2f}".format(name, time * 1000) for (name, time) in timings]
        )
    # NOTE(review): `headers` is built but never attached to the response here.
    return {"coordinates": [lon, lat], "values": values}
@router.get(
    "/tilejson.json",
    response_model=TileJSON,
    responses={200: {"description": "Return a tilejson"}},
    response_model_exclude_none=True,
)
@router.get(
    "/{TileMatrixSetId}/tilejson.json",
    response_model=TileJSON,
    responses={200: {"description": "Return a tilejson"}},
    response_model_exclude_none=True,
)
async def stac_tilejson(
    request: Request,
    response: Response,
    TileMatrixSetId: TileMatrixSetNames = Query(
        TileMatrixSetNames.WebMercatorQuad,  # type: ignore
        description="TileMatrixSet Name (default: 'WebMercatorQuad')",
    ),
    url: str = Query(..., description="STAC Item URL."),
    assets: str = Query("", description="comma (,) separated list of asset names."),
    expression: Optional[str] = Query(
        None,
        title="Band Math expression",
        description="rio-tiler's band math expression (e.g B1/B2)",
    ),
    tile_format: Optional[ImageType] = Query(
        None, description="Output image type. Default is auto."
    ),
    tile_scale: int = Query(
        1, gt=0, lt=4, description="Tile size scale. 1=256x256, 2=512x512..."
    ),
    minzoom: Optional[int] = Query(None, description="Overwrite default minzoom."),
    maxzoom: Optional[int] = Query(None, description="Overwrite default maxzoom."),
):
    """Return a TileJSON document for a STAC item.

    The tile URL template forwards all remaining query parameters to the
    /stac/tiles endpoint; tilejson-only parameters are stripped first.
    """
    scheme = request.url.scheme
    host = request.headers["host"]
    kwargs = dict(request.query_params)
    # Drop parameters that configure this document rather than the tiles.
    kwargs.pop("tile_format", None)
    kwargs.pop("tile_scale", None)
    kwargs.pop("TileMatrixSetId", None)
    kwargs.pop("minzoom", None)
    kwargs.pop("maxzoom", None)
    if not expression and not assets:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Expression or Assets HAVE to be set in the queryString.",
        )
    qs = urlencode(list(kwargs.items()))
    if tile_format:
        tile_url = f"{scheme}://{host}/stac/tiles/{TileMatrixSetId.name}/{{z}}/{{x}}/{{y}}@{tile_scale}x.{tile_format}?{qs}"
    else:
        tile_url = f"{scheme}://{host}/stac/tiles/{TileMatrixSetId.name}/{{z}}/{{x}}/{{y}}@{tile_scale}x?{qs}"
    tms = morecantile.tms.get(TileMatrixSetId.name)
    with STACReader(url, tms=tms) as stac:
        center = list(stac.center)
        # Fix: test against None, not truthiness — zoom level 0 is a valid
        # override and was previously ignored by `if minzoom:` / `minzoom or ...`.
        if minzoom is not None:
            center[-1] = minzoom
        tjson = {
            "bounds": stac.bounds,
            "center": tuple(center),
            "minzoom": stac.minzoom if minzoom is None else minzoom,
            "maxzoom": stac.maxzoom if maxzoom is None else maxzoom,
            "name": os.path.basename(url),
            "tiles": [tile_url],
        }
    response.headers["Cache-Control"] = "max-age=3600"
    return tjson
@router.get("/viewer", response_class=HTMLResponse, tags=["Webpage"])
def stac_viewer(request: Request, template=Depends(web_template)):
    """Render the SpatioTemporal Asset Catalog viewer web page."""
    page = template(request, "stac_index.html", "stac_tilejson", "stac_info")
    return page
| StarcoderdataPython |
11254427 | # -*- coding: utf-8 -*-
# Package metadata for the "webbot" distribution (read by packaging tooling).
# NOTE(review): <NAME>/<EMAIL> look like anonymization placeholders — restore
# the real author details before publishing.
__version__ = '1.0'
__title__ = 'webbot'
__description__ = 'webbot'
__url__ = 'https://github.com/joelee2012/webbot'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2021 Joe Lee'
__documentation__ = ''
| StarcoderdataPython |
6568914 | from config import BOTNAME,BOTTOKEN,DEBUG,PROXY,PY
from api import GetUserInfo,ChangeUserInfo
import requests
# One dedicated HTTP session per worker thread (requests.Session is not
# guaranteed thread-safe, so the threads must not share one).
reqChange = requests.Session()
reqSender = requests.Session()
reqUpdater = requests.Session()
reqCallback = requests.Session()
req = requests.Session()
if len(PROXY) > 0:
    for _session in (reqChange, reqSender, reqUpdater, reqCallback, req):
        # A fresh dict per session, matching the original per-session assignment.
        _session.proxies = {"http": PROXY, "https": PROXY}
import time
import threading
import json
import re
if DEBUG:
import sys
import traceback
# /help reply text. User-facing and intentionally in Chinese — do not translate.
HELPMESSAGE='''帮助
/help 帮助
/blackjack 21点
/horse 赛马
/dice 骰子
/bet 金额|百分比|sh 下注(不支持小数)
例: /bet 10 或 /bet 10%
'''
def MakeRequest(method, data="", robj=req):
    """Call a Telegram Bot API method and return the decoded JSON reply.

    An empty ``data`` means GET; anything else is POSTed as form data.
    ``robj`` selects which thread's requests.Session performs the call.
    """
    if DEBUG:
        print("Make:" + method)
    endpoint = "https://api.telegram.org/bot" + BOTTOKEN + "/" + method
    if data == "":
        response = robj.get(endpoint)
    else:
        response = robj.post(endpoint, data=data)
    payload = json.loads(response.text)
    if payload["ok"] != True:
        logger.error(response.text)
    if DEBUG:
        print("Fin:" + response.text)
    return payload
import logging
# Root logger: errors only; uncomment the DEBUG line below for full tracing.
logging.basicConfig(level = logging.ERROR,format = '[%(asctime)s][%(levelname)s]: %(message)s')
#logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
# Telegram Bot
# Pending Telegram updates, produced by ServiceChange and drained by GetChange.
ChangeQueue = []
ChangeLock = threading.Lock()
def ServiceChange():
    """Long-poll Telegram getUpdates forever, appending updates to ChangeQueue.

    The last processed update id is persisted in the "BotUpdateID" file so a
    restart does not re-deliver old updates.
    """
    global ChangeQueue
    try:
        # Best effort: the file is absent or garbled on first run.
        with open("BotUpdateID") as f:
            LastID = int(f.read())
    except Exception:  # narrowed from bare except: so Ctrl-C still works
        LastID = 0
    while True:
        try:
            res = MakeRequest(
                "getUpdates",
                {"offset": str(LastID + 1),
                 "allowed_updates": "[\"message\",\"callback_query\"]",
                 "timeout": 10},
                robj=reqChange)
            if DEBUG:
                print("MKREQ ", res)
            if res["ok"] == True:
                lis = res["result"]
                # `with` guarantees the lock is released even if the
                # concatenation raises (acquire/release could deadlock).
                with ChangeLock:
                    ChangeQueue = ChangeQueue + lis
                if len(lis) > 0:
                    LastID = lis[-1]["update_id"]
                    with open("BotUpdateID", "w") as f:
                        f.write(str(LastID))
        except Exception:
            logger.error("Change")
            # ThErr is expected to be defined later in this module (prints the
            # traceback when DEBUG) — keep the call unchanged.
            ThErr()
        time.sleep(0.2)
# Outgoing messages queued by SendMessage, drained by ServiceSender.
SenderQueue = []
SenderLock = threading.Lock()
# Maps caller-supplied request ids (negative ints) -> Telegram message_id.
SendReqIDMap = {}
SendReqIDTot = -1
def ServiceSender():  # TODO: real rate limit
    """Drain SenderQueue forever, POSTing each entry via sendMessage.

    Entries carrying a "reqid" get their resulting Telegram message_id
    recorded in SendReqIDMap so callers can resolve it later.
    """
    global SenderQueue, SendReqIDMap
    while True:
        try:
            sttime = time.time()
            # `with` keeps the lock exception-safe (no deadlock on error).
            with SenderLock:
                todolis = list(SenderQueue)
                SenderQueue.clear()
            for it in todolis:
                resarr = {"text": it["text"], "chat_id": it["chat_id"]}
                if "reply_markup" in it:
                    resarr["reply_markup"] = json.dumps(it["reply_markup"])
                if "reply_to_message_id" in it:
                    resarr["reply_to_message_id"] = it["reply_to_message_id"]
                ret = MakeRequest("sendMessage", resarr, robj=reqSender)
                if "reqid" in it:
                    SendReqIDMap[it["reqid"]] = ret["result"]["message_id"]
                if DEBUG:
                    print("SEND ", resarr)
                # NOTE(review): sttime is taken once per drain, so only the
                # first message of a batch is actually throttled — kept as the
                # original behaved; confirm intent before changing.
                edtime = time.time()
                net = max(0.1 - edtime + sttime, 0)
                time.sleep(net)
        except Exception:  # narrowed from bare except:
            logger.error("Sender")
            ThErr()
            time.sleep(0.1)
# Pending message edits queued by UpdateMessage, drained by ServiceUpdater.
UpdaterQueue = []
UpdaterLock = threading.Lock()
def ServiceUpdater():  # TODO: merge & rate limit
    """Drain UpdaterQueue forever, applying each edit via editMessageText."""
    global UpdaterQueue
    while True:
        try:
            sttime = time.time()
            # `with` keeps the lock exception-safe (no deadlock on error).
            with UpdaterLock:
                todolis = list(UpdaterQueue)
                UpdaterQueue.clear()
            for it in todolis:
                resarr = {"text": it["text"], "chat_id": it["chat_id"], "message_id": it["message_id"]}
                if "reply_markup" in it:
                    resarr["reply_markup"] = json.dumps(it["reply_markup"])
                MakeRequest("editMessageText", resarr, robj=reqUpdater)
                # NOTE(review): pacing uses the pre-batch sttime, so only the
                # first edit of a batch is throttled — original behavior kept.
                edtime = time.time()
                net = max(0.1 - edtime + sttime, 0)
                time.sleep(net)
        except Exception:  # narrowed from bare except:
            logger.error("Updater")
            ThErr()
            time.sleep(0.1)
# Pending callback-query answers queued by AnswerCallback.
CallbackQueue = []
CallbackLock = threading.Lock()
def ServiceCallback():  # TODO: merge
    """Drain CallbackQueue forever, answering each via answerCallbackQuery."""
    global CallbackQueue
    while True:
        try:
            sttime = time.time()
            # `with` keeps the lock exception-safe (no deadlock on error).
            with CallbackLock:
                todolis = list(CallbackQueue)
                CallbackQueue.clear()
            for it in todolis:
                resarr = {"callback_query_id": it["id"]}
                if "text" in it:
                    resarr["text"] = it["text"]
                if "alert" in it:
                    resarr["show_alert"] = it["alert"]
                # Fix: this thread previously shared reqUpdater with the
                # Updater thread; requests.Session is not thread-safe, and the
                # dedicated reqCallback session was created but never used.
                MakeRequest("answerCallbackQuery", resarr, robj=reqCallback)
                edtime = time.time()
                net = max(0.1 - edtime + sttime, 0)
                time.sleep(net)
        except Exception:  # narrowed from bare except:
            logger.error("Callback")
            ThErr()
            time.sleep(0.1)
def GetChange():
    """Atomically take ownership of all pending Telegram updates.

    Returns a snapshot list and empties the shared queue under the lock.
    """
    global ChangeQueue
    with ChangeLock:  # exception-safe, unlike paired acquire()/release()
        ret = list(ChangeQueue)
        ChangeQueue.clear()
    return ret
def SendMessage(text, chatid, reply=0, button={}, reqid=0):
    """Queue a Telegram message for ServiceSender to deliver.

    reply:  message_id to reply to (0 = none).
    button: optional reply_markup dict (the mutable default is never mutated).
    reqid:  caller-chosen negative id used to look up the resulting
            message_id in SendReqIDMap once the message is sent.
    """
    global SenderQueue
    obj = {"text": text, "chat_id": chatid}
    if len(button) != 0:
        obj["reply_markup"] = button
    if reply != 0:
        obj["reply_to_message_id"] = reply
    if reqid != 0:
        obj["reqid"] = reqid
    with SenderLock:  # exception-safe lock handling
        SenderQueue.append(obj)
def UpdateMessage(text, chatid, messid, button={}):
    """Queue an edit for an existing message.

    If an edit for the same (chat, message) pair is already pending, it is
    updated in place instead of enqueuing a second API call.
    """
    global UpdaterQueue
    obj = {"text": text, "chat_id": chatid, "message_id": messid}
    if len(button) != 0:
        obj["reply_markup"] = button
    with UpdaterLock:  # exception-safe lock handling
        merged = False
        for pending in UpdaterQueue:
            if pending["chat_id"] == chatid and pending["message_id"] == messid:
                # Coalesce: the newest text/keyboard wins.
                merged = True  # (original set this twice; once is enough)
                pending["text"] = text
                if len(button) != 0:
                    pending["reply_markup"] = button
                elif "reply_markup" in pending:
                    # Newer edit carries no keyboard, so drop the stale one.
                    pending.pop("reply_markup")
        if not merged:
            UpdaterQueue.append(obj)
def AnswerCallback(callbackid, text="", isalert=False):
    """Queue an answer for a callback query (toast, or popup when isalert)."""
    global CallbackQueue
    obj = {"id": callbackid}
    if len(text) != 0:
        obj["text"] = text
    if isalert:
        obj["alert"] = True
    with CallbackLock:  # exception-safe lock handling
        CallbackQueue.append(obj)
# Start the four background workers: update poller, message sender,
# message editor, and callback answerer.
# NOTE(review): the threads are non-daemon, so they keep the process alive
# after the main thread exits — confirm that is intended before changing.
ObjThreadServiceChange=threading.Thread(target=ServiceChange)
ObjThreadServiceChange.start()
ObjThreadServiceSender=threading.Thread(target=ServiceSender)
ObjThreadServiceSender.start()
ObjThreadServiceUpdater=threading.Thread(target=ServiceUpdater)
ObjThreadServiceUpdater.start()
ObjThreadServiceCallback=threading.Thread(target=ServiceCallback)
ObjThreadServiceCallback.start()
# Bot end
# Game Obj
class GameDiceObj(object):
    """One round of the dice (sic-bo style) game: bet small / big / triple."""
    def __init__(self, userlist):
        """userlist: {user_id: {"name": display name, "money": staked amount}}."""
        self.player = userlist
        # Per-player pick: 0 = pending, 1 = small(小), 2 = big(大), 3 = triple(围)
        self.playerst = {uid: 0 for uid in userlist}
        self.NeedUpdate = True   # main loop should re-render the message
        self.NeedEnd = False     # main loop should settle the round
        self.lastime = time.time()
    def GenMess(self):
        """Render the live scoreboard for this round."""
        info = ["?", "小", "大", "围"]
        mess = "骰子"
        for uid in self.player:
            mess += "\n" + self.player[uid]["name"] + "(" + str(self.player[uid]["money"]) + "): " + info[self.playerst[uid]]
        return mess
    def GenButton(self, chatid):
        """Inline keyboard: small / triple / big picks plus a force-end button."""
        tag = str(chatid)
        return [
            [{"text": "小", "callback_data": tag + "+s"},
             {"text": "围", "callback_data": tag + "+m"},
             {"text": "大", "callback_data": tag + "+l"}],
            [{"text": "强制结束", "callback_data": tag + "+E"}],
        ]
    def UserCmd(self, uid, action):
        """Apply a button press: 's'/'l'/'m' pick a side, 'E' force-ends."""
        if action == "E":
            # Anyone may force settlement once the table idled for 15 seconds.
            if time.time() - self.lastime > 15:
                self.NeedEnd = True
            return
        if self.NeedEnd:
            return
        if uid not in self.player:
            return
        if self.playerst[uid] != 0:
            return  # choice already locked in
        self.NeedUpdate = True
        if action == "s":
            self.playerst[uid] = 1
        elif action == "l":
            self.playerst[uid] = 2
        else:
            self.playerst[uid] = 3
        self.lastime = time.time()
        return
    def NextTick(self):
        # Settle automatically once every player has picked a side.
        self.NeedEnd |= all(st != 0 for st in self.playerst.values())
    def EndGame(self):
        """Roll three dice, settle every bet, and return (message, results)."""
        # Fix: the original used __import__("random")'s default Mersenne
        # Twister here while the other games use SystemRandom — use the
        # crypto-quality RNG consistently for a gambling roll.
        import random  # local import: module top level does not import random
        rng = random.SystemRandom()
        info = ["?", "小", "大", "围"]
        res = [rng.randint(1, 6) for _ in range(3)]
        if res[0] == res[1] == res[2]:
            typ = 3   # triple (围) — beats both small and big
        elif sum(res) >= 11:
            typ = 2   # big (大): total 11..17
        else:
            typ = 1   # small (小): total 3..10
        mess = "骰子"
        mess += "\n🎲" + str(res[0]) + " 🎲" + str(res[1]) + " 🎲" + str(res[2])
        user = {}
        for uid in self.player:
            ob = {"mess": info[self.playerst[uid]]}
            if self.playerst[uid] == 0:
                # Never picked: the stake is returned untouched.
                ob["money"] = self.player[uid]["money"]
            elif self.playerst[uid] == typ:
                if typ == 3:
                    ob["money"] = self.player[uid]["money"] * 24
                else:
                    ob["money"] = self.player[uid]["money"] * 2
            else:
                ob["money"] = 0
            user[uid] = ob
        return (mess, user)
class GameHorseObj(object):
    """Horse-race game: pick a horse 1-6, then boost it while it races to 50."""
    def __init__(self,userlist):
        # userlist: {user_id: {"name": display name, "money": staked amount}}
        self.player=userlist
        self.playerst={}
        self.horsest={}#(dis,st) per horse: distance 0..50, state 0=running 1=dead 2=stumbled
        for i in userlist:
            self.playerst[i]=0# chosen horse id, 0 = not picked yet
        self.NeedUpdate=True
        self.NeedEnd=False
        self.lastime=time.time()
        self.status=0# 0 = picking horses, 1 = racing/settling
        self.NeedStart=False
        # Crypto-quality RNG for the race rolls.
        self.rdlib=__import__("random").SystemRandom()
        self.sm={}# per-horse stumble tick counter (recovers after 5 ticks)
    def GenMess(self):
        """Render either the pick list (status 0) or the track (status 1)."""
        # NOTE(review): during picking, index 1-6 renders "-" rather than the
        # horse number — presumably to hide each player's pick; confirm intent.
        info="?------"
        if self.status==0:
            mess="选马"
            for i in self.player:
                mess+="\n"+self.player[i]["name"]+"("+str(self.player[i]["money"])+"): 🐴"+info[self.playerst[i]]
            return mess
        else:
            mst=["🏇","☠️","🐎"]
            mess="赛马"
            for i in self.horsest:
                # Right-align each horse: leading spaces shrink as it advances.
                tx=" "*max(50-self.horsest[i][0],0)
                tx+=mst[self.horsest[i][1]]
                tx+=str(i)
                mess+="\n"+tx
            return mess
    def GenButton(self,chatid):
        """Inline keyboard: horse picks while choosing, boost actions while racing."""
        if self.status==0:
            return [[{"text":"🐴1","callback_data":str(chatid)+"+1"},
                    {"text":"🐴2","callback_data":str(chatid)+"+2"},
                    {"text":"🐴3","callback_data":str(chatid)+"+3"}
                    ],
                    [{"text":"🐴4","callback_data":str(chatid)+"+4"},
                    {"text":"🐴5","callback_data":str(chatid)+"+5"},
                    {"text":"🐴6","callback_data":str(chatid)+"+6"}
                    ],
                    [{"text":"强制开始","callback_data":str(chatid)+"+E"}]]
        else:
            return [[{"text":"火箭加速","callback_data":str(chatid)+"+H"},
                    {"text":"快马加鞭","callback_data":str(chatid)+"+B"}
                    ]]
    def UserCmd(self,uid,action):
        """Apply a button press.

        Picking phase: '1'-'6' choose a horse, 'E' force-starts after 15s idle.
        Racing phase: 'H' rocket boost (+16, 50% death), 'B' whip (+8, 1/3 stumble).
        May return a (message, show_alert) tuple for the callback answer.
        """
        mst="马死摔"
        if action=="E":
            if time.time()-self.lastime>15:
                self.NeedStart=True
            return
        if not uid in self.player:
            return
        if self.status==0:
            if self.playerst[uid]!=0:
                return
            if not re.match("^[1-6]$",action):
                return
            self.NeedUpdate=True
            self.playerst[uid]=int(action)
            self.lastime=time.time()
            # Auto-start once every player has picked.
            fafa=True
            for i in self.playerst:
                if self.playerst[i]==0:
                    fafa=False
            self.NeedStart|=fafa
        else:
            maid=self.playerst[uid]
            if maid==0:
                return
            if self.horsest[maid][1]!=0:
                # Horse is dead or stumbled; tell the player via alert.
                return ("你🐴"+mst[self.horsest[maid][1]]+"了",True)
            if action=='H':
                # Rocket: +16 distance, but 50% chance the horse dies.
                dis=min(50,16+self.horsest[maid][0])
                ff=self.rdlib.randint(0,1)
                gst=0
                if ff==1:
                    gst=1
                self.horsest[maid]=(dis,gst)
            if action=='B':
                # Whip: +8 distance, 1-in-3 chance the horse stumbles.
                dis=min(50,8+self.horsest[maid][0])
                ff=self.rdlib.randint(0,2)
                gst=0
                if ff==2:
                    gst=2
                self.horsest[maid]=(dis,gst)
            return
        return
    def NextTick(self):
        """Advance the game: start the race, or move every running horse 3-6."""
        if self.status==0:
            if self.NeedStart==False:
                return
            # Place all six horses at the start line.
            for i in range(1,7):
                self.horsest[i]=(0,0)
                self.sm[i]=0
            self.status=1
            return
        else:
            self.NeedUpdate=True
            for i in self.horsest:
                if self.horsest[i][1]==0:
                    dis=self.rdlib.randint(3,6)
                    dis=min(50,dis+self.horsest[i][0])
                    self.horsest[i]=(dis,self.horsest[i][1])
                    self.sm[i]=0
                    if dis==50:
                        # First horse to reach 50 ends the race this tick.
                        self.NeedEnd=True
                elif self.horsest[i][1]==2:
                    # Stumbled horses recover after five ticks.
                    self.sm[i]+=1
                    if self.sm[i]==5:
                        self.horsest[i]=(self.horsest[i][0],0)
            return
    def EndGame(self):
        """Settle the race: backers of a horse that finished (alive) get 2x."""
        mess="赛马"
        mst=["🏇","☠️","🐎"]
        info="?123456"
        for i in self.horsest:
            tx=" "*max(50-self.horsest[i][0],0)
            tx+=mst[self.horsest[i][1]]
            tx+=str(i)
            mess+="\n"+tx
        user={}
        for i in self.player:
            ob={"mess":"🐴"+info[self.playerst[i]]}
            if self.playerst[i]==0:
                # Never picked a horse: stake returned untouched.
                ob["money"]=self.player[i]["money"]
            elif self.horsest[self.playerst[i]][0]==50 and self.horsest[self.playerst[i]][1]==0:
                ob["money"]=self.player[i]["money"]*2
            else:
                ob["money"]=0
            user[i]=ob
        return (mess,user)
class GameBlackJackObj(object):
    """Blackjack round against an automated dealer (infinite deck).

    Cards are ints 1..13 (A..K); vall maps a card to its base value and
    Redst to its display rank. The dealer's hand is drawn up-front.
    """
    def __init__(self,userlist):
        # Index-by-card lookup tables; index 0 is a filler.
        self.vall=[0,1,2,3,4,5,6,7,8,9,10,10,10,10]
        self.Redst=["x","A","2","3","4","5","6","7","8","9","10","J","Q","K"]
        self.player=userlist
        self.playerst={}
        # Per-player state: 0 pending, 1 stood, 2 natural blackjack, 3 bust.
        self.playerok={}
        self.NeedUpdate=True
        self.NeedEnd=False
        self.lastime=time.time()
        # Crypto-quality RNG; each draw is uniform 1..13 (infinite deck).
        self.rdlib=__import__("random").SystemRandom()
        # Dealer (庄家) hand: hit until reaching 17 or more.
        self.zjst=[self.rdlib.randint(1,13),self.rdlib.randint(1,13)]
        while self.cal(self.zjst)[1]<17:
            self.zjst.append(self.rdlib.randint(1,13))
        for i in userlist:
            self.playerst[i]=[self.rdlib.randint(1,13),self.rdlib.randint(1,13)]
            self.playerok[i]=0
            if self.cal(self.playerst[i])[1]==21:
                self.playerok[i]=2
    def cal(self,arr):
        """Return (ace_count, best_total) for a hand, counting one ace as 11
        when that does not bust (total <= 21)."""
        ret=[0,0]
        for i in arr:
            ret[1]+=self.vall[i]
            if i==1:
                ret[0]+=1
        if ret[1]<=11 and ret[0]>0:
            ret[1]+=10
        return tuple(ret)
    def arr2str(self,arr):
        """Render a hand as space-separated ranks (trailing space included)."""
        st=""
        for i in arr:
            st+=self.Redst[i]+" "
        return st
    def GenMess(self):
        """Render the table; only the dealer's first card is revealed."""
        mess="21点"
        sta=["未完成","已完成","黑杰克","爆炸"]
        mess+="\n庄家: "+self.Redst[self.zjst[0]]+" ?"
        for i in self.player:
            mess+="\n"+self.player[i]["name"]+"("+str(self.player[i]["money"])+"): "+self.arr2str(self.playerst[i])+sta[self.playerok[i]]
        return mess
    def GenButton(self,chatid):
        """Inline keyboard: hit / stand, plus a force-end button."""
        return [[{"text":"要牌","callback_data":str(chatid)+"+Y"},
                {"text":"完成","callback_data":str(chatid)+"+N"}
                ],
                [{"text":"强制结束","callback_data":str(chatid)+"+E"}]]
    def UserCmd(self,uid,action):
        """Apply a button press: 'Y' hit, 'N' stand, 'E' force-end after 15s idle."""
        if action=="E":
            if time.time()-self.lastime>15:
                self.NeedEnd=True
            return
        if self.NeedEnd:
            return
        if not uid in self.player:
            return
        if self.playerok[uid]!=0:
            return
        if action=='Y':
            self.playerst[uid].append(self.rdlib.randint(1,13))
            cc=self.cal(self.playerst[uid])
            if cc[1]>21:
                self.playerok[uid]=3
        if action=='N':
            self.playerok[uid]=1
        self.NeedUpdate=True
        self.lastime=time.time()
        return
    def NextTick(self):
        # Settle automatically once no player is still pending.
        nmsl=True
        for i in self.playerok:
            if self.playerok[i]==0:
                nmsl=False
        self.NeedEnd|=nmsl
        return
    def EndGame(self):
        """Settle against the dealer: bust pays 0, natural blackjack 2.5x,
        otherwise 2x on a win, 1x (push) on a tie, 0 on a loss."""
        mess="21点"
        sta=["失败","胜利","黑杰克","爆炸","平局"]
        mess+="\n庄家: "+self.arr2str(self.zjst)
        user={}
        zjd=self.cal(self.zjst)
        for i in self.player:
            ob={"mess":self.arr2str(self.playerst[i])}
            nmsl=self.playerok[i]
            if self.playerok[i]==3:
                ob["money"]=0
            elif self.playerok[i]==2:
                ob["money"]=int(self.player[i]["money"]*2.5)
            else:
                # Player stood (or timed out pending): compare totals.
                if zjd[1]>21 or self.cal(self.playerst[i])[1]>zjd[1]:
                    ob["money"]=self.player[i]["money"]*2
                    nmsl=1
                elif self.cal(self.playerst[i])[1]==zjd[1]:
                    ob["money"]=self.player[i]["money"]
                    nmsl=4
                else:
                    ob["money"]=0
                    nmsl=0
            ob["mess"]+=sta[nmsl]
            user[i]=ob
        return (mess,user)
# Registry of playable games: key -> {slash command, game class, display name}.
GameObjList = {
    "dice": {"cmd": "/dice", "obj": GameDiceObj, "name": "骰子"},
    "horse": {"cmd": "/horse", "obj": GameHorseObj, "name": "赛马"},
    "blackjack": {"cmd": "/blackjack", "obj": GameBlackJackObj, "name": "21点"},
}
# Reverse lookup: slash command -> game key.
Cmd2Game = {meta["cmd"]: key for key, meta in GameObjList.items()}
# Game end
def GenBetButton(chatid):
    """Build the two-row inline keyboard shown during the betting phase."""
    prefix = str(chatid)
    bet_row = []
    for label in ("5", "10", "50", "50%", "sh"):
        bet_row.append({"text": label, "callback_data": prefix + "+*X" + label})
    control_row = [
        {"text": "Start", "callback_data": prefix + "+*S"},
        {"text": "余额", "callback_data": prefix + "+*M"},
    ]
    return [bet_row, control_row]
# Active game per chat id: {"typ", "player", "status", "messid", later "game"}.
AliveGame={}
def DoBet(userobj,chatid,st):
    """Place a bet for a user in the chat's pending game.

    st is the raw amount string: an integer, "NN%" of the balance, or "sh"
    (all-in). Returns (0, msg) on success, (-1, msg) on any error.
    """
    uid=userobj["id"]
    # NOTE(review): UserInfo is declared global but never used in this
    # function — looks like a leftover; confirm before removing.
    global AliveGame,UserInfo,SendReqIDMap
    if st=="sh":
        # All-in: replace with the full current balance.
        st=str(GetUserInfo(uid))
    if re.match("(^[1-9][0-9]{0,1}%$|^100%$)",st):
        # Percentage bet (1%..100%), truncated to an integer amount.
        fa=int(int(st[:-1])/100.0*GetUserInfo(uid))
        st=str(fa)
    if not re.match("^[1-9][0-9]*$",st):
        return (-1,"无法识别投注金额")
    if not chatid in AliveGame:
        return (-1,"无进行中游戏")
    if not AliveGame[chatid]["status"]==0:
        return (-1,"游戏状态错误")
    mon=int(st)
    if mon>GetUserInfo(uid):
        return (-1,"余额不足")
    # Deduct the stake immediately; it is paid back by EndGame on a win.
    ChangeUserInfo(uid,-mon)
    if not uid in AliveGame[chatid]["player"]:
        AliveGame[chatid]["player"][uid]={"money":0,"name":userobj["first_name"]}
    AliveGame[chatid]["player"][uid]["money"]+=mon
    if AliveGame[chatid]["messid"]<0:
        # messid is still a negative request id; resolve it to the real
        # Telegram message_id recorded by ServiceSender.
        sbsb=AliveGame[chatid]["messid"]
        if not sbsb in SendReqIDMap:
            return (-1,"消息未发出")
        AliveGame[chatid]["messid"]=SendReqIDMap[AliveGame[chatid]["messid"]]
        SendReqIDMap.pop(sbsb)
    typ=AliveGame[chatid]["typ"]
    mess=GameObjList[typ]["name"]+"\n玩家"
    for i in AliveGame[chatid]["player"]:
        mess+="\n"+AliveGame[chatid]["player"][i]["name"]+": "+str(AliveGame[chatid]["player"][i]["money"])+"("+str(GetUserInfo(i))+")"
    UpdateMessage(mess,chatid,AliveGame[chatid]["messid"],button={"inline_keyboard":GenBetButton(chatid)})
    return (0,"下注成功")
def StartGame(chatid,typ):
    """Open a new betting lobby for game *typ* in chat *chatid*.

    Refuses (with a reply message) if a game is already active in the
    chat.  The lobby message is sent with a provisional negative request
    id (SendReqIDTot) that is later resolved to a real message id.
    """
    global AliveGame,SendReqIDTot
    if chatid in AliveGame:
        # Resolve a still-pending provisional message id before replying.
        if AliveGame[chatid]["messid"]<0:
            sbsb=AliveGame[chatid]["messid"]
            if sbsb in SendReqIDMap:
                AliveGame[chatid]["messid"]=SendReqIDMap[AliveGame[chatid]["messid"]]
                SendReqIDMap.pop(sbsb)
        SendMessage("上一局游戏还未结束 无法新建",chatid,reply=AliveGame[chatid]["messid"])
        return
    # status 0 == betting phase; messid starts as the negative request id.
    obj={"typ":typ,"player":{},"status":0,"messid":SendReqIDTot}
    AliveGame[chatid]=obj
    SendMessage(GameObjList[typ]["name"],chatid,button={"inline_keyboard":GenBetButton(chatid)},reqid=SendReqIDTot)
    SendReqIDTot-=1
    return
def EndGame(chatid):
    """Finish the running game in *chatid*: pay winnings and post results."""
    global AliveGame
    # The game object reports the summary text and per-player payouts.
    (mess,chang)=AliveGame[chatid]["game"].EndGame()
    AliveGame[chatid]["game"].NeedUpdate=False
    #player
    # Apply each player's winnings and append a per-player result line
    # ("name(stake): message +win(balance)").
    for i in chang:
        ChangeUserInfo(i,chang[i]["money"])
        usm=GetUserInfo(i)
        mess+="\n"+AliveGame[chatid]["player"][i]["name"]+"("+str(AliveGame[chatid]["player"][i]["money"])+"): "+chang[i]["mess"]+" +"+str(chang[i]["money"])+"("+str(usm)+")"
    UpdateMessage(mess,chatid,AliveGame[chatid]["messid"])
    return
def UpdateGame(chatid):
    """Redraw the game message for *chatid* with current state and buttons."""
    entry = AliveGame[chatid]
    game = entry["game"]
    game.NeedUpdate = False
    text = game.GenMess()
    keyboard = {"inline_keyboard": game.GenButton(chatid)}
    UpdateMessage(text, chatid, entry["messid"], button=keyboard)
    return
def DoCommand(obj):
    """Handle one incoming Telegram message: parse and dispatch /commands."""
    if not "text" in obj:
        return
    txt=obj["text"]
    if len(txt)<1 or txt[0]!='/':
        return
    cmdall=txt.split(' ')
    cmd=cmdall[0]
    # "/cmd@OtherBot" is addressed to a different bot; ignore it.
    if cmd.find("@")!=-1:
        botname=cmd[cmd.find("@"):]
        if botname!="@"+BOTNAME:
            return
        cmd=cmd.replace("@"+BOTNAME,"")
    if cmd=="/help" or cmd=="/start":
        SendMessage(HELPMESSAGE,obj["chat"]["id"])
    # Game-launch commands registered in Cmd2Game (/dice, /horse, ...).
    if cmd in Cmd2Game:
        StartGame(obj["chat"]["id"],Cmd2Game[cmd])
    # "/bet <amount>" places a bet and replies with the outcome.
    if cmd=="/bet":
        if len(cmdall)>1:
            res=DoBet(obj["from"],obj["chat"]["id"],cmdall[1])
            if res[0]==0:
                retx="成功 "
            else:
                retx="错误 "
            retx+=res[1]
            SendMessage(retx,obj["chat"]["id"],reply=obj["message_id"])
    # "/del" force-resets any game state stuck in this chat.
    if cmd=='/del':
        global AliveGame
        if obj["chat"]["id"] in AliveGame:
            AliveGame.pop(obj["chat"]["id"])
        SendMessage("已重置",obj["chat"]["id"])
    # Debug command enabled by the PY flag: grant a random money delta.
    if PY:
        if cmd=='/py':
            mm=__import__("random").randint(-100,1000)
            GetUserInfo(obj["from"]["id"])
            ChangeUserInfo(obj["from"]["id"],mm)
            SendMessage("pyed: "+str(mm),obj["chat"]["id"],reply=obj["message_id"])
    return
def DoButton(obj):
    """Handle one inline-keyboard callback query.

    callback_data has the form "<chatid>+<cmd>".  During the betting
    phase (status 0) cmd is "*X<amount>" (bet), "*M" (show balance) or
    "*S" (start the game); once the game is running the payload is
    forwarded to the game object's UserCmd.
    """
    global AliveGame
    if (not "data" in obj) or len(obj["data"])<1:
        return
    dat=obj["data"].split('+')
    # First field must be a (possibly negative) chat id.
    if len(dat)<2 or (not re.match("^[-]*[1-9][0-9]*$",dat[0])):
        AnswerCallback(obj["id"],"非法请求")
        return
    cid=int(dat[0])
    if not cid in AliveGame:
        AnswerCallback(obj["id"],"无进行中的游戏")
        return
    txt=dat[1]
    if AliveGame[cid]["status"]==0:
        if txt[0]!='*':
            return
        if txt[1]=='X':
            # Bet button: amount (or "NN%"/"sh") follows the "*X" prefix.
            res=DoBet(obj["from"],cid,txt[2:])
            sta=False
            if res[0]==0:
                retx="成功 "
            else:
                retx="错误 "
                sta=True
            retx+=res[1]
            AnswerCallback(obj["id"],retx,isalert=sta)
        elif txt[1]=='M':
            AnswerCallback(obj["id"],"余额: "+str(GetUserInfo(obj["from"]["id"])),isalert=True)
        elif txt[1]=='S':
            if not AliveGame[cid]["player"]:
                AnswerCallback(obj["id"],"没人上车")
                return
            # Instantiate the game object with the current players and
            # switch the chat from betting (0) to playing (1).
            AliveGame[cid]["game"]=GameObjList[AliveGame[cid]["typ"]]["obj"](AliveGame[cid]["player"])
            AliveGame[cid]["status"]=1
            AnswerCallback(obj["id"])
    else:
        # Game already running: let the game object interpret the button.
        ret=AliveGame[cid]["game"].UserCmd(obj["from"]["id"],txt)
        if ret is None:
            AnswerCallback(obj["id"])
        else:
            AnswerCallback(obj["id"],ret[0],ret[1])
    return
def DoChange(cz):
    """Dispatch one Telegram update: plain messages go to the command
    handler, button presses to the callback handler; anything else is
    ignored.  Messages take precedence if both keys are present."""
    for key, handler in (("message", DoCommand), ("callback_query", DoButton)):
        if key in cz:
            handler(cz[key])
            return
    return
def ThErr():
    """When DEBUG is on, dump the active exception (type, value and each
    traceback frame) to both stdout and stderr; otherwise do nothing.
    Must be called from inside an ``except`` block."""
    if not DEBUG:
        return
    exc_type, exc_value, exc_tb = sys.exc_info()
    frames = traceback.extract_tb(exc_tb)
    for stream in (sys.stdout, sys.stderr):
        print(exc_type, file=stream)
        print(exc_value, file=stream)
        for frame in frames:
            print(frame, file=stream)
#main
def main():
    """Poll Telegram updates forever, dispatch them and tick active games."""
    while True:
        sttime=time.time()
        ch=GetChange()
        #print(ch)
        for cz in ch:
            DoChange(cz)
        nend=[]
        # Advance every running game one tick; collect finished ones.
        for i in AliveGame:
            if "game" in AliveGame[i]:
                try:
                    AliveGame[i]["game"].NextTick()
                    if AliveGame[i]["game"].NeedEnd:
                        EndGame(i)
                        nend.append(i)
                    if AliveGame[i]["game"].NeedUpdate:
                        UpdateGame(i)
                except:
                    logger.error("Update Game")
                    if DEBUG:
                        ThErr()
        # Remove finished games after the loop to avoid mutating the dict
        # while iterating over it.
        for i in nend:
            AliveGame.pop(i)
        edtime=time.time()
        if DEBUG:
            print(edtime-sttime)
        # Aim for one polling cycle roughly every 2 seconds.
        net=max(2-edtime+sttime,0)
        time.sleep(net)
# Run the bot; dump the fatal exception (when DEBUG) before exiting.
try:
    main()
except:
    ThErr()
exit(0)
| StarcoderdataPython |
237404 | <reponame>jasondelaat/ticklish_ui
# Minimal ticklish_ui demo: a window titled "Progressbar" with two
# progress bars, one per row.
from ticklish_ui import *
app = Application(
    'Progressbar',
    # Each list argument is one row; widgets are addressed later by the
    # path ".rowN.<name>".
    [Progressbar().options(name='pb1')],
    [Progressbar('indeterminate').options(name='pb2')]
)
# Animate both bars (1 ms step interval), then enter the Tk main loop.
app.nametowidget('.row1.pb1').start(1)
app.nametowidget('.row2.pb2').start(1)
app.mainloop()
| StarcoderdataPython |
3530991 | <reponame>rizkhita/Algorithms
# i used libraries that need to be installed first
import pandas as pd
# read string sebagai file
from io import StringIO
import string
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
from string import punctuation
from sklearn.feature_extraction.text import TfidfVectorizer
# from sklearn.feature_extraction import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
def baca_csv():
    """Load the labelled intent dataset ("taharah_intent.csv") as a DataFrame."""
    return pd.read_csv('taharah_intent.csv')
def convert_to_tidf():
    """Load the dataset and add a numeric ``id_label`` column.

    Each distinct value of the ``labels`` column is assigned a stable
    integer code via ``Series.factorize``.  Returns the augmented
    DataFrame.

    Note: the previous version also built two label<->id lookup dicts
    that were never used or returned; that dead code has been removed.
    """
    df = baca_csv()
    df['id_label'] = df['labels'].factorize()[0]
    return df
def mnb():
    """Train a Multinomial Naive Bayes intent classifier.

    Loads the labelled questions, splits train/test, vectorises the
    training questions with CountVectorizer followed by a TF-IDF
    transform, and fits the classifier.

    Returns
    -------
    (MultinomialNB, CountVectorizer)
        The fitted model and the fitted vectoriser (needed to transform
        new questions at prediction time).
    """
    # Indonesian stop words (Sastrawi) plus punctuation characters.
    factory = StopWordRemoverFactory()
    stop_word_list = factory.get_stop_words()
    stop = stop_word_list + list(punctuation)
    # NOTE(review): this TfidfVectorizer is constructed but never used --
    # the actual pipeline below is CountVectorizer + TfidfTransformer.
    tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2),
                            stop_words=stop)
    df = convert_to_tidf()
    X_train, X_test, y_train, y_test = train_test_split(df['questions'], df['labels'], random_state=0)
    count_vect = CountVectorizer()
    X_train_counts = count_vect.fit_transform(X_train)
    tfidf_transformer = TfidfTransformer()
    X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
    # NOTE(review): the model is fitted on TF-IDF features, but predict()
    # below transforms new questions with count_vect only (no TF-IDF), so
    # inference features differ from training features -- confirm intended.
    feed = MultinomialNB().fit(X_train_tfidf, y_train)
    return feed, count_vect
#X_test.iloc[0]
def predict(question):
    """Classify *question* and return the predicted intent label as a string.

    Trains the Naive Bayes model via ``mnb()`` on every call, vectorises
    the single question and takes the first (only) prediction.

    Fix: the previous implementation stringified the whole prediction
    array and stripped "[']" characters from the ends, which corrupts any
    label that itself starts or ends with those characters; indexing the
    returned array is both simpler and lossless.
    """
    feed, count_vect = mnb()
    predictions = feed.predict(count_vect.transform([question]))
    return str(predictions[0])
# Simple CLI demo: read one question, predict and print its intent.
question=input("Masukan pertanyaan : ")
x=predict(question)
# predict() already returns a plain string; this strip is a leftover that
# is a no-op for typical labels.
intent=str(x).strip("['']")
print("Intent predicted : "+format(x))
| StarcoderdataPython |
1805224 | <gh_stars>1-10
import numpy as np
import attitude_utils as attu
from triangle_ray_intersect import Triangle_ray_intersect
class Landing_icgen(object):
    """Initial-condition generator for asteroid-landing episodes.

    All ``(lo, hi)`` tuple arguments are uniform sampling ranges; fixed
    ``position`` / ``velocity`` / ``attitude`` values, when given,
    override the sampled ones in :meth:`set_ic`.
    """
    def __init__(self,
                 attitude_parameterization=None,
                 position_r=(900,1100) , position_theta=(0, np.pi/2), position_phi=(-np.pi,np.pi),
                 velocity_x=(-0.10,0.10), velocity_y=(-0.10,0.10), velocity_z=(-0.10,0.10),
                 attitude_error=(0,np.pi/16),
                 asteroid_axis_low=(300,300,300),
                 asteroid_axis_high=(600,600,600),
                 lander_wll=(0.0,0.0,0.0),
                 lander_wul=(0.0,0.0,0.0),
                 min_mass=450, max_mass=500,
                 p_engine_fail=0.0,
                 engine_fail_scale=(0.5,1.0),
                 debug_fail=False,
                 debug=False,
                 noise_u=np.zeros(3), noise_sd=np.zeros(3), l_offset=0.0,
                 position=None,
                 velocity=None,
                 attitude=None,
                 p_scale=(0.0, 0.05),
                 com_range=(0.0,0.0),
                 generate_new_asteroid=True,
                 inertia_uncertainty_diag=0.0, inertia_uncertainty_offdiag=0.0):
        self.p_scale = p_scale
        self.position_r = position_r
        self.position_theta = position_theta
        self.position_phi = position_phi
        self.velocity_x = velocity_x
        self.velocity_y = velocity_y
        self.velocity_z = velocity_z
        self.com_range = com_range
        self.asteroid_axis_low = asteroid_axis_low
        self.asteroid_axis_high = asteroid_axis_high
        self.generate_new_asteroid = generate_new_asteroid
        self.attitude_parameterization=attitude_parameterization
        self.attitude_error=attitude_error
        self.lander_wul=lander_wul
        self.lander_wll=lander_wll
        self.debug_fail = debug_fail
        self.p_engine_fail = p_engine_fail
        self.engine_fail_scale = engine_fail_scale
        self.min_mass = min_mass
        self.max_mass = max_mass
        self.noise_u = noise_u
        self.noise_sd = noise_sd
        self.l_offset = l_offset
        self.inertia_uncertainty_diag = inertia_uncertainty_diag
        self.inertia_uncertainty_offdiag = inertia_uncertainty_offdiag
        self.position = position
        self.velocity = velocity
        self.attitude = attitude
        # Running maximum of the initial pointing error, updated in debug mode.
        self.max_pointing_error = 0.0
        self.debug = debug
        self.tri = Triangle_ray_intersect()
    def show(self):
        """Print a one-line identification banner."""
        print('Landing_icgen:')
    def get_radius(self, scene, r, theta, phi):
        """Distance from the asteroid centre to its surface along the ray
        given by spherical direction (theta, phi).

        Casts a ray from a point far outside the body back toward the
        origin and intersects it with the scene's triangle mesh; the
        surface radius is the start distance minus the first hit distance.
        """
        tri_v0, tri_v1, tri_v2 = scene.get_triangle_vertices()
        tri_v0 = np.vstack(tri_v0)
        tri_v1 = np.vstack(tri_v1)
        tri_v2 = np.vstack(tri_v2)
        # Start well outside the asteroid so the first hit is the surface.
        extended_r = r + 10000.
        extended_rx = extended_r * np.sin(theta) * np.cos(phi)
        extended_ry = extended_r * np.sin(theta) * np.sin(phi)
        extended_rz = extended_r * np.cos(theta)
        extended_pos = np.asarray([extended_rx, extended_ry, extended_rz])
        extended_pos = np.expand_dims(extended_pos, axis=0)
        # Unit direction pointing from the cast origin back to the centre.
        extended_dvec_p = -extended_pos / np.linalg.norm(extended_pos)
        intersections, distances, indices, hit_cnts = self.tri.get_intersections(extended_pos, extended_dvec_p, tri_v0, tri_v1, tri_v2)
        radius = np.linalg.norm(extended_pos)-distances[0]
        return radius
    def set_ic(self , lander, dynamics):
        """Sample and install fresh initial conditions on *lander* and
        *dynamics* (engine failures, noise, mass, pose and asteroid shape)."""
        lander.asteroid.reset()
        # ENGINE FAILURE
        assert lander.thruster_model.fail is not None
        lander.thruster_model.fail = np.random.rand() < self.p_engine_fail
        lander.thruster_model.fail_idx = np.random.randint(low=0,high=lander.thruster_model.num_thrusters)
        lander.thruster_model.fail_scale = np.random.uniform(low=self.engine_fail_scale[0], high=self.engine_fail_scale[1])
        if self.debug_fail:
            print('Engine Fail? : ', self.p_engine_fail, lander.thruster_model.fail, lander.thruster_model.fail_idx, lander.thruster_model.fail_scale)
        # COM variation
        lander.thruster_model.com = np.random.uniform(self.com_range[0], self.com_range[1])
        # Disturbance model: random mean, configured standard deviation.
        dynamics.noise_u = np.random.uniform(low=-self.noise_u, high=self.noise_u,size=3)
        dynamics.noise_sd = self.noise_sd
        lander.init_mass = np.random.uniform(low=self.min_mass, high=self.max_mass)
        # Sample the spherical initial-position direction and distance.
        theta = np.random.uniform(low=self.position_theta[0], high=self.position_theta[1])
        phi = np.random.uniform(low=self.position_phi[0], high=self.position_phi[1])
        r = np.random.uniform(low=self.position_r[0], high=self.position_r[1])
        vx = np.random.uniform(low=self.velocity_x[0], high=self.velocity_x[1])
        vy = np.random.uniform(low=self.velocity_y[0], high=self.velocity_y[1])
        vz = np.random.uniform(low=self.velocity_z[0], high=self.velocity_z[1])
        ### generate new asteroid, find where spacecraft LOS would intersect ellipsoid surface
        if self.generate_new_asteroid:
            r_limit_p = np.random.uniform(low=self.asteroid_axis_low,high=self.asteroid_axis_high)
            r_limit_n = np.random.uniform(low=self.asteroid_axis_low,high=self.asteroid_axis_high)
            ps = np.random.uniform(low=self.p_scale[0],high=self.p_scale[1])
            lander.scene.perturb_axes(p_scale=ps,r_scale_p=(r_limit_p[0],r_limit_p[1],r_limit_p[2]), r_scale_n=(r_limit_n[0],r_limit_n[1],r_limit_n[2]))
        """
            make initial distance with respect to asteroid surface given direction vector
            pointing from spacecraft to asteroid center
            i.e., add radius to initial distance
        """
        #print(lander.scene.get_limits())
        asteroid_radius = self.get_radius(lander.scene, r, theta, phi)
        r_ext = r + asteroid_radius
        lander.state['init_dist'] = r
        #print('debug2: ', r, r_ext, asteroid_radius)
        # Convert spherical (r_ext, theta, phi) to Cartesian position.
        rx = r_ext * np.sin(theta) * np.cos(phi)
        ry = r_ext * np.sin(theta) * np.sin(phi)
        rz = r_ext * np.cos(theta)
        pos = np.asarray([rx, ry, rz])
        # Unit vector from the lander toward the asteroid centre.
        dvec_p = -pos / np.linalg.norm(pos)
        # Target attitude points exactly at the asteroid (zero error range);
        # the actual initial attitude adds a random error drawn from
        # self.attitude_error.
        lander.target_attitude = attu.make_random_attitude_error(self.attitude_parameterization, (0.,0.) , dvec_p, np.asarray([0.,0.,-1]))
        lander.state['attitude'] = attu.make_random_attitude_error(self.attitude_parameterization, self.attitude_error, dvec_p, np.asarray([0.,0.,-1]))
        lander.state['position'] = pos
        lander.state['velocity'] = np.asarray([vx, vy, vz])
        lander.state['attitude_321'] = self.attitude_parameterization.q2Euler321(lander.state['attitude'])
        lander.state['w'] = np.random.uniform(low=self.lander_wll, high=self.lander_wul, size=3)
        lander.state['thrust'] = np.zeros(3)
        lander.state['mass'] = lander.init_mass
        # Fixed overrides win over the sampled values when provided.
        if self.position is not None:
            lander.state['position'] = self.position
        if self.velocity is not None:
            lander.state['velocity'] = self.velocity
        if self.attitude is not None:
            lander.state['attitude'] = self.attitude
        if self.debug:
            # NOTE(review): dvec_v and theta_debug are not defined in this
            # scope; this first debug print would raise NameError if reached.
            print('debug v: ', 180 / np.pi * np.arccos(np.clip(np.dot(dvec_v, dvec_p),-1,1)), 180 / np.pi * theta_debug)
            C = self.attitude_parameterization.q2dcm(lander.state['attitude'])
            sensor = np.asarray([0.,0.,-1])
            rot_dvec = C.T.dot(sensor)
            error = 180/np.pi*np.arccos(np.clip(np.dot(rot_dvec,dvec_p),-1,1))
            self.max_pointing_error = np.maximum(error,self.max_pointing_error)
            print('debug attitude: ', error, self.max_pointing_error)
        # Symmetric off-diagonal inertia perturbation with zero diagonal.
        it_noise1 = np.random.uniform(low=-self.inertia_uncertainty_offdiag,
                                      high=self.inertia_uncertainty_offdiag,
                                      size=(3,3))
        np.fill_diagonal(it_noise1,0.0)
        it_noise1 = (it_noise1 + it_noise1.T)/2
        it_noise2 = np.diag(np.random.uniform(low=-self.inertia_uncertainty_diag,
                                              high=self.inertia_uncertainty_diag,
                                              size=3))
        lander.inertia_tensor = lander.nominal_inertia_tensor + it_noise1 + it_noise2
        # Intentionally disabled debug dump ("and False").
        if self.debug and False:
            print(dynamics.g, lander.state['mass'])
            print(lander.inertia_tensor)
| StarcoderdataPython |
1689925 | #!/usr/bin/env python
#encoding=utf-8
import sys
import codecs
import json
# Convert a JSON-lines annotation file (one {"text", "labels"} object per
# line, each label = [start, end, tag]) into "span\ttag" records with a
# blank line after every document.
if len(sys.argv) < 3:
    print("Please select input and output file")
    sys.exit()

# Context managers close both files even on error, and each JSON line is
# parsed once instead of twice as before.
with codecs.open(sys.argv[1], encoding="utf-8") as in_file, \
        codecs.open(sys.argv[2], "w", encoding="utf-8") as out_file:
    for line in in_file:
        record = json.loads(line)
        text = record["text"]
        for tag in record["labels"]:
            out_file.write(text[tag[0]:tag[1]] + "\t" + tag[2] + "\n")
        out_file.write("\n")
| StarcoderdataPython |
1648044 | <reponame>MLH-Fellowship/LarynxCode
class TRParseMode(object):
    """
    Sentinel constants selecting where a text replacement may be applied.

    type      | replacement location
    ------------------------------------------------------
    ANY       | specs, extras, or defaults
    SPEC      | specs only
    EXTRA     | extras only
    DEFAULT   | defaults only
    NOT_SPECS | extras and defaults but not specs
    """
    # Triple angle brackets make the sentinels unlikely to collide with
    # any ordinary replacement text.
    ANY = "<<<ANY>>>"
    SPEC = "<<<SPEC>>>"
    EXTRA = "<<<EXTRA>>>"
    DEFAULT = "<<<DEFAULT>>>"
    NOT_SPECS = "<<<NOT_SPECS>>>"
| StarcoderdataPython |
6576971 | <gh_stars>0
from gym.envs.registration import register
from gym_xmanage.xmanage_errors import *
# Register the two Xmanage environments with Gym so that
# gym.make('xmanageTSC-v0') / gym.make('xmanageCVTSC-v0') work.
register(
    id='xmanageTSC-v0',
    entry_point='gym_xmanage.envs:XmanageTSCEnv',
)
register(
    id='xmanageCVTSC-v0',
    entry_point='gym_xmanage.envs:XmanageCVTSCEnv',
) | StarcoderdataPython |
11363031 | import numpy as np
from ..base.indiv import Individual
################################################################################
# スカラー化関数
################################################################################
class ScalarError(Exception):
    """Raised by the scalarization functions on invalid input (e.g. an
    individual that has not been evaluated yet)."""
    pass
def scalar_weighted_sum(indiv, weight, ref_point):
    """Weighted-sum scalarization: negated weighted L1 distance of the
    individual's objective vector to *ref_point*."""
    deviation = np.abs(indiv.wvalue - ref_point)
    return -(weight * deviation).sum()
def scalar_chebyshev(indiv:Individual, weight, ref_point):
    """Chebyshev scalarization; delegates to the minimization variant.
    (The maximization variant below is kept but disabled.)"""
    return scalar_chebyshev_for_minimize(indiv, weight, ref_point)
    # return scalar_chebyshev_for_maximize(indiv, weight, ref_point)
def scalar_chebyshev_for_minimize(indiv, weight, ref_point):
    """Chebyshev (weighted L-infinity) scalarization for minimization:
    the negated largest weighted deviation from *ref_point*.

    Raises ScalarError if *indiv* has not been evaluated yet.
    """
    if not indiv.evaluated():
        raise ScalarError("indiv not evaluated.")
    weighted_dev = weight * np.abs(indiv.wvalue - ref_point)
    return -weighted_dev.max()
def scalar_chebyshev_for_maximize(indiv, weight, ref_point):
    """Chebyshev-style scalarization for maximization: the smallest
    weighted deviation from *ref_point*.

    Raises ScalarError if *indiv* has not been evaluated yet.
    """
    if not indiv.evaluated():
        raise ScalarError("indiv not evaluated.")
    weighted_dev = weight * np.abs(indiv.wvalue - ref_point)
    # res = -1.0/np.max(weight * np.abs(indiv.wvalue - ref_point))
    return weighted_dev.min()
def scalar_boundaryintersection(indiv, weight, ref_point):
    """Penalty boundary intersection (PBI) style scalarization.

    d1 is the magnitude of the projection of (wvalue - ref_point) onto
    the normalized weight direction; d2 is the distance from wvalue to
    the point ref_point - d1 * direction; theta = 5 penalizes d2.

    NOTE(review): the anchor uses ref_point - d1 * direction, whereas the
    textbook PBI uses ref_point + d1 * direction -- preserved as-is.
    """
    PENALTY_THETA = 5.0
    direction = weight / np.linalg.norm(weight)
    offset = indiv.wvalue - ref_point
    d1 = np.abs(np.dot(offset, direction))
    d2 = np.linalg.norm(indiv.wvalue - (ref_point - d1 * direction))
    return -(d1 + PENALTY_THETA * d2)
9707023 | from pathlib import Path
from tempfile import gettempdir
from bets.utils import sys_util
from bets.utils import log
# Initialise the bets logging once at import time.
log.init()
# Absolute path and bare name of this test module; used as a copy fixture.
FILE_PATH = Path(__file__).absolute()
FILE_NAME = FILE_PATH.name
def test_get_temp_location():
    """get_tmp_location should map a file into the system temp dir,
    keeping the original name behind a 'tmp_' prefix."""
    temp_file = Path(sys_util.get_tmp_location(str(FILE_PATH)))
    assert temp_file.parent == Path(gettempdir())
    assert temp_file.name.startswith("tmp_")
    assert temp_file.name.endswith(FILE_NAME)
def test_delete():
    """sys_util.delete should remove a non-empty directory tree recursively."""
    tmp_dir = Path(gettempdir()).absolute().joinpath("tmp_dir_for_deletion")
    tmp_dir.mkdir()
    inner_dirs = ["d1",
                  "d2",
                  "d3"]
    inner_paths = [tmp_dir.joinpath(d) for d in inner_dirs]
    # Build a small tree with one text file per subdirectory.
    for ip in inner_paths:
        ip.mkdir(parents=True, exist_ok=True)
        inner_file = ip.joinpath("file.txt")
        log.debug(f'writing text to: {str(inner_file)}')
        inner_file.write_text("msome_Text", encoding="utf-8")
    log.debug(f"created temp dir structure at: {tmp_dir}")
    sys_util.delete(str(tmp_dir))
    assert not tmp_dir.exists()
def test_copy_to_tmp_location_file():
    """copy_to_tmp should produce a byte-identical copy of the source file."""
    src_file = Path(__file__).absolute()
    dst_file = Path(sys_util.copy_to_tmp(str(src_file)))
    src_bytes = src_file.read_bytes()
    dst_bytes = dst_file.read_bytes()
    assert dst_bytes == src_bytes
| StarcoderdataPython |
8176942 | <reponame>AyumiizZ/Grad_school_work
"""
File name: 5.py
Author: AyumiizZ
Date created: 2020/10/04
Python Version: 3.8.5
About: Find kth smallest element in union set of two sorted arrays problem
"""
from random import randint
from time import sleep
DEBUG = False  # set True to trace each recursive search step
def generate_data(n: int, min_data: int = 1, max_data: int = 100):
    '''Generate two disjoint sorted arrays of n distinct elements each.

    Parameters
    ----------
    n : int
        Number of elements in each array.
    min_data, max_data : int
        Every element satisfies min_data <= element <= max_data.

    Returns
    -------
    (list, list)
        Two sorted lists; all 2*n values are pairwise distinct.
    '''
    # Not enough distinct values available for 2*n unique draws.
    # (The original, slightly conservative bound is kept for compatibility.)
    if(2*n > max_data-min_data-1):
        print("Please change max min data")
        exit()
    A = []
    B = []
    # Track used values in a set for O(1) membership tests; the original
    # rebuilt and scanned the list A+B on every draw (quadratic).
    used = set()
    for bucket in (A, B):
        for _ in range(n):
            candidate = randint(min_data, max_data)
            while candidate in used:
                candidate = randint(min_data, max_data)
            used.add(candidate)
            bucket.append(candidate)
    return sorted(A), sorted(B)
def find_kth_element(A: list, B: list, k: int):
    '''Return the kth (1-based) smallest element of the union of A and B.

    Divide-and-conquer: each step discards about k//2 elements that are
    provably not the answer, so the recursion depth is O(log k).

    Parameters
    ----------
    A, B : list
        Sorted lists.
    k : int
        1-based rank of the element to find (1 <= k <= len(A) + len(B)).
    '''
    if(DEBUG):
        print(f"Finding {k}{get_ordinal(k)} smallest element")
        print_dataset(A, B)
    # Keep A as the (not longer) array so that i <= j below.
    if(len(A) > len(B)):
        A, B = B, A
    if(len(A) == 0):
        return B[k-1]
    if(k == 1):
        return min(A[0], B[0])
    i = min(len(A), k//2)
    j = min(len(B), k//2)
    # NOTE(review): the comparison uses B[i-1] where the textbook variant
    # compares against B[j-1]; for valid k (k <= len(A)+len(B)) the
    # discards below can be shown safe, but confirm callers never pass a
    # larger k.
    if(A[i-1] > B[i-1]):
        # Discard the j smallest elements of B; they rank below the answer.
        B = B[j:]
        return find_kth_element(A, B, k-j)
    else:
        # Discard the i smallest elements of A.
        A = A[i:]
        return find_kth_element(A, B, k-i)
def get_ordinal(k: int):
    '''Return the English ordinal suffix ("st"/"nd"/"rd"/"th") for *k*.

    Fix: the teens exception now applies to every number ending in
    11/12/13 (111, 212, ...) via ``k % 100``; the original only matched
    the literal values 11-13, producing e.g. "111st".
    '''
    if(k % 100 in (11, 12, 13) or k % 10 == 0 or k % 10 >= 4):
        return 'th'
    else:
        return ['st', 'nd', 'rd'][(k % 10)-1]
def print_dataset(A, B):
    '''Print both arrays, one per line, labelled "A:" and "B:".'''
    for label, data in (("A", A), ("B", B)):
        print(f"{label}: {data}")
def find_in(A, B):
    '''Interactive loop: repeatedly read k from stdin and report the kth
    smallest element of the union, until the user presses Ctrl-C.'''
    try:
        print_dataset(A, B)
        while True:
            k = int(input('K (Ctrl-c to break): '))
            # k must address an element of the union; assumes
            # len(A) == len(B), which holds for generate_data output.
            if(k > (len(A)*2)):
                print("ERROR")
                continue
            value = find_kth_element(A, B, k)
            print(f"Index {k}{get_ordinal(k)} is {value}")
    except KeyboardInterrupt:
        pass
if __name__ == "__main__":
    # Build a random dataset of the requested size, then query it.
    dataset_size = int(input("Input size of dataset: "))
    A, B = generate_data(n=dataset_size)
    find_in(A, B)
| StarcoderdataPython |
9606126 | from PreprocessData.all_class_files.Reservation import Reservation
import global_data
class FoodEstablishmentReservation(Reservation):
    """Reservation subclass adding start/end time and party size
    (appears to mirror the schema.org FoodEstablishmentReservation type).
    Generated accessor-style class; assignments are validated in
    __setattr__ against a global per-attribute type table."""
    def __init__(self, additionalType=None, alternateName=None, description=None, disambiguatingDescription=None, identifier=None, image=None, mainEntityOfPage=None, name=None, potentialAction=None, sameAs=None, url=None, bookingTime=None, broker=None, modifiedTime=None, priceCurrency=None, programMembershipUsed=None, provider=None, reservationFor=None, reservationId=None, reservationStatus=None, reservedTicket=None, totalPrice=None, underName=None, endTime=None, partySize=None, startTime=None):
        Reservation.__init__(self, additionalType, alternateName, description, disambiguatingDescription, identifier, image, mainEntityOfPage, name, potentialAction, sameAs, url, bookingTime, broker, modifiedTime, priceCurrency, programMembershipUsed, provider, reservationFor, reservationId, reservationStatus, reservedTicket, totalPrice, underName)
        self.endTime = endTime
        self.partySize = partySize
        self.startTime = startTime
    def set_endTime(self, endTime):
        self.endTime = endTime
    def get_endTime(self):
        return self.endTime
    def set_partySize(self, partySize):
        self.partySize = partySize
    def get_partySize(self):
        return self.partySize
    def set_startTime(self, startTime):
        self.startTime = startTime
    def get_startTime(self):
        return self.startTime
    def __setattr__(self, key, value_list):
        """Validate each element of *value_list* against the allowed type
        names for *key* in the global table; None values and the internal
        "node_id" key bypass validation.  Raises ValueError on mismatch."""
        if type(value_list).__name__ == "NoneType" or key == "node_id":
            self.__dict__[key] = value_list
            return
        for value in value_list:
            str_value = type(value).__name__
            if str_value not in global_data.get_table()[key]:
                raise ValueError("非法类型!")
        self.__dict__[key] = value_list
| StarcoderdataPython |
11210331 | from django.test import TestCase
from authenticate.models import User
from flight.models import Flight, Seat
class TestFlight(TestCase):
    """Model-level tests for the Flight and Seat Django models."""
    def setUp(self):
        # Fixture: one user who created one flight that has one seat.
        self.user = User.objects.create_user(
            email="<EMAIL>",
            password="<PASSWORD>#",
            date_of_birth="1900-11-19",
            username="testuser",
            first_name="test",
            last_name="user",
            gender="m",
            location="testlocation",
            phone="256799000101",
        )
        self.flight = Flight.objects.create(
            name="test flight",
            origin="test origin",
            destination="test destination",
            departure="2019-07-21T23:07:01.841121Z",
            arrival="2019-07-22T23:07:01.841121Z",
            aircraft="KLM",
            status="ON_TIME",
            created_by=self.user,
            number="KL8190",
            capacity=120,
        )
        self.seat = Seat.objects.create(
            seat_number="A7", flight=self.flight, is_available=True
        )
    def test_flight(self):
        # Exactly the one flight created in setUp exists.
        self.assertEqual(Flight.objects.count(), 1)
    def test_string_representation(self):
        self.assertEqual(str(self.flight), self.flight.name)
    def test_flight_seats(self):
        # Both the related manager and the flight_seats property see the seat.
        self.assertEqual(self.flight.seats.count(), 1)
        self.assertEqual(len(self.flight.flight_seats), 1)
    def test_seat_string_representation(self):
        # Seat repr format: "<flight>_<flight number>_<seat number>".
        self.assertEqual(
            str(self.seat),
            f"{self.flight}_{self.flight.number}_{self.seat.seat_number}",
        )
    def test_seat(self):
        self.assertEqual(Seat.objects.count(), 1)
| StarcoderdataPython |
192636 | # from util_content_hash import content_hash
#from python.common.util_mimetype import get_mimetype, mimetypes, validate_mimetype
#from schemas.schema_aligned_cfs import metadata
#from schemas.schema_floorplan_master_cfs import metadata_schema
# from . import cfs_schema
import copy
import hashlib
import io
import json
import struct
# import arrow
from typing import List
import datetime
import mimetypes
from cfs.cfs_base import CFS_Base
#x=mimetypes.
class CFS_Blob(object):
    """A single named binary entry destined for a CFS container.

    Captures the payload bytes plus bookkeeping (name, SHA-1 digest,
    mimetype and an arbitrary metadata dict) and can render its manifest
    entry.  The payload defaults to mimetype "application/octet-stream".
    """
    blob:bytes

    def __init__(self, *, name:str, content:bytes, mimetype:str, metadata:dict=None):
        assert isinstance(name, str)
        assert isinstance(content, bytes)
        if mimetype is not None:
            assert isinstance(mimetype, str)
        if metadata is not None:
            assert isinstance(metadata, dict)
        self._content = copy.copy(content)
        self._name = name
        self._sha1 = hashlib.sha1(content).hexdigest()
        self._mimetype = mimetype if mimetype is not None else "application/octet-stream"
        self._metadata = metadata if metadata is not None else {}

    @property
    def size(self):
        """Payload length in bytes."""
        return len(self._content)

    @property
    def name(self):
        return self._name

    @property
    def sha1(self):
        """Hex SHA-1 digest of the payload."""
        return self._sha1

    @property
    def mimetype(self):
        return self._mimetype

    @property
    def metadata(self):
        return self._metadata

    @property
    def content(self):
        return self._content

    @property
    def blob_manifest_entry(self):
        """Manifest dict for this blob: metadata, mimetype, sha1, name, size."""
        return dict(
            metadata=self.metadata,
            mimetype=self.mimetype,
            sha1=self.sha1,
            name=self.name,
            size=self.size,
        )
class CFS_Builder(object):
    """Assembles named CFS_Blob entries into one serialized CFS container.

    build() output layout:
        b"CFS" | sha1(inner, 20 raw bytes) | inner
    where inner is:
        uint32 unix timestamp | uint32 manifest length | manifest JSON |
        deduplicated, concatenated blob payloads
    The manifest records the file-system metadata, the sha1 of the
    payload area and, per blob, its sha1, offset, size, mimetype and
    metadata.
    """
    def __init__(self, metadata={}):
        self._blobs = dict()
        self._content_bytes = b''
        self._cfs_metadata = {}
        if metadata is not None:
            assert isinstance(metadata, dict)
            # Fix: copy the dict.  build() injects a "timestamp" key, and
            # the previous version stored the caller's dict (and, worse,
            # the shared mutable default) by reference, mutating both.
            self._cfs_metadata = dict(metadata)

    @staticmethod
    def wrap(cfs:CFS_Base):
        assert False, "NOT IMPLEMENTED"

    @property
    def cfs_metadata(self)->dict:
        """File-system level metadata embedded into the manifest."""
        return self._cfs_metadata

    @property
    def blob_paths(self)->List[str]:
        """Names of all blobs currently staged."""
        return list(self._blobs.keys())

    def get_bytes(self, name)->bytes:
        """Payload of blob *name*; raises KeyError if absent."""
        return copy.copy(self._blobs[name].content)

    def build(self)->bytes:
        """Serialize all staged blobs into the CFS byte format."""
        timestamp = datetime.datetime.now()
        timestamp_str = timestamp.isoformat()
        self._cfs_metadata["timestamp"] = timestamp_str
        content_block = io.BytesIO()
        blob_offsets = {}
        file_list = {}
        # Deduplicate payloads by sha1: identical contents are stored once
        # and every name referencing them shares the same offset/size.
        for name in self._blobs:
            blob = self._blobs[name]
            if blob._sha1 not in blob_offsets:
                offset = content_block.tell()
                content_block.write(blob.content)
                blob_offsets[blob._sha1] = dict(offset=offset, size=blob.size)
            file_list[name] = dict(
                sha1=blob._sha1,
                offset=blob_offsets[blob._sha1]['offset'],
                metadata=blob.metadata,
                mimetype=blob.mimetype,
                size=blob_offsets[blob._sha1]['size'])
        content_sha1 = hashlib.sha1(content_block.getvalue()).hexdigest()
        manifest = dict(timestamp=timestamp_str, metadata=self.cfs_metadata,
                        sha1=content_sha1, blobs=file_list,
                        size=len(content_block.getvalue()))
        # cfs_schema.validate(manifest)
        manifest_bytes = json.dumps(manifest).encode()
        # Inner block: packed timestamp + manifest length + manifest + data.
        cfs_full_content = io.BytesIO()
        cfs_full_content.write(struct.pack("I", int(timestamp.timestamp())))
        cfs_full_content.write(struct.pack("I", len(manifest_bytes)))
        cfs_full_content.write(manifest_bytes)
        cfs_full_content.write(content_block.getvalue())
        outer_sha1 = hashlib.sha1(cfs_full_content.getvalue())
        # Outer envelope: magic + raw digest of everything that follows.
        output_str = io.BytesIO()
        output_str.write(b"CFS")
        output_str.write(outer_sha1.digest())
        output_str.write(cfs_full_content.getvalue())
        return output_str.getvalue()

    def add(self, *, blob:CFS_Blob):
        """Stage an already-constructed blob, replacing any with the same name."""
        self._content_bytes = b''  # invalidate any cached serialization
        self._blobs[blob.name] = blob

    def add_blob(self, name:str, val:bytes, metadata:dict={}, mimetype:str=None):
        """Stage raw bytes *val* under *name*; returns self for chaining."""
        assert isinstance(val, bytes)
        assert isinstance(name, str)
        assert isinstance(metadata, dict)
        if mimetype is None:
            # Fix: was the invalid "/octet-stream" (missing major type),
            # inconsistent with CFS_Blob's own default.
            mimetype = "application/octet-stream"
        #validate_mimetype(mimetype) # raises a ValueError Exception
        self._content_bytes = b''  # invalidate any cached serialization
        self._blobs[name] = CFS_Blob(name=name, content=val, mimetype=mimetype, metadata=metadata)
        return self

    def merge(self, rhs:'CFS_Builder')->'CFS_Builder':
        """Copy every blob from *rhs* into this builder.

        Blobs with the same name are overwritten; rhs file-system
        metadata is NOT copied over.
        """
        for blob_path in rhs._blobs:
            rhs_blob = rhs._blobs[blob_path]
            assert isinstance(rhs_blob, CFS_Blob)
            # Fix: the previous version keyed the dict by the blob OBJECT
            # (self._blobs[rhs_blob] = rhs_blob) instead of its name, so
            # merged blobs never appeared under their path.
            self._blobs[blob_path] = rhs_blob

    @property
    def blob_manifest(self):
        """Mapping of blob name -> its manifest entry."""
        r = {}
        for blob in self._blobs.values():
            r[blob.name] = blob.blob_manifest_entry
        return r

    @property
    def manifest(self):
        """File-system metadata plus the per-blob manifest."""
        return dict(metadata=self._cfs_metadata, blob_manifest=self.blob_manifest)
1844827 | from genericpath import exists
import os
import configparser
from sqlite3 import Connection, OperationalError
import sqlite3
import sys
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(PROJECT_ROOT)
from pytest import raises
from lib.tools import check_ini_files_and_return_config_object, initialize_db
from lib.tools import create_main_variables_from_config, backup_in_memory_db_to_disk
from lib.tools import get_current_session, get_queries, brand_query
import logging
def init_ok():
    """Bootstrap smoke test: parse the ini file, extract every required
    setting and load all data files into an in-memory SQLite database.

    Returns [[conn], [config], variables_from_ini_in_list].
    """
    # Firstly, check if the inifile exists and has all the required keys
    config = check_ini_files_and_return_config_object('map_indicators.ini')[0]
    assert isinstance(config, configparser.ConfigParser)
    assert 'Main' in config
    #Secondly, check if all the required files exists
    maindir, separator, file_ext, iniFilesDir, prefix, context, backup_name, log_level = str(), str(), str(), str(), str(), str(), str(), str()
    retailers, tables, toolkit_tables = list(), list(), list()
    retailers_tables = dict()
    variables_from_ini_in_list = create_main_variables_from_config([config])
    assert variables_from_ini_in_list is not None
    maindir, separator, retailers, tables, retailers_tables, toolkit_tables, file_ext, iniFilesDir, prefix, context, backup_name, log_level = variables_from_ini_in_list
    # Every extracted setting must be present and non-empty.
    assert maindir is not None
    logger.info('content of variable maindir : {v}'.format(v=maindir))
    assert len(maindir) > 0
    logger.info('content of variable iniFilesDir : {v}'.format(v=iniFilesDir))
    assert len(iniFilesDir) > 0
    logger.info('content of variable separator : {v}'.format(v=separator))
    assert len(separator) > 0
    logger.info('content of variable retailers : {v}'.format(v=retailers))
    assert len(retailers) > 0
    logger.info('content of variable tables : {v} '.format(v=tables))
    assert len(tables) > 0
    logger.info('content of variable retailers_tables : {v} '.format(v=retailers_tables))
    assert len(retailers_tables) > 0
    logger.info('content of variable toolkit_tables : {v}'.format(v=toolkit_tables))
    assert len(toolkit_tables) > 0
    logger.info('content of variable file_ext : {v}'.format(v=file_ext))
    assert len(file_ext) > 0
    logger.info('content of variable prefix : {v}'.format(v=prefix))
    assert len(prefix) > 0
    logger.info('content of variable context : {v}'.format(v=context))
    assert len(context) > 0
    logger.info('content of variable backup_name : {v}'.format(v=backup_name))
    assert len(backup_name) > 0
    logger.info('content of variable log_level : {v}'.format(v=log_level))
    assert len(log_level) > 0
    #Thirdly, load all the files in the database
    conn = initialize_db(':memory:', retailers, retailers_tables, toolkit_tables, file_ext, iniFilesDir)[0]
    assert conn is not None
    cur = conn.cursor()
    cur.execute("select * from path")
    # The loader must have populated at least the "path" table.
    assert len(cur.fetchall()) > 0
    return [[conn], [config], variables_from_ini_in_list]
def backup(conninlist: list, variables_from_ini_in_list: list) -> str:
    """Persist the in-memory DB to a backup file inside the current
    session directory and verify the copy is readable.

    NOTE(review): the annotation says ``-> str`` but a 2-tuple
    (backup_path, backup_full_path_name) is returned.
    """
    conn = conninlist[0]
    maindir, separator, retailers, tables, retailers_tables, toolkit_tables, file_ext, iniFilesDir, prefix, context, backup_name, log_level = variables_from_ini_in_list
    # Time to save a backup of the database
    current_session, current_date = get_current_session(maindir, prefix, context, separator)
    assert current_session is not None
    assert current_date is not None
    logger.info('content of variable current_session : {v}'.format(v=current_session))
    backup_full_path_name = current_session + os.path.sep + backup_name
    backup_path = current_session + os.path.sep
    conn_backup = backup_in_memory_db_to_disk([conn], backup_full_path_name)[0]
    # Re-open the on-disk copy and confirm it contains the "path" table.
    cur_backup = conn_backup.cursor()
    cur_backup.execute("select * from path")
    assert len(cur_backup.fetchall()) > 0
    conn_backup.close()
    return backup_path, backup_full_path_name
def check_queries(conninlist: list, configinlist: list, backup_path: str) -> None:
    """Verify each named query from the ini file returns rows once branded
    for the sample retailer 'jules'.

    NOTE(review): *backup_path* is currently unused.
    """
    #check if the queries are present in the inifile
    all_queries_in_a_dict = dict()
    all_queries_in_a_dict = get_queries(configinlist)
    assert all_queries_in_a_dict is not None
    variables_from_ini_in_list = create_main_variables_from_config(configinlist)
    maindir, separator, retailers, tables, retailers_tables, toolkit_tables, file_ext, iniFilesDir, prefix, context, backup_name, log_level = variables_from_ini_in_list
    # Brand and run each of the four known queries; all must return rows.
    branded_query = brand_query(all_queries_in_a_dict['connected_at_least_once'], tables, 'jules', separator)
    cur = conninlist[0].cursor()
    assert len(cur.execute(branded_query).fetchall()) > 0
    branded_query = brand_query(all_queries_in_a_dict['connected_at_least_once_v2'], tables, 'jules', separator)
    assert len(cur.execute(branded_query).fetchall()) > 0
    branded_query = brand_query(all_queries_in_a_dict['request_history'], tables, 'jules', separator)
    assert len(cur.execute(branded_query).fetchall()) > 0
    branded_query = brand_query(all_queries_in_a_dict['request_history_v2'], tables, 'jules', separator)
    assert len(cur.execute(branded_query).fetchall()) > 0
if __name__=="__main__":
    # Configure a dedicated DEBUG-level file logger, run the three test
    # stages in order, then release the log handler.
    logger = logging.getLogger('map_indicator_app')
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler('map_indicator.log')
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.info('Start of the test suite')
    conninlist, configinlist, variables_from_ini_in_a_list = init_ok()
    backup_path, backup_full_path_name = backup(conninlist, variables_from_ini_in_a_list)
    check_queries(conninlist, configinlist, backup_path)
    fh.close()
| StarcoderdataPython |
3467464 | from .containers.spectrum import Spectrum
from .containers.collection import Collection, df_to_collection, proximal_join
from .readers import read
# __all__ = ['spectrum', 'collection', 'reader']
| StarcoderdataPython |
3564656 | <filename>Website-Status.py
#!/usr/bin/python
# -*- coding: UTF-8 -*
import sys
import requests
import urllib
from colorama import Fore, Back, Style
# NOTE(review): Python 2 script (print statements, urllib.urlopen).
if len(sys.argv)==1:
    # No domain argument given on the command line.
    print "Invalid domain"
else:
    try:
        # Reachability check: any successful HTTP open counts as "Up".
        urllib.urlopen('http://' + sys.argv[1])
        print( u"\u2713" +" Up")
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt;
        # consider catching IOError explicitly.
        print( u"\u2718" +" Down")
| StarcoderdataPython |
5065558 | import http.client
import http.cookiejar
import json
import random
import re
import ssl
import time
import unittest
from xml.dom import minidom
import logging
logger = logging.getLogger(__name__)
__unittest = True
PROXYHOST = ""
PROXYPORT = ""
PROXYHTTPSPORT = ""
'''
Based on https://stackoverflow.com/questions/61280350/how-to-set-the-sni-via-http-client-httpsconnection
'''
class WrapSSSLContext(ssl.SSLContext):
    """
    HTTPSConnection provides no way to specify the
    server_hostname in the underlying socket. We
    accomplish this by wrapping the context to
    override the wrap_socket behavior (called later
    by HTTPSConnection) to specify the
    server_hostname that we want.
    """
    def __new__(cls, server_hostname, *args, **kwargs):
        # NOTE(review): `*kwargs` unpacks only the dict's KEYS as positional
        # arguments — almost certainly meant `**kwargs`. Harmless while
        # callers pass no keyword arguments; confirm before changing.
        return super().__new__(cls, *args, *kwargs)
    def __init__(self, server_hostname, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Remembered hostname, forced into every wrap_socket() call below.
        self._server_hostname = server_hostname
    def wrap_socket(self, sock, *args, **kwargs):
        # Override whatever server_hostname the caller supplied.
        kwargs['server_hostname'] = self._server_hostname
        return super().wrap_socket(sock, *args, **kwargs)
class CacheUnitTest(unittest.TestCase):
"""
Basis class common to all CMS and tests
"""
def setUp(self, proxy, port, website, https=False):
self.headers = None
self.proxies = None
if proxy:
self.proxies = {"http": proxy, "https": proxy}
self.proxy = proxy
if port:
self.port = port
else:
if not https:
self.port = 80
else:
self.port = 443
if website:
self.website = website
self.headers = {'Host': self.website}
self.https = https
self.response_content = ""
self.response_content_bytes = 2048
# Add specific header
def set_header(self, header_value):
self.headers.update(header_value)
def purge_request(self, url):
return self.request("PURGE", url)
def get_request(self, url):
return self.request("GET", url)
def request(self, method, url):
# Recoding get request to allow proxy
if not self.https:
logging.debug("Establishing HTTP Connection with {} on port {}".format(self.proxy, self.port))
conn = http.client.HTTPConnection(self.proxy, self.port)
else:
proxy_to_server_context = WrapSSSLContext(self.proxy)
logging.debug("Establishing HTTPS Connection with {} on port {}".format(self.proxy, self.port))
conn = http.client.HTTPSConnection(self.proxy, self.port, context=proxy_to_server_context)
logging.debug("Pushing request on url {} with method {}".format(url, method))
conn.putrequest(method, url, skip_host=True)
for header in self.headers.keys():
logging.debug("Specifying header {} with value {}".format(str(header), self.headers.get(header)))
conn.putheader(str(header), str(self.headers.get(header)))
conn.endheaders()
response = conn.getresponse()
self.response_header = response.headers
self.response_content = str(response.read(self.response_content_bytes))
conn.close()
return response
def build_url(self, path):
"""
Construct an absolute url by appending a path to a domain.
"""
return 'http://%s%s' % (self.website, path)
def get_once(self, url, needpurge=False, **kwargs):
if needpurge:
self.purge_request(url)
response = self.get_request(url)
return response
def get_twice(self, url, **kwargs):
"""
Fetch a url twice and return the second response (for testing cache hits).
"""
self.get_request(url)
# time.sleep(2)
response = self.get_request(url)
return response
def get_twice_tokenized(self, url, tokenname=None, **kwargs):
"""
Fetch a url twice with two different tokens and return the 2nd response
"""
if tokenname is not None:
token = tokenname + "=" + str(random.randint(10000, 999999))
else:
token = str(random.randint(10000, 999999))
# print("url1: " + url + "?" + token)
self.get_request(url + "?" + token)
if tokenname is not None:
token = tokenname + "=" + str(random.randint(10000, 999999))
else:
token = str(random.randint(10000, 999999))
# print("url2: " + url + "?" + token)
response = self.get_request(url + "?" + token)
return response
def purgeandget_twice(self, url, **kwargs):
"""
Fetch a url twice and return the second response (for testing cache hits).
"""
self.purge_request(url)
time.sleep(1)
self.get_request(url)
time.sleep(2)
response = self.get_request(url)
return response
"""
Assertions
"""
def assertHit(self, response):
"""
Assert that a given response contains the header indicating a cache hit.
"""
self.assertEqual(str(response.headers['X-Cache']).lower(), 'HIT'.lower(),
msg='Uncached while cache was expected')
def assertMiss(self, response):
"""
Assert that a given response contains the header indicating a cache miss.
"""
self.assertEqual(str(response.headers['X-Cache']).lower(), 'miss'.lower())
def assertPass(self, response):
"""
Assert that a given response contains the header indicating a pass.
"""
self.assertEqual(str(response.headers['X-Cache']).lower(), 'pass'.lower())
def assertSynth(self, response):
"""
Assert that a given response contains the header indicating a pass.
"""
self.assertEqual(str(response.headers['X-Cache']).lower(), 'synth'.lower())
def assertMaxAge(self, response, value):
"""
Assert that a given response contains the header indicating specific "max-age" value.
"""
max_age_regex = re.compile('max-age\s*=\s*(\d+)')
try:
cache_control = response.headers['cache-control']
except KeyError:
try:
cache_control = response.headers['Cache-Control']
except:
raise AssertionError('No cache-control header.')
max_age = max_age_regex.match(cache_control)
if not max_age:
raise AssertionError('No max-age specified in cache-control header.')
self.assertEqual(int(max_age.group(1)), value)
def assert200(self, response):
# Ok
self.assertEqual(response.status, 200)
def assert30X(self, response):
self.assertRegex(str(response.status), '30?')
def assert301(self, response):
# Permanent redirect
self.assertEqual(response.status, 301)
def assert302(self, response):
# Temporary redirect
self.assertEqual(response.status, 302)
def assert304(self, response):
# Not modified
self.assertEqual(response.status, 304)
def assert40X(self, response):
self.assertRegex(str(response.status), '40?')
def assert400(self, response):
# Bad Request
self.assertEqual(response.status, 400)
def assert401(self, response):
# Unauthorized
self.assertEqual(response.status, 401)
def assert403(self, response):
# Forbidden
self.assertEqual(response.status, 403)
def assert404(self, response):
# Not found
self.assertEqual(response.status, 404)
def assert405(self, response):
# Method Not allowed
self.assertEqual(response.status, 405)
def assert50X(self, response):
# Method Not allowed
self.assertRegex(str(response.status), '50?')
def assertBackend(self, response, backend):
self.assertEqual(str(response.headers['X-Back']).lower(), backend.lower())
def assertRedirectURL(self, response, url):
self.assertEqual(str(response.headers['location']).lower(), url.lower())
def assertValidJSON(self, response):
try:
logging.debug("Parsing response {} first {} bytes, expecting valid JSON".format(self.response_content,
self.response_content_bytes))
json_object = json.loads(self.response_content)
return True
except ValueError as error:
return False
def assertValidXML(self, response):
try:
logging.debug("Parsing response {} first {} bytes, expecting valid JSON".format(self.response_content,
self.response_content_bytes))
minidom.parseString(self.response_content)
return True
except ValueError as error:
return False
| StarcoderdataPython |
import tkinter as tk

# Minimal GUI: a single empty window; mainloop() blocks until it is closed.
main_window = tk.Tk()
main_window.title("Hallo Welt!")
main_window.mainloop()
11231352 | # -*- coding:utf-8 -*-
from torcms.core import tools
from torcms.model.entity_model import MEntity
class TestMEntity():
    """Tests for the MEntity CRUD helpers; most cases only verify that the
    call completes without raising (hence the bare ``assert True``)."""
    def setup(self):
        # Runs before each test case; prepares a fresh uid and test path.
        print('setup 方法执行于本类中每条用例之前')
        self.uid = tools.get_uu4d()
        self.path = 'path'
    def test_create_entity(self):
        # Creation should succeed; tearDown removes the created entity.
        uid = self.uid
        post_data = {
            'path': self.path,
        }
        MEntity.create_entity(uid, post_data['path'])
        assert True
        self.tearDown()
    def test_create_entity_2(self):
        '''Lookup by path: empty or unregistered paths must resolve to None.'''
        post_data = {
            'path': '',
        }
        uu = MEntity.get_id_by_impath(post_data['path'])
        assert uu is None
        post_data = {
            'path': self.path,
        }
        uu = MEntity.get_id_by_impath(post_data['path'])
        assert uu is None
        self.tearDown()
    def test_get_by_uid(self):
        # Smoke test: lookup by uid must not raise.
        MEntity.get_by_uid(self.uid)
        assert True
    def test_query_all(self):
        MEntity.query_all()
        assert True
    def test_get_by_kind(self):
        MEntity.get_by_kind('2')
        assert True
    def test_get_all_pager(self):
        MEntity.get_all_pager()
        assert True
    def test_get_id_by_impath(self):
        MEntity.get_id_by_impath(self.path)
        assert True
    def test_total_number(self):
        MEntity.total_number()
        assert True
    def test_delete(self):
        MEntity.delete(self.uid)
        assert True
    def test_delete_by_path(self):
        MEntity.delete_by_path(self.path)
        assert True
    def tearDown(self):
        # Cleanup: remove any entity registered under self.path.
        print("function teardown")
        tt = MEntity.get_id_by_impath(self.path)
        if tt:
            MEntity.delete(tt.uid)
| StarcoderdataPython |
3240734 | <filename>tests/test_beam.py
from __future__ import absolute_import, division, print_function
import os
import dxtbx
from dxtbx.model.beam import BeamFactory
def test_beam():
    """Smoke-test BeamFactory.imgCIF on a CBF image bundled with dxtbx."""
    image_path = os.path.join(dxtbx.__path__[0], "tests", "phi_scan_001.cbf")
    assert BeamFactory.imgCIF(image_path)
| StarcoderdataPython |
9636389 | """
This is the init file for WindSE. It handle importing all the
submodules and initializing the parameters.
"""
import os
import __main__
### Get the name of program importing this package ###
# Interactive sessions (e.g. IPython) have no __file__ on __main__.
main_file = (os.path.basename(__main__.__file__)
             if hasattr(__main__, "__file__") else "ipython")
from windse.ParameterManager import windse_parameters
def initialize(loc,updated_parameters=[]):
    """
    This function initializes all the submodules in WindSE.

    Submodule classes are imported lazily here (not at package import time)
    and re-exported as module-level globals, because the choice of helper
    implementation depends on the loaded parameters.

    Args:
        loc (str): This string is the location of the .yaml parameters file.
        updated_parameters (list): Parameter overrides forwarded to
            windse_parameters.Load. (Mutable default is only read here,
            never mutated.)
    """
    windse_parameters.Load(loc,updated_parameters=updated_parameters)
    # Use the dolfin_adjoint-aware helpers when adjoint mode is requested,
    # or when building docs / running as a module (presumably so the
    # adjoint-decorated API is documented — confirm).
    global BaseHeight, CalculateDiskTurbineForces, UpdateActuatorLineForce, RadialChordForce, Optimizer#, ReducedFunctional
    if windse_parameters["general"].get("dolfin_adjoint", False) or main_file in ["sphinx-build", "__main__.py"]:
        from windse.dolfin_adjoint_helper import BaseHeight, CalculateDiskTurbineForces, UpdateActuatorLineForce, RadialChordForce#, ReducedFunctional
        from windse.OptimizationManager import Optimizer
    else:
        from windse.helper_functions import BaseHeight, CalculateDiskTurbineForces, UpdateActuatorLineForce, RadialChordForce
    # Remaining submodules are unconditional: domains, farms, refinement,
    # function spaces, boundaries, problems and solvers.
    global BoxDomain, CylinderDomain, CircleDomain, RectangleDomain, ImportedDomain, InterpolatedCylinderDomain, InterpolatedBoxDomain, PeriodicDomain
    from windse.DomainManager import BoxDomain, CylinderDomain, CircleDomain, RectangleDomain, ImportedDomain, InterpolatedCylinderDomain, InterpolatedBoxDomain, PeriodicDomain
    global GridWindFarm, RandomWindFarm, ImportedWindFarm, EmptyWindFarm
    from windse.WindFarmManager import GridWindFarm, RandomWindFarm, ImportedWindFarm, EmptyWindFarm
    global RefineMesh, WarpMesh
    from windse.RefinementManager import RefineMesh, WarpMesh
    global LinearFunctionSpace, TaylorHoodFunctionSpace
    from windse.FunctionSpaceManager import LinearFunctionSpace, TaylorHoodFunctionSpace
    global PowerInflow, UniformInflow, LogLayerInflow, TurbSimInflow
    from windse.BoundaryManager import PowerInflow, UniformInflow, LogLayerInflow, TurbSimInflow
    global StabilizedProblem, TaylorHoodProblem, IterativeSteady, UnsteadyProblem
    from windse.ProblemManager import StabilizedProblem, TaylorHoodProblem, IterativeSteady, UnsteadyProblem
    global SteadySolver, IterativeSteadySolver, UnsteadySolver, MultiAngleSolver, TimeSeriesSolver
    from windse.SolverManager import SteadySolver, IterativeSteadySolver, UnsteadySolver, MultiAngleSolver, TimeSeriesSolver
| StarcoderdataPython |
44146 | <gh_stars>10-100
import collections
class Solution:
    def largestMultipleOfThree(self, digits: List[int]) -> str:
        """Concatenate a subset of *digits* (in descending order) into the
        largest number divisible by 3; return it as a string, '' if none."""
        freq = collections.Counter(digits)
        mod1_avail = freq[1] + freq[4] + freq[7]
        mod2_avail = freq[2] + freq[5] + freq[8]
        remainder = sum(digits) % 3
        if remainder == 1:
            # Drop one digit that is 1 (mod 3), or failing that two that are 2.
            if mod1_avail > 0:
                mod1_avail -= 1
            else:
                mod2_avail -= 2
        elif remainder == 2:
            # Symmetric case.
            if mod2_avail > 0:
                mod2_avail -= 1
            else:
                mod1_avail -= 2
        pieces = []
        for digit in range(9, -1, -1):
            take = freq[digit]
            if digit % 3 == 1:
                take = min(take, mod1_avail)
                mod1_avail -= take
            elif digit % 3 == 2:
                take = min(take, mod2_avail)
                mod2_avail -= take
            pieces.append(str(digit) * take)
        number = ''.join(pieces)
        # Collapse results like '000' to a single '0'.
        return '0' if number.startswith('0') else number
| StarcoderdataPython |
364579 | <gh_stars>0
# All members to be imported
from api_view import APIView, route, api_action
from error import APIError
from schema_mixin import SchemaMixin
# Miscellaneous
SUCCESS_RESP = {"status": "success"}
| StarcoderdataPython |
5123734 | from time import time
from rich import print
t = time()
def len(l, bnH):
    """Return True when l**2 + bnH**2 is a perfect square.

    NOTE(review): the name shadows the builtin len(); kept because the
    search loop below calls it by this name.
    """
    from math import isqrt  # local import: exact integer sqrt, no float error
    x = l ** 2 + bnH ** 2
    # isqrt avoids the precision loss of int(x ** 0.5) for large x.
    return x == isqrt(x) ** 2
# Grow the largest cuboid side M until the running count of cuboids with an
# integer shortest surface path exceeds one million (seeded at ans = 1975).
ans = 1975
M = 100
while ans < 1_000_000:
    for bnH in range(3, 2 * M):
        if not len(M, bnH):  # `len` is the shadowing helper defined above
            continue
        if bnH > M:
            extra = M - bnH // 2 + 1 if bnH % 2 == 0 else M - bnH // 2
        else:
            extra = bnH // 2
        ans += extra
    M += 1
print(f"M = {M-1},\nTotal solution cuboids = {ans},\nTime Taken: {time() - t} seconds")
| StarcoderdataPython |
98065 | class Solution:
def kLengthApart(self, nums: List[int], k: int) -> bool:
| StarcoderdataPython |
95529 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import datacatalog
from google.protobuf import timestamp_pb2
import six
class BaseTagFactory:
    """Shared helpers for building Data Catalog tags: each _set_*_field
    method creates a typed TagField and attaches it to ``tag.fields`` under
    *field_id*, skipping absent values."""
    __UTF8_CHARACTER_ENCODING = 'UTF-8'
    # String field values are limited to 2000 bytes size when encoded in UTF-8.
    __STRING_VALUE_UTF8_MAX_LENGTH = 2000
    # Bytes reserved for the '...' suffix appended to truncated values.
    __SUFFIX_CHARS_LENGTH = 3
    @classmethod
    def _set_bool_field(cls, tag, field_id, value):
        """Attach a bool TagField; skipped when value is None."""
        if value is not None:
            bool_field = datacatalog.TagField()
            bool_field.bool_value = value
            tag.fields[field_id] = bool_field
    @classmethod
    def _set_double_field(cls, tag, field_id, value):
        """Attach a double TagField; skipped when value is None."""
        if value is not None:
            double_field = datacatalog.TagField()
            double_field.double_value = value
            tag.fields[field_id] = double_field
    @classmethod
    def _set_string_field(cls, tag, field_id, value):
        """
        String field values are limited by Data Catalog API at 2000 chars
        length when encoded in UTF-8. UTF-8 chars may need from 1 to 4 bytes
        (https://en.wikipedia.org/wiki/UTF-8 for details):
        - the first 128 characters (US-ASCII) need one byte;
        - the next 1,920 characters need two bytes to encode, which covers the
        remainder of almost all Latin-script alphabets, and also Greek,
        Cyrillic, Coptic, Armenian, Hebrew, Arabic, Syriac, Thaana and N'Ko
        alphabets, as well as Combining Diacritical Marks;
        - three bytes are needed for characters in the rest of the Basic
        Multilingual Plane, which contains virtually all characters in common
        use, including most Chinese, Japanese and Korean characters;
        - four bytes are needed for characters in the other planes of Unicode,
        which include less common CJK characters, various historic scripts,
        mathematical symbols, and emoji (pictographic symbols).
        Given a value and a string Tag Field, this method assigns the field the
        value. Before assigning it checks the value's UTF-8 byte-size and
        truncates if needed. When it happens, 3 periods are appended to the
        result string so users will know it's different from the original
        value.
        """
        # Skipped for non-strings and for empty strings (falsy values).
        if not (value and isinstance(value, six.string_types)):
            return
        encoding = cls.__UTF8_CHARACTER_ENCODING
        max_length = cls.__STRING_VALUE_UTF8_MAX_LENGTH
        suffix_length = cls.__SUFFIX_CHARS_LENGTH
        encoded = value.encode(encoding)
        # the max length supported is stored at max_length
        # we leave some chars as the suffix_length to be used when
        # creating the new string, so this line truncates the existing string.
        truncated_string_field = encoded[:max_length - suffix_length]
        # 'ignore' drops any trailing partial multi-byte sequence left by the
        # byte-level cut, so decoding never raises.
        decoded = u'{}...'.format(
            truncated_string_field.decode(
                encoding,
                'ignore')) if len(encoded) > max_length else encoded.decode(
            encoding, 'ignore')
        string_field = datacatalog.TagField()
        string_field.string_value = decoded
        tag.fields[field_id] = string_field
    @classmethod
    def _set_timestamp_field(cls, tag, field_id, value):
        """Attach a timestamp TagField from a datetime; skipped when falsy."""
        if value:
            timestamp = timestamp_pb2.Timestamp()
            timestamp.FromDatetime(value)
            timestamp_field = datacatalog.TagField()
            timestamp_field.timestamp_value = timestamp
            tag.fields[field_id] = timestamp_field
| StarcoderdataPython |
83230 | import uuid
import datetime
from typing import List, Union, Dict
from plugins.adversary.app.engine.database import EncryptedDictField
from plugins.adversary.app.engine.objects import Log
from plugins.adversary.app.util import tz_utcnow
version = 1.1
class Operation(dict):
    """BSF operation node: a UUID-identified container of step ids."""
    def __init__(self):
        super().__init__()
        self.update(id=str(uuid.uuid4()), steps=[], nodetype='operation')
class AttackReference(dict):
    """Reference to an ATT&CK technique plus the tactics it serves."""
    def __init__(self, technique_id, technique_name, tactics):
        super().__init__()
        self.update({'technique_id': technique_id,
                     'technique_name': technique_name,
                     'tactic': tactics})
class Step(dict):
    """BSF step node grouping events under ATT&CK technique references."""
    def __init__(self, attack_info: List[AttackReference], dest_hosts: List[str] = None, description: str = None):
        super().__init__()
        # The first technique (when present) acts as the step's key technique.
        first_technique = attack_info[0]['technique_id'] if len(attack_info) else None
        self.update({
            'id': str(uuid.uuid4()),
            'nodetype': 'step',
            'attack_info': attack_info,
            'events': [],
            'key_technique': first_technique,
            'key_event': None,
            'host': None,
            'time': None,
        })
        # Optional fields are only present when supplied.
        if dest_hosts is not None:
            self['dest_hosts'] = dest_hosts
        if description is not None:
            self['description'] = description
class Event(dict):
    """Generic BSF event node (a dict with a fixed key layout).

    ``happened_after`` marks when the action started; ``end()`` stamps
    ``happened_before`` and returns the event only if it succeeded.
    """
    def __init__(self, obj, action, host, start_time=None, fields=None):
        # Resolve defaults lazily (avoids mutable default arguments).
        if start_time is None:
            start_time = tz_utcnow().isoformat()
        if fields is None:
            fields = {}
        super().__init__()
        self.update({
            'id': str(uuid.uuid4()),
            'nodetype': 'event',
            'host': host,
            'object': obj,
            'action': action,
            'happened_after': start_time,
        })
        self.update(**fields)
    def end(self, successful):
        """Close the event; return it when *successful*, else None."""
        self['happened_before'] = tz_utcnow().isoformat()
        if not successful:
            return None
        return self
class ProcessEvent(Event):
    """Event for a process *action* (default 'create') on *host*."""
    def __init__(self, host, ppid, pid, command_line, action='create'):
        super().__init__(
            "process", action, host,
            fields={'fqdn': host, 'ppid': ppid, 'pid': pid,
                    'command_line': command_line})
class FileEvent(Event):
    """Event for a file *action* (default 'create') at *file_path* on *fqdn*."""
    def __init__(self, fqdn, file_path, action='create'):
        super().__init__('file', action, fqdn,
                         fields={'fqdn': fqdn, 'file_path': file_path})
class CredentialDump(Event):
    """Credential dump event; not attached to the active step
    (see BSFEmitter.add_event)."""
    def __init__(self, fqdn, pid, typ, usernames):
        super().__init__('cred', 'dump', fqdn,
                         fields={'fqdn': fqdn, 'pid': pid, 'type': typ,
                                 'usernames': usernames})
class RegistryEvent(Event):
    """Event for a registry *action* (default 'add') on key/value/data."""
    def __init__(self, fqdn, key, data, value, action="add"):
        super().__init__('registry', action, fqdn,
                         fields={'fqdn': fqdn, 'key': key, 'value': value,
                                 'data': data})
class ProcessOpen(Event):
    """Event for a process (actor_pid) opening *file_path* on *fqdn*."""
    def __init__(self, fqdn, file_path, actor_pid):
        super().__init__('process', 'open', fqdn,
                         fields={'fqdn': fqdn, 'file_path': file_path,
                                 'actor_pid': actor_pid})
class BSFEmitter(object):
    """Emits BSF operation/step/event nodes into a Log's event stream."""
    def __init__(self, log: Log):
        """
        An object that handles emitting BSF events
        Args:
            log: the log to emit log entries to
        """
        self.log = log
        self.is_done = False
        # Nodes are encrypted before being appended to the stream.
        self.encrypt = EncryptedDictField.encrypt_dict
    def append_to_log_stream(self, bsf_node):
        # Encrypt the node and push it onto the persistent event stream.
        enc = self.encrypt(bsf_node)
        self.log.modify(push__event_stream=enc)
    def start_operation(self):
        # Begin a fresh operation; its steps are registered via add_step().
        self.log.modify(active_operation=Operation())
    def _pick_step_key_event(self) -> Union[Dict, None]:
        """
        Select a key event from the active step's events and return the full
        event dict (not just its id), or None when the step has no events.
        Preference order: first process:create, then first file:create,
        then the first event of any kind.
        """
        if not len(self.log.active_step['events']):
            return None
        events = list(filter(lambda e: e['id'] in self.log.active_step['events'], self.log.event_stream))
        new_files = list(filter(lambda e: e['object'] == 'file' and e['action'] == 'create', events))
        new_processes = list(filter(lambda e: e['object'] == 'process' and e['action'] == 'create', events))
        if new_processes:
            # Prefer the first process:create
            return new_processes[0]
        elif new_files:
            # If there are no process:create events, then prefer the first file:create
            return new_files[0]
        elif events:
            # just get the first event if there is one
            return events[0]
        # Implicitly returns None when no referenced events were found.
    @staticmethod
    def _avg_time(happened_before: str, happened_after: str):
        # Midpoint between the two ISO-format timestamps, as an ISO string.
        before = datetime.datetime.fromisoformat(happened_before)
        after = datetime.datetime.fromisoformat(happened_after)
        return (before + (after - before) / 2).isoformat()
    def _push_active_step(self):
        # Stamp the active step with its key event id/host/midpoint time,
        # then emit the step node itself.
        key_event = self._pick_step_key_event()
        if key_event:
            avg_key_time = self._avg_time(key_event['happened_before'], key_event['happened_after'])
            self.log.modify(active_step__key_event=key_event['id'],
                            active_step__host=key_event['host'],
                            active_step__time=avg_key_time)
        self.append_to_log_stream(self.log.active_step)
    def add_step(self, step: Step):
        # Flush the previous step (if it recorded any events) before
        # registering and activating the new one.
        if self.log.active_step and len(self.log.active_step['events']) > 0:
            self._push_active_step()
        self.log.modify(push__active_operation__steps=step['id'])
        self.log.modify(active_step=step)
    def add_event(self, event):
        # Credential dumps are emitted but not linked to the active step.
        if not isinstance(event, CredentialDump):
            self.log.modify(push__active_step__events=event['id'])
        self.append_to_log_stream(event)
    def done(self):
        """Flush the active step and operation; safe to call repeatedly."""
        if self.is_done:
            # This BSF Log has already been marked done.
            return
        if self.log.active_step:
            self._push_active_step()
        if self.log.active_operation:
            self.append_to_log_stream(self.log.active_operation)
        self.is_done = True
| StarcoderdataPython |
3425106 | import csv
import random
days = 31  # calendar days generated (2020-01-01 .. 2020-01-31)
orders = 100  # random rows generated per day
def randomTime():
    """Return a uniformly random 24-hour clock time as zero-padded 'HH:MM'."""
    hrs = random.randint(0, 23)
    mins = random.randint(0, 59)
    # f-string zero-padding replaces the manual "0" + str(...) branches.
    return f"{hrs:02d}:{mins:02d}"
with open('database.csv', 'w', newline='') as csvfile:
    # One header row, then `orders` random rows for each of `days` days.
    writer = csv.DictWriter(
        csvfile,
        fieldnames=['id', 'date', 'time', 'milk', 'sugar', 'coffee', 'water'])
    writer.writeheader()
    for day in range(days):
        date = '2020-01-' + ('%02d' % (day + 1))  # zero-padded day of month
        for _ in range(orders):
            writer.writerow({
                'id': random.randint(1, 10),
                'date': date,
                'time': randomTime(),
                'milk': random.randint(0, 1),
                'sugar': random.randint(0, 4),
                'coffee': random.randint(1, 4),
                'water': random.randint(1, 4),
            })
| StarcoderdataPython |
398878 | <gh_stars>100-1000
# Copyright (C) 2019-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import logging as log
# Disable B410: import_lxml - the library is used for writing
from lxml import etree as ET # nosec, lxml has proper XPath implementation
from datumaro.components.annotation import (
Annotation, AnnotationType, Bbox, Caption, Label, Mask, Points, Polygon,
PolyLine,
)
from datumaro.components.extractor import ItemTransform
class DatasetItemEncoder:
    """Encodes dataset items and their annotations as XML elements so they
    can be matched with XPath expressions (see the filters below)."""

    @classmethod
    def encode(cls, item, categories=None):
        """Encode *item*: id, subset, image info and all annotations."""
        item_elem = ET.Element('item')
        ET.SubElement(item_elem, 'id').text = str(item.id)
        ET.SubElement(item_elem, 'subset').text = str(item.subset)

        image = item.image
        if image is not None:
            item_elem.append(cls.encode_image(image))

        for ann in item.annotations:
            item_elem.append(cls.encode_annotation(ann, categories))

        return item_elem

    @classmethod
    def encode_image(cls, image):
        """Encode image metadata; the size may be unknown for lazy images."""
        image_elem = ET.Element('image')

        size = image.size
        if size is not None:
            h, w = size
        else:
            h = 'unknown'
            w = h
        ET.SubElement(image_elem, 'width').text = str(w)
        ET.SubElement(image_elem, 'height').text = str(h)

        ET.SubElement(image_elem, 'has_data').text = '%d' % int(image.has_data)
        ET.SubElement(image_elem, 'path').text = image.path

        return image_elem

    @classmethod
    def encode_annotation_base(cls, annotation):
        """Encode the fields shared by all annotation kinds
        (id, type, custom attributes, group)."""
        assert isinstance(annotation, Annotation)
        ann_elem = ET.Element('annotation')
        ET.SubElement(ann_elem, 'id').text = str(annotation.id)
        ET.SubElement(ann_elem, 'type').text = str(annotation.type.name)

        for k, v in annotation.attributes.items():
            # Attribute names become element tags, so spaces are replaced.
            ET.SubElement(ann_elem, k.replace(' ', '-')).text = str(v)

        ET.SubElement(ann_elem, 'group').text = str(annotation.group)

        return ann_elem

    @staticmethod
    def _get_label(label_id, categories):
        """Resolve a label id to its name via *categories* ('' if unknown)."""
        label = ''
        if label_id is None:
            return ''
        if categories is not None:
            label_cat = categories.get(AnnotationType.label)
            if label_cat is not None:
                label = label_cat.items[label_id].name
        return label

    @classmethod
    def _encode_labeled_base(cls, obj, categories):
        # Common prefix of every labeled annotation: base fields plus the
        # resolved label name and raw label id.
        ann_elem = cls.encode_annotation_base(obj)
        ET.SubElement(ann_elem, 'label').text = \
            str(cls._get_label(obj.label, categories))
        ET.SubElement(ann_elem, 'label_id').text = str(obj.label)
        return ann_elem

    @staticmethod
    def _encode_bbox_elem(parent, bbox):
        # Writes a <bbox> child (x, y, w, h, area) under *parent*.
        x, y, w, h = bbox
        bbox_elem = ET.SubElement(parent, 'bbox')
        ET.SubElement(bbox_elem, 'x').text = str(x)
        ET.SubElement(bbox_elem, 'y').text = str(y)
        ET.SubElement(bbox_elem, 'w').text = str(w)
        ET.SubElement(bbox_elem, 'h').text = str(h)
        ET.SubElement(bbox_elem, 'area').text = str(w * h)

    @staticmethod
    def _encode_point_elems(parent, points, visibility=None):
        # One <point> child per (x, y) pair; keypoint visibility is optional.
        for i in range(0, len(points), 2):
            point_elem = ET.SubElement(parent, 'point')
            ET.SubElement(point_elem, 'x').text = str(points[i])
            ET.SubElement(point_elem, 'y').text = str(points[i + 1])
            if visibility is not None:
                ET.SubElement(point_elem, 'visible').text = \
                    str(visibility[i // 2].name)

    @classmethod
    def encode_label_object(cls, obj, categories):
        return cls._encode_labeled_base(obj, categories)

    @classmethod
    def encode_mask_object(cls, obj, categories):
        # NOTE(review): identical to encode_label_object — the mask image
        # itself is (intentionally?) not serialized.
        return cls._encode_labeled_base(obj, categories)

    @classmethod
    def encode_bbox_object(cls, obj, categories):
        ann_elem = cls._encode_labeled_base(obj, categories)
        # Bbox coordinates are written flat (not nested in <bbox>), and the
        # area comes from the object itself.
        ET.SubElement(ann_elem, 'x').text = str(obj.x)
        ET.SubElement(ann_elem, 'y').text = str(obj.y)
        ET.SubElement(ann_elem, 'w').text = str(obj.w)
        ET.SubElement(ann_elem, 'h').text = str(obj.h)
        ET.SubElement(ann_elem, 'area').text = str(obj.get_area())
        return ann_elem

    @classmethod
    def encode_points_object(cls, obj, categories):
        ann_elem = cls._encode_labeled_base(obj, categories)
        cls._encode_bbox_elem(ann_elem, obj.get_bbox())
        cls._encode_point_elems(ann_elem, obj.points,
            visibility=obj.visibility)
        return ann_elem

    @classmethod
    def encode_polygon_object(cls, obj, categories):
        ann_elem = cls._encode_labeled_base(obj, categories)
        cls._encode_bbox_elem(ann_elem, obj.get_bbox())
        cls._encode_point_elems(ann_elem, obj.points)
        return ann_elem

    @classmethod
    def encode_polyline_object(cls, obj, categories):
        # Same layout as a polygon: the line's bbox plus its vertices.
        ann_elem = cls._encode_labeled_base(obj, categories)
        cls._encode_bbox_elem(ann_elem, obj.get_bbox())
        cls._encode_point_elems(ann_elem, obj.points)
        return ann_elem

    @classmethod
    def encode_caption_object(cls, obj):
        ann_elem = cls.encode_annotation_base(obj)
        ET.SubElement(ann_elem, 'caption').text = str(obj.caption)
        return ann_elem

    @classmethod
    def encode_annotation(cls, o, categories=None):
        """Dispatch to the encoder specific to the annotation's type."""
        if isinstance(o, Label):
            return cls.encode_label_object(o, categories)
        if isinstance(o, Mask):
            return cls.encode_mask_object(o, categories)
        if isinstance(o, Bbox):
            return cls.encode_bbox_object(o, categories)
        if isinstance(o, Points):
            return cls.encode_points_object(o, categories)
        if isinstance(o, PolyLine):
            return cls.encode_polyline_object(o, categories)
        if isinstance(o, Polygon):
            return cls.encode_polygon_object(o, categories)
        if isinstance(o, Caption):
            return cls.encode_caption_object(o)
        raise NotImplementedError("Unexpected annotation object passed: %s" % o)

    @staticmethod
    def to_string(encoded_item):
        return ET.tostring(encoded_item, encoding='unicode', pretty_print=True)
class XPathDatasetFilter(ItemTransform):
    """Drops dataset items whose XML encoding is not matched by the
    given XPath expression (a None expression keeps everything)."""

    def __init__(self, extractor, xpath=None):
        super().__init__(extractor)

        self._predicate = None
        if xpath is not None:
            try:
                compiled = ET.XPath(xpath)
            except Exception:
                log.error("Failed to create XPath from expression '%s'", xpath)
                raise
            self._predicate = lambda item: bool(compiled(
                DatasetItemEncoder.encode(item, extractor.categories())))

    def transform_item(self, item):
        if self._predicate is not None and not self._predicate(item):
            return None
        return item
class XPathAnnotationsFilter(ItemTransform):
    """Keeps only the annotations matched by the given XPath expression;
    optionally removes items left without any annotations."""

    def __init__(self, extractor, xpath=None, remove_empty=False):
        super().__init__(extractor)

        if xpath is not None:
            try:
                xpath = ET.XPath(xpath)
            except Exception:
                log.error("Failed to create XPath from expression '%s'", xpath)
                raise
        self._filter = xpath
        self._remove_empty = remove_empty

    def transform_item(self, item):
        if self._filter is None:
            return item

        encoded = DatasetItemEncoder.encode(item, self._extractor.categories())
        matched = [elem for elem in self._filter(encoded)
                   if elem.tag == 'annotation']

        # Map matched XML elements back to the item's annotation objects
        # by their position among the encoded <annotation> children.
        ann_elems = encoded.findall('annotation')
        annotations = [item.annotations[ann_elems.index(elem)]
                       for elem in matched]

        if self._remove_empty and not annotations:
            return None
        return self.wrap_item(item, annotations=annotations)
| StarcoderdataPython |
1693058 | <gh_stars>1-10
import time
from Config import *
from Logger import logger
from AllFundCrawler import *
from FundTradeCrawler import *
from FundStockShare import *
from FundDividendCrawler import *
from FundReviewCrawler import *
from FundIndustryCrawler import *
from FundManagerHistoryCrawler import *
from FundBasicCrawler import *
from FundFinanceCrawler import *
import random
class CrawlerFacade:
allFundCrawler = AllFundCrawler()
fundTradeCrawler = FundTradeCrawler()
fundStockShare = FundStockShare()
fundDividendCrawler = FundDividendCrawler()
fundReviewCrawler = FundReviewCrawler()
fundIndustryCrawler = FundIndustryCrawler()
fundManagerHistoryCrawler = FundManagerHistoryCrawler()
fundBasicCrawler = FundBasicCrawler()
fundFinanceCrawler = FundFinanceCrawler()
    # Refresh the master list of all funds via the AllFundCrawler.
    def updateAllFundList(self):
        self.allFundCrawler.crawlFundList()
def updateFrame(self, callback, name, index=0):
fund_codes = self.allFundCrawler.getAllFundsCode()
i = index
while i < len(fund_codes):
try:
callback(fund_code=fund_codes[i])
print('\r', f'更新基金的{name}数据:{i + 1} / {len(fund_codes)}', end='', flush=True)
i = i + 1
logger.info(f'更新基金{fund_codes[i - 1]}的{name}数据完成!')
except Exception as e:
time.sleep(random.randint(5, 10))
errs = f"errors = {e}, retry funcode = {fund_codes[i]}"
if "_id_ dup key" in errs:
i = i + 1
logger.error(f"errors = {e}, retry funcode = {fund_codes[i]}")
print()
# 更新所有基金的历史交易数据
def updateFundsTradeHistory(self):
fund_codes = self.allFundCrawler.getAllFundsCode()
i = 0
while i < len(fund_codes):
try:
self.fundTradeCrawler.crawlHistoryList(fund_code=fund_codes[i])
print('\r', f'更新基金的历史交易数据:{i + 1} / {len(fund_codes)}', end='', flush=True)
i = i + 1
logger.info(f'更新基金{fund_codes[i - 1]}的历史交易数据完成!')
except Exception as e:
time.sleep(random.randint(5, 10))
errs = f"errors = {e}, retry funcode = {fund_codes[i]}"
if "_id_ dup key" in errs:
i = i + 1
logger.error(f"errors = {e}, retry funcode = {fund_codes[i]}")
print()
# 更新所有基金的历史持仓数据
def updateFundStockShareHistory(self):
self.updateFrame(self.fundStockShare.crawlFundStockShareList, "历史持仓", 4497)
# 更新所有基金的分红数据
def updateFundDividendHistory(self):
fund_codes = self.allFundCrawler.getAllFundsCode()
i = 0
print(len(fund_codes))
while i < len(fund_codes):
fund_code = fund_codes[i]
try:
self.fundDividendCrawler.crawlDividendHistoryList(fund_code)
print('\r', f'更新所有基金{fund_code}的分红数据,当前进度{i + 1} / {len(fund_codes)}', end='', flush=True)
# print(f'更新所有基金的分红数据{i + 1}')
if i % 110:
time.sleep(random.randint(5, 10))
else:
time.sleep(random.random())
i = i + 1
except Exception as e:
time.sleep(random.randint(5, 10))
logger.error(f"errors = {e}, retry funcode = {fund_code}")
print()
# 更新所有基金的评级数据
def updateFundsReviewHistory(self):
fund_codes = self.allFundCrawler.getAllFundsCode()
i = 0
while i < len(fund_codes):
try:
self.fundReviewCrawler.crawlReviewHistoryList(fund_code=fund_codes[i])
print('\r', f'更新基金的历史交易数据:{i + 1} / {len(fund_codes)}', end='', flush=True)
i = i + 1
logger.info(f'更新基金{fund_codes[i - 1]}的历史交易数据完成!')
except Exception as e:
time.sleep(random.randint(5, 10))
errs = f"errors = {e}, retry funcode = {fund_codes[i]}"
if "_id_ dup key" in errs:
i = i + 1
logger.error(f"errors = {e}, retry funcode = {fund_codes[i]}")
print()
def updateFundIndustryHistory(self):
self.updateFrame(self.fundIndustryCrawler.crawlFundIndustryList, self.fundIndustryCrawler.name)
def updateFundManagerHistory(self):
self.updateFrame(self.fundManagerHistoryCrawler.crawlFundManagerInfoList, self.fundManagerHistoryCrawler.name)
def updateFundBasic(self):
self.updateFrame(self.fundBasicCrawler.crawlFundBasicInfoList, self.fundBasicCrawler.name)
def updateFundFinance(self):
self.updateFrame(self.fundFinanceCrawler.crawlFundFinanceList, self.fundFinanceCrawler.name)
if __name__ == '__main__':
    # Entry point: uncomment exactly the crawls you want to run.
    crawler = CrawlerFacade()
    #crawler.updateAllFundList()
    crawler.updateFundsTradeHistory()
    #crawler.updateFundStockShareHistory()
    #crawler.updateFundIndustryHistory()
    #crawler.updateFundDividendHistory()
    #crawler.updateFundsReviewHistory()
    #crawler.updateFundManagerHistory()
    #crawler.updateFundBasic()
    #crawler.updateFundFinance()
    # driver.close()
    # driver.quit()
| StarcoderdataPython |
1785092 | import asyncio
from datetime import datetime
# Demo script exercising the asyncdb `pg` provider and its connection pool.
loop = asyncio.get_event_loop()
asyncio.set_event_loop(loop)
from asyncdb import AsyncDB, AsyncPool
from asyncdb.providers.pg import pg, pgPool
# Connection parameters for the target Postgres instance.
params = {
    "user": "troc_pgdata",
    "password": "<PASSWORD>",
    "host": "127.0.0.1",
    "port": "5432",
    "database": "navigator_dev",
    "DEBUG": True,
}
# pool = AsyncPool('pg', loop=loop, params=params)
pool = pgPool(loop=loop, params=params)
loop.run_until_complete(pool.connect())
print('Pool Connected: ', pool.is_connected())
# Acquire one connection from the pool for the single-connection demo below.
db = loop.run_until_complete(pool.acquire())
print('Is Connected: ', db.is_connected())
sql = "SELECT * FROM troc.query_util WHERE query_slug = '{}'".format("walmart_stores")
async def connect(c):
    """Demo: run a timed query and a plain EXECUTE on a single connection."""
    async with await c.connection() as conn:
        print('Connection: ', conn)
        result, error = await conn.test_connection()
        print(result, error)
        start = datetime.now()
        result, error = await conn.query(sql)
        exec_time = (datetime.now() - start).total_seconds()
        if not error:
            print(result)
        print(f"Execution Time {exec_time:.3f}s\n")
        # execute a sentence
        result, error = await conn.execute("SET TIMEZONE TO 'America/New_York'")
        print(result)
async def pooler(p):
    """Demo: acquire a connection from pool *p* and stream two result sets."""
    async with await p.acquire() as conn:
        print('Connection: ', conn)
        result, error = await conn.test_connection()
        # a huge dataset:
        result, error = await conn.query('SELECT * FROM trocplaces.stores')
        if not error:
            for row in result:
                print(row)
        start = datetime.now()
        result, error = await conn.query('SELECT * FROM troc.dashboards')
        exec_time = (datetime.now() - start).total_seconds()
        if not error:
            for row in result:
                print(row)
        print(f"Execution Time {exec_time:.3f}s\n")
if __name__ == "__main__":
loop.run_until_complete(connect(db))
print('Working on huge datasets')
loop.run_until_complete(pooler(pool))
loop.run_until_complete(pool.wait_close(gracefully=True, timeout=5))
loop.stop()
| StarcoderdataPython |
108913 | #! /usr/bin/env python3
import os
from datetime import timedelta
import flask
from module.Interface import *
# Flask app serving the blog; page templates live under ./static/html.
app = flask.Flask(__name__, template_folder="./static/html")
app.config['SECRET_KEY'] = os.urandom(24)  # fresh random session key per process
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(minutes=30)  # session TTL
@app.route('/', methods=["GET", "POST"])
def index() :
    """Redirect the site root to the static index page."""
    return flask.redirect('/static/html/index.html')
@app.route('/test', methods=["GET", "POST"])
def test() :
    """Redirect /test to the static test page."""
    return flask.redirect('/static/test/index.html')
# Blog data endpoints
blogInterfaceList = [
    ['/blog/is_login', BlogInterface.isLogin],
    ['/blog/login', BlogInterface.login],
    ['/blog/username', BlogInterface.getUsername],
    ['/blog/avatar', BlogInterface.getAvatar],
    ['/blog/running_days', BlogInterface.getRunDays],
    ['/blog/visiting_count', BlogInterface.getVisitingCount],
    ['/blog/visiting_modify', BlogInterface.addVisitingCount],
]
# Article data endpoints
articleInterfaceList = [
    ['/article/count', ArticleInterface.count],
    ['/article/get_id_by_order', ArticleInterface.getIdByOrder],
    ['/article/title', ArticleInterface.title],
    ['/article/date', ArticleInterface.date],
    ['/article/reading_count', ArticleInterface.readingCount],
    ['/article/markdown', ArticleInterface.markdown],
    ['/article/html', ArticleInterface.html],
    ['/article/total', ArticleInterface.total],
    ['/article/aside', ArticleInterface.aside],
    ['/article/list', ArticleInterface.list],
    ['/article/pages', ArticleInterface.pages],
    ['/article/latest', ArticleInterface.latest],
    ['/article/modify/reading_count', ArticleInterface.modifyReadingCount],
    ['/article/save', ArticleInterface.save],
    ['/article/delete', ArticleInterface.delete],
    ['/article/add_reading', ArticleInterface.addReading],
]
# Message-board data endpoints
messageInterfaceList = [
    ['/message/count', MessageInterface.count],
    ['/message/get_id_by_order', MessageInterface.getIdByOrder],
    ['/message/visitor_name', MessageInterface.visitorName],
    ['/message/date', MessageInterface.date],
    ['/message/markdown', MessageInterface.markdown],
    ['/message/html', MessageInterface.html],
    ['/message/total', MessageInterface.total],
    ['/message/pages', MessageInterface.pages],
    ['/message/list', MessageInterface.getList],
    ['/message/aside', MessageInterface.getAside],
    ['/message/save', MessageInterface.save],
    ['/message/delete', MessageInterface.delete],
]
# Markdown rendering endpoints
markdownInterfaceList = [
    ['/markdown/render', MarkdownInterface.render],
]


def _register_routes(routes):
    """Register each [url, view] pair as a POST endpoint (endpoint name = url)."""
    for url, view in routes:
        app.add_url_rule(url, endpoint=url, view_func=view, methods=['POST'])


# Single registration pass replaces four identical copy-pasted loops.
for _routes in (blogInterfaceList, articleInterfaceList,
                messageInterfaceList, markdownInterfaceList):
    _register_routes(_routes)
if __name__ == "__main__" :
app.run(port=8102) | StarcoderdataPython |
1683850 | from torch.optim.lr_scheduler import CosineAnnealingLR, _LRScheduler
from torch.optim.optimizer import Optimizer
__all__ = [
'CosineDecayWithWarmupScheduler',
]
# Inspired by https://github.com/seominseok0429/pytorch-warmup-cosine-lr/blob/master/warmup_scheduler/scheduler.py
class CosineDecayWithWarmupScheduler(_LRScheduler):
    """LR schedule: linear warmup to ``max_lr`` followed by cosine decay.

    For the first ``warmup_epochs`` epochs the LR grows linearly from
    ``max_lr / initial_div_factor`` up to ``max_lr``; afterwards a
    ``CosineAnnealingLR`` anneals it over the remaining
    ``total_epochs - warmup_epochs`` epochs.
    """

    def __init__(
        self,
        optimizer: Optimizer,
        max_lr: float,
        total_epochs: int,
        initial_div_factor: float = 25,
        warmup_epochs: int = 5,
        last_epoch: int = -1,
        verbose: bool = False
    ) -> None:
        initial_lr = max_lr / initial_div_factor
        # Multiplier mapping the warmup start LR to max_lr
        # (equal to initial_div_factor by construction).
        self.warmup_factor = max_lr / initial_lr
        self.warmup_epochs = warmup_epochs
        self.cosine_decay = CosineAnnealingLR(optimizer, total_epochs - warmup_epochs)
        self.warmup_completed = False
        if last_epoch == -1:
            # Fresh run: seed each param group so _LRScheduler adopts the
            # warmup starting LR as its base LR.
            for idx, group in enumerate(optimizer.param_groups):
                group['initial_lr'] = initial_lr
                group['max_lr'] = max_lr
        # NOTE: _LRScheduler.__init__ calls step() once, so get_lr() runs here.
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        if self.last_epoch > self.warmup_epochs:
            if not self.warmup_completed:
                # First post-warmup call: rescale the cosine schedule's base
                # LRs from the warmup start LR up to max_lr (one-time switch).
                self.cosine_decay.base_lrs = [base_lr * self.warmup_factor for base_lr in self.base_lrs]
                self.warmup_completed = True
            return self.cosine_decay.get_lr()
        # Warmup phase: linear ramp from base_lr at epoch 0 to
        # base_lr * warmup_factor at epoch == warmup_epochs.
        return [
            base_lr * ((self.warmup_factor - 1.) * self.last_epoch / self.warmup_epochs + 1.)
            for base_lr
            in self.base_lrs
        ]

    def step(self, epoch: int = None):
        if self.warmup_completed:
            # Delegate to the cosine schedule, shifting epochs so its
            # timeline starts at the end of warmup.
            if epoch is None:
                self.cosine_decay.step(None)
            else:
                self.cosine_decay.step(epoch - self.warmup_epochs)
            self._last_lr = self.cosine_decay.get_last_lr()
        else:
            super().step(epoch)
| StarcoderdataPython |
6567524 | """
画出下列代码内存图
找出打印结果
"""
g01 = 100
g02 = 100
g03 = [100]
def func01():
g01 = 200# 创建一个局部变量
g03[0] = 200 # 修改的是列表中元素(读取全局变量)
def func02():
global g02
g02 = 200
func01()
print(g01) # 100
print(g03) # 200
func02()
print(g02) # 200
class MyClass:
cls01 = 300 # 饮水机
def __init__(self):
self.ins01 = 400 # 杯子
self.ins01 += 1
MyClass.cls01 += 1
instance01 = MyClass()# 400->401 300 -> 301
print(instance01.ins01) # 401
print(MyClass.cls01) # 301
instance02 = MyClass()# 400->401 301 -> 302
print(instance02.ins01) # 401
print(MyClass.cls01) # 302
| StarcoderdataPython |
5161729 | <reponame>JarryChou/etsi-qkd-api<gh_stars>1-10
"""Class implementing the Key Management Entity (KME).
"""
import random
from api import helper
import configparser
from typing import List
from api import crawler
class KME:
    """
    Class for the KME on each node. This class also defines the related methods for manipulating
    the keys. Most of the configurations for the KME, such as the IP address of the web server hosting the API,
    IP address of the SAE etc. is stored in a ``config.ini`` file.

    Parameters
    ----------
    config_path : str
        ABSOLUTE file path to config.ini that is contained in the etsi-qkd-api/api folder.

    Attributes
    ----------
    source_KME_ID: str
        IP address of the source (master) KME.
    target_KME_ID: str
        IP address of the target (slave) KME
    master_SAE_ID: str
        IP address of the master SAE (user application that requests keys)
    slave_SAE_ID: str
        IP address of the slave SAE.
    key_size: int
        Size of each key in bits in the KME.
    max_key_per_request: int
        Maximum number of keys per request
    max_key_size: int
        Maximum size of each key in bits that can be requested.
    min_key_size: int
        Minimum size of each key in bits that can be requested.
    max_SAE_ID_count: int
        Maximum number of additional SAEs allowed (0 at the moment).
    status_extension: str
        Optional field for future use (unclear what it should be used for at the moment)
    rd: random (object, from random Python library)
        Object to initialize random seed and pass to UUID generator to generate UUIDs as the key IDs.
    """

    def __init__(self, config_path: str):
        # read attributes from config file
        config = configparser.ConfigParser()
        config.read(config_path)
        default_section = config['DEFAULT']
        self.key_file_path = default_section.get('key_file_path')

        # read and count keys from qcrypto files available at key_file_path
        self.key_file_crawler = crawler.KeyFileCrawler(self.key_file_path)
        self.stored_key_count = self.key_file_crawler.get_stored_key_count()

        # class attributes
        self.max_key_count = self.stored_key_count
        self.source_KME_ID = default_section.get('source_KME_ID')
        self.target_KME_ID = default_section.get('target_KME_ID')
        self.master_SAE_ID = default_section.get('master_SAE_ID')
        # NOTE(review): reads 'source_SAE_ID' for the *slave* SAE — looks like
        # a copy-paste slip; confirm the intended key name against config.ini.
        self.slave_SAE_ID = default_section.get('source_SAE_ID')
        self.key_size = default_section.getint('key_size')
        self.max_key_per_request = default_section.getint('max_key_per_request')
        self.max_key_size = default_section.getint('max_key_size')
        self.min_key_size = default_section.getint('min_key_size')
        self.max_SAE_ID_count = default_section.getint('max_SAE_ID_count')
        self.status_extension = default_section.get('status_extension')

        # set a random seed for generating UUIDs
        self.rd = random.Random()
        self.rd.seed(0)  # fix initial seed to be 0 for both master and slave

    def _build_key_container(self, keys_retrieved) -> dict:
        """Build the ETSI key-container dict from rows of raw keys.

        ``keys_retrieved`` is a 2D structure: each row holds the constituent
        32-bit keys of one logical key.  Each logical key is base64-encoded
        and its key_ID is the '+'-joined UUIDs of its constituent keys.

        This helper replaces identical tails that were duplicated in
        :meth:`get_key` and :meth:`get_key_with_id`.
        """
        # concatenate each row's constituent keys, then base64-encode
        concatenated_keys = [helper.int_to_base64(k) for k in helper.concat_keys(keys_retrieved)]
        keys_array = []
        for concat_key, constituent_keys in zip(concatenated_keys, keys_retrieved):
            list_of_uuids = [helper.convert_int_to_uuid(x) for x in constituent_keys]
            key_ID = '+'.join(list_of_uuids)  # constituent key IDs delimited by '+'
            keys_array.append({"key_ID": key_ID, "key": concat_key})
        return {'keys': keys_array}

    def get_key(self, number: int, size: int) -> dict:
        """Master function that returns the key container of keys from KME.

        Handles the logic for retrieving the keys from qcrypto files.  Keys
        larger than the stored 32-bit unit are built by concatenating
        constituent keys.  The file reading is done by the helper function
        :func:`~api.helper.retrieve_keys_from_file`.

        Parameters
        ----------
        number : int
            The number of keys requested (defaults to 1 when None).
        size : int
            The size of each key in bits (defaults to self.key_size when None).

        Returns
        -------
        dict
            Key container containing the keys requested.

        Raises
        ------
        ValueError
            Error if there are insufficient keys.
        """
        if number is None:
            number = 1
        if size is None:
            size = self.key_size
        # Number of stored 32-bit keys concatenated to form one requested key.
        # NOTE(review): assumes `size` is a multiple of self.key_size;
        # otherwise int() truncation silently shortens the key — confirm.
        num_key_in_each = int(size / self.key_size)

        # If insufficient keys raise ValueError
        if num_key_in_each * number > self.stored_key_count:
            raise ValueError

        # Pass to helper function to retrieve keys from the qcrypto key files
        keys_retrieved = helper.retrieve_keys_from_file(number, num_key_in_each, self.key_file_path)
        key_container = self._build_key_container(keys_retrieved)
        self.stored_key_count -= number * num_key_in_each  # account for consumed keys
        return key_container

    def get_key_with_id(self, key_ids: List[dict]) -> dict:
        """Returns the key container of keys from KME given the key IDs.

        Called by the 'slave' application requesting keys.  The actual key
        retrieval is delegated to :func:`~api.helper.retrieve_keys_given_uuid`.

        Parameters
        ----------
        key_ids: List[dict]
            Array of dictionaries of the form ``{"key_ID": <key_id>}``, where
            each <key_id> is one or more UUIDs joined with '+'.

        Returns
        -------
        dict
            Key container containing the keys requested.

        Raises
        ------
        KeyError
            Error if the keys requested cannot be found. Thrown by
            :func:`~api.helper.retrieve_keys_given_uuid`.
        """
        num_keys_retrieved = 0
        uuid_array = []
        # uuid_array is a 2D list: each row holds the constituent key IDs
        # (UUIDs) that make up one logical key
        for val in key_ids:
            concat_key_id = val["key_ID"]
            key_ids_arr = concat_key_id.split("+")  # key IDs are joined with '+'
            num_keys_retrieved += len(key_ids_arr)
            uuid_array.append(key_ids_arr)

        # pass to helper
        keys_retrieved = helper.retrieve_keys_given_uuid(uuid_array, self.key_file_path)
        key_container = self._build_key_container(keys_retrieved)
        self.stored_key_count -= num_keys_retrieved  # account for consumed keys
        return key_container

    def get_status(self) -> dict:
        """Returns status of KME according to the ETSI specification.

        Re-reads the key count from disk so the figures stay correct when a
        new key file has been added since the last call.

        Returns
        -------
        dict
            Dictionary containing status properties of KME.
        """
        # update stored key count when get_status is called
        self.stored_key_count = self.key_file_crawler.get_stored_key_count()
        status = {
            "source_KME_ID": self.source_KME_ID,
            "target_KME_ID": self.target_KME_ID,
            "master_SAE_ID": self.master_SAE_ID,
            "slave_SAE_ID": self.slave_SAE_ID,
            "key_size": self.key_size,
            "stored_key_count": self.stored_key_count,
            "max_key_count": self.max_key_count,
            "max_key_per_request": self.max_key_per_request,
            "max_key_size": self.max_key_size,
            "min_key_size": self.min_key_size,
            "max_SAE_ID_count": self.max_SAE_ID_count
        }
        return status
| StarcoderdataPython |
34930 | #!/usr/bin/env python
from setuptools import find_packages, setup
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setup(
name="tplink-wr-api",
version="0.2.1",
url="https://github.com/n1k0r/tplink-wr-api",
author="n1k0r",
author_email="<EMAIL>",
description="API to some budget TP-Link routers",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Communications",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Networking",
],
packages=find_packages(exclude=["tests", "tests.*"]),
python_requires=">=3.8",
install_requires=[
"requests~=2.26",
],
)
| StarcoderdataPython |
3324493 | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
from integration_tests import AgentTestWithPlugins, BaseTestCase
from integration_tests.tests.utils import get_resource as resource
class TestWorkflow(AgentTestWithPlugins):
    """Integration tests around agent install/uninstall workflows."""

    def test_deploy_with_agent_worker(self):
        """4.2-style (remote) agent install: only the 'create' op runs."""
        # In 4.2, the default (remote) agent installation path only requires
        # the "create" operation
        install_events = [
            "Task succeeded 'cloudify_agent.installer.operations.create'"
        ]
        uninstall_events = [
            "Task succeeded 'cloudify_agent.installer.operations.stop'",
            "Task succeeded 'cloudify_agent.installer.operations.delete'"
        ]
        self._test_deploy_with_agent_worker(
            'dsl/agent_tests/with_agent.yaml',
            install_events,
            uninstall_events
        )

    def test_deploy_with_agent_worker_3_2(self):
        """3.2-style agent install: install/start and stop/uninstall ops run."""
        install_events = [
            "Task succeeded 'worker_installer.tasks.install'",
            "Task succeeded 'worker_installer.tasks.start'"
        ]
        uninstall_events = [
            "Task succeeded 'worker_installer.tasks.stop'",
            "Task succeeded 'worker_installer.tasks.uninstall'"
        ]
        self._test_deploy_with_agent_worker(
            'dsl/agent_tests/with_agent_3_2.yaml',
            install_events,
            uninstall_events
        )

    def _test_deploy_with_agent_worker(self,
                                       blueprint,
                                       install_events,
                                       uninstall_events):
        """Deploy *blueprint*, then undeploy, asserting the expected agent
        lifecycle events appear in order in each execution's event stream."""
        deployment_id = 'd{0}'.format(uuid.uuid4())
        dsl_path = resource(blueprint)
        _, execution_id = self.deploy_application(
            dsl_path,
            deployment_id=deployment_id,
            timeout_seconds=120)
        events = self.client.events.list(execution_id=execution_id,
                                         sort='timestamp')
        filtered_events = [event['message'] for event in events if
                           event['message'] in install_events]

        # Make sure the install events were called (in the correct order)
        self.assertListEqual(install_events, filtered_events)

        execution_id = self.undeploy_application(deployment_id)
        events = self.client.events.list(execution_id=execution_id,
                                         sort='timestamp')
        filtered_events = [event['message'] for event in events if
                           event['message'] in uninstall_events]

        # Make sure the uninstall events were called (in the correct order)
        self.assertListEqual(uninstall_events, filtered_events)

    def test_deploy_with_operation_executor_override(self):
        """Verify operations run on the executor the blueprint overrides."""
        self.upload_mock_plugin('target-aware-mock')
        self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
        self.setup_node_id = 'webserver_host'
        dsl_path = resource('dsl/agent_tests/operation_executor_override.yaml')
        _, execution_id = self.deploy_application(
            dsl_path,
            deployment_id=self.setup_deployment_id,
            timeout_seconds=120
        )
        deployment_nodes = self.client.node_instances.list(
            deployment_id=self.setup_deployment_id
        )
        # Materialize to a list: the previous filter(...) object made the
        # subsequent len() call fail with TypeError on Python 3.
        webserver_nodes = [node for node in deployment_nodes
                           if 'host' not in node.node_id]
        self.assertEqual(1, len(webserver_nodes))  # assertEquals is a deprecated alias
        webserver_node = webserver_nodes[0]
        webserver_host_node = self.client.node_instances.list(
            deployment_id=self.setup_deployment_id,
            node_id='webserver_host'
        )[0]
        create_invocation = self.get_plugin_data(
            plugin_name='target_aware_mock',
            deployment_id=self.setup_deployment_id
        )[webserver_node.id]['create']
        expected_create_invocation = {'target': webserver_host_node.id}
        self.assertEqual(expected_create_invocation, create_invocation)

        # Calling like this because "start" would be written on the manager
        # as opposed to the host (hence the "override")
        start_invocation = BaseTestCase.get_plugin_data(
            self,
            plugin_name='target_aware_mock',
            deployment_id=self.setup_deployment_id
        )[webserver_node.id]['start']
        expected_start_invocation = {'target': 'cloudify.management'}
        self.assertEqual(expected_start_invocation, start_invocation)
| StarcoderdataPython |
class data(object):
    """Tiny value holder exposing two class-level constants."""

    number = 16
    name = "whwl"
| StarcoderdataPython |
3423646 | <filename>batch/batch/driver/instance_collection/job_private.py
from typing import List, Tuple
import random
import json
import logging
import asyncio
import sortedcontainers
from gear import Database
from hailtop import aiotools
from hailtop.utils import (
Notice,
run_if_changed,
WaitableSharedPool,
time_msecs,
retry_long_running,
secret_alnum_string,
AsyncWorkerPool,
periodically_call,
)
from ...batch_format_version import BatchFormatVersion
from ...inst_coll_config import JobPrivateInstanceManagerConfig
from ...utils import Box, ExceededSharesCounter
from ...instance_config import QuantifiedResource
from ..instance import Instance
from ..job import mark_job_creating, schedule_job
from ..resource_manager import CloudResourceManager
from .base import InstanceCollectionManager, InstanceCollection
log = logging.getLogger('job_private_inst_coll')
class JobPrivateInstanceManager(InstanceCollection):
    @staticmethod
    async def create(
        app,
        db: Database,  # BORROWED
        inst_coll_manager: InstanceCollectionManager,
        resource_manager: CloudResourceManager,
        machine_name_prefix: str,
        config: JobPrivateInstanceManagerConfig,
        task_manager: aiotools.BackgroundTaskManager,
    ):
        """Construct a JobPrivateInstanceManager and rehydrate its instances.

        Loads every non-removed instance belonging to this collection (joined
        with its free-core count) from the database and registers it on the
        new manager before returning it.
        """
        jpim = JobPrivateInstanceManager(
            app, db, inst_coll_manager, resource_manager, machine_name_prefix, config, task_manager
        )

        log.info(f'initializing {jpim}')

        async for record in db.select_and_fetchall(
            '''
SELECT instances.*, instances_free_cores_mcpu.free_cores_mcpu
FROM instances
INNER JOIN instances_free_cores_mcpu
ON instances.name = instances_free_cores_mcpu.name
WHERE removed = 0 AND inst_coll = %s;
''',
            (jpim.name,),
        ):
            jpim.add_instance(Instance.from_record(app, jpim, record))

        return jpim
    def __init__(
        self,
        app,
        db: Database,  # BORROWED
        inst_coll_manager: InstanceCollectionManager,
        resource_manager: CloudResourceManager,
        machine_name_prefix: str,
        config: JobPrivateInstanceManagerConfig,
        task_manager: aiotools.BackgroundTaskManager,
    ):
        """Set up state and start the create/schedule background loops."""
        super().__init__(
            db,
            inst_coll_manager,
            resource_manager,
            config.cloud,
            config.name,
            machine_name_prefix,
            is_pool=False,
            max_instances=config.max_instances,
            max_live_instances=config.max_live_instances,
            task_manager=task_manager,
        )
        self.app = app

        # Wake the instance-creation loop whenever the global scheduler
        # state changes; the job-scheduling loop has its own local event.
        global_scheduler_state_changed: Notice = self.app['scheduler_state_changed']
        self.create_instances_state_changed = global_scheduler_state_changed.subscribe()
        self.scheduler_state_changed = asyncio.Event()

        self.async_worker_pool: AsyncWorkerPool = app['async_worker_pool']
        self.exceeded_shares_counter = ExceededSharesCounter()

        self.boot_disk_size_gb = config.boot_disk_size_gb

        # Background loop: create VMs for runnable job-private jobs.
        task_manager.ensure_future(
            retry_long_running(
                'create_instances_loop',
                run_if_changed,
                self.create_instances_state_changed,
                self.create_instances_loop_body,
            )
        )
        # Background loop: schedule 'Creating' jobs onto active instances.
        task_manager.ensure_future(
            retry_long_running(
                'schedule_jobs_loop', run_if_changed, self.scheduler_state_changed, self.schedule_jobs_loop_body
            )
        )
        # Periodic nudge so the scheduler never stalls waiting for an event.
        task_manager.ensure_future(periodically_call(15, self.bump_scheduler))
def config(self):
return {
'name': self.name,
'worker_disk_size_gb': self.boot_disk_size_gb,
'max_instances': self.max_instances,
'max_live_instances': self.max_live_instances,
}
    async def configure(self, boot_disk_size_gb, max_instances, max_live_instances):
        """Persist new collection limits to the DB, then update memory.

        The in-memory attributes are assigned only after the DB write
        succeeds, so a failed update leaves state consistent.
        """
        await self.db.just_execute(
            '''
UPDATE inst_colls
SET boot_disk_size_gb = %s, max_instances = %s, max_live_instances = %s
WHERE name = %s;
''',
            (boot_disk_size_gb, max_instances, max_live_instances, self.name),
        )
        self.boot_disk_size_gb = boot_disk_size_gb
        self.max_instances = max_instances
        self.max_live_instances = max_live_instances
    async def bump_scheduler(self):
        """Periodic nudge: wake the job-scheduling loop."""
        self.scheduler_state_changed.set()
    async def schedule_jobs_loop_body(self):
        """One pass of the scheduling loop: place 'Creating' jobs on their VMs.

        Selects up to 300 runnable jobs whose dedicated instance is already
        active (oldest-activated instances first) and schedules each via the
        shared worker pool.

        Returns ``should_wait`` for run_if_changed: True when nothing was
        scheduled (wait for the next state-change event), False to run again
        immediately.
        """
        if self.app['frozen']:
            log.info(f'not scheduling any jobs for {self}; batch is frozen')
            return True

        log.info(f'starting scheduling jobs for {self}')

        waitable_pool = WaitableSharedPool(self.async_worker_pool)

        should_wait = True
        n_scheduled = 0

        async for record in self.db.select_and_fetchall(
            '''
SELECT jobs.*, batches.format_version, batches.userdata, batches.user, attempts.instance_name
FROM batches
INNER JOIN jobs ON batches.id = jobs.batch_id
LEFT JOIN attempts ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE batches.state = 'running'
AND jobs.state = 'Creating'
AND (jobs.always_run OR NOT jobs.cancelled)
AND jobs.inst_coll = %s
AND instances.`state` = 'active'
ORDER BY instances.time_activated ASC
LIMIT 300;
''',
            (self.name,),
        ):
            batch_id = record['batch_id']
            job_id = record['job_id']
            instance_name = record['instance_name']
            id = (batch_id, job_id)
            log.info(f'scheduling job {id}')

            instance = self.name_instance[instance_name]
            n_scheduled += 1
            should_wait = False

            # Errors are logged, not raised, so one bad job cannot kill
            # the whole scheduling pass.  Arguments are passed explicitly
            # (not closed over) to avoid late-binding of loop variables.
            async def schedule_with_error_handling(app, record, id, instance):
                try:
                    await schedule_job(app, record, instance)
                except Exception:
                    log.info(f'scheduling job {id} on {instance} for {self}', exc_info=True)

            await waitable_pool.call(schedule_with_error_handling, self.app, record, id, instance)

        await waitable_pool.wait()

        log.info(f'scheduled {n_scheduled} jobs for {self}')

        return should_wait
def max_instances_to_create(self):
n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']
return min(
self.max_live_instances - n_live_instances,
self.max_instances - self.n_instances,
# 20 queries/s; our GCE long-run quota
300,
)
    async def compute_fair_share(self):
        """Split the instance-creation budget fairly across users.

        Water-filling allocation: the budget (``max_instances_to_create``) is
        granted by raising a common "mark" level.  Users below the mark in
        *live* (creating + running) jobs receive allocations first; once a
        user's live count reaches the mark they continue to be filled up to
        their *total* job count.

        Returns a dict ``user -> record`` where each record gains an
        ``n_allocated_jobs`` field.
        """
        n_jobs_to_allocate = self.max_instances_to_create()

        user_live_jobs = {}
        user_total_jobs = {}
        result = {}

        # users still below the mark, ordered by live job count
        pending_users_by_live_jobs = sortedcontainers.SortedSet(key=lambda user: user_live_jobs[user])
        # users whose live count reached the mark, ordered by total job count
        allocating_users_by_total_jobs = sortedcontainers.SortedSet(key=lambda user: user_total_jobs[user])

        records = self.db.execute_and_fetchall(
            '''
SELECT user,
CAST(COALESCE(SUM(n_ready_jobs), 0) AS SIGNED) AS n_ready_jobs,
CAST(COALESCE(SUM(n_creating_jobs), 0) AS SIGNED) AS n_creating_jobs,
CAST(COALESCE(SUM(n_running_jobs), 0) AS SIGNED) AS n_running_jobs
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user
HAVING n_ready_jobs + n_creating_jobs + n_running_jobs > 0;
''',
            (self.name,),
        )

        async for record in records:
            user = record['user']
            user_live_jobs[user] = record['n_creating_jobs'] + record['n_running_jobs']
            user_total_jobs[user] = record['n_ready_jobs'] + record['n_creating_jobs'] + record['n_running_jobs']
            pending_users_by_live_jobs.add(user)
            record['n_allocated_jobs'] = 0
            result[user] = record

        def allocate_jobs(user, mark):
            # a user's allocation is the gap between the mark and their
            # already-live jobs
            result[user]['n_allocated_jobs'] = mark - user_live_jobs[user]

        mark = 0
        while n_jobs_to_allocate > 0 and (pending_users_by_live_jobs or allocating_users_by_total_jobs):
            lowest_running = None
            lowest_total = None

            if pending_users_by_live_jobs:
                lowest_running_user = pending_users_by_live_jobs[0]
                lowest_running = user_live_jobs[lowest_running_user]
                if lowest_running == mark:
                    # user reached the mark: move to the allocating set
                    pending_users_by_live_jobs.remove(lowest_running_user)
                    allocating_users_by_total_jobs.add(lowest_running_user)
                    continue

            if allocating_users_by_total_jobs:
                lowest_total_user = allocating_users_by_total_jobs[0]
                lowest_total = user_total_jobs[lowest_total_user]
                if lowest_total == mark:
                    # user fully satisfied: finalize their allocation
                    allocating_users_by_total_jobs.remove(lowest_total_user)
                    allocate_jobs(lowest_total_user, mark)
                    continue

            # next level at which some user's state changes
            allocation = min([c for c in [lowest_running, lowest_total] if c is not None])

            n_allocating_users = len(allocating_users_by_total_jobs)
            jobs_to_allocate = n_allocating_users * (allocation - mark)

            if jobs_to_allocate > n_jobs_to_allocate:
                # budget exhausted before the next level: split evenly
                mark += int(n_jobs_to_allocate / n_allocating_users + 0.5)
                n_jobs_to_allocate = 0
                break

            mark = allocation
            n_jobs_to_allocate -= jobs_to_allocate

        for user in allocating_users_by_total_jobs:
            allocate_jobs(user, mark)

        return result
    async def create_instance(self, machine_spec: dict) -> Tuple[Instance, List[QuantifiedResource]]:
        """Create one job-private VM described by *machine_spec*.

        *machine_spec* must provide 'machine_type', 'preemptible' and
        'storage_gib'.  Returns the new Instance and the quantified
        resources it consumes.
        """
        machine_type = machine_spec['machine_type']
        preemptible = machine_spec['preemptible']
        storage_gb = machine_spec['storage_gib']
        _, cores = self.resource_manager.worker_type_and_cores(machine_type)
        instance, total_resources_on_instance = await self._create_instance(
            app=self.app,
            cores=cores,
            machine_type=machine_type,
            job_private=True,  # dedicated to a single job
            location=None,
            preemptible=preemptible,
            max_idle_time_msecs=None,
            local_ssd_data_disk=False,
            data_disk_size_gb=storage_gb,
            boot_disk_size_gb=self.boot_disk_size_gb,
        )
        return (instance, total_resources_on_instance)
    async def create_instances_loop_body(self):
        """Run one iteration of the instance-creation loop.

        Creates job-private instances for runnable ('Ready') jobs in this
        instance collection, dividing the scan budget among users according
        to the allocation computed by ``compute_fair_share``.  Returns True
        when the caller should wait before the next iteration (frozen, or no
        allocated jobs) and False when work was scheduled.
        """
        if self.app['frozen']:
            log.info(f'not creating instances for {self}; batch is frozen')
            return True
        log.info(f'create_instances for {self}: starting')
        start = time_msecs()
        n_instances_created = 0
        user_resources = await self.compute_fair_share()
        total = sum(resources['n_allocated_jobs'] for resources in user_resources.values())
        if not total:
            log.info(f'create_instances {self}: no allocated jobs')
            should_wait = True
            return should_wait
        # Split a 300-unit scan budget proportionally to each user's allocated
        # jobs, with a floor of 20 so small users still make progress.
        user_share = {
            user: max(int(300 * resources['n_allocated_jobs'] / total + 0.5), 20)
            for user, resources in user_resources.items()
        }
        # Yields up to `remaining.value` runnable jobs for `user` in this
        # instance collection, batch by batch: first always-run jobs (fetched
        # even when the batch is cancelled), then — only for batches that are
        # not cancelled — regular (always_run = 0, cancelled = 0) jobs.
        # `HAVING live_attempts = 0` skips jobs that already have a pending or
        # active attempt on some instance.
        async def user_runnable_jobs(user, remaining):
            async for batch in self.db.select_and_fetchall(
                '''
SELECT batches.id, batches_cancelled.id IS NOT NULL AS cancelled, userdata, user, format_version
FROM batches
LEFT JOIN batches_cancelled
ON batches.id = batches_cancelled.id
WHERE user = %s AND `state` = 'running';
''',
                (user,),
            ):
                async for record in self.db.select_and_fetchall(
                    '''
SELECT jobs.job_id, jobs.spec, jobs.cores_mcpu, COALESCE(SUM(instances.state IS NOT NULL AND
(instances.state = 'pending' OR instances.state = 'active')), 0) as live_attempts
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_inst_coll_cancelled)
LEFT JOIN attempts ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE jobs.batch_id = %s AND jobs.state = 'Ready' AND always_run = 1 AND jobs.inst_coll = %s
GROUP BY jobs.job_id, jobs.spec, jobs.cores_mcpu
HAVING live_attempts = 0
LIMIT %s;
''',
                    (batch['id'], self.name, remaining.value),
                ):
                    # Enrich each job record with its batch's metadata so the
                    # consumer below does not need the batch row.
                    record['batch_id'] = batch['id']
                    record['userdata'] = batch['userdata']
                    record['user'] = batch['user']
                    record['format_version'] = batch['format_version']
                    yield record
                if not batch['cancelled']:
                    async for record in self.db.select_and_fetchall(
                        '''
SELECT jobs.job_id, jobs.spec, jobs.cores_mcpu, COALESCE(SUM(instances.state IS NOT NULL AND
(instances.state = 'pending' OR instances.state = 'active')), 0) as live_attempts
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
LEFT JOIN attempts ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE jobs.batch_id = %s AND jobs.state = 'Ready' AND always_run = 0 AND jobs.inst_coll = %s AND cancelled = 0
GROUP BY jobs.job_id, jobs.spec, jobs.cores_mcpu
HAVING live_attempts = 0
LIMIT %s;
''',
                        (batch['id'], self.name, remaining.value),
                    ):
                        record['batch_id'] = batch['id']
                        record['userdata'] = batch['userdata']
                        record['user'] = batch['user']
                        record['format_version'] = batch['format_version']
                        yield record
        waitable_pool = WaitableSharedPool(self.async_worker_pool)
        should_wait = True
        for user, resources in user_resources.items():
            n_allocated_instances = resources['n_allocated_jobs']
            if n_allocated_instances == 0:
                continue
            n_user_instances_created = 0
            share = user_share[user]
            log.info(f'create_instances {self}: user-share: {user}: {share}')
            remaining = Box(share)
            async for record in user_runnable_jobs(user, remaining):
                batch_id = record['batch_id']
                job_id = record['job_id']
                # NOTE(review): `id` shadows the builtin; kept unchanged here.
                id = (batch_id, job_id)
                attempt_id = secret_alnum_string(6)
                record['attempt_id'] = attempt_id
                # Once this user's allocation is reached, probabilistically
                # wake the scheduler and stop creating for this user; the
                # counter's rate adapts how often the wake-up happens.
                if n_user_instances_created >= n_allocated_instances:
                    if random.random() > self.exceeded_shares_counter.rate():
                        self.exceeded_shares_counter.push(True)
                        self.scheduler_state_changed.set()
                        break
                    self.exceeded_shares_counter.push(False)
                n_instances_created += 1
                n_user_instances_created += 1
                should_wait = False
                log.info(f'creating job private instance for job {id}')
                # Loop variables are passed in as parameters, so each call
                # scheduled on the pool is bound to its own job (no
                # late-binding closure capture).
                async def create_instance_with_error_handling(
                    batch_id: int, job_id: int, attempt_id: str, record: dict, id: Tuple[int, int]
                ):
                    try:
                        batch_format_version = BatchFormatVersion(record['format_version'])
                        spec = json.loads(record['spec'])
                        machine_spec = batch_format_version.get_spec_machine_spec(spec)
                        instance, total_resources_on_instance = await self.create_instance(machine_spec)
                        log.info(f'created {instance} for {(batch_id, job_id)}')
                        await mark_job_creating(
                            self.app, batch_id, job_id, attempt_id, instance, time_msecs(), total_resources_on_instance
                        )
                    except Exception:
                        # Errors are logged, not raised: one failed creation
                        # must not abort the whole loop iteration.
                        log.exception(f'while creating job private instance for job {id}', exc_info=True)
                await waitable_pool.call(create_instance_with_error_handling, batch_id, job_id, attempt_id, record, id)
                # Decrement this user's remaining scan budget.
                remaining.value -= 1
                if remaining.value <= 0:
                    break
        await waitable_pool.wait()
        end = time_msecs()
        log.info(f'create_instances: created instances for {n_instances_created} jobs in {end - start}ms for {self}')
        await asyncio.sleep(15)  # ensure we don't create more instances than GCE limit
        return should_wait
def __str__(self):
return f'jpim {self.name}'
| StarcoderdataPython |
import json
import numpy as np
from ..utils import *
from .. import logger
class Randomizer:
    """Samples environment parameters for domain randomization.

    Parameters listed in the randomization config are drawn from the
    distribution described there ('int', 'uniform' or 'normal'); every other
    known parameter falls back to the fixed ``default`` value from the
    default config.
    """

    def __init__(self, randomization_config_fp='default_dr.json', default_config_fp='default.json'):
        """Load the randomization and default configuration JSON files.

        A missing or unparsable randomization config is tolerated: a warning
        is logged and an empty config is used, so every parameter resolves
        through the defaults.  The default config is required.
        """
        try:
            with open(get_file_path('randomization/config', randomization_config_fp, 'json'), mode='r') as f:
                self.randomization_config = json.load(f)
        except (OSError, ValueError):
            # json.JSONDecodeError subclasses ValueError; catching only
            # OSError/ValueError (instead of a bare `except:`) avoids
            # swallowing KeyboardInterrupt/SystemExit.
            logger.warning("Couldn't find {} in randomization/config subdirectory".format(randomization_config_fp))
            self.randomization_config = dict()
        with open(get_file_path('randomization/config', default_config_fp, 'json'), mode='r') as f:
            self.default_config = json.load(f)
        # All parameter names known from either config.
        self.keys = set(list(self.randomization_config.keys()) + list(self.default_config.keys()))

    def _sample(self, name, definition):
        """Draw one sample according to *definition*.

        Raises IndexError when a required distribution parameter (low/high or
        loc/scale) is missing, and NotImplementedError for an unknown
        distribution type, matching the original contract.
        """
        distribution = definition['type']
        size = definition.get('size', 1)
        try:
            if distribution == 'int':
                return np.random.randint(low=definition['low'], high=definition['high'], size=size)
            if distribution == 'uniform':
                return np.random.uniform(low=definition['low'], high=definition['high'], size=size)
            if distribution == 'normal':
                return np.random.normal(loc=definition['loc'], scale=definition['scale'], size=size)
        except KeyError as err:
            raise IndexError("Please check your randomization definition for: {}".format(name)) from err
        raise NotImplementedError("You've specified an unsupported distribution type")

    def randomize(self):
        """Return a dict mapping each known parameter name to its value.

        Randomized parameters are freshly sampled on every call; the
        remaining parameters take their configured default.
        """
        randomization_settings = dict()
        for k in self.keys:
            if k in self.randomization_config:
                randomization_settings[k] = self._sample(k, self.randomization_config[k])
            elif k in self.default_config:
                randomization_settings[k] = self.default_config[k]['default']
            else:
                # Unreachable given how self.keys is built, but kept for safety.
                randomization_settings[k] = None
        return randomization_settings
| StarcoderdataPython |
import asyncio
from PyPSocket.util import *
from PyPSocket.exception import *
# Names exported by `from ... import *`.
__all__ = [
    # Classes
    "Client",
    "ClientEventHandler"
]
class Client:
    """Asynchronous socket client that forwards handle events to user hooks."""

    def __init__(self, address_info, option):
        self.event = ClientEventHandler()
        self._address_info = address_info
        self._option = option
        self._handle = None
        self._closed = False

    async def _listener(self):
        # Fire the connect hook, then pump messages until either side closes.
        await self.event.on_connect()
        while True:
            if self._closed or self._handle.is_closed:
                break
            try:
                payload, opcode = await self._handle.receive()
                await self.event.on_message(payload, opcode)
            except HandleClosedException:
                await self.close()
                break
            except Exception as exception:
                await self.event.on_exception(exception)

    async def send(self, opcode, data):
        """Send *data* with *opcode*; a closed handle is re-raised to the
        caller, any other failure is reported through the exception hook."""
        try:
            await self._handle.send(data, opcode=opcode)
        except HandleClosedException:
            raise
        except Exception as exception:
            await self.event.on_exception(exception)

    async def run(self):
        """Connect to the configured address and process events until closed."""
        running_loop = asyncio.get_running_loop()
        handler = self._option.handler(running_loop)
        self._handle = await handler.connect(self._address_info)
        await self._listener()

    async def close(self):
        """Mark the client closed, close the handle, and fire the close hook."""
        self._closed = True
        self._handle.close()
        await self.event.on_close()
class ClientEventHandler:
    """Bundle of user-attachable hooks for client lifecycle events."""

    def __init__(self):
        # One Event per lifecycle stage; users subscribe callbacks to these.
        for hook_name in ('on_connect', 'on_message', 'on_exception', 'on_close'):
            setattr(self, hook_name, Event())
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.