id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1687251 | <filename>sport_activities_features/tests/test_data_analysis.py
import os
from unittest import TestCase
from sport_activities_features import DataAnalysis
class TestDataAnalysis(TestCase):
    """Smoke tests for DataAnalysis: building and loading NiaAML pipelines."""

    # String form of the type every call is expected to produce.
    _PIPELINE_TYPE = "<class 'niaaml.pipeline.Pipeline'>"

    @staticmethod
    def _data_file(name):
        # Resolve a fixture inside this package's "data" directory.
        return os.path.join(os.path.dirname(__file__), "data", name)

    def setUp(self):
        # Fresh analyzer per test; no state shared between cases.
        self.__data_analysis = DataAnalysis()

    def test_run_analysis(self):
        # End-to-end optimizer run over the bundled CSV fixture.
        pipeline = self.__data_analysis.analyze_data(
            self._data_file('test_data.csv'),
            'Accuracy',
            5,
            50,
            'DifferentialEvolution',
            ['AdaBoost', 'Bagging', 'MultiLayerPerceptron'],
            ['SelectKBest', 'SelectPercentile', 'ParticleSwarmOptimization'],
            ['Normalizer', 'StandardScaler'],
        )
        self.assertEqual(self._PIPELINE_TYPE, str(type(pipeline)))

    def test_load_pipeline(self):
        # Deserialize a previously exported pipeline file.
        pipeline = self.__data_analysis.load_pipeline(self._data_file('test.ppln'))
        self.assertEqual(self._PIPELINE_TYPE, str(type(pipeline)))
| StarcoderdataPython |
1755728 | # 16. Take integer inputs from user until he/she presses q
# ( Ask to press q to quit after every integer input ).
# Print average and product of all numbers.
allnumbers = list()
while True:
user = input("Enter a number (q to Quit):")
if "q" in user.lower():
break
else:
allnumbers.append(int(user))
total = 0
count = 0
product = 1
for onenumber in allnumbers:
count += 1
total += onenumber
product *= onenumber
average = 0
if count > 0:
average = total / count
print("Average = ", average)
print("Product = ", product) | StarcoderdataPython |
157628 | <filename>pyobs/images/processors/detection/daophot.py
from typing import Tuple
from astropy.table import Table
import logging
import numpy as np
from .sourcedetection import SourceDetection
from pyobs.images import Image
log = logging.getLogger(__name__)
class DaophotSourceDetection(SourceDetection):
    """Detect sources using photutils' DAOStarFinder (Daophot algorithm)."""
    __module__ = 'pyobs.images.processors.detection'

    def __init__(self, fwhm: float = 3., threshold: float = 4., bkg_sigma: float = 3.,
                 bkg_box_size: Tuple[int, int] = (50, 50), bkg_filter_size: Tuple[int, int] = (3, 3),
                 *args, **kwargs):
        """Initializes a wrapper for photutils. See its documentation for details.

        Args:
            fwhm: Full-width at half maximum for Gaussian kernel.
            threshold: Threshold for detection, in units of the sigma-clipped image stddev.
            bkg_sigma: Sigma for background kappa-sigma clipping.
            bkg_box_size: Box size for background estimation.
            bkg_filter_size: Filter size for background estimation.
        """
        # store settings
        self.fwhm = fwhm
        self.threshold = threshold
        self.bkg_sigma = bkg_sigma
        self.bkg_box_size = bkg_box_size
        self.bkg_filter_size = bkg_filter_size

    def __call__(self, image: Image) -> Table:
        """Find stars in given image and append catalog.

        Args:
            image: Image to find stars in.

        Returns:
            Full table with results.
        """
        from astropy.stats import SigmaClip, sigma_clipped_stats
        from photutils import Background2D, MedianBackground, DAOStarFinder

        # work on a float copy of the pixel data
        # (bug fix: np.float was removed in NumPy 1.20+/1.24; use builtin float)
        data = image.data.astype(float).copy()

        # mask, if the image carries one
        mask = image.mask.data if image.mask is not None else None

        # estimate and subtract the 2D background
        sigma_clip = SigmaClip(sigma=self.bkg_sigma)
        bkg_estimator = MedianBackground()
        bkg = Background2D(data, self.bkg_box_size, filter_size=self.bkg_filter_size,
                           sigma_clip=sigma_clip, bkg_estimator=bkg_estimator, mask=mask)
        data -= bkg.background

        # sigma-clipped statistics used to scale the detection threshold
        mean, median, std = sigma_clipped_stats(data, sigma=3.0)

        # find stars
        daofind = DAOStarFinder(fwhm=self.fwhm, threshold=self.threshold * std)
        sources = daofind(data - median)

        # robustness fix: DAOStarFinder returns None when nothing is found,
        # which would crash rename_column below; use an empty table instead.
        if sources is None:
            sources = Table(names=('xcentroid', 'ycentroid', 'flux', 'peak'))

        # rename columns to catalog conventions
        sources.rename_column('xcentroid', 'x')
        sources.rename_column('ycentroid', 'y')

        # match FITS conventions (1-based pixel coordinates)
        sources['x'] += 1
        sources['y'] += 1

        # pick columns for catalog and attach it to the image
        cat = sources['x', 'y', 'flux', 'peak']
        image.catalog = cat

        # return full catalog
        return sources


__all__ = ['DaophotSourceDetection']
| StarcoderdataPython |
117035 | <gh_stars>0
# -*- coding: utf-8 -*-
import dateutil.parser
from geonode.people.models import Profile
from bims.models.profile import Profile as BimsProfile
from sass.scripts.fbis_importer import FbisImporter
class FbisUserImporter(FbisImporter):
    """Import FBIS ``User`` table rows into geonode Profile / BimsProfile records."""

    content_type_model = Profile
    table_name = 'User'

    def process_row(self, row, index):
        """Create or update a Profile (and its BimsProfile) from one FBIS row."""
        # Re-assemble the stored password hash from its salt/hash parts.
        # NOTE(review): the format prefix was redacted to '<PASSWORD>' in this
        # copy of the source — confirm the original algorithm/prefix string.
        password_value = '<PASSWORD>${salt}${hash}'.format(
            salt=str(self.get_row_value('SaltValue', row)),
            hash=str(self.get_row_value('PasswordHash', row))
        )
        # Usernames are normalized: spaces to underscores, lower case.
        username_value = self.get_row_value('UserName', row).replace(
            ' ', '_').lower()
        date_joined = dateutil.parser.parse(
            self.get_row_value('DateFrom', row))
        email_value = self.get_row_value('Email', row)

        # If the username is taken by a different email, suffix it with the
        # count of existing profiles to keep it unique.
        if Profile.objects.filter(username=username_value).exists():
            profiles = Profile.objects.filter(username=username_value)
            if not profiles.filter(email=email_value).exists():
                same_username = len(profiles)
                username_value += '_%s' % str(same_username)

        profile, created = Profile.objects.get_or_create(
            username=username_value,
            email=email_value,
            is_active=True,
        )
        profile.first_name = self.get_row_value('FirstName', row)
        profile.last_name = self.get_row_value('Surname', row)
        profile.date_joined = date_joined
        profile.fax = str(self.get_row_value('FaxNumber', row))
        profile.delivery = self.get_row_value('PostalAddress', row)
        profile.zipcode = self.get_row_value('PostalCode', row)
        profile.position = self.get_row_value('Qualifications', row)
        profile.voice = str(self.get_row_value('Telephone', row))
        # Bug fix: was an unassigned '<PASSWORD>' placeholder (syntax error);
        # store the hash assembled above.
        profile.password = password_value
        profile.save()

        # Other information, kept on the BIMS-specific profile extension.
        bims_profile, bims_created = BimsProfile.objects.get_or_create(
            user=profile,
        )
        bims_profile.qualifications = self.get_row_value('Qualifications', row)
        bims_profile.full_name = self.get_row_value('UserName', row)
        bims_profile.other = self.get_row_value('Other', row)
        # Fields with no dedicated column are preserved verbatim in a JSON blob.
        bims_profile.data = {
            'PasswordHint': str(self.get_row_value('PasswordHint', row)),
            'RegionPolID': str(self.get_row_value('RegionPolID', row)),
            'OrganisationID': str(self.get_row_value('OrganisationID', row)),
            'RegionalChampion': str(self.get_row_value(
                'RegionalChampion',
                row)),
            'NTUserName': str(self.get_row_value('NTUserName', row)),
            'SASS4': str(self.get_row_value('SASS4', row)),
            'RipVegIndex': str(self.get_row_value('RipVegIndex', row)),
            'FAIIndex': str(self.get_row_value('FAIIndex', row)),
            'DateModified': str(self.get_row_value('DateModified', row))
        }
        bims_profile.save()

        # Remember the FBIS UserID -> Profile mapping for later runs.
        self.save_uuid(
            uuid=self.get_row_value('UserID', row),
            object_id=profile.id
        )
| StarcoderdataPython |
1702206 | from pynwb import TimeSeries
class SampleCountTimestampCorespondenceBuilder:
    """Builds a TimeSeries relating acquisition sample counts to timestamps."""

    def __init__(self, data):
        # data is indexed as [:, 0] (sample counts) and [:, 1] (timestamps).
        self.data = data

    def build(self):
        """Return a 'sample_count' TimeSeries for the stored data."""
        counts = self.data[:, 0]
        stamps = self.data[:, 1]
        return TimeSeries(
            name="sample_count",
            description="acquisition system sample count",
            data=counts,
            timestamps=stamps,
            unit='int64',
        )
| StarcoderdataPython |
3365458 |
"""
This is a combination of facets and wstacks. The outer iteration is over facet and the inner is over w.
"""
import numpy
from arl.data.data_models import Visibility, Image
from arl.imaging.wstack import predict_wstack, invert_wstack
from arl.image.iterators import image_raster_iter
from arl.imaging.iterated import predict_with_raster_iterator, invert_with_raster_iterator
import logging
log = logging.getLogger(__name__)
def predict_facets_wstack(vis: Visibility, model: Image, facets=1, **kwargs) -> Visibility:
    """ Predict using image facets and w stacking.

    :param vis: Visibility to be predicted
    :param model: model image
    :param facets: Number of facets per axis (default 1)
    :return: resulting visibility (in place works)
    """
    log.info("predict_facets_wstack: Predicting by image facets and w stacking")
    # Bug fix: forward the caller's ``facets`` argument; it was previously
    # ignored and hard-coded to 1.
    return predict_with_raster_iterator(vis, model, image_iterator=image_raster_iter,
                                        predict_function=predict_wstack,
                                        facets=facets, **kwargs)
def invert_facets_wstack(vis: Visibility, im: Image, dopsf=False, normalize=True, facets=1, **kwargs) -> (Image,
                                                                                                          numpy.ndarray):
    """ Invert using image facets and w stacking.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :param facets: Number of facets per axis (default 1)
    :return: resulting image[nchan, npol, ny, nx], sum of weights[nchan, npol]
    """
    log.info("invert_facets_wstack: Inverting by image facets and w stacking")
    # Bug fix: forward the caller's ``facets`` argument; it was previously
    # ignored and hard-coded to 1.
    return invert_with_raster_iterator(vis, im, normalize=normalize, image_iterator=image_raster_iter, dopsf=dopsf,
                                       invert_function=invert_wstack, facets=facets, **kwargs)
| StarcoderdataPython |
3398849 | <filename>util/Gpio.py
import logging
import RPi.GPIO as GPIO
log = logging.getLogger(__name__)
class Gpio:
    """Thin wrapper around RPi.GPIO for a single input pin.

    When ``debug`` is truthy every hardware call is skipped, so the class
    can be exercised on machines without GPIO hardware.
    """

    def __init__(self, debug, pin=4):
        self.debug = debug
        self.pin = pin
        if not self.debug:
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(self.pin, GPIO.IN)

    def add_event_detect(self, callback):
        """Register *callback* for both edges on the pin (200 ms debounce)."""
        if not self.debug:
            GPIO.add_event_detect(self.pin, GPIO.BOTH, callback=callback, bouncetime=200)
            # Idiom fix: lazy %-args so the message is only built when emitted.
            log.info("Added event detect to GPIO BOTH on pin %s", self.pin)

    def remove_event_detect(self):
        """Unregister the edge-detect callback."""
        if not self.debug:
            GPIO.remove_event_detect(self.pin)
            log.info("Removed event detect to GPIO pin %s", self.pin)

    def input(self):
        """Read the pin level; always True in debug mode."""
        if not self.debug:
            return GPIO.input(self.pin)
        else:
            return True

    def cleanup(self):
        """Release GPIO resources (no-op in debug mode)."""
        if not self.debug:
            GPIO.cleanup()
| StarcoderdataPython |
3241308 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from calvin.utilities.calvin_callback import CalvinCB
from calvin.utilities import calvinlogger
from calvin.utilities import calvinuuid
from calvin.runtime.south.transports import base_transport
_log = calvinlogger.get_logger(__name__)
# Message templates for the JOIN handshake.
# NOTE(review): these are module-level *mutable* dicts shared by every
# transport instance; _send_join/_send_join_reply mutate them in place
# before sending — confirm this is safe under concurrent handshakes.
_join_request_reply = {'cmd': 'JOIN_REPLY', 'id': None, 'sid': None, 'serializer': None}
_join_request = {'cmd': 'JOIN_REQUEST', 'id': None, 'sid': None, 'serializers': []}
class CalvinTransport(base_transport.BaseTransport):
    """Point-to-point transport between two Calvin runtimes.

    Wraps a lower-level transport and performs a JOIN handshake in which
    the peers exchange runtime ids and agree on a payload serializer
    ("coder") before ordinary messages flow.
    """

    def __init__(self, rt_id, remote_uri, callbacks, transport, proto=None, node_name=None, server_node_name=None, client_validator=None):
        """Set up the transport towards *remote_uri*.

        ``proto`` is only set for incoming (server-side) connections; its
        presence makes this end wait for the peer's JOIN_REQUEST instead
        of sending one.  ``client_validator``, when given, is called with
        the decoded JOIN message and must return a truthy value for the
        peer to be accepted (default accepts everyone).
        """
        _log.debug("CalvinTransport::__init__: "
                   "\n\trt_id={}"
                   "\n\tnode_name={}"
                   "\n\tremote_uri={}"
                   "\n\tcallbacks={}"
                   "\n\ttransport={}"
                   "\n\tserver_node_name={}".format(rt_id,
                                                    node_name,
                                                    remote_uri,
                                                    callbacks,
                                                    transport,
                                                    server_node_name))
        super(CalvinTransport, self).__init__(rt_id, remote_uri, callbacks=callbacks)
        self._rt_id = rt_id
        self._node_name = node_name
        self._remote_rt_id = None  # set once the JOIN handshake succeeds
        self._coder = None  # serializer negotiated during JOIN
        self._transport = transport(self._uri.hostname, self._uri.port, callbacks, proto=proto, node_name=self._node_name, server_node_name=server_node_name)
        self._rtt = None  # Init rtt in s
        if not client_validator:
            # Default validator: accept any peer.
            self._verify_client = lambda x: True
        else:
            self._verify_client = client_validator
        self._incoming = proto is not None

        if self._incoming:
            # TODO: Set timeout
            # Incoming connection: should time out if no join arrives
            self._transport.callback_register("disconnected", CalvinCB(self._disconnected))
            self._transport.callback_register("data", CalvinCB(self._data_received))

    def connect(self, timeout=10):
        """Initiate an outgoing connection; JOIN is sent once connected."""
        if self._transport.is_connected():
            raise Exception("Transport already connected")
        self._transport.callback_register("connected", CalvinCB(self._send_join))
        self._transport.callback_register("connection_failed", CalvinCB(self._connection_failed))
        self._transport.callback_register("disconnected", CalvinCB(self._disconnected))
        self._transport.callback_register("data", CalvinCB(self._data_received))
        # TODO: set timeout
        self._transport.join()

    def disconnect(self, timeout=10):
        """Disconnect the underlying transport if it is connected."""
        # TODO: Set timeout
        if self._transport.is_connected():
            self._transport.disconnect()

    def is_connected(self):
        """Return True when the underlying transport is connected."""
        return self._transport.is_connected()

    def send(self, payload, timeout=None, coder=None):
        """Encode *payload* and send it; return True on success.

        ``coder`` overrides the negotiated serializer — used during the
        JOIN handshake, before negotiation has finished.
        """
        tcoder = coder or self._coder
        try:
            _log.debug('send_message %s => %s "%s"' % (self._rt_id, self._remote_rt_id, payload))
            self._callback_execute('send_message', self, payload)
            # Encode and send
            raw_payload = tcoder.encode(payload)
            # _log.debug('raw_send_message %s => %s "%s"' % (self._rt_id, self._remote_rt_id, raw_payload))
            self._callback_execute('raw_send_message', self, raw_payload)
            self._transport.send(raw_payload)
            # TODO: Set timeout of send
            return True
        except:
            # NOTE(review): bare except converts any error (including
            # programming errors) into a False return — consider narrowing.
            _log.exception("Send message failed!!")
            _log.error("Payload = '%s'" % repr(payload))
            return False

    def _get_join_coder(self):
        # JOIN messages are always JSON so both sides can read them before
        # a serializer has been negotiated.
        return self.get_coders()['json']

    def _get_msg_uuid(self):
        # Fresh session id for a handshake message.
        return calvinuuid.uuid("MSGID")

    def _send_join(self):
        """Send our JOIN_REQUEST advertising the serializers we support."""
        self._callback_execute('peer_connected', self, self.get_uri())
        msg = _join_request
        msg['id'] = self._rt_id
        msg['sid'] = self._get_msg_uuid()
        msg['serializers'] = self.get_coders().keys()
        self._join_start = time.time()  # start of RTT measurement
        self.send(msg, coder=self._get_join_coder())

    def _send_join_reply(self, _id, serializer, sid):
        """Answer a JOIN_REQUEST; ``_id`` is None when the peer is rejected."""
        msg = _join_request_reply
        msg['id'] = self._rt_id
        msg['sid'] = sid
        msg['serializer'] = serializer
        self.send(msg, coder=self._get_join_coder())

    def _handle_join(self, data):
        """Server side: validate a JOIN_REQUEST and pick a serializer."""
        sid = None
        coder_name = None
        rt_id = None
        valid = False
        try:
            data_obj = self._get_join_coder().decode(data)
            # Verify package shape
            if 'cmd' not in data_obj or data_obj['cmd'] != 'JOIN_REQUEST' or \
               'serializers' not in data_obj or 'id' not in data_obj or 'sid' not in data_obj:
                raise Exception('Not a valid package "%s"' % data_obj)
            sid = data_obj['sid']
            # First serializer in our priority order that the peer offers wins.
            for coder in self.get_coders_prio():
                if coder in data_obj['serializers']:
                    self._coder = self.get_coders()[coder]
                    coder_name = coder
                    break
            # Verify remote
            valid = self._verify_client(data_obj)
            # TODO: Callback or use join_finished
            if valid:
                rt_id = self._rt_id
                self._remote_rt_id = data_obj['id']
            else:
                # Rejected: reply carries id None so the peer knows.
                rt_id = None
                self._joined(False, False, reason={'reason': 'not_verified', 'info': ''})
        except Exception as exc:
            _log.exception("_handle_join: Failed!!")
            self._joined(False, False, reason={'reason': 'unknown', 'info': str(exc)})

        self._send_join_reply(rt_id, coder_name, sid)
        if valid:
            self._joined(True, False)

    def _joined(self, success, is_orginator, reason=None):
        """Report handshake outcome via join_finished / join_failed callbacks."""
        if not success:
            self._callback_execute('join_failed', self, self._remote_rt_id, self.get_uri(), is_orginator, reason)
        else:
            self._callback_execute('join_finished', self, self._remote_rt_id, self.get_uri(), is_orginator)

    def _handle_join_reply(self, data):
        """Client side: process the peer's reply to our JOIN_REQUEST."""
        valid = False
        self._rtt = time.time() - self._join_start  # handshake round-trip time
        try:
            data_obj = self.get_coders()['json'].decode(data)
            # Verify package and set local data
            # NOTE(review): replies carry cmd 'JOIN_REPLY' (see
            # _join_request_reply above) but this compares against
            # 'request_reply', and the branch body is `pass`, so this
            # validation never takes effect — confirm intent.
            if 'cmd' not in data_obj or data_obj['cmd'] != 'request_reply' or \
               'serializer' not in data_obj or 'id' not in data_obj or 'sid' not in data_obj:
                pass
            if data_obj['serializer'] in self.get_coders():
                self._coder = self.get_coders()[data_obj['serializer']]
            if data_obj['id'] is not None:
                # Peer accepted our join: remember its runtime id.
                self._remote_rt_id = data_obj['id']
            else:
                # Peer rejected our join (reply id is None).
                self._joined(False, False, reason={'reason': 'remote_rejected', 'info': ''})
                return
            valid = self._verify_client(data_obj)
            if not valid:
                self._joined(False, False, reason={'reason': 'not_verified', 'info': ''})
            # TODO: Callback or use join_finished
        except Exception as exc:
            _log.exception("_handle_join_reply: Failed!!")
            self._joined(False, False, reason={'reason': 'unknown', 'info': str(exc)})
            # TODO: disconnect ?
            return
        if valid:
            self._joined(True, True)

    def _disconnected(self, reason):
        # TODO: unify reason
        # NOTE(review): status is derived by matching a human-readable
        # error string — fragile against library message changes.
        status = "ERROR"
        if reason.getErrorMessage() == "Connection was closed cleanly.":
            status = "OK"
        self._callback_execute('peer_disconnected', self, self._remote_rt_id, status)

    def _connection_failed(self, reason):
        status = "ERROR"
        if reason.getErrorMessage() == "An error occurred while connecting: 22":
            status = "OK"
        # TODO: unify reason
        self._callback_execute('peer_connection_failed', self, self.get_uri(), status)

    def _data_received(self, data):
        """Dispatch raw data: JOIN handshake first, then user payloads."""
        self._callback_execute('raw_data_received', self, data)
        if self._remote_rt_id is None:
            # Handshake not finished yet; route to the proper side of it.
            if self._incoming:
                self._handle_join(data)
            else:
                # We have not joined yet
                self._handle_join_reply(data)
            return
        # TODO: How to error this
        data_obj = None
        # decode with the negotiated serializer
        try:
            data_obj = self._coder.decode(data)
        except:
            _log.exception("Message decode failed")
        self._callback_execute('data_received', self, data_obj)
class CalvinServer(base_transport.BaseServer):
    """Listens for incoming runtime connections and wraps each accepted
    client in a CalvinTransport."""

    def __init__(self, rt_id, node_name, listen_uri, callbacks, server_transport, client_transport, client_validator=None):
        """Create the server; call ``start()`` to begin listening.

        ``server_transport`` builds the listening endpoint;
        ``client_transport`` is the factory handed to each CalvinTransport
        created for an accepted peer.
        """
        super(CalvinServer, self).__init__(rt_id, listen_uri, callbacks=callbacks)
        self._rt_id = rt_id
        self._node_name = node_name

        self._port = None  # bound port, set in _started
        self._peers = {}  # uri -> CalvinTransport for connected peers
        self._callbacks = callbacks
        self._client_validator = client_validator

        # TODO: Get iface from addr and lookup host
        iface = '::'  # listen on all interfaces (IPv6 any-address)

        self._transport = server_transport(iface=iface, node_name=self._node_name, port=self._listen_uri.port or 0, uri=listen_uri)
        self._client_transport = client_transport

    def _started(self, port):
        """Underlying transport is listening; record the bound port."""
        self._port = port
        self._callback_execute('server_started', self, self._port)

    def _stopped(self):
        """Underlying transport stopped listening."""
        self._port = None
        self._callback_execute('server_stopped', self)
        # TODO: remove this ?
        # self._transport.callback_register('peer_connected', CalvinCB(self._peer_connected))

    def _client_connected(self, uri, protocol):
        """Handle a newly connected client.

        The JOIN handshake must still finish before upper layers are
        notified of a usable peer.
        """
        _log.debug("Client connected")
        import socket
        if uri in self._peers:
            _log.info("Peer %s already connected" % uri)
            # Disconnect client locally and remove callbacks.
            # Minimal stand-in for a failure object: only getErrorMessage()
            # is used by CalvinTransport._disconnected.
            class ErrorMessage:
                def __init__(self, str):
                    self._str = str

                def getErrorMessage(self):
                    return self._str
            self._peers[uri]._transport._proto.connectionLost(ErrorMessage("Connection was closed cleanly."))

        from calvin.utilities import calvinconfig
        _conf = calvinconfig.get()
        runtime_to_runtime_security = _conf.get("security","runtime_to_runtime_security")
        if runtime_to_runtime_security=="tls":
            _log.debug("TLS enabled, get FQDN of runtime")
            try:
                # uri looks like 'scheme://addr:port'; strip scheme and port
                # to recover the bare IPv6 address for the reverse lookup.
                junk, ipv6 = uri.split("//")
                ipv6_addr_list = ipv6.split(":")[:-1]
                ipv6_addr = ":".join(ipv6_addr_list)
                hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ipv6_addr)
                fqdn = socket.getfqdn(hostname)
            except Exception as err:
                _log.error("Could not resolve ip address to hostname"
                           "\n\terr={}"
                           "\n\turi={}".format(err, uri))
                raise
        else:
            fqdn=None
        tp = CalvinTransport(self._rt_id, uri, self._callbacks,
                             self._client_transport, proto=protocol,
                             node_name=self._node_name,
                             server_node_name=fqdn,
                             client_validator=self._client_validator)

        self._callback_execute('peer_connected', tp, tp.get_uri())

        self._peers[uri] = tp

    def start(self):
        """Register lifecycle callbacks and start listening."""
        # These should come from us
        self._transport.callback_register('server_started', CalvinCB(self._started))
        self._transport.callback_register('server_stopped', CalvinCB(self._stopped))
        self._transport.callback_register('client_connected', CalvinCB(self._client_connected))

        # Start the server
        self._port = self._transport.start()

    def stop(self):
        """Stop listening; returns whatever the underlying transport returns."""
        return self._transport.stop()

    def is_listening(self):
        """Return True while the underlying transport is listening."""
        return self._transport.is_listening()
| StarcoderdataPython |
3372245 | import sys
import urllib2
from redash.query_runner import *
class Url(BaseQueryRunner):
    """Query runner that fetches pre-formatted JSON results over HTTP.

    The "query" text is a URL (or a URL suffix when a base URL is
    configured); the response body is returned verbatim as the result.
    NOTE: Python 2 code (urllib2, three-argument raise).
    """

    @classmethod
    def configuration_schema(cls):
        # Single optional setting: a base path queries are resolved against.
        return {
            'type': 'object',
            'properties': {
                'url': {
                    'type': 'string',
                    'title': 'URL base path'
                }
            }
        }

    @classmethod
    def annotate_query(cls):
        # URLs cannot carry SQL-style comment annotations.
        return False

    def run_query(self, query):
        """Fetch ``base_url + query`` and return ``(json_data, error)``.

        When a base URL is configured, absolute URLs are rejected so the
        runner cannot be pointed at arbitrary hosts.
        """
        base_url = self.configuration.get("url", None)

        try:
            error = None
            query = query.strip()

            if base_url is not None and base_url != "":
                if query.find("://") > -1:
                    return None, "Accepting only relative URLs to '%s'" % base_url

            if base_url is None:
                base_url = ""

            url = base_url + query

            json_data = urllib2.urlopen(url).read().strip()

            if not json_data:
                error = "Error reading data from '%s'" % url

            return json_data, error

        except urllib2.URLError as e:
            return None, str(e)
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            # Re-raise preserving the original traceback (Python 2 syntax).
            raise sys.exc_info()[1], None, sys.exc_info()[2]

        # Reached only via the KeyboardInterrupt branch above.
        return json_data, error


register(Url)
| StarcoderdataPython |
3314542 | """lib/netbox/ansible.py"""
class NetBoxToAnsible:
"""Main NetBox to Ansible class"""
def __init__(self, netbox_data):
self.netbox_data = netbox_data
self.ansible_data = {}
    def data(self):
        """Translate NetBox data to Ansible constructs.

        Runs every per-application translation, accumulating results into
        ``self.ansible_data``, and returns that dict.
        """
        # DCIM
        self.dcim_translations()
        # Tenancy
        self.tenancy_translations()
        # IPAM
        self.ipam_translations()
        # Virtualization
        self.virtualization_translations()
        # Circuits
        self.circuits_translations()
        # Secrets
        self.secrets_translations()
        # Extras
        self.extras_translations()
        return self.ansible_data
    def dcim_translations(self):
        """Translate DCIM related info (regions, sites, racks, devices, ...)."""
        self.regions()
        self.sites()
        self.rack_roles()
        self.rack_groups()
        self.racks()
        self.manufacturers()
        self.platforms()
        self.device_types()
        self.device_roles()
        self.devices()
        self.interfaces()
        self.inventory_items()

    def tenancy_translations(self):
        """Translate tenancy related info (groups, then tenants)."""
        self.tenant_groups()
        self.tenants()

    def ipam_translations(self):
        """Translate IPAM related info (roles, VLANs, VRFs, prefixes, IPs)."""
        self.roles()
        self.vlan_groups()
        self.vlans()
        self.vrfs()
        self.rirs()
        self.aggs()
        self.prefixes()
        self.ip_addresses()

    def virtualization_translations(self):
        """Translate virtualization related info (clusters and VMs)."""
        self.cluster_groups()
        self.cluster_types()
        self.clusters()
        self.virtual_machines()
        self.virtual_interfaces()

    def circuits_translations(self):
        """Translate circuit related info.

        NOTE(review): all circuit translations are currently disabled
        (calls commented out), so this method is a no-op.
        """
        # self.providers()
        # self.circuit_types()
        # self.circuits()

    def extras_translations(self):
        """Translate extras related info (currently disabled; no-op)."""
        # self.config_contexts()

    def secrets_translations(self):
        """Translate secrets related info (currently disabled; no-op)."""
        # self.secret_roles()
        # self.secrets()
def roles(self):
"""Extract NetBox roles"""
netbox_ipam_roles = []
for role in self.netbox_data['netbox_ipam_roles']:
data = role['data']
role_info = {
'data': {'name': data['name'], 'weight': data['weight']},
'state': role['state']}
netbox_ipam_roles.append(role_info)
self.ansible_data['netbox_ipam_roles'] = netbox_ipam_roles
def vlan_groups(self):
"""Extract NetBox VLAN groups"""
netbox_vlan_groups = []
for group in self.netbox_data['netbox_vlan_groups']:
data = group['data']
# Update site with name only if defined
if data['site'] is not None:
data['site'] = data['site']['name']
group_info = {
'data': {'name': data['name'], 'site': data['site']},
'state': group['state']}
netbox_vlan_groups.append(group_info)
self.ansible_data['netbox_vlan_groups'] = netbox_vlan_groups
    def vlans(self):
        """Extract NetBox VLANs.

        NOTE(review): only ``name`` and ``site`` are carried over — no VLAN
        id/status/tenant fields, unlike the other IPAM extractors; confirm
        this is intentional.
        """
        netbox_vlans = []
        for vlan in self.netbox_data['netbox_vlans']:
            data = vlan['data']
            # Update site with name only if defined
            if data['site'] is not None:
                data['site'] = data['site']['name']
            vlan_info = {
                'data': {'name': data['name'], 'site': data['site']},
                'state': vlan['state']}
            netbox_vlans.append(vlan_info)
        self.ansible_data['netbox_vlans'] = netbox_vlans
def vrfs(self):
"""Extract NetBox VRFs"""
netbox_vrfs = []
for vrf in self.netbox_data['netbox_vrfs']:
data = vrf['data']
# Update tenant with name only if defined
if data['tenant'] is not None:
data['tenant'] = data['tenant']['name']
vrf_info = {
'data': {'name': data['name'], 'rd': data['rd'],
'enforce_unique': data['enforce_unique'],
'description': data['description'],
'tags': data['tags'],
'custom_fields': data['custom_fields'],
'tenant': data['tenant']},
'state': vrf['state']}
netbox_vrfs.append(vrf_info)
self.ansible_data['netbox_vrfs'] = netbox_vrfs
def rirs(self):
"""Extract NetBox RIRs"""
netbox_rirs = []
for rir in self.netbox_data['netbox_rirs']:
data = rir['data']
rir_info = {
'data': {'name': data['name'],
'is_private': data['is_private']},
'state': rir['state']}
netbox_rirs.append(rir_info)
self.ansible_data['netbox_rirs'] = netbox_rirs
def aggs(self):
"""Extract NetBox aggregates"""
netbox_aggregates = []
for agg in self.netbox_data['netbox_aggregates']:
data = agg['data']
if data['rir'] is not None:
data['rir'] = data['rir']['name']
agg_info = {
'data': {'custom_fields': data['custom_fields'],
'description': data['description'],
'prefix': data['prefix'],
'rir': data['rir'],
'tags': data['tags']},
'state': agg['state']}
netbox_aggregates.append(agg_info)
self.ansible_data['netbox_aggregates'] = netbox_aggregates
    def prefixes(self):
        """Extract NetBox prefixes.

        Nested role/site/tenant/vrf records are collapsed to their names
        (in place) before building the Ansible payload.
        """
        netbox_prefixes = []
        for prefix in self.netbox_data['netbox_prefixes']:
            data = prefix['data']
            # Update role with name only if defined
            if data['role'] is not None:
                data['role'] = data['role']['name']
            # Update site with name only if defined
            if data['site'] is not None:
                data['site'] = data['site']['name']
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                data['tenant'] = data['tenant']['name']
            # Update vrf with name only if defined
            if data['vrf'] is not None:
                data['vrf'] = data['vrf']['name']
            prefix_info = {
                'data': {'custom_fields': data['custom_fields'],
                         'description': data['description'],
                         'family': data['family']['value'],
                         'is_pool': data['is_pool'],
                         'prefix': data['prefix'],
                         'site': data['site'],
                         'status': data['status']['label'],
                         'prefix_role': data['role'],
                         'tags': data['tags'],
                         'tenant': data['tenant'],
                         'vlan': data['vlan'],
                         'vrf': data['vrf']
                         }, 'state': prefix['state']}
            netbox_prefixes.append(prefix_info)
        self.ansible_data['netbox_prefixes'] = netbox_prefixes
    def ip_addresses(self):
        """Extract NetBox IP addresses.

        Interface, NAT, tenant and VRF references are flattened to the
        minimal fields the Ansible payload needs.
        """
        netbox_ip_addresses = []
        for address in self.netbox_data['netbox_ip_addresses']:
            data = address['data']
            # Update interface with name and device
            if data['interface'] is not None:
                interface = data['interface']
                data['interface'] = {
                    'name': interface['name']
                }
                try:
                    data['interface']['device'] = interface['device']['name']
                except TypeError:
                    # interface['device'] may be None (subscripting then
                    # raises TypeError) — presumably VM-attached interfaces.
                    pass
                if interface['virtual_machine'] is not None:
                    data['interface']['virtual_machine'] = interface[
                        'virtual_machine']['name']
            # Update nat_inside, keeping only address and vrf
            if data['nat_inside'] is not None:
                data['nat_inside'] = {
                    'address': data['nat_inside']['address'],
                    'vrf': data['nat_inside']['vrf']
                }
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                data['tenant'] = data['tenant']['name']
            # Update vrf with name only if defined
            if data['vrf'] is not None:
                data['vrf'] = data['vrf']['name']
            address_info = {'data': {'address': data['address'],
                                     'custom_fields': data['custom_fields'],
                                     'description': data['description'],
                                     'family': data['family']['value'],
                                     'interface': data['interface'],
                                     'nat_inside': data['nat_inside'],
                                     'status': data['status']['label'],
                                     'tags': data['tags'],
                                     'tenant': data['tenant'],
                                     'vrf': data['vrf']},
                            'state': address['state']}
            # role is optional on an IP address; forward only its label.
            if data['role'] is not None:
                address_info['data']['role'] = data['role']['label']
            netbox_ip_addresses.append(address_info)
        self.ansible_data['netbox_ip_addresses'] = netbox_ip_addresses
def tenant_groups(self):
"""Extract NetBox tenant groups"""
netbox_tenant_groups = []
for group in self.netbox_data['netbox_tenant_groups']:
data = group['data']
group_info = {
'data': {'name': data['name']}, 'state': group['state']}
netbox_tenant_groups.append(group_info)
self.ansible_data['netbox_tenant_groups'] = netbox_tenant_groups
    def tenants(self):
        """Extract NetBox tenants (docstring fixed: previously said "tenant groups")."""
        netbox_tenants = []
        for tenant in self.netbox_data['netbox_tenants']:
            data = tenant['data']
            # Update group with name only if defined
            if data['group'] is not None:
                data['group'] = data['group']['name']
            tenant_info = {
                'data': {'description': data['description'],
                         'comments': data['comments'],
                         'custom_fields': data['custom_fields'],
                         'name': data['name'],
                         'slug': data['slug'],
                         'tenant_group': data['group'],
                         'tags': data['tags']},
                'state': tenant['state']}
            netbox_tenants.append(tenant_info)
        self.ansible_data['netbox_tenants'] = netbox_tenants
def regions(self):
"""Extract NetBox regions"""
netbox_regions = []
for region in self.netbox_data['netbox_regions']:
data = region['data']
# Update parent region with name only if defined
if data['parent'] is not None:
data['parent'] = data['parent']['name']
region_info = {
'data': {'name': data['name'],
'parent_region': data['parent']},
'state': region['state']}
netbox_regions.append(region_info)
self.ansible_data['netbox_regions'] = netbox_regions
    def sites(self):
        """Extract NetBox sites.

        Nested region/tenant records are collapsed to their names (in
        place) before building the Ansible payload.
        """
        netbox_sites = []
        for site in self.netbox_data['netbox_sites']:
            data = site['data']
            # Update region with name only if defined
            if data['region'] is not None:
                data['region'] = data['region']['name']
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                data['tenant'] = data['tenant']['name']
            site_info = {
                'data': {'asn': data['asn'],
                         'comments': data['comments'],
                         'contact_name': data['contact_name'],
                         'contact_phone': data['contact_phone'],
                         'contact_email': data['contact_email'],
                         'custom_fields': data['custom_fields'],
                         'description': data['description'],
                         'facility': data['facility'],
                         'latitude': data['latitude'],
                         'longitude': data['longitude'],
                         'name': data['name'],
                         'physical_address': data['physical_address'],
                         'shipping_address': data['shipping_address'],
                         'slug': data['slug'],
                         'region': data['region'],
                         'status': data['status']['label'],
                         'tags': data['tags'],
                         'tenant': data['tenant'],
                         'time_zone': data['time_zone'],
                         }, 'state': site['state']}
            netbox_sites.append(site_info)
        self.ansible_data['netbox_sites'] = netbox_sites
def rack_roles(self):
"""Extract NetBox rack roles"""
netbox_rack_roles = []
for role in self.netbox_data['netbox_rack_roles']:
data = role['data']
role_info = {'data': {'name': data['name'],
'color': data['color']},
'state': role['state']}
netbox_rack_roles.append(role_info)
self.ansible_data['netbox_rack_roles'] = netbox_rack_roles
def rack_groups(self):
"""Extract NetBox rack groups"""
netbox_rack_groups = []
for group in self.netbox_data['netbox_rack_groups']:
data = group['data']
# Update site with name only if defined
if data['site'] is not None:
data['site'] = data['site']['name']
group_info = {
'data': {'name': data['name'], 'site': data['site']},
'state': group['state']}
netbox_rack_groups.append(group_info)
self.ansible_data['netbox_rack_groups'] = netbox_rack_groups
    def racks(self):
        """Extract NetBox racks.

        NOTE(review): unlike site/group/tenant, ``role`` is forwarded as
        the raw nested record (no ['name'] extraction) — confirm the
        consumer expects that shape.
        """
        netbox_racks = []
        for rack in self.netbox_data['netbox_racks']:
            data = rack['data']
            # Update site with name only if defined
            if data['site'] is not None:
                data['site'] = data['site']['name']
            # Update rack group with name only if defined
            if data['group'] is not None:
                data['group'] = data['group']['name']
            # Update tenant with name only if defined
            if data['tenant'] is not None:
                data['tenant'] = data['tenant']['name']
            # Update type with label only if defined
            if data['type'] is not None:
                data['type'] = data['type']['label']
            # Update width with value only if defined
            if data['width'] is not None:
                data['width'] = data['width']['value']
            rack_info = {
                'data': {'asset_tag': data['asset_tag'],
                         'comments': data['comments'],
                         'custom_fields': data['custom_fields'],
                         'desc_units': data['desc_units'],
                         'name': data['name'],
                         'facility_id': data['facility_id'],
                         'outer_depth': data['outer_depth'],
                         'outer_width': data['outer_width'],
                         'rack_group': data['group'],
                         'rack_role': data['role'],
                         'serial': data['serial'],
                         'site': data['site'],
                         'status': data['status']['label'],
                         'tags': data['tags'],
                         'tenant': data['tenant'],
                         'type': data['type'],
                         'u_height': data['u_height'],
                         'width': data['width']
                         }, 'state': rack['state']}
            # outer_unit is only forwarded when it is set.
            if data['outer_unit'] is not None:
                rack_info['data']['outer_unit'] = data['outer_unit']
            netbox_racks.append(rack_info)
        self.ansible_data['netbox_racks'] = netbox_racks
def manufacturers(self):
"""Extract NetBox manufacturers"""
netbox_manufacturers = []
for manufacturer in self.netbox_data['netbox_manufacturers']:
data = manufacturer['data']
manufacturer_info = {'data': {'name': data['name']},
'state': manufacturer['state']}
netbox_manufacturers.append(manufacturer_info)
self.ansible_data['netbox_manufacturers'] = netbox_manufacturers
def platforms(self):
    """Extract NetBox platforms into Ansible-ready dicts."""
    extracted = []
    for entry in self.netbox_data['netbox_platforms']:
        data = entry['data']
        # Reduce the nested manufacturer object to its name (in place).
        if data['manufacturer'] is not None:
            data['manufacturer'] = data['manufacturer']['name']
        extracted.append({
            'data': {'manufacturer': data['manufacturer'],
                     'name': data['name'],
                     'napalm_driver': data['napalm_driver'],
                     'napalm_args': data['napalm_args']},
            'state': entry['state'],
        })
    self.ansible_data['netbox_platforms'] = extracted
def device_types(self):
    """Extract NetBox device types into Ansible-ready dicts."""
    extracted = []
    for entry in self.netbox_data['netbox_device_types']:
        data = entry['data']
        # Reduce the nested manufacturer object to its name (in place).
        if data['manufacturer'] is not None:
            data['manufacturer'] = data['manufacturer']['name']
        payload = {
            'comments': data['comments'],
            'custom_fields': data['custom_fields'],
            'is_full_depth': data['is_full_depth'],
            'manufacturer': data['manufacturer'],
            'model': data['model'],
            'part_number': data['part_number'],
            'slug': data['slug'],
            'tags': data['tags'],
            'u_height': data['u_height'],
        }
        # Parent/child role is optional; include it only when defined.
        if data['subdevice_role'] is not None:
            payload['subdevice_role'] = data['subdevice_role']['label']
        extracted.append({'data': payload, 'state': entry['state']})
    self.ansible_data['netbox_device_types'] = extracted
def device_roles(self):
    """Extract NetBox device roles into Ansible-ready dicts."""
    self.ansible_data['netbox_device_roles'] = [
        {'data': {'name': role['data']['name'],
                  'color': role['data']['color'],
                  'vm_role': role['data']['vm_role']},
         'state': role['state']}
        for role in self.netbox_data['netbox_device_roles']
    ]
def devices(self):
    """Extract NetBox devices into Ansible-ready dicts.

    Optional nested objects are included only when set, reduced to a
    single identifying field.
    """
    extracted = []
    for entry in self.netbox_data['netbox_devices']:
        data = entry['data']
        payload = {
            'name': data['name'],
            'platform': data['platform'],
            'serial': data['serial'],
            'asset_tag': data['asset_tag'],
            'position': data['position'],
            'status': data['status']['label'],
            'comments': data['comments'],
            'tags': data['tags'],
            'custom_fields': data['custom_fields'],
        }
        # (field, identifying key) pairs: the key is only added to the
        # payload when the nested object exists.
        optional_fields = (('cluster', 'name'),
                           ('device_role', 'name'),
                           ('device_type', 'model'),
                           ('face', 'label'),
                           ('rack', 'name'),
                           ('site', 'name'),
                           ('tenant', 'name'))
        for field, key in optional_fields:
            if data[field] is not None:
                payload[field] = data[field][key]
        extracted.append({'data': payload, 'state': entry['state']})
    self.ansible_data['netbox_devices'] = extracted
def interfaces(self):
    """Extract NetBox device interfaces into Ansible-ready dicts."""
    extracted = []
    for entry in self.netbox_data['netbox_device_interfaces']:
        data = entry['data']
        # Older NetBox releases report 'form_factor' where newer ones
        # report 'type'; prefer 'type' and fall back to 'form_factor'.
        # See https://github.com/netbox-community/ansible_modules/issues/193
        if data.get('type') is not None:
            data['type'] = data['type']['label']
        elif data.get('form_factor') is not None:
            data['type'] = data['form_factor']['label']
        if data['mode'] is not None:
            data['mode'] = data['mode']['label']
        extracted.append({'data': {
            'description': data['description'],
            'device': data['device']['name'],
            'enabled': data['enabled'],
            'type': data['type'],
            'lag': data['lag'],
            'mac_address': data['mac_address'],
            'mgmt_only': data['mgmt_only'],
            'mode': data['mode'],
            'mtu': data['mtu'],
            'name': data['name'],
            'tagged_vlans': data['tagged_vlans'],
            'tags': data['tags'],
            'untagged_vlan': data['untagged_vlan'],
        }, 'state': entry['state']})
    self.ansible_data['netbox_device_interfaces'] = extracted
def inventory_items(self):
    """Extract NetBox inventory items into Ansible-ready dicts."""
    extracted = []
    for entry in self.netbox_data['netbox_inventory_items']:
        data = entry['data']
        # Reduce the nested manufacturer object to its name (in place).
        if data['manufacturer'] is not None:
            data['manufacturer'] = data['manufacturer']['name']
        extracted.append({
            'data': {'device': data['device']['name'],
                     'name': data['name'],
                     'part_id': data['part_id'],
                     'manufacturer': data['manufacturer'],
                     'serial': data['serial'],
                     'asset_tag': data['asset_tag'],
                     'description': data['description'],
                     'tags': data['tags']},
            'state': entry['state']})
    self.ansible_data['netbox_inventory_items'] = extracted
def cluster_groups(self):
    """Extract NetBox cluster groups (name only) into Ansible-ready dicts."""
    self.ansible_data['netbox_cluster_groups'] = [
        {'data': {'name': group['data']['name']}, 'state': group['state']}
        for group in self.netbox_data['netbox_cluster_groups']
    ]
def cluster_types(self):
    """Extract NetBox cluster types (name only) into Ansible-ready dicts."""
    self.ansible_data['netbox_cluster_types'] = [
        {'data': {'name': ct['data']['name']}, 'state': ct['state']}
        for ct in self.netbox_data['netbox_cluster_types']
    ]
def clusters(self):
    """Extract NetBox clusters into Ansible-ready dicts.

    Nested objects are reduced to their identifying name.  The cluster
    group (and, defensively, the type) may be None — the original code
    crashed with a TypeError on clusters without a group, because only
    'site' was guarded.
    """
    netbox_clusters = []
    for cluster in self.netbox_data['netbox_clusters']:
        data = cluster['data']
        # Update site with name only if defined
        if data['site'] is not None:
            data['site'] = data['site']['name']
        # BUGFIX: 'group' is optional in NetBox and may be None; guard it
        # the same way as 'site'.  'type' is mandatory but guarded too.
        group = data['group']['name'] if data['group'] is not None else None
        cluster_type = data['type']['name'] if data['type'] is not None else None
        cluster_info = {'data': {'comments': data['comments'],
                                 'custom_fields': data['custom_fields'],
                                 'name': data['name'],
                                 'cluster_group': group,
                                 'cluster_type': cluster_type,
                                 'site': data['site'],
                                 'tags': data['tags']},
                        'state': cluster['state']}
        netbox_clusters.append(cluster_info)
    self.ansible_data['netbox_clusters'] = netbox_clusters
def virtual_machines(self):
    """Extract NetBox virtual machines into Ansible-ready dicts.

    Optional nested objects (platform, cluster, role, site, tenant) are
    reduced to their identifying name.  'platform' is optional in NetBox
    and could be None — the original code dereferenced it unconditionally
    and crashed, while all the other optional fields were guarded.
    """
    netbox_virtual_machines = []
    for virtual_machine in self.netbox_data['netbox_virtual_machines']:
        data = virtual_machine['data']
        # BUGFIX: guard platform like the other optional nested objects.
        platform = data['platform']['name'] if data['platform'] is not None else None
        vm_info = {'data': {'disk': data['disk'],
                            'memory': data['memory'],
                            'name': data['name'],
                            'platform': platform,
                            'site': data['site'],
                            'vcpus': data['vcpus'],
                            'status': data['status']['label'],
                            'tags': data['tags'],
                            'custom_fields': data['custom_fields']
                            },
                   'state': virtual_machine['state']}
        # Update cluster with name only if defined
        if data['cluster'] is not None:
            vm_info['data']['cluster'] = data['cluster']['name']
        # Update virtual_machine_role with name only if defined
        if data['role'] is not None:
            vm_info['data']['virtual_machine_role'] = data['role']['name']
        # Update site with name only if defined
        if data['site'] is not None:
            vm_info['data']['site'] = data['site']['name']
        # Update tenant with name only if defined
        if data['tenant'] is not None:
            vm_info['data']['tenant'] = data['tenant']['name']
        netbox_virtual_machines.append(vm_info)
    self.ansible_data['netbox_virtual_machines'] = netbox_virtual_machines
def virtual_interfaces(self):
    """Extract NetBox virtual-machine interfaces into Ansible-ready dicts."""
    extracted = []
    for entry in self.netbox_data['netbox_virtual_interfaces']:
        data = entry['data']
        # Reduce nested choice objects to their display label (in place).
        if data['form_factor'] is not None:
            data['form_factor'] = data['form_factor']['label']
        if data['mode'] is not None:
            data['mode'] = data['mode']['label']
        extracted.append({'data': {
            'description': data['description'],
            'enabled': data['enabled'],
            'mac_address': data['mac_address'],
            'mode': data['mode'],
            'mtu': data['mtu'],
            'name': data['name'],
            'tagged_vlans': data['tagged_vlans'],
            'tags': data['tags'],
            'untagged_vlan': data['untagged_vlan'],
            'virtual_machine': data['virtual_machine']['name'],
        }, 'state': entry['state']})
    self.ansible_data['netbox_virtual_interfaces'] = extracted
| StarcoderdataPython |
3370464 | import numpy as np
import sys
def find_ids(passes):
    """Return the set of seat ids for the given boarding passes.

    A boarding pass is a 10-bit binary number in disguise: F/L mean 0 and
    B/R mean 1, and seat_id == row * 8 + column == the full 10-bit value.
    The original implementation binary-searched the row/column ranges with
    float division; decoding the pass directly is equivalent for valid
    passes and avoids the float arithmetic entirely.

    Empty strings (e.g. a trailing blank line) are skipped instead of
    being mapped to a phantom seat id 0.
    """
    table = str.maketrans('FBLR', '0101')
    return {int(p.translate(table), 2) for p in passes if p}
def find_missing_seat(ids):
    """Return the seat id absent from *ids* whose two neighbours are present.

    Returns None if no such id exists below max(ids).
    """
    for candidate in range(max(ids)):
        neighbours_present = candidate - 1 in ids and candidate + 1 in ids
        if candidate not in ids and neighbours_present:
            return candidate
if __name__ == "__main__":
    # Boarding passes are read one per line from the file named in argv[1].
    with open(sys.argv[1]) as f:
        passes = f.readlines()
    # Strip trailing newlines left by readlines().
    passes = [l.replace('\n', '') for l in passes]
    ids = find_ids(passes)
    # Part 1: highest seat id on any boarding pass.
    print(f'Max id = {max(ids)}')
    # Part 2: our seat — the one missing id with both neighbours present.
    missing_id = find_missing_seat(ids)
    print(f'Missing id = {missing_id}')
104573 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import pytz
import datetime
# Absolute directory of this settings file; data paths below hang off it.
BASEDIR = os.path.realpath(os.path.dirname(__file__))

### Core Settings
# SQLAlchemy connection URL for the judge database.
DB_URL = 'postgresql+psycopg2://compiler2017:mypassword@localhost/compiler2017'
# DB_URL = 'sqlite:///data/compiler.db'
# Timezone used when rendering timestamps.
TIMEZONE = pytz.timezone('Asia/Shanghai')
# Ordered list of judging phases each submission goes through.
TEST_PHASES = [
    'semantic pretest', 'semantic extended',
    'codegen pretest', 'codegen extended',
    'optim pretest', 'optim extended'
]
# Shared secret between core and judge processes — replace the placeholder
# with a randomly generated value before deployment.
JUDGE_TOKEN = 'you can copy from: python -c "print(repr(__import__(\"os\").urandom(30)))"'
CORE_BUILD_LOG_PATH = os.path.join(BASEDIR, 'data', 'build')
CORE_TESTRUN_STDERR_PATH = os.path.join(BASEDIR, 'data', 'testrun')

### Website Settings
WEBSITE_NAME = 'Compiler 2017'
# URL prefix under which the site is mounted.
WEBROOT = '/compiler2017'
# FINAL_ROOT = WEBROOT + '/static/final' # if the final result has been generated
FINAL_ROOT = None # if not final yet
# Pagination sizes for the build and run listings.
BUILDS_PER_PAGE = 20
RUNS_PER_PAGE = 30
CORE_PORT = 6002
HOMEPAGE_TITLE = "Hello, Compiler 2017"
# Raw HTML block rendered on the landing page.
HOMEPAGE_DESCRIPTION = '''
<p>
Enjoy writing compilers.
</p>
Please do
<ul>
<li>Make use of the online judge. Commit frequently!</li>
<li>Report bugs if you find them</li>
</ul>
Please do not
<ul>
<li>Exploit bugs of the online judge.</li>
<li>Steal non-public testcases in any form (for example, print them to stderr)</li>
</ul>
Information:
<ul>
<li>Source code build time (i.e. the time taken to build your compiler): at most 30 seconds</li>
<li>Compile time (i.e. the time your compiler runs): at most 5 seconds</li>
<li>Memory usage: at most 256MB</li>
<li>Java version: Oracle JDK 1.8.0 Update 121</li>
<li>g++ version: 5.4.0</li>
</ul>
<hr>
Some Links:
<ul>
<li><a href="https://acm.sjtu.edu.cn/wiki/Compiler_2017">Compiler 2017 Course Wiki</a></li>
<li><a href="https://bitbucket.org/acmcompiler/compiler2017-demo/src">How to make my compiler run on the Online Judge</a></li>
<li><a href="https://github.com/abcdabcd987/acm-compiler-judge/blob/master/docs/use_guide.md">How to Use the Online Judge</a></li>
<li><a href="https://github.com/abcdabcd987/acm-compiler-judge/blob/master/docs/testcase_guide.md">How to Contribute a Testcase</a></li>
<li><a href="https://bitbucket.org/acmcompiler/compiler2017-testcases">Git repository of testcases (may not be up-to-date as the Online Judge)</a></li>
</ul>
'''

### Judge Settings
JUDGE_NAME = 'Judge 1'
# Per-stage time limits, in seconds.
JUDGE_BUILD_TIMEOUT = 60
JUDGE_REQUEST_TIMEOUT = 5
JUDGE_COMPILE_TIMEOUT = 15
# Each test is executed this many times (presumably to stabilise timing —
# TODO confirm which run is recorded).
JUDGE_RUN_TIMES_PER_TEST = 3
# Memory-limit string for test runs; '512m' looks like a container-runtime
# flag value — TODO confirm the consumer.
JUDGE_RUN_MEMORY_LIMIT = '512m'
# Truncate captured logs beyond this many characters.
LOG_LENGTH_LIMIT = 4096
CORE_URL = 'http://localhost:{}{}'.format(CORE_PORT, WEBROOT)
JUDGE_GIT_REPO_PATH = os.path.join(BASEDIR, 'data', 'repo')
JUDGE_TESTCASE_PATH = os.path.join(BASEDIR, 'data', 'testcase')
| StarcoderdataPython |
52186 | <reponame>157239n/k1lib<gh_stars>1-10
# AUTOGENERATED FILE! PLEASE DON'T EDIT
from .callbacks import Callback, Callbacks, Cbs
import k1lib, os, torch
__all__ = ["Autosave", "DontTrainValid", "InspectLoss", "ModifyLoss", "Cpu", "Cuda",
"DType", "InspectBatch", "ModifyBatch", "InspectOutput", "ModifyOutput",
"Beep"]
@k1lib.patch(Cbs)
class Autosave(Callback):
    """Autosaves 3 versions of the network to disk"""
    def __init__(self):
        super().__init__()
        self.order = 23
    def endRun(self):
        # Rotate the two older checkpoints down one slot, then write the
        # freshest network into the newest slot.
        os.system("mv autosave-1.pth autosave-0.pth")
        os.system("mv autosave-2.pth autosave-1.pth")
        self.l.save("autosave-2.pth")
@k1lib.patch(Cbs)
class DontTrainValid(Callback):
    """If is not training, then don't run m.backward() and opt.step().
    The core training loop in k1lib.Learner don't specifically do this,
    cause there may be some weird cases where you want to also train valid."""
    def _common(self):
        # Returning True cancels the upcoming event when in eval mode.
        if not self.l.model.training:
            return True
    def startBackward(self):
        return self._common()
    def startStep(self):
        return self._common()
@k1lib.patch(Cbs)
class InspectLoss(Callback):
    """Expected `f` to take in 1 float."""
    def __init__(self, f):
        super().__init__()
        self.f = f
        self.order = 15
    def endLoss(self):
        # Hand over a detached tensor so the observer can't affect autograd.
        self.f(self.loss.detach())
@k1lib.patch(Cbs)
class ModifyLoss(Callback):
    """Expected `f` to take in 1 float and return 1 float."""
    def __init__(self, f):
        super().__init__()
        self.f = f
        self.order = 13
    def endLoss(self):
        # Replace the learner's loss with the transformed value.
        self.l.loss = self.f(self.loss)
@k1lib.patch(Cbs)
class Cuda(Callback):
    """Moves batch and model to the default GPU"""
    def startRun(self):
        self.l.model.cuda()
    def startBatch(self):
        # Move both halves of the batch together.
        self.l.xb, self.l.yb = self.l.xb.cuda(), self.l.yb.cuda()
@k1lib.patch(Cbs)
class Cpu(Callback):
    """Moves batch and model to CPU"""
    def startRun(self):
        self.l.model.cpu()
    def startBatch(self):
        # Move both halves of the batch together.
        self.l.xb, self.l.yb = self.l.xb.cpu(), self.l.yb.cpu()
@k1lib.patch(Cbs)
class DType(Callback):
    """Moves batch and model to a specified data type"""
    def __init__(self, dtype):
        super().__init__()
        self.dtype = dtype
    def startRun(self):
        self.l.model = self.l.model.to(self.dtype)
    def startBatch(self):
        # Cast both halves of the batch together.
        self.l.xb, self.l.yb = self.l.xb.to(self.dtype), self.l.yb.to(self.dtype)
@k1lib.patch(Cbs)
class InspectBatch(Callback):
    """Expected `f` to take in 2 tensors."""
    def __init__(self, f: callable):
        super().__init__()
        self.f = f
        self.order = 15
    def startBatch(self):
        self.f(self.l.xb, self.l.yb)
@k1lib.patch(Cbs)
class ModifyBatch(Callback):
    """Modifies xb and yb on the fly. Expected `f`
    to take in 2 tensors and return 2 tensors."""
    def __init__(self, f):
        super().__init__()
        self.f = f
        self.order = 13
    def startBatch(self):
        self.l.xb, self.l.yb = self.f(self.l.xb, self.l.yb)
@k1lib.patch(Cbs)
class InspectOutput(Callback):
    """Expected `f` to take in 1 tensor."""
    def __init__(self, f):
        super().__init__()
        self.f = f
        self.order = 15
    def endPass(self):
        self.f(self.y)
@k1lib.patch(Cbs)
class ModifyOutput(Callback):
    """Modifies output on the fly. Expected `f` to take
    in 1 tensor and return 1 tensor"""
    def __init__(self, f):
        super().__init__()
        self.f = f
        self.order = 13
    def endPass(self):
        # Replace the forward-pass output with the transformed value.
        self.l.y = self.f(self.y)
@k1lib.patch(Cbs)
class Beep(Callback):
    """Plays a beep sound when the run is over"""
    def endRun(self):
        k1lib.beep()
3353740 | <reponame>djf604/django-alexa
from __future__ import absolute_import
import json
from ..base import AlexaBaseCommand
from ...internal import IntentsSchema
class Command(AlexaBaseCommand):
    help = 'Prints the Alexa Skills Kit intents schema for an app'

    def do_work(self, app):
        """Dump the generated intents schema for *app* as pretty JSON."""
        schema = IntentsSchema.generate_schema(app=app)
        rendered = json.dumps(schema, indent=4, sort_keys=True)
        self.stdout.write(rendered + "\n")
| StarcoderdataPython |
46523 | <reponame>apaniukov/workbench
"""
OpenVINO DL Workbench
Script to getting system resources: CPU, RAM, DISK
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import json
import os
import psutil
def bytes_to_gb(num_bytes: int) -> float:
    """Convert a byte count to gibibytes (2**30 bytes)."""
    return num_bytes / 2 ** 30
def get_cpu_usage(per_cpu: bool = False, number_of_measurements=5) -> list:
    """Return CPU load percentages.

    With ``per_cpu`` True the result has one entry per logical CPU, each
    averaged over ``number_of_measurements`` one-second samples (so the
    call blocks for roughly that many seconds).  Otherwise a single
    one-second overall measurement is returned as a one-element list.
    """
    result = []
    if per_cpu:
        # Seed one accumulator per logical CPU (the first, interval-less
        # call is only used to learn the CPU count).
        result = [0 for _ in psutil.cpu_percent(percpu=True)]
        for _ in range(number_of_measurements):
            for i, percentage in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
                # Accumulate each sample's share of the final average.
                result[i] += percentage / number_of_measurements
    else:
        result.append(psutil.cpu_percent(interval=1, percpu=False))
    return result
def get_ram_usage() -> dict:
    """Return virtual-memory and swap usage; sizes are in GiB."""
    virtual = psutil.virtual_memory()
    swap = psutil.swap_memory()
    return {
        'TOTAL': bytes_to_gb(virtual.total),
        'USED': bytes_to_gb(virtual.used),
        'AVAILABLE': bytes_to_gb(virtual.available),
        'PERCENTAGE': virtual.percent,
        'SWAP': {
            'TOTAL': bytes_to_gb(swap.total),
            'USED': bytes_to_gb(swap.used),
            'AVAILABLE': bytes_to_gb(swap.free),
            'PERCENTAGE': swap.percent,
        },
    }
def get_disk_usage() -> dict:
    """Return aggregate disk usage across all mounted partitions (GiB)."""
    result = {
        'TOTAL': 0,
        'USED': 0,
        'AVAILABLE': 0,
        'PERCENTAGE': 0,
    }
    for partition in psutil.disk_partitions():
        try:
            partition_usage = psutil.disk_usage(partition.mountpoint)
        except PermissionError:
            # The disk may not be ready (e.g. an empty removable drive).
            continue
        result['TOTAL'] += bytes_to_gb(partition_usage.total)
        result['USED'] += bytes_to_gb(partition_usage.used)
        result['AVAILABLE'] += bytes_to_gb(partition_usage.free)
    # Compute the percentage once after aggregation (the original code
    # recomputed it on every iteration) and guard against the edge case
    # of no readable partitions to avoid ZeroDivisionError.
    if result['TOTAL'] > 0:
        result['PERCENTAGE'] = (result['USED'] * 100) / result['TOTAL']
    return result
def main(output: str):
    """Collect CPU/RAM/DISK statistics and write them to *output* as JSON.

    Parent directories of *output* are created if needed.
    """
    system_info = {
        'CPU': get_cpu_usage(per_cpu=True),
        'RAM': get_ram_usage(),
        'DISK': get_disk_usage(),
    }
    # BUGFIX: a bare filename has an empty dirname, and os.makedirs('')
    # raises; only create the directory when one is actually given.
    # exist_ok also removes the exists()/makedirs() race of the original.
    out_dir = os.path.dirname(output)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(output, 'w') as file_d:
        json.dump(system_info, file_d)
if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()
    # Destination JSON file for the collected system statistics.
    PARSER.add_argument('-output', type=str,
                        help='File to save results')
    ARGUMENTS = PARSER.parse_args()
    main(ARGUMENTS.output)
| StarcoderdataPython |
4827505 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import argparse, pickle
import shutil
from keras.models import load_model
import tensorflow as tf
import os
sys.path.append('../')
import keras
from keras import Input
from deephunter.coverage import Coverage
from keras.applications import MobileNet, VGG19, ResNet50
from keras.applications.vgg16 import preprocess_input
import random
import time
import numpy as np
from PIL import Image
from deephunter.image_queue import ImageInputCorpus, TensorInputCorpus
from deephunter.fuzzone import build_fetch_function
from lib.queue import Seed
from lib.fuzzer import Fuzzer
from deephunter.mutators import Mutators
from keras.utils.generic_utils import CustomObjectScope
#_, (x_test, y_test) = keras.datasets.cifar10.load_data()
#x_test=x_test/255.0
#x_test=x_test.reshape(10000,32,32,3)
#y_test=y_test.reshape(-1)
#yy = np.zeros((10000, 10))
#for i in range(10000):
# yy[i][y_test[i]] = 1
def imagenet_preprocessing(input_img_data):
    """Apply Keras `preprocess_input` to a float32 copy of the batch.

    The input is copied first so the caller's array is never mutated.
    """
    batch = np.float32(np.copy(input_img_data))
    return preprocess_input(batch)
def imgnt_preprocessing(x_test):
    """Identity preprocessing (used for the resnet50 entry in preprocess_dic)."""
    return x_test
def mnist_preprocessing(x):
    """Resize 28x28 grayscale images to 32x32, scale to [0,1] and center.

    The subtracted constant is presumably the dataset mean (MNIST's
    ~0.1307) — confirm against the profiling pipeline.  Returns an array
    of shape (N, 32, 32, 1).
    """
    x = x.reshape(x.shape[0], 28, 28)
    resized = []
    for img in x:
        pil_img = Image.fromarray(img.astype('uint8'), 'L')
        pil_img = pil_img.resize(size=(32, 32))
        arr = np.asarray(pil_img).astype(np.float32) / 255.0 - 0.1306604762738431
        resized.append(arr)
    return np.expand_dims(np.stack(resized), axis=-1)
def cifar_preprocessing(x_test):
    """Standardise a CIFAR-10 image batch with fixed per-channel statistics.

    The caller's array is copied, converted to float32, and each of the
    three channels is shifted/scaled by the given mean and std.
    """
    channel_mean = [125.307, 122.95, 113.865]
    channel_std = [62.9932, 62.0887, 66.7048]
    batch = np.copy(x_test).astype('float32')
    for channel, (m, s) in enumerate(zip(channel_mean, channel_std)):
        batch[:, :, :, channel] = (batch[:, :, :, channel] - m) / s
    return batch
# Paths to trained weights for the locally-stored (non-ImageNet) models.
model_weight_path = {
    'vgg16': "./profile/cifar10/models/vgg16.h5",
    'resnet20': "/data/dnntest/zpengac/models/resnet/cifar10_resnet20v1_keras_deephunter_prob_kmnc2.h5",
    'lenet1': "./profile/mnist/models/lenet1.h5",
    'lenet4': "./profile/mnist/models/lenet4.h5",
    'lenet5': "/data/dnntest/zpengac/models/lenet/mnist_lenet5_keras_32_py2.h5"
}

# Per-model profiling pickles (neuron statistics consumed by the
# DeepGauge-style coverage metrics).
model_profile_path = {
    'vgg16': "./profile/cifar10/profiling/vgg16/0_50000.pickle",
    'resnet20': "/data/dnntest/zpengac/deephunter/deephunter/profile/cifar10_resnet20v1_keras_deephunter_prob_kmnc2.pickle",
    'lenet1': "./profile/mnist/profiling/lenet1/0_60000.pickle",
    'lenet4': "./profile/mnist/profiling/lenet4/0_60000.pickle",
    'lenet5': "/data/dnntest/zpengac/deephunter/deephunter/profile/mnist_lenet5_32_py2.pickle",
    'mobilenet': "./profile/imagenet/profiling/mobilenet_merged.pickle",
    'vgg19': "./profile/imagenet/profiling/vgg19_merged.pickle",
    'resnet50': "./profile/imagenet/profiling/resnet50_merged.pickle"
}

# Dataset-specific input preprocessing, keyed by model name.
preprocess_dic = {
    'vgg16': cifar_preprocessing,
    'resnet20': cifar_preprocessing,
    'lenet1': mnist_preprocessing,
    'lenet4': mnist_preprocessing,
    'lenet5': mnist_preprocessing,
    'mobilenet': imagenet_preprocessing,
    'vgg19': imagenet_preprocessing,
    'resnet50': imgnt_preprocessing
}

# Expected input tensor shape per model.
shape_dic = {
    'vgg16': (32, 32, 3),
    'resnet20': (32, 32, 3),
    'lenet1': (28, 28, 1),
    'lenet4': (28, 28, 1),
    'lenet5': (32, 32, 1),
    'mobilenet': (224, 224, 3),
    'vgg19': (224, 224, 3),
    'resnet50': (256, 256, 3)
}

# Default hyper-parameter for each coverage criterion (e.g. number of
# buckets for kmnc, top-k for tknc, activation threshold for nc).
metrics_para = {
    'kmnc': 1000,
    'bknc': 10,
    'tknc': 10,
    'nbc': 10,
    'newnc': 10,
    'nc': 0.75,
    'fann': 1.0,
    'snac': 10
}

# Layer-name substrings excluded from coverage computation, per model.
execlude_layer_dic = {
    'vgg16': ['input', 'flatten', 'activation', 'batch', 'dropout'],
    'resnet20': ['input', 'flatten', 'activation', 'batch', 'dropout'],
    'lenet1': ['input', 'flatten', 'activation', 'batch', 'dropout'],
    'lenet4': ['input', 'flatten', 'activation', 'batch', 'dropout'],
    'lenet5': ['input', 'flatten', 'activation', 'batch', 'dropout'],
    'mobilenet': ['input', 'flatten', 'padding', 'activation', 'batch', 'dropout',
                  'bn', 'reshape', 'relu', 'pool', 'concat', 'softmax', 'fc'],
    'vgg19': ['input', 'flatten', 'padding', 'activation', 'batch', 'dropout', 'bn',
              'reshape', 'relu', 'pool', 'concat', 'softmax', 'fc'],
    'resnet50': ['input', 'flatten', 'padding', 'activation', 'batch', 'dropout', 'bn',
                 'reshape', 'relu', 'pool', 'concat', 'add', 'res4', 'res5']
}
def metadata_function(meta_batches):
    """Pass prediction metadata through unchanged (identity hook)."""
    return meta_batches
def image_mutation_function(batch_num):
    """Return a closure that mutates a seed into *batch_num* random mutants."""
    def mutate(seed):
        return Mutators.image_random_mutate(seed, batch_num)
    return mutate
def objective_function(seed, names):
    """Return the name suffixes for every way *seed* counts as a failed test.

    With a single metadata entry (single model), '' is returned when the
    prediction disagrees with the ground truth (adversarial sample).
    With several entries (original model first, quantized models after),
    names[i] is returned for every quantized model that disagrees with
    the original, with an extra 'a' appended when the seed is also
    adversarial on the original model.
    """
    metadata = seed.metadata
    ground_truth = seed.ground_truth
    assert names is not None
    if len(metadata) == 1:
        # Single model: failed test == misprediction.
        return [''] if metadata[0] != ground_truth else []
    # Multiple models: compare every quantized prediction to the original.
    suffix = '' if metadata[0] == ground_truth else 'a'
    return [names[idx] + suffix
            for idx in range(1, len(metadata))
            if metadata[idx] != metadata[0]]
def iterate_function(names):
    """Build the per-batch fuzzing step.

    *names* holds the model names (original model first, quantized models
    after); objective_function turns them into filename suffixes for
    failed tests.  Returns (bug_found, coverage_gained) per batch.
    """
    def func(queue, root_seed, parent, mutated_coverage_list, mutated_data_batches, mutated_metadata_list,
             objective_function):
        # Unpack the mutant batch: reference images, mutants, and three
        # per-mutant values (presumably mutation class and L0/Linf
        # distances from the reference — confirm against Mutators).
        ref_batches, batches, cl_batches, l0_batches, linf_batches = mutated_data_batches
        successed = False
        bug_found = False
        # For each mutant in the batch, we will check the coverage and whether it is a failed test
        for idx in range(len(mutated_coverage_list)):
            input = Seed(cl_batches[idx], mutated_coverage_list[idx], root_seed, parent, mutated_metadata_list[:, idx],
                         parent.ground_truth, l0_batches[idx], linf_batches[idx])
            # The implementation for the isFailedTest() in Algorithm 1 of the paper
            results = objective_function(input, names)
            if len(results) > 0:
                # We have find the failed test and save it in the crash dir.
                for i in results:
                    queue.save_if_interesting(input, batches[idx], True, suffix=i)
                bug_found = True
            else:
                # Seeds are stored as (reference, mutant) image pairs.
                new_img = np.append(ref_batches[idx:idx + 1], batches[idx:idx + 1], axis=0)
                # If it is not a failed test, we will check whether it has a coverage gain
                result = queue.save_if_interesting(input, new_img, False)
                successed = successed or result
        return bug_found, successed
    return func
def dry_run(indir, fetch_function, coverage_function, queue):
    """Run every initial seed once and enqueue it with its coverage.

    Mirrors AFL's dry run: each .npy seed in *indir* is predicted once so
    its coverage baseline is in the queue before fuzzing starts.
    """
    seed_lis = os.listdir(indir)
    # Read each initial seed and analyze the coverage
    for seed_name in seed_lis:
        tf.logging.info("Attempting dry run with '%s'...", seed_name)
        path = os.path.join(indir, seed_name)
        img = np.load(path)
        # Each seed will contain two images, i.e., the reference image and mutant (see the paper)
        input_batches = img[1:2]
        # Predict the mutant and obtain the outputs
        # coverage_batches is the output of internal layers and metadata_batches is the output of the prediction result
        coverage_batches, metadata_batches = fetch_function((0, input_batches, 0, 0, 0))
        # Based on the output, compute the coverage information
        coverage_list = coverage_function(coverage_batches)
        metadata_list = metadata_function(metadata_batches)
        # Create a new seed; prediction doubles as ground truth here since
        # initial seeds are assumed correctly classified.
        input = Seed(0, coverage_list[0], seed_name, None, metadata_list[0][0], metadata_list[0][0])
        new_img = np.append(input_batches, input_batches, axis=0)
        # Put the seed in the queue and save the npy file in the queue dir
        queue.save_if_interesting(input, new_img, False, True, seed_name)
if __name__ == '__main__':
    start_time = time.time()
    tf.logging.set_verbosity(tf.logging.INFO)
    random.seed(time.time())
    parser = argparse.ArgumentParser(description='coverage guided fuzzing for DNN')
    parser.add_argument('-i', help='input seed directory')
    parser.add_argument('-o', help='output directory')
    parser.add_argument('-model', help="target model to fuzz", choices=['vgg16', 'resnet20', 'mobilenet', 'vgg19',
                                                                       'resnet50', 'lenet1', 'lenet4', 'lenet5'], default='lenet5')
    parser.add_argument('-criteria', help="set the criteria to guide the fuzzing",
                        choices=['nc', 'kmnc', 'nbc', 'snac', 'bknc', 'tknc', 'fann'], default='kmnc')
    parser.add_argument('-batch_num', help="the number of mutants generated for each seed", type=int, default=20)
    parser.add_argument('-max_iteration', help="maximum number of fuzz iterations", type=int, default=10000000)
    parser.add_argument('-metric_para', help="set the parameter for different metrics", type=float)
    parser.add_argument('-quantize_test', help="fuzzer for quantization", default=0, type=int)
    # parser.add_argument('-ann_threshold', help="Distance below which we consider something new coverage.", type=float,
    #                     default=1.0)
    parser.add_argument('-quan_model_dir', help="directory including the quantized models for testing")
    parser.add_argument('-random', help="whether to adopt random testing strategy", type=int, default=0)
    parser.add_argument('-select', help="test selection strategy",
                        choices=['uniform', 'tensorfuzz', 'deeptest', 'prob'], default='prob')
    args = parser.parse_args()
    # Input tensor geometry for the Keras ImageNet models.
    img_rows, img_cols = 256, 256
    input_shape = (img_rows, img_cols, 3)
    input_tensor = Input(shape=input_shape)
    # Get the layers which will be excluded during the coverage computation
    exclude_layer_list = execlude_layer_dic[args.model]
    # Create the output directory including seed queue and crash dir, it is like AFL
    # NOTE: a pre-existing output directory is wiped without confirmation.
    if os.path.exists(args.o):
        shutil.rmtree(args.o)
    os.makedirs(os.path.join(args.o, 'queue'))
    os.makedirs(os.path.join(args.o, 'crashes'))
    # Load model. For ImageNet, we use the default models from Keras framework.
    # For other models, we load the model from the h5 file.
    model = None
    if args.model == 'mobilenet':
        model = MobileNet(input_tensor=input_tensor)
    elif args.model == 'vgg19':
        model = VGG19(input_tensor=input_tensor, input_shape=input_shape)
    elif args.model == 'resnet50':
        model = ResNet50(input_tensor=input_tensor)
    else:
        model = load_model(model_weight_path[args.model])
    # Get the preprocess function based on different dataset
    preprocess = preprocess_dic[args.model]
    # Load the profiling information which is needed by the metrics in DeepGauge
    profile_dict = pickle.load(open(model_profile_path[args.model], 'rb'))
    # Load the configuration for the selected metrics.
    # 'nc' takes a float threshold; every other criterion takes an int.
    if args.metric_para is None:
        cri = metrics_para[args.criteria]
    elif args.criteria == 'nc':
        cri = args.metric_para
    else:
        cri = int(args.metric_para)
    # The coverage computer
    coverage_handler = Coverage(model=model, criteria=args.criteria, k=cri,
                                profiling_dict=profile_dict, exclude_layer=exclude_layer_list)
    # The log file which records the plot data after each iteration of the fuzzing
    plot_file = open(os.path.join(args.o, 'plot.log'), 'a+')
    # If testing for quantization, we will load the quantized versions
    # fetch_function is to perform the prediction and obtain the outputs of each layers
    if args.quantize_test == 1:
        model_names = os.listdir(args.quan_model_dir)
        model_paths = [os.path.join(args.quan_model_dir, name) for name in model_names]
        if args.model == 'mobilenet':
            # MobileNet h5 files need these custom objects to deserialize.
            import keras
            with CustomObjectScope({'relu6': keras.applications.mobilenet.relu6,
                                    'DepthwiseConv2D': keras.applications.mobilenet.DepthwiseConv2D}):
                models = [load_model(m) for m in model_paths]
        else:
            models = [load_model(m) for m in model_paths]
        fetch_function = build_fetch_function(coverage_handler, preprocess, models)
        # The original model's name goes first; quantized names follow.
        model_names.insert(0, args.model)
    else:
        fetch_function = build_fetch_function(coverage_handler, preprocess)
        model_names = [args.model]
    # Like AFL, dry_run will run all initial seeds and keep all initial seeds in the seed queue
    dry_run_fetch = build_fetch_function(coverage_handler, preprocess)
    # The function to update coverage
    coverage_function = coverage_handler.update_coverage
    # The function to perform the mutation from one seed
    mutation_function = image_mutation_function(args.batch_num)
    # The seed queue
    if args.criteria == 'fann':
        queue = TensorInputCorpus(args.o, args.random, args.select, cri, "kdtree")
    else:
        queue = ImageInputCorpus(args.o, args.random, args.select, coverage_handler.total_size, args.criteria)
    # Perform the dry_run process from the initial seeds
    dry_run(args.i, dry_run_fetch, coverage_function, queue)
    # For each seed, compute the coverage and check whether it is a "bug", i.e., adversarial example
    image_iterate_function = iterate_function(model_names)
    # The main fuzzer class
    fuzzer = Fuzzer(queue, coverage_function, metadata_function, objective_function, mutation_function, fetch_function,
                    image_iterate_function, args.select)
    # The fuzzing process
    fuzzer.loop(args.max_iteration)
    #x_test = cifar_preprocessing(x_test)
    #model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
    #print(model.metrics_names)
    #print(model.evaluate(x_test, yy, verbose=1))
    # Record total fuzzing wall time for this configuration.
    spent_time = time.time() - start_time
    print('finish', spent_time)
    f = open('time.txt', 'a+')
    f.write(args.model + '\t' + args.criteria + '\t' + args.select + '\t' + str(spent_time) + '\n')
    f.close()
| StarcoderdataPython |
1711104 | import threading
import typing
import nacl.signing
import time
import typing as tp
import logging.config
from .istation import IStation, StationData, STATION_VERSION, Measurement
from ..drivers.sds011 import SDS011_MODEL, SDS011
from collections import deque
from connectivity.config.logging import LOGGING_CONFIG
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("sensors-connectivity")
def _read_data_thread(sensor: SDS011, q: deque, timeout: int) -> None:
    """Poll *sensor* forever, appending (measurement, unix_ts) to *q*.

    Intended to run in a background thread; *timeout* is the pause in
    seconds between sensor queries.  With q = deque(maxlen=1), each new
    reading replaces the previous one.
    """
    while True:
        meas = sensor.query()
        timestamp = int(time.time())
        q.append((meas, timestamp))
        time.sleep(timeout)
class COMStation(IStation):
    """
    Station that reads particulate-matter data from an SDS011 sensor on a
    serial port.  A background thread polls the sensor; get_data() wraps the
    newest reading in a StationData record.
    """
    def __init__(self, config: dict) -> None:
        # Only the "comstation" section of the service config is read here.
        super().__init__(config)
        self.version: str = f"airalab-com-{STATION_VERSION}"
        self.sensor: SDS011 = SDS011(config["comstation"]["port"])
        # work_period is configured in seconds; the driver expects minutes.
        work_period: int = int(config["comstation"]["work_period"])
        self.sensor.set_work_period(work_time=int(work_period / 60))
        # Latitude/longitude pair, [0, 0] when unset.
        # NOTE(review): after the split() below the entries are strings,
        # not floats — get_data() converts them with float().
        self.geo: tp.List[float] = [0, 0]
        if config["comstation"]["geo"]:
            self.geo = config["comstation"]["geo"].split(",")
        # Use the configured public key, otherwise generate a fresh
        # ed25519 key pair and publish its verify key as hex.
        if "public_key" in config["comstation"] and config["comstation"]["public_key"]:
            self.public = config["comstation"]["public_key"]
        else:
            signing_key = nacl.signing.SigningKey.generate()
            verify_key = signing_key.verify_key
            self.public = bytes(verify_key).hex()
        logger.info(f"COMStation public key: {self.public}")
        # Most recent measurement; zeros until the first sensor reading lands.
        self.meas_data = {"pm25": 0, "pm10": 0, "timestamp": 0}
        # maxlen=1: the deque only ever holds the newest reading.
        self.q = deque(maxlen=1)
        # NOTE(review): non-daemon thread running an infinite loop — the
        # process cannot exit cleanly while it runs; confirm intended.
        threading.Thread(
            target=_read_data_thread, args=(self.sensor, self.q, work_period)
        ).start()
    def get_data(self) -> tp.List[StationData]:
        """Return the latest measurement wrapped in a one-element StationData list."""
        # Fallback measurement (zero PM values, zero coordinates) used until
        # the polling thread has produced at least one reading.
        meas = Measurement(self.public, SDS011_MODEL, 0, 0, self.meas_data)
        if self.q:
            values = self.q[0]
            pm = values[0]  # (pm25, pm10) tuple from the SDS011 driver
            self.meas_data.update(
                {"pm25": pm[0], "pm10": pm[1], "timestamp": values[1]}
            )
            meas = Measurement(
                self.public,
                SDS011_MODEL,
                float(self.geo[0]),
                float(self.geo[1]),
                self.meas_data,
            )
        # self.mac_address / self.start_time come from the IStation base —
        # TODO confirm against istation.py.
        return [
            StationData(
                self.version, self.mac_address, time.time() - self.start_time, meas
            )
        ]
| StarcoderdataPython |
1664816 | <filename>algorithms/dfs/find_all_paths_dfs_style.py
def paths(root):
    """Return every root-to-leaf path of a binary tree, iteratively (DFS).

    Each path is a list of node values from the root down to one leaf.
    Nodes are expected to expose `.val`, `.left` and `.right`.

    Bug fix: the original printed the collected paths and implicitly
    returned None, while the empty-tree branch returned [] — the function
    now consistently returns the list of paths (the debug print is gone).
    """
    if not root:
        return []
    # Stack of (node, path-so-far); popping gives depth-first order.
    stack = [(root, [root.val])]
    all_paths = []
    while stack:
        node, path = stack.pop()
        if not node.left and not node.right:
            # Leaf reached: the accumulated path is complete.
            all_paths.append(path)
        if node.left:
            stack.append((node.left, path + [node.left.val]))
        if node.right:
            stack.append((node.right, path + [node.right.val]))
    return all_paths
| StarcoderdataPython |
4818394 | <filename>mopidy_gpiocont/__init__.py
from __future__ import unicode_literals
import logging
import os
from mopidy import config, ext
__version__ = '0.2.2'
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
    """Mopidy extension entry point for GPIO-based playback control."""

    dist_name = 'Mopidy-GPIOcont'
    ext_name = 'gpiocont'
    version = __version__

    def get_default_config(self):
        """Read the bundled ext.conf as the default configuration."""
        conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
        logger.debug("GPIOcont: Default configuration loaded.")
        return config.read(conf_file)

    def get_config_schema(self):
        """Declare every configuration key this extension understands."""
        schema = super(Extension, self).get_config_schema()
        schema['enabled'] = config.Boolean()
        # Control pins, rotary-encoder settings and the dedicated playlist
        # pins are all optional integers — declared in one pass, in the
        # same order as before.
        for key in ('play_pin', 'next_pin', 'prev_pin',
                    'vol_a_pin', 'vol_b_pin', 'vol_bounce_time', 'vol_change',
                    'list1_pin', 'list2_pin', 'list3_pin', 'list4_pin'):
            schema[key] = config.Integer(optional=True)
        # Names of the four dedicated playlists.
        for key in ('list1_name', 'list2_name', 'list3_name', 'list4_name'):
            schema[key] = config.String()
        # Optional LCD display settings.
        schema['lcd_enable'] = config.Boolean(optional=True)
        schema['lcd_address'] = config.String()
        schema['lcd_port'] = config.Integer(optional=True)
        logger.debug("GPIOcont: User specified configuration loaded.")
        return schema

    def get_command(self):
        """Expose the extension's CLI sub-command."""
        from .commands import GPIOcontCommand
        return GPIOcontCommand()

    def validate_environment(self):
        # No environment checks performed; dependencies declared in setup.py
        # are verified by Mopidy itself.  (A former root-privileges warning
        # for GPIO access was already commented out in the original.)
        pass

    def setup(self, registry):
        """Register the GPIOcont frontend with Mopidy."""
        from .frontend import GPIOcont
        registry.add('frontend', GPIOcont)
| StarcoderdataPython |
1645530 | from django.conf.urls import patterns, url
from frontend import views
""" URL setup (kinda like htaccess) """
# NOTE(review): `patterns()` is the pre-Django-1.8 URLconf helper and was
# removed in Django 1.10 — on modern Django `urlpatterns` should be a plain
# list of url()/path() entries.  Confirm the project's Django version.
# Route: site root ("") -> frontend.views.main, reversible by the name "main".
urlpatterns = patterns(
    '',
    url(r'^$', views.main, name="main"),
)
| StarcoderdataPython |
3346189 | <reponame>KanChiMoe/rforms<filename>mod.py
from flask import Blueprint, abort, jsonify, render_template, request
from decorators import mod_required, api_disallowed
from models import User
from sqlalchemy import func
import json
mod = Blueprint('mod', __name__, template_folder='templates')
@mod.route('/settings')
@mod_required
def settings():
    """Render the site-settings page (moderators only)."""
    return render_template("settings.html")
@mod.route('/users')
@mod_required
@api_disallowed
def users():
    """Paginated user-management listing for moderators.

    Query-string filters (mutually exclusive, checked in this order):
      user   -- exact (case-insensitive) username lookup, no pagination
      mod    -- filter by form_mod flag ("false" -> False, anything else True)
      exempt -- filter by is_exempt flag (same parsing)
    `page` / `limit` drive pagination (defaults 1 / 25).

    Bug fixes vs. original:
    - removed the no-op expression `None if page == 1 else page - 1`,
      which computed a value and discarded it;
    - the "next" link now keeps the active mod/exempt filter on page 1
      (it was only appended inside the `if button_back:` branch, so the
      filter was silently dropped when going from page 1 to page 2).
    """
    page = int(request.args.get('page', 1))
    count = int(request.args.get('limit', 25))
    # Back link is suppressed on the first page.
    if page == 1:
        button_back = False
    else:
        button_back = f"/mod/users?page={page-1}&limit={count}"
    button_next = f"/mod/users?page={page+1}&limit={count}"
    username = request.args.get('user')
    if username is not None:
        # Exact-name lookup: single result, pagination buttons unused.
        users = User.query.filter(
            func.lower(
                User.username) == func.lower(username)).first()
        return render_template("users.html", users=[users])
    elif request.args.get('mod') is not None:
        raw = str(request.args.get('mod')).lower()
        mod = raw != "false"  # only the literal "false" disables the flag
        users = User.query.filter_by(
            form_mod=mod).paginate(
            page, count, False).items
        if button_back:
            button_back += f"&mod={raw}"
        button_next += f"&mod={raw}"
    elif request.args.get('exempt') is not None:
        raw = str(request.args.get('exempt')).lower()
        exempt = raw != "false"
        users = User.query.filter_by(
            is_exempt=exempt).paginate(
            page, count, False).items
        if button_back:
            button_back += f"&exempt={raw}"
        button_next += f"&exempt={raw}"
    else:
        users = User.query.paginate(page, count, False).items
    button_data = [button_back, button_next]
    return render_template("users.html", users=users, button_data=button_data)
@mod.route('/user/<string:username>')
@mod_required
@api_disallowed
def user_lookup(username):
    """Show a single user's page (or its JSON form when `<name>.json`).

    A failed exact lookup falls back to a case-insensitive match; when the
    fuzzy match differs from what was typed, a warning banner is shown.

    Bug fixes vs. original:
    - `show_warning` was only assigned on the fuzzy-match path, so an exact
      match crashed with NameError at render time; it now defaults to False.
    - the original read `user.username` before checking the fuzzy lookup for
      None, raising AttributeError instead of returning 404 for unknown users.
    """
    is_json = False
    if username.endswith(".json"):
        username = username.split(".")[0]
        is_json = True
    show_warning = False
    user = User.query.filter_by(username=username).first()
    if not user:
        # Fall back to a case-insensitive match.
        user = User.query.filter(User.username.ilike(username)).first()
        if not user:
            return abort(404)
        # Warn only when the stored casing differs from what was requested.
        show_warning = user.username.lower() != username.lower()
    if is_json:
        return jsonify(username=user.username,
                       response_md=user.full_body_md,
                       response_html=user.full_body_html,
                       submitted=user.submitted,
                       processed=user.processed,
                       last_login=user.last_login)
    return render_template(
        "user.html",
        user=user,
        username=username,
        show_warning=show_warning)
@mod.route('/questions')
@mod_required
def questions():
    """Render the question-editor page (moderators only)."""
    return render_template("question_editor.html")
@mod.route('/response')
@mod_required
def response():
    """Render the response-editor page (moderators only)."""
    return render_template("response_editor.html")
@mod.route('/api')
@mod_required
def api():
    """Render the API documentation page from the bundled JSON spec."""
    with open("data/api_docs.json") as api_docs:
        data = json.load(api_docs)
    return render_template("api.html", data=data)
| StarcoderdataPython |
52974 | <filename>Library_Manage_System/login.py
# -*- coding: utf-8 -*-
import pymssql
import tkinter as tk
import re
r1=re.compile(r'.*')
import tkinter.messagebox
import tkinter.messagebox as messagebox
from tkinter import StringVar
# SQL Server host name (127.0.0.1 would be a local database).
serverName = 'localhost'
# Login user name and password.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file before deployment.
userName = 'sa'
passWord = '<PASSWORD>'
# Open the connection (cursors are created per-query below).
con = pymssql.connect(serverName , userName , passWord, "BookShop")
def click_query_author(data, root):
    """Query book info by author-name prefix and render the first row as labels.

    `data` is the raw entry text, `root` the window the result labels are
    packed into.  The first matching prefix wins (same as the original
    elif chain); unmatched input renders nothing.
    """
    lookups = (
        ('张', '张嘉佳'),
        ('林汉', '林汉达'),
        ('谷', '谷崎润一郎'),
        ('叶', '叶楚桥'),
        ('林海', '林海英'),
        ('丹', '丹尼尔笛福'),
        ('蕾', '蕾切尔卡斯克'),
    )
    for prefix, author in lookups:
        if str(data).startswith(prefix):
            cursor = con.cursor()
            cursor.execute('exec Query_Book_Author @Bauthor=' + author)
            for value in cursor.fetchall()[0]:
                tk.Label(root, text=value).pack()
            break
def click_query_bname(data, root):
    """Query book info by title prefix and render the first row as labels.

    First matching prefix wins; unmatched input renders nothing.
    """
    lookups = (
        ('鲁', '鲁冰逊漂流记'),
        ('上', '上下五千年'),
        ('桃', '桃李春风一杯酒'),
        ('春', '春琴抄'),
        ('成', '成为母亲'),
        ('从', '从你的全世界路过'),
        ('城', '城南旧事'),
    )
    for prefix, title in lookups:
        if str(data).startswith(prefix):
            cursor = con.cursor()
            cursor.execute('exec Query_Book_BName @Bname=' + title)
            for value in cursor.fetchall()[0]:
                tk.Label(root, text=value).pack()
            break
def click_query_isbn(data, root):
    """Query book info by ISBN prefix and render the first row as labels.

    Note the ordering: 'C00002' is checked before the catch-all 'C00001'
    entry, exactly as in the original elif chain.
    """
    lookups = (
        ('A', 'A00001'),
        ('B', 'B00001'),
        ('C00002', 'C00002'),
        ('D', 'D00001'),
        ('E', 'E00001'),
        ('F', 'F00001'),
        ('C00001', 'C00001'),
    )
    for prefix, isbn in lookups:
        if str(data).startswith(prefix):
            cursor = con.cursor()
            cursor.execute('exec Query_Book_ISBN @ISBN=' + isbn)
            for value in cursor.fetchall()[0]:
                tk.Label(root, text=value).pack()
            break
def click_query_brnumber(data, root):
    """Query reader info by reader-number prefix and render the first row.

    Bug fix: the original's first two branches were copy-paste leftovers —
    `startswith('171')` executed Query_Book_Author with a book title, and
    `startswith('上')` (a book-title prefix) passed `@Bauthor=1711216` to
    the reader procedure.  Both are folded into one corrected entry that
    queries reader 1711216 via Query_Bookreader_BR#.
    NOTE(review): the intended reader id behind the old '171' branch is a
    guess based on the surrounding data — confirm against the BookShop DB.
    """
    lookups = (
        ('1711216', '1711216'),
        ('1759115', '1759115'),
        ('1759116', '1759116'),
        ('1759117', '1759117'),
        ('1759119', '1759119'),
        ('1759140', '1759140'),
    )
    for prefix, number in lookups:
        if str(data).startswith(prefix):
            cursor = con.cursor()
            cursor.execute('exec Query_Bookreader_BR# @BR#=' + number)
            for value in cursor.fetchall()[0]:
                tk.Label(root, text=value).pack()
            break
def click_query_brname(data, root):
    """Query reader info by reader-name prefix and render the first row.

    NOTE(review): the first entry maps the prefix '鲁' to '鲁冰逊漂流记'
    (a book title) — preserved verbatim from the original elif chain, but
    it looks like a copy-paste leftover; confirm against the database.
    """
    lookups = (
        ('鲁', '鲁冰逊漂流记'),
        ('蔡', '蔡佳泉'),
        ('岑', '岑拓望'),
        ('钟', '钟宇航'),
        ('何', '何昌霖'),
        ('乔伯', '乔伯昱'),
        ('贺', '贺依凡'),
    )
    for prefix, name in lookups:
        if str(data).startswith(prefix):
            cursor = con.cursor()
            cursor.execute('exec Query_Bookreader_BRname @BRname=' + name)
            for value in cursor.fetchall()[0]:
                tk.Label(root, text=value).pack()
            break
def qurey_author():
    """Open the query-by-author window (entry + button wired to click_query_author)."""
    window = tk.Tk()
    window.title("读者")
    window.geometry('500x400')
    # Banner label.
    tk.Label(window, text='欢迎使用图书管理系统', bg='green',
             font=('Arial', 12), width=30, height=2).pack()
    # Menu bar: team-member submenu plus a quit entry.
    menubar = tk.Menu(window)
    filemenu = tk.Menu(menubar, tearoff=0)
    submenu = tk.Menu(filemenu, tearoff=0)
    menubar.add_cascade(label='嘿嘿嘿', menu=filemenu)
    filemenu.add_cascade(label='小组成员', menu=submenu)
    for member in ('钟宇航', '岑拓望', '蔡佳泉', '何昌霖', '乔伯昱', '贺依凡'):
        submenu.add_cascade(label=member)
    filemenu.add_command(label='退出', command=quit)
    window.config(menu=menubar)
    # Left/right frame split; the prompt label lives in the left frame.
    frm = tk.Frame(window)
    frm.pack()
    frm_l = tk.Frame(frm)
    frm_r = tk.Frame(frm)
    frm_l.pack(side='left')
    frm_r.pack(side='right')
    tk.Label(frm_l, text='请输入作者名字:').pack()
    entry = tk.Entry(window, textvariable=StringVar())
    entry.pack()
    tk.Button(window, text='查询', width=14, height=1,
              command=lambda: click_query_author(entry.get(), window)).pack()
    con.commit()  # no-op for pure SELECTs; kept from the original flow
    window.mainloop()
def qurey_bname():
    """Open the query-by-title window (entry + button wired to click_query_bname)."""
    window = tk.Tk()
    window.title("读者")
    window.geometry('500x400')
    tk.Label(window, text='欢迎使用图书管理系统', bg='green',
             font=('Arial', 12), width=30, height=2).pack()
    menubar = tk.Menu(window)
    filemenu = tk.Menu(menubar, tearoff=0)
    submenu = tk.Menu(filemenu, tearoff=0)
    menubar.add_cascade(label='嘿嘿嘿', menu=filemenu)
    filemenu.add_cascade(label='小组成员', menu=submenu)
    for member in ('钟宇航', '岑拓望', '蔡佳泉', '何昌霖', '乔伯昱', '贺依凡'):
        submenu.add_cascade(label=member)
    filemenu.add_command(label='退出', command=quit)
    window.config(menu=menubar)
    frm = tk.Frame(window)
    frm.pack()
    frm_l = tk.Frame(frm)
    frm_r = tk.Frame(frm)
    frm_l.pack(side='left')
    frm_r.pack(side='right')
    tk.Label(frm_l, text='请输入书名:').pack()
    entry = tk.Entry(window, textvariable=StringVar())
    entry.pack()
    tk.Button(window, text='查询', width=14, height=1,
              command=lambda: click_query_bname(entry.get(), window)).pack()
    con.commit()  # no-op for pure SELECTs; kept from the original flow
    window.mainloop()
def qurey_isbn():
    """Open the query-by-ISBN window (entry + button wired to click_query_isbn)."""
    window = tk.Tk()
    window.title("读者")
    window.geometry('500x400')
    tk.Label(window, text='欢迎使用图书管理系统', bg='green',
             font=('Arial', 12), width=30, height=2).pack()
    menubar = tk.Menu(window)
    filemenu = tk.Menu(menubar, tearoff=0)
    submenu = tk.Menu(filemenu, tearoff=0)
    menubar.add_cascade(label='嘿嘿嘿', menu=filemenu)
    filemenu.add_cascade(label='小组成员', menu=submenu)
    for member in ('钟宇航', '岑拓望', '蔡佳泉', '何昌霖', '乔伯昱', '贺依凡'):
        submenu.add_cascade(label=member)
    filemenu.add_command(label='退出', command=quit)
    window.config(menu=menubar)
    frm = tk.Frame(window)
    frm.pack()
    frm_l = tk.Frame(frm)
    frm_r = tk.Frame(frm)
    frm_l.pack(side='left')
    frm_r.pack(side='right')
    tk.Label(frm_l, text='请输入ISBN:').pack()
    entry = tk.Entry(window, textvariable=StringVar())
    entry.pack()
    tk.Button(window, text='查询', width=14, height=1,
              command=lambda: click_query_isbn(entry.get(), window)).pack()
    con.commit()  # no-op for pure SELECTs; kept from the original flow
    window.mainloop()
def qurey_brnumber():
    """Open the query-by-reader-number window (wired to click_query_brnumber)."""
    window = tk.Tk()
    window.title("读者")
    window.geometry('500x400')
    tk.Label(window, text='欢迎使用图书管理系统', bg='green',
             font=('Arial', 12), width=30, height=2).pack()
    menubar = tk.Menu(window)
    filemenu = tk.Menu(menubar, tearoff=0)
    submenu = tk.Menu(filemenu, tearoff=0)
    menubar.add_cascade(label='嘿嘿嘿', menu=filemenu)
    filemenu.add_cascade(label='小组成员', menu=submenu)
    for member in ('钟宇航', '岑拓望', '蔡佳泉', '何昌霖', '乔伯昱', '贺依凡'):
        submenu.add_cascade(label=member)
    filemenu.add_command(label='退出', command=quit)
    window.config(menu=menubar)
    frm = tk.Frame(window)
    frm.pack()
    frm_l = tk.Frame(frm)
    frm_r = tk.Frame(frm)
    frm_l.pack(side='left')
    frm_r.pack(side='right')
    tk.Label(frm_l, text='请输入您的编号:').pack()
    entry = tk.Entry(window, textvariable=StringVar())
    entry.pack()
    tk.Button(window, text='查询', width=14, height=1,
              command=lambda: click_query_brnumber(entry.get(), window)).pack()
    con.commit()  # no-op for pure SELECTs; kept from the original flow
    window.mainloop()
def qurey_brname():
    """Open the query-by-reader-name window (wired to click_query_brname)."""
    window = tk.Tk()
    window.title("读者")
    window.geometry('500x400')
    tk.Label(window, text='欢迎使用图书管理系统', bg='green',
             font=('Arial', 12), width=30, height=2).pack()
    menubar = tk.Menu(window)
    filemenu = tk.Menu(menubar, tearoff=0)
    submenu = tk.Menu(filemenu, tearoff=0)
    menubar.add_cascade(label='嘿嘿嘿', menu=filemenu)
    filemenu.add_cascade(label='小组成员', menu=submenu)
    for member in ('钟宇航', '岑拓望', '蔡佳泉', '何昌霖', '乔伯昱', '贺依凡'):
        submenu.add_cascade(label=member)
    filemenu.add_command(label='退出', command=quit)
    window.config(menu=menubar)
    frm = tk.Frame(window)
    frm.pack()
    frm_l = tk.Frame(frm)
    frm_r = tk.Frame(frm)
    frm_l.pack(side='left')
    frm_r.pack(side='right')
    tk.Label(frm_l, text='请输入您的姓名:').pack()
    entry = tk.Entry(window, textvariable=StringVar())
    entry.pack()
    tk.Button(window, text='查询', width=14, height=1,
              command=lambda: click_query_brname(entry.get(), window)).pack()
    con.commit()  # no-op for pure SELECTs; kept from the original flow
    window.mainloop()
# 五个存储过程
def reader():
    """Reader menu window: choose a book-info or reader-info query."""
    window = tk.Tk()
    window.title("读者")
    window.geometry('500x400')
    tk.Label(window, text='欢迎使用图书管理系统', bg='green',
             font=('Arial', 12), width=30, height=2).pack()
    menubar = tk.Menu(window)
    filemenu = tk.Menu(menubar, tearoff=0)
    submenu = tk.Menu(filemenu, tearoff=0)
    menubar.add_cascade(label='嘿嘿嘿', menu=filemenu)
    filemenu.add_cascade(label='小组成员', menu=submenu)
    for member in ('钟宇航', '岑拓望', '蔡佳泉', '何昌霖', '乔伯昱', '贺依凡'):
        submenu.add_cascade(label=member)
    filemenu.add_command(label='退出', command=quit)
    window.config(menu=menubar)
    # Book-information queries.
    tk.Label(window, text='图书信息:').pack()
    for caption, action in (('通过作者查询', qurey_author),
                            ('通过书名查询', qurey_bname),
                            ('通过ISBN查询', qurey_isbn)):
        tk.Button(window, text=caption, width=14, height=1, command=action).pack()
    # Reader-information queries.
    tk.Label(window, text='读者信息:').pack()
    for caption, action in (('通过编号查询', qurey_brnumber),
                            ('通过姓名查询', qurey_brname)):
        tk.Button(window, text=caption, width=14, height=1, command=action).pack()
    window.mainloop()
# 读者子窗口
def manager():
    """Manager menu window.

    NOTE: as in the original, these buttons have no command handlers yet —
    the manager features are placeholders.
    """
    window = tk.Tk()
    window.title("管理员")
    window.geometry('500x400')
    tk.Label(window, text='欢迎使用图书管理系统', bg='green',
             font=('Arial', 12), width=30, height=2).pack()
    menubar = tk.Menu(window)
    filemenu = tk.Menu(menubar, tearoff=0)
    submenu = tk.Menu(filemenu, tearoff=0)
    menubar.add_cascade(label='嘿嘿嘿', menu=filemenu)
    filemenu.add_cascade(label='小组成员', menu=submenu)
    for member in ('钟宇航', '岑拓望', '蔡佳泉', '何昌霖', '乔伯昱', '贺依凡'):
        submenu.add_cascade(label=member)
    filemenu.add_command(label='退出', command=quit)
    window.config(menu=menubar)
    tk.Label(window, text='请选择功能:').pack()
    for caption in ('借阅图书', '归还图书', '购买图书', '注销图书',
                    '征订图书', '管理图书信息', '管理读者信息'):
        tk.Button(window, text=caption, width=14, height=1).pack()
    window.mainloop()
# 管理员子窗口
def quie():
    """Create a Tk root and immediately quit its event loop.

    Bug fix: the original called `tk()` — `tk` is the tkinter *module* and is
    not callable, so this always raised TypeError.  Now uses `tk.Tk()`.
    NOTE(review): quitting a freshly created root is suspicious; the caller's
    existing window is probably what should be quit — TODO confirm intent
    (the function appears unused in this file).
    """
    root = tk.Tk()
    root.quit()
def main():
    """Entry window: pick the reader or manager role."""
    window = tk.Tk()
    window.title("图书管理系统")
    window.geometry('500x400')
    tk.Label(window, text='欢迎使用图书管理系统', bg='green',
             font=('Arial', 12), width=30, height=2).pack()
    menubar = tk.Menu(window)
    filemenu = tk.Menu(menubar, tearoff=0)
    submenu = tk.Menu(filemenu, tearoff=0)
    menubar.add_cascade(label='嘿嘿嘿', menu=filemenu)
    filemenu.add_cascade(label='小组成员', menu=submenu)
    for member in ('钟宇航', '岑拓望', '蔡佳泉', '何昌霖', '乔伯昱', '贺依凡'):
        submenu.add_cascade(label=member)
    filemenu.add_command(label='退出', command=quit)
    window.config(menu=menubar)
    tk.Label(window, text='请选择你的身份:').pack()
    # Left/right frame split holding the two role buttons.
    frm = tk.Frame(window)
    frm.pack()
    frm_l = tk.Frame(frm)
    frm_r = tk.Frame(frm)
    frm_l.pack(side='left')
    frm_r.pack(side='right')
    tk.Button(frm_l, text='读者', width=14, height=1, command=reader).pack()
    tk.Button(frm_r, text='管理员', width=14, height=1, command=manager).pack()
    window.mainloop()
# 主窗口
if __name__ == '__main__':
    main()
# Reference snippets kept from the original (dialog helpers):
# tk.messagebox.showinfo(title=' ', message=' ') # info dialog
# tk.messagebox.showwarning(title=' ', message=' ') # warning dialog
# tk.messagebox.showerror(title=' ', message=' ') # error dialog
# tk.messagebox.askquestion(title=' ', message=' ') # yes/no question dialog
# conn.close()
| StarcoderdataPython |
179156 | <gh_stars>0
import uvicorn
from fastapi import FastAPI, Request, status
from fastapi.exceptions import RequestValidationError
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from src.config.settings import Settings
from src.config.db import init_db
from src.routes import add_routers
def create_app(_config: Settings):
    """Build and wire up the FastAPI application.

    Registers a 422 handler that echoes validation errors together with the
    offending body, a root health route, and the project routers.
    """
    application = FastAPI()

    @application.exception_handler(RequestValidationError)
    async def validation_exception_handler(request: Request, exc: RequestValidationError):
        payload = jsonable_encoder({"detail": exc.errors(), "body": exc.body})
        return JSONResponse(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            content=payload,
        )

    @application.get("/")
    def index():
        return {"message": "FastAPI starter template, Use freely"}

    add_routers(app=application, config=_config)
    return application
# Load configuration
config = Settings.load_config()
# Create app
app = create_app(config)
# CORS whitelist: production host plus local development origins.
origins = [
    "http://anglestack-claims.herokuapp.com",
    "https://anglestack-claims.herokuapp.com",
    "http://localhost",
    "http://localhost:3000",
]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Serve generated invoice files from the local "invoices" directory.
app.mount('/invoices', StaticFiles(directory="invoices"), name="static")
# Initialize database
init_db(app)
# Dev entry point: reload=True is for local development only.
if __name__ == '__main__':
    uvicorn.run("main:app", port=config.PORT, reload=True)
| StarcoderdataPython |
128155 | <reponame>MattToul/CycleGAN
import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
    """Argparse option container shared by train/test option subclasses.

    NOTE(review): gather_options() reads opt.model, opt.dataset_mode and
    parse() reads opt.suffix / opt.gpu_ids, while print_options() reads
    opt.checkpoints_dir / opt.name — but initialize() below never registers
    any of those arguments, so those paths will raise AttributeError as
    written.  Confirm against the upstream CycleGAN/pix2pix BaseOptions
    this appears to be derived from.
    """
    def __init__(self):
        # Parser is built lazily on the first gather_options() call.
        self.initialized = False
    def initialize(self, parser):
        """Register the basic training options on `parser` and return it."""
        parser.add_argument('--dataset', type=str,default='./datasets/cityscapes')
        parser.add_argument('--mode',type=str, default='train')
        parser.add_argument('--batch_size', type=int, default=8)
        parser.add_argument('--epochs',type=int,default=500)
        parser.add_argument('--lr',type=float,default=1e-4,help="Learning Rate")
        parser.add_argument('--momentum', type=float, default=0, help="Momentum")
        parser.add_argument('--w_decay', type=float, default=1e-5, help="Weight Decay")
        parser.add_argument('--step_size', type=int, default=50, help="Step size")
        parser.add_argument('--gamma', type=float, default=0.5, help="Gamma")
        self.initialized = True
        return parser
    def gather_options(self):
        """Parse CLI options, letting model/dataset classes add their own."""
        # initialize parser with basic options
        # NOTE(review): `parser` is only bound inside this branch; a second
        # call with self.initialized already True hits a NameError below.
        if not self.initialized:
            parser = argparse.ArgumentParser(
                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        # get the basic options
        opt, _ = parser.parse_known_args()
        # modify model-related parser options
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        # NOTE(review): self.isTrain is expected to be set by subclasses.
        parser = model_option_setter(parser, self.isTrain)
        opt, _ = parser.parse_known_args()  # parse again with the new defaults
        # modify dataset-related parser options
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)
        self.parser = parser
        return parser.parse_args()
    def print_options(self, opt):
        """Pretty-print all options (marking non-defaults) and save to opt.txt."""
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)
        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')
    def parse(self):
        """Gather options, print/save them, select GPUs, and cache on self.opt."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain  # train or test
        # process opt.suffix
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix
        self.print_options(opt)
        # set gpu ids: parse the comma-separated list, keep non-negative ids,
        # and make the first one the current CUDA device.
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                opt.gpu_ids.append(id)
        if len(opt.gpu_ids) > 0:
            torch.cuda.set_device(opt.gpu_ids[0])
        self.opt = opt
        return self.opt
3319771 | <filename>tests/conftest.py<gh_stars>10-100
import time
import collections
import threading
import pytest
import pysoem
def pytest_addoption(parser):
    """Register the --ifname CLI option (network interface used by the tests)."""
    parser.addoption("--ifname", action="store")
class PySoemTestEnvironment:
    """Setup a basic pysoem test fixture that is needed for most of tests.

    Wraps a pysoem Master plus two background threads: a process-data
    exchange loop and a slave-state watchdog.  Requires the physical slave
    chain described in ``_expected_slave_layout`` to be attached.
    """
    # Beckhoff EtherCAT identity constants (vendor id + product codes).
    BECKHOFF_VENDOR_ID = 0x0002
    EK1100_PRODUCT_CODE = 0x044c2c52
    EL3002_PRODUCT_CODE = 0x0bba3052
    EL1259_PRODUCT_CODE = 0x04eb3052
    def __init__(self, ifname):
        self._ifname = ifname
        self._master = pysoem.Master()
        # Extra attributes stored on the master object itself; shared with
        # the check thread below.
        self._master.in_op = False
        self._master.do_check_state = False
        self._proc_thread_handle = None
        self._check_thread_handle = None
        # Stop flags for the process-data and check threads respectively.
        self._pd_thread_stop_event = threading.Event()
        self._ch_thread_stop_event = threading.Event()
        self._actual_wkc = 0  # last working counter from receive_processdata
        self.SlaveSet = collections.namedtuple('SlaveSet', 'name vendor_id product_code config_func')
        # Config callbacks may be injected by tests before setup() runs.
        self.el3002_config_func = None
        self.el1259_config_func = None
        self._expected_slave_layout = None
    def setup(self):
        """Open the interface, enumerate slaves and bring them to SAFE-OP.

        Asserts that the attached chain matches the expected layout exactly
        (position, vendor id and product code).
        """
        self._expected_slave_layout = {
            0: self.SlaveSet('XMC43-Test-Device', 0, 0x12783456, None),
            1: self.SlaveSet('EK1100', self.BECKHOFF_VENDOR_ID, self.EK1100_PRODUCT_CODE, None),
            2: self.SlaveSet('EL3002', self.BECKHOFF_VENDOR_ID, self.EL3002_PRODUCT_CODE, self.el3002_config_func),
            3: self.SlaveSet('EL1259', self.BECKHOFF_VENDOR_ID, self.EL1259_PRODUCT_CODE, self.el1259_config_func),
        }
        self._master.open(self._ifname)
        assert self._master.config_init(False) > 0
        self._master.config_dc()
        for i, slave in enumerate(self._master.slaves):
            assert slave.man == self._expected_slave_layout[i].vendor_id
            assert slave.id == self._expected_slave_layout[i].product_code
            slave.config_func = self._expected_slave_layout[i].config_func
            slave.is_lost = False
        self._master.config_map()
        assert self._master.state_check(pysoem.SAFEOP_STATE) == pysoem.SAFEOP_STATE
    def go_to_op_state(self):
        """Start the background threads and request OP state on all slaves.

        Polls up to 400 times (50 ms timeout each); fails the test if OP is
        never reached.
        """
        self._master.state = pysoem.OP_STATE
        self._proc_thread_handle = threading.Thread(target=self._processdata_thread)
        self._proc_thread_handle.start()
        self._check_thread_handle = threading.Thread(target=self._check_thread)
        self._check_thread_handle.start()
        self._master.write_state()
        for _ in range(400):
            self._master.state_check(pysoem.OP_STATE, 50000)
            if self._master.state == pysoem.OP_STATE:
                all_slaves_reached_op_state = True
                break
        # The local name only exists if the loop broke out successfully.
        assert 'all_slaves_reached_op_state' in locals(), 'could not reach OP state'
        self._master.in_op = True
    def teardown(self):
        """Stop the background threads, drop slaves to INIT and close the NIC."""
        self._pd_thread_stop_event.set()
        self._ch_thread_stop_event.set()
        if self._proc_thread_handle:
            self._proc_thread_handle.join()
        if self._check_thread_handle:
            self._check_thread_handle.join()
        self._master.state = pysoem.INIT_STATE
        self._master.write_state()
        self._master.close()
    def get_master(self):
        return self._master
    def get_slaves(self):
        return self._master.slaves
    def get_slave_for_foe_testing(self):
        return self._master.slaves[0]  # the XMC device
    def get_slave_without_foe_support(self):
        return self._master.slaves[2]  # the EL3002
    def _processdata_thread(self):
        """Cyclically exchange process data (~100 Hz) until asked to stop."""
        while not self._pd_thread_stop_event.is_set():
            self._master.send_processdata()
            self._actual_wkc = self._master.receive_processdata(10000)
            time.sleep(0.01)
    @staticmethod
    def _check_slave(slave, pos):
        """Try to recover a single misbehaving slave (SOEM example logic)."""
        if slave.state == (pysoem.SAFEOP_STATE + pysoem.STATE_ERROR):
            print(
                'ERROR : slave {} is in SAFE_OP + ERROR, attempting ack.'.format(pos))
            slave.state = pysoem.SAFEOP_STATE + pysoem.STATE_ACK
            slave.write_state()
        elif slave.state == pysoem.SAFEOP_STATE:
            print(
                'WARNING : slave {} is in SAFE_OP, try change to OPERATIONAL.'.format(pos))
            slave.state = pysoem.OP_STATE
            slave.write_state()
        elif slave.state > pysoem.NONE_STATE:
            if slave.reconfig():
                slave.is_lost = False
                print('MESSAGE : slave {} reconfigured'.format(pos))
        elif not slave.is_lost:
            slave.state_check(pysoem.OP_STATE)
            if slave.state == pysoem.NONE_STATE:
                slave.is_lost = True
                print('ERROR : slave {} lost'.format(pos))
        if slave.is_lost:
            if slave.state == pysoem.NONE_STATE:
                if slave.recover():
                    slave.is_lost = False
                    print(
                        'MESSAGE : slave {} recovered'.format(pos))
            else:
                slave.is_lost = False
                print('MESSAGE : slave {} found'.format(pos))
    def _check_thread(self):
        """Watchdog loop: when the working counter drops below the expected
        value (or a check is requested), inspect every slave and try to
        bring it back to OP."""
        while not self._ch_thread_stop_event.is_set():
            if self._master.in_op and ((self._actual_wkc < self._master.expected_wkc) or self._master.do_check_state):
                self._master.do_check_state = False
                self._master.read_state()
                for i, slave in enumerate(self._master.slaves):
                    if slave.state != pysoem.OP_STATE:
                        self._master.do_check_state = True
                        self._check_slave(slave, i)
                if not self._master.do_check_state:
                    print('OK : all slaves resumed OPERATIONAL.')
            time.sleep(0.01)
@pytest.fixture
def pysoem_environment(request):
    """Yield a fresh :class:`PySoemTestEnvironment` for one test.

    The EtherCAT interface name is taken from the ``--ifname`` command-line
    option; the environment is torn down after the test finishes.
    """
    ifname = request.config.getoption('--ifname')
    environment = PySoemTestEnvironment(ifname)
    yield environment
    environment.teardown()
| StarcoderdataPython |
172084 | <filename>app/db/db_users.py
from app.db import update_entry
import uuid
import logging
from typing import List, Dict, Optional
from .. import schema
logger = logging.getLogger("yatb.db.users")
# logger.debug(f"GlobalUsers, FileDB: {_db}")
async def get_user(username: str) -> Optional[schema.User]:
    """Look up a user by username; return None when no such user exists."""
    from . import _db
    matches = (user for user in _db._index["users"].values()
               if user.username == username)
    return next(matches, None)
async def get_user_uuid(uuid: uuid.UUID) -> Optional[schema.User]:
    """Return the user with the given UUID, or None when absent.

    The original fell through and returned None implicitly; ``dict.get``
    makes that explicit and avoids the double lookup.
    """
    from . import _db
    return _db._index["users"].get(uuid)
async def get_user_oauth_id(id: int) -> Optional[schema.User]:
    """Return the user with the given OAuth provider id, or None.

    ``id == -1`` means "no OAuth id" and never matches.  The return
    annotation was ``schema.User`` although the function also returns None;
    fixed to Optional.
    """
    from . import _db
    if id == -1:
        return None
    return next(
        (user for user in _db._index["users"].values() if user.oauth_id == id),
        None,
    )
async def get_all_users() -> Dict[uuid.UUID, schema.User]:
    """Return the full user-id -> user mapping.

    NOTE(review): this reads ``_db._db["users"]`` while every other accessor
    in this module reads ``_db._index["users"]`` -- presumably the two maps
    are kept in sync by the insert functions; confirm that invariant holds.
    """
    from . import _db
    return _db._db["users"]
async def check_user(username: str) -> bool:
    """Return True when a user with *username* exists.

    Delegates to :func:`get_user` instead of duplicating its linear scan.
    """
    return await get_user(username) is not None
async def check_user_uuid(uuid: uuid.UUID) -> bool:
    """Return True when a user with the given UUID exists."""
    from . import _db
    return uuid in _db._index["users"]
async def check_user_oauth_id(id: int) -> bool:
    """Return True when a user with the given OAuth id exists.

    Delegates to :func:`get_user_oauth_id`, which already treats
    ``id == -1`` as "never a match", so behavior is unchanged.
    """
    return await get_user_oauth_id(id) is not None
async def insert_user(username: str, password: str):
    """Create a password-authenticated user and register it.

    The record is stored in both the backing store (``_db._db``) and the
    lookup index (``_db._index``), which mirror each other.
    """
    from . import _db
    new_user = schema.User(
        username=username,
        password_hash=password,
    )
    for table in (_db._db["users"], _db._index["users"]):
        table[new_user.user_id] = new_user
    return new_user
async def insert_oauth_user(oauth_id: int, username: str, country: str):
    """Create a user authenticated via an OAuth provider and register it.

    NOTE(review): the original source is corrupted at this point
    (``password_hash=<PASSWORD>`` is not valid Python -- a dataset
    redaction artifact).  An empty hash is used so OAuth-only accounts have
    no local password; confirm against ``schema.User`` and the login path.
    """
    from . import _db
    user = schema.User(
        username=username,
        password_hash="",
        country=country,
        oauth_id=oauth_id,
    )
    # Mirror the record in both the backing store and the lookup index.
    _db._db["users"][user.user_id] = user
    _db._index["users"][user.user_id] = user
    return user
async def update_user(user_id: uuid.UUID, new_user: schema.UserUpdateForm):
    """Apply a user-editable update form to an existing user.

    NOTE(review): pydantic's ``parse_obj`` is a classmethod; calling it on an
    instance builds a *new* model rather than mutating ``user`` in place, so
    this update most likely never persists -- verify.
    """
    from . import _db
    user: schema.User = _db._index["users"][user_id]
    user.parse_obj(new_user)  # WTF: crazy updater?
async def update_user_admin(user_id: uuid.UUID, new_user: schema.User):
    """Apply an admin-supplied user object onto an existing record in place.

    Identity and derived/credential fields (id, password hash, score, solved
    tasks, OAuth id) are excluded so an admin edit cannot overwrite them.
    Returns the mutated user.
    """
    from . import _db
    user: schema.User = _db._index["users"][user_id]
    logger.debug(f"Update user {user} to {new_user}")
    update_entry(
        user,
        new_user.dict(
            exclude={
                "user_id",
                "password_hash",
                "score",
                "solved_tasks",
                "oauth_id",
            }
        ),
    )
    # user.parse_obj(new_user)
    logger.debug(f"Resulting user={user}")
    return user
| StarcoderdataPython |
1751210 | <gh_stars>0
"""
Pairwise "noise" correlations among neurons.
"""
import pickle
import numpy as np
from scipy.io import savemat
import matplotlib.pyplot as plt
import seaborn as sns
from src.data_utils import get_per_mouse_boutons
from src.corr_utils import compute_noise_corrs, compute_response_integral
sns.set_palette("colorblind")
sns.set_context("poster")
data_dir = '../data/per_mouse/AFC'
phases = ['hab', 'acq', 'rec']  # habituation / acquisition / recall
all_corrs = {'CS+': {}, 'CS-': {}} # stack them
C = {} # Per mouse
# Frames during stimulus presentation used for the response integral.
stim_frames = np.arange(12, 21)
stims = ['CS-', 'CS+']  # label 0 -> CS-, label 1 -> CS+ (enumerate order below)
for i, phase in enumerate(phases):
    print(f"Computing noise corrs. phase {phase}")
    X, y = get_per_mouse_boutons(phase, data_dir)
    Xint = {}
    all_corrs['CS+'][phase] = []
    all_corrs['CS-'][phase] = []
    for mouse_id in X.keys():
        if i == 0:  # init.
            C[mouse_id] = {'CS+': {}, 'CS-': {}}
        # Exclude first trials (one per stim.) since these had huge corrs. during habituation
        Xint[mouse_id] = compute_response_integral(X[mouse_id][2:], stim_frames)
        for label, CS in enumerate(stims):
            # sub. mean response (although doesn't matter for corr.)
            # (fancy indexing returns a copy, so the in-place ops below do
            # not modify Xint)
            fluctuation = Xint[mouse_id][y[mouse_id][2:] == label]
            fluctuation -= Xint[mouse_id][y[mouse_id][2:] == label].mean(0, keepdims=True)
            C[mouse_id][CS][phase], _ = compute_noise_corrs(fluctuation)
            all_corrs[CS][phase] += list(C[mouse_id][CS][phase])
# Save data
all_corrs:  {'CS+'/'CS-': {phase: flat list of pairwise corrs across mice}}
fname = "../data/noise_corrs"
print(f"Save data as {fname}.pickle and {fname}.mat")
with open(f'{fname}.pickle', 'wb') as handle:
    pickle.dump(all_corrs, handle, protocol=pickle.HIGHEST_PROTOCOL)
savemat(f'{fname}.mat', all_corrs)
# Figure: mean noise correlation (CS+ only) per phase.
plt.figure()
plt.bar([0, 1, 2], [np.mean(all_corrs['CS+'][phase]) for phase in phases])
plt.xticks([0, 1, 2], phases)
plt.yticks([0, 0.05, 0.1])
plt.ylabel("Noise corr.")
sns.despine()
plt.tight_layout()
fname = "../figures/noise_corrs.pdf"
print(f"Save figure as {fname}")
plt.savefig(fname, dpi=300)
| StarcoderdataPython |
32239 | #!/usr/bin/python
import elasticsearch
from elasticsearch_dsl import Search, A, Q
#import logging
import sys
import os
#logging.basicConfig(level=logging.WARN)
#es = elasticsearch.Elasticsearch(
# ['https://gracc.opensciencegrid.org/q'],
# timeout=300, use_ssl=True, verify_certs=False)
# Python 2 script: bulk-fix GRACC job records that were stored with
# Processors=0 by setting Processors=1 and recomputing CoreHours.
es = elasticsearch.Elasticsearch(
    ['localhost:9200'],
    timeout=300)
osg_raw_index = 'gracc.osg.raw-*'
s = Search(using=es, index=osg_raw_index)
# Match the records by ProbeName and processors = 0.
s = s.query("match", ProbeName="htcondor-ce:hosted-ce18.grid.uchicago.edu")
s = s.query("match", Processors=0)
# Only records from the last 12 months.
s = s.filter('range', EndTime={'from': 'now-12M', 'to': 'now'})
response = s.execute()
print "Query took %i milliseconds" % response.took
print "Query got %i hits" % response.hits.total
#update_id = "8c5816978fee6fc17718bcf81350d1f4"
#print "About to update record with id: %s" % update_id
#es.update(index="gracc.osg.raw3-2017.07", doc_type='JobUsageRecord', id=update_id, body={'doc': {'VOName': 'UserSchool2017'}})
update_buffer = []
# scan() streams *all* matching hits (execute() above only fetched a page).
for hit in s.scan():
    # Calculate the new CoreHours (cores = 1):
    core_hours = hit.WallDuration / 3600.0
    updated_doc = {
        "doc": {
            "CoreHours": core_hours,
            "Processors": 1
        },
        "_index": hit.meta.index,
        "_id": hit.meta.id,
        "_type": hit.meta.doc_type,
        "_op_type": "update"
    }
    update_buffer.append(updated_doc)
    print "Update %s" % updated_doc
    # Flush partial-update actions in batches of ~200.
    # NOTE(review): relies on ``elasticsearch.helpers`` being importable via
    # the top-level package -- confirm, or add ``import elasticsearch.helpers``.
    if len(update_buffer) > 200:
        elasticsearch.helpers.bulk(es, update_buffer)
        update_buffer = []
# Flush whatever remains after the loop.
elasticsearch.helpers.bulk(es, update_buffer)
#es.update(index=hit.meta.index, doc_type=hit.meta.doc_type, id=hit.meta.id, body={'doc': updated_doc})
1687442 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of pyunicorn.
# Copyright (C) 2008--2019 <NAME> and pyunicorn authors
# URL: <http://www.pik-potsdam.de/members/donges/software>
# License: BSD (3-clause)
#
# Please acknowledge and cite the use of this software and its authors
# when results are used in publications or published elsewhere.
#
# You can use the following reference:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# and <NAME>, "Unified functional network and nonlinear time series analysis
# for complex systems science: The pyunicorn package"
"""
Provides classes for the analysis of dynamical systems and time series based
on recurrence plots, including measures of recurrence quantification
analysis (RQA) and recurrence network analysis.
"""
# array object and fast numerics
import numpy as np
from ..core import InteractingNetworks
from .recurrence_plot import RecurrencePlot
from .cross_recurrence_plot import CrossRecurrencePlot
#
# Class definitions
#
class InterSystemRecurrenceNetwork(InteractingNetworks):
    """
    Generating and quantitatively analyzing inter-system recurrence networks.
    For a inter-system recurrence network, time series x and y do not need to
    have the same length! Formally, nodes are identified with state vectors in
    the common phase space of both time series. Hence, the time series need to
    have the same number of dimensions and identical physical units.
    Undirected links are added to describe recurrences within x and y as well
    as cross-recurrences between x and y. Self-loops are excluded in this
    undirected network representation.
    More information on the theory and applications of inter system recurrence
    networks can be found in [Feldhoff2012]_.
    **Examples:**
     - Create an instance of InterSystemRecurrenceNetwork with fixed
       recurrence thresholds and without embedding::
           InterSystemRecurrenceNetwork(x, y, threshold=(0.1, 0.2, 0.1))
     - Create an instance of InterSystemRecurrenceNetwork at a fixed
       recurrence rate and using time delay embedding::
           InterSystemRecurrenceNetwork(
               x, y, dim=3, tau=(2, 1), recurrence_rate=(0.05, 0.05, 0.02))
    """
    #
    #  Internal methods
    #
    def __init__(self, x, y, metric="supremum",
                 normalize=False, silence_level=0, **kwds):
        """
        Initialize an instance of InterSystemRecurrenceNetwork (ISRN).
        .. note::
           For an inter system recurrence network, time series x and y need to
           have the same number of dimensions!
        Creates an embedding of the given time series x and y, calculates a
        inter system recurrence matrix from the embedding and then creates
        an InteractingNetwork object from this matrix, interpreting the inter
        system recurrence matrix as the adjacency matrix of an undirected
        complex network.
        Either recurrence thresholds ``threshold`` or
        recurrence rates ``recurrence_rate`` have to be given as keyword
        arguments.
        Embedding is only supported for scalar time series. If embedding
        dimension ``dim`` and delay ``tau`` are **both** given as keyword
        arguments, embedding is applied. Multidimensional time series are
        processed as is by default.
        :type x: 2D Numpy array (time, dimension)
        :arg x: The time series x to be analyzed, can be scalar or
            multi-dimensional.
        :type y: 2D Numpy array (time, dimension)
        :arg y: The time series y to be analyzed, can be scalar or
            multi-dimensional.
        :type metric: tuple of string
        :arg metric: The metric for measuring distances in phase space
            ("manhattan", "euclidean", "supremum").
        :arg bool normalize: Decide whether to normalize the time series to
            zero mean and unit standard deviation.
        :arg int silence_level: The inverse level of verbosity of the object.
        :arg kwds: Additional options.
        :type threshold: tuple of number (three numbers)
        :keyword threshold: The recurrence threshold keyword for generating
            the recurrence plot using fixed thresholds. Give
            for each time series and the cross recurrence plot
            separately.
        :type recurrence_rate: tuple of number (three numbers)
        :keyword recurrence_rate: The recurrence rate keyword for generating
            the recurrence plot using a fixed recurrence
            rate. Give separately for each time series.
        :keyword int dim: The embedding dimension. Must be the same for both
            time series.
        :type tau: tuple of int
        :keyword tau: The embedding delay. Give separately for each time
            series.
        """
        #  Store time series
        self.x = x.copy().astype("float32")
        """The time series x."""
        self.y = y.copy().astype("float32")
        """The time series y."""
        #  Reshape time series (ensure a (time, dimension) 2D layout,
        #  turning scalar series into single-column arrays)
        self.x.shape = (self.x.shape[0], -1)
        self.y.shape = (self.y.shape[0], -1)
        #  Get embedding dimension and delay from **kwds
        dim = kwds.get("dim")
        tau = kwds.get("tau")
        #  Check for consistency: both series must share the phase-space
        #  dimension for the cross-recurrence part to make sense
        if self.x.shape[1] == self.y.shape[1]:
            #  Set silence_level
            self.silence_level = silence_level
            """The inverse level of verbosity of the object."""
            #  Get number of nodes in subnetwork x
            self.N_x = self.x.shape[0]
            """Number of nodes in subnetwork x."""
            #  Get number of nodes in subnetwork y
            self.N_y = self.y.shape[0]
            """Number of nodes in subnetwork y."""
            #  Get total number of nodes of ISRN
            self.N = self.N_x + self.N_y
            """Total number of nodes of ISRN."""
            #  Store type of metric
            self.metric = metric
            """The metric used for measuring distances in phase space."""
            #  Normalize time series (in place)
            if normalize:
                RecurrencePlot.normalize_time_series(self.x)
                RecurrencePlot.normalize_time_series(self.y)
            #  Embed time series if required (scalar input only; tau is a
            #  per-series pair, dim is shared)
            self.dim = dim
            if dim is not None and tau is not None and self.x.shape[1] == 1:
                self.x_embedded = \
                    RecurrencePlot.embed_time_series(self.x, dim, tau[0])
                """The embedded time series x."""
                self.y_embedded = \
                    RecurrencePlot.embed_time_series(self.y, dim, tau[1])
                """The embedded time series y."""
            else:
                self.x_embedded = self.x
                self.y_embedded = self.y
            #  Get threshold or recurrence rate from **kwds, construct
            #  ISRN accordingly
            threshold = kwds.get("threshold")
            recurrence_rate = kwds.get("recurrence_rate")
            #  NOTE: self.threshold stays None when recurrence_rate is used;
            #  __str__ then prints "None" for it.
            self.threshold = threshold
            if threshold is not None:
                #  Calculate the ISRN using the radius of neighborhood
                #  threshold
                ISRM = self.set_fixed_threshold(threshold)
            elif recurrence_rate is not None:
                #  Calculate the ISRN using a fixed recurrence rate
                ISRM = self.set_fixed_recurrence_rate(recurrence_rate)
            else:
                raise NameError("Please give either threshold or \
                                recurrence_rate to construct the joint \
                                recurrence plot!")
            InteractingNetworks.__init__(self, adjacency=ISRM, directed=False,
                                         silence_level=self.silence_level)
            #  No treatment of missing values yet!
            self.missing_values = False
        else:
            raise ValueError("Both time series x and y need to have the same \
                             dimension!")
    def __str__(self):
        """
        Returns a string representation.
        """
        return ('InterSystemRecurrenceNetwork: time series shapes %s, %s.\n'
                'Embedding dimension %i\nThreshold %s, %s metric.\n%s') % (
                    self.x.shape, self.y.shape, self.dim if self.dim else 0,
                    self.threshold, self.metric,
                    InteractingNetworks.__str__(self))
    #
    #  Service methods
    #
    def clear_cache(self):
        """
        Clean up memory by deleting information that can be recalculated from
        basic data.
        Extends the clean up methods of the parent classes.
        """
        #  Call clean up of RecurrencePlot objects
        #  (rp_x/rp_y/crp_xy exist after set_fixed_threshold or
        #  set_fixed_recurrence_rate ran in __init__)
        self.rp_x.clear_cache()
        self.rp_y.clear_cache()
        #  Call clean up of CrossRecurrencePlot object
        self.crp_xy.clear_cache()
        #  Call clean up of InteractingNetworks
        InteractingNetworks.clear_cache(self)
    #
    #  Methods to handle inter system recurrence networks
    #
    def inter_system_recurrence_matrix(self):
        """
        Return the current inter system recurrence matrix :math:`ISRM`.
        :rtype: 2D square Numpy array
        :return: the current inter system recurrence matrix :math:`ISRM`.
        """
        #  Shortcuts
        N = self.N
        N_x = self.N_x
        N_y = self.N_y
        #  Init
        ISRM = np.zeros((N, N))
        #  Combine to inter system recurrence matrix: block structure
        #  [[RP(x),  CRP(x,y)], [CRP(x,y)^T, RP(y)]] keeps the matrix
        #  symmetric (undirected network).
        ISRM[:N_x, :N_x] = self.rp_x.recurrence_matrix()
        ISRM[:N_x, N_x:N] = self.crp_xy.recurrence_matrix()
        ISRM[N_x:N, :N_x] = self.crp_xy.recurrence_matrix().transpose()
        ISRM[N_x:N, N_x:N] = self.rp_y.recurrence_matrix()
        return ISRM
    def set_fixed_threshold(self, threshold):
        """
        Create a inter system recurrence network at fixed thresholds.
        :type threshold: tuple of number (three numbers)
        :arg threshold: The three threshold parameters. Give for each
            time series and the cross recurrence plot separately.
        """
        #  Compute recurrence matrices of x and y
        self.rp_x = RecurrencePlot(time_series=self.x_embedded,
                                   threshold=threshold[0],
                                   metric=self.metric,
                                   silence_level=self.silence_level)
        self.rp_y = RecurrencePlot(time_series=self.y_embedded,
                                   threshold=threshold[1],
                                   metric=self.metric,
                                   silence_level=self.silence_level)
        #  Compute cross-recurrence matrix of x and y
        self.crp_xy = CrossRecurrencePlot(x=self.x_embedded, y=self.y_embedded,
                                          threshold=threshold[2],
                                          metric=self.metric,
                                          silence_level=self.silence_level)
        #  Get combined ISRM
        ISRM = self.inter_system_recurrence_matrix()
        #  Set diagonal of ISRM to zero to avoid self-loops
        #  (flat stride N+1 walks exactly the main diagonal)
        ISRM.flat[::self.N + 1] = 0
        return ISRM
    def set_fixed_recurrence_rate(self, density):
        """
        Create a inter system recurrence network at fixed link densities (
        recurrence rates).
        :type density: tuple of number (three numbers)
        :arg density: The three recurrence rate parameters. Give for each
            time series and the cross recurrence plot separately.
        """
        #  Compute recurrence matrices of x and y
        self.rp_x = RecurrencePlot(time_series=self.x_embedded,
                                   recurrence_rate=density[0],
                                   metric=self.metric,
                                   silence_level=self.silence_level)
        self.rp_y = RecurrencePlot(time_series=self.y_embedded,
                                   recurrence_rate=density[1],
                                   metric=self.metric,
                                   silence_level=self.silence_level)
        #  Compute cross-recurrence matrix of x and y
        self.crp_xy = CrossRecurrencePlot(x=self.x_embedded, y=self.y_embedded,
                                          recurrence_rate=density[2],
                                          metric=self.metric,
                                          silence_level=self.silence_level)
        #  Get combined ISRM
        ISRM = self.inter_system_recurrence_matrix()
        #  Set diagonal of ISRM to zero to avoid self-loops
        ISRM.flat[::self.N + 1] = 0
        return ISRM
    #
    #  Methods to quantify inter system recurrence networks
    #
    def internal_recurrence_rates(self):
        """
        Return internal recurrence rates of subnetworks x and y.
        :rtype: tuple of number (float)
        :return: the internal recurrence rates of subnetworks x and y.
        """
        return (self.rp_x.recurrence_rate(),
                self.rp_y.recurrence_rate())
    def cross_recurrence_rate(self):
        """
        Return cross recurrence rate between subnetworks x and y.
        :rtype: number (float)
        :return: the cross recurrence rate between subnetworks x and y.
        """
        return self.crp_xy.cross_recurrence_rate()
    def cross_global_clustering_xy(self):
        """
        Return cross global clustering of x with respect to y.
        See [Feldhoff2012]_ for definition, further explanation and
        applications.
        :rtype: number (float)
        :return: the cross global clustering of x with respect to y.
        """
        #  Node indices: x occupies [0, N_x), y occupies [N_x, N)
        return self.cross_global_clustering(np.arange(self.N_x),
                                            np.arange(self.N_x, self.N))
    def cross_global_clustering_yx(self):
        """
        Return cross global clustering of y with respect to x.
        See [Feldhoff2012]_ for definition, further explanation and
        applications.
        :rtype: number (float)
        :return: the cross global clustering of y with respect to x.
        """
        return self.cross_global_clustering(np.arange(self.N_x, self.N),
                                            np.arange(self.N_x))
    def cross_transitivity_xy(self):
        """
        Return cross transitivity of x with respect to y.
        See [Feldhoff2012]_ for definition, further explanation and
        applications.
        :rtype: number (float)
        :return: the cross transitivity of x with respect to y.
        """
        return self.cross_transitivity(np.arange(self.N_x),
                                       np.arange(self.N_x, self.N))
    def cross_transitivity_yx(self):
        """
        Return cross transitivity of y with respect to x.
        See [Feldhoff2012]_ for definition, further explanation and
        applications.
        :rtype: number (float)
        :return: the cross transitivity of y with respect to x.
        """
        return self.cross_transitivity(np.arange(self.N_x, self.N),
                                       np.arange(self.N_x))
| StarcoderdataPython |
4840915 | """unzip the dataset"""
import zipfile
def main(archive="img_align_celeba.zip", dest="."):
    """Extract a zip archive.

    Args:
        archive: path to the zip file (default preserves the original
            hard-coded dataset name).
        dest: directory to extract into (default: current directory, same
            as the original's bare ``extractall()``).
    """
    with zipfile.ZipFile(archive, "r") as zip_ref:
        zip_ref.extractall(dest)

if __name__ == "__main__":
    main()
| StarcoderdataPython |
76580 | <reponame>mludolph/fogmsg
import hashlib
import os
import time
from typing import List
import zmq
from fogmsg.node.config import NodeConfig
from fogmsg.node.receiver import NodeReceiver
from fogmsg.node.sensor import Sensor
from fogmsg.utils import messaging
from fogmsg.utils.errors import NoAcknowledgementError
from fogmsg.utils.logger import configure_logger
from fogmsg.utils.queue import MessageQueue
from zmq import Context
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=4)
class Node:
    """A fogmsg edge node: polls local sensors, publishes readings to the
    master over ZeroMQ REQ/REP with a persistent retry queue, and runs a
    local receiver for incoming messages."""
    def __init__(
        self,
        sensors: List[Sensor],
        config: NodeConfig,
    ):
        self.logger = configure_logger("node")
        self.ctx = Context.instance()
        self.config = config
        self.hostname = config.IP
        self.port = config.PORT
        self.advertised_hostname = config.ADVERTISED_HOSTNAME
        self.master_hostname = config.MASTER_HOSTNAME
        self.master = None  # REQ socket, created lazily by reconnect()
        # Persistence directory keyed by md5 of the master hostname, so each
        # master gets its own on-disk retry queue.
        path = os.path.join(
            config.PERSISTENCE_DIR,
            hashlib.md5(self.master_hostname.encode("utf-8")).hexdigest(),
        )
        self.msg_queue = MessageQueue(config.SENDER_QUEUE_LENGTH, path)
        self.running = False
        self.sensors = sensors
    def reconnect(self):
        """(Re)create the REQ socket to the master, discarding any pending
        outbound data on the old one (LINGER 0)."""
        if self.master:
            self.master.setsockopt(zmq.LINGER, 0)
            self.master.close()
        self.master = self.ctx.socket(zmq.REQ)
        self.master.connect(self.master_hostname)
        self.logger.info(f"connecting to master (hostname={self.master_hostname})")
    def try_send_messages(self) -> bool:
        """Drain the retry queue in order; on any failure reconnect and stop.

        Messages are only dequeued after a successful ack, so a failed send
        leaves the message at the head for the next attempt.
        Returns True when the queue was fully drained.
        """
        while self.msg_queue.peek():
            msg = self.msg_queue.peek()
            try:
                self._send_message(msg)
                self.msg_queue.dequeue()
            except (TimeoutError, NoAcknowledgementError):
                self.reconnect()
                return False
        return True
    def _send_message(self, msg):
        """Send one (already serialized) message and wait for an 'ack' reply.

        Raises TimeoutError on send/receive timeout and
        NoAcknowledgementError when the master replies with anything else.
        """
        self.logger.debug("sending message...")
        try:
            # self.master.send_json(msg, zmq.NOBLOCK)
            self.master.send(msg)
        except zmq.error.Again:
            self.logger.warn("could not reach host!")
            raise TimeoutError
        if self.master.poll(self.config.SENDER_TIMEOUT) == 0:
            self.logger.warn("sending of message timed out!")
            raise TimeoutError
        msg = self.master.recv()
        msg = messaging.deserialize(msg)
        if msg != "ack":
            self.logger.warn("message was not ack'ed")
            raise NoAcknowledgementError
    def join(self):
        """Best-effort unregister at the master, then shut down the receiver
        and close the master socket.

        NOTE(review): ``self.running`` is intentionally left True here (the
        assignment is commented out) -- confirm whether run() is expected to
        keep looping after join().
        """
        # self.running = False
        self.logger.info("unregistering node...")
        try:
            self._send_message(messaging.unregister_message(self.advertised_hostname))
        except Exception:
            # Best effort: the master may already be unreachable.
            pass
        self.receiver.join()
        self.master.close()
    def handle_message(self, msg):
        """Dispatch a message received by the local NodeReceiver."""
        if msg["cmd"] == "publish":
            self.logger.info(f"DATA: {msg['sensor']}@{msg['origin']}")
            pp.pprint(msg)
        elif msg["cmd"] == "control":
            self.logger.info(f"CONTROL: {msg['action']}")
    def run(self):
        """Main loop: register at the master, then poll sensors, enqueue
        readings and flush the queue until KeyboardInterrupt."""
        self.logger.info("starting fogmsg node...")
        # Start local receiver to receive messages
        self.receiver = NodeReceiver(
            self.config,
            message_callback=lambda msg: self.handle_message(msg),
            ctx=self.ctx,
        )
        self.receiver.start()
        # Start master socket
        self.reconnect()
        # wait for registration at master (goes through the same retry queue
        # as ordinary publishes, so it is delivered first)
        self.msg_queue.enqueue(messaging.register_message(self.advertised_hostname))
        self.running = True
        self.logger.info("registered and started node")
        try:
            while self.running:
                for sensor in self.sensors:
                    payload = sensor.get_reading()
                    if not payload:
                        # Sensor had nothing to report this cycle.
                        continue
                    self.msg_queue.enqueue(
                        messaging.publish_message(
                            self.advertised_hostname, sensor.name(), payload
                        )
                    )
                self.try_send_messages()
                time.sleep(0.2)
        except KeyboardInterrupt:
            self.join()
| StarcoderdataPython |
1628566 | <reponame>YeemBoi/django-remote-submission
"""Provide default config when installing the application."""
# -*- coding: utf-8 -*-
from django.apps import AppConfig
import logging
logger = logging.getLogger(__name__) # pylint: disable=C0103
class DjangoRemoteSubmissionConfig(AppConfig):
    """Provide basic configuration of this app."""
    name = 'django_remote_submission'
    verbose_name = 'Django Remote Submission'
    def ready(self):
        """Hook run once the app registry is ready; connects signal handlers."""
        logger.debug("DjangoRemoteSubmissionConfig Ready!")
        import django_remote_submission.signals  # noqa: F401 -- imported for its side effect (registers signal receivers)
| StarcoderdataPython |
3281852 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import pandas as pd
from os import path, environ
import pytest
import bluepandas
from test.secrets import secret_conn_string
# Directory with local reference CSV fixtures.
test_assets_path = path.abspath(path.join(path.dirname(__file__), "data"))
# Fixture URLs exercising the wasb(s):// parser; the last two "valid-looking"
# entries ([5]: blank blob name, [6]: container root) are expected to fail
# parsing, and [7] is the write target for the round-trip test.
my_urls = [
    "wasb://test-container@bluepandas.blob.core.windows.net/banklist.csv",
    "wasbs://test-container@bluepandas.blob.core.windows.net/banklist.csv",
    "wasbs://test-container@bluepandas.blob.core.windows.net/dir/long/CrAzyT3xt%&¤&.csv",
    "wasbs://test-container@greenpandas.blob.core.windows.net/banklist.csv",
    "wasbs://test-container@bluepandas.blob.core.windows.net/Monthly-train.csv",
    "wasb://test-container@bluepandas.blob.core.windows.net/ ",
    "wasbs://test-container@bluepandas.blob.core.windows.net/",
    "wasbs://test-container@bluepandas.blob.core.windows.net/banklist_written.csv"
]
# Dummy (non-secret) connection strings: account-key form and SAS form.
account_conn_string = "DefaultEndpointsProtocol=https;AccountName=bluepandas;AccountKey=xxxx;EndpointSuffix=core.windows.net"
sas_conn_string = "BlobEndpoint=https://bluepandas.blob.core.windows.net/;QueueEndpoint=https://bluepandas.queue.core.windows.net/;FileEndpoint=https://bluepandas.file.core.windows.net/;TableEndpoint=https://bluepandas.table.core.windows.net/;SharedAccessSignature=xxxx"
def test_parse_urls():
    """parse_url splits a wasb(s) URL into (container, account, blob path);
    malformed URLs (blank blob name, container root) raise URLParsingError."""
    assert bluepandas.parse_url(my_urls[0]) == ("test-container", "bluepandas",
"banklist.csv")
    assert bluepandas.parse_url(my_urls[1]) == ("test-container", "bluepandas",
"banklist.csv")
    assert bluepandas.parse_url(my_urls[2]) == ("test-container", "bluepandas",
"dir/long/CrAzyT3xt%&¤&.csv")
    with pytest.raises(bluepandas.URLParsingError):
        bluepandas.parse_url(my_urls[5])
        bluepandas.parse_url(my_urls[6])  # NOTE(review): unreachable if [5] raises -- only [5] is actually tested
def test_get_connection_string():
    """get_connection_string reads AZ_<ACCOUNT> from the environment and must
    contain either an AccountKey or a SharedAccessSignature component."""
    with pytest.raises(bluepandas.ConnectionStringError):
        environ.pop("AZ_GREENPANDAS", None)  # In case environment contains this
        bluepandas.get_connection_string("greenpandas")
    environ["AZ_BLUEPANDAS"] = account_conn_string
    conn_string = bluepandas.get_connection_string("bluepandas")
    keys1 = [m.split("=")[0] for m in conn_string.split(";")]
    environ["AZ_BLUEPANDAS"] = sas_conn_string
    conn_string = bluepandas.get_connection_string("bluepandas")
    keys2 = [m.split("=")[0] for m in conn_string.split(";")]
    # Account-key form must expose AccountKey, or SAS form must expose
    # SharedAccessSignature.
    assert ("AccountKey" in keys1) or ("SharedAccessSignature" in keys2)
def test_read_csv():
    """Remote read must match the local reference CSV (requires live Azure
    credentials via test.secrets)."""
    environ["AZ_BLUEPANDAS"] = secret_conn_string
    df_reference = pd.read_csv(path.join(test_assets_path, "banklist.csv"))
    # Re-wrap as a bluepandas.DataFrame so .equals compares like with like.
    df_reference = bluepandas.DataFrame(df_reference.values,
columns=df_reference.columns)
    df_test = bluepandas.read_csv(my_urls[1])
    assert df_reference.equals(df_test)
def test_write_csv():
    """to_csv without a URL serializes to a string; with a wasbs URL it
    round-trips through blob storage (requires live Azure credentials)."""
    df = bluepandas.DataFrame([(1,2), (3,4)], columns = ['A', 'B'])
    assert df.to_csv() == ",A,B\n0,1,2\n1,3,4\n"
    assert df.to_csv(index=False) == "A,B\n1,2\n3,4\n"
    df.to_csv(my_urls[7], index=False)
    df_test = bluepandas.read_csv(my_urls[7])
    assert df.equals(df_test)
| StarcoderdataPython |
1756922 | <gh_stars>1000+
import duckdb
import pytest
import tempfile
import numpy
import pandas
import datetime
try:
import pyarrow as pa
can_run = True
except:
can_run = False
def parquet_types_test(type_list):
    """Round-trip typed columns through Parquet and compare three readers.

    Each entry of *type_list* is
    ``(value_list, numpy_type, sql_type[, add_cast[, add_sql_cast]])``:
    the values are written with DuckDB's COPY, read back via
    ``pandas.read_parquet`` and DuckDB's ``parquet_scan`` (for both the
    DuckDB-written and the pandas-written file), and all results must match.
    """
    import os

    # Use a private directory instead of an open NamedTemporaryFile: the
    # original kept the handle open while DuckDB and pandas re-opened the
    # same path, which leaks the handle and fails outright on Windows.
    with tempfile.TemporaryDirectory() as tmp_dir:
        temp_name = os.path.join(tmp_dir, "roundtrip.parquet")
        for type_pair in type_list:
            value_list = type_pair[0]
            numpy_type = type_pair[1]
            sql_type = type_pair[2]
            # Optional flags: cast the pandas result back to the numpy dtype,
            # and/or apply an SQL cast when DuckDB reads the pandas-written file.
            add_cast = len(type_pair) > 3 and type_pair[3]
            add_sql_cast = len(type_pair) > 4 and type_pair[4]
            df = pandas.DataFrame.from_dict({
                'val': numpy.array(value_list, dtype=numpy_type)
            })
            duckdb_cursor = duckdb.connect()
            duckdb_cursor.execute(f"CREATE TABLE tmp AS SELECT val::{sql_type} val FROM df")
            duckdb_cursor.execute(f"COPY tmp TO '{temp_name}' (FORMAT PARQUET)")
            read_df = pandas.read_parquet(temp_name)
            if add_cast:
                read_df['val'] = read_df['val'].astype(numpy_type)
            assert df.equals(read_df)
            read_from_duckdb = duckdb_cursor.execute(f"SELECT * FROM parquet_scan('{temp_name}')").df()
            assert read_df.equals(read_from_duckdb)
            # Now write the same frame with pandas/pyarrow and read it back
            # with DuckDB.
            df.to_parquet(temp_name)
            if add_sql_cast:
                read_from_arrow = duckdb_cursor.execute(f"SELECT val::{sql_type} val FROM parquet_scan('{temp_name}')").df()
            else:
                read_from_arrow = duckdb_cursor.execute(f"SELECT * FROM parquet_scan('{temp_name}')").df()
            assert read_df.equals(read_from_arrow)
class TestParquetRoundtrip(object):
    """Parquet round-trip tests per type family; each delegates to
    parquet_types_test and is skipped (returns early) when pyarrow is
    unavailable (``can_run`` is set at import time)."""
    def test_roundtrip_numeric(self, duckdb_cursor):
        """Integer (signed/unsigned, all widths) and float columns."""
        if not can_run:
            return
        type_list = [
            ([-2**7, 0, 2**7-1], numpy.int8, 'TINYINT'),
            ([-2**15, 0, 2**15-1], numpy.int16, 'SMALLINT'),
            ([-2**31, 0, 2**31-1], numpy.int32, 'INTEGER'),
            ([-2**63, 0, 2**63-1], numpy.int64, 'BIGINT'),
            ([0, 42, 2**8-1], numpy.uint8, 'UTINYINT'),
            ([0, 42, 2**16-1], numpy.uint16, 'USMALLINT'),
            ([0, 42, 2**32-1], numpy.uint32, 'UINTEGER', False, True),
            ([0, 42, 2**64-1], numpy.uint64, 'UBIGINT'),
            ([0, 0.5, -0.5], numpy.float32, 'REAL'),
            ([0, 0.5, -0.5], numpy.float64, 'DOUBLE'),
        ]
        parquet_types_test(type_list)
    def test_roundtrip_timestamp(self, duckdb_cursor):
        """Timestamps at every resolution (ns/us/ms/s) plus DATE; includes a
        None to exercise nullability."""
        if not can_run:
            return
        date_time_list = [
            datetime.datetime(2018, 3, 10, 11, 17, 54),
            datetime.datetime(1900, 12, 12, 23, 48, 42),
            None,
            datetime.datetime(1992, 7, 9, 7, 5, 33)
        ]
        type_list = [
            (date_time_list, 'datetime64[ns]', 'TIMESTAMP_NS'),
            (date_time_list, 'datetime64[us]', 'TIMESTAMP'),
            (date_time_list, 'datetime64[ms]', 'TIMESTAMP_MS'),
            (date_time_list, 'datetime64[s]', 'TIMESTAMP_S'),
            (date_time_list, 'datetime64[D]', 'DATE', True)
        ]
        parquet_types_test(type_list)
    def test_roundtrip_varchar(self, duckdb_cursor):
        """String columns, including duplicates and a None."""
        if not can_run:
            return
        varchar_list = [
            'hello',
            'this is a very long string',
            'hello',
            None
        ]
        type_list = [
            (varchar_list, object, 'VARCHAR')
        ]
        parquet_types_test(type_list)
| StarcoderdataPython |
76341 | import tkinter as tk
from tkinter import ttk
class Subject_window():
    """Thin wrapper around a ttk.Frame that tracks named child widgets."""

    def __init__(self, parent):
        """Create the frame inside *parent* and start with no children."""
        self.frame = ttk.Frame(parent)
        self.parent = parent
        self.children = {}

    def grid(self):
        """Place the wrapped frame using the grid geometry manager."""
        self.frame.grid()

    def add_children(self, **kwargs):
        """Register child widgets under their keyword names."""
        self.children.update(kwargs)
1613219 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from covsirphy.util.error import SubsetNotFoundError, deprecate
from covsirphy.cleaning.cbase import CleaningBase
from covsirphy.cleaning.country_data import CountryData
from covsirphy.cleaning.jhu_complement import JHUDataComplementHandler
class JHUData(CleaningBase):
    """
    Data cleaning of JHU-style dataset.

    Args:
        filename (str or None): CSV filename of the dataset
        data (pandas.DataFrame or None):
            Index
                reset index
            Columns
                - Date: Observation date
                - ISO3: ISO3 code
                - Country: country/region name
                - Province: province/prefecture/state name
                - Confirmed: the number of confirmed cases
                - Fatal: the number of fatal cases
                - Recovered: the number of recovered cases
                - Population: population values
        citation (str or None): citation or None (empty)

    Note:
        Either @filename (high priority) or @data must be specified.

    Note:
        The number of infected cases will be (re-)calculated when data cleaning automatically.
    """
    # For JHUData.from_dataframe(): column order expected of a raw dataframe.
    _RAW_COLS_DEFAULT = [
        CleaningBase.DATE, CleaningBase.ISO3, CleaningBase.COUNTRY, CleaningBase.PROVINCE,
        CleaningBase.C, CleaningBase.CI, CleaningBase.F, CleaningBase.R, CleaningBase.N
    ]
def __init__(self, filename=None, data=None, citation=None):
    """Initialise the cleaning base with the JHU variable set (see class docstring)."""
    super().__init__(
        filename=filename, data=data, citation=citation,
        variables=[self.C, self.CI, self.F, self.R, self.N])
    # Lazily computed cache for the expected recovery period [days].
    self._recovery_period = None
@property
def recovery_period(self):
    """
    int: expected value of recovery period [days]
    """
    # Compute lazily on first access and cache the result.
    self._recovery_period = self._recovery_period or self.calculate_recovery_period()
    return self._recovery_period

@recovery_period.setter
def recovery_period(self, value):
    # _ensure_natural_int validates the value (raises for non-natural numbers).
    self._recovery_period = self._ensure_natural_int(value)
def cleaned(self, **kwargs):
    """
    Return the cleaned dataset.

    Args:
        kwargs: keyword arguments will be ignored

    Returns:
        pandas.DataFrame
            Index
                reset index
            Columns
                - Date (pandas.Timestamp): Observation date
                - ISO3: ISO3 code
                - Country (pandas.Category): country/region name
                - Province (pandas.Category): province/prefecture/state name
                - Confirmed (int): the number of confirmed cases
                - Infected (int): the number of currently infected cases
                - Fatal (int): the number of fatal cases
                - Recovered (int): the number of recovered cases
                - Population: population values

    Raises:
        ValueError: the removed @population keyword argument was supplied
    """
    # Membership test on the dict directly; .keys() is redundant.
    if "population" in kwargs:
        raise ValueError(
            "@population was removed in JHUData.cleaned(). Please use JHUData.subset()")
    df = self._cleaned_df.copy()
    # Infected is always recomputed as Confirmed - Fatal - Recovered.
    df[self.CI] = (df[self.C] - df[self.F] - df[self.R]).astype(np.int64)
    return df
def _cleaning(self):
    """
    Perform data cleaning of the raw data.

    Returns:
        pandas.DataFrame
            Index
                reset index
            Columns
                - Date (pd.Timestamp): Observation date
                - ISO3 (str): ISO3 code
                - Country (pandas.Category): country/region name
                - Province (pandas.Category): province/prefecture/state name
                - Confirmed (int): the number of confirmed cases
                - Infected (int): the number of currently infected cases
                - Fatal (int): the number of fatal cases
                - Recovered (int): the number of recovered cases
                - Population (int): population values or 0 (when raw data is 0 values)
    """
    df = self._raw.loc[:, self._raw_cols]
    # Datetime columns: parse and round to day resolution
    df[self.DATE] = pd.to_datetime(df[self.DATE]).dt.round("D")
    try:
        # Drop timezone information when the parsed dates are tz-aware.
        df[self.DATE] = df[self.DATE].dt.tz_convert(None)
    except TypeError:
        # Naive datetimes: nothing to convert.
        pass
    # Province: unset provinces are recorded as the UNKNOWN sentinel
    df[self.PROVINCE] = df[self.PROVINCE].fillna(self.UNKNOWN)
    # Values: forward-fill gaps within each area, then treat leading gaps as 0
    for col in [self.C, self.F, self.R, self.N]:
        df[col] = df.groupby([self.COUNTRY, self.PROVINCE])[col].ffill().fillna(0).astype(np.int64)
    # Calculate Infected = Confirmed - Fatal - Recovered
    df[self.CI] = (df[self.C] - df[self.F] - df[self.R]).astype(np.int64)
    # Update data types to reduce memory
    df[self.AREA_ABBR_COLS] = df[self.AREA_ABBR_COLS].astype("category")
    return df.loc[:, self._raw_cols]
@deprecate("JHUData.replace()", version="2.21.0-xi-fu1")
def replace(self, country_data):
    """
    Replace a part of cleaned dataset with a dataframe.

    Args:
        country_data (covsirphy.CountryData): dataset object of the country

    Returns:
        covsirphy.JHUData: self

    Note:
        Citation of the country data will be added to 'JHUData.citation' description.
    """
    self._ensure_instance(country_data, CountryData, name="country_data")
    df = self._cleaned_df.copy()
    # Read new dataset
    country = country_data.country
    new = country_data.cleaned()
    new[self.ISO3] = self.country_to_iso3(country)
    # Add population data: keep the country's own values when present, then
    # fill from the current cleaned dataset via an index-aligned update.
    new[self.N] = new.loc[:, self.N] if self.N in new else None
    new = new.set_index([self.COUNTRY, self.PROVINCE, self.DATE])
    new.update(df.set_index([self.COUNTRY, self.PROVINCE, self.DATE]).loc[:, [self.N]])
    new = new.reset_index().loc[:, self._raw_cols]
    # Calculate Infected
    new[self.CI] = (new[self.C] - new[self.F] - new[self.R]).astype(np.int64)
    # Remove the data in the country from JHU dataset
    df = df.loc[df[self.COUNTRY] != country]
    # Combine JHU data and the new data
    df = pd.concat([df, new], axis=0, sort=False)
    # Update data types to reduce memory
    df[self.AREA_ABBR_COLS] = df[self.AREA_ABBR_COLS].astype("category")
    self._cleaned_df = df.copy()
    # Citation of the replacement source is appended, not substituted
    self._citation += f"\n{country_data.citation}"
    return self
def _calculate_susceptible(self, subset_df, population):
    """
    Return the subset of dataset.

    Args:
        subset_df (pandas.DataFrame)
            Index
                reset index
            Columns
                - Date (pd.Timestamp): Observation date
                - Confirmed (int): the number of confirmed cases
                - Infected (int): the number of currently infected cases
                - Fatal (int): the number of fatal cases
                - Recovered (int): the number of recovered cases
                - Population (int): population values or 0 values (0 will be ignored)
        population (int or None): population value

    Returns:
        pandas.DataFrame
            Index
                reset index
            Columns
                - Date (pd.Timestamp): Observation date
                - Confirmed (int): the number of confirmed cases
                - Infected (int): the number of currently infected cases
                - Fatal (int): the number of fatal cases
                - Recovered (int): the number of recovered cases
                - Susceptible (int): the number of susceptible cases, if calculated

    Note:
        If @population (high priority) is not None or population values are registered in subset,
        the number of susceptible cases will be calculated.
    """
    df = subset_df.copy()
    # Prefer the explicit @population; otherwise fall back to the column values.
    df[self.S] = (population or df[self.N]) - df[self.C]
    try:
        df[self.S] = df[self.S].astype(np.int64)
    except ValueError:
        # Susceptible could not be computed (e.g. missing population -> NA):
        # return the subset without the Susceptible column.
        return df.loc[:, [self.DATE, self.C, self.CI, self.F, self.R]]
    return df.loc[:, [self.DATE, self.C, self.CI, self.F, self.R, self.S]]
def subset(self, country, province=None, start_date=None, end_date=None,
           population=None, recovered_min=1):
    """
    Return the subset of dataset.

    Args:
        country (str): country name or ISO3 code
        province (str or None): province name
        start_date (str or None): start date, like 22Jan2020
        end_date (str or None): end date, like 01Feb2020
        population (int or None): population value
        recovered_min (int): minimum number of recovered cases records must have

    Returns:
        pandas.DataFrame
            Index
                reset index
            Columns
                - Date (pd.Timestamp): Observation date
                - Confirmed (int): the number of confirmed cases
                - Infected (int): the number of currently infected cases
                - Fatal (int): the number of fatal cases
                - Recovered (int): the number of recovered cases (> 0)
                - Susceptible (int): the number of susceptible cases, if calculated

    Raises:
        SubsetNotFoundError: no records match the area/dates or the recovered_min filter

    Note:
        If @population (high priority) is not None or population values are registered in subset,
        the number of susceptible cases will be calculated.
    """
    country_alias = self.ensure_country_name(country)
    # Subset with area, start/end date
    try:
        subset_df = super().subset(
            country=country, province=province, start_date=start_date, end_date=end_date)
    except SubsetNotFoundError:
        # Re-raise with full context for the caller; "from None" hides the chain
        raise SubsetNotFoundError(
            country=country, country_alias=country_alias, province=province,
            start_date=start_date, end_date=end_date) from None
    # Calculate Susceptible (column added only when population data is usable)
    df = self._calculate_susceptible(subset_df, population)
    # Select records where Recovered >= recovered_min
    recovered_min = self._ensure_natural_int(recovered_min, name="recovered_min", include_zero=True)
    df = df.loc[df[self.R] >= recovered_min, :].reset_index(drop=True)
    if df.empty:
        raise SubsetNotFoundError(
            country=country, country_alias=country_alias, province=province,
            start_date=start_date, end_date=end_date,
            message=f"with 'Recovered >= {recovered_min}'") from None
    return df
@deprecate("JHUData.to_sr()", version="2.17.0-zeta")
def to_sr(self, country, province=None,
          start_date=None, end_date=None, population=None):
    """
    Create Susceptible/Recovered dataset without complement.

    Args:
        country (str): country name
        province (str): province name
        start_date (str or None): start date, like 22Jan2020
        end_date (str or None): end date, like 01Feb2020
        population (int): population value

    Returns:
        pandas.DataFrame
            Index
                Date (pd.Timestamp): Observation date
            Columns
                - Recovered (int): the number of recovered cases (> 0)
                - Susceptible (int): the number of susceptible cases

    Note:
        @population must be specified.
        Records with Recovered > 0 will be used.
    """
    # Population is mandatory here (unlike .subset()).
    population = self._ensure_population(population)
    df = self.subset(
        country=country, province=province,
        start_date=start_date, end_date=end_date, population=population)
    # Index by date, keeping only the Recovered/Susceptible columns.
    return df.set_index(self.DATE).loc[:, [self.R, self.S]]
@classmethod
@deprecate("JHUData.from_dataframe()", new="DataLoader.read_dataframe()",
           version="2.21.0-xi-fu1", ref="https://lisphilar.github.io/covid19-sir/markdown/LOADING.html")
def from_dataframe(cls, dataframe, directory="input"):
    """
    Create JHUData instance using a pandas dataframe.

    Args:
        dataframe (pd.DataFrame): cleaned dataset
            Index
                reset index
            Columns
                - Date: Observation date
                - ISO3: ISO3 code (optional)
                - Country: country/region name
                - Province: province/prefecture/state name
                - Confirmed: the number of confirmed cases
                - Infected: the number of currently infected cases
                - Fatal: the number of fatal cases
                - Recovered: the number of recovered cases
                - Population: population values (optional)
        directory (str): directory to save geometry information (for .map() method)

    Returns:
        covsirphy.JHUData: JHU-style dataset
    """
    df = cls._ensure_dataframe(dataframe, name="dataframe")
    # Fill the optional columns with defaults before validating the full schema.
    df[cls.ISO3] = df[cls.ISO3] if cls.ISO3 in df else cls.UNKNOWN
    df[cls.N] = df[cls.N] if cls.N in df else 0
    instance = cls()
    instance.directory = str(directory)
    instance._cleaned_df = cls._ensure_dataframe(df, name="dataframe", columns=cls._RAW_COLS_DEFAULT)
    return instance
def total(self):
    """
    Calculate total number of cases and rates.

    Returns:
        pandas.DataFrame: group-by Date, sum of the values
            Index
                Date (pandas.Timestamp): Observation date
            Columns
                - Confirmed (int): the number of confirmed cases
                - Infected (int): the number of currently infected cases
                - Fatal (int): the number of fatal cases
                - Recovered (int): the number of recovered cases
                - Fatal per Confirmed (int)
                - Recovered per Confirmed (int)
                - Fatal per (Fatal or Recovered) (int)
    """
    df = self._cleaned_df.copy()
    # Country-level records only (avoid double counting provinces)
    df = df.loc[df[self.PROVINCE] == self.UNKNOWN]
    df = df.groupby(self.DATE).sum()
    total_series = df.loc[:, self.C]
    # Copy the rate column names so the class attribute is not mutated
    r_cols = self.RATE_COLUMNS[:]
    df[r_cols[0]] = df[self.F] / total_series
    df[r_cols[1]] = df[self.R] / total_series
    df[r_cols[2]] = df[self.F] / (df[self.F] + df[self.R])
    # Set the final date of the records so the totals stop at the raw data's end
    raw_df = self._raw.copy()
    final_date = pd.to_datetime(raw_df[self.DATE]).dt.date.max()
    df = df.loc[df.index.date <= final_date]
    return df.loc[:, [*self.VALUE_COLUMNS, *r_cols]]
def countries(self, complement=True, **kwargs):
    """
    Return names of countries where records.

    Args:
        complement (bool): whether say OK for complement or not
        kwargs: the other keyword arguments of JHUData.subset_complement()

    Returns:
        list[str]: list of country names
    """
    df = self._cleaned_df.copy()
    df = df.loc[df[self.PROVINCE] == self.UNKNOWN]
    # All country-level areas
    all_countries = set(df[self.COUNTRY].unique())
    # Countries usable without complement (recovered values reported)
    with_recovered = set(df.loc[df[self.R] > 0, self.COUNTRY].unique())
    if not complement:
        return sorted(with_recovered)
    # Countries usable only via complement
    complemented = {
        name for name in all_countries - with_recovered
        if not self.subset_complement(country=name, **kwargs)[0].empty}
    return sorted(with_recovered | complemented)
@deprecate("JHUData.calculate_closing_period()")
def calculate_closing_period(self):
    """
    Calculate mode value of closing period, time from confirmation to get outcome.

    Returns:
        int: closing period [days]

    Note:
        If no records we can use for calculation were registered, 12 [days] will be applied.
    """
    # Get cleaned dataset at country level
    df = self._cleaned_df.copy()
    df = df.loc[df[self.PROVINCE] == self.UNKNOWN]
    # Select records of countries where recovered values are reported
    df = df.groupby(self.COUNTRY).filter(lambda x: x[self.R].sum() != 0)
    if df.empty:
        # Fallback when recovered values are never reported
        return 12
    # Total number of confirmed/closed cases of selected records
    df = df.groupby(self.DATE).sum()
    df[self.FR] = df[[self.F, self.R]].sum(axis=1)
    df = df.loc[:, [self.C, self.FR]]
    # Calculate how many days to confirmed, closed
    df = df.unstack().reset_index()
    df.columns = ["Variable", self.DATE, "Number"]
    df["Days"] = (df[self.DATE] - df[self.DATE].min()).dt.days
    # Pivot so each case count maps to the day it was reached per variable
    df = df.pivot_table(values="Days", index="Number", columns="Variable")
    df = df.interpolate(limit_direction="both").fillna(method="ffill")
    # Elapsed = days between reaching a count as confirmed and as closed
    df["Elapsed"] = df[self.FR] - df[self.C]
    df = df.loc[df["Elapsed"] > 0]
    # Calculate mode value of closing period
    return int(df["Elapsed"].mode().astype(np.int64).values[0])
def calculate_recovery_period(self):
    """
    Calculate the median value of recovery period of all countries
    where recovered values are reported.

    Returns:
        int: recovery period [days]

    Note:
        If no records we can use for calculation were registered, 17 [days] will be applied.
    """
    default = 17
    # Country-level data for countries that report recovered values
    df = self._cleaned_df.copy()
    df = df.loc[df[self.PROVINCE] == self.UNKNOWN]
    df = df.groupby(self.COUNTRY).filter(lambda x: x[self.R].sum() != 0)
    if df.empty:
        return default
    # Per-country recovery periods; negative values mark failed estimates
    periods = (
        self._calculate_recovery_period_country(df, name)
        for name in df[self.COUNTRY].unique())
    valid = [period for period in periods if period >= 0]
    if not valid:
        return default
    try:
        # Median across countries with a valid estimate
        return int(pd.Series(valid).median())
    except ValueError:
        return default
def _calculate_recovery_period_country(self, valid_df, country, upper_limit_days=90,
                                       lower_limit_days=7, upper_percentage=0.5, lower_percentage=0.5):
    """
    Calculate mode value of recovery period in the country.
    If many mode values were found, mean value of mode values will be returned.

    Args:
        valid_df (pandas.DataFrame):
            Index
                reset_index
            Columns
                Date, Confirmed, Recovered, Fatal
        country (str): country name or ISO3 code
        upper_limit_days (int): maximum number of valid partial recovery periods [days]
        lower_limit_days (int): minimum number of valid partial recovery periods [days]
        upper_percentage (float): fraction of partial recovery periods with value greater than upper_limit_days
        lower_percentage (float): fraction of partial recovery periods with value less than lower_limit_days
    Returns:
        int: mode value of recovery period [days], or -1 when the estimate is unreliable
    """
    # Select country data
    df = valid_df.copy()
    df = df.loc[df[self.COUNTRY] == country].groupby(self.DATE).sum()
    # Calculate "Confirmed - Fatal" (cases that can still recover)
    df["diff"] = df[self.C] - df[self.F]
    df = df.loc[:, ["diff", self.R]]
    # Calculate how many days passed to reach the number of cases
    df = df.unstack().reset_index()
    df.columns = ["Variable", "Date", "Number"]
    df["Days"] = (df[self.DATE] - df[self.DATE].min()).dt.days
    # Calculate recovery period (mode value because bimodal)
    df = df.pivot_table(values="Days", index="Number", columns="Variable")
    df = df.interpolate(limit_area="inside").dropna().astype(np.int64)
    # Elapsed = lag between the recoverable count and the recovered count
    df["Elapsed"] = df[self.R] - df["diff"]
    df = df.loc[df["Elapsed"] > 0]
    # Reject the estimate when too many partial periods are out of range
    per_up = (df["Elapsed"] > upper_limit_days).sum()
    per_lw = (df["Elapsed"] < lower_limit_days).sum()
    if df.empty or per_up / len(df) >= upper_percentage or per_lw / len(df) >= lower_percentage:
        return -1
    return df["Elapsed"].mode().mean()
def subset_complement(self, country, province=None,
                      start_date=None, end_date=None, population=None, **kwargs):
    """
    Return the subset of dataset and complement recovered data, if necessary.
    Records with Recovered > 0 will be selected.

    Args:
        country (str): country name or ISO3 code
        province (str or None): province name
        start_date (str or None): start date, like 22Jan2020
        end_date (str or None): end date, like 01Feb2020
        population (int or None): population value
        kwargs: keyword arguments of JHUDataComplementHandler(), control factors of complement

    Returns:
        tuple(pandas.DataFrame, str or bool):
            pandas.DataFrame:
                Index
                    reset index
                Columns
                    - Date (pd.Timestamp): Observation date
                    - Confirmed (int): the number of confirmed cases
                    - Infected (int): the number of currently infected cases
                    - Fatal (int): the number of fatal cases
                    - Recovered (int): the number of recovered cases ( > 0)
                    - Susceptible (int): the number of susceptible cases, if calculated
            str or bool: kind of complement or False

    Note:
        If @population (high priority) is not None or population values are registered in subset,
        the number of susceptible cases will be calculated.
    """
    # Subset with area (dates are applied after complement, below)
    country_alias = self.ensure_country_name(country)
    try:
        subset_df = super().subset(country=country, province=province, start_date=None, end_date=None)
    except SubsetNotFoundError:
        raise SubsetNotFoundError(
            country=country, country_alias=country_alias, province=province) from None
    # Complement, if necessary
    self._recovery_period = self._recovery_period or self.calculate_recovery_period()
    handler = JHUDataComplementHandler(recovery_period=self._recovery_period, **kwargs)
    df, status, _ = handler.run(subset_df)
    # Subsetting with dates (complement uses the full history first)
    if start_date is not None:
        df = df.loc[df[self.DATE] >= self._ensure_date(start_date, name="start_date")]
    if end_date is not None:
        df = df.loc[df[self.DATE] <= self._ensure_date(end_date, name="end_date")]
    if df.empty:
        raise SubsetNotFoundError(
            country=country, country_alias=country_alias, province=province,
            start_date=start_date, end_date=end_date) from None
    # Calculate Susceptible
    # NOTE(review): this relies on index alignment between the complemented
    # frame and subset_df -- confirm both keep the original row index.
    df.loc[:, self.N] = subset_df.loc[:, self.N]
    df = self._calculate_susceptible(df, population)
    # Kind of complement or False
    is_complemented = status or False
    # Select records where Recovered > 0
    df = df.loc[df[self.R] > 0, :].reset_index(drop=True)
    return (df, is_complemented)
def records(self, country, province=None, start_date=None, end_date=None, population=None,
            auto_complement=True, **kwargs):
    """
    JHU-style dataset for the area from the start date to the end date.
    Records with Recovered > 0 will be selected.

    Args:
        country (str): country name or ISO3 code
        province (str or None): province name
        start_date (str or None): start date, like 22Jan2020
        end_date (str or None): end date, like 01Feb2020
        population (int or None): population value
        auto_complement (bool): if True and necessary, the number of cases will be complemented
        kwargs: the other arguments of JHUData.subset_complement()

    Raises:
        SubsetNotFoundError: failed in subsetting because of lack of data

    Returns:
        tuple(pandas.DataFrame, bool):
            pandas.DataFrame:
                Index
                    reset index
                Columns
                    - Date (pd.Timestamp): Observation date
                    - Confirmed (int): the number of confirmed cases
                    - Infected (int): the number of currently infected cases
                    - Fatal (int): the number of fatal cases
                    - Recovered (int): the number of recovered cases ( > 0)
                    - Susceptible (int): the number of susceptible cases, if calculated
            str or bool: kind of complement or False

    Note:
        If @population (high priority) is not None or population values are registered in subset,
        the number of susceptible cases will be calculated.

    Note:
        If necessary and @auto_complement is True, complement recovered data.
    """
    country_alias = self.ensure_country_name(country)
    subset_arg_dict = {
        "country": country, "province": province,
        "start_date": start_date, "end_date": end_date, "population": population,
    }
    if auto_complement:
        # Try the complemented subset first; fall through on an empty result
        df, is_complemented = self.subset_complement(**subset_arg_dict, **kwargs)
        if not df.empty:
            return (df, is_complemented)
    try:
        # Plain subset without complement (is_complemented is False)
        return (self.subset(**subset_arg_dict), False)
    except ValueError:
        raise SubsetNotFoundError(
            country=country, country_alias=country_alias, province=province,
            start_date=start_date, end_date=end_date, message="with 'Recovered > 0'") from None
def show_complement(self, country=None, province=None,
                    start_date=None, end_date=None, **kwargs):
    """
    To monitor effectivity and safety of complement on JHU subset,
    we need to know what kind of complement was done for JHU subset
    for each country (if country/countries specified) or for all countries.

    Args:
        country (str or list[str] or None): country/countries name or None (all countries)
        province (str or None): province name
        start_date (str or None): start date, like 22Jan2020
        end_date (str or None): end date, like 01Feb2020
        kwargs: keyword arguments of JHUDataComplementHandler(), control factors of complement

    Raises:
        ValueError: @province was specified when @country is not a string
        covsirphy.SubsetNotFoundError: No records were registered for the area/dates

    Returns:
        pandas.DataFrame
            Index
                reset index
            Columns
                - country (str): country name
                - province (str): province name
                - Monotonic_confirmed (bool): True if applied for confirmed cases or False otherwise
                - Monotonic_fatal (bool): True if applied for fatal cases or False otherwise
                - Monotonic_recovered (bool): True if applied for recovered or False otherwise
                - Full_recovered (bool): True if applied for recovered or False otherwise
                - Partial_recovered (bool): True if applied for recovered or False otherwise
    """
    self._recovery_period = self._recovery_period or self.calculate_recovery_period()
    # Area name: default to every country except the synthetic "Others"
    if country is None:
        country = [c for c in self._cleaned_df[self.COUNTRY].unique() if c != "Others"]
    province = province or self.UNKNOWN
    if not isinstance(country, str) and province != self.UNKNOWN:
        raise ValueError("@province cannot be specified when @country is not a string.")
    if not isinstance(country, list):
        country = [country]
    # Create complement handler
    handler = JHUDataComplementHandler(recovery_period=self._recovery_period, **kwargs)
    # Check each country
    complement_df = pd.DataFrame(
        columns=[self.COUNTRY, self.PROVINCE, *JHUDataComplementHandler.SHOW_COMPLEMENT_FULL_COLS])
    complement_df.set_index(self.COUNTRY, inplace=True)
    for cur_country in country:
        try:
            subset_df = super().subset(
                country=cur_country, province=province, start_date=start_date, end_date=end_date)
        except SubsetNotFoundError:
            raise SubsetNotFoundError(
                country=cur_country, province=province,
                start_date=start_date, end_date=end_date) from None
        # Discard the dataframe and status; keep only the complement report
        * _, complement_dict = handler.run(subset_df)
        complement_dict_values = pd.Series(complement_dict.values(), dtype=bool).values
        complement_df.loc[cur_country] = [province, *complement_dict_values]
    return complement_df.reset_index()
def map(self, country=None, variable="Confirmed", date=None, **kwargs):
    """
    Create global colored map to show the values.

    Args:
        country (str or None): country name or None (global map)
        variable (str): variable name to show
        date (str or None): date of the records or None (the last value)
        kwargs: arguments of ColoredMap() and ColoredMap.plot()

    Note:
        When @country is None, country level data will be shown on global map.
        When @country is a country name, province level data will be shown on country map.
    """
    # Title parts: fall back to the newest observation date
    shown_date = date or self.cleaned()[self.DATE].max().strftime(self.DATE_FORMAT)
    area = country or "Global"
    title = f"{area}: the number of {variable.lower()} cases on {shown_date}"
    if country is None:
        # Global map (country-level data)
        return self._colored_map_global(
            variable=variable, title=title, date=date, **kwargs)
    # Country-specific map (province-level data)
    return self._colored_map_country(
        country=country, variable=variable, title=title, date=date, **kwargs)
| StarcoderdataPython |
1636648 | <reponame>sleepingAnt/viewfinder
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Secrets test.
Test secrets module. user vs shared, encrypted vs plain.
"""
__author__ = '<EMAIL> (<NAME>)'
import getpass
import json
import logging
import mock
import os
import shutil
import tempfile
import unittest
from tornado import options
from viewfinder.backend.base import ami_metadata, base_options, secrets, testing
from viewfinder.backend.base.exceptions import CannotReadEncryptedSecretError
class SecretsTestCase(unittest.TestCase):
    """Exercise user/shared secrets managers with plain and encrypted stores."""

    def setUp(self):
        # Fake out the keyring to None for the entire test.
        self._prev_keyring = secrets.keyring
        secrets.keyring = None
        # Save current flag values so tearDown can restore them.
        self._domain = options.options.domain
        self._prev_user_dir = options.options.user_secrets_dir
        self._prev_shared_dir = options.options.secrets_dir
        self._prev_devbox = options.options.devbox
        # Create tmp directories (with a per-domain subdirectory) and point
        # the flags at them.
        self._user_dir = tempfile.mkdtemp()
        options.options.user_secrets_dir = self._user_dir
        os.mkdir(os.path.join(self._user_dir, self._domain))
        self._shared_dir = tempfile.mkdtemp()
        options.options.secrets_dir = self._shared_dir
        os.mkdir(os.path.join(self._shared_dir, self._domain))
def tearDown(self):
    # Recursively delete temp directories and restore flag values.
    shutil.rmtree(self._user_dir)
    shutil.rmtree(self._shared_dir)
    options.options.user_secrets_dir = self._prev_user_dir
    options.options.secrets_dir = self._prev_shared_dir
    options.options.devbox = self._prev_devbox
    secrets.keyring = self._prev_keyring
    # Reset the module-level singletons so later tests re-initialize them.
    secrets._user_secrets_manager = None
    secrets._shared_secrets_manager = None
def testNoDomainDir(self):
    """Test secrets manager without a domain dir."""
    manager = secrets.SecretsManager('test', 'fake_domain', self._shared_dir)
    # Init tolerates a missing domain directory (absent user secrets must be
    # supported); the manager then behaves like an empty store.
    manager.Init()
    self.assertEqual(len(manager.ListSecrets()), 0)
    # Writing, however, requires the directory and therefore fails.
    self.assertRaises(IOError, manager.PutSecret, 'foo', 'codeforfoo')
def testPlain(self):
    """Test secrets manager with plain-text secrets."""
    mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
    # Empty directory, Init will not require a passphrase.
    mgr.Init()
    self.assertEqual(len(mgr.ListSecrets()), 0)
    self.assertRaises(KeyError, mgr.GetSecret, 'foo')
    self.assertFalse(mgr.HasSecret('foo'))
    # Put a secret and read it back through the same manager.
    # NOTE(review): the original comment claimed the directory doesn't exist
    # ("switch domains first"), but no domain switch happens here -- the
    # write succeeds; confirm intent.
    mgr.PutSecret('foo', 'codeforfoo')
    self.assertTrue(mgr.HasSecret('foo'))
    self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')
    self.assertEqual(len(mgr.ListSecrets()), 1)
    # Now check that the underlying file exists.
    with open(os.path.join(self._shared_dir, self._domain, 'foo')) as f:
        self.assertEqual(f.read(), 'codeforfoo')
    # Overwrite secret.
    mgr.PutSecret('foo', 'newcodeforfoo')
    self.assertEqual(mgr.GetSecret('foo'), 'newcodeforfoo')
    self.assertEqual(len(mgr.ListSecrets()), 1)
    # Now check that the underlying file exists.
    with open(os.path.join(self._shared_dir, self._domain, 'foo')) as f:
        self.assertEqual(f.read(), 'newcodeforfoo')
    # Create a new secrets manager: it reads the secret back from disk.
    mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
    mgr.Init()
    self.assertTrue(mgr.HasSecret('foo'))
    self.assertEqual(mgr.GetSecret('foo'), 'newcodeforfoo')
    self.assertEqual(len(mgr.ListSecrets()), 1)
    # Passing a passphrase as a flag does not impact plain-text secrets.
    options.options.passphrase = '<PASSWORD>'
    mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
    mgr.Init()
    self.assertEqual(mgr.GetSecret('foo'), 'newcodeforfoo')
def testEncrypted(self):
    """Test secrets manager with encrypted secrets."""
    # The only way to make a secret manager encrypt when empty is to ask it
    # to prompt for a passphrase. It does so using getpass.getpass.
    passphrase = '<PASSWORD>!'
    with mock.patch.object(secrets.getpass, 'getpass') as getpass:
        getpass.return_value = passphrase
        mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
        mgr.Init(should_prompt=True)
        # Secret will be encrypted.
        mgr.PutSecret('foo', 'codeforfoo')
        self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')
        # On-disk form is a JSON (cipher, ciphertext) pair, not the plaintext.
        with open(os.path.join(self._shared_dir, self._domain, 'foo')) as f:
            contents = f.read()
            self.assertNotEqual(contents, 'codeforfoo')
            (cipher, ciphertext) = json.loads(contents)
            self.assertEqual(cipher, 'AES')
            # TODO(marc): maybe we should test the encryption itself.
    # Now create a new secrets manager. We do not ask it to prompt, it will figure it out
    # all by itself. It does this in a number of ways:
    ##################### --devbox=False ########################
    options.options.devbox = False
    # Set stdin to raise an exception, just to make sure we're not using it.
    with mock.patch.object(secrets.getpass, 'getpass') as getpass:
        getpass.side_effect = Exception('you should not be using stdin in --devbox=False mode')
        # Uses --passphrase if specified.
        options.options.passphrase = passphrase
        mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
        mgr.Init()
        self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')
        # We get an assertion error when a passphrase is supplied but bad. This is because it fails on sha sum.
        options.options.passphrase = 'bad passphrase'
        mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
        self.assertRaises(AssertionError, mgr.Init)
        # Uses AMI metadata otherwise.
        options.options.passphrase = None
        # No AMI fetched, or passphrase not one of the fetched fields.
        mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
        self.assertRaisesRegexp(CannotReadEncryptedSecretError, 'failed to fetch passphrase from AWS instance metadata',
                                mgr.Init)
        # Good passphrase from AMI metadata.
        ami_metadata.SetAMIMetadata({'user-data/passphrase': passphrase})
        mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
        mgr.Init()
        self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')
        # Bad passphrase from AMI metadata.
        ami_metadata.SetAMIMetadata({'user-data/passphrase': '<PASSWORD>.'})
        mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
        self.assertRaises(AssertionError, mgr.Init)
    ##################### --devbox=True ########################
    options.options.devbox = True
    # Set bad AMI metadata just to show that we never use it.
    ami_metadata.SetAMIMetadata({'user-data/passphrase': '<PASSWORD>.'})
    # Uses --passphrase if specified.
    options.options.passphrase = passphrase
    mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
    mgr.Init()
    self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')
    # If --passphrase is None and we cannot prompt, we have no way of getting the passphrase.
    options.options.passphrase = None
    mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
    self.assertRaisesRegexp(CannotReadEncryptedSecretError, 'passphrase is required but was not provided',
                            mgr.Init, can_prompt=False)
    # Passphrase is read from stdin if prompting is allowed.
    with mock.patch.object(secrets.getpass, 'getpass') as getpass:
        getpass.return_value = passphrase
        mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
        mgr.Init()
        self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')
    # Pass a bad passphrase on stdin.
    with mock.patch.object(secrets.getpass, 'getpass') as getpass:
        getpass.return_value = 'not a good passphrase'
        mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
        self.assertRaises(AssertionError, mgr.Init)
def testMultipleManagers(self):
    """Test the secrets managers in their natural habitat: automatic selection of user vs shared based on flags."""
    # These may not be None if we've been running other tests using run-tests.
    secrets._user_secrets_manager = None
    secrets._shared_secrets_manager = None
    # Devbox mode: init user secrets, and lazily init shared secrets when
    # requesting a secret not in user secrets.
    options.options.devbox = True
    secrets.InitSecrets()
    self.assertIsNotNone(secrets._user_secrets_manager)
    self.assertIsNone(secrets._shared_secrets_manager)
    # Request a secret contained in user secrets: shared secrets remain uninitialized.
    secrets._user_secrets_manager.PutSecret('foo', 'codeforfoo')
    self.assertEqual(secrets.GetSecret('foo'), 'codeforfoo')
    self.assertIsNotNone(secrets._user_secrets_manager)
    self.assertIsNone(secrets._shared_secrets_manager)
    # Request a secret not contained anywhere. As soon as we notice that it's not in user secrets, we initialize
    # the shared secrets and look there, which fails.
    self.assertRaises(KeyError, secrets.GetSecret, 'bar')
    self.assertIsNotNone(secrets._user_secrets_manager)
    self.assertIsNotNone(secrets._shared_secrets_manager)
    # Non-devbox mode: user secrets are never used. shared secrets are initialized right away.
    options.options.devbox = False
    secrets._user_secrets_manager = None
    secrets._shared_secrets_manager = None
    secrets.InitSecrets()
    self.assertIsNone(secrets._user_secrets_manager)
    self.assertIsNotNone(secrets._shared_secrets_manager)
    # Lookup whatever we want, we still won't use the user secrets.
    secrets._shared_secrets_manager.PutSecret('foo', 'codeforfoo')
    self.assertEqual(secrets.GetSecret('foo'), 'codeforfoo')
    self.assertRaises(KeyError, secrets.GetSecret, 'bar')
    self.assertIsNone(secrets._user_secrets_manager)
    self.assertIsNotNone(secrets._shared_secrets_manager)
| StarcoderdataPython |
3315547 | <reponame>FZJ-INM5/JuHPLC<filename>JuHPLC/API/Calibration.py
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.models import Permission, User
from django.shortcuts import get_object_or_404
from JuHPLC.models import *
def delete(request, calibrationid):
    """Delete the Calibration identified by *calibrationid*.

    Requires an authenticated user holding the
    ``chromatogram.chromatogram_delete`` permission; otherwise answers 401.
    """
    if request.user and request.user.is_authenticated and request.user.has_perm('chromatogram.chromatogram_delete'):
        # BUG FIX: a bare Calibration.objects.get() raised an unhandled
        # DoesNotExist (HTTP 500) for an unknown pk. get_object_or_404 is
        # already imported at the top of this module and turns that into 404.
        calibration = get_object_or_404(Calibration, pk=calibrationid)
        calibration.delete()
        return HttpResponse()
    else:
        return HttpResponse(status=401)
| StarcoderdataPython |
3369627 | <gh_stars>0
import logging
import os
import subprocess
import sys
import threading
import time
import traceback
import datetime
import requests
import spur
from requests.exceptions import ConnectTimeout, ConnectionError
# Writable sink for output we want to discard -- presumably handed to
# subprocess calls elsewhere; it is not referenced in this chunk (TODO confirm).
FNULL = open(os.devnull, 'w')
# Single-element list used as a mutable module-level flag so helpers can turn
# off external-viewer launching at runtime (see disable_external_tool()).
__LAUNCH_EXTERNAL_VIEWER__ = [True]
def quote_if_necessary(s):
    """Wrap *s* in double quotes when it contains a space, else return it as-is."""
    return '"' + s + '"' if ' ' in s else s
def rsync(ec2_ip, key_file, ec2_file, local_file, upload=True):
    """Copy files with rsync: locally when ec2_ip is 'localhost', over ssh otherwise.

    NOTE(review): in the localhost branch *upload* is ignored and the direction
    is always local_file -> ec2_file -- confirm this is intended.
    """
    if ec2_ip == 'localhost':
        rsync_local(local_file, ec2_file)
    else:
        rsync_remote(ec2_ip, key_file, ec2_file, local_file, upload=upload)
def rsync_local(source, destination):
    """Run a quiet local rsync from *source* to *destination*, streaming output to the logger."""
    out_sink = LogWrapper("localhost", LogWrapper.stdout)
    err_sink = LogWrapper("localhost", LogWrapper.stderr)
    spur.LocalShell().run(["rsync", "-vazrq", source, destination],
                          stderr=err_sink, stdout=out_sink, allow_error=True)
def rsync_remote(ec2_ip, key_file, ec2_file, local_file, upload=False):
    """rsync between this machine and *ec2_ip* over ssh, authenticated with *key_file*.

    When *upload* is true the local file is the source; otherwise the remote one is.
    """
    out_sink = LogWrapper("localhost", LogWrapper.stdout)
    err_sink = LogWrapper("localhost", LogWrapper.stderr)
    remote_file = "%s:%s" % (ec2_ip, ec2_file)
    if upload:
        source, destination = local_file, remote_file
    else:
        source, destination = remote_file, local_file
    # Host-key checking is disabled: these are throwaway cloud hosts.
    command = ["rsync", "-azrq", "-e",
               "ssh -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" % key_file,
               source, destination]
    spur.LocalShell().run(command, stderr=err_sink, stdout=out_sink, allow_error=True)
def now():
    """Return the current local time with the microseconds zeroed out."""
    current = datetime.datetime.now()
    return current.replace(microsecond=0)
def disable_external_tool():
    """Flip the module-level flag so open_with_external_tool() becomes a no-op."""
    global __LAUNCH_EXTERNAL_VIEWER__
    __LAUNCH_EXTERNAL_VIEWER__[0] = False
def open_with_external_tool(resources_to_open):
    """
    Opens the specified resources with an external tool, based on the OS

    :param resources_to_open: The resources to open
    :type resources_to_open: list(str)
    :return: Nothing
    :rtype: None
    """
    global __LAUNCH_EXTERNAL_VIEWER__
    if not __LAUNCH_EXTERNAL_VIEWER__[0]:
        return
    # Try the macOS/BSD "open" first, then Debian's "see"; both accept
    # several files in one invocation.
    if try_open_with('open', resources_to_open):
        return
    if try_open_with('see', resources_to_open):
        return
    # On linux the gnome-open and xdg-open takes only one file at a time.
    for resource_to_open in resources_to_open:
        if not try_open_with('gnome-open', resource_to_open):
            if not try_open_with('xdg-open', resource_to_open):
                # All utilities failed: at least tell the user where it is.
                print("Output images: %s" % resource_to_open)
def poll_url(url, max_poll_time_seconds, success_callback):
    """Poll *url* until *success_callback* accepts a response or time runs out.

    :param url: the URL to GET repeatedly (60s per-request timeout).
    :param max_poll_time_seconds: overall polling budget in seconds.
    :param success_callback: predicate applied to each requests.Response.
    :return: True once the desired response is seen, False on timeout.
    """
    log = logging.getLogger("util")
    start_time = now()
    log.info("Polling url: %s" % url)
    while True:
        try:
            response = requests.get(url, timeout=60)
            success = success_callback(response)
            log.debug(".")
            if success:
                log.info("Achieved desired response from url %s in %s seconds", url, now() - start_time)
                return True
            time.sleep(15)
        except (ConnectTimeout, ConnectionError):
            pass
        # BUG FIX: the original did this check in a `finally` block whose
        # `return False` silently overrode a pending `return True`, and used
        # timedelta.seconds, which ignores whole days. Check after each
        # attempt with total_seconds() instead (log.warn is also deprecated).
        if (now() - start_time).total_seconds() > max_poll_time_seconds:
            log.warning("Timed out polling for response from %s", url)
            return False
def try_open_with(utility, resources_to_open):
    """
    Try and open the specified resource with the specified utility

    :param utility: The utility to use (i.e. open, see, gnome-open)
    :param resources_to_open: A list of resources to open
    :type resources_to_open: list(str)
    :return: True if opened, else False
    :rtype: bool
    """
    if isinstance(resources_to_open, list):
        extra_args = resources_to_open
    elif isinstance(resources_to_open, str):
        extra_args = [resources_to_open]
    else:
        return False
    cmd = [utility] + extra_args
    try:
        logging.debug("Running command: %s" % cmd)
        subprocess.call(cmd)
        return True
    except StandardError:  # Python 2: base of most concrete exception types
        return False
class LogWrapper:
stdout = "STDOUT"
stderr = "STDERR"
"""
Note this class is _not_ threadsafe it wraps a logger with a "write" method so that the output of remote commands
can be streamed to the logger
"""
def __init__(self, hostname, name):
self.parent_thread = threading.currentThread().getName()
self._log = logging.getLogger("command")
self.hostname = hostname
self.buffer = []
self.extra_log_arguments = {'parent_thread': str(self.parent_thread),
'hostname': str(self.hostname),
'out_name': name}
def write(self, message):
try:
if message == '\n':
message = "".join(self.buffer)
if isinstance(message, unicode):
self._log.debug(unicode.decode(message, errors="ignore"), extra=self.extra_log_arguments)
else:
self._log.debug(message, extra=self.extra_log_arguments)
self.buffer = []
else:
self.buffer.append(message)
except Exception as ex:
traceback.print_exc(file=sys.stderr)
print "Exception logging remote command %s" % ex
| StarcoderdataPython |
1742035 | <reponame>velocist/TS4CheatsInfo
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\interactions\generic_affordance_chooser.py
# Compiled at: 2015-01-14 22:36:47
# Size of source mod 2**32: 3778 bytes
from event_testing.resolver import SingleSimResolver
from event_testing.results import TestResult
from interactions.base.immediate_interaction import ImmediateSuperInteraction
from interactions.base.super_interaction import SuperInteraction
from ui.ui_dialog import UiDialogOkCancel
import interactions
class GenericChooseBetweenTwoAffordancesSuperInteraction(ImmediateSuperInteraction):
    # Decompiled EA gameplay script: shows an Ok/Cancel dialog and pushes one
    # of two tuned super-affordances depending on the player's answer.
    INSTANCE_TUNABLES = {'choice_dialog':UiDialogOkCancel.TunableFactory(description='\n A Dialog that prompts the user with a two button dialog. The\n chosen button will result in one of two affordances being chosen.\n '),
    'accept_affordance':SuperInteraction.TunablePackSafeReference(description='\n The affordance to push on the sim if the user clicks on the \n accept/ok button.\n '),
    'reject_affordance':SuperInteraction.TunablePackSafeReference(description='\n The affordance to push on the Sim if the user chooses to click\n on the reject/cancel button.\n ')}

    @classmethod
    def _test(cls, target, context, **interaction_parameters):
        # Unavailable only when *both* pack-safe references failed to resolve.
        if cls.accept_affordance is None:
            if cls.reject_affordance is None:
                return TestResult(False, 'The accept and reject affordances are unavailable with the currently installed packs.')
        return (super()._test)(target, context, **interaction_parameters)

    def _run_interaction_gen(self, timeline):
        context = self.context.clone_for_sim((self.sim), insert_strategy=(interactions.context.QueueInsertStrategy.LAST))
        if self.accept_affordance is None or self.reject_affordance is None:
            # Only one affordance survived pack loading: push it, no dialog.
            affordance = self.accept_affordance or self.reject_affordance
            self.sim.push_super_affordance(affordance, target=(self.target), context=context)
            return

        def _on_response(dialog):
            # Ok -> accept_affordance, Cancel -> reject_affordance.
            affordance = self.accept_affordance if dialog.accepted else self.reject_affordance
            self.sim.push_super_affordance(affordance, target=(self.target),
                context=context)

        dialog = self.choice_dialog((self.sim), resolver=(SingleSimResolver(self.sim)))
        dialog.show_dialog(on_response=_on_response)
        if False:
            # Decompiler artifact: unreachable yield keeps this a generator.
            yield None
3263924 | <filename>orthogonal/topologyShapeMetric/OrthogonalException.py
class OrthogonalException(Exception):
    """Error raised by the topology-shape-metric orthogonal layout code."""
| StarcoderdataPython |
1745313 | <gh_stars>0
from functools import lru_cache
from os.path import realpath, join, dirname
import json
# Absolute path to the project root (one level above this module).
PROJECT_PATH = realpath(join(dirname(__file__), './../'))


# maxsize=1: config.json is read from disk at most once per process.
@lru_cache(maxsize=1)
def get_config():
    """Load and cache the JSON configuration from the project root."""
    with open(join(PROJECT_PATH, 'config.json'), 'r') as f:
        return json.load(f)


def _value(key):
    """Return the configuration entry for *key* (raises KeyError if absent)."""
    return get_config()[key]


# Resolved eagerly at import time so a missing config or key fails fast.
TELEGRAM_TOKEN = _value('TELEGRAM_TOKEN')
WG_APP_ID = _value('WG_APP_ID')
| StarcoderdataPython |
3254075 | <filename>meiduo/meiduo/apps/meiduo_admin/views/image.py
from django.conf import settings
from rest_framework.viewsets import ModelViewSet
from rest_framework.views import APIView
from rest_framework.response import Response
from goods.models import SKUImage, SKU
from meiduo_admin.utils import PageNum
from meiduo_admin.serializers.image import ImageSerializer, SKUSerializer
from fdfs_client.client import Fdfs_client
#获取图片列表, 获取修改图片的详情信息RetrieveModelMixin实现
class ImageView(ModelViewSet):
    """CRUD endpoints for SKU images; the files themselves live in FastDFS."""

    serializer_class = ImageSerializer
    queryset = SKUImage.objects.all()
    pagination_class = PageNum

    @staticmethod
    def _upload_to_fdfs(image):
        """Upload *image* (an uploaded file) to FastDFS.

        Returns the remote file id on success, or None when no file was sent
        or the storage rejected it. (Extracted because create() and update()
        previously duplicated this logic verbatim.)
        """
        if image is None:
            return None
        client = Fdfs_client(settings.FDFS_CLIENT_CONF)
        result = client.upload_by_buffer(image.read())
        if result['Status'] != 'Upload successed.':
            return None
        return result['Remote file_id']

    def create(self, request, *args, **kwargs):
        """Store a new image and bind it to the SKU given in the payload."""
        image_url = self._upload_to_fdfs(request.FILES.get('image'))
        if image_url is None:
            return Response(status=403)
        sku_id = request.data.get('sku')
        img = SKUImage.objects.create(sku_id=sku_id, image=image_url)
        # img.image is the stored storage path; .url resolves it through the
        # configured storage backend.
        return Response(
            {'id': img.id, 'sku': sku_id, 'image': img.image.url},
            status=201
        )

    def update(self, request, *args, **kwargs):
        """Replace the stored file of the SKUImage addressed by the URL pk."""
        img = self.get_object()
        image_url = self._upload_to_fdfs(request.FILES.get('image'))
        if image_url is None:
            return Response(status=403)
        img.image = image_url
        img.save()
        sku_id = request.data.get('sku')
        return Response(
            {'id': img.id, 'sku': sku_id, 'image': img.image.url},
            status=201
        )
#获取图片关联的sku的id
class SKUView(APIView):
    """Read-only listing of all SKUs, used when associating images with SKUs."""

    def get(self, request):
        queryset = SKU.objects.all()
        serializer = SKUSerializer(queryset, many=True)
        return Response(serializer.data)
| StarcoderdataPython |
193117 | <gh_stars>10-100
import pytest
import autofit as af
from autofit.exc import PriorLimitException
@pytest.fixture(name="prior")
def make_prior():
    """Gaussian prior bounded below at zero."""
    return af.GaussianPrior(mean=3.0, sigma=5.0, lower_limit=0.0)


def test_intrinsic_lower_limit(prior):
    with pytest.raises(PriorLimitException):
        prior.value_for(0.0)


def test_prior_factor(prior):
    prior.factor(1.0)
    with pytest.raises(PriorLimitException):
        prior.factor(-1.0)


def test_optional(prior):
    # Limits can be bypassed explicitly.
    prior.value_for(0.0, ignore_prior_limits=True)


@pytest.fixture(name="model")
def make_model(prior):
    return af.Model(af.Gaussian, centre=prior)


def test_vector_from_unit_vector(model):
    with pytest.raises(PriorLimitException):
        model.vector_from_unit_vector([0, 0, 0])


def test_vector_ignore_limits(model):
    model.vector_from_unit_vector([0, 0, 0], ignore_prior_limits=True)


@pytest.mark.parametrize(
    "prior",
    [
        af.LogUniformPrior(),
        af.UniformPrior(),
        af.GaussianPrior(mean=0, sigma=1, lower_limit=0.0, upper_limit=1.0),
    ],
)
@pytest.mark.parametrize("value", [-1.0, 2.0])
def test_all_priors(prior, value):
    # Out-of-range values raise unless limits are explicitly ignored.
    with pytest.raises(PriorLimitException):
        prior.value_for(value)
    prior.value_for(value, ignore_prior_limits=True)
| StarcoderdataPython |
1714268 | <reponame>yarenty/mindsdb
from mindsdb.api.mongo.classes import Responder
import mindsdb.api.mongo.functions as helpers
class Responce(Responder):
    # Fires for MongoDB's `getFreeMonitoringStatus` admin command.
    when = {'getFreeMonitoringStatus': helpers.is_true}
    # Always report free monitoring as 'undecided'.
    result = {
        'state': 'undecided',
        'ok': 1
    }


# Module-level singleton picked up by the responder dispatch.
responder = Responce()
| StarcoderdataPython |
1748393 | <reponame>ashleycampion/p-and-s
# Programme for calculating BMI
# BMI formula is: weight-in-kgs / height-in-metres ** 2
# First store the user's input into variables
# Read the two measurements, converting height to metres right away.
height_m = float(input("Enter height in centimetres:")) / 100
weight = float(input("Enter weight in kgs:"))
# BMI = weight-in-kg / height-in-metres squared.
bmi = weight / height_m ** 2
# 'F' is the uppercase fixed-point format: two decimal places.
print("Your BMI is {:.2F}".format(bmi))
| StarcoderdataPython |
3354421 | """Define a fake kvstore
This kvstore is used when running in the standalone mode
"""
from .. import backend as F
class KVClient(object):
    ''' The fake KVStore client.

    This is to mimic the distributed KVStore client. It's used for DistGraph
    in standalone mode.
    '''

    def __init__(self):
        self._data = {}
        self._push_handlers = {}
        self._pull_handlers = {}

    def barrier(self):
        '''No-op barrier: there is only one process in standalone mode.'''

    def register_push_handler(self, name, func):
        '''Install a custom push handler for *name*.'''
        self._push_handlers[name] = func

    def register_pull_handler(self, name, func):
        '''Install a custom pull handler for *name*.'''
        self._pull_handlers[name] = func

    def add_data(self, name, tensor):
        '''Store an existing tensor under *name*.'''
        self._data[name] = tensor

    def init_data(self, name, shape, dtype, _, init_func):
        '''Create new data through init_func(shape, dtype) and store it.'''
        self._data[name] = init_func(shape, dtype)

    def data_name_list(self):
        '''Names of all stored data.'''
        return list(self._data)

    def get_data_meta(self, name):
        '''Return (dtype, shape, None) for the stored data.'''
        tensor = self._data[name]
        return F.dtype(tensor), F.shape(tensor), None

    def push(self, name, id_tensor, data_tensor):
        '''Write the rows *id_tensor* of *name*, via a handler when registered.'''
        handler = self._push_handlers.get(name)
        if handler is not None:
            handler(self._data, name, id_tensor, data_tensor)
        else:
            F.scatter_row_inplace(self._data[name], id_tensor, data_tensor)

    def pull(self, name, id_tensor):
        '''Read the rows *id_tensor* of *name*, via a handler when registered.'''
        handler = self._pull_handlers.get(name)
        if handler is not None:
            return handler(self._data, name, id_tensor)
        return F.gather_row(self._data[name], id_tensor)
| StarcoderdataPython |
1719166 | <gh_stars>1-10
# coding: utf-8
# NOTE(review): this demo deliberately (?) shadows the builtin `input`.
input = ["red", "green", "blue", "yellow"]
# Assigning into the empty slice [-1:1] (start index past the end index)
# inserts the new items just before the last element; nothing is removed.
input[-1:1] = ["black", "maroon"]
print(input)
| StarcoderdataPython |
1600920 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import os
import numpy as np
import warnings
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# Ignore warnings
warnings.filterwarnings("ignore")
class AdienceDataset(Dataset):

    def __init__(self, image_dirs, transforms=None):
        """
        Dataset for Adience Database

        Args:
            image_dirs (list of str): directories containing the images.
            transforms: optional transform applied to each loaded image.
        """
        self.image_dirs = image_dirs
        self.image_files = []
        self.targets = []
        self.transforms = transforms
        for directory in self.image_dirs:
            # next(os.walk(dir))[2] lists only the files directly inside dir.
            for filename in next(os.walk(directory))[2]:
                self.image_files.append(os.path.join(directory, filename))
                # The 4th character of the file name encodes the class label.
                self.targets.append(filename[3])

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        target = torch.LongTensor([int(self.targets[idx])])
        image = Image.open(self.image_files[idx])
        if self.transforms is not None:
            image = self.transforms(image)
        return (image, target)
3205067 | <reponame>sarveshwar-s/stockcast
from textblob import TextBlob
import tweepy as tw
import requests as req
import os
import pandas as pd
consumer_key= 'YOUR_CONSUMER_KEY'
consumer_secret= 'YOUR_CONSUMER_SECRET_KEY'
access_token= '<PASSWORD>_ACCESS_TOKEN'
access_token_secret= 'YOUR_SECRET_ACCESS_TOKEN'
def twitter_analysis(compname):
    """Naive sentiment poll over the 10 most recent tweets mentioning *compname*.

    Returns [positive%, negative%, neutral%].
    NOTE(review): the percentages divide *sentence* counts by 10 (the tweet
    count); multi-sentence tweets can push the totals past 100 -- confirm.
    """
    auth = tw.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tw.API(auth, wait_on_rate_limit=True)
    search_words = compname + " " + "-filter:retweets"
    date_since = "2020-03-05"
    tweets = tw.Cursor(api.search, q=search_words, lang="en", since=date_since).items(10)
    total = 0
    positive = 0
    negative = 0
    neutral = 0
    for tweet in tweets:
        print("====================================================================")
        print(tweet.text)
        blob = TextBlob(tweet.text)
        for sentence in blob.sentences:
            polarity = sentence.sentiment.polarity
            print(polarity)
            if polarity > 0:
                positive += 1
            elif polarity < 0:
                negative += 1
            else:
                neutral += 1
            total += polarity
    positive_percentage = (positive / 10) * 100
    negative_percentage = (negative / 10) * 100
    neutral_percentage = 100 - (positive_percentage + negative_percentage)
    print("positive %", positive_percentage, "negative %", negative_percentage, "neutral %", neutral_percentage)
    print(total)
    return [positive_percentage, negative_percentage, neutral_percentage]
# vals = twitter_analysis("amazon")
# for i in vals:
# print(i)
| StarcoderdataPython |
3320329 | <reponame>jpoirierlavoie/Diplomacy
# Maps a turn identifier to its ordinal number in the game.
turns = {
    'spring_1901': 1
}
# Unit positions keyed by turn. Each power maps a province name to the unit
# stationed there: 'A' = army, 'F' = fleet.
turn_history = {
    'spring_1901': {
        'Deadline': '2022-02-19',
        'Austria': {
            'Vienna': 'A',
            'Budapest': 'A',
            'Trieste': 'F'},
        'England': {
            'London': 'F',
            'Edinburgh': 'F',
            'Liverpool': 'A'},
        'France': {
            'Paris': 'A',
            'Marseilles': 'A',
            'Brest': 'F'},
        'Germany': {
            'Berlin': 'A',
            'Munich': 'A',
            'Kiel': 'F'},
        'Italy': {
            'Roma': 'A',
            'Venizia': 'A',
            'Napoli': 'F'},
        'Russia': {
            'Moscow': 'A',
            'Warsaw': 'A',
            'Sevastopol': 'F',
            'Saint Petersburg (SC)': 'F'},
        'Turkey': {
            'Smyrna': 'A',
            'Constantinople': 'A',
            'Ankara': 'F'}
    }
}
# The 34 supply centers of the map.
# NOTE(review): 'Picardy' appears where the standard Diplomacy map has
# 'Paris' as a supply center -- confirm this is intentional.
supply_centers = [
    "Norway",
    "Sweden",
    "Denmark",
    "Holland",
    "Belgium",
    "Spain",
    "Portugal",
    "Tunisia",
    "Serbia",
    "Rumania",
    "Bulgaria",
    "Greece",
    "Constantinople",
    "Ankara",
    "Smyrna",
    "Budapest",
    "Vienna",
    "Trieste",
    "Venezia",
    "Roma",
    "Napoli",
    "Kiel",
    "Berlin",
    "Munich",
    "Picardy",
    "Brest",
    "Marseilles",
    "London",
    "Liverpool",
    "Edinburgh",
    "Saint Petersburg",
    "Moscow",
    "Warsaw",
    "Sevastopol"
]
| StarcoderdataPython |
class Luhn:
    """Validate a number string with the Luhn checksum algorithm."""

    def __init__(self, card_num: str):
        digits = card_num.replace(' ', '')[::-1]
        self._reversed_card_num = digits
        # Counting from the right (string already reversed): every second
        # digit is doubled, the rest are summed as-is.
        self._even_digits = digits[1::2]
        self._odd_digits = digits[::2]

    def valid(self) -> bool:
        """True when the input is all digits, longer than 1, and checksums to 0 mod 10."""
        if not str.isnumeric(self._reversed_card_num) or len(self._reversed_card_num) <= 1:
            return False
        return self._sum_card() % 10 == 0

    def _sum_card(self) -> int:
        doubled_sum = 0
        for ch in self._even_digits:
            twice = int(ch) * 2
            doubled_sum += twice - 9 if twice > 9 else twice
        return doubled_sum + sum(int(ch) for ch in self._odd_digits)
| StarcoderdataPython |
4816243 | """Handles RNG"""
import random
from typing import Sequence
# Global switch: when False, selection is deterministic (first element).
RNG = True


def choice(sequence: Sequence):
    """Pick a random element of *sequence*, or its first element when RNG is off."""
    if not RNG:
        return sequence[0]
    return random.choice(sequence)
| StarcoderdataPython |
3392952 | <gh_stars>1-10
# Autogenerated file. Do not edit.
from jacdac.bus import Bus, SensorClient
from .constants import *
from typing import Optional
class AirQualityIndexClient(SensorClient):
    """
    The Air Quality Index is a measure of how clean or polluted air is. From min, good quality, to high, low quality.
    * The range of AQI may vary between countries (https://en.wikipedia.org/wiki/Air_quality_index).
    Implements a client for the `Air Quality Index <https://microsoft.github.io/jacdac-docs/services/airqualityindex>`_ service.
    """

    def __init__(self, bus: Bus, role: str, *, missing_aqi_index_value: float = None) -> None:
        super().__init__(bus, JD_SERVICE_CLASS_AIR_QUALITY_INDEX, JD_AIR_QUALITY_INDEX_PACK_FORMATS, role, preferred_interval = 60000)
        # Fallback returned by `aqi_index` when the sensor has no reading yet.
        self.missing_aqi_index_value = missing_aqi_index_value

    @property
    def aqi_index(self) -> Optional[float]:
        """
        Air quality index, typically refreshed every second., _: AQI
        """
        # Request a fresh sample before reporting the register value.
        self.refresh_reading()
        return self.register(JD_AIR_QUALITY_INDEX_REG_AQI_INDEX).value(self.missing_aqi_index_value)

    @property
    def aqi_index_error(self) -> Optional[float]:
        """
        (Optional) Error on the AQI measure., _: AQI
        """
        return self.register(JD_AIR_QUALITY_INDEX_REG_AQI_INDEX_ERROR).value()

    @property
    def min_aqi_index(self) -> Optional[float]:
        """
        Minimum AQI reading, representing a good air quality. Typically 0., _: AQI
        """
        return self.register(JD_AIR_QUALITY_INDEX_REG_MIN_AQI_INDEX).value()

    @property
    def max_aqi_index(self) -> Optional[float]:
        """
        Maximum AQI reading, representing a very poor air quality., _: AQI
        """
        return self.register(JD_AIR_QUALITY_INDEX_REG_MAX_AQI_INDEX).value()
| StarcoderdataPython |
3208221 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# coderdojo_library.py
#
# Copyright 2015 CoderDojo - GPL Licence
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import time
# Import the GPIOEGalileo class from the wiringx86 module.
from wiringx86 import GPIOGalileo as GPIO
'''
PIN mapping for Analog:
A0 - 14
A1 - 15
A2 - 16
A3 - 17
A4 - 18
A5 - 19
'''
# define our class for project
class CoderDojoGalileo(object):
    """Weather-station helper for the Intel Galileo (CoderDojo project).

    Wraps wiringx86 GPIO access to a set of analog sensors (temperature, UV,
    humidity, pressure, rain) plus three digital LEDs on pins 2/4/7.
    """

    def __init__(self):
        self.board = GPIO()
        # Analog pins: A0..A5 are addressed as 14..19 on the Galileo.
        self.pin_temperature = 14  # A0
        self.pin_uvout = 15  # A1
        self.pin_reference3v = 16  # A2
        self.pin_humidity = 17  # A3
        self.pin_pressure = 18  # A4 - DL
        self.pin_rain_analog = 19  # A5
        self.pin_rain_digital = 2  # digital 2 - rain is....
        # LED output pins.
        self.pin_digital_A = 2
        self.pin_digital_B = 4
        self.pin_digital_C = 7
        # Cached readings (updated by the get* methods).
        self.temperature = 0
        self.uvIntensity = 0  # UV/cm2
        self.humidity = 0
        # BUG FIX: the attribute was initialized as the misspelled
        # 'rawhumanidity' while getRawHumidity() writes 'rawhumidity'.
        # Initialize the name that is actually used; the misspelled alias is
        # kept in case external code reads it.
        self.rawhumidity = 0
        self.rawhumanidity = 0
        self.pressure = 0
        self.rain_intensity = 0
        # Configure all analog pins as inputs.
        for pinA in range(14, 20):
            self.board.pinMode(pinA, self.board.ANALOG_INPUT)
        # Pin 2 doubles as the digital rain detector input.
        self.board.pinMode(2, self.board.INPUT)
        # Switch every possible digital LED off.
        for pinX in range(3, 14):
            self.board.pinMode(pinX, self.board.OUTPUT)
            self.board.digitalWrite(pinX, self.board.LOW)

    def __str__(self):
        # BUG FIX: __str__ used to *print* the description and implicitly
        # return None, which makes str(obj) raise TypeError. Return it instead.
        return "Object to read sensors - for CoderDojo by <NAME>"

    def ledA_ON(self):
        """Light the LED on digital pin A."""
        self.board.digitalWrite(self.pin_digital_A, self.board.HIGH)

    def ledB_ON(self):
        """Light the LED on digital pin B."""
        self.board.digitalWrite(self.pin_digital_B, self.board.HIGH)

    def ledC_ON(self):
        """Light the LED on digital pin C."""
        self.board.digitalWrite(self.pin_digital_C, self.board.HIGH)

    def ledA_OFF(self):
        """Switch off the LED on digital pin A."""
        self.board.digitalWrite(self.pin_digital_A, self.board.LOW)

    def ledB_OFF(self):
        """Switch off the LED on digital pin B."""
        self.board.digitalWrite(self.pin_digital_B, self.board.LOW)

    def ledC_OFF(self):
        """Switch off the LED on digital pin C."""
        self.board.digitalWrite(self.pin_digital_C, self.board.LOW)

    def getTemperature(self):
        """Read the temperature sensor on A0 and return degrees C (cached)."""
        value = self.board.analogRead(self.pin_temperature)
        # 10-bit ADC over 5V; 0.5V offset, 10 mV per degree C.
        self.temperature = round((((value * 5 / 1024.0) - 0.5) / 0.01), 2)
        return self.temperature

    def getRawTemperature(self):
        """Return the raw ADC value of the temperature pin."""
        self.raw_temperature = self.board.analogRead(self.pin_temperature)
        return self.raw_temperature

    def getLastTemperature(self):
        """Return the most recent temperature without touching the hardware."""
        return self.temperature

    def getUVIndex(self):
        """Read the ML8511 UV sensor and return the UV intensity (cached)."""

        def averageAnalogRead(pin, avg=9):
            # BUG FIX: the original looped over range(1, avg) -- only avg-1
            # reads -- yet divided by avg, and truncated with integer division
            # under Python 2. Read exactly `avg` times and divide as float.
            read_value = 0
            for _ in range(avg):
                read_value += self.board.analogRead(pin)
            return float(read_value) / avg

        def mapfloat(x, in_min, in_max, out_min, out_max):
            # Linear interpolation used to convert voltage to UV intensity.
            return float((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)

        uvLevel = averageAnalogRead(self.pin_uvout)
        refLevel = averageAnalogRead(self.pin_reference3v)
        # Use the 3.3V power pin as a reference to get a very accurate output
        # value from the sensor.
        outputVoltage = 3.3 / refLevel * uvLevel
        # Convert the voltage to a UV intensity level, based on:
        # https://learn.sparkfun.com/tutorials/ml8511-uv-sensor-hookup-guide/using-the-ml8511
        self.uvIntensity = round(mapfloat(outputVoltage, 0.99, 2.8, 0.0, 15.0), 2)
        return self.uvIntensity

    def getLastUVIndex(self):
        """Return the most recent UV intensity without touching the hardware."""
        return self.uvIntensity

    def getHumidity(self):
        """Read the humidity sensor on A3 and return percent humidity (cached)."""
        value = self.board.analogRead(self.pin_humidity)
        self.humidity = round(value * 0.2, 2)  # scale raw reading to %
        return self.humidity

    def getRawHumidity(self):
        """Return the raw ADC value of the humidity pin."""
        self.rawhumidity = self.board.analogRead(self.pin_humidity)
        return self.rawhumidity

    def getPressure(self):
        """Read the pressure sensor on A4 and return pressure in hPa (cached)."""
        raw_value = self.board.analogRead(self.pin_pressure)
        # Sensor range: 0 -> 15 kPa ... 1023 -> 115 kPa, i.e. ~0.097 kPa per
        # ADC step (analog output about 4.59 mV / hPa).
        pressure_kpa = raw_value * 0.097
        pressure_hpa = 150 + (pressure_kpa * 10)
        self.pressure = round(pressure_hpa, 2)
        return self.pressure

    def getRainIntensity(self):
        """Read the analog rain sensor on A5 and return intensity in % (cached)."""
        raw_value = self.board.analogRead(self.pin_rain_analog)
        # 515 is the reading of a completely dry sensor (0%); 5.15 raw units
        # correspond to 1% rain intensity.
        to_compute = 515 - raw_value
        self.rain_intensity = round(to_compute / 5.15, 2)
        return self.rain_intensity
if __name__ == '__main__':
    # Python 2 print statement: this module is meant to be imported, not run.
    print "This is module - it sould not be executed itself..."
| StarcoderdataPython |
147973 | """
Module with functions and methods for NEXUS files.
Parsing and writing of NEXUS files is currently done with a very simple,
string manipulation strategy.
"""
# TODO: allow to output taxa and character names between quotes, if necessary
# TODO: sort using assumptions (charset) if provided
# TODO: support comments (will need changes to the parser)
# Import Python standard libraries
from collections import defaultdict
from enum import Enum, auto
from itertools import chain
import re
# Import from local modules
from .internal import PhyloData
from .common import indexes2ranges
def parse_nexus(source: str) -> dict:
    """
    Parse the information in a NEXUS string.

    Parsing is done with a hand-written, character-driven automaton that
    buffers text until a full command (terminated by ';') can be processed.
    Not a complete NEXUS implementation, but it needs no external packages
    and can be extended gradually.

    :param source: The full NEXUS source code.
    :return: A dictionary with all the block information in the source.
    """

    # Automaton states: before '#NEXUS', between blocks, inside a block.
    class ParserState(Enum):
        NONE = auto()
        OUT_OF_BLOCK = auto()
        IN_BLOCK = auto()

    # Collected data; could become a proper class with sanity checks later.
    nexus_data = {
        "ntax": None,
        "nchar": None,
        "datatype": None,
        "missing": None,
        "gap": None,
        "symbols": None,
        "charstatelabels": {},
        "matrix": {},
        "charset": [],
    }

    # TODO: keep track of line numbers (count newlines) for better errors.
    buf = ""
    block_name = ""
    state = ParserState.NONE
    for pos, ch in enumerate(source):
        buf += ch
        if state == ParserState.NONE:
            # Wait for the mandatory '#NEXUS' header.
            if buf.strip() == "#NEXUS":
                buf = ""
                state = ParserState.OUT_OF_BLOCK
        elif state == ParserState.OUT_OF_BLOCK:
            # Expect a 'BEGIN <name>;' introducing the next block.
            if ch == ";":
                match = re.match(r"BEGIN\s+(.+)\s*;", buf.upper().strip())
                if not match:
                    raise ValueError(f"Unable to parse NEXUS block at char {pos}.")
                block_name = match.group(1)
                buf = ""
                state = ParserState.IN_BLOCK
        elif state == ParserState.IN_BLOCK:
            if ch != ";":
                continue
            # Either the end of the block or a complete command to process.
            if re.sub(r"\s", "", buf.upper().strip()) == "END;":
                buf = ""
                state = ParserState.OUT_OF_BLOCK
                continue
            command = re.sub(r"\s+", " ", buf.strip()).split()[0].upper()
            if command == "DIMENSIONS":
                ntax_match = re.search(r"NTAX\s*=\s*(\d+)", buf.upper())
                nchar_match = re.search(r"NCHAR\s*=\s*(\d+)", buf.upper())
                if ntax_match:
                    nexus_data["ntax"] = int(ntax_match.group(1))
                if nchar_match:
                    nexus_data["nchar"] = int(nchar_match.group(1))
            elif command == "FORMAT":
                datatype_match = re.search(r"DATATYPE\s*=\s*(\w+)", buf.upper())
                missing_match = re.search(r"MISSING\s*=\s*(.)", buf.upper())
                gap_match = re.search(r"GAP\s*=\s*(.)", buf.upper())
                symbols_match = re.search(r"SYMBOLS\s*=\s*\"([^\"]+)\"", buf.upper())
                if datatype_match:
                    nexus_data["datatype"] = datatype_match.group(1)
                if missing_match:
                    nexus_data["missing"] = missing_match.group(1)
                if gap_match:
                    nexus_data["gap"] = gap_match.group(1)
                if symbols_match:
                    # TODO: deal with space separated symbols
                    nexus_data["symbols"] = symbols_match.group(1)
            elif command == "CHARSTATELABELS":
                # Normalize whitespace, then parse each comma-separated label.
                label_buf = re.sub(r"\s+", " ", buf.strip())
                start = label_buf.find(" ", label_buf.find("CHARSTATELABELS"))
                for label_entry in label_buf[start:-1].split(","):
                    label_entry = re.sub(r"\s+", " ", label_entry.strip())
                    if "/" in label_entry:
                        # Multistate 'index label / states' entries: TODO.
                        raise ValueError("Not implemented")
                    else:
                        label_idx, charlabel = label_entry.split()
                        nexus_data["charstatelabels"][int(label_idx)] = charlabel
            elif command == "MATRIX":
                start = buf.find("MATRIX") + len("MATRIX")
                for row in buf[start + 1 : -1].strip().split("\n"):
                    taxon, vector = re.sub(r"\s+", " ", row.strip()).split()
                    nexus_data["matrix"][taxon] = vector
            elif command == "CHARSET":
                # TODO: implement other syntaxes
                # TODO: ensure this only happens inside an 'assumptions' block
                match = re.search(r"charset\s*(\w+)\s*=\s*(\d+)\s*-\s*(\d+)", buf)
                if match:
                    charset_label, start_pos, end_pos = match.groups()
                    nexus_data["charset"].append(
                        {
                            "charset": charset_label,
                            "start": int(start_pos),
                            "end": int(end_pos),
                        }
                    )
            # Command consumed: clear the buffer and continue.
            buf = ""

    return nexus_data
def read_data_nexus(source: str, args) -> "PhyloData":
    """
    Parse a NEXUS source into an internal representation.

    :param source: A string with the source data representation.
    :param args: Currently unused.
    :return: An object with the internal representation of the data.
    """
    # TODO: deal with multistate {}; handle binary as multistate internally.
    nexus_data = parse_nexus(source)

    # Invert the charset spans: alignment position -> charset name.
    alm2charset = {}
    for charset in nexus_data["charset"]:
        for pos in range(charset["start"], charset["end"] + 1):
            alm2charset[pos] = charset["charset"]

    # Collect the observed states per (taxon, charset); '0' means absence,
    # the `missing` symbol maps to '?'.
    states = defaultdict(set)
    for taxon, vector in nexus_data["matrix"].items():
        for pos, state in enumerate(vector):
            if state == nexus_data["missing"]:
                states[taxon, alm2charset[pos + 1]].add("?")
            elif state != "0":
                states[taxon, alm2charset[pos + 1]].add(
                    nexus_data["charstatelabels"][pos + 1]
                )

    # Build the PhyloData object and return.
    phyd = PhyloData()
    for (taxon, character), state_set in states.items():
        for state in state_set:
            phyd.add_state(taxon, character, state)
    return phyd
def build_taxa_block(phyd: "PhyloData") -> str:
    """
    Build a NEXUS TAXA block.

    :param phyd: The PhyloData object used as source of the taxa block.
    :return: A textual representation of the NEXUS taxa block.
    """
    labels = "\n".join(" %s" % taxon for taxon in sorted(phyd.taxa))
    block = """
BEGIN TAXA;
DIMENSIONS NTAX=%i;
TAXLABELS
%s
;
END;""" % (len(phyd.taxa), labels)
    return block.strip()
# TODO: don't output charstatelabels values if all are binary
def build_character_block(phyd: PhyloData) -> str:
    """
    Build a NEXUS CHARACTERS block.

    The block carries the dimensions, the FORMAT line, the CHARSTATELABELS
    command and the MATRIX command (delegated to `build_matrix_command()`).

    :param phyd: The PhyloData object used as source of the character block.
    :return: A textual representation of the NEXUS character block.
    """

    # Express the actual labels only we have any state which is not a binary "0" or "1"
    # TODO: what about mixed data?
    states = sorted(set(chain.from_iterable(phyd.charstates.values())))
    if tuple(states) == ("0", "1"):
        # Purely binary data: label is just "<index> <character>,".
        charstatelabels = [
            " %i %s," % (charstate_idx + 1, character)
            for charstate_idx, (character, _) in enumerate(phyd.charstates.items())
        ]
    else:
        # TODO: make sure this sorted order matches the one from phyd.matrix
        # Multistate data: append the sorted state symbols after a slash.
        charstatelabels = [
            " %i %s /%s,"
            % (charstate_idx + 1, character, " ".join(sorted(state_set)))
            for charstate_idx, (character, state_set) in enumerate(
                phyd.charstates.items()
            )
        ]

    # TODO: keeping the final comma in charstatelabels should be an option
    buffer = """
BEGIN CHARACTERS;
DIMENSIONS NCHAR=%i;
FORMAT DATATYPE=STANDARD MISSING=? GAP=- SYMBOLS="%s";
CHARSTATELABELS
%s
;
%s
END;""" % (
        len(phyd.charstates),
        " ".join(phyd.symbols),
        "\n".join(charstatelabels),
        build_matrix_command(phyd),
    )

    return buffer.strip()
def build_matrix_command(phyd: PhyloData) -> str:
    """
    Build a NEXUS MATRIX command.

    Taxon names are left-justified to a common width so that the
    character vectors line up in a column.

    :param phyd: The PhyloData object used as source of the matrix command.
    :return: A textual representation of the NEXUS matrix command.
    """

    rows = phyd.matrix
    # Widest taxon name determines the padding of the name column.
    width = max(len(row["taxon"]) for row in rows)

    body = "\n".join(
        "%s %s" % (row["taxon"].ljust(width), row["vector"]) for row in rows
    )

    buffer = """
MATRIX
%s
;""" % (
        body
    )

    return buffer.strip()
def build_assumption_block(phyd: PhyloData) -> str:
    """
    Build a NEXUS ASSUMPTIONS block with one CHARSET command per charset.

    Character indexes are the 1-based positions of each character in the
    sorted character list, compressed into ranges by `indexes2ranges()`.
    Returns an empty string when the data carries no charset information,
    so callers can filter the block out.

    :param phyd: The PhyloData object used as source of the assumption block.
    :return: A textual representation of the NEXUS assumption block.
    """

    if not phyd.charset:
        return ""

    # Get the individual indexes first, and then build the string representation
    character_list = sorted(phyd.charstates.keys())
    indexes = {
        charset: [character_list.index(char) + 1 for char in characters]
        for charset, characters in phyd.charset.items()
    }

    # TODO: make sure it is sorted
    buffer = """
BEGIN ASSUMPTIONS;
%s
END;
""" % (
        "\n".join(
            [
                " CHARSET %s = %s;" % (charset, indexes2ranges(indexes[charset]))
                for charset in sorted(indexes)
            ]
        )
    )

    # BUGFIX: removed a leftover debug loop that printed every charset's
    # ranges to stdout on each call.
    return buffer
def build_nexus(phyd: PhyloData, args) -> str:
    """
    Build a NEXUS data representation.

    :param phyd: The PhyloData object used as source of the data representation.
    :param args: Command-line arguments (currently unused here).
    :return: A textual representation of the NEXUS data representation.
    """

    # TODO: this only implements multistate
    # TODO: not rendering polymorphy
    blocks = (
        "#NEXUS",
        build_taxa_block(phyd),
        build_character_block(phyd),
        build_assumption_block(phyd),
    )

    # Join the non-empty blocks (the assumptions block may be empty).
    return "\n\n".join(block for block in blocks if block)
| StarcoderdataPython |
1678989 | <reponame>ezragoss/typewriter
from __future__ import absolute_import, print_function
import json
import shlex
import subprocess
from lib2to3.pytree import Node
from typing import Any, Dict, List, Optional, Tuple
from .fix_annotate_json import BaseFixAnnotateFromSignature
class FixAnnotateCommand(BaseFixAnnotateFromSignature):
    """Inserts annotations based on a command run in a subprocess for each
    location. The command is expected to output a json string in the same
    format output by `dmypy suggest` and `pyannotate_tool --type-info`
    """

    def get_command(self, funcname, filename, lineno):
        # type: (str, str, int) -> List[str]
        """Build the argv list for the configured suggestion command.

        The command template comes from the 2to3 options and may use the
        {filename}, {lineno} and {funcname} placeholders.
        """
        command = self.options['typewriter']['command']
        return shlex.split(command.format(filename=filename, lineno=lineno,
                                          funcname=funcname))

    def get_types(self, node, results, funcname):
        # type: (Node, Dict[str, Any], str) -> Optional[Tuple[List[str], str]]
        """Run the external command and return (arg_types, return_type).

        Returns None (after logging a message) when the command fails or
        cannot produce a suggestion.
        """
        cmd = self.get_command(funcname, self.filename, node.get_lineno())
        try:
            out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # dmypy suggest exits 2 anytime it can't generate a suggestion,
            # even for somewhat expected cases like when --no-any is enabled:
            if err.returncode != 2:
                # BUGFIX: err.output is bytes on Python 3, and the previous
                # `err.output.rstrip().encode()` raised AttributeError there,
                # masking the real failure. Decode defensively instead.
                output = err.output.rstrip()
                if isinstance(output, bytes):
                    output = output.decode('utf-8', 'replace')
                self.log_message("Line %d: Failed calling %r: %s" %
                                 (node.get_lineno(), cmd, output))
            return None
        except OSError as err:
            self.log_message("Line %d: Failed calling %r: %s" %
                             (node.get_lineno(), cmd, err))
            return None
        data = json.loads(out)
        signature = data[0]['signature']
        return signature['arg_types'], signature['return_type']
| StarcoderdataPython |
99848 | import tensorflow as tf
import numpy as np
from tqdm import tqdm
from tf_metric_learning.utils.index import AnnoyDataIndex
class AnnoyEvaluatorCallback(AnnoyDataIndex):
    """
    Callback: extracts embeddings, adds them to an Annoy index and evaluates
    them as Recall@K over the search set.
    """

    def __init__(
        self,
        model,
        data_store,
        data_search,
        save_dir=None,
        eb_size=256,
        metric="euclidean",
        freq=1,
        batch_size=None,
        normalize_eb=True,
        normalize_fn=None,
        progress=True,
        **kwargs
    ):
        """
        Args:
            model: embedding model used for inference.
            data_store: dict with "images" and "labels" to be indexed.
            data_search: dict with "images" and "labels" used as queries.
            save_dir: optional directory for the Annoy index.
            eb_size: embedding dimensionality.
            metric: Annoy distance metric.
            freq: evaluate every `freq` epochs (0 disables evaluation).
            batch_size: batch size forwarded to `model.predict`.
            normalize_eb: L2-normalize embeddings before indexing/searching.
            normalize_fn: optional preprocessing applied to image batches.
            progress: show progress bars.
        """
        super().__init__(eb_size, data_store["labels"], metric=metric, save_dir=save_dir, progress=progress)
        self.base_model = model
        self.data_store = data_store
        self.data_search = data_search
        self.batch_size = batch_size
        self.freq = int(freq)
        self.normalize_eb = normalize_eb
        self.normalize_fn = normalize_fn
        self.results = {}

    def on_epoch_begin(self, epoch, logs=None):
        # Re-index and evaluate on the configured schedule.
        if self.freq and epoch % self.freq == 0:
            self.compute_data()

    def batch(self, iterable, n=1):
        """Yield successive slices of `iterable` of length at most `n`."""
        total = len(iterable)
        for start in range(0, total, n):
            yield iterable[start:min(start + n, total)]

    def compute_data(self):
        """Embed and index the store set, then evaluate the search set."""
        self.create_index()
        i = 0
        with tqdm(total=len(self.data_store["images"]), desc="Indexing ... ") as pbar:
            for batch in self.batch(self.data_store["images"], n=self.batch_size * 10):
                store_images = self.normalize_fn(batch) if self.normalize_fn is not None else batch
                embeddings_store = self.base_model.predict(store_images, batch_size=self.batch_size)
                if self.normalize_eb:
                    embeddings_store = tf.nn.l2_normalize(embeddings_store, axis=1).numpy()
                for embedding in embeddings_store:
                    self.add_to_index(i, embedding)
                    i += 1
                pbar.update(len(batch))
        self.build(k=5)
        self.evaluate(self.data_search["images"])

    def evaluate(self, images):
        """Query the index with each search embedding and record Recall@K
        hit indicators for K in (1, 5, 10, 20)."""
        recall_levels = [1, 5, 10, 20]
        self.results = {"default": []}
        i = 0
        with tqdm(total=len(images), desc="Evaluating ... ") as pbar:
            for batch in self.batch(images, n=self.batch_size * 10):
                search_images = self.normalize_fn(batch) if self.normalize_fn is not None else batch
                embeddings_search = self.base_model.predict(search_images, batch_size=self.batch_size)
                if self.normalize_eb:
                    embeddings_search = tf.nn.l2_normalize(embeddings_search, axis=1).numpy()
                for embedding in embeddings_search:
                    annoy_results = self.search(embedding, n=20, include_distances=False)
                    annoy_results = [self.get_label(result) for result in annoy_results]
                    recalls = self.eval_recall(annoy_results, self.data_search["labels"][i], recall_levels)
                    self.results["default"].append(recalls)
                    i += 1
                pbar.update(len(batch))
        # BUGFIX: the label previously claimed [1, 3, 5, 10, 20] while only
        # four recall cutoffs are actually computed above.
        print("\nRecall@[1, 5, 10, 20] Computed:", np.mean(np.asarray(self.results["default"]), axis=0), "\n")

    def eval_recall(self, annoy_results, label, recalls):
        """Return a 0/1 hit indicator for each cutoff in `recalls`."""
        return [1 if label in annoy_results[:recall_n] else 0 for recall_n in recalls]
| StarcoderdataPython |
188423 | '''
May 2020 by <NAME>
<EMAIL>
https://www.github.com/sebbarb/
'''
import sys
sys.path.append('../lib/')
import numpy as np
import pandas as pd
from lifelines import CoxPHFitter
from utils import *
import feather
from hyperparameters import Hyperparameters
from pdb import set_trace as bp
def main():
    """Fit Cox proportional-hazards models for the configured gender and
    save hazard ratios plus per-fold log partial hazards to disk."""

    # Load data
    print('Load data...')
    hp = Hyperparameters()
    data = np.load(hp.data_pp_dir + 'data_arrays_' + hp.gender + '.npz')

    print('Use all data for model fitting...')
    x = data['x']
    time = data['time']
    event = data['event']

    # Column names for the predictor matrix, pickled during preprocessing.
    cols_list = load_obj(hp.data_pp_dir + 'cols_list.pkl')

    df = pd.DataFrame(x, columns=cols_list)
    df['TIME'] = time
    df['EVENT'] = event

    ###################################################################

    # Fit on the full dataset to report hazard ratios per predictor.
    print('Fitting all data...')
    cph = CoxPHFitter()
    cph.fit(df, duration_col='TIME', event_col='EVENT', show_progress=True, step_size=0.5)
    cph.print_summary()

    print('Saving...')
    df_summary = cph.summary
    df_summary['PREDICTOR'] = cols_list
    df_summary.to_csv(hp.results_dir + 'hr_' + hp.gender + '.csv', index=False)

    ###################################################################

    # Cross-fitting: train on one half of each fold split, score the other.
    print('Test on each fold (train on swapped)...')
    for fold in range(hp.num_folds):
        for swap in range(2):
            print('Fold: {} Swap: {}'.format(fold, swap))

            # Training half of this fold.
            idx = (data['fold'][:, fold] == (1-swap))
            x = data['x'][idx]
            time = data['time'][idx]
            event = data['event'][idx]

            df = pd.DataFrame(x, columns=cols_list)
            df['TIME'] = time
            df['EVENT'] = event

            print('Fitting all data...')
            cph = CoxPHFitter()
            cph.fit(df, duration_col='TIME', event_col='EVENT', show_progress=True, step_size=0.5)
            print('done')

            # Held-out half: linear predictor of the centered covariates
            # (cph._norm_mean is the training mean used by lifelines).
            idx = (data['fold'][:, fold] == swap)
            x = data['x'][idx]
            df_cox = pd.DataFrame({'LPH': np.dot(x-cph._norm_mean.values, cph.params_)})

            print('Saving log proportional hazards for fold...')
            df_cox.to_feather(hp.results_dir + 'df_cox_' + hp.gender + '_fold_' + str(fold) + '_' + str(swap) + '.feather')
if __name__ == '__main__':
main()
| StarcoderdataPython |
1650117 | # -*- coding: utf-8 -*-
"""
This module contains the core of the optimization model, containing
the definiton of problem (variables,constraints,objective function,...)
in CVXPY for planning and operation modes.
"""
import pandas as pd
import cvxpy as cp
import numpy as np
from collections import namedtuple
from hypatia.utility.utility import (
invcosts,
invcosts_annuity,
salvage_factor,
newcap_accumulated,
line_newcap_accumulated,
_calc_variable_overall,
_calc_production_overall,
fixcosts,
line_varcost,
decomcap,
line_decomcap,
available_resource_prod,
annual_activity,
storage_state_of_charge,
get_regions_with_storage,
storage_max_flow,
)
import logging
logger = logging.getLogger(__name__)
# Names of BuildModel attributes collected into the result namedtuple after a
# successful solve, common to both "Planning" and "Operation" modes.
RESULTS = [
    "variables",
    "cost_fix",
    "cost_variable",
    "totalcapacity",
    "cost_fix_tax",
    "cost_fix_sub",
    "emission_cost",
    "CO2_equivalent",
    "demand",
]

# Additional result attributes that exist only in "Planning" mode
# (investment- and decommissioning-related quantities).
PLANNING_RESULTS = [
    "cost_decom",
    "decommissioned_capacity",
    "cost_inv",
    "salvage_inv",
    "cost_inv_tax",
    "cost_inv_sub",
]
class BuildModel:
"""Class that builds the variables and equations of the model
Attributes
-----------
sets:
The instance of the Readsets class for delivering the structural inputs
including regions, technologies, years, timesteps, mapping tables
variables: dict
a nested dictionary of all the decision variables including the new capacity, production
by each technology, use (consumption) by each technology, imports and exports
"""
    def __init__(self, sets):
        """Build all variables, intermediate cost expressions, constraints and
        the objective for the mode configured in ``sets``.

        Parameters
        ----------
        sets :
            structural inputs (regions, technologies, years, timesteps,
            mapping tables and parameter data); ``sets.mode`` selects the
            "Planning" or "Operation" formulation.
        """
        self.sets = sets
        # Constraint list filled by the _constr_* methods below.
        self.constr = []

        # Column vector of timeslice durations (fraction of a year), used to
        # scale capacities into per-timeslice production limits.
        timeslice_fraction = self.sets.timeslice_fraction
        if not isinstance(timeslice_fraction, int):
            timeslice_fraction.shape = (len(self.sets.time_steps), 1)
        self.timeslice_fraction = timeslice_fraction

        self._set_variables()

        # calling the methods based on the defined mode by the user
        # NOTE: call order matters — cost/intermediate expressions must be
        # built before the constraints and objectives that reference them.
        if self.sets.mode == "Planning":
            self._calc_variable_planning()
            self._balance_()
            self._constr_totalcapacity_regional()
            self._constr_newcapacity_regional()
            self._constr_balance()
            self._constr_resource_tech_availability()
            self._constr_tech_efficiency()
            self._constr_prod_annual()
            self._constr_emission_cap()
            self._calc_variable_storage_SOC()
            self._constr_storage_max_min_charge()
            self._constr_storage_max_flow_in_out()
            self._set_regional_objective_planning()

            if len(self.sets.regions) == 1:
                self._set_final_objective_singlenode()
            elif len(self.sets.regions) > 1:
                # Multi-node runs additionally build the inter-regional
                # link variables, constraints and objective terms.
                self._calc_variable_planning_line()
                self._constr_totalcapacity_line()
                self._constr_totalcapacity_overall()
                self._constr_newcapacity_overall()
                self._constr_line_availability()
                self._constr_trade_balance()
                self._constr_prod_annual_overall()
                self._set_lines_objective_planning()
                self._set_final_objective_multinode()

        elif self.sets.mode == "Operation":
            self._calc_variable_operation()
            self._balance_()
            self._constr_balance()
            self._constr_resource_tech_availability()
            self._constr_tech_efficiency()
            self._constr_prod_annual()
            self._constr_emission_cap()
            self._calc_variable_storage_SOC()
            self._constr_storage_max_min_charge()
            self._constr_storage_max_flow_in_out()
            self._set_regional_objective_operation()

            if len(self.sets.regions) == 1:
                self._set_final_objective_singlenode()
            elif len(self.sets.regions) > 1:
                self._calc_variable_operation_line()
                self._constr_line_availability()
                self._constr_trade_balance()
                self._constr_prod_annual_overall()
                self._set_lines_objective_operation()
                self._set_final_objective_multinode()
def _solve(self, verbosity, solver, **kwargs):
"""
Creates a CVXPY problem instance, if the output status is optimal,
returns the results to the interface
"""
objective = cp.Minimize(self.global_objective)
problem = cp.Problem(objective, self.constr)
problem.solve(solver=solver, verbose=verbosity, **kwargs)
if problem.status == "optimal":
# Reshape the demand
self.demand = {
reg: self.sets.data[reg]["demand"] for reg in self.sets.regions
}
res = RESULTS.copy()
to_add = []
if self.sets.multi_node:
if self.sets.mode == "Planning":
to_add = [
"line_totalcapacity",
"line_decommissioned_capacity",
"cost_inv_line",
"cost_fix_line",
"cost_decom_line",
"cost_variable_line",
]
else:
to_add = [
"line_totalcapacity",
"cost_fix_line",
"cost_variable_line",
]
if self.sets.mode == "Planning":
to_add.extend(PLANNING_RESULTS)
res.extend(to_add)
result_collector = namedtuple("result", res)
results = result_collector(
**{result: getattr(self, result) for result in res}
)
return results
else:
print(
"No solution found and no result will be uploaded to the model",
"critical",
)
    def _set_variables(self):
        """
        Creates the matrix-based decision variables of the problem in a
        nested dict format, for each region and each technology category.

        Production/use variables have shape (years * timesteps, n_techs);
        trade variables have shape (years * timesteps, n_carriers); new
        capacities (Planning mode only) have shape (years, n_techs).
        """
        technology_prod = {}
        technology_use = {}
        new_capacity = {}
        line_newcapacity = {}
        line_import = {}
        line_export = {}
        for reg in self.sets.regions:
            regional_prod = {}
            regional_use = {}
            for key in self.sets.Technologies[reg].keys():
                # Demand technologies have no production variable; Supply
                # technologies additionally have no use (input) variable.
                if key != "Demand":
                    regional_prod[key] = cp.Variable(
                        shape=(
                            len(self.sets.main_years) * len(self.sets.time_steps),
                            len(self.sets.Technologies[reg][key]),
                        ),
                        nonneg=True,
                    )
                if key != "Demand" and key != "Supply":
                    regional_use[key] = cp.Variable(
                        shape=(
                            len(self.sets.main_years) * len(self.sets.time_steps),
                            len(self.sets.Technologies[reg][key]),
                        ),
                        nonneg=True,
                    )
            technology_prod[reg] = regional_prod
            technology_use[reg] = regional_use

            # One import/export variable per partner region and carrier.
            export_ = {}
            import_ = {}
            for reg_ in self.sets.regions:
                if reg_ != reg:
                    export_[reg_] = cp.Variable(
                        shape=(
                            len(self.sets.main_years) * len(self.sets.time_steps),
                            len(self.sets.glob_mapping["Carriers_glob"].index),
                        ),
                        nonneg=True,
                    )
                    import_[reg_] = cp.Variable(
                        shape=(
                            len(self.sets.main_years) * len(self.sets.time_steps),
                            len(self.sets.glob_mapping["Carriers_glob"].index),
                        ),
                        nonneg=True,
                    )
            line_export[reg] = export_
            line_import[reg] = import_

        self.variables = {
            "productionbyTechnology": technology_prod,
            "usebyTechnology": technology_use,
        }
        if len(self.sets.regions) > 1:
            self.variables.update(
                {"line_export": line_export, "line_import": line_import,}
            )

        # New-capacity variables only exist in the Planning formulation.
        if self.sets.mode == "Planning":
            for reg in self.sets.regions:
                regional_newcap = {}
                for key in self.sets.Technologies[reg].keys():
                    if key != "Demand":
                        regional_newcap[key] = cp.Variable(
                            shape=(
                                len(self.sets.main_years),
                                len(self.sets.Technologies[reg][key]),
                            ),
                            nonneg=True,
                        )
                new_capacity[reg] = regional_newcap
            self.variables.update({"newcapacity": new_capacity})

            if len(self.sets.regions) > 1:
                for line in self.sets.lines_list:
                    line_newcapacity[line] = cp.Variable(
                        shape=(
                            len(self.sets.main_years),
                            len(self.sets.glob_mapping["Carriers_glob"].index),
                        ),
                        nonneg=True,
                    )
                self.variables.update({"line_newcapacity": line_newcapacity})
    def _calc_variable_planning(self):
        """
        Calculates all the cost components of the objective function and the
        intermediate variables in the planning mode, for each region:
        investment costs (with tax/subsidy), salvage value, accumulated and
        total capacity, fixed costs, decommissioning, variable costs,
        emissions and annualized investment costs.
        """
        self.cost_inv = {}
        self.cost_inv_tax = {}
        self.cost_inv_sub = {}
        self.cost_inv_fvalue = {}
        self.salvage_inv = {}
        # NOTE(review): accumulated_newcapacity is initialized but the
        # per-region dicts are never stored into it below — confirm intent.
        self.accumulated_newcapacity = {}
        self.totalcapacity = {}
        self.cost_fix = {}
        self.cost_fix_tax = {}
        self.cost_fix_sub = {}
        self.decommissioned_capacity = {}
        self.cost_decom = {}
        self.cost_variable = {}
        self.CO2_equivalent = {}
        self.emission_cost = {}
        self.production_annual = {}
        for reg in self.sets.regions:
            cost_inv_regional = {}
            cost_inv_tax_regional = {}
            cost_inv_sub_regional = {}
            cost_fvalue_regional = {}
            salvage_inv_regional = {}
            accumulated_newcapacity_regional = {}
            totalcapacity_regional = {}
            cost_fix_regional = {}
            cost_fix_tax_regional = {}
            cost_fix_Sub_regional = {}
            decomcapacity_regional = {}
            cost_decom_regional = {}
            cost_variable_regional = {}
            CO2_equivalent_regional = {}
            emission_cost_regional = {}
            production_annual_regional = {}
            for key in self.variables["newcapacity"][reg].keys():
                # Investment cost split into gross cost, tax and subsidy.
                (
                    cost_inv_regional[key],
                    cost_inv_tax_regional[key],
                    cost_inv_sub_regional[key],
                ) = invcosts(
                    self.sets.data[reg]["tech_inv"][key],
                    self.variables["newcapacity"][reg][key],
                    self.sets.data[reg]["inv_taxsub"]["Tax"][key],
                    self.sets.data[reg]["inv_taxsub"]["Sub"][key],
                )

                # Salvage value of capacity still alive at the horizon end.
                salvage_inv_regional[key] = cp.multiply(
                    salvage_factor(
                        self.sets.main_years,
                        self.sets.Technologies[reg][key],
                        self.sets.data[reg]["tech_lifetime"].loc[:, key],
                        self.sets.data[reg]["interest_rate"].loc[:, key],
                        self.sets.data[reg]["discount_rate"],
                        self.sets.data[reg]["economic_lifetime"].loc[:, key],
                    ),
                    cost_inv_regional[key],
                )

                # Total capacity = surviving new builds + residual capacity.
                accumulated_newcapacity_regional[key] = newcap_accumulated(
                    self.variables["newcapacity"][reg][key],
                    self.sets.Technologies[reg][key],
                    self.sets.main_years,
                    self.sets.data[reg]["tech_lifetime"].loc[:, key],
                )
                totalcapacity_regional[key] = (
                    accumulated_newcapacity_regional[key]
                    + self.sets.data[reg]["tech_residual_cap"].loc[:, key]
                )

                # Fixed O&M cost split into gross cost, tax and subsidy.
                (
                    cost_fix_regional[key],
                    cost_fix_tax_regional[key],
                    cost_fix_Sub_regional[key],
                ) = fixcosts(
                    self.sets.data[reg]["tech_fixed_cost"][key],
                    totalcapacity_regional[key],
                    self.sets.data[reg]["fix_taxsub"]["Tax"][key],
                    self.sets.data[reg]["fix_taxsub"]["Sub"][key],
                )

                # Capacity retired each year and its decommissioning cost.
                decomcapacity_regional[key] = decomcap(
                    self.variables["newcapacity"][reg][key],
                    self.sets.Technologies[reg][key],
                    self.sets.main_years,
                    self.sets.data[reg]["tech_lifetime"].loc[:, key],
                )
                cost_decom_regional[key] = cp.multiply(
                    self.sets.data[reg]["tech_decom_cost"].loc[:, key].values,
                    decomcapacity_regional[key],
                )

                # Annual activity drives variable costs and emissions.
                production_annual_regional[key] = annual_activity(
                    self.variables["productionbyTechnology"][reg][key],
                    self.sets.main_years,
                    self.sets.time_steps,
                )
                cost_variable_regional[key] = cp.multiply(
                    production_annual_regional[key],
                    self.sets.data[reg]["tech_var_cost"].loc[:, key],
                )

                # Transmission and Storage carry no emission accounting.
                if key != "Transmission" and key != "Storage":
                    CO2_equivalent_regional[key] = cp.multiply(
                        production_annual_regional[key],
                        self.sets.data[reg]["specific_emission"].loc[:, key],
                    )
                    emission_cost_regional[key] = cp.multiply(
                        CO2_equivalent_regional[key],
                        self.sets.data[reg]["carbon_tax"].loc[:, key],
                    )

                # Annuity (future value) of the investment cost stream.
                cost_fvalue_regional[key] = invcosts_annuity(
                    cost_inv_regional[key],
                    self.sets.data[reg]["interest_rate"].loc[:, key],
                    self.sets.data[reg]["economic_lifetime"].loc[:, key],
                    self.sets.Technologies[reg][key],
                    self.sets.main_years,
                    self.sets.data[reg]["discount_rate"],
                )

            self.cost_inv[reg] = cost_inv_regional
            self.cost_inv_tax[reg] = cost_inv_tax_regional
            self.cost_inv_sub[reg] = cost_inv_sub_regional
            self.salvage_inv[reg] = salvage_inv_regional
            self.totalcapacity[reg] = totalcapacity_regional
            self.cost_fix[reg] = cost_fix_regional
            self.cost_fix_tax[reg] = cost_fix_tax_regional
            self.cost_fix_sub[reg] = cost_fix_Sub_regional
            self.decommissioned_capacity[reg] = decomcapacity_regional
            self.cost_decom[reg] = cost_decom_regional
            self.cost_variable[reg] = cost_variable_regional
            self.CO2_equivalent[reg] = CO2_equivalent_regional
            self.emission_cost[reg] = emission_cost_regional
            self.cost_inv_fvalue[reg] = cost_fvalue_regional
            self.production_annual[reg] = production_annual_regional
    def _calc_variable_planning_line(self):
        """
        Calculates all the cost and intermediate variables related to the
        inter-regional links in the planning mode: investment cost, total
        and decommissioned line capacity, fixed, decommissioning and
        variable trade costs.
        """
        self.cost_inv_line = {}
        self.line_accumulated_newcapacity = {}
        self.line_totalcapacity = {}
        self.cost_fix_line = {}
        self.line_decommissioned_capacity = {}
        self.cost_decom_line = {}
        for key in self.variables["line_newcapacity"].keys():
            # Investment cost of the new line capacity.
            self.cost_inv_line[key] = cp.multiply(
                self.sets.trade_data["line_inv"].loc[:, key].values,
                self.variables["line_newcapacity"][key],
            )
            # Total = surviving new builds + residual line capacity.
            self.line_accumulated_newcapacity[key] = line_newcap_accumulated(
                self.variables["line_newcapacity"][key],
                self.sets.glob_mapping["Carriers_glob"]["Carrier"],
                self.sets.main_years,
                self.sets.trade_data["line_lifetime"].loc[:, key],
            )
            self.line_totalcapacity[key] = (
                self.line_accumulated_newcapacity[key]
                + self.sets.trade_data["line_residual_cap"].loc[:, key].values
            )
            self.cost_fix_line[key] = cp.multiply(
                self.sets.trade_data["line_fixed_cost"].loc[:, key].values,
                self.line_totalcapacity[key],
            )
            # Line capacity retired each year and its decommissioning cost.
            self.line_decommissioned_capacity[key] = line_decomcap(
                self.variables["line_newcapacity"][key],
                self.sets.glob_mapping["Carriers_glob"]["Carrier"],
                self.sets.main_years,
                self.sets.trade_data["line_lifetime"].loc[:, key],
            )
            self.cost_decom_line[key] = cp.multiply(
                self.sets.trade_data["line_decom_cost"].loc[:, key].values,
                self.line_decommissioned_capacity[key],
            )

        # Variable trade cost depends on the imported flows on each line.
        self.cost_variable_line = line_varcost(
            self.sets.trade_data["line_var_cost"],
            self.variables["line_import"],
            self.sets.regions,
            self.sets.main_years,
            self.sets.time_steps,
            self.sets.lines_list,
        )
    def _calc_variable_operation(self):
        """
        Calculates all the cost components of the objective function and the
        intermediate variables in the operation mode, for each region.
        Capacity is fixed to the residual capacity (no investments).
        """
        self.totalcapacity = {}
        self.cost_fix = {}
        self.cost_fix_tax = {}
        self.cost_fix_sub = {}
        self.cost_variable = {}
        self.CO2_equivalent = {}
        self.emission_cost = {}
        self.production_annual = {}
        for reg in self.sets.regions:
            totalcapacity_regional = {}
            cost_fix_regional = {}
            cost_fix_tax_regional = {}
            cost_fix_Sub_regional = {}
            cost_variable_regional = {}
            CO2_equivalent_regional = {}
            emission_cost_regional = {}
            production_annual_regional = {}
            for key in self.sets.Technologies[reg].keys():
                if key != "Demand":
                    # Operation mode: capacity equals the residual capacity.
                    totalcapacity_regional[key] = (
                        self.sets.data[reg]["tech_residual_cap"].loc[:, key].values
                    )
                    (
                        cost_fix_regional[key],
                        cost_fix_tax_regional[key],
                        cost_fix_Sub_regional[key],
                    ) = fixcosts(
                        self.sets.data[reg]["tech_fixed_cost"][key],
                        totalcapacity_regional[key],
                        self.sets.data[reg]["fix_taxsub"]["Tax"][key],
                        self.sets.data[reg]["fix_taxsub"]["Sub"][key],
                    )
                    # Annual activity drives variable costs and emissions.
                    production_annual_regional[key] = annual_activity(
                        self.variables["productionbyTechnology"][reg][key],
                        self.sets.main_years,
                        self.sets.time_steps,
                    )
                    cost_variable_regional[key] = cp.multiply(
                        production_annual_regional[key],
                        self.sets.data[reg]["tech_var_cost"].loc[:, key],
                    )
                    # Transmission and Storage carry no emission accounting.
                    if key != "Transmission" and key != "Storage":
                        CO2_equivalent_regional[key] = cp.multiply(
                            production_annual_regional[key],
                            self.sets.data[reg]["specific_emission"].loc[:, key],
                        )
                        emission_cost_regional[key] = cp.multiply(
                            CO2_equivalent_regional[key],
                            self.sets.data[reg]["carbon_tax"].loc[:, key],
                        )
            self.totalcapacity[reg] = totalcapacity_regional
            self.cost_fix[reg] = cost_fix_regional
            self.cost_fix_tax[reg] = cost_fix_tax_regional
            self.cost_fix_sub[reg] = cost_fix_Sub_regional
            self.cost_variable[reg] = cost_variable_regional
            self.CO2_equivalent[reg] = CO2_equivalent_regional
            self.emission_cost[reg] = emission_cost_regional
            self.production_annual[reg] = production_annual_regional
    def _calc_variable_operation_line(self):
        """
        Calculates all the cost and intermediate variables related to the
        inter-regional links in the operation mode. Line capacity is fixed
        to the residual capacity (no investments).
        """
        self.line_totalcapacity = {}
        self.cost_fix_line = {}
        for key in self.sets.lines_list:
            self.line_totalcapacity[key] = (
                self.sets.trade_data["line_residual_cap"].loc[:, key].values
            )
            self.cost_fix_line[key] = cp.multiply(
                self.sets.trade_data["line_fixed_cost"].loc[:, key].values,
                self.line_totalcapacity[key],
            )
        # Variable trade cost depends on the imported flows on each line.
        self.cost_variable_line = line_varcost(
            self.sets.trade_data["line_var_cost"],
            self.variables["line_import"],
            self.sets.regions,
            self.sets.main_years,
            self.sets.time_steps,
            self.sets.lines_list,
        )
def _calc_variable_storage_SOC(self):
"""
Calculates the annual state of charge of the on grid storage technologies,
in the models with hourly temporal resolution
"""
self.storage_SOC = {}
for reg in get_regions_with_storage(self.sets):
self.storage_SOC[reg] = storage_state_of_charge(
self.sets.data[reg]["storage_initial_SOC"],
self.variables["usebyTechnology"][reg]["Storage"],
self.variables["productionbyTechnology"][reg]["Storage"],
self.sets.main_years,
self.sets.time_steps,
)
    def _balance_(self):
        """
        Creates the dictionaries for the annual total production by each
        technology, total consumption by each technology, total import,
        total export and total final demand of each energy carrier within
        each region. Each entry starts as a zero vector of length
        (years * timesteps) and is accumulated over the technologies.
        """
        self.totalusebycarrier = {}
        self.totalprodbycarrier = {}
        self.totalimportbycarrier = {}
        self.totalexportbycarrier = {}
        self.totaldemandbycarrier = {}

        for reg in self.sets.regions:
            totalusebycarrier_regional = {}
            totalprodbycarrier_regional = {}
            totalimportbycarrier_regional = {}
            totalexportbycarrier_regional = {}
            totaldemandbycarrier_regional = {}

            for carr in self.sets.glob_mapping["Carriers_glob"]["Carrier"]:
                # Zero-initialize the accumulators for this carrier.
                totalusebycarrier_regional[carr] = np.zeros(
                    (len(self.sets.main_years) * len(self.sets.time_steps),)
                )
                totalprodbycarrier_regional[carr] = np.zeros(
                    (len(self.sets.main_years) * len(self.sets.time_steps),)
                )
                totalimportbycarrier_regional[carr] = np.zeros(
                    (len(self.sets.main_years) * len(self.sets.time_steps),)
                )
                totalexportbycarrier_regional[carr] = np.zeros(
                    (len(self.sets.main_years) * len(self.sets.time_steps),)
                )
                totaldemandbycarrier_regional[carr] = np.zeros(
                    (len(self.sets.main_years) * len(self.sets.time_steps),)
                )

                for key in self.sets.Technologies[reg].keys():
                    for indx, tech in enumerate(self.sets.Technologies[reg][key]):
                        # Consumption side: does this technology take carr as input?
                        if (
                            carr
                            in self.sets.mapping[reg]["Carrier_input"]
                            .loc[
                                self.sets.mapping[reg]["Carrier_input"]["Technology"]
                                == tech
                            ]["Carrier_in"]
                            .values
                        ):
                            if key == "Conversion_plus":
                                # Multi-carrier technologies scale their use
                                # by the input carrier ratio.
                                totalusebycarrier_regional[carr] += cp.multiply(
                                    self.variables["usebyTechnology"][reg][key][
                                        :, indx
                                    ],
                                    self.sets.data[reg]["carrier_ratio_in"][
                                        (tech, carr)
                                    ].values,
                                )
                            elif key == "Demand":
                                # Final demand is exogenous data, not a variable.
                                totaldemandbycarrier_regional[carr] += self.sets.data[
                                    reg
                                ]["demand"][tech].values
                            elif key != "Supply":
                                totalusebycarrier_regional[carr] += self.variables[
                                    "usebyTechnology"
                                ][reg][key][:, indx]

                        # Production side: does this technology emit carr as output?
                        if (
                            carr
                            in self.sets.mapping[reg]["Carrier_output"]
                            .loc[
                                self.sets.mapping[reg]["Carrier_output"]["Technology"]
                                == tech
                            ]["Carrier_out"]
                            .values
                        ):
                            if key == "Conversion_plus":
                                totalprodbycarrier_regional[carr] += cp.multiply(
                                    self.variables["productionbyTechnology"][reg][key][
                                        :, indx
                                    ],
                                    self.sets.data[reg]["carrier_ratio_out"][
                                        (tech, carr)
                                    ].values,
                                )
                            else:
                                totalprodbycarrier_regional[carr] += self.variables[
                                    "productionbyTechnology"
                                ][reg][key][:, indx]

                if len(self.sets.regions) > 1:
                    # Imports are reduced by line efficiency; exports are
                    # counted at the sending end, before losses.
                    for key in self.variables["line_import"][reg].keys():
                        # Line names are direction-agnostic ("A-B" == "B-A"),
                        # so look the efficiency up under either spelling.
                        if "{}-{}".format(reg, key) in self.sets.lines_list:
                            line_eff = (
                                pd.concat(
                                    [
                                        self.sets.trade_data["line_eff"][
                                            ("{}-{}".format(reg, key), carr)
                                        ]
                                    ]
                                    * len(self.sets.time_steps)
                                )
                                .sort_index()
                                .values
                            )
                        elif "{}-{}".format(key, reg) in self.sets.lines_list:
                            line_eff = (
                                pd.concat(
                                    [
                                        self.sets.trade_data["line_eff"][
                                            ("{}-{}".format(key, reg), carr)
                                        ]
                                    ]
                                    * len(self.sets.time_steps)
                                )
                                .sort_index()
                                .values
                            )

                        totalimportbycarrier_regional[carr] += cp.multiply(
                            self.variables["line_import"][reg][key][
                                :,
                                list(
                                    self.sets.glob_mapping["Carriers_glob"]["Carrier"]
                                ).index(carr),
                            ],
                            line_eff,
                        )

                        totalexportbycarrier_regional[carr] += self.variables[
                            "line_export"
                        ][reg][key][
                            :,
                            list(
                                self.sets.glob_mapping["Carriers_glob"]["Carrier"]
                            ).index(carr),
                        ]

            self.totalusebycarrier[reg] = totalusebycarrier_regional
            self.totalprodbycarrier[reg] = totalprodbycarrier_regional
            self.totalimportbycarrier[reg] = totalimportbycarrier_regional
            self.totalexportbycarrier[reg] = totalexportbycarrier_regional
            self.totaldemandbycarrier[reg] = totaldemandbycarrier_regional
def _constr_balance(self):
"""
Ensures the energy balance of each carrier within each region
"""
for reg in self.sets.regions:
for carr in self.sets.glob_mapping["Carriers_glob"]["Carrier"]:
self.totalusebycarrier[reg][carr] = cp.reshape(
self.totalusebycarrier[reg][carr],
self.totalprodbycarrier[reg][carr].shape,
)
self.constr.append(
self.totalprodbycarrier[reg][carr]
+ self.totalimportbycarrier[reg][carr]
- self.totalusebycarrier[reg][carr]
- self.totalexportbycarrier[reg][carr]
- self.totaldemandbycarrier[reg][carr]
== 0
)
def _constr_trade_balance(self):
"""
Ensure sthe trade balance among any pairs of regions before the transmission
loss
"""
for reg in self.sets.regions:
for key in self.variables["line_import"][reg].keys():
self.constr.append(
self.variables["line_import"][reg][key]
- self.variables["line_export"][key][reg]
== 0
)
    def _constr_resource_tech_availability(self):
        """
        Guarantees the adequacy of the total capacity of each technology
        based on the technology capacity factor and resource availability.
        Storage technologies are excluded. Two constraints per year:
        a per-timeslice availability cap and an annual capacity-factor cap.
        """
        for reg in self.sets.regions:
            for key in self.variables["productionbyTechnology"][reg].keys():
                if key != "Storage":
                    for indx, year in enumerate(self.sets.main_years):
                        # Per-timeslice production ceiling from capacity,
                        # resource availability and timeslice length.
                        self.available_prod = available_resource_prod(
                            self.totalcapacity[reg][key][indx : indx + 1, :],
                            self.sets.data[reg]["res_capacity_factor"]
                            .loc[(year, slice(None)), (key, slice(None))]
                            .values,
                            self.timeslice_fraction,
                            self.sets.data[reg]["annualprod_per_unitcapacity"]
                            .loc[:, (key, slice(None))]
                            .values,
                        )
                        # Timeslice-level: production <= available resource.
                        self.constr.append(
                            self.available_prod
                            - self.variables["productionbyTechnology"][reg][key][
                                indx
                                * len(self.sets.time_steps) : (indx + 1)
                                * len(self.sets.time_steps),
                                :,
                            ]
                            >= 0
                        )
                        # Annual level: total production <= availability
                        # scaled by the technology capacity factor.
                        self.constr.append(
                            cp.multiply(
                                cp.sum(self.available_prod, axis=0),
                                self.sets.data[reg]["tech_capacity_factor"].loc[
                                    year, (key, slice(None))
                                ],
                            )
                            - cp.sum(
                                self.variables["productionbyTechnology"][reg][key][
                                    indx
                                    * len(self.sets.time_steps) : (indx + 1)
                                    * len(self.sets.time_steps),
                                    :,
                                ],
                                axis=0,
                            )
                            >= 0
                        )
    def _constr_line_availability(self):
        """
        Guarantees the adequacy of inter-regional link capacities based on
        their capacity factor. Two constraints per year and line: a
        per-timeslice flow ceiling and an annual capacity-factor ceiling.
        """
        for reg in self.sets.regions:
            for key, value in self.variables["line_import"][reg].items():
                for indx, year in enumerate(self.sets.main_years):
                    # Line names are direction-agnostic ("A-B" == "B-A"),
                    # so look the parameters up under either spelling.
                    if "{}-{}".format(reg, key) in self.sets.lines_list:
                        capacity_factor = (
                            self.sets.trade_data["line_capacity_factor"]
                            .loc[year, ("{}-{}".format(reg, key), slice(None))]
                            .values
                        )
                        capacity_to_production = (
                            self.sets.trade_data["annualprod_per_unitcapacity"]
                            .loc[:, ("{}-{}".format(reg, key), slice(None))]
                            .values
                        )
                        capacity = self.line_totalcapacity["{}-{}".format(reg, key)][
                            indx : indx + 1, :
                        ]
                    elif "{}-{}".format(key, reg) in self.sets.lines_list:
                        capacity_factor = (
                            self.sets.trade_data["line_capacity_factor"]
                            .loc[year, ("{}-{}".format(key, reg), slice(None))]
                            .values
                        )
                        capacity_to_production = (
                            self.sets.trade_data["annualprod_per_unitcapacity"]
                            .loc[:, ("{}-{}".format(key, reg), slice(None))]
                            .values
                        )
                        capacity = self.line_totalcapacity["{}-{}".format(key, reg)][
                            indx : indx + 1, :
                        ]

                    # Annual imported flow on this line for the given year.
                    line_import = cp.sum(
                        value[
                            indx
                            * len(self.sets.time_steps) : (indx + 1)
                            * len(self.sets.time_steps),
                            :,
                        ],
                        axis=0,
                    )
                    line_import = cp.reshape(line_import, capacity_to_production.shape)
                    capacity_factor.shape = capacity_to_production.shape

                    # Timeslice-level: flow <= capacity * conversion * length.
                    self.constr.append(
                        cp.multiply(
                            cp.multiply(capacity, capacity_to_production),
                            self.timeslice_fraction,
                        )
                        - value[
                            indx
                            * len(self.sets.time_steps) : (indx + 1)
                            * len(self.sets.time_steps),
                            :,
                        ]
                        >= 0
                    )
                    # Annual level: flow <= capacity * capacity factor.
                    self.constr.append(
                        cp.multiply(
                            cp.multiply(capacity, capacity_factor),
                            capacity_to_production,
                        )
                        - line_import
                        >= 0
                    )
def _constr_totalcapacity_regional(self):
"""
Defines the annual upper and lower limit on the total capacity
of each technology within each region
"""
for reg in self.sets.regions:
for key, value in self.totalcapacity[reg].items():
self.constr.append(
value - self.sets.data[reg]["tech_mintotcap"].loc[:, key].values
>= 0
)
self.constr.append(
value - self.sets.data[reg]["tech_maxtotcap"].loc[:, key] <= 0
)
def _constr_totalcapacity_overall(self):
"""
Defines the annual upper and lower limit on the aggregated total capacity
of each technology over all the regions
"""
self.totalcapacity_overall = _calc_variable_overall(
self.sets.glob_mapping["Technologies_glob"],
self.sets.regions,
self.sets.main_years,
self.sets.Technologies,
self.totalcapacity,
)
for tech, value in self.totalcapacity_overall.items():
self.constr.append(
value - self.sets.global_data["global_mintotcap"].loc[:, tech].values
>= 0
)
self.constr.append(
value - self.sets.global_data["global_maxtotcap"].loc[:, tech].values
<= 0
)
def _constr_totalcapacity_line(self):
"""
Defines the upper and lower limit on the annual total capacity of the
inter-regional links
"""
for key, value in self.line_totalcapacity.items():
self.constr.append(
value <= self.sets.trade_data["line_maxtotcap"][key].values
)
self.constr.append(
value >= self.sets.trade_data["line_mintotcap"][key].values
)
def _constr_newcapacity_regional(self):
"""
Defines the upper and lower limit on the annual new installed capacity
of each technology within each region
"""
for reg in self.sets.regions:
for key, value in self.variables["newcapacity"][reg].items():
self.constr.append(
value >= self.sets.data[reg]["tech_min_newcap"].loc[:, key]
)
self.constr.append(
value <= self.sets.data[reg]["tech_max_newcap"].loc[:, key]
)
def _constr_newcapacity_overall(self):
"""
Defines the upper and lower limit on the aggregated new installed capacity
of each technology over all the regions
"""
self.newcapacity_overall = _calc_variable_overall(
self.sets.glob_mapping["Technologies_glob"],
self.sets.regions,
self.sets.main_years,
self.sets.Technologies,
self.variables["newcapacity"],
)
for tech, value in self.newcapacity_overall.items():
self.constr.append(
value - self.sets.global_data["global_min_newcap"].loc[:, tech] >= 0
)
self.constr.append(
value - self.sets.global_data["global_max_newcap"].loc[:, tech] <= 0
)
    def _constr_newcapacity_line(self):
        """
        Defines the upper and lower limit on the annual new installed capacity
        of the inter-regional links
        """
        # NOTE(review): the key "newcapaciy" is spelled differently from the
        # "newcapacity" key used for regional technologies (see
        # _constr_newcapacity_regional). Confirm against the code that builds
        # self.variables whether this is the intentional key for line
        # variables or a propagated typo.
        for key, value in self.variables["newcapaciy"].items():
            self.constr.append(value <= self.sets.trade_data["line_max_newcap"][key])
            self.constr.append(value >= self.sets.trade_data["line_min_newcap"][key])
    def _constr_tech_efficiency(self):
        """
        Defines the relationship between the input and output activity of
        conversion, transmission and conversion-plus technologies:
        production == use * efficiency, for every category except Supply
        and Storage.
        """
        for reg in self.sets.regions:
            for key, value in self.variables["productionbyTechnology"][reg].items():
                if key != "Supply" and key != "Storage":
                    # Replicate the per-year efficiency rows once per
                    # timestep and sort, so the frame lines up row-for-row
                    # with the hourly production/use variables.
                    tech_efficiency_reshape = pd.concat(
                        [self.sets.data[reg]["tech_efficiency"][key]]
                        * len(self.sets.time_steps)
                    ).sort_index()
                    self.constr.append(
                        value
                        - cp.multiply(
                            self.variables["usebyTechnology"][reg][key],
                            tech_efficiency_reshape.values,
                        )
                        == 0
                    )
def _constr_prod_annual(self):
"""
Defines the upper and lower limit for the annual production of the technologies
within each region
"""
for reg in self.sets.regions:
for key, value in self.variables["productionbyTechnology"][reg].items():
production_annual = annual_activity(
value, self.sets.main_years, self.sets.time_steps,
)
if key != "Transmission" and key != "Storage":
self.constr.append(
production_annual
- self.sets.data[reg]["tech_max_production"].loc[
:, (key, slice(None))
]
<= 0
)
self.constr.append(
production_annual
- self.sets.data[reg]["tech_min_production"].loc[
:, (key, slice(None))
]
>= 0
)
def _constr_prod(self):
"""
Defines the upper and lower limit for the hourly production of the technologies
within each region
"""
for reg in self.sets.regions:
for key, value in self.variables["productionbyTechnology"][reg].items():
if key != "Transmission" and key != "Storage":
self.constr.append(
value
- self.sets.data[reg]["tech_max_production_h"].loc[
:, (key, slice(None))
]
<= 0
)
self.constr.append(
value
- self.sets.data[reg]["tech_min_production_h"].loc[
:, (key, slice(None))
]
>= 0
)
def _constr_prod_annual_overall(self):
"""
Defines the upper and lower limit for the aggregated annual production
of the technologies over all the regions
"""
self.production_overall = _calc_production_overall(
self.sets.glob_mapping["Technologies_glob"],
self.sets.regions,
self.sets.main_years,
self.sets.Technologies,
self.production_annual,
)
for tech, value in self.production_overall.items():
self.constr.append(
value - self.sets.global_data["global_min_production"].loc[:, tech] >= 0
)
self.constr.append(
value - self.sets.global_data["global_max_production"].loc[:, tech] <= 0
)
    def _constr_emission_cap(self):
        """
        Defines the CO2 emission cap within each region and over all the
        regions (the global cap is only applied in multi-region models).
        """
        self.regional_emission = {}
        # Initialized as numpy zeros; accumulating cvxpy expressions below
        # turns these into cvxpy expressions.
        self.global_emission = np.zeros(
            (len(self.sets.main_years) * len(self.sets.time_steps), 1)
        )
        for reg in self.sets.regions:
            self.regional_emission[reg] = np.zeros(
                (len(self.sets.main_years) * len(self.sets.time_steps), 1)
            )
            for key, value in self.CO2_equivalent[reg].items():
                self.regional_emission[reg] += cp.sum(value, axis=1)
            emission_cap = self.sets.data[reg]["emission_cap_annual"].values
            # In-place reshape so the cap vector broadcasts against the
            # regional emission expression.
            emission_cap.shape = self.regional_emission[reg].shape
            self.global_emission += self.regional_emission[reg]
            self.constr.append(emission_cap - self.regional_emission[reg] >= 0)
        if len(self.sets.regions) > 1:
            global_emission_cap = self.sets.global_data[
                "global_emission_cap_annual"
            ].values
            global_emission_cap.shape = self.global_emission.shape
            self.constr.append(global_emission_cap - self.global_emission >= 0)
    def _constr_storage_max_min_charge(self):
        """
        Defines the maximum and minimum allowed storage state of charge in
        each timestep of the year based on the total nominal capacity and the
        minimum state of charge factor.
        """
        for reg in get_regions_with_storage(self.sets):
            for indx, year in enumerate(self.sets.main_years):
                # SOC in every timeslice of this year must not exceed the
                # year's total storage capacity...
                self.constr.append(
                    self.totalcapacity[reg]["Storage"][indx : indx + 1, :]
                    - self.storage_SOC[reg][
                        indx
                        * len(self.sets.time_steps) : (indx + 1)
                        * len(self.sets.time_steps),
                        :,
                    ]
                    >= 0
                )
                # ...and must stay above capacity * minimum-SOC fraction.
                self.constr.append(
                    self.storage_SOC[reg][
                        indx
                        * len(self.sets.time_steps) : (indx + 1)
                        * len(self.sets.time_steps),
                        :,
                    ]
                    - cp.multiply(
                        self.totalcapacity[reg]["Storage"][indx : indx + 1, :],
                        self.sets.data[reg]["storage_min_SOC"].values[
                            indx : indx + 1, :
                        ],
                    )
                    >= 0
                )
    def _constr_storage_max_flow_in_out(self):
        """
        Defines the maximum and minimum allowed storage inflow and outflow in
        each hour of the year based on the total capacity, the capacity
        factor and the storage charge and discharge time.
        """
        for reg in get_regions_with_storage(self.sets):
            for indx, year in enumerate(self.sets.main_years):
                # Per-timeslice flow bounds derived from the year's total
                # capacity and the (dis)charge time, via storage_max_flow.
                max_storage_flow_in = storage_max_flow(
                    self.totalcapacity[reg]["Storage"][indx : indx + 1, :],
                    self.sets.data[reg]["storage_charge_time"].values,
                    self.sets.data[reg]["tech_capacity_factor"]["Storage"].values[
                        indx : indx + 1, :
                    ],
                    self.timeslice_fraction,
                )
                max_storage_flow_out = storage_max_flow(
                    self.totalcapacity[reg]["Storage"][indx : indx + 1, :],
                    self.sets.data[reg]["storage_discharge_time"].values,
                    self.sets.data[reg]["tech_capacity_factor"]["Storage"].values[
                        indx : indx + 1, :
                    ],
                    self.timeslice_fraction,
                )
                # Inflow (use) limited by the charge-side bound.
                self.constr.append(
                    max_storage_flow_in
                    - self.variables["usebyTechnology"][reg]["Storage"][
                        indx
                        * len(self.sets.time_steps) : (indx + 1)
                        * len(self.sets.time_steps),
                        :,
                    ]
                    >= 0
                )
                # Outflow (production) limited by the discharge-side bound.
                self.constr.append(
                    max_storage_flow_out
                    - self.variables["productionbyTechnology"][reg]["Storage"][
                        indx
                        * len(self.sets.time_steps) : (indx + 1)
                        * len(self.sets.time_steps),
                        :,
                    ]
                    >= 0
                )
    def _set_regional_objective_planning(self):
        """
        Calculates the regional objective function in the planning mode:
        per-region annual costs (investment, fixed, variable, decommission,
        taxes/subsidies, minus salvage) are discounted back to the first
        model year and summed over all regions.
        """
        self.totalcost_allregions = np.zeros((len(self.sets.main_years), 1))
        self.inv_allregions = 0
        # Exponents [0, -1, -2, ...]: the first year is undiscounted and
        # later years are divided by (1 + r)^k.
        years = -1 * np.arange(len(self.sets.main_years))
        for reg in self.sets.regions:
            totalcost_regional = np.zeros((len(self.sets.main_years), 1))
            for ctgry in self.sets.Technologies[reg].keys():
                if ctgry != "Demand":
                    totalcost_regional += cp.sum(
                        self.cost_inv_tax[reg][ctgry]
                        - self.cost_inv_sub[reg][ctgry]
                        + self.cost_fix[reg][ctgry]
                        + self.cost_fix_tax[reg][ctgry]
                        - self.cost_fix_sub[reg][ctgry]
                        + self.cost_variable[reg][ctgry]
                        + self.cost_decom[reg][ctgry]
                        - self.salvage_inv[reg][ctgry],
                        axis=1,
                    )
                    # Future value of investments is collected separately and
                    # added to the objective undiscounted (see
                    # _set_final_objective_*).
                    self.inv_allregions += self.cost_inv_fvalue[reg][ctgry]
                    if ctgry != "Transmission" and ctgry != "Storage":
                        totalcost_regional += cp.sum(
                            self.emission_cost[reg][ctgry], axis=1
                        )
            discount_factor = (
                1 + self.sets.data[reg]["discount_rate"]["Annual Discount Rate"].values
            )
            totalcost_regional_discounted = cp.multiply(
                totalcost_regional, np.power(discount_factor, years)
            )
            self.totalcost_allregions += totalcost_regional_discounted
def _set_regional_objective_operation(self):
"""
Calculates the regional objective function in the operation mode
"""
self.totalcost_allregions = 0
for reg in self.sets.regions:
totalcost_regional = 0
for ctgry in self.sets.Technologies[reg].keys():
if ctgry != "Demand":
totalcost_regional += cp.sum(
self.cost_fix[reg][ctgry]
+ self.cost_fix_tax[reg][ctgry]
- self.cost_fix_sub[reg][ctgry]
+ self.cost_variable[reg][ctgry]
)
if ctgry != "Transmission" and ctgry != "Storage":
totalcost_regional += cp.sum(
self.emission_cost[reg][ctgry], axis=1
)
self.totalcost_allregions += totalcost_regional
    def _set_lines_objective_planning(self):
        """
        Calculates the objective function of the inter-regional links in the
        planning mode
        """
        # Exponents [0, -1, -2, ...] for discounting later years.
        years = -1 * np.arange(len(self.sets.main_years))
        self.totalcost_lines = np.zeros((len(self.sets.main_years), 1))
        for line in self.sets.lines_list:
            self.totalcost_lines += cp.sum(
                self.cost_inv_line[line]
                + self.cost_fix_line[line]
                + self.cost_decom_line[line],
                axis=1,
            )
            # NOTE(review): this region loop is nested inside the line loop,
            # so every region's variable line costs are added once per line
            # (i.e. multiplied by len(lines_list)). Confirm whether it should
            # be a sibling loop instead.
            for reg in self.sets.regions:
                for key, value in self.cost_variable_line[reg].items():
                    self.totalcost_lines += cp.sum(value, axis=1)
        discount_factor_global = (
            1
            + self.sets.global_data["global_discount_rate"][
                "Annual Discount Rate"
            ].values
        )
        self.totalcost_lines_discounted = cp.multiply(
            self.totalcost_lines, np.power(discount_factor_global, years)
        )
    def _set_lines_objective_operation(self):
        """
        Calculates the objective function of the inter-regional links in the
        operation mode
        """
        self.totalcost_lines = np.zeros((len(self.sets.main_years), 1))
        for line in self.sets.lines_list:
            self.totalcost_lines += cp.sum(self.cost_fix_line[line], axis=1)
            # NOTE(review): as in _set_lines_objective_planning, this region
            # loop is nested inside the line loop, so variable line costs are
            # counted once per line -- confirm intent.
            for reg in self.sets.regions:
                for key, value in self.cost_variable_line[reg].items():
                    self.totalcost_lines += cp.sum(value, axis=1)
def _set_final_objective_singlenode(self):
"""
Calculates the overall objective function in a single-node model
"""
if self.sets.mode == "Planning":
self.global_objective = (
cp.sum(self.totalcost_allregions) + self.inv_allregions
)
elif self.sets.mode == "Operation":
self.global_objective = self.totalcost_allregions
def _set_final_objective_multinode(self):
"""
Calculates the overall objective function as the summation of all the
regional and inter-regional links objective functions in a multi-node
model
"""
if self.sets.mode == "Planning":
self.global_objective = (
cp.sum(self.totalcost_lines_discounted + self.totalcost_allregions)
+ self.inv_allregions
)
elif self.sets.mode == "Operation":
self.global_objective = self.totalcost_allregions + self.totalcost_lines
| StarcoderdataPython |
1697322 | <reponame>dasyad00/talking-color
from .camera import Camera
from .pi import PiCamera
from .webcam import Webcam
# Public API of the camera package re-exported for ``from ... import *``.
__all__ = ['Webcam', 'Camera', 'PiCamera']
| StarcoderdataPython |
1655622 | <filename>back/bookclub/db/queries/books.py
# List every book together with its genre name.
GET_ALL_BOOKS = """
SELECT b.title, b.author, b.slug, g.name as "genre"
FROM books b
INNER JOIN genres g on b.genre_id = g.id
"""
# Insert a book, resolving the genre name (:genre) to its id; inserts
# nothing if the genre does not exist.
INSERT_BOOK = """
INSERT INTO books (title, author, slug, genre_id)
SELECT :title, :author, :slug, id FROM genres WHERE name = :genre
"""
# Fetch a single book (with genre name) by its unique slug.
GET_BOOK_BY_SLUG = """
SELECT b.title, b.author, b.slug, g.name as "genre"
FROM books b
INNER JOIN genres g on b.genre_id = g.id
WHERE b.slug = :slug
"""
# All book choices of a club, newest first, including the choosing member.
GET_BOOK_CHOICES_BY_CLUB_ID = """
SELECT b.title, b.author, g.name as "genre", b.slug, m.username, bc.month, bc.year
FROM book_choices bc
INNER JOIN books b on b.id=bc.book_id
INNER JOIN members m on m.id=bc.member_id
INNER JOIN genres g on b.genre_id=g.id
WHERE bc.club_id = :club_id
ORDER BY bc.year DESC, bc.month DESC
"""
| StarcoderdataPython |
3217681 | <filename>Plot/text2files.py
#!../../anaconda2/bin/python
import pickle
# Load the pickled list of submission texts; the ``with`` block guarantees
# the pickle file handle is closed (the original leaked it).
with open("cessation_submissionTXT_2013-2018.p", "rb") as pickle_file:
    txtList = pickle.load(pickle_file)
print(txtList[0])
# Write each document to its own numbered file. Binary mode with explicit
# UTF-8 encoding is correct on both Python 2 and 3 (writing encoded bytes
# to a text-mode handle breaks on Python 3), and the context manager
# closes each file even if a write fails.
for i, text in enumerate(txtList):
    with open('cessation/' + str(i) + '.txt', 'wb') as fp:
        fp.write(text.encode('utf-8'))
| StarcoderdataPython |
4802819 | from stp_core.common.log import getlogger
from plenum.test.helper import sendReqsToNodesAndVerifySuffReplies
from plenum.test.node_catchup.helper import waitNodeDataEquality, \
waitNodeDataInequality, checkNodeDataForEquality
from plenum.test.pool_transactions.helper import \
disconnect_node_and_ensure_disconnected, reconnect_node_and_ensure_connected
# Do not remove the next import
from plenum.test.node_catchup.conftest import whitelist
logger = getlogger()
# Number of transactions sent while building the fixture pool.
txnCount = 5
# TODO: Refactor tests to minimize module-scoped fixtures.They make tests
# depend on each other
def testNodeCatchupAfterDisconnect(newNodeCaughtUp, txnPoolNodeSet,
                                   nodeSetWithNodeAddedAfterSomeTxns):
    """
    A node that disconnects after some transactions should eventually get the
    transactions which happened while it was disconnected
    :return:
    """
    looper, newNode, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    logger.debug("Stopping node {} with pool ledger size {}".
                 format(newNode, newNode.poolManager.txnSeqNo))
    # Disconnect (but do not stop) the new node so it misses traffic.
    disconnect_node_and_ensure_disconnected(
        looper, txnPoolNodeSet, newNode, stopNode=False)
    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    # Make sure new node got out of sync
    waitNodeDataInequality(looper, newNode, *txnPoolNodeSet[:-1])
    logger.debug("Starting the stopped node, {}".format(newNode))
    reconnect_node_and_ensure_connected(looper, txnPoolNodeSet, newNode)
    logger.debug("Waiting for the node to catch up, {}".format(newNode))
    # Catch-up should bring the reconnected node level with its peers.
    waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])
    logger.debug("Sending more requests")
    # The caught-up node must also keep up with fresh traffic.
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 10)
    checkNodeDataForEquality(newNode, *txnPoolNodeSet[:-1])
3329422 | <reponame>williamshen-nz/predicators
"""An approach that learns predicates from a teacher."""
from typing import Set, List, Optional, Tuple, Callable, Sequence
import dill as pkl
import numpy as np
from gym.spaces import Box
from predicators.src import utils
from predicators.src.approaches import NSRTLearningApproach, \
ApproachTimeout, ApproachFailure, RandomOptionsApproach
from predicators.src.structs import State, Predicate, ParameterizedOption, \
Type, Task, Dataset, GroundAtom, LowLevelTrajectory, InteractionRequest, \
InteractionResult, Action, GroundAtomsHoldQuery, GroundAtomsHoldResponse, \
Query
from predicators.src.torch_models import LearnedPredicateClassifier, \
MLPClassifier
from predicators.src.settings import CFG
class InteractiveLearningApproach(NSRTLearningApproach):
    """An approach that learns predicates from a teacher.

    Training alternates between semi-supervised learning from an offline
    dataset of ground-atom annotations and online interaction cycles in
    which the agent acts in a train task, queries the teacher about ground
    atoms, and relearns its predicates and NSRTs from the answers.
    """
    def __init__(self, initial_predicates: Set[Predicate],
                 initial_options: Set[ParameterizedOption], types: Set[Type],
                 action_space: Box, train_tasks: List[Task]) -> None:
        super().__init__(initial_predicates, initial_options, types,
                         action_space, train_tasks)
        # Track score of best atom seen so far.
        self._best_score = -np.inf
        # Initialize things that will be set correctly in offline learning.
        self._dataset = Dataset([], [])
        self._predicates_to_learn: Set[Predicate] = set()
        self._online_learning_cycle = 0
    def _get_current_predicates(self) -> Set[Predicate]:
        # Known predicates plus the ones currently being learned.
        return self._initial_predicates | self._predicates_to_learn
    ######################## Semi-supervised learning #########################
    def learn_from_offline_dataset(self, dataset: Dataset) -> None:
        # Special case: empty offline dataset. Annotations may be None.
        if not dataset.trajectories:
            return
        # First, go through the dataset's annotations and figure out the
        # set of predicates to learn. Note that their classifiers were
        # stripped away during the creation of the annotations.
        # Each per-state annotation is a pair of ground-atom sets:
        # index 0 holds atoms labeled false, index 1 atoms labeled true
        # (see the enumerate() in _relearn_predicates_and_nsrts).
        for ground_atom_traj in dataset.annotations:
            for ground_atom_sets in ground_atom_traj:
                assert len(ground_atom_sets) == 2
                for atom in ground_atom_sets[0] | ground_atom_sets[1]:
                    assert atom.predicate not in self._initial_predicates
                    self._predicates_to_learn.add(atom.predicate)
        self._dataset = Dataset(dataset.trajectories, dataset.annotations)
        # Learn predicates and NSRTs.
        self._relearn_predicates_and_nsrts(online_learning_cycle=None)
    def load(self, online_learning_cycle: Optional[int]) -> None:
        # Restore the extra state saved at the end of
        # _relearn_predicates_and_nsrts (the NSRTs are loaded by super()).
        super().load(online_learning_cycle)
        save_path = utils.get_approach_save_path_str()
        with open(f"{save_path}_{online_learning_cycle}.DATA", "rb") as f:
            save_dict = pkl.load(f)
        self._dataset = save_dict["dataset"]
        self._predicates_to_learn = save_dict["predicates_to_learn"]
        self._best_score = save_dict["best_score"]
    def _relearn_predicates_and_nsrts(
            self, online_learning_cycle: Optional[int]) -> None:
        """Learns predicates and NSRTs in a semi-supervised fashion."""
        print("\nRelearning predicates and NSRTs...")
        # Learn predicates
        for pred in self._predicates_to_learn:
            input_examples = []
            output_examples = []
            # Collect (state-vector, label) pairs for this predicate from
            # every annotated state in the dataset.
            for (traj, traj_annotations) in zip(self._dataset.trajectories,
                                                self._dataset.annotations):
                assert len(traj.states) == len(traj_annotations)
                for (state, state_annotation) in zip(traj.states,
                                                     traj_annotations):
                    assert len(state_annotation) == 2
                    # target_class 0 = atom labeled false, 1 = labeled true.
                    for target_class, examples in enumerate(state_annotation):
                        for atom in examples:
                            if not atom.predicate == pred:
                                continue
                            x = state.vec(atom.objects)
                            input_examples.append(x)
                            output_examples.append(target_class)
            num_positives = sum(y == 1 for y in output_examples)
            num_negatives = sum(y == 0 for y in output_examples)
            assert num_positives + num_negatives == len(output_examples)
            print(f"Generated {num_positives} positive and "
                  f"{num_negatives} negative examples for "
                  f"predicate {pred}")
            # Train MLP
            X = np.array(input_examples)
            Y = np.array(output_examples)
            model = MLPClassifier(X.shape[1],
                                  CFG.predicate_mlp_classifier_max_itr)
            model.fit(X, Y)
            # Construct classifier function, create new Predicate, and save it
            classifier = LearnedPredicateClassifier(model).classifier
            new_pred = Predicate(pred.name, pred.types, classifier)
            self._predicates_to_learn = \
                (self._predicates_to_learn - {pred}) | {new_pred}
        # Learn NSRTs via superclass
        self._learn_nsrts(self._dataset.trajectories, online_learning_cycle)
        # Save the things we need other than the NSRTs, which were already
        # saved in the above call to self._learn_nsrts()
        save_path = utils.get_approach_save_path_str()
        with open(f"{save_path}_{online_learning_cycle}.DATA", "wb") as f:
            pkl.dump(
                {
                    "dataset": self._dataset,
                    "predicates_to_learn": self._predicates_to_learn,
                    "best_score": self._best_score,
                }, f)
    ########################### Active learning ###############################
    def get_interaction_requests(self) -> List[InteractionRequest]:
        # We will create a single interaction request.
        # Determine the train task that we will be using.
        train_task_idx = self._select_interaction_train_task_idx()
        # Determine the action policy and termination function.
        act_policy, termination_function = \
            self._create_interaction_action_strategy(train_task_idx)
        # Determine the query policy.
        query_policy = self._create_interaction_query_policy(train_task_idx)
        return [
            InteractionRequest(train_task_idx, act_policy, query_policy,
                               termination_function)
        ]
    def _score_atom_set(self, atom_set: Set[GroundAtom],
                        state: State) -> float:
        """Score an atom set based on how much we would like to know the values
        of all the atoms in the set in the given state.
        Higher scores are better.
        """
        del state  # not currently used, but will be by future score functions
        if CFG.interactive_score_function == "frequency":
            return self._score_atom_set_frequency(atom_set)
        if CFG.interactive_score_function == "trivial":
            return 0.0  # always return the same score
        raise NotImplementedError("Unrecognized interactive_score_function:"
                                  f" {CFG.interactive_score_function}.")
    def _select_interaction_train_task_idx(self) -> int:
        # At the moment, we only have one way to select a train task idx:
        # choose one uniformly at random. In the future, we may want to
        # try other strategies. But one nice thing about random selection
        # is that we're not making a hard commitment to the agent having
        # control over which train task it gets to use.
        return self._rng.choice(len(self._train_tasks))
    def _create_interaction_action_strategy(
        self, train_task_idx: int
    ) -> Tuple[Callable[[State], Action], Callable[[State], bool]]:
        """Returns an action policy and a termination function."""
        if CFG.interactive_action_strategy == "glib":
            return self._create_glib_interaction_strategy(train_task_idx)
        if CFG.interactive_action_strategy == "random":
            return self._create_random_interaction_strategy(train_task_idx)
        raise NotImplementedError("Unrecognized interactive_action_strategy:"
                                  f" {CFG.interactive_action_strategy}")
    def _create_interaction_query_policy(
            self, train_task_idx: int) -> Callable[[State], Optional[Query]]:
        """Returns a query policy."""
        del train_task_idx  # unused right now, but future policies may use
        if CFG.interactive_query_policy == "strict_best_seen":
            return self._create_best_seen_query_policy(strict=True)
        if CFG.interactive_query_policy == "nonstrict_best_seen":
            return self._create_best_seen_query_policy(strict=False)
        raise NotImplementedError("Unrecognized interactive_query_policy:"
                                  f" {CFG.interactive_query_policy}")
    def _create_glib_interaction_strategy(
        self, train_task_idx: int
    ) -> Tuple[Callable[[State], Action], Callable[[State], bool]]:
        """Find the most interesting reachable ground goal and plan to it."""
        init = self._train_tasks[train_task_idx].init
        # Detect and filter out static predicates.
        static_preds = utils.get_static_preds(self._nsrts,
                                              self._predicates_to_learn)
        preds = self._predicates_to_learn - static_preds
        # Sample possible goals to plan toward.
        ground_atom_universe = utils.all_possible_ground_atoms(init, preds)
        # If there are no possible goals, fall back to random immediately.
        if not ground_atom_universe:
            print("No possible goals, falling back to random")
            return self._create_random_interaction_strategy(train_task_idx)
        possible_goals = utils.sample_subsets(
            ground_atom_universe,
            num_samples=CFG.interactive_num_babbles,
            min_set_size=1,
            max_set_size=CFG.interactive_max_num_atoms_babbled,
            rng=self._rng)
        # Sort the possible goals based on how interesting they are.
        # Note: we're using _score_atom_set_frequency here instead of
        # _score_atom_set because _score_atom_set in general could depend
        # on the current state. While babbling goals, we don't have any
        # current state because we don't know what the state will be if and
        # when we get to the goal.
        goal_list = sorted(possible_goals,
                           key=self._score_atom_set_frequency,
                           reverse=True)  # largest to smallest
        task_list = [Task(init, goal) for goal in goal_list]
        try:
            task, act_policy = self._find_first_solvable(task_list)
        except ApproachFailure:
            # Fall back to a random exploration strategy if no solvable task
            # can be found.
            print("No solvable task found, falling back to random")
            return self._create_random_interaction_strategy(train_task_idx)
        assert task.init is init
        def _termination_function(s: State) -> bool:
            # Stop the episode if we reach the goal that we babbled.
            return all(goal_atom.holds(s) for goal_atom in task.goal)
        return act_policy, _termination_function
    def _create_random_interaction_strategy(
        self, train_task_idx: int
    ) -> Tuple[Callable[[State], Action], Callable[[State], bool]]:
        """Sample and execute random initiable options until timeout."""
        random_options_approach = RandomOptionsApproach(
            self._get_current_predicates(), self._initial_options, self._types,
            self._action_space, self._train_tasks)
        task = self._train_tasks[train_task_idx]
        act_policy = random_options_approach.solve(task, CFG.timeout)
        def _termination_function(s: State) -> bool:
            # Termination is left to the environment, as in
            # CFG.max_num_steps_interaction_request.
            del s  # not used
            return False
        return act_policy, _termination_function
    def _create_best_seen_query_policy(
            self, strict: bool) -> Callable[[State], Optional[Query]]:
        """Only query if the atom has the best score seen so far."""
        def _query_policy(s: State) -> Optional[GroundAtomsHoldQuery]:
            # Decide whether to ask about each possible atom.
            ground_atoms = utils.all_possible_ground_atoms(
                s, self._predicates_to_learn)
            atoms_to_query = set()
            # Note: _best_score is updated inside the loop, so each atom is
            # compared against the best score seen so far *including* earlier
            # atoms of this same state.
            for atom in ground_atoms:
                score = self._score_atom_set({atom}, s)
                # Ask about this atom if it is the best seen so far.
                if (strict and score > self._best_score) or \
                   (not strict and score >= self._best_score):
                    atoms_to_query.add(atom)
                    self._best_score = score
            return GroundAtomsHoldQuery(atoms_to_query)
        return _query_policy
    def learn_from_interaction_results(
            self, results: Sequence[InteractionResult]) -> None:
        assert len(results) == 1
        result = results[0]
        for state, response in zip(result.states, result.responses):
            assert isinstance(response, GroundAtomsHoldResponse)
            # Convert the teacher's answers into the [false-atoms, true-atoms]
            # annotation format used by the offline dataset.
            state_annotation: List[Set[GroundAtom]] = [set(), set()]
            for query_atom, atom_holds in response.holds.items():
                state_annotation[atom_holds].add(query_atom)
            traj = LowLevelTrajectory([state], [])
            self._dataset.append(traj, [state_annotation])
        self._relearn_predicates_and_nsrts(
            online_learning_cycle=self._online_learning_cycle)
        self._online_learning_cycle += 1
    def _find_first_solvable(
            self,
            task_list: List[Task]) -> Tuple[Task, Callable[[State], Action]]:
        # Try the tasks in order and return the first one the planner solves.
        for task in task_list:
            try:
                print("Solving for policy...")
                policy = self.solve(task, timeout=CFG.timeout)
                return task, policy
            except (ApproachTimeout, ApproachFailure) as e:
                print(f"Approach failed to solve with error: {e}")
                continue
        raise ApproachFailure("Failed to sample a task that approach "
                              "can solve.")
    def _score_atom_set_frequency(self, atom_set: Set[GroundAtom]) -> float:
        """Score an atom set as inversely proportional to the number of
        examples seen during training."""
        count = 1  # Avoid division by 0
        for ground_atom_traj in self._dataset.annotations:
            for ground_atom_sets in ground_atom_traj:
                assert len(ground_atom_sets) == 2
                _, pos_examples = ground_atom_sets
                count += 1 if atom_set.issubset(pos_examples) else 0
        return 1.0 / count
| StarcoderdataPython |
1742321 | <gh_stars>1-10
import os.path
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.models import load_model
from data import *
from seq2seq_tool_wear.model import *
import numpy as np
import matplotlib.pyplot as plt
# Placeholder for the Keras model; assigned inside the training loop below.
model = None
# Length of the input window (number of past wear values) fed to the model.
INPUT_NUMBER = 2
# Number of future steps predicted per model call.
OUTPUT_NUMBER = 5
# ---- PREPARATION ----
# ---- need func --
def get_loss_value(real, pred):
    """Return ``(mse, mae)`` between ``real`` and ``pred``.

    ``pred`` is a forecast curve that may be padded with ``None`` at the
    beginning (before the first forecast exists) and at the end; the
    padding is excluded from the error computation.

    :param real: sequence of ground-truth values.
    :param pred: sequence of predictions, possibly ``None``-padded at
        either end.
    :raises ValueError: if no non-``None`` overlap remains.
    """
    # Count the leading None padding.
    lead = 0
    for value in pred:
        if value is None:
            lead += 1
        else:
            break
    # Count the trailing None padding. The original iterated pred[-i]
    # starting at i == 0, i.e. pred[0] -- an off-by-one that miscounted
    # whenever the curve also had leading padding; iterating the reversed
    # sequence fixes that.
    trail = 0
    for value in reversed(pred):
        if value is None:
            trail += 1
        else:
            break
    length = min(len(pred) - trail, len(real))
    if length <= lead:
        raise ValueError("no overlapping non-None values to score")
    real_seg = np.asarray(real[lead:length], dtype=float)
    pred_seg = np.asarray(pred[lead:length], dtype=float)
    diff = real_seg - pred_seg
    # Same definitions as sklearn's mean_squared_error / mean_absolute_error
    # with default arguments, without the sklearn dependency.
    return float(np.mean(diff ** 2)), float(np.mean(np.abs(diff)))
def plot_curve(start_point,model,predict_line_obj,direct_knife=2):
    """Roll the model forward from ``start_point`` over one cutter's data and
    update ``predict_line_obj`` with the averaged long-term forecast.

    Overlapping multi-step predictions for the same position are averaged
    incrementally via a running mean (life_total_data holds the values,
    cnt_total_data how many predictions contributed to each position).
    Reads the module-level globals tool_wear_data, INPUT_NUMBER and
    OUTPUT_NUMBER.
    """
    # LOOP TO PREDICT THE WHOLE CURVE
    print("Draw for",start_point)
    for knife_number in range(direct_knife-1,direct_knife):
        # Each cutter contributes 315 consecutive samples.
        knife_data = tool_wear_data[knife_number * 315:(knife_number + 1) * 315]
        # print("Current Knife shape:", knife_data.shape)
        START_POINT = start_point
        next = model.predict(knife_data[START_POINT:START_POINT + INPUT_NUMBER].reshape(1, INPUT_NUMBER, 1)).reshape(OUTPUT_NUMBER)
        # life book
        life_total_data = [None for index in range(800)]
        cnt_total_data = [0 for index in range(800)]
        # Seed the buffer with the measured input window.
        life_total_data[START_POINT:START_POINT + INPUT_NUMBER] = knife_data[START_POINT:START_POINT + INPUT_NUMBER]
        for _ in range(INPUT_NUMBER):
            cnt_total_data[START_POINT+_] += 1
        previous = next
        for every_start in range(knife_data.shape[0]):
            # predicted = model.predict(knife_data[every_start:every_start+2].reshape(1,2,1)).reshape(5)
            # Feed the model its own (averaged) previous outputs.
            previous = np.array(life_total_data[every_start + START_POINT:every_start + START_POINT + INPUT_NUMBER])
            # print(previous)
            next = model.predict(previous.reshape(1, INPUT_NUMBER, 1)).reshape(OUTPUT_NUMBER)
            for next_cur in range(OUTPUT_NUMBER):
                if life_total_data[every_start + START_POINT + INPUT_NUMBER + next_cur] == None:
                    # print("DIRECTLY GIVEN")
                    life_total_data[every_start + START_POINT + next_cur + INPUT_NUMBER] = next[next_cur]
                else:
                    # Incremental running mean of all forecasts that cover
                    # this position.
                    life_total_data[every_start + START_POINT + next_cur + INPUT_NUMBER] = \
                        (next[next_cur] + life_total_data[every_start + START_POINT + next_cur + INPUT_NUMBER] * cnt_total_data[
                        every_start + START_POINT + INPUT_NUMBER + next_cur]) \
                        / (cnt_total_data[every_start + START_POINT + INPUT_NUMBER + next_cur] + 1)
                cnt_total_data[every_start + START_POINT + INPUT_NUMBER + next_cur] += 1
        # plt.plot(knife_data, label="REAL")
        predict_line_obj.set_data([index for index in range(800)],life_total_data)
        plt.legend()
        # plt.savefig("../res/PURE_c%s.svg" % (knife_number + 1))
        # plt.show()
# ---- CONF ------
# TensorBoard log root and train/predict mode switch.
LOG_DIR = "MAX_KERAS_ROI_LOG/"
PREDICT = True
# ---- GEN Data ----
data = RNNSeriesDataSet(INPUT_NUMBER,OUTPUT_NUMBER)
# x,y = data.get_rnn_data()
x,y,test_x,test_y = data.get_separate_rnn_data()
# ---- shuffle -----
import random
# set random seeds so that we can get the same random data!
SEED = 12347
random.seed(SEED)
# Shuffle the training pairs with a fixed permutation for reproducibility.
index = [i for i in range(len(y))]
random.shuffle(index)
train_y = y[index]
train_x = x[index]
print("Size :",train_x.shape,train_y.shape,test_x.shape,test_y.shape)
# Train (or evaluate) one model configuration per DEPTH value.
for DEPTH in [5]:
    HIDDEN_DIM = 128
    # Names for logs, saved model, weights and checkpoints, derived from the
    # configuration so runs don't overwrite each other.
    TRAIN_NAME = "Simple_%s_%s_Separate_RNN_Depth_%s_hidden_dim_%s" % (INPUT_NUMBER,OUTPUT_NUMBER,DEPTH,HIDDEN_DIM)
    MODEL_NAME = "%s.kerasmodel" % (TRAIN_NAME)
    MODEL_WEIGHT_NAME = "%s.kerasweight" % (TRAIN_NAME)
    MODEL_CHECK_PT = "%s.kerascheckpts" % (TRAIN_NAME)
    # model = build_model(1, 2, HIDDEN_DIM, 3, 1, DEPTH)
    model = build_simple_RNN((INPUT_NUMBER,1),OUTPUT_NUMBER,1)
    print(model.summary())
    print("Model has been built.")
    if not PREDICT:
        print("In [TRAIN] mode")
        # CALLBACK
        tb_cb = TensorBoard(log_dir=LOG_DIR + TRAIN_NAME)
        ckp_cb = ModelCheckpoint(MODEL_CHECK_PT, monitor='val_loss', save_weights_only=True, verbose=1,
                                 save_best_only=True, period=5)
        # Resume from the checkpoint if one exists.
        if os.path.exists(MODEL_CHECK_PT):
            model.load_weights(MODEL_CHECK_PT)
            print("load checkpoint successfully")
        else:
            print("No checkpoints found !")
        print("Start to train the model")
        model.fit(train_x,train_y,batch_size=16,epochs=5000,callbacks=[tb_cb,ckp_cb],validation_data=(test_x,test_y),shuffle=True)
        model.model.save(MODEL_NAME)
        model.save_weights(MODEL_WEIGHT_NAME)
    else:
        # Evaluation mode: restore weights (or a full model) and compare
        # short-horizon forecasts against the real wear curves.
        if os.path.exists(MODEL_CHECK_PT):
            model.load_weights(MODEL_CHECK_PT)
            print("load checkpoint successfully")
        else:
            print("No checkpoints found! try to load model directly")
            model = load_model(MODEL_NAME)
        a = CNNMonitoredDataSet(INPUT_NUMBER,OUTPUT_NUMBER)
        tool_wear_data = a.cnn_max_predict_wear
        real_tool_wear_data = data.max_tool_wear_data
        for i in range(3):
            print("LOSS:",i,get_loss_value(real_tool_wear_data[i*315:(i+1)*315],tool_wear_data[i*315:(i+1)*315]))
        for knife_number in range(3):
            knife_data = tool_wear_data[knife_number*315:(knife_number+1)*315]
            real_knife_data = real_tool_wear_data[knife_number * 315:(knife_number + 1) * 315]
            print("Current Knife shape:",knife_data.shape)
            # The k-th forecast list is padded with None so that each
            # prediction is plotted at the position it forecasts.
            first_list = [None,None,]
            second_list = [None,None,None]
            third_list = [None,None,None,None]
            fourth_list = [None for _ in range(5)]
            fifth_list = [None for _ in range(6)]
            fig = plt.figure()
            for every_start in range(knife_data.shape[0]-5):
                predicted = model.predict(knife_data[every_start:every_start+INPUT_NUMBER].reshape(1,INPUT_NUMBER,1)).reshape(OUTPUT_NUMBER)
                first_list.append(predicted[0])
                second_list.append(predicted[1])
                third_list.append(predicted[2])
                fourth_list.append(predicted[3])
                fifth_list.append(predicted[4])
            plt.plot(real_knife_data,label="REAL")
            plt.scatter([i for i in range(len(first_list))], first_list, label="1st forecast value", s=2, marker="x")
            plt.scatter([i for i in range(len(second_list))], second_list, label="2nd forecast value", s=2, marker="o")
            plt.scatter([i for i in range(len(third_list))], third_list, label="3rd forecast value", s=2, marker="v")
            plt.scatter([i for i in range(len(fourth_list))], fourth_list, label="4th forecast value", s=2, marker=",")
            plt.scatter([i for i in range(len(fifth_list))], fifth_list, label="5th forecast value", s=2, marker=".")
            plt.legend()
            # calculate MSE
            print(get_loss_value(knife_data, first_list))
            print(get_loss_value(knife_data, second_list))
            print(get_loss_value(knife_data, third_list))
            print(get_loss_value(knife_data, fourth_list))
            print(get_loss_value(knife_data, fifth_list))
            plt.xlabel("Run")
            plt.ylabel("Tool wear ($\mu m$)")
            plt.savefig("../res/CNN_short_invovled_c%s.pdf"%(knife_number+1))
            plt.show()
# from matplotlib import pyplot as plt
# import numpy as np
# import matplotlib.animation as animation
#
# fig, ax = plt.subplots()
# line, = ax.plot(tool_wear_data[315*1:315*2],label="real")
# predict_line_obj, = ax.plot(tool_wear_data[315:315*2],label="RNN_curve")
#
# # plot_curve(model,15)
#
# ani = animation.FuncAnimation(fig, plot_curve, frames=300, fargs=(model, predict_line_obj))
# plt.legend()
# ani.save('LONG_TERM_cut_2.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
# plt.show()
| StarcoderdataPython |
3226920 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 3 11:36:51 2019
@author: <NAME>
"""
#brute force
#on the report i will also tell that we can achieve the result of this brute force by searching in B only
import numpy as np
from itertools import combinations
q=0.7 #default failure probability per component
p=1-q #default success probability per component
components=np.full(100,p)#initialize component with default probability
components_failure=np.full(100,q)
def series1_40():
print("1-40 Series")
prob_series=1
for x in range(40):
print(x)
prob_series=prob_series*(1-components_failure[x])
return prob_series;
def parallel41_50():
print("41-50 Parallel")
prob_parallelfail=1
for x in range(40,50):
print(x)
prob_parallelfail=prob_parallelfail*components_failure[x]
prob_parallel=1-prob_parallelfail
return prob_parallel;
def series51_80():
print("51-80 Series")
prob_series=1
for x in range(50,80):
print(x)
prob_series=prob_series*(1-components_failure[x])
return prob_series;
def parallel81_100():
print("81-100 Parallel")
prob_parallelfail=1
for x in range(80,100):
print(x)
prob_parallelfail=prob_parallelfail*components_failure[x]
prob_parallel=1-prob_parallelfail
return prob_parallel;
def system_operational(components_failure):
p_a=series1_40()*parallel41_50() #condition A
p_b=series51_80()*parallel81_100() #condition B
p_system=1-((1-p_a)*(1-p_b))
return p_a,p_b,p_system;
print(system_operational(components_failure)) | StarcoderdataPython |
3317345 | """Code for recognising disk drives."""
from .structs import DiskInfo, DiskType, DiskUUID
from .type_calculator import DiskTypeCalculator

# Names re-exported as this package's public API.
__all__ = [
    "DiskInfo",
    "DiskType",
    "DiskTypeCalculator",
    "DiskUUID",
]
| StarcoderdataPython |
1615023 | <filename>Verlet_IC_EMS.py
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 24 22:29:44 2016
@author: Admin
"""
# Initial conditions for a Verlet integration of a Sun/Earth(/Moon) system.
# The Moon rows are currently commented out, so only two bodies are active.
from __future__ import division
import numpy as np
AU = 149597871000  # astronomical unit [m]
Ms = 1.989e30  # solar mass [kg]
Me = 5.972e24  # Earth mass [kg]
Mm = 7.342e22  # lunar mass [kg] (unused while the Moon lines below stay commented out)
"Defining Variables"
N = 2  # number of active bodies (Sun, Earth)
t_max = 3.1556e7; t = 0  # total integration time [s] (~1 year) and running clock
dt_max = t_max/5000  # upper bound on the time step [s]
v = (2*np.pi*AU)/t_max  # Earth's circular orbital speed [m/s]
m = 384399000; vm = v + 1022  # Earth-Moon distance [m]; Moon speed = Earth speed + 1022 m/s
mass = np.array([Ms,Me])  # masses of the active bodies
pos = np.zeros((N,3))  # positions, one 3-vector per body; Sun stays at the origin
vel = np.zeros((N,3))  # velocities, one 3-vector per body
pos[1] = np.array([0,AU,0])  # Earth starts 1 AU along +y
#pos[2] = np.array([0,AU + m,0])
vel[1] = np.array([v,0,0])  # Earth starts moving along +x
#vel[2] = np.array([vm,0,0])
e = 0.0005*AU; n = 0.1  # NOTE(review): presumably a tolerance/softening length and a step-control factor -- confirm against the integrator that uses them
a = []; Ta = []  # accumulators, apparently filled by integration code elsewhere
b = []; Tb = []
c = []; Tc = []
Tsum = []
T = []; dT = [] | StarcoderdataPython |
3370298 | from dataclasses import dataclass
from .t_event_definition import TEventDefinition
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class TTerminateEventDefinition(TEventDefinition):
    """BPMN ``tTerminateEventDefinition`` complex type (terminate event)."""

    class Meta:
        # Schema type name bound to this class for XML (de)serialization.
        name = "tTerminateEventDefinition"
| StarcoderdataPython |
1710006 | <reponame>ndalsanto/pyorb<filename>pyorb_core/pde_problem/fom_problem.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 11 12:02:21 2018
@author: <NAME>
@email : <EMAIL>
"""
import pyorb_core.error_manager as em
import numpy as np
import pyorb_core.algebraic_utils as alg_ut
def default_theta_function( _param, _q ):
    # Placeholder for the q-th theta function theta_q(param); it only reports
    # an error via the error manager, forcing problem classes to install
    # their own theta functions.
    em.error_raiser( 'SystemError', 'default_theta_function', "You are using the default theta function, please provide specific ones for your problem " )
    pass
def default_full_theta_function( _param ):
    # Placeholder for the vector of all theta values at once; same contract
    # as default_theta_function above.
    em.error_raiser( 'SystemError', 'default_full_theta_function', "You are using the default full theta function, please provide specific ones for your problem " )
    pass
class fom_problem( ):
    """Full-order model (FOM) problem driven by an external engine.

    Wires together a parameter handler (sampling and bookkeeping of the
    problem parameters) and an external computational engine that performs
    the actual assembling and solving.  Subclasses must override
    define_theta_functions( ) to install the problem-specific theta
    (parameter-dependent coefficient) functions.
    """
    def __init__( self, _parameter_handler, _external_engine = None, _fom_specifics = None ):
        # The engine is optional at construction time; configure_fom( ) may
        # be called later with both the engine and the fom specifics.
        if _external_engine is not None and _fom_specifics is not None:
            self.configure_fom( _external_engine, _fom_specifics )
        self.define_theta_functions( )
        self.M_parameter_handler = _parameter_handler
        return
    # --- theta functions (parameter-dependent coefficients) ---------------
    def get_theta_a( self, _param, _q ):
        # q-th theta coefficient of the lhs operator at parameter _param
        return self.M_theta_a( _param, _q )
    def get_theta_f( self, _param, _q ):
        # q-th theta coefficient of the rhs at parameter _param
        return self.M_theta_f( _param, _q )
    def get_full_theta_a( self, _param ):
        # all lhs theta coefficients at once
        return self.M_full_theta_a( _param )
    def get_full_theta_f( self, _param ):
        # all rhs theta coefficients at once
        return self.M_full_theta_f( _param )
    def define_theta_functions( self ):
        # Must be overridden; the base class only reports an error.
        em.error_raiser( 'SystemError', 'fom_problem::define_theta_functions', "You should define the theta function specific for your problem in the inherited class." )
        return
    # initialize anything which needs to be specified for using the external engine
    def configure_fom( self, _external_engine, _fom_specifics ):
        self.M_external_engine = _external_engine
        self.set_fom_specifics( _fom_specifics )
        self.M_external_engine.initialize_fom_simulation( _fom_specifics )
        self.M_configured_fom = True
        self.assemble_fom_natural_norm_matrix( self.M_fom_specifics )
        return
    def assemble_fom_natural_norm_matrix( self, _fom_specifics ):
        # Builds the matrix later used by compute_natural_norm( ).
        self.check_configured_fom( )
        self.M_natural_norm_matrix = self.M_external_engine.assemble_fom_natural_norm_matrix( self.M_fom_specifics )
    def set_fom_specifics( self, _fom_specifics ):
        self.M_fom_specifics = _fom_specifics
        return
    def update_fom_specifics( self, _fom_specifics_update ):
        # Merge the given keys into the fom specifics dictionary.
        # self.M_fom_specifics.update( _fom_specifics_update )
        print( "Updating the fom specifics dictionary" )
        for key in _fom_specifics_update:
            self.M_fom_specifics[key] = _fom_specifics_update[key]
        return
    def clear_fom_specifics( self, _fom_specifics_update ):
        # Remove the given keys from the fom specifics dictionary.
        print( "Clearing the fom specifics dictionary" )
        for key in _fom_specifics_update:
            self.M_fom_specifics.pop( key )
        return
    def check_configured_fom( self ):
        # Guard invoked by every method that needs the external engine.
        if self.M_configured_fom == False:
            em.error_raiser( 'SystemError', 'fom_problem::retrieve_fom_data', "The fom problem has not been configured." )
        return
    # def compute_natural_norm( self, _solution ):
    #     self.check_configured_fom( )
    #     sol = self.M_external_engine.compute_natural_norm( _solution, self.M_fom_specifics )
    #
    #     return sol
    def solve_fom_problem( self, _param ):
        # Solve the full-order problem for the given parameter value.
        self.check_configured_fom( )
        sol = self.M_external_engine.solve_parameter( _param, self.M_fom_specifics )
        return sol
    def compute_fom_product( self, _basis, _q, _operator ):
        # Project the q-th affine component of _operator onto _basis.
        print( "Performing compute_fom_product" )
        product = self.M_external_engine.build_rb_affine_component( _basis, _q, _operator, self.M_fom_specifics )
        return product.array
    # --- affine components of the operators -------------------------------
    def retrieve_fom_affine_components( self, _operator, _num_affine_components ):
        self.check_configured_fom( )
        return self.M_external_engine.build_fom_affine_components( _operator, _num_affine_components, self.M_fom_specifics )
    def retrieve_rb_affine_components( self, _operator ):
        self.check_configured_fom( )
        return self.M_external_engine.build_rb_affine_components( _operator, self.M_fom_specifics )
    # NOTE(review): the mutable defaults below are shared across calls; they
    # look read-only here, but confirm the engine does not mutate them.
    def assemble_fom_matrix( self, _param, _elements=[], _indices=[] ):
        self.check_configured_fom( )
        return self.M_external_engine.assemble_fom_matrix( _param, self.M_fom_specifics, _elements, _indices )
    def assemble_fom_rhs( self, _param, _elements=[], _indices=[] ):
        self.check_configured_fom( )
        return self.M_external_engine.assemble_fom_rhs( _param, self.M_fom_specifics, _elements, _indices )
    # --- parameter handling (delegates to the parameter handler) ----------
    def get_num_parameters( self ):
        return self.M_parameter_handler.get_num_parameters( )
    def generate_parameter( self ):
        return self.M_parameter_handler.generate_parameter( )
    def get_parameter( self ):
        self.M_current_parameter = self.M_parameter_handler.get_parameter( )
        return self.M_current_parameter
    def get_parameter_handler( self ):
        return self.M_parameter_handler
    # --- (M)DEIM support --------------------------------------------------
    def find_mdeim_elements_fom_specifics( self, _indices_mat ):
        self.check_configured_fom( )
        return self.M_external_engine.find_mdeim_elements_fom_specifics( self.M_fom_specifics, _indices_mat )
    def find_deim_elements_fom_specifics( self, _indices ):
        self.check_configured_fom( )
        return self.M_external_engine.find_deim_elements_fom_specifics( self.M_fom_specifics, _indices )
    def compute_natural_norm( self, _uh ):
        # sqrt( uh^T * A * uh ) with A the natural-norm matrix assembled above.
        Auh = alg_ut.sparse_matrix_vector_mul( self.M_natural_norm_matrix, _uh )
        uh_norm = _uh.T.dot( Auh )
        return np.sqrt( uh_norm )
    # --- class-level defaults ---------------------------------------------
    M_parameter_handler = None
    M_configured_fom = False
    # engine used to perform offline computation relying on an external engine
    M_external_engine = None
    M_fom_specifics = None
    M_natural_norm_matrix = None
    # theta functions
    M_theta_a = default_theta_function
    M_theta_f = default_theta_function
    M_full_theta_a = default_full_theta_function
    M_full_theta_f = default_full_theta_function
    M_current_parameter = np.zeros( 0 )
| StarcoderdataPython |
1683331 | '''
Created on 12 April 2017
@author: <NAME>
Setup script
'''
#from setuptools import setup, find_packages
from distutils.core import setup
# NOTE(review): distutils' setup() does not act on install_requires (only
# setuptools does); the commented-out setuptools import above suggests the
# intent -- confirm which backend should be used.
setup(name='tools21cm',
      version='2.0.1',
      author='<NAME>',
      author_email='<EMAIL>',
      # map the importable package name "tools21cm" onto the "t2c" source dir
      package_dir = {'tools21cm' : 't2c'},
      packages=['tools21cm'],
      # package_data={'share':['*'],},
      # ship the bundled input data files with the package
      package_data={'tools21cm': ['input_data/*']},
      install_requires=['numpy','scipy','scikit-learn','scikit-image', 'tqdm', 'joblib'],
      # include_package_data=True,
      )
| StarcoderdataPython |
3201750 | <reponame>Jasha10/pyright<filename>packages/pyright-internal/src/tests/samples/loops5.py<gh_stars>1000+
# This sample tests a case where a potential type alias
# ("a") is involved in a recursive type dependency
# ("a" depends on "test" which depends on "a").
# pyright: strict
test = {"key": "value"}
while True:
a = test
reveal_type(a, expected_text="dict[str, str]")
test = a.copy()
reveal_type(test, expected_text="dict[str, str]")
| StarcoderdataPython |
3277276 | import sys
from PyQt4 import QtCore, QtNetwork, QtGui
import os
from win32file import CreateFile, ReadDirectoryChangesW
import win32con
PORT = 8000  # TCP port of the replay server (also embedded in the gpgnet URL below)
DEFAULT_PATH = r"C:\ProgramData\FAForever\bin"  # FA install dir: watched for game.log and used to launch the game
class FileWatcherThread(QtCore.QThread):
    """Background thread that watches the FA directory for writes to
    game.log and emits the most recent "Beat:" payload found in the log."""
    # Emitted with the beat payload, or "nincsmeg" (apparently Hungarian for
    # "not there") when no beat line was found.
    fileChanged = QtCore.pyqtSignal(str)
    def __init__(self):
        QtCore.QThread.__init__(self)
        self.path_to_watch = DEFAULT_PATH
        # Directory handle consumed by ReadDirectoryChangesW in run().
        self.hDir = CreateFile (
            self.path_to_watch,
            0x0001, # dunno, magic. FILE_LIST_DIRECTORY
            win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE | win32con.FILE_SHARE_DELETE,
            None,
            win32con.OPEN_EXISTING,
            win32con.FILE_FLAG_BACKUP_SEMANTICS,
            None
        )
    def setPath(self,string):
        # NOTE(review): this only updates the attribute; self.hDir was opened
        # in __init__ from the old path, so the actual watch does not move.
        self.path_to_watch = string
    def run(self):
        # Block on directory-change notifications forever; whenever game.log
        # was written, re-scan it for the last "warning: Beat:" line.
        while True:
            results = ReadDirectoryChangesW (
                self.hDir,
                1024,
                False,
                win32con.FILE_NOTIFY_CHANGE_LAST_WRITE,
                None,
                None
            )
            # (3, name) is the "modified" action code from ReadDirectoryChangesW.
            if (3,r'game.log') in results:
                f = open(r'C:\ProgramData\FAForever\bin\game.log', "r");
                f.flush()
                foundBeat = False
                # Re-read from offset 256 until a Beat line shows up, or bail
                # out on a seek failure.
                while not foundBeat:
                    try:
                        f.seek(256)
                    except IOError:
                        break
                    for line in f.readlines():
                        pos = line.find("warning: Beat:")
                        print line
                        if pos>=0:
                            foundBeat = line[pos:]
                            break
                if foundBeat:
                    # Strip the "warning: Beat:" prefix (14 chars) before emitting.
                    self.fileChanged.emit(foundBeat[14:])
                else:
                    self.fileChanged.emit("nincsmeg")
class ReplayClient(QtGui.QMainWindow):
    """Main window of the ReplayCommander client.

    Lets the user launch Forged Alliance against a live-replay server,
    connect a control socket to that server, and send START/STOP (or free
    text) commands while showing server ticks in the status bar.
    """
    def __init__(self, *args, **kwargs):
        QtGui.QMainWindow.__init__(self, *args, **kwargs)
        self.socket = QtNetwork.QTcpSocket()
        self.watcherThread = FileWatcherThread()
        self.setupGUI()
        self.watcherThread.fileChanged.connect(self.fileChanged)
    @QtCore.pyqtSlot(str)
    def fileChanged(self,string):
        # Slot for FileWatcherThread.fileChanged: surface the beat value.
        self.statusBar().showMessage("Beat: " + string)
    def startWatcherThread(self):
        self.watcherThread.start()
    def setupGUI(self):
        # Build all widgets, wire their signals, and lay out the two group
        # boxes (FA launcher on top, replay server manager below).
        self.setWindowTitle("ReplayCommander Client")
        self.commandLine = QtGui.QLineEdit()
        self.serverLine = QtGui.QLineEdit("localhost")
        self.connectButton = QtGui.QPushButton("Connect")
        self.startButton = QtGui.QPushButton("Start")
        self.startButton.setDisabled(True)
        self.startButton.clicked.connect(self.sendStart)
        self.stopButton = QtGui.QPushButton("Stop")
        self.stopButton.setDisabled(True)
        self.stopButton.clicked.connect(self.sendStop)
        self.connectButton.clicked.connect(self.connectToServer)
        self.commandLine.returnPressed.connect(self.issueRequest)
        self.socket.readyRead.connect(self.readFromServer)
        self.socket.disconnected.connect(self.serverHasStopped)
        self.FApathLine = QtGui.QLineEdit(os.path.join(DEFAULT_PATH,"ForgedAlliance.exe"))
        self.FApathButton = QtGui.QPushButton("find fa.exe")
        self.FApathButton.clicked.connect(self.findFa)
        self.FAlauncherButton = QtGui.QPushButton("Connect to livereplay")
        self.FAlauncherButton.clicked.connect(self.startFa)
        self.faLauncherBox = QtGui.QGroupBox("Fa launcher")
        faLauncherBoxLayout = QtGui.QGridLayout()
        faLauncherBoxLayout.addWidget(self.FApathLine,0,0)
        faLauncherBoxLayout.addWidget(self.FApathButton,0,1)
        faLauncherBoxLayout.addWidget(self.FAlauncherButton,1,0,1,2)
        self.faLauncherBox.setLayout(faLauncherBoxLayout)
        self.replayCommanderBox = QtGui.QGroupBox("Replay server manager")
        replayCommanderBoxLayout = QtGui.QGridLayout()
        replayCommanderBoxLayout.addWidget(self.startButton,0,0)
        replayCommanderBoxLayout.addWidget(self.stopButton,0,1)
        replayCommanderBoxLayout.addWidget(QtGui.QLabel(),1,0)
        replayCommanderBoxLayout.addWidget(self.serverLine,2,0)
        replayCommanderBoxLayout.addWidget(self.connectButton,2,1)
        self.replayCommanderBox.setLayout(replayCommanderBoxLayout)
        layout = QtGui.QGridLayout()
        layout.addWidget(self.faLauncherBox,0,0,1,2)
        layout.addWidget(self.replayCommanderBox,1,0,1,2)
        self.commandLine.setFocus()
        window = QtGui.QWidget()
        window.setLayout(layout)
        self.setCentralWidget(window)
    def findFa(self):
        # Let the user browse for ForgedAlliance.exe and point the watcher
        # at its directory.
        file = QtGui.QFileDialog()
        filename = file.getOpenFileName(None,"Search ForgedAlliance.exe for me pls <3","","ForgedAlliance.exe","ForgedAlliance.exe")
        if filename:
            self.FApathLine.setText(filename)
            self.watcherThread.setPath(os.path.basename(filename))
    def startFa(self):
        # Build a random 6-hex-digit session id and launch FA pointed at the
        # server's gpgnet live-replay URL.
        sid = ""
        for i in os.urandom(3):
            sid+=str(hex(ord(i)).split("x")[1])
        command = []
        command.append(os.path.join(DEFAULT_PATH,"ForgedAlliance.exe"))
        command.append("/replay")
        command.append("gpgnet://" + str(self.serverLine.text()) + ":" + str(PORT) + "/" + sid)
        command.append("/log game.log")
        command.append("/init init_faf.lua")
        command.append("/showlog")
        import subprocess
        subprocess.Popen(command,cwd=DEFAULT_PATH)
        #self.startWatcherThread()
    def connectToServer(self):
        # Open the control socket and, on success, flip the UI into the
        # connected state (Start/Stop enabled, address locked).
        address = self.serverLine.text()
        if address:
            self.socket.connectToHost(address, PORT)
            if self.socket.isOpen():
                self.connectButton.setEnabled(False)
                self.serverLine.setEnabled(False)
                self.startButton.setEnabled(True)
                self.stopButton.setEnabled(True)
                self.statusBar().showMessage("Connected to: " + address + ":" + str(PORT))
    def issueRequest(self):
        # Send whatever is in the command line as a raw command.
        command = self.commandLine.text()
        self.commandLine.clear()
        self.socket.writeData(command)
        print "Command sent: " + command
    def sendStart(self):
        self.socket.writeData("START")
    def sendStop(self):
        self.socket.writeData("STOP")
    def readFromServer(self):
        # Parse "tick:<n>" messages (10 ticks per second) into h:mm:ss for
        # the status bar.
        if self.socket.bytesAvailable > 0:
            data = self.socket.readAll()
            if data.startsWith("tick:"):
                seconds = 1 + int(data.split(":")[1]) / 10
                m, s = divmod(seconds, 60)
                h, m = divmod(m, 60)
                self.statusBar().showMessage("You should be at " + "%d:%02d:%02d" % (h, m, s))
    def serverHasStopped(self):
        # Socket dropped: allow reconnecting.
        self.socket.close()
        self.connectButton.setEnabled(True)
# Standalone entry point: build the Qt application and show the main window.
app = QtGui.QApplication(sys.argv)
form = ReplayClient()
form.show()
app.exec_() | StarcoderdataPython |
172458 | # placeholder to make setup(..., include_package_data=True) include this folder
| StarcoderdataPython |
1765915 | <gh_stars>0
# Multiplication-table printer: keeps asking for a number and prints its
# times table (1 through 10); a negative number ends the program.
while True:
    valor = int(input('Quer ver a tabuada de qual valor? '))
    print('-' * 50)
    if valor < 0:
        break
    linhas = [f'{valor} X {mult} = {valor * mult}' for mult in range(1, 11)]
    print('\n'.join(linhas))
    print('-' * 50)
print('PROGRAMA TABUADA ENCERRADO. Volte sempre!')
| StarcoderdataPython |
3306470 | <reponame>sanchitcop19/web-api-async<filename>vizier/api/webservice/task.py
# Copyright (C) 2017-2019 New York University,
# University at Buffalo,
# Illinois Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vizier Task API - Implements all methods of the API to interact with running
tasks in vizier projects.
"""
from vizier.core.timestamp import to_datetime
import vizier.api.serialize.deserialize as deserialize
import vizier.api.serialize.labels as labels
import vizier.viztrail.module.base as states
class VizierTaskApi(object):
    """The Vizier task API implements the methods that interact with active
    tasks in vizier projects.
    """
    def __init__(self, engine):
        """Initialize the API components.

        Parameters
        ----------
        engine: vizier.engine.base.VizierEngine
            Instance of the API engine
        """
        self.engine = engine
    def update_task_state(self, task_id, state, body):
        """Update the state of a given task. The contents of the request body
        depend on the value of the new task state.

        Raises a ValueError if the request body is invalid. The result is None
        if the task is unknown. Otherwise, the result is a dictionary with a
        single result value. The result is 0 if the task state did not change.
        A positive value signals a successful task state change.

        Parameters
        ----------
        task_id: string
            Unique task identifier
        state: int
            The new state of the task
        body: dict
            State-dependent additional information

        Returns
        -------
        dict
        """
        # Depending on the requested state change call the respective method
        # after extracting additional parameters from the request body.
        result = None
        if state == states.MODULE_RUNNING:
            # RUNNING only carries an optional start timestamp.
            if labels.STARTED_AT in body:
                result = self.engine.set_running(
                    task_id=task_id,
                    started_at=to_datetime(body[labels.STARTED_AT])
                )
            else:
                result = self.engine.set_running(task_id=task_id)
        elif state == states.MODULE_ERROR:
            # ERROR carries optional finish timestamp and task outputs.
            finished_at = None
            if labels.FINISHED_AT in body:
                finished_at = to_datetime(body[labels.FINISHED_AT])
            outputs = None
            if labels.OUTPUTS in body:
                outputs = deserialize.OUTPUTS(body[labels.OUTPUTS])
            result = self.engine.set_error(
                task_id=task_id,
                finished_at=finished_at,
                outputs=outputs
            )
        elif state == states.MODULE_SUCCESS:
            # SUCCESS additionally carries the module provenance record.
            finished_at = None
            if labels.FINISHED_AT in body:
                finished_at = to_datetime(body[labels.FINISHED_AT])
            outputs = None
            if labels.OUTPUTS in body:
                outputs = deserialize.OUTPUTS(body[labels.OUTPUTS])
            provenance = None
            if labels.PROVENANCE in body:
                provenance = deserialize.PROVENANCE(body[labels.PROVENANCE])
            result = self.engine.set_success(
                task_id=task_id,
                finished_at=finished_at,
                outputs=outputs,
                provenance=provenance
            )
        else:
            raise ValueError('invalid state change')
        # Create state change result
        if not result is None:
            return {labels.RESULT: result}
        return None
| StarcoderdataPython |
3288373 | #
# Copyright 2022 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import humps
from typing import Optional
from hsml import util
from hsml.constants import INFERENCE_BATCHER
class InferenceBatcher:
    """Configuration of an inference batcher for a predictor.

    # Arguments
        enabled: Whether the inference batcher is enabled or not. The default value is `false`.
        max_batch_size: Maximum requests batch size.
        max_latency: Maximum latency for request batching.
        timeout: Maximum waiting time for request batching.

    # Returns
        `InferenceBatcher`. Configuration of an inference batcher.
    """

    def __init__(
        self,
        enabled: Optional[bool] = None,
        max_batch_size: Optional[int] = None,
        max_latency: Optional[int] = None,
        timeout: Optional[int] = None,
    ):
        # Fall back to the platform default only when the caller did not
        # express a preference; the remaining fields simply stay None.
        self._enabled = enabled if enabled is not None else INFERENCE_BATCHER.ENABLED
        self._max_batch_size = max_batch_size
        self._max_latency = max_latency
        self._timeout = timeout

    def describe(self):
        """Print a description of the inference batcher"""
        util.pretty_print(self)

    @classmethod
    def from_response_json(cls, json_dict):
        """Build an InferenceBatcher from a (camelCase) REST response dict."""
        json_decamelized = humps.decamelize(json_dict)
        return cls.from_json(json_decamelized)

    @classmethod
    def from_json(cls, json_decamelized):
        """Build an InferenceBatcher from an already decamelized dict."""
        return InferenceBatcher(*cls.extract_fields_from_json(json_decamelized))

    @classmethod
    def extract_fields_from_json(cls, json_decamelized):
        """Return the (enabled, max_batch_size, max_latency, timeout) tuple
        extracted from a decamelized response dict (with or without the
        nested ``batching_configuration`` wrapper)."""
        config = (
            json_decamelized.pop("batching_configuration")
            if "batching_configuration" in json_decamelized
            else json_decamelized
        )
        enabled = util.extract_field_from_json(config, ["batching_enabled", "enabled"])
        max_batch_size = util.extract_field_from_json(config, "max_batch_size")
        max_latency = util.extract_field_from_json(config, "max_latency")
        timeout = util.extract_field_from_json(config, "timeout")
        return enabled, max_batch_size, max_latency, timeout

    def update_from_response_json(self, json_dict):
        """Refresh this instance in place from a REST response dict."""
        json_decamelized = humps.decamelize(json_dict)
        # Bug fix: unpack the extracted fields tuple into __init__ (it was
        # previously passed whole as the `enabled` argument).
        self.__init__(*self.extract_fields_from_json(json_decamelized))
        return self

    def json(self):
        """Serialize to a JSON string via the project's ML encoder."""
        return json.dumps(self, cls=util.MLEncoder)

    def to_dict(self):
        """Serialize to the camelCase payload expected by the REST API,
        omitting unset optional fields."""
        # Local renamed from `json` so it no longer shadows the json module.
        batching = {"batchingEnabled": self._enabled}
        if self._max_batch_size is not None:
            batching["maxBatchSize"] = self._max_batch_size
        if self._max_latency is not None:
            batching["maxLatency"] = self._max_latency
        if self._timeout is not None:
            batching["timeout"] = self._timeout
        return {"batchingConfiguration": batching}

    @property
    def enabled(self):
        """Whether the inference batcher is enabled or not."""
        return self._enabled

    @enabled.setter
    def enabled(self, enabled: bool):
        self._enabled = enabled

    @property
    def max_batch_size(self):
        """Maximum requests batch size."""
        return self._max_batch_size

    @max_batch_size.setter
    def max_batch_size(self, max_batch_size: int):
        self._max_batch_size = max_batch_size

    @property
    def max_latency(self):
        """Maximum latency."""
        return self._max_latency

    @max_latency.setter
    def max_latency(self, max_latency: int):
        self._max_latency = max_latency

    @property
    def timeout(self):
        """Maximum timeout."""
        return self._timeout

    @timeout.setter
    def timeout(self, timeout: int):
        self._timeout = timeout

    def __repr__(self):
        return f"InferenceBatcher(enabled: {self._enabled!r})"
| StarcoderdataPython |
4804253 | <filename>tools/dump_database.py
import os
import sys
import django
# Make the project root importable and boot Django before importing
# project settings (hence the late import below).
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "museum.settings")
django.setup()
from museum.private import DATABASES  # noqa: E402
def main():
    """Dump the configured MySQL database to an SQL file via mysqldump.

    The output filename defaults to museum_db_dump.sql and can be overridden
    by the last command-line argument.
    """
    db_conf = DATABASES["default"]
    db_user = db_conf["USER"]
    db_password = db_conf["PASSWORD"]
    db_name = db_conf["NAME"]
    out_file = sys.argv[-1] if len(sys.argv) >= 2 else "museum_db_dump.sql"
    print("Dumping {} to {}".format(db_name, out_file))
    dump_cmd = "mysqldump -u {} -p{} {} > {}".format(db_user, db_password, db_name, out_file)
    os.system(dump_cmd)
    print("DONE.")
# Allow invocation as a standalone script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3352932 | <gh_stars>0
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from pandas import read_csv, DataFrame, Series, concat
from sklearn.preprocessing import LabelEncoder
from sklearn import cross_validation, svm, grid_search
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score, roc_auc_score
import pylab as pl
import matplotlib.pyplot as plt
def plot_train():
    """Plot stacked per-Cover_Type counts for every one-hot Wilderness_Area
    and Soil_Type column of the training set and save each figure as PNG."""
    print 'Plot data...'
    data = read_csv('./train.csv', sep = ',')
    # one figure per Wilderness_Area indicator (1-4)
    for k in range(1, 5):
        param = 'Wilderness_Area%s' % k
        f = plt.figure(figsize = (8, 6))
        p = data.pivot_table('Id', param, 'Cover_Type', 'count').plot(kind = 'barh', stacked = True, ax = f.gca())
        img = './wilderness_area_cover_type_plot/Wilderness_Area%s_cover_type.png' % k
        f.savefig(img)
    # one figure per Soil_Type indicator (1-40)
    for k in range(1, 41):
        param = 'Soil_Type%s' % k
        f = plt.figure(figsize = (8, 6))
        p = data.pivot_table('Id', param, 'Cover_Type', 'count').plot(kind = 'barh', stacked = True, ax = f.gca())
        img = './soil_type_cover_type_plot/Soil_Type%s_cover_type.png' % k
        f.savefig(img)
def plot_elevation():
data = read_csv('./train.csv')
data = data.sort(['Elevation'])
print 'Plot Elevation...'
fig, axes = plt.subplots(ncols=1)
e = data.pivot_table('Id', ['Elevation'], 'Cover_Type', 'count').plot(ax=axes, title='Elevation')
f = e.get_figure()
f.savefig('./train_data_plot/elevation_cover_type.png')
def plot_aspect():
data = read_csv('./train.csv')
data = data.sort(['Aspect'])
print 'Plot Aspect...'
fig, axes = plt.subplots(ncols=1)
e = data.pivot_table('Id', ['Aspect'], 'Cover_Type', 'count').plot(ax=axes, title='Aspect')
f = e.get_figure()
f.savefig('./train_data_plot/aspect_cover_type.png')
def plot_slope():
data = read_csv('./train.csv')
data = data.sort(['Slope'])
print 'Plot Slope...'
fig, axes = plt.subplots(ncols=1)
e = data.pivot_table('Id', ['Slope'], 'Cover_Type', 'count').plot(ax=axes, title='Slope')
f = e.get_figure()
f.savefig('./train_data_plot/slope_cover_type.png')
def plot_horizontal_distance_to_hydrology():
data = read_csv('./train.csv')
data = data.sort(['Horizontal_Distance_To_Hydrology'])
print 'Plot Horizontal_Distance_To_Hydrology...'
fig, axes = plt.subplots(ncols=1)
e = data.pivot_table('Id', ['Horizontal_Distance_To_Hydrology'], 'Cover_Type', 'count').plot(ax=axes, title='Horizontal Distance To Hydrology')
f = e.get_figure()
f.savefig('./train_data_plot/horizontal_distance_to_hydrology_cover_type.png')
def plot_vertical_distance_to_hydrology():
data = read_csv('./train.csv')
data = data.sort(['Vertical_Distance_To_Hydrology'])
print 'Plot Vertical_Distance_To_Hydrology...'
fig, axes = plt.subplots(ncols=1)
e = data.pivot_table('Id', ['Vertical_Distance_To_Hydrology'], 'Cover_Type', 'count').plot(ax=axes, title='Vertical Distance To Hydrology')
f = e.get_figure()
f.savefig('./train_data_plot/vertical_distance_to_hydrology_cover_type.png')
def plot_horizontal_distance_to_roadways():
data = read_csv('./train.csv')
data = data.sort(['Horizontal_Distance_To_Roadways'])
print 'Plot Horizontal_Distance_To_Roadways...'
fig, axes = plt.subplots(ncols=1)
e = data.pivot_table('Id', ['Horizontal_Distance_To_Roadways'], 'Cover_Type', 'count').plot(ax=axes, title='Horizontal Distance To Roadways')
f = e.get_figure()
f.savefig('./train_data_plot/horizontal_distance_to_roadways_cover_type.png')
def plot_box():
    """Draw a per-Cover_Type box plot for every continuous training feature
    and save each as ./train_data_plot/box_<feature>_cover_type.png."""
    data = read_csv("./train.csv")
    headers = ['Elevation', 'Slope', 'Aspect', 'Horizontal_Distance_To_Hydrology', 'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways']
    headers += ['Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm', 'Horizontal_Distance_To_Fire_Points']
    for k in headers:
        print "box plot %s..." % k.lower().replace("_", " ")
        # pair the feature with the target so boxplot can group by Cover_Type
        df = concat([data[k], data['Cover_Type']], axis=1, keys=[k, 'Cover_Type'])
        f = plt.figure(figsize=(8, 6))
        p = df.boxplot(by='Cover_Type', ax = f.gca())
        f.savefig('./train_data_plot/box_%s_cover_type.png' % k.lower())
def get_train_data():
print 'Get train data...'
data = read_csv('./train.csv')
data = data.drop(['Id'], axis = 1)
# удаляем столбец Wilderness_Area2
data = data.drop(['Wilderness_Area2', 'Vertical_Distance_To_Hydrology', 'Slope'], axis = 1)
# data = data.drop(['Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm'], axis = 1)
# удаляем столбцы SoilType1,...,SoilType40
drop_soil_type_cols = []
for k in range(1, 41):
cname = 'Soil_Type%s' % k
drop_soil_type_cols.append(cname)
data = data.drop(drop_soil_type_cols, axis = 1)
return data
def get_test_data():
print 'Get test data...'
data = read_csv('./test.csv')
result = DataFrame(data.Id)
# удаляем столбцы Id, Wilderness_Area2
data = data.drop(['Id', 'Wilderness_Area2', 'Vertical_Distance_To_Hydrology', 'Slope'], axis = 1)
# data = data.drop(['Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm'], axis = 1)
# удаляем столбцы SoilType1,...,SoilType40
drop_soil_type_cols = []
for k in range(1, 41):
cname = 'Soil_Type%s' % k
drop_soil_type_cols.append(cname)
data = data.drop(drop_soil_type_cols, axis = 1)
return (data, result)
def cross_validation_test():
    """Score Random Forest, k-NN and Logistic Regression with 10-fold
    cross-validation on the training data, print the mean accuracies and
    save them as a horizontal bar chart."""
    data = get_train_data()
    target = data.Cover_Type
    train = data.drop(['Cover_Type'], axis = 1)
    kfold = 10
    cross_val_final = {}  # model label -> mean CV score
    print 'Cross validation test...'
    model_rfc = RandomForestClassifier(n_estimators = 1024, criterion='entropy', n_jobs = -1)
    model_knc = KNeighborsClassifier(n_neighbors = 128)
    model_lr = LogisticRegression(penalty='l1', C=1e5)
    scores = cross_validation.cross_val_score(model_rfc, train, target, cv = kfold)
    cross_val_final['RFC'] = scores.mean()
    print 'RFC: ', scores.mean()
    scores = cross_validation.cross_val_score(model_knc, train, target, cv = kfold)
    cross_val_final['KNC'] = scores.mean()
    print 'KNC: ', scores.mean()
    scores = cross_validation.cross_val_score(model_lr, train, target, cv = kfold)
    cross_val_final['LR'] = scores.mean()
    print 'LR: ', scores.mean()
    f = plt.figure(figsize = (8, 6))
    p = DataFrame.from_dict(data = cross_val_final, orient='index').plot(kind='barh', legend=False, ax = f.gca())
    f.savefig('./test_plot/cross_validation_rfc_1024.png')
# final function
def go():
    """Train the final Random Forest on the training data and write the
    test-set predictions as a submission CSV."""
    data = get_train_data()
    model_rfc = RandomForestClassifier(n_estimators = 2500, criterion = 'entropy', n_jobs = -1)
    # Random Forest is used because it gave the best results
    print 'Go!!!'
    print 'RFC...'
    test, result = get_test_data()
    target = data.Cover_Type
    # Aspect is additionally excluded from both sets for the final model
    test = test.drop(['Aspect'], axis = 1)
    train = data.drop(['Cover_Type', 'Aspect'], axis = 1)
    print "..."
    model_rfc.fit(train, target)
    # column 0 is Id (kept by get_test_data); insert predictions next to it
    result.insert(1,'Cover_Type', model_rfc.predict(test))
    result.to_csv('./test_rfc_2500_new.csv', index=False)
def go_gbc():
    """Alternative to go(): train a Gradient Boosting classifier and write
    the test-set predictions as a submission CSV."""
    data = get_train_data()
    model_gbc = GradientBoostingClassifier(n_estimators = 1600)
    print 'Go!!!'
    print 'GBC...'
    test, result = get_test_data()
    target = data.Cover_Type
    train = data.drop(['Cover_Type'], axis = 1)
    model_gbc.fit(train, target)
    result.insert(1,'Cover_Type', model_gbc.predict(test))
    result.to_csv('./test_gbc_1600.csv', index=False)
def grid_search_test():
    """Grid-search Random Forest hyper-parameters (n_estimators, criterion)
    on the training data and print the best score and settings found."""
    data = get_train_data()
    target = data.Cover_Type
    train = data.drop(['Cover_Type'], axis = 1)
    model_rfc = RandomForestClassifier()
    params = {"n_estimators" : [100, 250, 500, 625], "criterion" : ('entropy', 'gini')}
    clf = grid_search.GridSearchCV(model_rfc, params)
    clf.fit(train, target)
    # summarize the results of the grid search
    print(clf.best_score_)
    print(clf.best_estimator_.criterion)
    print(clf.best_estimator_.n_estimators)
# plot_elevation()
# plot_aspect()
# plot_slope()
# plot_horizontal_distance_to_hydrology()
# plot_vertical_distance_to_hydrology()
# plot_horizontal_distance_to_roadways()
# plot_train()
# cross_validation_test()
# grid_search_test()
# go()
# go_gbc()
# plot_box()
# data = get_train_data()
# train = data.drop(['Cover_Type', 'Aspect'], axis = 1)
# print train.head()
| StarcoderdataPython |
173348 | from Crypto.Util.number import *
from math import gcd
import json
ct = 17320751473362084127402636657144071375427833219607663443601124449781249403644322557541872089652267070211212915903557690040206709235417332498271540915493529128300376560226137139676145984352993170584208658625255938806836396696141456961179529532070976247738546045494839964768476955634323305122778089058798906645471526156569091101098698045293624474978286797899191202843389249922173166570341752053592397746313995966365207638042347023262633148306194888008613632757146845037310325643855138147271259215908333877374609302786041209284422691820450450982123612630485471082506484250009427242444806889873164459216407213750735305784
pubkey = json.loads(open("pubkey.json").read())
e = pubkey['e']
d = pubkey['n']
cf = pubkey['cf']
# e*d - 1 is a multiple of phi(n); search for the small multiplier k for
# which phi = (e*d - 1) / k has a plausible modulus size (<= 2048 bits).
upper_lim = min(e, d)
ks = []
for k in range(2, upper_lim):
    if (e * d - 1) % k == 0 and ((e * d - 1) // k).bit_length() <= 2048:
        ks.append(k)
# print("[*] Possible number of k values = ", len(ks)) # 1
k = ks[0]
phi = (e * d - 1) // k
# With cf*q == 1 (mod p), cf*phi - cf + 1 is a multiple of p (see the
# derivation in the comments at the bottom of this file).
pmul = cf * phi - cf + 1
p = pmul
i = 2
# Strip the cofactor: phi is a multiple of p-1, so i^phi - 1 is divisible
# by p for any base i coprime to p (Fermat); repeated gcds shrink pmul to p.
while not isPrime(p):
    pmuli = pow(i, phi, p) - 1
    p = gcd(p, pmuli)
    i += 1
print("[*] p = ", p)
assert isPrime(p)
# q is the modular inverse of cf mod p; step by p until a prime is found.
q = inverse(cf, p)
while not isPrime(q):
    q += p
print("[+] q = ", q)
n = p*q
# Decrypt: d (read from the 'n' field of pubkey.json -- presumably the
# challenge swapped the key names; TODO confirm) is the decryption exponent.
flag = pow(ct, d, n)
print("[*] flag = ", long_to_bytes(flag).decode())
# TSGCTF{Okay_this_flag_will_be_quite_long_so_listen_carefully_Happiness_is_our_bodys_default_setting_Please_dont_feel_SAd_in_all_sense_Be_happy!_Anyway_this_challenge_is_simple_rewrite_of_HITCON_CTF_2019_Lost_Modulus_Again_so_Im_very_thankful_to_the_author}
# cf * q - 1 = 0 % p
# e*d - 1 = k*phi(n) = k * (n - p - q + 1)
# (k * cf * n - cf*p - cf*q + k*cf) % p = 0 - 0 - 1 + k*cf
# => cf*(e*d - 1) % p
# => k*cf*(-q + 1) % p
# => k*(-q*cf + 1) % p
# => k*(-1 + 1) % p
# => % p
# (p - 1) * (q - 1) = p*q - p - q + 1
# (cf * n - cf*p - cf*q + cf) % p
# (cf*(e*d-1) - k*cf + k) % p
# pmul = 203924475685273125673924567120722211197775789263823099609868179699015077739937525153867786646043922585435823756174102804827621224012186527737220778697750686298504260946611508174775095028972083466656969206793950062958025993036588234037930950865051977169712023795454717172724050588927197417362483522665713180702064548911358519148833656893687268847981618012321143608846445283212339025960431983853360060365733268970028150731318222987414098112588917736299798600733169255883039636648735455383984164507504457126131525930193208740160169062153623683889489951639660237228935581524527590348429642329760869581365117284215618981383483485976418787736969369390525017827257117474256133845432195634375183086883483821098636303137578274580400857016015168040903407435592004125171338858221961907733370940358870810621175748650252715949875073790463288302867296608163915781016208936301706216678625383778953978965425482339341899846735007166492941035127380
# pmul = 52871625506863469419970250719490016323438261079632535659972196074062281756032418324707994774780978077881125703592494150857770444275957060015005543070749389508699526195699950562256489171658321739432399251541222401652762587029604658228120911819200296203337399741328902417078454470021218211752170832535207176943477551815420586375552412656031261323416935867750802948919763817913904619636387757456676028579830241631471496193442177537000367043380226834382432547665765302379769017120323576169609864531412731871066142913798852039237313106964440793824133838447453168959734590724595814427780125968288230737653460603699573421755813765784342172444219065557777861491096411736518672838886951216381604510758433645866840793473370665218390455588303248265131410645514979767065151380724369822120823837398339402126341715195298675794664717874103594705706809110678031532356225963575832673789299449695140173957530720061459
# k = 62676
# phi = 28704238313373626957214994960008954139581982756499074110405661944616161563699953881355718340419202795817270395957729473476747145698006830528333554418856853696099479893465653196878573256509001276103164075993991197413993941054447827049697897358634408174823100151899093845913695607502289118576896877429840052069202417729078184494305213296459526263835596373320000276434715668950539321926894606721051470912695132078472801786561226166468695792501860509172562449551353991095017747320372209557378661393466880782009425950486774637282820715849947612074722195167230790483523030570166160601490546009595990365306583485864701801668
# TSGCTF{Okay_this_flag_will_be_quite_long_so_listen_carefully_Happiness_is_our_bodys_default_setting_Please_dont_feel_SAd_in_all_sense_Be_happy!_Anyway_this_challenge_is_simple_rewrite_of_HITCON_CTF_2019_Lost_Modulus_Again_so_Im_very_thankful_to_the_author}
| StarcoderdataPython |
11305 | <filename>python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
    """TensorRT inference test for the transpose_flatten_concat fuse pass.

    Builds a small graph (two transpose -> flatten branches joined by a
    concat, followed by a batch_norm) and checks that running it with the
    TensorRT engine enabled matches the baseline output.
    """
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data1 = fluid.data(
                name="data1", shape=[8, 32, 128], dtype="float32")
            data2 = fluid.data(
                name="data2", shape=[8, 32, 128], dtype="float32")
            trans1 = fluid.layers.transpose(data1, perm=[2, 1, 0])
            trans2 = fluid.layers.transpose(data2, perm=[2, 1, 0])
            flatt1 = fluid.layers.flatten(trans1)
            flatt2 = fluid.layers.flatten(trans2)
            concat_out = fluid.layers.concat([flatt1, flatt2])
            # There is no parameters for above structure.
            # Hence, append a batch_norm to avoid failure caused by load_combined.
            out = fluid.layers.batch_norm(concat_out, is_test=True)
        # Random inputs matching the declared shapes above.
        self.feeds = {
            "data1": np.random.random([8, 32, 128]).astype("float32"),
            "data2": np.random.random([8, 32, 128]).astype("float32")
        }
        self.enable_trt = True
        # Positional TensorRTParam args (1MB workspace, batch 8, FP32, ...) --
        # meanings per InferencePassTest.TensorRTParam; TODO confirm.
        self.trt_parameters = TransposeFlattenConcatFusePassTRTTest.TensorRTParam(
            1 << 20, 8, 3, AnalysisConfig.Precision.Float32, False, False)
        self.fetch_list = [out]
    def test_check_output(self):
        # There is no cpu pass for transpose_flatten_concat_fuse
        if core.is_compiled_with_cuda():
            use_gpu = True
            self.check_output_with_option(use_gpu)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
54264 | <gh_stars>0
import requests
import os
TARGETS = [
"spigot2srg",
"spigot2srg-onlyobf",
"spigot2mcp",
"spigot2mcp-onlyobf",
"obf2mcp",
"mcp2obf"
]
BASE_URL = "http://localhost:8000"
MCP_VERSION = "snapshot_nodoc_20180925"
MINECRAFT_VERSION = "1.13"
def main():
    """Request serialized mappings from the local mapping server and write
    one ``.srg`` file per target into the ``out/`` directory.

    Posts the configured Minecraft/MCP versions and target list to the
    ``load_mappings`` endpoint, then dumps each returned mapping to
    ``out/<target>-<MINECRAFT_VERSION>.srg``.
    """
    request = {
        "minecraft_version": MINECRAFT_VERSION,
        "mcp_version": MCP_VERSION,
        "targets": TARGETS
    }
    r = requests.post(f"{BASE_URL}/api/beta/load_mappings", json=request).json()
    print(f"Response keys {set(r.keys())}")
    response_time = r['response_time']
    serialized_mappings = r['serialized_mappings']
    print(f"Received {len(serialized_mappings)} mappings in {response_time}ms")
    # Fixed: removed a stray trailing comma that turned this statement into
    # a one-element tuple expression (harmless at runtime, but confusing).
    os.makedirs("out", exist_ok=True)
    for target, serialized in serialized_mappings.items():
        with open(f"out/{target}-{MINECRAFT_VERSION}.srg", 'w', encoding='utf-8') as f:
            f.write(serialized)
if __name__ == "__main__":
main()
| StarcoderdataPython |
40937 | <reponame>martinsnathalia/Python<gh_stars>0
# Desenvolva um programa que leia o comprimento de três retas e diga ao usuário se elas podem ou não formar um triângulo.
# Ask the user for three segment lengths and report whether they can form
# a triangle (every side must be shorter than the sum of the other two).
print('Suas retas formam um triângulo?')
sides = [
    float(input('Digite a primeira reta: ')),
    float(input('Digite a segunda reta: ')),
    float(input('Digite a terceira reta: ')),
]
# The three pairwise inequalities hold iff they hold for the longest side,
# i.e. 2 * max(sides) < sum(sides).
if 2 * max(sides) < sum(sides):
    print('Essas retas formam um triângulo!')
else:
    print('Essas retas NÃO formam um triângulo!')
| StarcoderdataPython |
24166 | from typing import List
import torch
from detectron2.structures import ImageList, Boxes, Instances, pairwise_iou
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads
from .utils import get_aligned_pooler, label_and_sample_proposals
from .lazy_fast_rcnn import LazyFastRCNNOutputLayers
@ROI_HEADS_REGISTRY.register()
class LazyRoIHeads(StandardROIHeads):
    """StandardROIHeads variant that delegates proposal labeling/sampling
    to the shared ``label_and_sample_proposals`` helper and pools box
    features from the RPN's input features with an aligned pooler."""
    @torch.no_grad()
    def label_and_sample_proposals(
        self, proposals: List[Instances], targets: List[Instances]
    ) -> List[Instances]:
        # Delegate to the module-level helper (shared with the cascade head).
        return label_and_sample_proposals(self, proposals, targets)
    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        """Build the box head as in the parent, then swap in the lazy
        predictor and an RPN-aligned pooler."""
        ret = super()._init_box_head(cfg, input_shape)
        ret["box_predictor"] = LazyFastRCNNOutputLayers(
            cfg, ret["box_head"].output_shape,
            # The loss weight is set as Cascade RPN
            loss_weight={
                "loss_cls": 1.5,
                "loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT
            },
        )
        # Pool box features from the same feature maps the RPN consumes.
        ret["box_in_features"] = cfg.MODEL.RPN.IN_FEATURES
        ret["box_pooler"] = get_aligned_pooler(
            cfg.MODEL.RPN, input_shape,
            output_size=cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION,
            sampling_ratio=cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO,
        )
        return ret
@ROI_HEADS_REGISTRY.register()
class LazyCascadeRoIHeads(CascadeROIHeads):
    """CascadeROIHeads variant mirroring LazyRoIHeads: shared proposal
    sampling helper, one lazy predictor per cascade stage, and an
    RPN-aligned box pooler."""
    @torch.no_grad()
    def label_and_sample_proposals(
        self, proposals: List[Instances], targets: List[Instances]
    ) -> List[Instances]:
        # Delegate to the module-level helper (shared with LazyRoIHeads).
        return label_and_sample_proposals(self, proposals, targets)
    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        """Build the cascade box heads as in the parent, then replace the
        per-stage predictors and the pooler."""
        ret = super()._init_box_head(cfg, input_shape)
        cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
        # One predictor per cascade stage, each with its own box-regression
        # transform weights.
        box_predictors = []
        for bbox_reg_weights in cascade_bbox_reg_weights:
            box_predictors.append(
                LazyFastRCNNOutputLayers(
                    cfg, ret["box_heads"][0].output_shape,
                    box2box_transform=Box2BoxTransform(weights=bbox_reg_weights),
                    loss_weight={
                        "loss_cls": 1.5,
                        "loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT
                    },
                )
            )
        ret["box_predictors"] = box_predictors
        # Pool box features from the same feature maps the RPN consumes.
        ret["box_in_features"] = cfg.MODEL.RPN.IN_FEATURES
        ret["box_pooler"] = get_aligned_pooler(
            cfg.MODEL.RPN, input_shape,
            output_size=cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION,
            sampling_ratio=cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO,
        )
        return ret
def _match_and_label_boxes(self, proposals, stage, targets):
return label_and_sample_proposals(self, proposals, targets, False, False, stage) | StarcoderdataPython |
3348009 | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from shaker import lib
class TestLib(testtools.TestCase):
    """Unit tests for the shaker.lib facade."""

    @mock.patch('shaker.engine.quorum.make_quorum')
    def test_run_program(self, make_quorum_patch):
        """run_program should execute the command via the quorum and return
        the agent's record merged with agent/executor metadata."""
        quorum_mock = mock.MagicMock()
        make_quorum_patch.return_value = quorum_mock
        quorum_mock.execute = mock.Mock(
            return_value={'AGENT': {'status': 'ok', 'stdout': 'STDOUT', }})
        shaker = lib.Shaker('127.0.0.1:5999', ['AGENT'])
        res = shaker.run_program('AGENT', 'ls -al')
        # assertDictContainsSubset is deprecated (since Python 3.2);
        # assert the expected subset explicitly instead.
        expected = {'status': 'ok', 'stdout': 'STDOUT', 'agent': 'AGENT',
                    'executor': 'shell', 'type': 'agent'}
        self.assertEqual(expected, {k: res.get(k) for k in expected})
| StarcoderdataPython |
1740031 | import re
import requests
import urllib
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from .util import Util
from .version import VERSION
from .api_config import ApiConfig
from nasdaqdatalink.errors.data_link_error import (
DataLinkError, LimitExceededError, InternalServerError,
AuthenticationError, ForbiddenError, InvalidRequestError,
NotFoundError, ServiceUnavailableError)
class Connection:
    """HTTP transport for the Nasdaq Data Link API: builds headers,
    applies retry policy, and maps error responses to typed exceptions."""
    @classmethod
    def request(cls, http_verb, url, **options):
        """Send an API request for the given verb and relative url.

        Merges the library's accept/identification headers (and api key,
        if configured) into any caller-supplied headers, prefixes the
        configured api base, and delegates to execute_request.
        """
        if 'headers' in options:
            headers = options['headers']
        else:
            headers = {}
        accept_value = 'application/json'
        if ApiConfig.api_version:
            # opt into a specific API version via content negotiation
            accept_value += ", application/vnd.data.nasdaq+json;version=%s" % ApiConfig.api_version
        headers = Util.merge_to_dicts({'accept': accept_value,
                                       'request-source': 'python',
                                       'request-source-version': VERSION}, headers)
        if ApiConfig.api_key:
            headers = Util.merge_to_dicts({'x-api-token': ApiConfig.api_key}, headers)
        options['headers'] = headers
        abs_url = '%s/%s' % (ApiConfig.api_base, url)
        return cls.execute_request(http_verb, abs_url, **options)
    @classmethod
    def execute_request(cls, http_verb, url, **options):
        """Perform the HTTP call; raise a typed error for non-2xx responses
        or re-raise transport errors that carry no response."""
        session = cls.get_session()
        try:
            response = session.request(method=http_verb,
                                       url=url,
                                       verify=ApiConfig.verify_ssl,
                                       **options)
            if response.status_code < 200 or response.status_code >= 300:
                cls.handle_api_error(response)
            else:
                return response
        except requests.exceptions.RequestException as e:
            if e.response:
                # server responded with an error body: translate it
                cls.handle_api_error(e.response)
            raise e
    @classmethod
    def get_session(cls):
        """Build a requests session with the retry adapter mounted and any
        environment-configured proxies applied."""
        session = requests.Session()
        adapter = HTTPAdapter(max_retries=cls.get_retries())
        session.mount(ApiConfig.api_protocol, adapter)
        proxies = urllib.request.getproxies()
        if proxies is not None:
            session.proxies.update(proxies)
        return session
    @classmethod
    def get_retries(cls):
        """Construct the urllib3 Retry policy from ApiConfig settings
        (no retries when use_retries is disabled)."""
        if not ApiConfig.use_retries:
            return Retry(total=0)
        # NOTE(review): this mutates the Retry class attribute globally,
        # affecting all urllib3 users in the process.
        Retry.BACKOFF_MAX = ApiConfig.max_wait_between_retries
        retries = Retry(total=ApiConfig.number_of_retries,
                        connect=ApiConfig.number_of_retries,
                        read=ApiConfig.number_of_retries,
                        status_forcelist=ApiConfig.retry_status_codes,
                        backoff_factor=ApiConfig.retry_backoff_factor,
                        raise_on_status=False)
        return retries
    @classmethod
    def parse(cls, response):
        """Decode a response body as JSON, raising DataLinkError when the
        body is not valid JSON."""
        try:
            return response.json()
        except ValueError:
            raise DataLinkError(http_status=response.status_code, http_body=response.text)
    @classmethod
    def handle_api_error(cls, resp):
        """Translate an error response into the matching exception type
        based on the QE<letter>x error-code prefix; always raises."""
        error_body = cls.parse(resp)
        # if our app does not form a proper data_link_error response
        # throw generic error
        if 'error' not in error_body:
            raise DataLinkError(http_status=resp.status_code, http_body=resp.text)
        code = error_body['error']['code']
        message = error_body['error']['message']
        prog = re.compile('^QE([a-zA-Z])x')
        if prog.match(code):
            code_letter = prog.match(code).group(1)
            # map the error-code letter to its exception class
            d_klass = {
                'L': LimitExceededError,
                'M': InternalServerError,
                'A': AuthenticationError,
                'P': ForbiddenError,
                'S': InvalidRequestError,
                'C': NotFoundError,
                'X': ServiceUnavailableError
            }
            klass = d_klass.get(code_letter, DataLinkError)
        raise klass(message, resp.status_code, resp.text, resp.headers, code)
| StarcoderdataPython |
1611938 | <reponame>lefevre-fraser/openmeta-mms<gh_stars>0
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, assert_almost_equal, assert_equal,
assert_, assert_raises, run_module_suite,
assert_allclose)
import scipy.signal.waveforms as waveforms
# These chirp_* functions are the instantaneous frequencies of the signals
# returned by chirp().
def chirp_linear(t, f0, f1, t1):
    """Instantaneous frequency of a linear chirp: f0 at t=0, f1 at t=t1."""
    return f0 + (f1 - f0) * t / t1
def chirp_quadratic(t, f0, f1, t1, vertex_zero=True):
    """Instantaneous frequency of a quadratic chirp.

    With vertex_zero=True the parabola's vertex sits at t=0; otherwise it
    sits at t=t1. Either way the sweep runs from f0 at t=0 to f1 at t=t1.
    """
    if vertex_zero:
        return f0 + (f1 - f0) * t**2 / t1**2
    return f1 - (f1 - f0) * (t1 - t)**2 / t1**2
def chirp_geometric(t, f0, f1, t1):
    """Instantaneous frequency of a geometric (logarithmic) chirp:
    f0 at t=0, f1 at t=t1, varying exponentially in between."""
    return f0 * (f1/f0)**(t/t1)
def chirp_hyperbolic(t, f0, f1, t1):
    """Instantaneous frequency of a hyperbolic chirp: f0 at t=0, f1 at t=t1."""
    return f0*f1*t1 / ((f0 - f1)*t + f1*t1)
return f
def compute_frequency(t, theta):
    """Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t).

    Assumes ``t`` and ``theta`` are 1-D numpy arrays with ``t`` uniformly
    spaced. Returns the interval midpoints together with the finite-difference
    frequency estimate at those midpoints.
    """
    dt = t[1] - t[0]
    freq = np.diff(theta)/(2*np.pi) / dt
    midpoints = 0.5*(t[1:] + t[:-1])
    return midpoints, freq
class TestChirp(TestCase):
    """Tests for scipy.signal.waveforms.chirp.

    The freq tests compare the numerically differentiated phase (via
    compute_frequency) against the closed-form instantaneous-frequency
    helpers defined above; the integer tests check that int and float
    arguments produce identical waveforms.
    """
    def test_linear_at_zero(self):
        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear')
        assert_almost_equal(w, 1.0)
    def test_linear_freq_01(self):
        method = 'linear'
        f0 = 1.0
        f1 = 2.0
        t1 = 1.0
        t = np.linspace(0, t1, 100)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_linear_freq_02(self):
        method = 'linear'
        f0 = 200.0
        f1 = 100.0
        t1 = 10.0
        t = np.linspace(0, t1, 100)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_quadratic_at_zero(self):
        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic')
        assert_almost_equal(w, 1.0)
    def test_quadratic_at_zero2(self):
        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic',
                            vertex_zero=False)
        assert_almost_equal(w, 1.0)
    def test_quadratic_freq_01(self):
        method = 'quadratic'
        f0 = 1.0
        f1 = 2.0
        t1 = 1.0
        t = np.linspace(0, t1, 2000)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_quadratic_freq_02(self):
        method = 'quadratic'
        f0 = 20.0
        f1 = 10.0
        t1 = 10.0
        t = np.linspace(0, t1, 2000)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_logarithmic_at_zero(self):
        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic')
        assert_almost_equal(w, 1.0)
    def test_logarithmic_freq_01(self):
        method = 'logarithmic'
        f0 = 1.0
        f1 = 2.0
        t1 = 1.0
        t = np.linspace(0, t1, 10000)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_logarithmic_freq_02(self):
        method = 'logarithmic'
        f0 = 200.0
        f1 = 100.0
        t1 = 10.0
        t = np.linspace(0, t1, 10000)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_logarithmic_freq_03(self):
        # degenerate case: constant frequency (f0 == f1)
        method = 'logarithmic'
        f0 = 100.0
        f1 = 100.0
        t1 = 10.0
        t = np.linspace(0, t1, 10000)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_hyperbolic_at_zero(self):
        w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic')
        assert_almost_equal(w, 1.0)
    def test_hyperbolic_freq_01(self):
        method = 'hyperbolic'
        t1 = 1.0
        t = np.linspace(0, t1, 10000)
        #           f0     f1
        cases = [[10.0, 1.0],
                 [1.0, 10.0],
                 [-10.0, -1.0],
                 [-1.0, -10.0]]
        for f0, f1 in cases:
            phase = waveforms._chirp_phase(t, f0, t1, f1, method)
            tf, f = compute_frequency(t, phase)
            expected = chirp_hyperbolic(tf, f0, f1, t1)
            assert_allclose(f, expected)
    def test_hyperbolic_zero_freq(self):
        # f0=0 or f1=0 must raise a ValueError.
        method = 'hyperbolic'
        t1 = 1.0
        t = np.linspace(0, t1, 5)
        assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method)
        assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method)
    def test_unknown_method(self):
        method = "foo"
        f0 = 10.0
        f1 = 20.0
        t1 = 1.0
        t = np.linspace(0, t1, 10)
        assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
    def test_integer_t1(self):
        f0 = 10.0
        f1 = 20.0
        t = np.linspace(-1, 1, 11)
        t1 = 3.0
        float_result = waveforms.chirp(t, f0, t1, f1)
        t1 = 3
        int_result = waveforms.chirp(t, f0, t1, f1)
        err_msg = "Integer input 't1=3' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
    def test_integer_f0(self):
        f1 = 20.0
        t1 = 3.0
        t = np.linspace(-1, 1, 11)
        f0 = 10.0
        float_result = waveforms.chirp(t, f0, t1, f1)
        f0 = 10
        int_result = waveforms.chirp(t, f0, t1, f1)
        err_msg = "Integer input 'f0=10' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
    def test_integer_f1(self):
        f0 = 10.0
        t1 = 3.0
        t = np.linspace(-1, 1, 11)
        f1 = 20.0
        float_result = waveforms.chirp(t, f0, t1, f1)
        f1 = 20
        int_result = waveforms.chirp(t, f0, t1, f1)
        err_msg = "Integer input 'f1=20' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
    def test_integer_all(self):
        f0 = 10
        t1 = 3
        f1 = 20
        t = np.linspace(-1, 1, 11)
        float_result = waveforms.chirp(t, float(f0), float(t1), float(f1))
        int_result = waveforms.chirp(t, f0, t1, f1)
        err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
class TestSweepPoly(TestCase):
    """Tests for scipy.signal.waveforms.sweep_poly.

    Each test differentiates the generated phase numerically and checks it
    against the polynomial's own frequency values; the last two verify that
    coefficient arrays and lists are accepted in place of a poly1d.
    """
    def test_sweep_poly_quad1(self):
        p = np.poly1d([1.0, 0.0, 1.0])
        t = np.linspace(0, 3.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = p(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_const(self):
        p = np.poly1d(2.0)
        t = np.linspace(0, 3.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = p(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_linear(self):
        p = np.poly1d([-1.0, 10.0])
        t = np.linspace(0, 3.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = p(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_quad2(self):
        p = np.poly1d([1.0, 0.0, -2.0])
        t = np.linspace(0, 3.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = p(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_cubic(self):
        p = np.poly1d([2.0, 1.0, 0.0, -2.0])
        t = np.linspace(0, 2.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = p(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_cubic2(self):
        """Use an array of coefficients instead of a poly1d."""
        p = np.array([2.0, 1.0, 0.0, -2.0])
        t = np.linspace(0, 2.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = np.poly1d(p)(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_cubic3(self):
        """Use a list of coefficients instead of a poly1d."""
        p = [2.0, 1.0, 0.0, -2.0]
        t = np.linspace(0, 2.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = np.poly1d(p)(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
class TestGaussPulse(TestCase):
    """Tests for scipy.signal.waveforms.gausspulse: each check verifies
    that an integer argument yields the same cutoff result as the
    equivalent float argument."""
    def test_integer_fc(self):
        float_result = waveforms.gausspulse('cutoff', fc=1000.0)
        int_result = waveforms.gausspulse('cutoff', fc=1000)
        err_msg = "Integer input 'fc=1000' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
    def test_integer_bw(self):
        float_result = waveforms.gausspulse('cutoff', bw=1.0)
        int_result = waveforms.gausspulse('cutoff', bw=1)
        err_msg = "Integer input 'bw=1' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
    def test_integer_bwr(self):
        float_result = waveforms.gausspulse('cutoff', bwr=-6.0)
        int_result = waveforms.gausspulse('cutoff', bwr=-6)
        err_msg = "Integer input 'bwr=-6' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
    def test_integer_tpr(self):
        float_result = waveforms.gausspulse('cutoff', tpr=-60.0)
        int_result = waveforms.gausspulse('cutoff', tpr=-60)
        err_msg = "Integer input 'tpr=-60' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
if __name__ == "__main__":
run_module_suite()
| StarcoderdataPython |
import asyncio
import functools
def sync(f):
    """Decorator: schedule the wrapped coroutine function as a task on the
    current event loop when the wrapper is called (fire-and-forget style).

    Improvements over the original: the created Task is now returned so
    callers may await or cancel it (previously it was discarded), and
    functools.wraps preserves the wrapped function's metadata. Must be
    invoked while an event loop is running.
    """
    import functools

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # get_event_loop() returns the running loop when called from a task
        return asyncio.get_event_loop().create_task(f(*args, **kwargs))
    return wrapper
| StarcoderdataPython |
119887 | <gh_stars>1-10
"""
get_ORFs retrieves all putative ORFs in transcriptome. This only has to be done once per gtf/genome
usage:
python get_ORFs.py --gtf <gtf-file> --fa <genome fasta file> --output <output-file>
By default, any ORFs less than 90 nts (30 amino acid) are discarded, but this is set
by the --min_aa_length flag
"""
from ribofy.stats import get_2D_matrix
import sys
import pysam
import re
import numpy as np
import pandas as pd
import networkx as nx
from tqdm import tqdm
from collections import defaultdict
from . import __version__
from .argparse2 import argparse2
from .utils import rev_comp, translate
from .gtf2 import *
libpybigwig = True
try:
import pyBigWig
except ModuleNotFoundError:
libpybigwig = False
def pickle_save (obj, file):
"""Save as gzipped pickle"""
try:
import pickle, gzip
with gzip.open(file, 'wb') as handle:
pickle.dump(obj, handle)
except ModuleNotFoundError:
print("modules 'pickle' and 'gzip' are required for saving, skipped...")
def get_orfs_from_seq (seq, start_codon, stop_codon, min_aa_length):
"""Get ORFs from sequence"""
seq = seq.upper()
# searching for start (ATG) and stop-codons
start = [m.start() for m in re.finditer(start_codon, seq)]
stop = [m.start() for m in re.finditer(stop_codon, seq)]
start_frame = [s % 3 for s in start]
stop_frame = [s % 3 for s in stop]
# getting all longest possible ORFS in each frame
dorf = []
for frame in [0,1,2]:
ipos = 0
while True:
fstart = [s for i, s in enumerate(start) if start_frame[i] == frame and s >= ipos]
if len (fstart) == 0:
break
fstop = [s for i, s in enumerate(stop) if stop_frame[i] == frame and s > fstart[0]]
if len (fstop) == 0:
break
orf_length = abs (fstop[0] - fstart[0])
ipos = fstop[0]
if orf_length < min_aa_length*3:
continue
dorf.append ({'start':fstart[0], 'stop': fstop[0], 'frame':frame, 'orf_length':orf_length})
# sorting - not required, but then low orf_id corresponds to longer ORFs
dorf = sorted(dorf, key = lambda i: i['orf_length'], reverse=True)
return (dorf)
def get_orf_groups (edge_dict, graph_output):
"""get ORF groups based on chromosome-stratified (to save memory) network (edge_dict)
"""
orf_groups = {}
group_count = 0
group2edge = {}
for chrom in tqdm(edge_dict):
elist = [(e1, e2) for (e1, e2) in edge_dict[chrom]]
# build graph
g = nx.Graph()
g.add_edges_from(elist)
# extract graph groups
for ig, cc in enumerate (list(nx.connected_components(g))):
id_text = f"group_{(group_count+1):05d}"
for group_id in cc:
orf_groups[group_id] = id_text
if graph_output != "":
group2edge[id_text] = (chrom, ig)
group_count += 1
return (orf_groups, group_count, group2edge)
def get_orfs (gtf, fa, output, start_codon = "ATG", stop_codon = "TGA|TAA|TAG", min_aa_length=30, output_fa = False, error_output="", graph_output="", cons_bw = "", ):
"""
Main function: Extract putative ORFs from GTF, establish ORF-network and assign type to each ORF
Parameters
----------
gtf: str
Path/to/gtf-file (only tested with gencode)
fa: str
Path/to/fa; genome fasta file (compatible with GTF, and indexed: samtools faidx path/to/fa)
output: str
path/to/output (to be used in ribofy detect)
start_codon: str, default= "ATG"
start codon sequence used in finding ORFs - for multiple codons, seperate by '|', e.g. 'ATG|CTG' to allow for non-canonical start codons
stop_codon: str, default= "TGA|TAA|TAG"
stop codon sequence used in finding ORFs
min_aa_length: int, default=30
Minimun length of ORF (in amino acids)
output_fa: str, optional
Output the nucleotide and amino-acid sequences of ORFs (<output>.nt.fa and <output>.aa.fa, respectively)
graph_output: str, optional
Save graph network to file
cons_bw: str, optional
Bigwig file with conservation scores... greatly increases duration
"""
print ("### get_orfs ###")
start_codon = start_codon.upper().replace ("U", "T")
stop_codon = stop_codon.upper().replace ("U", "T")
print ("reading gtf...")
gtf = gtf2 (gtf)
fa = pysam.FastaFile (fa)
if cons_bw != "" and not libpybigwig:
print ("pyBigwig must be install to extract conservation scores from bw-files")
cons_bw = ""
if cons_bw != "":
bw = pyBigWig.open(cons_bw)
ferror = open (error_output, "w") if error_output != "" else None
fseq_aa = open (output + ".aa.fa", "w") if output_fa else None
fseq_nt = open (output + ".nt.fa", "w") if output_fa else None
start_codon = start_codon.upper()
lorfs = []
cons_dict, edge_dict = {}, {}
print ("finding ORFs in all transcripts...")
for tid in tqdm(gtf.get_all_tids ()):
seq = "" # transcript sequence
dt2g = {} # dict, converting from transcript position to genomic position
cons_arr = [] # conservation scores (only in use if cons_bw is set)
gannot_start = gtf.get_startcodon (tid)-1
gannot_stop = gtf.get_stopcodon (tid)-1
gid, symbol, biotype = gtf.get_gene_id (tid), gtf.get_name (tid), gtf.get_type (tid)
chrom, strand = gtf.get_chrom (tid), gtf.get_strand (tid)
annot_start, annot_stop = 0, 0 # relative, annotated start/stop codons
# iterate through exons
for (chrom, start, end, strand) in gtf.get_exon_coords (tid):
for i, g in enumerate (range (start-1, end)):
dt2g[i+len(seq)] = g
seq += fa.fetch (chrom, start-1, end)
annot_start += max (0, min (end, gannot_start) - (start-1))
annot_stop += max (0, min (end, gannot_stop) - (start-1))
if cons_bw != "":
cons_arr.extend (bw.values (chrom, start-1, end))
# reverse complement '-'-strand transcripts
if strand == "-":
seq = rev_comp (seq)
annot_start, annot_stop = len(seq) - annot_start - 1, len(seq) - annot_stop - 1
cons_arr = cons_arr[::-1]
#if cons_bw != "":
# cons_dict[tid] = cons_arr
# no proper annotation
if gannot_start <= 0 or gannot_stop <= 0:
annot_start, annot_stop = -1,-1
dorf = get_orfs_from_seq (seq, start_codon, stop_codon, min_aa_length)
if len (dorf) == 0:
continue
for i, orf in enumerate (dorf):
orf['tid'] = tid
orf['gid'] = gid
orf['symbol'] = symbol
orf['chrom'] = chrom
orf['strand'] = strand
orf['gannot_start'] = gannot_start
orf['gannot_stop'] = gannot_stop
orf['annot_start'] = annot_start
orf['annot_stop'] = annot_stop
orf['bio_type'] = biotype
orf['orf_id'] = f"{tid}_orf_{(i+1):05d}"
orf_seq = seq[orf['start']:orf['stop']]
if output_fa:
#fa_header =
#print (f">{orf['orf_id']}|{orf['gid']}|{orf['symbol']}|{orf['bio_type']}|{row.orf_type}")
print (f">{orf['orf_id']}\n{translate(orf_seq)}", file=fseq_aa)
print (f">{orf['orf_id']}\n{orf_seq}", file=fseq_nt)
if cons_bw:
cons_mat = get_2D_matrix (cons_arr[orf['start']:orf['stop']])
orf['cons_f0'] = np.mean (cons_mat[:,0])
orf['cons_f1'] = np.mean (cons_mat[:,1])
orf['cons_f2'] = np.mean (cons_mat[:,2])
orf['tid_length'] = len(seq)
#using annotated stop
if orf['stop'] == annot_stop and gannot_start > 0:
orf['orf_type'] = "annotated"
if annot_start < orf['start'] and abs(annot_stop-annot_start)%3 != 0:
if error_output != "":
## ERROR: invalid ORF annotation
error_data = [str(orf[c]) for c in orf]
error_data += [seq]
error_data += [seq[orf['start']:orf['stop']]]
error_data += [translate (seq[orf['start']:orf['stop']])]
print ("\t".join (error_data), file=ferror)
# overwrite annot
annot_start = orf['start']
else:
orf['start'] = annot_start
orf['orf_length'] = orf['stop'] - orf['start']
elif orf['stop'] < annot_stop and orf['start'] > annot_start:
continue
elif orf['stop'] > annot_stop and annot_stop != annot_start:
orf['orf_type'] = "dORF"
elif orf['stop'] < annot_stop and orf['start'] < annot_start and annot_stop != annot_start:
orf['orf_type'] = "uORF"
else:
orf['orf_type'] = "novel"
orf['gstart'] = dt2g[orf['start']] if strand == "+" else dt2g[len(seq)-orf['start']-1]
orf['gstop'] = dt2g[orf['stop']] if strand == "+" else dt2g[len(seq)-orf['stop']-1]
range1 = range (orf['start'], orf['stop'], 3)
range2 = range (orf['start']+3, orf['stop']+3, 3)
for pos1, pos2 in zip (range1, range2):
p1 = dt2g[pos1] if strand == "+" else dt2g[len(seq)-pos1-1]
p2 = dt2g[pos2] if strand == "+" else dt2g[len(seq)-pos2-1]
e1 = f"{chrom}:{p1}{strand}"
e2 = f"{chrom}:{p2}{strand}"
if not orf['chrom'] in edge_dict:
edge_dict[orf['chrom']] = {}
edge_dict[orf['chrom']][(e1, e2)] = 1
lorfs.append (orf)
if error_output != "":
ferror.close()
if output_fa:
fseq_aa.close()
fseq_nt.close()
print ("infering ORF groups...")
orf_groups, group_count, group2edge, = get_orf_groups (edge_dict, graph_output)
print (f"found {group_count} ORF-groups in {len (lorfs)} total ORFs")
if graph_output != "":
print (f"saving network edges")
pickle_save ({'edges' : edge_dict, 'group2edge' : group2edge}, graph_output)
# if cons_bw != "":
# print (f"saving conservation scores")
# pickle_save (cons_dict, output + ".cons.gz")
print (f"assigning ORF-group to individual ORF")
connected = 0
# Assign group to each orf
for orf in lorfs:
groupid_from_start = orf_groups[f"{orf['chrom']}:{orf['gstart']}{orf['strand']}"]
groupid_from_stop = orf_groups[f"{orf['chrom']}:{orf['gstop']}{orf['strand']}"]
if groupid_from_stop != groupid_from_start:
print (f"ERROR: Unconnected network in {orf['tid']} : {groupid_from_start} vs {groupid_from_stop} {orf['strand']}")
else:
connected += 1
groupid = orf_groups[f"{orf['chrom']}:{orf['gstart']}{orf['strand']}"]
orf['orf_group'] = groupid
# set group type: annotated > uORF > dORF > novel
group_score = {}
group_conv = {'annotated' : 3, 'uORF' : 2, 'dORF' : 1, 'novel' : 0}
for orf in lorfs:
score = group_score[orf['orf_group']] if orf['orf_group'] in group_score else 0
group_score[orf['orf_group']] = max (score, group_conv[orf['orf_type']])
print ("outputting...")
columns = ["gid", "symbol", "tid", "start", "stop", "tid_length", "annot_start", "annot_stop", "frame",
"chrom", "gstart", "gstop", "strand", "orf_length", "orf_type",
"bio_type", "orf_id", "orf_group"]
with open (output, "w") as fout:
print ("\t".join (columns), file=fout)
for orf in lorfs:
gscore = group_score[orf['orf_group']]
oscore = group_conv[orf['orf_type']]
if oscore > gscore:
print ("ERROR: orf_group scores invalid")
if oscore >= gscore:
print ("\t".join ([str(orf[col]) for col in columns]), file=fout)
print ("### Done ###")
def ribofy_orfs():
    """Command-line entry point for ``ribofy orfs``.

    Parses command-line options and delegates to ``get_orfs`` (defined
    earlier in this module), which extracts ORFs from a GENCODE-style GTF
    plus an indexed genome fasta and writes a tab-separated ORF table.
    """
    info_text = """
    ribofy orfs: extracting ORFs from GTF
    """
    help_text = f"""
    ribofy orfs - version {__version__}
    required arguments:
    --gtf <file>          GTF file, GENCODE-style
    --fa <file>           Genome Fasta file (indexed with samtools faidx)
    --output <str>        Output filename, default=orfs.txt
    optional arguments:
    --start_codon <str>   Specify start_codons for ORF detection. default="ATG"
    --stop_codon <str>    Specify stop_codons for ORF detection. default="TGA|TAA|TAG"
    --min_aa_length <INT> Minimum peptide length, default=30
    --output_fa           If set, outputs nucleotide and amino-acid fasta files (<output>.nt.fa and
    <output>.aa.fa, respectively) for all ORFs found
    usage: ribofy orfs --gtf GTF --fa FA [--output OUTPUT]\n"""

    # argparse2 is the project's argparse wrapper (defined elsewhere in this
    # module); it displays the custom help text above.
    parser = argparse2(
        description=info_text,
        usage=help_text,
        help=help_text
    )
    parser.add_argument('orfs', nargs='?', help='')  # dummy positional argument

    # required
    parser.add_argument("--gtf", dest='gtf', required=True)
    parser.add_argument("--fa", dest='fa', required=True)
    parser.add_argument("--output", dest='output', default="orfs.txt")

    # optional
    parser.add_argument("--start_codon", dest='start_codon', type=str, default="ATG")
    parser.add_argument("--stop_codon", dest='stop_codon', type=str, default="TGA|TAA|TAG")
    parser.add_argument("--min_aa_length", dest='min_aa_length', type=int, default=30)
    parser.add_argument("--output_fa", dest='output_fa', action="store_true")
    # Undocumented/debugging outputs (not listed in the help text above).
    parser.add_argument("--error_output", dest='error_output', type=str, default="")
    parser.add_argument("--graph_output", dest='graph_output', type=str, default="")
    parser.add_argument("--cons_bw", dest='cons_bw', type=str, default="")

    args = parser.parse_args()

    get_orfs(args.gtf, args.fa, args.output,
             start_codon=args.start_codon, stop_codon=args.stop_codon,
             min_aa_length=args.min_aa_length, output_fa=args.output_fa,
             error_output=args.error_output,
             graph_output=args.graph_output,
             cons_bw=args.cons_bw)


if __name__ == "__main__":
    ribofy_orfs()
| StarcoderdataPython |
1757974 | <gh_stars>1-10
import os
import tempfile
import re
import shutil
import requests
import io
import urllib
from mitmproxy.net import tcp
from mitmproxy.test import tutils
from pathod import language
from pathod import pathoc
from pathod import pathod
from pathod import test
from pathod.pathod import CA_CERT_NAME
def treader(bytes):
    """Wrap raw *bytes* in a tcp.Reader backed by an in-memory buffer."""
    return tcp.Reader(io.BytesIO(bytes))
class DaemonTests:
    """Mixin base for tests that need a running pathod daemon.

    Subclasses override the class-level flags below to configure the daemon
    (SSL, timeouts, hexdump logging, ...).  One daemon is started per test
    class and torn down afterwards.
    """

    # Daemon configuration knobs; subclasses override as needed.
    nohang = False
    ssl = False
    timeout = None
    hexdump = False
    ssloptions = None
    nocraft = False
    explain = True

    @classmethod
    def setup_class(cls):
        # Start one pathod test daemon for the whole class, with a throwaway
        # confdir for generated certificates.
        opts = cls.ssloptions or {}
        cls.confdir = tempfile.mkdtemp()
        opts["confdir"] = cls.confdir
        so = pathod.SSLOptions(**opts)
        cls.d = test.Daemon(
            staticdir=tutils.test_data.path("pathod/data"),
            anchors=[
                (re.compile("/anchor/.*"), "202:da")
            ],
            ssl=cls.ssl,
            ssloptions=so,
            sizelimit=1 * 1024 * 1024,
            nohang=cls.nohang,
            timeout=cls.timeout,
            hexdump=cls.hexdump,
            nocraft=cls.nocraft,
            logreq=True,
            logresp=True,
            explain=cls.explain
        )

    @classmethod
    def teardown_class(cls):
        # Stop the daemon and remove the temporary confdir.
        cls.d.shutdown()
        shutil.rmtree(cls.confdir)

    def teardown(self):
        # Let in-flight requests finish, then reset the daemon log between tests.
        self.d.wait_for_silence()
        self.d.clear_log()

    def _getpath(self, path, params=None):
        """Fetch *path* from the daemon using ``requests`` (not pathoc)."""
        scheme = "https" if self.ssl else "http"
        resp = requests.get(
            "%s://localhost:%s/%s" % (
                scheme,
                self.d.port,
                path
            ),
            # Trust the daemon's self-signed CA certificate.
            verify=os.path.join(self.d.thread.server.ssloptions.confdir, CA_CERT_NAME),
            params=params
        )
        return resp

    def getpath(self, path, params=None):
        """Fetch *path* through a pathoc client and return the response."""
        logfp = io.StringIO()
        c = pathoc.Pathoc(
            ("localhost", self.d.port),
            ssl=self.ssl,
            fp=logfp,
        )
        with c.connect():
            if params:
                path = path + "?" + urllib.parse.urlencode(params)
            resp = c.request("get:%s" % path)
            return resp

    def get(self, spec):
        """Request the crafted response described by the pathod *spec*."""
        logfp = io.StringIO()
        c = pathoc.Pathoc(
            ("localhost", self.d.port),
            ssl=self.ssl,
            fp=logfp,
        )
        with c.connect():
            resp = c.request(
                "get:/p/%s" % urllib.parse.quote(spec)
            )
            return resp

    def pathoc(
            self,
            specs,
            timeout=None,
            connect_to=None,
            ssl=None,
            ws_read_limit=None,
            use_http2=False,
    ):
        """
        Returns a (messages, text log) tuple.
        """
        # Per-call ssl override; defaults to the class-level setting.
        if ssl is None:
            ssl = self.ssl
        logfp = io.StringIO()
        c = pathoc.Pathoc(
            ("localhost", self.d.port),
            ssl=ssl,
            ws_read_limit=ws_read_limit,
            timeout=timeout,
            fp=logfp,
            use_http2=use_http2,
        )
        with c.connect(connect_to):
            ret = []
            for i in specs:
                resp = c.request(i)
                if resp:
                    ret.append(resp)
            # Collect any remaining frames (e.g. websocket traffic).
            for frm in c.wait():
                ret.append(frm)
            c.stop()
        return ret, logfp.getvalue()
def render(r, settings=None):
    """Resolve the pathod message *r* against *settings* and return the
    serialized bytes.

    BUG FIX: the original signature was ``settings=language.Settings()``,
    which creates a single shared Settings instance at import time (the
    mutable-default-argument pitfall).  A fresh instance is now created per
    call; callers that pass their own settings are unaffected.
    NOTE(review): assumes ``language.Settings()`` is cheap and stateless —
    confirm against the pathod language module.
    """
    if settings is None:
        settings = language.Settings()
    r = r.resolve(settings)
    s = io.BytesIO()
    # language.serve returns a truthy value on success; the assert preserves
    # the original fail-fast behavior (note: stripped under ``python -O``).
    assert language.serve(r, s, settings)
    return s.getvalue()
| StarcoderdataPython |
57269 | '''
Date: 01/08/2019
Problem description:
===================
This problem was asked by Google.
Given an array of integers where every integer occurs three times
except for one integer, which only occurs once, find and return the
non-duplicated integer.
For example, given [6, 1, 3, 3, 3, 6, 6], return 1.
Given [13, 19, 13, 13], return 19.
Do this in O(N) time and O(1) space.
Algorithm:
==========
Input: A list of numbers
Output: An integer represeting the non-duplicate value
Psuedo code:
1. Check for valid input
2. Rerurn value from set(list-comprehension) where element
count equals to one
Note: This is why I love Python!!!
'''
def find_non_dup(A=None):
    """Return a value of *A* that occurs exactly once.

    When several values are non-duplicated, the last such value (by position
    in *A*) is returned; the original implementation picked an arbitrary one
    via set ordering.  Returns None for empty/missing input or when every
    value is duplicated (the original raised IndexError in that case).

    Fixes over the original:
    - mutable default argument ``A=[]`` replaced with ``A=None``;
    - O(n^2) ``A.count(x)`` per element replaced with a single Counter pass.
    """
    from collections import Counter  # local import keeps this fix self-contained

    if not A:
        return None
    counts = Counter(A)
    non_dup = [x for x in A if counts[x] == 1]
    return non_dup[-1] if non_dup else None
def test_code():
    """Sanity check: the single non-duplicated value is recovered."""
    sample = [7, 3, 3, 3, 7, 8, 7]
    assert find_non_dup(sample) == 8
if __name__ == '__main__':
    # Demo: report the non-duplicated value for a sample list.
    Array = [9, 5, 5, 5, 8, 9, 8, 9, 3, 4, 4, 4]
    non_dup = find_non_dup(Array)
    print("Test1:\nGiven a list [{}]\nThe non-duplicate value is {}".format(', '.join(str(i) for i in Array), non_dup))
'''
Run-time output:
===============
(DailyCodingChallenge-wC3ocw3s) markn@raspberrypi3:~/devel/py-src/DailyCodingChallenge $ python codechallenge_025.py
Test1:
Given a list [9, 5, 5, 5, 8, 9, 8, 9, 3, 4, 4, 4]
The non-duplicate value is 3
(DailyCodingChallenge-wC3ocw3s) markn@raspberrypi3:~/devel/py-src/DailyCodingChallenge $ pytest codechallenge_025.py
================================ test session starts =================================
platform linux2 -- Python 2.7.13, pytest-3.6.3, py-1.5.4, pluggy-0.6.0
rootdir: /home/markn/devel/py-src/DailyCodingChallenge, inifile:
collected 1 item
codechallenge_025.py . [100%]
============================== 1 passed in 0.03 seconds ==============================
'''
| StarcoderdataPython |
17141 | <filename>WeLearn/M3-Python/L3-Python_Object/pet.py
# A pet described as a plain dict.
# NOTE(review): this dict is never used below -- the Pet class is the
# object-oriented version of the same data.
pet = {
    "name": "Doggo",
    "animal": "dog",
    "species": "labrador",
    "age": "5"
}
class Pet(object):
    """A simple pet with a name, age, species, hunger state and mood."""

    def __init__(self, name, age, animal):
        self.name = name
        self.age = age
        self.animal = animal
        # BUG FIX: the original set ``self.hungry`` here, but every reader
        # and writer (eat() and the script below) uses ``self.is_hungry``,
        # so calling eat() on a fresh instance raised AttributeError.
        self.is_hungry = False
        self.mood = "happy"

    def eat(self):
        """Feed the pet: clears hunger, or sours the mood when not hungry."""
        print("> %s is eating..." % self.name)
        if self.is_hungry:
            self.is_hungry = False
        else:
            print("> %s may have eaten too much." % self.name)
            # BUG FIX: the original string had a stray trailing space
            # ("lethargic ") that leaked into printed output.
            self.mood = "lethargic"
# Demo: create a pet, mark it hungry, feed it twice and watch the mood change.
my_pet= Pet("Fido", 3, "dog")
my_pet.is_hungry= True
print("is my pet hungry? %s"% my_pet.is_hungry)
my_pet.eat()
print("how about now? %s" % my_pet.is_hungry)
print ("My pet is feeling %s" % my_pet.mood)
| StarcoderdataPython |
158045 | <reponame>zevaverbach/epcon
from django.conf import settings
from django import template
register = template.Library()
@register.inclusion_tag("assopy/stripe/checkout_script.html")
def stripe_checkout_script(order, company_name=None, company_logo=None):
"""
Template tag that renders the stripe checkout script.
See https://stripe.com/docs/tutorials/checkout for more info.
"""
company_name = company_name or settings.STRIPE_COMPANY_NAME
company_logo = company_logo or settings.STRIPE_COMPANY_LOGO
# stripe need the amount in cents
total_amount = order.total() * 100
# order description
description = "\n".join(order.orderitem_set.values_list("description", flat=True))
return {
"publishable_key": settings.STRIPE_PUBLISHABLE_KEY,
"company_name": company_name,
"company_logo": company_logo,
"amount": total_amount,
"description": description,
"currency": settings.STRIPE_CURRENCY,
"allow_remember_me": settings.STRIPE_ALLOW_REMEMBER_ME,
"user_email": order.user.user.email,
}
@register.inclusion_tag("assopy/stripe/checkout_form.html")
def stripe_checkout_form(order, company_name=None, company_logo=None):
    """
    Template tag that renders a ready-to-use stripe checkout form.

    Unlike ``stripe_checkout_script`` this passes the raw order through and
    lets the template compute the details.
    See https://stripe.com/docs/tutorials/checkout for more info.
    """
    return {
        "order": order,
        "company_name": company_name or settings.STRIPE_COMPANY_NAME,
        "company_logo": company_logo or settings.STRIPE_COMPANY_LOGO,
    }
| StarcoderdataPython |
43766 | <filename>src/zc/sourcefactory/mapping.py<gh_stars>1-10
##############################################################################
#
# Copyright (c) 2006-2007 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A source proxy providing a mapping between values
"""
__docformat__ = "reStructuredText"
import zope.interface
import zope.schema.interfaces
@zope.interface.implementer(zope.schema.interfaces.IContextSourceBinder)
class ValueMappingSourceContextBinder(object):
    """Context binder that wraps a bound base source in a ValueMappingSource.

    ``base`` is itself an IContextSourceBinder; ``map`` is a one-argument
    callable applied to every value of the bound source.
    """

    def __init__(self, base, map):
        # NOTE: ``map`` shadows the builtin; kept for API compatibility.
        self.base = base
        self.map = map

    def __call__(self, context):
        source = self.base(context)
        return ValueMappingSource(source, self.map)
@zope.interface.implementer(zope.schema.interfaces.IIterableSource)
class ValueMappingSource(object):
    """Iterable source proxy that exposes ``map(value)`` for each base value.

    Reverse lookups (mapped value -> original value) are memoized in
    ``_mapping_cache`` and computed lazily: the base iterator is consumed
    only as far as needed and kept across calls.
    """

    def __init__(self, base, map):
        # NOTE: ``map`` shadows the builtin; kept for API compatibility.
        self.base = base
        self._mapping_cache = {}  # mapped value -> original value
        self.map = map

    def mapReverse(self, mapped_value):
        """Return the original value whose mapping equals *mapped_value*.

        Raises KeyError when no base value maps to it.
        """
        if mapped_value in self._mapping_cache:
            return self._mapping_cache[mapped_value]
        # Not found in cache, continue to look for the mapped value in
        # the rest of the iterator.  The iterator is created lazily and
        # shared across calls so already-seen values are not re-mapped.
        if not hasattr(self, '_cache_iterator'):
            self._cache_iterator = iter(self.base)
        for original_value in self._cache_iterator:
            original_mapped_value = self.map(original_value)
            self._mapping_cache[original_mapped_value] = original_value
            if mapped_value == original_mapped_value:
                return original_value
        raise KeyError(mapped_value)

    def __contains__(self, value):
        # Membership is defined on *mapped* values.
        try:
            self.mapReverse(value)
        except KeyError:
            return False
        else:
            return True

    def __iter__(self):
        for item in self.base:
            yield self.map(item)

    def __len__(self):
        return len(self.base)

    def __bool__(self):
        # Truthy as soon as the base yields anything; avoids a potentially
        # expensive len() on the base source.
        for dummy in self.base:
            return True
        return False

    __nonzero__ = __bool__  # Python 2 compatibility alias
| StarcoderdataPython |
1773640 | <filename>sfdata/posts/migrations/0002_post_story_id.py<gh_stars>1-10
# Generated by Django 2.1.4 on 2018-12-21 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required ``story_id`` integer column to Post.  The temporary
    # default (-1) only back-fills existing rows; preserve_default=False
    # drops it from the model afterwards.

    dependencies = [
        ('posts', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='story_id',
            field=models.IntegerField(default=-1),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
167576 | <gh_stars>1-10
import os
import yaml
import logging
import tempfile
import requests
from rasa_core.events import UserUttered, BotUttered, SlotSet
from requests.exceptions import RequestException
logger = logging.getLogger(__name__)
def load_from_remote(endpoint, type, temp_file=True):
    """Fetch *type* data (e.g. rules) from a model-server *endpoint*.

    Returns None when the server reports nothing new (204/304), a 404, any
    other non-200 status, or when the request fails entirely.  On a 200
    response, returns the path of a temporary YAML file holding the JSON
    payload when ``temp_file`` is True, otherwise the decoded JSON itself.

    NOTE(review): the parameter name ``type`` shadows the builtin; kept for
    interface compatibility.
    """
    try:
        response = endpoint.request(method='get')
        logger.debug("Requesting {} from server {}..."
                     "".format(type, endpoint.url))

        if response.status_code in [204, 304]:
            logger.debug("Model server returned {} status code, indicating "
                         "that no new {} are available.".format(response.status_code, type))
            return None
        elif response.status_code == 404:
            logger.warning("Tried to fetch {} from server but got a 404 response".format(type))
            return None
        elif response.status_code != 200:
            # Falls through and returns None implicitly.
            logger.warning("Tried to fetch {} from server, but server response "
                           "status code is {}."
                           "".format(type, response.status_code))
        else:
            if temp_file is True:
                # Persist the payload as YAML; delete=False means the caller
                # owns (and must eventually remove) the temp file.
                with tempfile.NamedTemporaryFile(mode='w', delete=False) as yamlfile:
                    yaml.dump(response.json(), yamlfile)
                return yamlfile.name
            else:
                return response.json()

    except RequestException as e:
        # Connection-level failure: log and return None so the caller retries.
        logger.warning("Tried to fetch rules from server, but couldn't reach "
                       "server. We'll retry later... Error: {}."
                       "".format(e))
def get_latest_parse_data_language(all_events):
    """Return the language of the most recent user event that carries parse
    data with a language, or None when no such event exists."""
    for event in reversed(all_events):
        is_user = event['event'] == 'user'
        if is_user and 'parse_data' in event and 'language' in event['parse_data']:
            return event['parse_data']['language']
    return None
def get_project_default_language(project_id, base_url):
    """Return the default language of *project_id*'s published model.

    Queries ``<base_url>/project/<project_id>/models/published`` and returns
    the ``default_language`` field of the JSON payload, or None when the
    request fails, the status is not 200, or the payload is empty.

    Fixes over the original:
    - removed a duplicate ``requests.get(url)`` issued *before* the try
      block (doubled network traffic and let connection errors escape);
    - replaced ``result.json().error`` (AttributeError: dicts have no
      ``.error`` attribute) with an explicit None return for empty payloads;
    - the exception handler no longer calls ``result.json()``, which could
      itself raise or reference an unassigned variable.
    """
    url = '{base_url}/project/{project_id}/models/published'.format(base_url=base_url, project_id=project_id)
    try:
        result = requests.get(url)
        if result.status_code == 200:
            payload = result.json()
            if payload:
                return payload.get('default_language', None)
            # Empty body: no published model, so no default language.
            return None
        logger.error(
            "Failed to get project default language. "
            "Status: {} Body: {}".format(result.status_code, result.text))
        return None
    except Exception as e:
        logger.error(
            "Failed to get project default language. "
            "Error: {}".format(e))
        return None
def events_to_dialogue(events):
    """Render user/bot events as a readable transcript string.

    Events that are neither ``user`` nor ``bot`` are skipped.
    """
    speaker_labels = {'user': 'User', 'bot': 'Bot'}
    parts = []
    for event in events:
        label = speaker_labels.get(event["event"])
        if label is not None:
            parts.append("\n {}: {}".format(label, event['text']))
    return "".join(parts)
def slots_from_profile(user_id, user_profile):
    """Build the tracker SlotSet events for a user's profile fields."""
    slot_values = [
        ("user_id", user_id),
        ("first_name", user_profile["first_name"]),
        ("last_name", user_profile["last_name"]),
        ("phone", user_profile["phone"]),
        ("user_profile", user_profile),
    ]
    return [SlotSet(name, value) for name, value in slot_values]
| StarcoderdataPython |
3234567 | #!/usr/bin/env python
import binascii, sys, re
import sploit
pattern = re.compile(r"\s+")
def show_help():
    """Print usage information for the hex-to-binary converter.

    BUG FIX: the original defined ``show_help`` twice, used the invalid
    expression ``printsys.argv[0] + ...``, relied on Python-2 print
    statements, and misspelled "input" as "imput".
    """
    print(sys.argv[0] + " input.hex output.bin")
    print("\tinput.hex - file")
    print("\toutput.bin")
def hextobin(infile, outfile):
    """Read whitespace-separated hex text from *infile* and write the decoded
    raw bytes to *outfile*; "-" selects stdin/stdout respectively.

    BUG FIXES over the original (which did not parse at all):
    - ``def hextobin(infile, outfile);`` used ``;`` instead of ``:``;
    - a generator expression was missing its closing parenthesis;
    - the undefined exception name ``IQError`` is now ``IOError``;
    - one error message was an unquoted bareword (syntax error);
    - ``bingstring`` typo corrected to ``binstring``;
    - the read-error path now returns instead of falling through to use an
      unassigned variable;
    - binary data is written via ``"wb"`` / ``sys.stdout.buffer`` instead of
      text mode.
    """
    if infile == "-":
        hexstring = "".join(re.sub(r"\s+", "", line) for line in sys.stdin)
    else:
        try:
            with open(infile) as hexfile:
                hexstring = "".join(re.sub(r"\s+", "", line) for line in hexfile)
        except IOError:
            sploit.show_error('Tu ne peux pas implanté le dossier')
            return

    binstring = binascii.unhexlify(hexstring)

    if outfile == "-":
        sys.stdout.buffer.write(binstring)
    else:
        try:
            with open(outfile, "wb") as binfile:
                binfile.write(binstring)
        except IOError:
            sploit.show_error('Ne peux pas lire ce dossier. Vérifier les permissions')
# BUG FIX: the original guard line was missing its trailing colon.
if __name__ == "__main__":
    if len(sys.argv) != 3:
        show_help()
    else:
        hextobin(sys.argv[1], sys.argv[2])
| StarcoderdataPython |
1601461 | from parameterized import parameterized
from cinemanio.api.helpers import global_id
from cinemanio.api.schema.movie import MovieNode
from cinemanio.api.schema.person import PersonNode
from cinemanio.api.tests.base import ListQueryBaseTestCase
from cinemanio.core.factories import MovieFactory, PersonFactory
from cinemanio.relations.factories import (MovieRelationFactory, PersonRelationFactory,
MovieRelationCountFactory, PersonRelationCountFactory)
from cinemanio.relations.models import MovieRelation, PersonRelation
from cinemanio.relations.signals import relation_changed
from cinemanio.relations.tests import RelationsTestMixin
class RelationsQueryTestCase(ListQueryBaseTestCase, RelationsTestMixin):
    """GraphQL API tests for user<->movie/person relations.

    Most tests are parameterized to run once for movies and once for persons;
    movie relations carry the codes (fav, like, seen), person relations only
    (fav, like).  The GraphQL documents below are ``%``-templates filled in
    with the model-specific node type names by the ``get_*_vars`` helpers.
    """

    # Mutation template: toggle one relation code for one object.
    relate_mutation = '''
        mutation Relate($id: ID!, $code: String!) {
          relate(id: $id, code: $code) {
            relation {
              ...RelationFields
            }
            count {
              ...RelationCountFields
            }
          }
        }
        fragment RelationFields on %s {
          %s
        }
        fragment RelationCountFields on %s {
          %s
        }
    '''

    # Query template: one object with the current user's relation + counts.
    object_relation_query = '''
        query %s($id: ID!) {
          %s(id: $id) {
            relation {
              ...RelationFields
            }
            relationsCount {
              ...RelationCountFields
            }
          }
        }
        fragment RelationFields on %s {
          %s
        }
        fragment RelationCountFields on %s {
          %s
        }
    '''

    # Query template: list of objects with relation + counts per node.
    objects_relation_query = '''
        query %s {
          %s {
            edges {
              node {
                relation {
                  ...RelationFields
                }
                relationsCount {
                  ...RelationCountFields
                }
              }
            }
          }
        }
        fragment RelationFields on %s {
          %s
        }
        fragment RelationCountFields on %s {
          %s
        }
    '''

    def setUp(self):
        self.user = self.create_user()

    def get_relate_vars(self, rel):
        """Template values for ``relate_mutation`` for *rel*'s model."""
        model_name = rel.object.__class__.__name__
        return (f'{model_name}RelationNode', ', '.join(rel.codes),
                f'{model_name}RelationCountNode', ', '.join(rel.codes))

    def get_object_vars(self, rel):
        """Template values for ``object_relation_query`` for *rel*'s model."""
        model_name = rel.object.__class__.__name__
        return (model_name,
                model_name.lower(),
                f'{model_name}RelationNode', ', '.join(rel.codes),
                f'{model_name}RelationCountNode', ', '.join(rel.codes))

    def get_objects_vars(self, rel):
        """Template values for ``objects_relation_query`` (plural form)."""
        model_name = rel.object.__class__.__name__
        return (model_name + 's',
                model_name.lower() + 's',
                f'{model_name}RelationNode', ', '.join(rel.codes),
                f'{model_name}RelationCountNode', ', '.join(rel.codes))

    def create_relation(self, factory, **kwargs):
        """Create a relation for self.user, set the given codes and fire the
        relation_changed signal so counters get updated."""
        rel = factory(user=self.user)
        for code, value in kwargs.items():
            setattr(rel, code, value)
        rel.save()
        relation_changed.send(sender=rel.__class__, instance=rel)
        return rel

    def assert_relation_and_counts(self, relation, instance, codes):
        """Assert DB state: exactly one relation with *codes* set and counts
        of 1 for those codes on *instance*."""
        self.assertEqual(relation.objects.count(), 1)
        rel = relation.objects.last()
        self.assertEqual(rel.object, instance)
        self.assertEqual(rel.user, self.user)
        self.assert_relation(rel, codes)
        for code in rel.codes:
            count = getattr(instance.relations_count, code)
            self.assertEqual(count, 1 if code in codes else 0)

    def assert_response_relation_and_counts(self, relation, relation_count, rel, codes):
        """Assert GraphQL response flags/counts for an authenticated user."""
        for code in rel.codes:
            self.assertEqual(relation[code], code in codes)
            self.assertEqual(relation_count[code], 1 if code in codes else 0)

    def assert_unauth_response_relation_and_counts(self, relation, relation_count, rel, codes):
        """Anonymous users see all relation flags False but real counts."""
        for code in rel.codes:
            self.assertEqual(relation[code], False)
            self.assertEqual(relation_count[code], 1 if code in codes else 0)

    @parameterized.expand([
        (MovieFactory, MovieNode, MovieRelation, ['fav', 'like', 'seen'], 23),
        (PersonFactory, PersonNode, PersonRelation, ['fav', 'like'], 19),
    ])
    def test_relate_first_time(self, factory, node, relation, codes, queries_count):
        # Favoriting implies the other codes (like/seen) on first relate.
        instance = factory()
        rel = relation(object=instance)
        self.assertEqual(relation.objects.count(), 0)
        with self.assertNumQueries(9 + queries_count):
            result = self.execute(self.relate_mutation % self.get_relate_vars(rel),
                                  dict(id=global_id(instance), code='fav'))
        self.assert_response_relation_and_counts(result['relate']['relation'],
                                                 result['relate']['count'], relation(), codes)
        self.assert_relation_and_counts(relation, instance, codes)

    @parameterized.expand([
        (MovieRelationFactory, MovieNode, MovieRelation, ['like', 'seen'], 19),
        (PersonRelationFactory, PersonNode, PersonRelation, ['like'], 15),
    ])
    def test_change_relation(self, factory, node, relation, codes, queries_count):
        # Relating 'fav' again toggles it off, leaving the other codes set.
        fav_codes = codes + ['fav']
        rel = self.create_relation(factory, **{code: True for code in fav_codes})
        self.assertEqual(relation.objects.count(), 1)
        self.assert_relation(rel, fav_codes)
        with self.assertNumQueries(6 + queries_count):
            result = self.execute(self.relate_mutation % self.get_relate_vars(rel),
                                  dict(id=global_id(rel.object), code='fav'))
        self.assert_response_relation_and_counts(result['relate']['relation'],
                                                 result['relate']['count'], rel, codes)
        self.assert_relation_and_counts(relation, rel.object, codes)

    @parameterized.expand([
        (MovieRelationFactory, MovieNode, ['fav', 'like', 'seen']),
        (PersonRelationFactory, PersonNode, ['fav', 'like']),
    ])
    def test_object_relation(self, factory, node, codes):
        rel = self.create_relation(factory, **{code: True for code in codes})
        query_name = rel.object._meta.model_name
        with self.assertNumQueries(2):
            result = self.execute(self.object_relation_query % self.get_object_vars(rel),
                                  dict(id=global_id(rel.object)))
        self.assert_response_relation_and_counts(result[query_name]['relation'],
                                                 result[query_name]['relationsCount'], rel, codes)

    @parameterized.expand([
        (MovieFactory, MovieNode, MovieRelation),
        (PersonFactory, PersonNode, PersonRelation),
    ])
    def test_object_no_relation(self, factory, node, relation):
        # No relation row yet: all flags False, all counts zero.
        instance = factory()
        rel = relation(object=instance)
        query_name = instance._meta.model_name
        with self.assertNumQueries(2):
            result = self.execute(self.object_relation_query % self.get_object_vars(rel),
                                  dict(id=global_id(instance)))
        self.assert_response_relation_and_counts(result[query_name]['relation'],
                                                 result[query_name]['relationsCount'], relation(), [])

    @parameterized.expand([
        (MovieRelationFactory, MovieNode, ['fav', 'like', 'seen']),
        (PersonRelationFactory, PersonNode, ['fav', 'like']),
    ])
    def test_object_relation_unauth(self, factory, node, codes):
        rel = self.create_relation(factory, **{code: True for code in codes})
        query_name = rel.object._meta.model_name
        with self.assertNumQueries(1):
            result = self.execute(self.object_relation_query % self.get_object_vars(rel),
                                  dict(id=global_id(rel.object)),
                                  self.get_context())
        self.assert_unauth_response_relation_and_counts(result[query_name]['relation'],
                                                        result[query_name]['relationsCount'], rel, codes)

    @parameterized.expand([
        (MovieRelationFactory, MovieNode, ['fav', 'like', 'seen']),
        (PersonRelationFactory, PersonNode, ['fav', 'like']),
    ])
    def test_objects_relation(self, factory, node, codes):
        for i in range(self.count):
            rel = self.create_relation(factory, **{code: True for code in codes})
        query_name = rel.object._meta.model_name + 's'
        with self.assertNumQueries(3):
            result = self.execute(self.objects_relation_query % self.get_objects_vars(rel))
        self.assertEqual(len(result[query_name]['edges']), self.count)
        for obj in result[query_name]['edges']:
            self.assert_response_relation_and_counts(obj['node']['relation'],
                                                     obj['node']['relationsCount'], rel, codes)

    @parameterized.expand([
        (MovieFactory, MovieRelation),
        (PersonFactory, PersonRelation),
    ])
    def test_objects_no_relation(self, factory, relation):
        for i in range(self.count):
            instance = factory()
        query_name = instance._meta.model_name + 's'
        rel = relation(object=instance)
        with self.assertNumQueries(3):
            result = self.execute(self.objects_relation_query % self.get_objects_vars(rel))
        self.assertEqual(len(result[query_name]['edges']), self.count)
        for obj in result[query_name]['edges']:
            self.assert_response_relation_and_counts(obj['node']['relation'],
                                                     obj['node']['relationsCount'], rel, [])

    @parameterized.expand([
        (MovieRelationFactory, ['fav', 'like', 'seen']),
        (PersonRelationFactory, ['fav', 'like']),
    ])
    def test_objects_relation_unauth(self, factory, codes):
        for i in range(self.count):
            rel = self.create_relation(factory, **{code: True for code in codes})
        query_name = rel.object._meta.model_name + 's'
        with self.assertNumQueries(2):
            result = self.execute(self.objects_relation_query % self.get_objects_vars(rel),
                                  None,
                                  self.get_context())
        self.assertEqual(len(result[query_name]['edges']), self.count)
        for obj in result[query_name]['edges']:
            self.assert_unauth_response_relation_and_counts(obj['node']['relation'],
                                                            obj['node']['relationsCount'], rel, codes)

    @parameterized.expand([
        (MovieRelationFactory,),
        (PersonRelationFactory,),
    ])
    def test_objects_relation_filter(self, factory):
        # Half of the created relations have fav=True; filtering on the
        # 'fav' relation should return exactly that half.
        for i in range(self.count):
            rel = self.create_relation(factory, fav=bool(i % 2))
        query_name = rel.object._meta.model_name + 's'
        query = '''
            query %s($relation: String!) {
              %s(relation: $relation) {
                edges {
                  node {
                    id
                  }
                }
              }
            }
        ''' % (query_name, query_name)
        with self.assertNumQueries(2):
            result = self.execute(query, dict(relation='fav'))
        self.assertEqual(len(result[query_name]['edges']), 50)

    @parameterized.expand([
        (MovieRelationCountFactory,),
        (PersonRelationCountFactory,),
    ])
    def test_objects_relation_order(self, factory):
        for i in range(self.count):
            rel = factory()
            query_name = rel.object._meta.model_name + 's'
            model = rel.object.__class__
            model_name = rel.object.__class__.__name__
            # Simulate the absence of a relations_count instance for half
            # of the objects.
            if i % 2 == 0:
                rel.delete()
        query = '''
            query %s($order: String!) {
              %s(orderBy: $order) {
                edges {
                  node {
                    id
                    relationsCount {
                      ...RelationCountFields
                    }
                  }
                }
              }
            }
            fragment RelationCountFields on %s {
              fav
            }
        ''' % (query_name, query_name, f'{model_name}RelationCountNode')

        def get_value_instance(instance):
            # Objects without a counters row behave as if all counts are
            # the model defaults.
            try:
                return instance.relations_count.fav
            except factory._meta.model.DoesNotExist:
                return factory._meta.model().fav

        self.assert_response_orders(query, query_name, order_by='relations_count__fav', queries_count=2, model=model,
                                    get_value_instance=get_value_instance,
                                    get_value_result=lambda n: n['relationsCount']['fav'])
4832908 | <gh_stars>0
# Create an empty dictionary mapping names to ages.
people = {}

name = 'jon'
age = 20
name2 = 'aly'
age2 = 21

# Insert entries into the dict by key assignment.
people[name] = age
people[name2] = age2
print(people)

# Add an entry via update().
people.update({'fred': 24})
print(people)

# Iterate the dictionary's keys and values together.
print('\nDisplaying dictionary data')
for name, age in people.items():
    print('name: ', name, ' age: ', age)

# Iterate the dict's keys.
print('\nDisplaying the dictionary keys')
for key in people.keys():
    print('name: ', key)

# Iterate the dict's values.
print('\nDisplaying the dictionary values')
for val in people.values():
    print('age: ', val)

# Sum and average the ages (people is known to be non-empty here).
print('\nDisplaying dictionary stats')
print('sum of ages: ', sum(people.values()))
print('avg of ages: ', sum(people.values())/len(people))
''' output
{'jon': 20, 'aly': 21}
{'jon': 20, 'aly': 21, 'fred': 24}
Displaying dictionary data
name: jon age: 20
name: aly age: 21
name: fred age: 24
Displaying the dictionary keys
name: jon
name: aly
name: fred
Displaying the dictionary values
age: 20
age: 21
age: 24
Displaying dictionary stats
sum of ages: 65
avg of ages: 21.666666666666668
'''
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.