id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5109019 | <reponame>hupili/legco-watch
from django.forms import Form, BaseForm, fields_for_model, BooleanField
class OverrideForm(Form):
    """Form used to override values on a model instance."""

    deactivate = BooleanField()

    @classmethod
    def from_model(cls, instance, form_kwargs=None):
        """Build an OverrideForm whose fields mirror the overridable
        fields of *instance*'s model.

        :param instance: model instance providing ``get_overridable_fields()``
        :param form_kwargs: optional kwargs forwarded to the Form constructor
        :return: a new form instance carrying the model-derived fields
        """
        kwargs = {} if form_kwargs is None else form_kwargs
        overridable = [field.name for field in instance.get_overridable_fields()]
        form = cls(**kwargs)
        # NOTE(review): assigning ``fields`` replaces the declared fields
        # (including ``deactivate``) with the model-derived ones — confirm
        # that dropping ``deactivate`` here is intended.
        form.fields = fields_for_model(instance._meta.model, fields=overridable)
        return form
| StarcoderdataPython |
8184084 | from attachments.models import Attachment
from django.contrib.contenttypes import admin
class AttachmentInlines(admin.GenericStackedInline):
    """Stacked admin inline for Attachment objects attached via a
    generic relation."""
    model = Attachment
    # Show one extra blank attachment form by default.
    extra = 1
4851130 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from netaddr import IPAddress, IPSet
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.utils.module_loading import import_string
# Django >= 1.10 middleware must derive from MiddlewareMixin; on older
# Django versions the import fails, so fall back to a plain ``object``
# base class to keep the middleware classes below importable.
try:
    from django.utils.deprecation import MiddlewareMixin
except ImportError:
    MiddlewareMixin = object

# Dotted path of the default callable that extracts the client address
# from a request; overridable via settings.MIDDLEWALL_ADDRESS_GETTER.
DEFAULT_GETTER = 'middlewall.utils.get_remote_addr'
def get_ipaddr(request):
    """Return the client address of *request* as a netaddr ``IPAddress``.

    The extraction callable is configurable through the
    ``MIDDLEWALL_ADDRESS_GETTER`` setting (dotted path); it defaults to
    ``DEFAULT_GETTER``.
    """
    getter_path = getattr(settings, 'MIDDLEWALL_ADDRESS_GETTER', DEFAULT_GETTER)
    getter = import_string(getter_path)
    return IPAddress(getter(request))
def get_ipset_from_settings(name):
    """Build an ``IPSet`` from the setting *name* (empty when unset)."""
    addresses = getattr(settings, name, [])
    return IPSet(addresses)
class WhitelistMiddleware(MiddlewareMixin):
    """Reject any request whose client address is not whitelisted.

    Raises :class:`django.core.exceptions.PermissionDenied` (HTTP 403)
    when the address is outside ``settings.MIDDLEWALL_WHITELIST``.
    """

    def process_request(self, request):
        whitelist = get_ipset_from_settings('MIDDLEWALL_WHITELIST')
        # Idiom fix: ``x not in y`` instead of ``not x in y`` (equivalent,
        # but the preferred spelling per PEP 8).
        if get_ipaddr(request) not in whitelist:
            raise PermissionDenied
class BlacklistMiddleware(MiddlewareMixin):
    """Reject any request whose client address is blacklisted.

    Raises :class:`django.core.exceptions.PermissionDenied` when the
    address is inside ``settings.MIDDLEWALL_BLACKLIST``.
    """

    def process_request(self, request):
        denied = get_ipset_from_settings('MIDDLEWALL_BLACKLIST')
        addr = get_ipaddr(request)
        if addr in denied:
            raise PermissionDenied
| StarcoderdataPython |
5142195 | Cassandra
=========
The Cassandra API is under development - coming soon!
| StarcoderdataPython |
6583810 | #!/usr/bin/env python3
# .. _`workbook_csv`:
#
#
# CSV Workbook
# ---------------
#
# ::
import csv
import logging
import pprint
from stingray.workbook.base import Workbook
import stingray.sheet
import stingray.cell
# .. py:module:: workbook.csv
#
# .. py:class:: CSV_Workbook
#
# Extract sheets, rows and cells from a CSV file.
#
# A wrapper for :py:func:`csv.reader`. This will create proper
# :py:class:`cell.TextCell` instances instead of the default string values
# that :py:mod:`csv` normally creates.
#
# There's only a single sheet and it matches the filename.
#
# In addition to the superclass attributes, an additional unique
# attribute is introduced here.
#
# .. py:attribute:: rdr
#
# The csv reader for this file.
#
# ::
class CSV_Workbook(Workbook):
    """Workbook backed by :py:func:`csv.reader`.

    There is a single sheet whose name matches the file name.  Cells are
    materialized as :py:class:`stingray.cell.TextCell` instances instead
    of the plain strings :py:mod:`csv` normally produces.

    .. py:attribute:: rdr

        The underlying ``csv.reader`` for this file.
    """

    def __init__(self, name, file_object=None, **kw):
        """Prepare the workbook for reading.

        :param name: file name
        :param file_object: optional file-like object; when omitted the
            named file is opened.  If provided, it must have been opened
            with ``newline=''`` to permit non-standard line endings.

        Remaining keyword arguments are passed to :py:func:`csv.reader`
        to provide dialect information.
        """
        super().__init__(name, file_object)
        if self.file_obj:
            self.the_file = None
            self.rdr = csv.reader(self.file_obj, **kw)
        else:
            self.the_file = open(name, 'r', newline='')
            self.rdr = csv.reader(self.the_file, **kw)

    def rows_of(self, sheet):
        """Iterate over all rows of the named sheet.

        For CSV files ``sheet.name`` is simply ignored; each row is an
        eager :py:class:`stingray.sheet.Row` of ``TextCell`` instances.
        Conversions happen lazily at :py:meth:`row_get` time.
        """
        for raw in self.rdr:
            logging.debug(pprint.pformat(raw, indent=4))
            cells = (stingray.cell.TextCell(value, self) for value in raw)
            yield stingray.sheet.Row(sheet, *cells)

    def row_get(self, row, attribute):
        """Return the already-built Cell of *row* at *attribute*'s position."""
        return row[attribute.position]
# Since :py:mod:`csv` is eager, returning an individual :py:class:`cell.TextCell`
# is easy.
| StarcoderdataPython |
8028476 | from .task6 import get_all_shifts
def test_get_all_shifts():
    ''' get_all_shifts should return the 26 Caesar shifts of the input '''
    assert get_all_shifts('ABC') == ['ABC', 'BCD', 'CDE', 'DEF', 'EFG', 'FGH', 'GHI', 'HIJ', 'IJK', 'JKL', 'KLM', 'LMN', 'MNO', 'NOP', 'OPQ', 'PQR', 'QRS', 'RST', 'STU', 'TUV', 'UVW', 'VWX', 'WXY', 'XYZ', 'YZA', 'ZAB']
| StarcoderdataPython |
4801496 | <reponame>tho-wa/virushack
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 15:18:46 2020
@author: LB
response = requests.get(
'https://hystreet.com/api/locations/',
params={},
headers={'X-API-Token': '<KEY>'},
)
json_response = response.json()
| StarcoderdataPython |
8001832 | <reponame>fogathmann/TheLMA
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
"""
from pyramid.compat import iteritems_
from everest.entities.utils import get_root_aggregate
from everest.querying.specifications import eq
from thelma.interfaces import IRack
from thelma.tools.base import BaseTool
from thelma.tools.semiconstants import get_item_status_managed
__docformat__ = 'reStructuredText en'
__all__ = ['PlateCopier'
]
class PlateCopier(BaseTool):
    """Copy samples from a source plate into one or more target plates.

    For every source container that holds a sample, a sample of
    ``transfer_volume`` is created in the container at the same position
    of each target rack, duplicating all sample molecules (same molecule,
    same concentration).  Each target rack is then flagged as managed.
    """
    NAME = 'Plate Copier'

    def __init__(self, source_barcode, target_barcodes, transfer_volume,
                 parent=None):
        """
        :param source_barcode: barcode of the source rack.
        :param target_barcodes: comma-separated string of target barcodes.
        :param transfer_volume: volume for each new target sample, in
            microliters (stored internally in liters).
        :param parent: optional parent tool.
        """
        BaseTool.__init__(self, parent=parent)
        self.__source_barcode = source_barcode
        self.__target_barcodes = target_barcodes.split(',')
        # Input is given in ul; convert to liters.
        self.__transfer_volume = float(transfer_volume) * 1e-6

    def run(self):
        src_rack = self.__get_rack(self.__source_barcode)
        for tgt_bc in self.__target_barcodes:
            tgt_rack = self.__get_rack(tgt_bc)
            for pos, src_cnt_loc in iteritems_(src_rack.container_positions):
                # Idiom fix: ``x is not None`` instead of ``not x is None``.
                if src_cnt_loc.container is not None:
                    src_cnt = src_cnt_loc.container
                    if src_cnt.sample is not None:
                        src_smpl = src_cnt.sample
                        tgt_cnt = tgt_rack.container_positions[pos]
                        tgt_smpl = tgt_cnt.make_sample(self.__transfer_volume)
                        for sm in src_smpl.sample_molecules:
                            tgt_smpl.make_sample_molecule(sm.molecule,
                                                          sm.concentration)
            tgt_rack.status = get_item_status_managed()

    def __get_rack(self, barcode):
        """Fetch the rack with *barcode* from the root rack aggregate.

        Raises StopIteration when no rack matches the barcode.
        """
        rack_agg = get_root_aggregate(IRack)
        rack_agg.filter = eq(barcode=barcode)
        return next(rack_agg.iterator())
| StarcoderdataPython |
1894585 | <reponame>cgoldberg/corey-projects<gh_stars>0
#!/usr/bin/env python
#
# Copyright (c) 2010-2011 <NAME> (http://goldb.org)
#
# This file is part of linux_metrics
#
# License :: OSI Approved :: MIT License:
# http://www.opensource.org/licenses/mit-license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
"""
disk_stat - Python Module for Disk Stats on Linux
requires:
- Python 2.6+
- Linux 2.6.x
functions:
- disk_busy(device, sample_duration=1)
- disk_reads_writes(device)
- disk_reads_writes_persec(device, sample_duration=1)
example:
#!/usr/bin/env python
import disk_stat
r, w = disk_stat.disk_reads_writes('sda')
print 'reads: %s' % r
print 'writes: %s' % w
print 'busy: %s%%' % disk_stat.disk_busy('sda', 5)
rps, wps = disk_stat.disk_reads_writes_persec('sda', 5)
print 'reads per sec: %s' % rps
print 'writes per sec: %s' % wps
"""
import time
def disk_busy(device, sample_duration=1):
    """Return disk busy percent for *device* over *sample_duration* seconds.

    Samples /proc/diskstats twice, *sample_duration* seconds apart, and
    derives the busy percentage from field 10 after the device name
    (milliseconds spent doing I/O).

    Raises ValueError when *device* does not appear in /proc/diskstats
    (the original code failed with a confusing NameError in that case).
    """
    with open('/proc/diskstats') as f1:
        with open('/proc/diskstats') as f2:
            content1 = f1.read()
            time.sleep(sample_duration)
            # /proc files regenerate their content on each read, so this
            # second read reflects the state after the sleep.
            content2 = f2.read()
    sep = '%s ' % device

    def _io_ms(content):
        # Field 10 after the device name: total time spent doing I/O (ms).
        for line in content.splitlines():
            if sep in line:
                return int(line.strip().split(sep)[1].split()[9])
        raise ValueError('device %r not found in /proc/diskstats' % device)

    delta = _io_ms(content2) - _io_ms(content1)
    total = sample_duration * 1000
    busy_pct = 100 - (100 * (float(total - delta) / total))
    return busy_pct
def disk_reads_writes(device):
    """Return cumulative (reads, writes) completion counts for *device*.

    Fields 1 and 5 after the device name in /proc/diskstats are the
    completed read and write counts.

    Raises ValueError when *device* is not present (previously this path
    produced an unbound-variable NameError).
    """
    with open('/proc/diskstats') as f:
        content = f.read()
    sep = '%s ' % device
    for line in content.splitlines():
        if sep in line:
            fields = line.strip().split(sep)[1].split()
            return int(fields[0]), int(fields[4])
    raise ValueError('device %r not found in /proc/diskstats' % device)
def disk_reads_writes_persec(device, sample_duration=1):
    """Return (reads/sec, writes/sec) for *device* over *sample_duration*.

    Samples /proc/diskstats twice, *sample_duration* seconds apart, and
    divides the deltas by the duration.

    Raises ValueError when *device* is not present (previously this path
    produced an unbound-variable NameError).
    """
    with open('/proc/diskstats') as f1:
        with open('/proc/diskstats') as f2:
            content1 = f1.read()
            time.sleep(sample_duration)
            content2 = f2.read()
    sep = '%s ' % device

    def _counts(content):
        # Fields 1 and 5 after the device name: reads and writes completed.
        for line in content.splitlines():
            if sep in line:
                fields = line.strip().split(sep)[1].split()
                return int(fields[0]), int(fields[4])
        raise ValueError('device %r not found in /proc/diskstats' % device)

    num_reads1, num_writes1 = _counts(content1)
    num_reads2, num_writes2 = _counts(content2)
    reads_per_sec = (num_reads2 - num_reads1) / float(sample_duration)
    writes_per_sec = (num_writes2 - num_writes1) / float(sample_duration)
    return reads_per_sec, writes_per_sec
if __name__ == '__main__':
    # Smoke test: print stats for device 'sda'.  Note the Python 2
    # print statements — this module targets Python 2.6+, as stated in
    # the module docstring.
    r, w = disk_reads_writes('sda')
    print 'reads: %s' % r
    print 'writes: %s' % w
    print 'busy: %s%%' % disk_busy('sda', 5)
    rps, wps = disk_reads_writes_persec('sda', 5)
    print 'reads per sec: %s' % rps
    print 'writes per sec: %s' % wps
| StarcoderdataPython |
181481 | <filename>src/glip/gl/input/__init__.py
from .keyboard import Key, ModifierKey, KeyboardShortcut, Keyboard
from .mouse import MouseButton, Mouse
| StarcoderdataPython |
4806933 | <reponame>dolbyio-samples/dolbyio-rest-apis-client-python
"""
dolbyio_rest_apis.communications.authentication
~~~~~~~~~~~~~~~
This module contains the functions to work with the authentication API.
"""
from deprecated import deprecated
from dolbyio_rest_apis.core.helpers import add_if_not_none
from dolbyio_rest_apis.communications.internal.http_context import CommunicationsHttpContext
from dolbyio_rest_apis.communications.internal.urls import get_api_v1_url, get_session_url
from .models import AccessToken
async def _get_access_token(
    url: str,
    consumer_key: str,
    consumer_secret: str,
    expires_in: int=None,
) -> AccessToken:
    """POST a client-credentials grant to *url* and return the token.

    Shared implementation behind the API and client token helpers; the
    optional *expires_in* is only sent when provided.
    """
    payload = {'grant_type': 'client_credentials'}
    add_if_not_none(payload, 'expires_in', expires_in)
    async with CommunicationsHttpContext() as http_context:
        response = await http_context.requests_post_basic_auth(
            consumer_key=consumer_key,
            consumer_secret=consumer_secret,
            url=url,
            data=payload,
        )
        return AccessToken(response)
async def get_api_access_token(
    consumer_key: str,
    consumer_secret: str,
    expires_in: int=None,
) -> AccessToken:
    r"""
    Acquire a JWT-format access token for calling the REST APIs.

    Make sure to use this API against https://api.voxeet.com/v1.
    Note: despite the OAuth terminology, these endpoints are not OAuth
    compliant.

    See: https://docs.dolby.io/communications-apis/reference/get-bearer-token

    Args:
        consumer_key: Your Dolby.io Consumer Key.
        consumer_secret: Your Dolby.io Consumer Secret.
        expires_in: (Optional) Access token expiration time in seconds.
            The maximum value is 2,592,000 (30 days); when omitted the
            default is 600 (ten minutes).

    Returns:
        An :class:`AccessToken` object.

    Raises:
        HttpRequestError: If a client error one occurred.
        HTTPError: If one occurred.
    """
    token_url = f'{get_api_v1_url()}/auth/token'
    return await _get_access_token(token_url, consumer_key, consumer_secret, expires_in)
async def get_client_access_token(
    consumer_key: str,
    consumer_secret: str,
    expires_in: int=None,
) -> AccessToken:
    r"""
    Return an access token your backend can request on behalf of a
    client to initialize the Dolby.io SDK in a secure way.

    Make sure to use this API against https://session.voxeet.com.
    Note: despite the OAuth2 terminology, these endpoints are not OAuth2
    compliant.

    See: https://docs.dolby.io/communications-apis/reference/get-client-access-token

    Args:
        consumer_key: Your Dolby.io Consumer Key.
        consumer_secret: Your Dolby.io Consumer Secret.
        expires_in: (Optional) Access token expiration time in seconds.
            The maximum value is 2,592,000 (30 days); when omitted the
            default is 600 (ten minutes).

    Returns:
        An :class:`AccessToken` object.

    Raises:
        HttpRequestError: If a client error one occurred.
        HTTPError: If one occurred.
    """
    token_url = f'{get_session_url()}/oauth2/token'
    return await _get_access_token(token_url, consumer_key, consumer_secret, expires_in)
@deprecated(reason='This API is no longer applicable for applications on the new Dolby.io Communications APIs platform.')
async def revoke_access_token(
    consumer_key: str,
    consumer_secret: str,
    access_token: str,
) -> None:
    r"""
    Revoke the given authentication token.

    See: https://docs.dolby.io/communications-apis/reference/revoke-token

    Args:
        consumer_key: Your Dolby.io Consumer Key.
        consumer_secret: Your Dolby.io Consumer Secret.
        access_token: The access token to revoke.

    Raises:
        HttpRequestError: If a client error one occurred.
        HTTPError: If one occurred.
    """
    payload = {'access_token': access_token}
    revoke_url = f'{get_session_url()}/oauth2/invalidate'
    async with CommunicationsHttpContext() as http_context:
        await http_context.requests_post_basic_auth(
            consumer_key=consumer_key,
            consumer_secret=consumer_secret,
            url=revoke_url,
            data=payload,
        )
| StarcoderdataPython |
9756843 | """
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import List, Tuple, Any, Optional
import aiosqlite
async def execute_query(database_name: str, query: str, values: Optional[Tuple[Any, ...]] = None) -> Optional[int]:
    """
    Execute a sqlite3 statement.

    Note: Use retrieve_query() for 'SELECT' statements.

    Parameters:
        database_name (str): The name of the database.
        query (str): The statement to execute.
        values (Optional[Tuple[Any, ...]]): The values to insert into the query.

    Returns:
        (Optional[int]): The number of affected rows.

    Raises:
        aiosqlite.Error: Re-raised after logging when execution fails.
    """
    # Annotation fix: a default of None requires Optional[...].
    values = values if values else tuple()
    try:
        async with aiosqlite.connect(database_name) as db:
            affected = await db.execute(query, values)
            await db.commit()
            return affected.rowcount
    except aiosqlite.Error as error:
        print(f'aiosqlite execute error\n{query=}\n{error=}')
        # Bare ``raise`` re-raises the active exception with its traceback.
        raise
async def retrieve_query(database_name: str, query: str, values: Optional[Tuple[Any, ...]] = None) -> List[Any]:
    """
    Return the result of a sqlite3 'SELECT' statement.

    Note: Use execute_query() for non-'SELECT' statements.

    Parameters:
        database_name (str): The name of the database.
        query (str): The statement to execute.
        values (Optional[Tuple[Any, ...]]): The values to insert into the query.

    Returns:
        (List[Any]): A list of sqlite3 row objects. Can be empty.

    Raises:
        aiosqlite.Error: Re-raised after logging when the query fails.
    """
    # Annotation fix: a default of None requires Optional[...].
    values = values if values else tuple()
    try:
        async with aiosqlite.connect(database_name) as db:
            async with db.execute(query, values) as cursor:
                return await cursor.fetchall()
    except aiosqlite.Error as error:
        print(f'aiosqlite retrieve error\n{query=}\n{error=}')
        raise
| StarcoderdataPython |
8077544 | <reponame>CsatariGergely/openstack-fenix-doc-test
# Copyright (c) 2018 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from importlib import import_module
from novaclient import API_MAX_VERSION as nova_max_version
import novaclient.client as novaclient
from novaclient.exceptions import BadRequest
from oslo_log import log as logging
import time
from fenix.db import api as db_api
from fenix.utils.time import datetime_to_str
from fenix.utils.time import is_time_after_time
from fenix.utils.time import reply_time_str
from fenix.utils.time import time_now_str
from fenix.workflow.workflow import BaseWorkflow
LOG = logging.getLogger(__name__)
class Workflow(BaseWorkflow):
    def __init__(self, conf, session_id, data):
        """Initialize the default Nova-based maintenance workflow.

        Negotiates the Nova microversion: starts at 2.53 and, when both
        client and server advertise something newer, re-creates the
        client with the smaller of the two maxima.  Hosts are discovered
        from Nova services unless they were supplied in the session data.
        """
        super(Workflow, self).__init__(conf, session_id, data)
        nova_version = 2.53
        self.nova = novaclient.Client(nova_version, session=self.auth_session)
        # NOTE(review): comparing microversions as floats is fragile —
        # e.g. float('2.9') > float('2.53') although microversion 2.9
        # predates 2.53, and str(2.60) is '2.6'.  Works only while both
        # maxima keep two-digit minors; consider (major, minor) tuples.
        max_nova_server_ver = float(self.nova.versions.get_current().version)
        max_nova_client_ver = float(nova_max_version.get_string())
        if max_nova_server_ver > 2.53 and max_nova_client_ver > 2.53:
            # Use the highest microversion both sides support.
            if max_nova_client_ver <= max_nova_server_ver:
                nova_version = max_nova_client_ver
            else:
                nova_version = max_nova_server_ver
            self.nova = novaclient.Client(nova_version,
                                          session=self.auth_session)
        if not self.hosts:
            self.hosts = self._init_hosts_by_services()
        else:
            self._init_update_hosts()
        LOG.info("%s: initialized. Nova version %f" % (self.session_id,
                                                       nova_version))
def _init_hosts_by_services(self):
LOG.info("%s: Dicovering hosts by Nova services" % self.session_id)
hosts = []
controllers = self.nova.services.list(binary='nova-conductor')
for controller in controllers:
host = {}
service_host = str(controller.__dict__.get(u'host'))
host['hostname'] = service_host
host['type'] = 'controller'
if str(controller.__dict__.get(u'status')) == 'disabled':
LOG.error("%s: %s nova-conductor disabled before maintenance"
% (self.session_id, service_host))
raise Exception("%s: %s already disabled"
% (self.session_id, service_host))
host['disabled'] = False
host['details'] = str(controller.__dict__.get(u'id'))
host['maintained'] = False
hosts.append(host)
computes = self.nova.services.list(binary='nova-compute')
for compute in computes:
host = {}
service_host = str(compute.__dict__.get(u'host'))
host['hostname'] = service_host
host['type'] = 'compute'
if str(compute.__dict__.get(u'status')) == 'disabled':
LOG.error("%s: %s nova-compute disabled before maintenance"
% (self.session_id, service_host))
raise Exception("%s: %s already disabled"
% (self.session_id, service_host))
host['disabled'] = False
host['details'] = str(compute.__dict__.get(u'id'))
host['maintained'] = False
hosts.append(host)
return db_api.create_hosts_by_details(self.session_id, hosts)
def _init_update_hosts(self):
LOG.info("%s: Update given hosts" % self.session_id)
controllers = self.nova.services.list(binary='nova-conductor')
computes = self.nova.services.list(binary='nova-compute')
for host in self.hosts:
hostname = host.hostname
host.disabled = False
host.maintained = False
match = [compute for compute in computes if
hostname == compute.host]
if match:
host.type = 'compute'
if match[0].status == 'disabled':
LOG.error("%s: %s nova-compute disabled before maintenance"
% (self.session_id, hostname))
raise Exception("%s: %s already disabled"
% (self.session_id, hostname))
host.details = match[0].id
continue
if ([controller for controller in controllers if
hostname == controller.host]):
host.type = 'controller'
continue
host.type = 'other'
    def disable_host_nova_compute(self, hostname):
        """Disable the nova-compute service on *hostname* and mark the
        host record as disabled.

        Tries the microversion >= 2.53 call (service UUID in
        ``host.details``) first; a TypeError indicates the older client
        signature, which takes (host, binary, reason).
        """
        LOG.info('%s: disable nova-compute on host %s' % (self.session_id,
                                                          hostname))
        host = self.get_host_by_name(hostname)
        try:
            self.nova.services.disable_log_reason(host.details, "maintenance")
        except TypeError:
            # Pre-2.53 novaclient signature.
            LOG.debug('%s: Using old API to disable nova-compute on host %s' %
                      (self.session_id, hostname))
            self.nova.services.disable_log_reason(hostname, "nova-compute",
                                                  "maintenance")
        host.disabled = True
    def enable_host_nova_compute(self, hostname):
        """Re-enable the nova-compute service on *hostname* and clear the
        host record's disabled flag.

        Tries the microversion >= 2.53 call (service UUID in
        ``host.details``) first; a TypeError indicates the older client
        signature, which takes (host, binary).
        """
        LOG.info('%s: enable nova-compute on host %s' % (self.session_id,
                                                         hostname))
        host = self.get_host_by_name(hostname)
        try:
            self.nova.services.enable(host.details)
        except TypeError:
            # Pre-2.53 novaclient signature.
            LOG.debug('%s: Using old API to enable nova-compute on host %s' %
                      (self.session_id, hostname))
            self.nova.services.enable(hostname, "nova-compute")
        host.disabled = False
def get_compute_hosts(self):
return [host.hostname for host in self.hosts
if host.type == 'compute']
def get_empty_computes(self):
all_computes = self.get_compute_hosts()
instance_computes = []
for instance in self.instances:
if instance.host not in instance_computes:
instance_computes.append(instance.host)
return [host for host in all_computes if host not in instance_computes]
def get_instance_details(self, instance):
network_interfaces = next(iter(instance.addresses.values()))
for network_interface in network_interfaces:
_type = network_interface.get('OS-EXT-IPS:type')
if _type == "floating":
LOG.info('Instance with floating ip: %s %s' %
(instance.id, instance.name))
return "floating_ip"
return None
def _fenix_instance(self, project_id, instance_id, instance_name, host,
state, details, action=None, project_state=None,
action_done=False):
instance = {'session_id': self.session_id,
'instance_id': instance_id,
'action': action,
'project_id': project_id,
'instance_id': instance_id,
'project_state': project_state,
'state': state,
'instance_name': instance_name,
'action_done': action_done,
'host': host,
'details': details}
return instance
def initialize_server_info(self):
project_ids = []
instances = []
compute_hosts = self.get_compute_hosts()
opts = {'all_tenants': True}
servers = self.nova.servers.list(detailed=True, search_opts=opts)
for server in servers:
try:
host = str(server.__dict__.get('OS-EXT-SRV-ATTR:host'))
if host not in compute_hosts:
continue
project_id = str(server.tenant_id)
instance_name = str(server.name)
instance_id = str(server.id)
details = self.get_instance_details(server)
state = str(server.__dict__.get('OS-EXT-STS:vm_state'))
except Exception:
raise Exception('can not get params from server=%s' % server)
instances.append(self._fenix_instance(project_id, instance_id,
instance_name, host, state,
details))
if project_id not in project_ids:
project_ids.append(project_id)
if len(project_ids):
self.projects = self.init_projects(project_ids)
else:
LOG.info('%s: No projects on computes under maintenance %s' %
self.session_id)
if len(instances):
self.instances = self.add_instances(instances)
else:
LOG.info('%s: No instances on computes under maintenance %s' %
self.session_id)
LOG.info(str(self))
def update_instance(self, project_id, instance_id, instance_name, host,
state, details):
if self.instance_id_found(instance_id):
# TBD Might need to update instance variables here if not done
# somewhere else
return
elif self.instance_name_found(instance_name):
# Project has made re-instantiation, remove old add new
old_instance = self.instance_by_name(instance_name)
instance = self._fenix_instance(project_id, instance_id,
instance_name, host,
state, details,
old_instance.action,
old_instance.project_state,
old_instance.action_done)
self.instances.append(self.add_instance(instance))
self.remove_instance(old_instance)
else:
# Instance new, as project has added instances
instance = self._fenix_instance(project_id, instance_id,
instance_name, host,
state, details)
self.instances.append(self.add_instance(instance))
def remove_non_existing_instances(self, instance_ids):
remove_instances = [instance for instance in
self.instances if instance.instance_id not in
instance_ids]
for instance in remove_instances:
# Instance deleted, as project possibly scaled down
self.remove_instance(instance)
    def update_server_info(self):
        """Re-synchronize the tracked instance set against Nova.

        Raises when a server's attributes cannot be extracted.
        """
        # TBD This keeps internal instance information up-to-date and prints
        # it out. Same could be done by updating the information when changed
        # Anyhow this also double checks information against Nova
        instance_ids = []
        compute_hosts = self.get_compute_hosts()
        opts = {'all_tenants': True}
        servers = self.nova.servers.list(detailed=True, search_opts=opts)
        for server in servers:
            try:
                host = str(server.__dict__.get('OS-EXT-SRV-ATTR:host'))
                # Only servers on computes under maintenance are tracked.
                if host not in compute_hosts:
                    continue
                project_id = str(server.tenant_id)
                instance_name = str(server.name)
                instance_id = str(server.id)
                details = self.get_instance_details(server)
                state = str(server.__dict__.get('OS-EXT-STS:vm_state'))
            except Exception:
                raise Exception('can not get params from server=%s' % server)
            # Reconcile this server with the tracked set (add / replace).
            self.update_instance(project_id, instance_id, instance_name, host,
                                 state, details)
            instance_ids.append(instance_id)
        # Anything tracked that Nova no longer reports has been deleted.
        self.remove_non_existing_instances(instance_ids)
        LOG.info(str(self))
    def confirm_maintenance(self):
        """Notify every project that maintenance starts; wait for ACKs.

        Fails the session (state MAINTENANCE_FAILED, returns False) when
        a project would not have time to reply before the maintenance
        start time.  Returns the result of waiting for all projects.
        """
        allowed_actions = []
        actions_at = self.session.maintenance_at
        state = 'MAINTENANCE'
        self.set_projets_state(state)
        for project in self.project_names():
            LOG.info('\nMAINTENANCE to project %s\n' % project)
            # Per-project URL the project queries for its instance list.
            instance_ids = '%s/v1/maintenance/%s/%s' % (self.url,
                                                        self.session_id,
                                                        project)
            reply_at = reply_time_str(self.conf.project_maintenance_reply)
            # The reply deadline must not fall after the maintenance
            # start time, or the project cannot answer in time.
            if is_time_after_time(reply_at, actions_at):
                LOG.error('%s: No time for project to answer in state: %s' %
                          (self.session_id, state))
                self.session.state = "MAINTENANCE_FAILED"
                return False
            metadata = self.session.meta
            self._project_notify(project, instance_ids, allowed_actions,
                                 actions_at, reply_at, state, metadata)
        self.start_timer(self.conf.project_maintenance_reply,
                         'MAINTENANCE_TIMEOUT')
        return self.wait_projects_state(state, 'MAINTENANCE_TIMEOUT')
def confirm_scale_in(self):
allowed_actions = []
actions_at = reply_time_str(self.conf.project_scale_in_reply)
reply_at = actions_at
state = 'SCALE_IN'
self.set_projets_state(state)
for project in self.project_names():
LOG.info('\nSCALE_IN to project %s\n' % project)
instance_ids = '%s/v1/maintenance/%s/%s' % (self.url,
self.session_id,
project)
metadata = self.session.meta
self._project_notify(project, instance_ids, allowed_actions,
actions_at, reply_at, state, metadata)
self.start_timer(self.conf.project_scale_in_reply,
'SCALE_IN_TIMEOUT')
return self.wait_projects_state(state, 'SCALE_IN_TIMEOUT')
    def need_scale_in(self):
        """Return True when there is not enough free VCPU capacity to
        empty one compute host into the others.

        Sums free vcpus across the computes under maintenance and
        compares the total against one host's capacity.
        """
        hvisors = self.nova.hypervisors.list(detailed=True)
        prev_vcpus = 0
        free_vcpus = 0
        prev_hostname = ''
        LOG.info('checking hypervisors for VCPU capacity')
        for hvisor in hvisors:
            hostname = hvisor.__getattr__('hypervisor_hostname')
            if hostname not in self.get_compute_hosts():
                continue
            vcpus = hvisor.__getattr__('vcpus')
            vcpus_used = hvisor.__getattr__('vcpus_used')
            # Hosts are expected to be homogeneous in vcpu capacity.
            if prev_vcpus != 0 and prev_vcpus != vcpus:
                raise Exception('%s: %d vcpus on %s does not match to'
                                '%d on %s'
                                % (self.session_id, vcpus, hostname,
                                   prev_vcpus, prev_hostname))
            free_vcpus += vcpus - vcpus_used
            prev_vcpus = vcpus
            prev_hostname = hostname
        # ``vcpus`` is the per-host capacity of the last host seen.
        # NOTE(review): raises NameError if no compute hypervisor matched
        # — presumably at least one always does; confirm.
        if free_vcpus >= vcpus:
            # TBD vcpu capacity might be too scattered so moving instances from
            # one host to other host still might not succeed.
            return False
        else:
            return True
def get_free_vcpus_by_host(self, host, hvisors):
hvisor = ([h for h in hvisors if
h.__getattr__('hypervisor_hostname') == host][0])
vcpus = hvisor.__getattr__('vcpus')
vcpus_used = hvisor.__getattr__('vcpus_used')
return vcpus - vcpus_used
def find_host_to_be_empty(self):
# Preferrably host with most free vcpus, no floating ip instances and
# least instances altogether
host_to_be_empty = None
host_no_fip_instances = 0
host_free_vcpus = 0
hvisors = self.nova.hypervisors.list(detailed=True)
for host in self.get_compute_hosts():
free_vcpus = self.get_free_vcpus_by_host(host, hvisors)
fip_instances = 0
no_fip_instances = 0
for project in self.project_names():
for instance in (self.instances_by_host_and_project(host,
project)):
if instance.details and "floating_ip" in instance.details:
fip_instances += 1
else:
no_fip_instances += 1
LOG.info('%s has %d floating ip and %d other instances %s free '
'vcpus' % (host, fip_instances, no_fip_instances,
free_vcpus))
if fip_instances == 0:
# We do not want to choose host with floating ip instance
if host_to_be_empty:
# We have host candidate, let's see if this is better
if free_vcpus > host_free_vcpus:
# Choose as most vcpus free
host_to_be_empty = host
host_no_fip_instances = no_fip_instances
host_free_vcpus = 0
elif free_vcpus == host_free_vcpus:
if no_fip_instances < host_no_fip_instances:
# Choose as most vcpus free and least instances
host_to_be_empty = host
host_no_fip_instances = no_fip_instances
host_free_vcpus = 0
else:
# This is first host candidate
host_to_be_empty = host
host_no_fip_instances = no_fip_instances
host_free_vcpus = 0
if not host_to_be_empty:
# No best cadidate found, let's choose last host in loop
host_to_be_empty = host
LOG.info('host %s selected to be empty' % host_to_be_empty)
# TBD It might yet not be possible to move instances away from this
# host if other hosts has free vcpu capacity scattered. It should
# checked if instances on this host fits to other hosts
return host_to_be_empty
def confirm_host_to_be_emptied(self, host, state):
allowed_actions = ['MIGRATE', 'LIVE_MIGRATE', 'OWN_ACTION']
actions_at = reply_time_str(self.conf.project_maintenance_reply)
reply_at = actions_at
self.set_projects_state_and_hosts_instances(state, [host])
for project in self.project_names():
if not self.project_has_state_instances(project):
continue
LOG.info('%s to project %s' % (state, project))
instance_ids = '%s/v1/maintenance/%s/%s' % (self.url,
self.session_id,
project)
metadata = self.session.meta
self._project_notify(project, instance_ids, allowed_actions,
actions_at, reply_at, state, metadata)
self.start_timer(self.conf.project_maintenance_reply,
'%s_TIMEOUT' % state)
return self.wait_projects_state(state, '%s_TIMEOUT' % state)
def confirm_maintenance_complete(self):
state = 'MAINTENANCE_COMPLETE'
metadata = self.session.meta
actions_at = reply_time_str(self.conf.project_scale_in_reply)
reply_at = actions_at
self.set_projets_state(state)
for project in self.project_names():
LOG.info('%s to project %s' % (state, project))
instance_ids = '%s/v1/maintenance/%s/%s' % (self.url,
self.session_id,
project)
allowed_actions = []
self._project_notify(project, instance_ids, allowed_actions,
actions_at, reply_at, state, metadata)
self.start_timer(self.conf.project_scale_in_reply,
'%s_TIMEOUT' % state)
return self.wait_projects_state(state, '%s_TIMEOUT' % state)
def notify_action_done(self, project, instance):
instance_ids = [instance.instance_id]
allowed_actions = []
actions_at = None
reply_at = None
state = "INSTANCE_ACTION_DONE"
instance.project_state = state
metadata = "{}"
self._project_notify(project, instance_ids, allowed_actions,
actions_at, reply_at, state, metadata)
    def actions_to_have_empty_host(self, host):
        """Execute each project's chosen action for its instances on
        *host* so that the host becomes empty.

        MIGRATE is executed by this workflow (the project is notified
        when done); OWN_ACTION is left to the project itself; any other
        action (e.g. LIVE_MIGRATE) is unsupported and raises. Returns
        True when the host reports empty, False if a migration failed
        or the host did not empty in time.
        """
        # TBD these might be done parallel
        for project in self.proj_instance_actions.keys():
            instances = (
                self.instances_by_host_and_project(host, project))
            for instance in instances:
                instance.action = (self.instance_action_by_project_reply(
                    project, instance.instance_id))
                LOG.info('Action %s instance %s ' % (instance.action,
                                                     instance.instance_id))
                if instance.action == 'MIGRATE':
                    if not self.migrate_server(instance):
                        return False
                    self.notify_action_done(project, instance)
                elif instance.action == 'OWN_ACTION':
                    # the project handles this instance itself
                    pass
                else:
                    # TBD LIVE_MIGRATE not supported
                    raise Exception('%s: instance %s action '
                                    '%s not supported' %
                                    (self.session_id, instance.instance_id,
                                     instance.action))
        return self._wait_host_empty(host)
def _wait_host_empty(self, host):
hid = self.nova.hypervisors.search(host)[0].id
vcpus_used_last = 0
# wait 4min to get host empty
for j in range(48):
hvisor = self.nova.hypervisors.get(hid)
vcpus_used = hvisor.__getattr__('vcpus_used')
if vcpus_used > 0:
if vcpus_used != vcpus_used_last or vcpus_used_last == 0:
LOG.info('%s still has %d vcpus reserved. wait...'
% (host, vcpus_used))
vcpus_used_last = vcpus_used
time.sleep(5)
else:
LOG.info('%s empty' % host)
return True
LOG.info('%s host still not empty' % host)
return False
    def migrate_server(self, instance):
        """Migrate *instance* via Nova and confirm the resulting resize.

        After issuing server.migrate() the server state is polled for up
        to 3 minutes; when it reaches 'resized' the migration is
        confirmed and the new host recorded on the instance. On
        BadRequest the whole migration is retried a couple of times.
        Returns True on a confirmed migration, False on error or
        timeout.
        """
        # TBD this method should be enhanced for errors and to have failed
        # instance back to state active instead of error
        server_id = instance.instance_id
        server = self.nova.servers.get(server_id)
        instance.state = server.__dict__.get('OS-EXT-STS:vm_state')
        LOG.info('server %s state %s' % (server_id, instance.state))
        last_vm_state = instance.state
        retry_migrate = 2
        while True:
            try:
                server.migrate()
                time.sleep(5)
                retries = 36
                while instance.state != 'resized' and retries > 0:
                    # try to confirm within 3min (36 polls, 5s apart)
                    server = self.nova.servers.get(server_id)
                    instance.state = server.__dict__.get('OS-EXT-STS:vm_state')
                    if instance.state == 'resized':
                        server.confirm_resize()
                        LOG.info('instance %s migration confirmed' %
                                 server_id)
                        instance.host = (
                            str(server.__dict__.get('OS-EXT-SRV-ATTR:host')))
                        return True
                    if last_vm_state != instance.state:
                        LOG.info('instance %s state: %s' % (server_id,
                                                            instance.state))
                    if instance.state == 'error':
                        LOG.error('instance %s migration failed, state: %s'
                                  % (server_id, instance.state))
                        return False
                    time.sleep(5)
                    retries = retries - 1
                    last_vm_state = instance.state
                # Timeout waiting state to change
                break
            except BadRequest:
                if retry_migrate == 0:
                    LOG.error('server %s migrate failed after retries' %
                              server_id)
                    return False
                # Might take time for scheduler to sync inconsistent instance
                # list for host
                # TBD Retry doesn't help, need investigating if reproduces
                retry_timeout = 150 - (retry_migrate * 60)
                LOG.info('server %s migrate failed, retry in %s sec'
                         % (server_id, retry_timeout))
                time.sleep(retry_timeout)
            except Exception as e:
                LOG.error('server %s migration failed, Exception=%s' %
                          (server_id, e))
                return False
            finally:
                # runs on every path through the loop, including returns
                retry_migrate = retry_migrate - 1
        LOG.error('instance %s migration timeout, state: %s' %
                  (server_id, instance.state))
        return False
    def host_maintenance_by_plugin_type(self, hostname, plugin_type):
        """Run every registered action plug-in of *plugin_type* against
        *hostname*, in sequence.

        Each run is tracked via a DB action-plugin-instance record; the
        record is removed on success and kept (carrying its state) when
        the plug-in fails. Raises if a plug-in reports a FAILED state
        or no state at all.
        """
        aps = self.get_action_plugins_by_type(plugin_type)
        if aps:
            LOG.info("%s: Calling action plug-ins with type %s" %
                     (self.session_id, plugin_type))
            for ap in aps:
                # plug-in modules live under fenix.workflow.actions
                ap_name = "fenix.workflow.actions.%s" % ap.plugin
                LOG.info("%s: Calling action plug-in module: %s" %
                         (self.session_id, ap_name))
                action_plugin = getattr(import_module(ap_name), 'ActionPlugin')
                ap_db_instance = self._create_action_plugin_instance(ap.plugin,
                                                                     hostname)
                ap_instance = action_plugin(self, ap_db_instance)
                ap_instance.run()
                if ap_db_instance.state:
                    LOG.info('%s: %s finished with %s host %s' %
                             (self.session_id, ap.plugin,
                              ap_db_instance.state, hostname))
                    if 'FAILED' in ap_db_instance.state:
                        raise Exception('%s: %s finished with %s host %s' %
                                        (self.session_id, ap.plugin,
                                         ap_db_instance.state, hostname))
                else:
                    raise Exception('%s: %s reported no state for host %s' %
                                    (self.session_id, ap.plugin, hostname))
                # If ap_db_instance failed, we keep it for state
                db_api.remove_action_plugin_instance(ap_db_instance)
        else:
            LOG.info("%s: No action plug-ins with type %s" %
                     (self.session_id, plugin_type))
def host_maintenance(self, hostname):
host = self.get_host_by_name(hostname)
LOG.info('%s: Maintaining host %s' % (self.session_id, hostname))
for plugin_type in ["host", host.type]:
self.host_maintenance_by_plugin_type(hostname, plugin_type)
LOG.info('%s: Maintaining host %s complete' % (self.session_id,
hostname))
    def maintenance(self):
        """Initial state handler: prepare and schedule the maintenance.

        Collects server info, checks that projects subscribe to the
        maintenance alarm and confirms the maintenance with them, then
        picks the next state: SCALE_IN (no empty host, no free
        capacity), PREPARE_MAINTENANCE (free capacity but no empty
        host) or START_MAINTENANCE (an empty host already exists).
        Finally blocks until the scheduled maintenance start time.
        """
        LOG.info("%s: maintenance called" % self.session_id)
        self.initialize_server_info()
        if not self.projects_listen_alarm('maintenance.scheduled'):
            self.session.state = 'MAINTENANCE_FAILED'
            return
        if not self.confirm_maintenance():
            self.session.state = 'MAINTENANCE_FAILED'
            return
        maintenance_empty_hosts = self.get_empty_computes()
        if len(maintenance_empty_hosts) == 0:
            if self.need_scale_in():
                LOG.info('%s: Need to scale in to get capacity for '
                         'empty host' % (self.session_id))
                self.session.state = 'SCALE_IN'
            else:
                LOG.info('%s: Free capacity, but need empty host' %
                         (self.session_id))
                self.session.state = 'PREPARE_MAINTENANCE'
        else:
            LOG.info('Empty host found')
            self.session.state = 'START_MAINTENANCE'
        if self.session.maintenance_at > datetime.datetime.utcnow():
            time_now = time_now_str()
            LOG.info('Time now: %s maintenance starts: %s....' %
                     (time_now, datetime_to_str(self.session.maintenance_at)))
            td = self.session.maintenance_at - datetime.datetime.utcnow()
            self.start_timer(td.total_seconds(), 'MAINTENANCE_START_TIMEOUT')
            # 1-second-granularity wait for the start timer to expire
            while not self.is_timer_expired('MAINTENANCE_START_TIMEOUT'):
                time.sleep(1)
            time_now = time_now_str()
            LOG.info('Time to start maintenance: %s' % time_now)
    def scale_in(self):
        """SCALE_IN state handler: ask projects to scale in, then
        re-evaluate whether an empty host now exists.

        Next state: SCALE_IN again (still no capacity),
        PREPARE_MAINTENANCE (capacity but no empty host),
        START_MAINTENANCE (empty host found), or MAINTENANCE_FAILED
        when projects did not confirm the scale-in.
        """
        LOG.info("%s: scale in" % self.session_id)
        if not self.confirm_scale_in():
            self.session.state = 'MAINTENANCE_FAILED'
            return
        # TBD it takes time to have proper information updated about free
        # capacity. Should make sure instances removed has also VCPUs removed
        self.update_server_info()
        maintenance_empty_hosts = self.get_empty_computes()
        if len(maintenance_empty_hosts) == 0:
            if self.need_scale_in():
                LOG.info('%s: Need to scale in more to get capacity for '
                         'empty host' % (self.session_id))
                self.session.state = 'SCALE_IN'
            else:
                LOG.info('%s: Free capacity, but need empty host' %
                         (self.session_id))
                self.session.state = 'PREPARE_MAINTENANCE'
        else:
            LOG.info('Empty host found')
            self.session.state = 'START_MAINTENANCE'
    def prepare_maintenance(self):
        """PREPARE_MAINTENANCE state handler: pick a host, get project
        confirmations and move its instances away.

        Next state: START_MAINTENANCE on success, SCALE_IN when the
        host could not be emptied, MAINTENANCE_FAILED when projects did
        not confirm.
        """
        LOG.info("%s: prepare_maintenance called" % self.session_id)
        host = self.find_host_to_be_empty()
        if not self.confirm_host_to_be_emptied(host, 'PREPARE_MAINTENANCE'):
            self.session.state = 'MAINTENANCE_FAILED'
            return
        if not self.actions_to_have_empty_host(host):
            # TBD we found the hard way that we couldn't make host empty and
            # need to scale in more. Things might fail after this if any
            # instance if error or Nova scheduler cached data corrupted for
            # what instance on which host
            LOG.info('%s: Failed to empty %s. Need to scale in more to get '
                     'capacity for empty host' % (self.session_id, host))
            self.session.state = 'SCALE_IN'
        else:
            self.session.state = 'START_MAINTENANCE'
        self.update_server_info()
def start_maintenance(self):
LOG.info("%s: start_maintenance called" % self.session_id)
empty_hosts = self.get_empty_computes()
if not empty_hosts:
LOG.info("%s: No empty host to be maintained" % self.session_id)
self.session.state = 'MAINTENANCE_FAILED'
return
maintained_hosts = self.get_maintained_hosts_by_type('compute')
if not maintained_hosts:
computes = self.get_compute_hosts()
for compute in computes:
# When we start to maintain compute hosts, all these hosts
# nova-compute service is disabled, so projects cannot have
# instances scheduled to not maintained hosts
self.disable_host_nova_compute(compute)
# First we maintain all empty hosts
for host in empty_hosts:
# TBD we wait host VCPUs to report right, but this is not
# correct place. We should handle this after scale in
# also this could be made parallel if more than one empty host
self._wait_host_empty(host)
LOG.info('IN_MAINTENANCE host %s' % host)
self._admin_notify(self.conf.workflow_project, host,
'IN_MAINTENANCE',
self.session_id)
self.host_maintenance(host)
self._admin_notify(self.conf.workflow_project, host,
'MAINTENANCE_COMPLETE',
self.session_id)
self.enable_host_nova_compute(host)
LOG.info('MAINTENANCE_COMPLETE host %s' % host)
self.host_maintained(host)
else:
# Now we maintain hosts gone trough PLANNED_MAINTENANCE
hosts = [h for h in empty_hosts if h not in maintained_hosts]
for host in hosts:
# TBD this could be made parallel if more than one empty host
self._wait_host_empty(host)
LOG.info('IN_MAINTENANCE host %s' % host)
self._admin_notify(self.conf.workflow_project, host,
'IN_MAINTENANCE',
self.session_id)
self.host_maintenance(host)
self._admin_notify(self.conf.workflow_project, host,
'MAINTENANCE_COMPLETE',
self.session_id)
self.enable_host_nova_compute(host)
LOG.info('MAINTENANCE_COMPLETE host %s' % host)
self.host_maintained(host)
maintained_hosts = self.get_maintained_hosts_by_type('compute')
if len(maintained_hosts) != len(self.get_compute_hosts()):
# Not all host maintained
self.session.state = 'PLANNED_MAINTENANCE'
else:
self.session.state = 'MAINTENANCE_COMPLETE'
    def planned_maintenance(self):
        """PLANNED_MAINTENANCE state handler: empty the next
        not-yet-maintained compute host.

        Picks the first unmaintained host, gets project confirmations
        and executes the projects' instance actions. Next state:
        START_MAINTENANCE on success, MAINTENANCE_FAILED otherwise.
        """
        LOG.info("%s: planned_maintenance called" % self.session_id)
        maintained_hosts = self.get_maintained_hosts_by_type('compute')
        compute_hosts = self.get_compute_hosts()
        not_maintained_hosts = ([host for host in compute_hosts if host
                                 not in maintained_hosts])
        LOG.info("%s: Not maintained hosts: %s" % (self.session_id,
                                                   not_maintained_hosts))
        # NOTE(review): assumes at least one unmaintained host remains;
        # the state machine should guarantee this -- verify.
        host = not_maintained_hosts[0]
        if not self.confirm_host_to_be_emptied(host, 'PLANNED_MAINTENANCE'):
            self.session.state = 'MAINTENANCE_FAILED'
            return
        if not self.actions_to_have_empty_host(host):
            # Failure in here might indicate action to move instance failed.
            # This might be as Nova VCPU capacity was not yet emptied from
            # expected target hosts
            self.session.state = 'MAINTENANCE_FAILED'
            return
        self.update_server_info()
        self.session.state = 'START_MAINTENANCE'
def maintenance_complete(self):
LOG.info("%s: maintenance_complete called" % self.session_id)
LOG.info('Projects may still need to up scale back to full '
'capcity')
if not self.confirm_maintenance_complete():
self.session.state = 'MAINTENANCE_FAILED'
return
self.update_server_info()
self.session.state = 'MAINTENANCE_DONE'
    def maintenance_done(self):
        """Terminal state handler: nothing left to do."""
        pass
    def maintenance_failed(self):
        """Failure state handler: currently only logs the failure."""
        LOG.info("%s: maintenance_failed called" % self.session_id)
    def cleanup(self):
        """Remove this maintenance session from the database."""
        LOG.info("%s: cleanup" % self.session_id)
        db_api.remove_session(self.session_id)
| StarcoderdataPython |
4959710 | #!/usr/bin/env python3
"""
Copied from GEMCode/GEMValidation
"""
from ROOT import *
import os
import sys
import ROOT
ROOT.gROOT.SetBatch(1)
import optparse
# module-level ROOT file that collects the difference histograms
output = TFile("output.root","RECREATE")
def draw_occ(target_dir, h1,h2, ext =".png", opt = ""):
    """Overlay two occupancy histograms (h1 blue, h2 red) on one canvas
    with a legend and save the image into target_dir.

    NOTE(review): relies on the module-global 'args' list (the command
    line file names) for the legend labels -- consider passing labels
    explicitly.
    """
    gStyle.SetStatStyle(0)
    gStyle.SetOptStat(1110)
    c = TCanvas(h1.GetTitle(),h1.GetName(),600,600)
    c_title = c.GetTitle()
    c.Clear()
    if not h1 or not h2:
        sys.exit('h1 or h2 does not exist')
    h1.SetLineWidth(2)
    h1.SetLineColor(kBlue)
    h1.SetMarkerColor(kBlue)
    # temporarily title the histograms with the input file names so the
    # legend entries identify each file
    h1.SetTitle(args[0])
    h1.Draw(opt)
    h2.SetLineWidth(2)
    h2.SetLineColor(kRed)
    h2.SetMarkerColor(kRed)
    h2.SetTitle(args[1])
    h2.Draw("same"+opt)
    gPad.SetTitle(c_title)
    leg = gPad.BuildLegend()
    # restore the common title after the legend has been built
    h1.SetTitle(c_title)
    c.Update()
    c.SaveAs(target_dir + c_title + ext)
def draw_diff_strip(target_dir, h1, h2, ext=".png", opt=""):
    """Draw and save the bin-by-bin difference h1 - h2 of two strip-phi
    histograms and append the difference histogram to the output file.

    target_dir -- directory (with trailing separator) for the image
    h1, h2     -- TH1 histograms with identical binning
    ext        -- image file extension
    opt        -- ROOT draw option
    """
    gStyle.SetStatStyle(0)
    gStyle.SetOptStat(0)
    c = TCanvas("c1", ("strip_diff_%s") % (h1.GetName()), 600, 600)
    c_title = c.GetTitle()
    c.Clear()
    if not h1 or not h2:
        sys.exit('h1 or h2 does not exist')
    xbin = h1.GetXaxis().GetNbins()
    xmin = h1.GetXaxis().GetXmin()
    xmax = h1.GetXaxis().GetXmax()
    title = ("Difference of strip phi between %s and %s") % (h1.GetName(),
                                                             h2.GetName())
    # Give the difference histogram a unique name per input histogram;
    # the fixed name "strip_diff" made ROOT replace the previous object
    # on every call (memory warnings, only the last one kept).
    h = TH1F("strip_diff_%s" % h1.GetName(), title, xbin, xmin, xmax)
    for x in range(xbin):
        # ROOT bin numbering starts at 1 (bin 0 is the underflow)
        value1 = h1.GetBinContent(x + 1)
        value2 = h2.GetBinContent(x + 1)
        h.SetBinContent(x + 1, value1 - value2)
    h.Draw(opt)
    gPad.SetTitle(c_title)
    c.Update()
    c.SaveAs(target_dir + c_title + ext)
    # NOTE(review): 'output' was opened with RECREATE at module level;
    # reopening it in UPDATE mode on every call looks suspicious -- verify.
    output.ReOpen("UPDATE")
    h.Write()
def draw_plot(file1, file2, tDir, oDir):
    """Compare matching histograms from two DQM files.

    Opens directory *tDir* in both ROOT files and, for every histogram
    key found in the first file, draws the appropriate comparison plot
    into *oDir* (overlay for "_phiz_" and "sp" histograms, bin-by-bin
    difference for "strip_phi_dist" histograms).
    """
    # removed an unused module-shadowing TCanvas local ('c') that the
    # original created and never used
    dqm_file1 = TFile(file1)
    dqm_file2 = TFile(file2)
    d1 = dqm_file1.Get(tDir)
    d2 = dqm_file2.Get(tDir)
    key_list = [key.GetName() for key in d1.GetListOfKeys()]
    for hist in key_list:
        if "_phiz_" in hist:
            draw_occ(oDir, d1.Get(hist), d2.Get(hist), ".png", "col")
        elif "strip_phi_dist" in hist:
            draw_diff_strip(oDir, d1.Get(hist), d2.Get(hist))
        elif "sp" in hist:
            draw_occ(oDir, d1.Get(hist), d2.Get(hist))
if __name__ == '__main__' :
    usage = ": %prog DQM_file1.root file2.root \negs) ./%prog -o c_temp_plot DQM_v6.root DQM_v7.root"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-o", dest='directory',
                      help='Name of output directory(Default : c_temp_plot)',
                      default="c_temp_plot")
    options, args = parser.parse_args()
    if len(args) == 0:
        # print() works on both Python 2 and 3; the bare print statement
        # used before was a syntax error under the python3 shebang.
        print("Input file name is None.")
        print("Use default name.[ DQM_v6.root and DQM_v7.root]")
        args.append("DQM_v6.root")
        args.append("DQM_v7.root")
    tDir = "DQMData/Run 1/MuonGEMDigisV/Run summary/GEMDigisTask"
    oDir = options.directory + "_GEMDigis/"
    os.system("mkdir -p " + oDir)
    draw_plot(args[0], args[1], tDir, oDir)
| StarcoderdataPython |
1781915 | # Generated by Django 2.2.4 on 2019-08-27 05:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a 'status' CharField (max_length=20, default 'none') to the
    Friend model."""
    dependencies = [
        ('friends', '0002_customnotification'),
    ]
    operations = [
        migrations.AddField(
            model_name='friend',
            name='status',
            field=models.CharField(default='none', max_length=20),
        ),
    ]
| StarcoderdataPython |
12812865 | <filename>tianshou/env/__init__.py
"""Env package."""
from tianshou.env.maenv import MultiAgentEnv
from tianshou.env.venvs import (
BaseVectorEnv,
DummyVectorEnv,
RayVectorEnv,
ShmemVectorEnv,
SubprocVectorEnv,
)
# public API of the env package; every name is re-exported above
__all__ = [
    "BaseVectorEnv",
    "DummyVectorEnv",
    "SubprocVectorEnv",
    "ShmemVectorEnv",
    "RayVectorEnv",
    "MultiAgentEnv",
]
| StarcoderdataPython |
def setup():
    """Emit a rename warning, then redirect imports of this module to
    flask_admin.contrib.sqla."""
    import warnings
    message = ('Flask-Admin sqlalchemy integration module was renamed as '
               'flask_admin.contrib.sqla, please use it instead.')
    warnings.warn(message)
    from flask_admin._backwards import import_redirect
    import_redirect(__name__, 'flask_admin.contrib.sqla')
setup()
del setup
from ..sqla.view import ModelView # noqa: F401
| StarcoderdataPython |
11290753 | <reponame>villares/Paper-objects-with-Processing-and-Python<filename>box_with_rectangular_holes/frame_box.py
# stroke colors for the unfolded pattern: red = cut lines, blue = fold lines
CUT_STROKE, FOLD_STROKE = color(255, 0, 0), color(0, 0, 255)
def frame_box(w, h, d, thick=0):
    """ draw the 3D version of the box with rectangular holes

    w, h, d -- box width, height and depth
    thick   -- frame thickness; 0 draws solid faces (no holes)
    """
    mw, mh, md = w / 2., h / 2., d / 2.
    translate(0, 0, -md)  # base
    face(0, 0, w, h, thick)
    translate(0, 0, d)  # top
    face(0, 0, w, h, thick)
    translate(0, 0, -md)  # back to 0
    rotateY(HALF_PI)
    translate(0, 0, -mw)  # left side
    face(0, 0, d, h, thick)
    translate(0, 0, w)  # right side
    face(0, 0, d, h, thick)
    translate(0, 0, -mw)  # back to middle
    rotateY(-HALF_PI)  # back to 0 rotation
    rotateX(HALF_PI)
    translate(0, 0, -mh)  # lateral e
    face(0, 0, w, d, thick)
    translate(0, 0, h)  # lateral d
    face(0, 0, w, d, thick)
    # reset translate: this axis was offset by mh (not mw); the original
    # used -mw here, mis-centering the box whenever w != h
    translate(0, 0, -mh)
    rotateX(-HALF_PI)  # reset rotate
def face(x, y, w, h, thick):
    """Draw one rectangular face centered at (x, y); when thick > 0 and
    the face is large enough, cut a counterclockwise rectangular hole
    that leaves a frame of the given thickness."""
    half_w, half_h = w / 2., h / 2.
    pushMatrix()
    translate(x, y)
    beginShape()
    # outer rectangle, clockwise
    for vx, vy in ((-half_w, -half_h), (half_w, -half_h),
                   (half_w, half_h), (-half_w, half_h)):
        vertex(vx, vy)
    hole_w, hole_h = half_w - thick, half_h - thick
    if thick > 0 and hole_w > 0 and hole_h > 0:
        beginContour()  # counterclockwise hole
        for vx, vy in ((-hole_w, -hole_h), (-hole_w, hole_h),
                       (hole_w, hole_h), (hole_w, -hole_h)):
            vertex(vx, vy)
        endContour()
    endShape(CLOSE)
    popMatrix()
def unfolded_frame_box(w, h, d, thick=0, draw_main=True):
    """Draw the flat (unfolded) cut-and-fold pattern of the box: six
    faces laid out around the central w-by-h face."""
    mw, mh, md = w / 2., h / 2., d / 2.
    unfolded_face(0, -h - md, w, d, "aaan", thick, draw_main)
    unfolded_face(0, -mh, w, h, "vvvv", thick, draw_main)
    unfolded_face(0, md, w, d, "cncv", thick, draw_main)
    unfolded_face(0, mh + d, w, h, "cncc", thick, draw_main)
    unfolded_face(-mw - md, -mh, d, h, "acna", thick, draw_main)
    unfolded_face(mw + md, -mh, d, h, "ncaa", thick, draw_main)
def unfolded_face(x, y, w, h, edge_types, thick=0, draw_main=True):
    """Draw one unfolded face at (x, y): its four edges, each treated
    according to the matching character in *edge_types* (see edge()),
    plus an all-cut inner rectangle when a positive *thick* leaves room
    for a hole. With draw_main False only the inner hole is drawn."""
    e0, e1, e2, e3 = edge_types
    half_w, half_h = w / 2., h / 2.
    pushMatrix()
    translate(x, y)
    if draw_main:
        # walk the corners counterclockwise, one edge per type code
        corners = ((-half_w, half_h), (-half_w, -half_h),
                   (half_w, -half_h), (half_w, half_h))
        types = (e0, e1, e2, e3)
        for i in range(4):
            x0, y0 = corners[i]
            x1, y1 = corners[(i + 1) % 4]
            edge(x0, y0, x1, y1, types[i])
    if thick > 0 and half_w > thick and half_h > thick:
        # the hole outline is a plain cut rectangle ("cccc")
        unfolded_face(0, 0, w - thick * 2, h - thick * 2, "cccc")
    popMatrix()
def edge(x0, y0, x1, y1, edge_type):
    """Draw one face edge from (x0, y0) to (x1, y1).

    edge_type codes: "n" nothing, "c" cut line, "v" fold line,
    "a" fold line plus a glue-tab outline.
    """
    if edge_type == "n":
        return  # this edge is not drawn at all
    is_cut = edge_type == "c"
    stroke(CUT_STROKE if is_cut else FOLD_STROKE)
    line(x0, y0, x1, y1)
    if edge_type == "a":
        # the fold line is already drawn; add the cut-out glue tab
        stroke(CUT_STROKE)
        noFill()
        glue_tab((x0, y0), (x1, y1), 10)
def glue_tab(p1, p2, tab_w, cut_ang=QUARTER_PI / 3):
    """
    draws a trapezoidal or triangular glue tab along the edge defined by
    p1 and p2, with width tab_w and cut angle cut_ang; a triangular tab
    is used when the edge is too short for the trapezoid's parallel side
    """
    # edge direction; a1/a2 are the directions of the two slanted cuts
    al = atan2(p1[0] - p2[0], p1[1] - p2[1])
    a1 = al + cut_ang + PI
    a2 = al - cut_ang
    # calculate cut_len to get the right tab width
    cut_len = tab_w / sin(cut_ang)
    f1 = (p1[0] + cut_len * sin(a1),
          p1[1] + cut_len * cos(a1))
    f2 = (p2[0] + cut_len * sin(a2),
          p2[1] + cut_len * cos(a2))
    edge_len = dist(p1[0], p1[1], p2[0], p2[1])
    if edge_len > 2 * cut_len * cos(cut_ang):  # 'normal' trapezoidal tab
        beginShape()
        vertex(*p1)  # vertex(p1[0], p1[1])
        vertex(*f1)
        vertex(*f2)
        vertex(*p2)
        endShape()
    else:  # short triangular tab
        fm = ((f1[0] + f2[0]) / 2, (f1[1] + f2[1]) / 2)
        beginShape()
        vertex(*p1)
        vertex(*fm)  # middle way of f1 and f2
        vertex(*p2)
        endShape()
| StarcoderdataPython |
6628279 | #
# ElementTree
# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z fredrik $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (<NAME>)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
#
# Copyright (c) 1999-2005 by <NAME>. All rights reserved.
#
# <EMAIL>
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2005 by <NAME>
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
# names exported by "from ElementTree import *"; several of them (dump,
# parse, iterparse, tostring, ...) are defined later in this module
__all__ = [
    # public symbols
    "Comment",
    "dump",
    "Element", "ElementTree",
    "fromstring",
    "iselement", "iterparse",
    "parse",
    "PI", "ProcessingInstruction",
    "QName",
    "SubElement",
    "tostring",
    "TreeBuilder",
    "VERSION", "XML",
    "XMLParser", "XMLTreeBuilder",
    ]
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} or {@link
# #SubElement} factory functions.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import string, sys, re
class _SimpleElementPath:
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None):
for elem in element:
if elem.tag == tag:
return elem.text or ""
return default
def findall(self, element, tag):
if tag[:3] == ".//":
return element.getiterator(tag[3:])
result = []
for elem in element:
if elem.tag == tag:
result.append(elem)
return result
try:
import ElementPath
except ImportError:
# FIXME: issue warning in this case?
ElementPath = _SimpleElementPath()
# TODO: add support for custom namespace resolvers/default namespaces
# TODO: add improved support for incremental parsing
VERSION = "1.2.6"
##
# Internal element class. This class defines the Element interface,
# and provides a reference implementation of this interface.
# <p>
# You should not create instances of this class directly. Use the
# appropriate factory functions instead, such as {@link #Element}
# and {@link #SubElement}.
#
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class _ElementInterface:
    """Reference implementation of the Element interface: a tag, an
    attribute dict, text/tail strings and an ordered list of children
    (kept in the private _children list). Instances are created via
    the Element/SubElement factories, not directly."""
    # <tag attrib>text<child/>...</tag>tail
    ##
    # (Attribute) Element tag.
    tag = None
    ##
    # (Attribute) Element attribute dictionary.  Where possible, use
    # {@link #_ElementInterface.get},
    # {@link #_ElementInterface.set},
    # {@link #_ElementInterface.keys}, and
    # {@link #_ElementInterface.items} to access
    # element attributes.
    attrib = None
    ##
    # (Attribute) Text before first subelement.  This is either a
    # string or the value None, if there was no text.
    text = None
    ##
    # (Attribute) Text after this element's end tag, but before the
    # next sibling element's start tag.  This is either a string or
    # the value None, if there was no text.
    tail = None # text after end tag, if any
    def __init__(self, tag, attrib):
        self.tag = tag
        self.attrib = attrib
        self._children = []
    def __repr__(self):
        return "<Element %s at %x>" % (self.tag, id(self))
    ##
    # Creates a new element object of the same type as this element.
    #
    # @param tag Element tag.
    # @param attrib Element attributes, given as a dictionary.
    # @return A new element instance.
    def makeelement(self, tag, attrib):
        return Element(tag, attrib)
    ##
    # Returns the number of subelements.
    #
    # @return The number of subelements.
    def __len__(self):
        return len(self._children)
    ##
    # Returns the given subelement.
    #
    # @param index What subelement to return.
    # @return The given subelement.
    # @exception IndexError If the given element does not exist.
    def __getitem__(self, index):
        return self._children[index]
    ##
    # Replaces the given subelement.
    #
    # @param index What subelement to replace.
    # @param element The new element value.
    # @exception IndexError If the given element does not exist.
    # @exception AssertionError If element is not a valid object.
    def __setitem__(self, index, element):
        assert iselement(element)
        self._children[index] = element
    ##
    # Deletes the given subelement.
    #
    # @param index What subelement to delete.
    # @exception IndexError If the given element does not exist.
    def __delitem__(self, index):
        del self._children[index]
    ##
    # Returns a list containing subelements in the given range.
    # (Python 2 slice protocol.)
    #
    # @param start The first subelement to return.
    # @param stop The first subelement that shouldn't be returned.
    # @return A sequence object containing subelements.
    def __getslice__(self, start, stop):
        return self._children[start:stop]
    ##
    # Replaces a number of subelements with elements from a sequence.
    #
    # @param start The first subelement to replace.
    # @param stop The first subelement that shouldn't be replaced.
    # @param elements A sequence object with zero or more elements.
    # @exception AssertionError If a sequence member is not a valid object.
    def __setslice__(self, start, stop, elements):
        for element in elements:
            assert iselement(element)
        self._children[start:stop] = list(elements)
    ##
    # Deletes a number of subelements.
    #
    # @param start The first subelement to delete.
    # @param stop The first subelement to leave in there.
    def __delslice__(self, start, stop):
        del self._children[start:stop]
    ##
    # Adds a subelement to the end of this element.
    #
    # @param element The element to add.
    # @exception AssertionError If a sequence member is not a valid object.
    def append(self, element):
        assert iselement(element)
        self._children.append(element)
    ##
    # Inserts a subelement at the given position in this element.
    #
    # @param index Where to insert the new subelement.
    # @exception AssertionError If the element is not a valid object.
    def insert(self, index, element):
        assert iselement(element)
        self._children.insert(index, element)
    ##
    # Removes a matching subelement.  Unlike the <b>find</b> methods,
    # this method compares elements based on identity, not on tag
    # value or contents.
    #
    # @param element What element to remove.
    # @exception ValueError If a matching element could not be found.
    # @exception AssertionError If the element is not a valid object.
    def remove(self, element):
        assert iselement(element)
        self._children.remove(element)
    ##
    # Returns all subelements.  The elements are returned in document
    # order.
    #
    # @return A list of subelements.
    # @defreturn list of Element instances
    def getchildren(self):
        return self._children
    ##
    # Finds the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None
    def find(self, path):
        return ElementPath.find(self, path)
    ##
    # Finds text for the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @return The text content of the first matching element, or the
    #     default value no element was found.  Note that if the element
    #     has is found, but has no text content, this method returns an
    #     empty string.
    # @defreturn string
    def findtext(self, path, default=None):
        return ElementPath.findtext(self, path, default)
    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @return A list or iterator containing all matching elements,
    #    in document order.
    # @defreturn list of Element instances
    def findall(self, path):
        return ElementPath.findall(self, path)
    ##
    # Resets an element.  This function removes all subelements, clears
    # all attributes, and sets the text and tail attributes to None.
    def clear(self):
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None
    ##
    # Gets an element attribute.
    #
    # @param key What attribute to look for.
    # @param default What to return if the attribute was not found.
    # @return The attribute value, or the default value, if the
    #     attribute was not found.
    # @defreturn string or None
    def get(self, key, default=None):
        return self.attrib.get(key, default)
    ##
    # Sets an element attribute.
    #
    # @param key What attribute to set.
    # @param value The attribute value.
    def set(self, key, value):
        self.attrib[key] = value
    ##
    # Gets a list of attribute names.  The names are returned in an
    # arbitrary order (just like for an ordinary Python dictionary).
    #
    # @return A list of element attribute names.
    # @defreturn list of strings
    def keys(self):
        return self.attrib.keys()
    ##
    # Gets element attributes, as a sequence.  The attributes are
    # returned in an arbitrary order.
    #
    # @return A list of (name, value) tuples for all attributes.
    # @defreturn list of (string, string) tuples
    def items(self):
        return self.attrib.items()
    ##
    # Creates a tree iterator.  The iterator loops over this element
    # and all subelements, in document order, and returns all elements
    # with a matching tag.
    # <p>
    # If the tree structure is modified during iteration, the result
    # is undefined.
    #
    # @param tag What tags to look for (default is to return all elements).
    # @return A list or iterator containing all the matching elements.
    # @defreturn list or iterator
    def getiterator(self, tag=None):
        # depth-first, pre-order walk; builds the full list eagerly
        nodes = []
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            nodes.append(self)
        for node in self._children:
            nodes.extend(node.getiterator(tag))
        return nodes
# compatibility
_Element = _ElementInterface
##
# Element factory. This function returns an object implementing the
# standard Element interface. The exact class or type of that object
# is implementation dependent, but it will always be compatible with
# the {@link #_ElementInterface} class in this module.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def Element(tag, attrib={}, **extra):
    # merge the positional attribute dict (copied, so neither the
    # caller's mapping nor the shared default is ever mutated) with
    # keyword attributes, keywords winning on conflict
    attributes = attrib.copy()
    attributes.update(extra)
    return _ElementInterface(tag, attributes)
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
    # build the attribute dict without mutating the caller's mapping,
    # then create the child via the parent's factory and attach it
    attributes = attrib.copy()
    attributes.update(extra)
    child = parent.makeelement(tag, attributes)
    parent.append(child)
    return child
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
    """Return an element that serializes as an XML comment holding *text*.

    The Comment function object itself is used as the sentinel tag that
    the serializer checks for.
    """
    node = Element(Comment)
    node.text = text
    return node
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
    """Return an element that serializes as an XML processing instruction.

    The PI content is *target*, followed by " " and *text* when text is
    non-empty.
    """
    node = Element(ProcessingInstruction)
    node.text = target if not text else target + " " + text
    return node

# short alias, matching the historical public name
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName:
    """Opaque wrapper for a qualified name in "{uri}local" form.

    Pass either the complete "{uri}local" string, or the URI plus a
    separate *tag*, which are combined here.  Wrapping attribute values
    this way gets proper namespace handling on output.
    """

    def __init__(self, text_or_uri, tag=None):
        self.text = "{%s}%s" % (text_or_uri, tag) if tag else text_or_uri

    def __str__(self):
        return self.text

    def __hash__(self):
        return hash(self.text)

    def __cmp__(self, other):
        # Python 1.5/2.x comparison hook; compares on the raw text
        if isinstance(other, QName):
            return cmp(self.text, other.text)
        return cmp(self.text, other)
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree:
    """In-memory XML document: wraps a root element and adds parse/write."""

    def __init__(self, element=None, file=None):
        assert element is None or iselement(element)
        self._root = element # first node
        if file:
            self.parse(file)

    ##
    # Gets the root element for this tree.
    #
    # @return An element instance.
    # @defreturn Element

    def getroot(self):
        return self._root

    ##
    # Replaces the root element for this tree.  This discards the
    # current contents of the tree, and replaces it with the given
    # element.  Use with care.
    #
    # @param element An element instance.

    def _setroot(self, element):
        assert iselement(element)
        self._root = element

    ##
    # Loads an external XML document into this element tree.
    #
    # @param source A file name or file object.
    # @param parser An optional parser instance.  If not given, the
    #     standard XMLTreeBuilder parser is used.
    # @return The document root element.
    # @defreturn Element

    def parse(self, source, parser=None):
        if not hasattr(source, "read"):
            source = open(source, "rb")
        if not parser:
            parser = XMLTreeBuilder()
        while 1:
            # feed the parser in 32k chunks so large files stay cheap
            data = source.read(32768)
            if not data:
                break
            parser.feed(data)
        self._root = parser.close()
        return self._root

    ##
    # Creates a tree iterator for the root element.  The iterator loops
    # over all elements in this tree, in document order.
    #
    # @param tag What tags to look for (default is to return all elements)
    # @return An iterator.
    # @defreturn iterator

    def getiterator(self, tag=None):
        assert self._root is not None
        return self._root.getiterator(tag)

    ##
    # Finds the first toplevel element with given tag.
    # Same as getroot().find(path).
    #
    # @param path What element to look for.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None

    def find(self, path):
        assert self._root is not None
        if path[:1] == "/":
            # absolute paths are rewritten relative to the root element
            path = "." + path
        return self._root.find(path)

    ##
    # Finds the element text for the first toplevel element with given
    # tag.  Same as getroot().findtext(path).
    #
    # @param path What toplevel element to look for.
    # @param default What to return if the element was not found.
    # @return The text content of the first matching element, or the
    #     default value if no element was found.  Note that if the
    #     element is found but has no text content, this method returns
    #     an empty string.
    # @defreturn string

    def findtext(self, path, default=None):
        assert self._root is not None
        if path[:1] == "/":
            path = "." + path
        return self._root.findtext(path, default)

    ##
    # Finds all toplevel elements with the given tag.
    # Same as getroot().findall(path).
    #
    # @param path What element to look for.
    # @return A list or iterator containing all matching elements,
    #    in document order.
    # @defreturn list of Element instances

    def findall(self, path):
        assert self._root is not None
        if path[:1] == "/":
            path = "." + path
        return self._root.findall(path)

    ##
    # Writes the element tree to a file, as XML.
    #
    # @param file A file name, or a file object opened for writing.
    # @param encoding Optional output encoding (default is US-ASCII).

    def write(self, file, encoding="us-ascii"):
        assert self._root is not None
        if not hasattr(file, "write"):
            file = open(file, "wb")
        if not encoding:
            encoding = "us-ascii"
        elif encoding != "utf-8" and encoding != "us-ascii":
            # only non-default encodings get an explicit XML declaration
            file.write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
        self._write(file, self._root, encoding, {})

    def _write(self, file, node, encoding, namespaces):
        # write XML to file
        # *namespaces* maps namespace URI -> prefix for the current scope
        tag = node.tag
        if tag is Comment:
            file.write("<!-- %s -->" % _escape_cdata(node.text, encoding))
        elif tag is ProcessingInstruction:
            file.write("<?%s?>" % _escape_cdata(node.text, encoding))
        else:
            items = node.items()
            xmlns_items = [] # new namespaces in this scope
            try:
                if isinstance(tag, QName) or tag[:1] == "{":
                    tag, xmlns = fixtag(tag, namespaces)
                    if xmlns: xmlns_items.append(xmlns)
            except TypeError:
                _raise_serialization_error(tag)
            file.write("<" + _encode(tag, encoding))
            if items or xmlns_items:
                items.sort() # lexical order
                for k, v in items:
                    try:
                        if isinstance(k, QName) or k[:1] == "{":
                            k, xmlns = fixtag(k, namespaces)
                            if xmlns: xmlns_items.append(xmlns)
                    except TypeError:
                        _raise_serialization_error(k)
                    try:
                        if isinstance(v, QName):
                            v, xmlns = fixtag(v, namespaces)
                            if xmlns: xmlns_items.append(xmlns)
                    except TypeError:
                        _raise_serialization_error(v)
                    file.write(" %s=\"%s\"" % (_encode(k, encoding),
                                               _escape_attrib(v, encoding)))
                for k, v in xmlns_items:
                    file.write(" %s=\"%s\"" % (_encode(k, encoding),
                                               _escape_attrib(v, encoding)))
            if node.text or len(node):
                file.write(">")
                if node.text:
                    file.write(_escape_cdata(node.text, encoding))
                for n in node:
                    # recurse; children may add to *namespaces* temporarily
                    self._write(file, n, encoding, namespaces)
                file.write("</" + _encode(tag, encoding) + ">")
            else:
                file.write(" />")
            for k, v in xmlns_items:
                # close this element's namespace scope (v is the URI key)
                del namespaces[v]
        if node.tail:
            file.write(_escape_cdata(node.tail, encoding))
# --------------------------------------------------------------------
# helpers
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
    """Return true if *element* looks like a valid element object.

    Accepts real _ElementInterface instances as well as any object that
    merely exposes a ``tag`` attribute (duck typing).
    """
    # FIXME: not sure about this; might be a better idea to look
    # for tag/attrib/text attributes
    if isinstance(element, _ElementInterface):
        return True
    return hasattr(element, "tag")
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
    """Write *elem* to sys.stdout as XML, ensuring a trailing newline.

    Debugging helper only; accepts either an ElementTree or a bare
    element, which is wrapped on the fly.
    """
    tree = elem if isinstance(elem, ElementTree) else ElementTree(elem)
    tree.write(sys.stdout)
    tail = tree.getroot().tail
    if not tail or tail[-1] != "\n":
        sys.stdout.write("\n")
def _encode(s, encoding):
    """Encode *s* using *encoding*; objects without an encode method are
    passed through unchanged (the 1.5.2-era assumption that plain
    strings already use the right encoding)."""
    try:
        encode = s.encode
    except AttributeError:
        return s # 1.5.2: assume the string uses the right encoding
    return encode(encoding)
if sys.version[:3] == "1.5":
    _escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
else:
    _escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))

# XML predefined entities; in this copy the values had been collapsed to
# the bare characters (a no-op mapping), which defeats escaping entirely.
# Restored to the proper entity references; characters not listed here
# become numeric character references (see _encode_entity).
_escape_map = {
    "&": "&amp;",
    "<": "&lt;",
    ">": "&gt;",
    '"': "&quot;",
}

_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
}
def _raise_serialization_error(text):
    """Raise a TypeError describing the value that could not be serialized."""
    message = "cannot serialize %r (type %s)" % (text, type(text).__name__)
    raise TypeError(message)
def _encode_entity(text, pattern=_escape):
    """Replace reserved and non-ASCII characters in *text* with entity
    references and return the result as an ASCII string."""
    def escape_entities(m, map=_escape_map):
        pieces = []
        for char in m.group():
            entity = map.get(char)
            if entity is None:
                # no named entity: fall back to a numeric reference
                entity = "&#%d;" % ord(char)
            pieces.append(entity)
        return string.join(pieces, "")
    try:
        return _encode(pattern.sub(escape_entities, text), "ascii")
    except TypeError:
        _raise_serialization_error(text)
#
# the following functions assume an ascii-compatible encoding
# (or "utf-16")
def _escape_cdata(text, encoding=None, replace=string.replace):
    """Escape character data: '&', '<' and '>' become entity references.

    In this copy the replacement strings had been collapsed to the bare
    characters (no-op replaces); restored to the XML predefined
    entities.  If *encoding* is given the text is encoded first, and
    characters the encoding cannot represent fall back to numeric
    entities via _encode_entity.  Raises TypeError for unserializable
    values.
    """
    try:
        if encoding:
            try:
                text = _encode(text, encoding)
            except UnicodeError:
                return _encode_entity(text)
        # '&' must be rewritten first so the entities added below are
        # not themselves double-escaped
        text = replace(text, "&", "&amp;")
        text = replace(text, "<", "&lt;")
        text = replace(text, ">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _escape_attrib(text, encoding=None, replace=string.replace):
    """Escape an attribute value: '&', both quote styles, '<' and '>'
    become entity references.

    In this copy the replacement strings had been collapsed to the bare
    characters (no-op replaces); restored to the XML predefined
    entities.  Both quote characters are escaped so the value is safe
    inside either attribute delimiter.  Raises TypeError for
    unserializable values.
    """
    try:
        if encoding:
            try:
                text = _encode(text, encoding)
            except UnicodeError:
                return _encode_entity(text)
        # '&' first, so later entities are not double-escaped
        text = replace(text, "&", "&amp;")
        text = replace(text, "'", "&apos;") # FIXME: overkill
        text = replace(text, "\"", "&quot;")
        text = replace(text, "<", "&lt;")
        text = replace(text, ">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def fixtag(tag, namespaces):
    # given a decorated tag (of the form {uri}tag), return prefixed
    # tag and namespace declaration, if any
    # *namespaces* (uri -> prefix) is updated in place when a new prefix
    # is allocated for the current serialization scope
    if isinstance(tag, QName):
        tag = tag.text
    namespace_uri, tag = string.split(tag[1:], "}", 1)
    prefix = namespaces.get(namespace_uri)
    if prefix is None:
        prefix = _namespace_map.get(namespace_uri)
        if prefix is None:
            # invent a prefix; the dict size keeps generated names unique
            prefix = "ns%d" % len(namespaces)
        namespaces[namespace_uri] = prefix
        if prefix == "xml":
            # the xml prefix is predefined and must not be redeclared
            xmlns = None
        else:
            xmlns = ("xmlns:%s" % prefix, namespace_uri)
    else:
        xmlns = None
    return "%s:%s" % (prefix, tag), xmlns
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLTreeBuilder} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
    """Parse the XML document in *source* into a new ElementTree.

    *source* may be a file name or a file object; *parser* defaults to
    the standard XMLTreeBuilder.
    """
    result = ElementTree()
    result.parse(source, parser)
    return result
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @return A (event, elem) iterator.
class iterparse:
    """Incremental parser: iterate over (event, data) pairs while reading.

    Supported events are "start", "end", "start-ns" and "end-ns";
    only "end" is reported by default.
    """

    def __init__(self, source, events=None):
        if not hasattr(source, "read"):
            source = open(source, "rb")
        self._file = source
        self._events = []
        self._index = 0
        self.root = self._root = None
        self._parser = XMLTreeBuilder()
        # wire up the parser for event reporting
        parser = self._parser._parser
        append = self._events.append
        if events is None:
            events = ["end"]
        for event in events:
            if event == "start":
                try:
                    # prefer the list-based attribute callback if expat
                    # supports it; fall back to the dict-based one
                    parser.ordered_attributes = 1
                    parser.specified_attributes = 1
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start_list):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
                except AttributeError:
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
            elif event == "end":
                def handler(tag, event=event, append=append,
                            end=self._parser._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event == "start-ns":
                def handler(prefix, uri, event=event, append=append):
                    try:
                        uri = _encode(uri, "ascii")
                    except UnicodeError:
                        pass
                    append((event, (prefix or "", uri)))
                parser.StartNamespaceDeclHandler = handler
            elif event == "end-ns":
                def handler(prefix, event=event, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler

    def next(self):
        # drain buffered events; refill the buffer from the file when empty
        while 1:
            try:
                item = self._events[self._index]
            except IndexError:
                if self._parser is None:
                    # input exhausted: expose the root and stop iterating
                    self.root = self._root
                    try:
                        # on pre-2.2 Pythons the StopIteration name does
                        # not exist; evaluating it raises NameError and
                        # we signal exhaustion with IndexError instead
                        raise StopIteration
                    except NameError:
                        raise IndexError
                # load event buffer
                del self._events[:]
                self._index = 0
                data = self._file.read(16384)
                if data:
                    self._parser.feed(data)
                else:
                    self._root = self._parser.close()
                    self._parser = None
            else:
                self._index = self._index + 1
                return item

    # iterator protocol when available (2.2+), sequence protocol otherwise
    try:
        iter
        def __iter__(self):
            return self
    except NameError:
        def __getitem__(self, index):
            return self.next()
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
def XML(text):
    """Parse *text* (an XML document given as a string constant) and
    return the root Element."""
    builder = XMLTreeBuilder()
    builder.feed(text)
    return builder.close()
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text):
    """Parse *text* and return (root_element, id_map).

    id_map maps each "id" attribute value found in the document to the
    element carrying it.
    """
    builder = XMLTreeBuilder()
    builder.feed(text)
    root = builder.close()
    ids = {}
    for node in root.getiterator():
        node_id = node.get("id")
        if node_id:
            ids[node_id] = node
    return root, ids
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @return An encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None):
    """Serialize *element* and its subtree to an (optionally encoded)
    XML string."""
    chunks = []
    class _Sink:
        # minimal write-only file substitute that collects output chunks
        pass
    sink = _Sink()
    sink.write = chunks.append
    ElementTree(element).write(sink, encoding)
    return string.join(chunks, "")
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder:
    """Builds an element structure from start/data/end parser callbacks."""

    def __init__(self, element_factory=None):
        self._data = [] # data collector
        self._elem = [] # element stack
        self._last = None # last element
        self._tail = None # true if we're after an end tag
        if element_factory is None:
            element_factory = _ElementInterface
        self._factory = element_factory

    ##
    # Flushes the parser buffers, and returns the toplevel document
    # element.
    #
    # @return An Element instance.
    # @defreturn Element

    def close(self):
        assert len(self._elem) == 0, "missing end tags"
        assert self._last != None, "missing toplevel element"
        return self._last

    def _flush(self):
        # attach buffered character data to the surrounding element:
        # as .tail if we just closed an element, otherwise as .text
        if self._data:
            if self._last is not None:
                text = string.join(self._data, "")
                if self._tail:
                    assert self._last.tail is None, "internal error (tail)"
                    self._last.tail = text
                else:
                    assert self._last.text is None, "internal error (text)"
                    self._last.text = text
            self._data = []

    ##
    # Adds text to the current element.
    #
    # @param data A string.  This should be either an 8-bit string
    #     containing ASCII text, or a Unicode string.

    def data(self, data):
        self._data.append(data)

    ##
    # Opens a new element.
    #
    # @param tag The element name.
    # @param attrib A dictionary containing element attributes.
    # @return The opened element.
    # @defreturn Element

    def start(self, tag, attrs):
        self._flush()
        self._last = elem = self._factory(tag, attrs)
        if self._elem:
            self._elem[-1].append(elem)
        self._elem.append(elem)
        self._tail = 0
        return elem

    ##
    # Closes the current element.
    #
    # @param tag The element name.
    # @return The closed element.
    # @defreturn Element

    def end(self, tag):
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @see #ElementTree
# @see #TreeBuilder
class XMLTreeBuilder:
    """Feeds expat parser events into a TreeBuilder (or custom target)."""

    def __init__(self, html=0, target=None):
        try:
            from xml.parsers import expat
        except ImportError:
            raise ImportError(
                "No module named expat; use SimpleXMLTreeBuilder instead"
                )
        # "}" makes expat report namespaced names as "uri}local"
        self._parser = parser = expat.ParserCreate(None, "}")
        if target is None:
            target = TreeBuilder()
        self._target = target
        self._names = {} # name memo cache
        # callbacks
        parser.DefaultHandlerExpand = self._default
        parser.StartElementHandler = self._start
        parser.EndElementHandler = self._end
        parser.CharacterDataHandler = self._data
        # let expat do the buffering, if supported
        try:
            self._parser.buffer_text = 1
        except AttributeError:
            pass
        # use new-style attribute handling, if supported
        try:
            self._parser.ordered_attributes = 1
            self._parser.specified_attributes = 1
            parser.StartElementHandler = self._start_list
        except AttributeError:
            pass
        encoding = None
        if not parser.returns_unicode:
            encoding = "utf-8"
        # target.xml(encoding, None)
        self._doctype = None
        self.entity = {} # entity name -> replacement text, used by _default

    def _fixtext(self, text):
        # convert text string to ascii, if possible
        try:
            return _encode(text, "ascii")
        except UnicodeError:
            return text

    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                # rewrite expat's "uri}local" form into "{uri}local"
                name = "{" + name
            self._names[key] = name = self._fixtext(name)
        return name

    def _start(self, tag, attrib_in):
        # old-style expat callback: attributes arrive as a dict
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        for key, value in attrib_in.items():
            attrib[fixname(key)] = self._fixtext(value)
        return self._target.start(tag, attrib)

    def _start_list(self, tag, attrib_in):
        # new-style expat callback: attributes arrive as a flat
        # [name1, value1, name2, value2, ...] list
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        if attrib_in:
            for i in range(0, len(attrib_in), 2):
                attrib[fixname(attrib_in[i])] = self._fixtext(attrib_in[i+1])
        return self._target.start(tag, attrib)

    def _data(self, text):
        return self._target.data(self._fixtext(text))

    def _end(self, tag):
        return self._target.end(self._fixname(tag))

    def _default(self, text):
        # catch-all handler: entity references and doctype declarations
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                self._target.data(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                raise expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self._parser.ErrorLineNumber,
                    self._parser.ErrorColumnNumber)
                    )
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = string.strip(text)
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if pubid:
                    pubid = pubid[1:-1] # strip the surrounding quotes
                self.doctype(name, pubid, system[1:-1])
                self._doctype = None

    ##
    # Handles a doctype declaration.
    #
    # @param name Doctype name.
    # @param pubid Public identifier.
    # @param system System identifier.

    def doctype(self, name, pubid, system):
        # overridable hook; the default implementation does nothing
        pass

    ##
    # Feeds data to the parser.
    #
    # @param data Encoded data.

    def feed(self, data):
        self._parser.Parse(data, 0)

    ##
    # Finishes feeding data to the parser.
    #
    # @return An element structure.
    # @defreturn Element

    def close(self):
        self._parser.Parse("", 1) # end of data
        tree = self._target.close()
        del self._target, self._parser # get rid of circular references
        return tree
# compatibility: historical public alias for the expat-based builder
XMLParser = XMLTreeBuilder
| StarcoderdataPython |
1670287 | <gh_stars>10-100
#!/usr/bin/env python
#/*******************************************************************************
# Copyright (c) 2012 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#/*******************************************************************************
#--------------------------------- START CB API --------------------------------
from sys import path, argv
from time import sleep
import fnmatch
import os
import pwd
# NOTE: Python 2 script (print statements, "except X, e" syntax) throughout.
home = os.environ["HOME"]
username = pwd.getpwuid(os.getuid())[0]

# Connection details for the CloudBench API daemon are dropped into a
# per-user file by the framework; refuse to run without it.
api_file_name = "/tmp/cb_api_" + username
if os.access(api_file_name, os.F_OK) :
    try :
        _fd = open(api_file_name, 'r')
        _api_conn_info = _fd.read()
        _fd.close()
    except :
        _msg = "Unable to open file containing API connection information "
        _msg += "(" + api_file_name + ")."
        print _msg
        exit(4)
else :
    _msg = "Unable to locate file containing API connection information "
    _msg += "(" + api_file_name + ")."
    print _msg
    exit(4)

_path_set = False

# Locate the framework tree relative to this script (by finding
# code_instrumentation.py under lib/auxiliary) and append its root to
# sys.path so lib.api can be imported below.
for _path, _dirs, _files in os.walk(os.path.abspath(path[0] + "/../")):
    for _filename in fnmatch.filter(_files, "code_instrumentation.py") :
        if _path.count("/lib/auxiliary") :
            path.append(_path.replace("/lib/auxiliary",''))
            _path_set = True
            break
    if _path_set :
        break

from lib.api.api_service_client import *

_msg = "Connecting to API daemon (" + _api_conn_info + ")..."
print _msg
api = APIClient(_api_conn_info)

#---------------------------------- END CB API ---------------------------------

# usage: the single positional argument is the cloud name
if len(argv) < 2 :
    print "./" + argv[0] + " <cloud_name>"
    exit(1)

_cloud_name = argv[1]

try :
    error = False
    _cloud_attached = False
    # verify that the requested cloud is attached before querying it
    for _cloud in api.cldlist() :
        if _cloud["name"] == _cloud_name :
            _cloud_attached = True
            _cloud_model = _cloud["model"]
            break

    if not _cloud_attached :
        print "Cloud " + _cloud_name + " not attached"
        exit(1)

    print "Listing pending objects..."
    ais = api.applist(_cloud_name, "pending")
    for ai in ais :
        print "AI: " + str(ai)

except APIException, obj :
    error = True
    print "API Problem (" + str(obj.status) + "): " + obj.msg

except APINoSuchMetricException, obj :
    error = True
    print "API Problem (" + str(obj.status) + "): " + obj.msg

except KeyboardInterrupt :
    print "Aborting this VM."

except Exception, msg :
    error = True
    print "Problem during experiment: " + str(msg)
| StarcoderdataPython |
1738223 | # Created by zhouwang on 2018/5/5.
from .base import BaseRequestHandler, permission
import datetime
import pymysql
import logging
logger = logging.getLogger()
def argements_valid(handler, pk=None):
    """Validate logfile form arguments read from *handler*.

    Returns (error, data): *error* maps field name -> problem string
    (empty when everything is valid) and *data* holds the cleaned
    values.  A non-None *pk* excludes that row id from the duplicate
    check (used when updating an existing row).
    """
    error = dict()
    name = handler.get_argument('name', '')
    path = handler.get_argument('path', '')
    comment = handler.get_argument('comment', '')
    host = handler.get_argument('host', '')
    monitor_choice = handler.get_argument('monitor_choice', '0')
    if not path:
        error['path'] = 'Required'
    else:
        # duplicate check; name is escaped, pk is interpolated as an int
        # NOTE(review): the query filters on *name* but reports the
        # failure under the 'path' key -- confirm which field this
        # uniqueness check is meant to guard.
        select_sql = 'SELECT id FROM logfile WHERE name="%s" %s'
        select_arg = (pymysql.escape_string(name), 'and id!="%d"' % pk if pk else '')
        count = handler.cursor.execute(select_sql % select_arg)
        if count:
            error['path'] = 'Already existed'
    for i, j in ((name, 'name'), (host, 'host'), (comment, 'comment')):
        if not i:
            error[j] = 'Required'
    if monitor_choice not in ('0', '-1'):
        error['monitor_choice'] = 'Invalid'
    data = dict(name=name,
                path=path,
                comment=comment,
                host=host,
                hosts=host.split(','),  # comma-separated multi-host field
                monitor_choice=int(monitor_choice))
    return error, data
def add_valid(func):
    """Decorator: validate POST form data before running the handler.

    On failure the wrapped handler is skipped and a 400 payload is
    returned; on success the cleaned values are stored on self.reqdata.
    """
    def _wrapper(self):
        problems, self.reqdata = argements_valid(self)
        if not problems:
            return func(self)
        return dict(code=400, msg='Bad POST data', error=problems)
    return _wrapper
def query_valid(func):
    """Decorator: reject list queries that use unknown GET parameters.

    Only list requests (pk == 0) are checked; detail requests pass
    straight through to the wrapped handler.
    """
    allowed = ('id', 'name', 'host', 'path', 'comment', 'create_time',
               'order', 'search', 'offset', 'limit', 'sort')
    def _wrapper(self, pk):
        if not pk and self.request.arguments:
            bad = {key: 'Bad key' for key in self.request.arguments.keys()
                   if key not in allowed}
            if bad:
                return dict(code=400, msg='Bad GET param', error=bad)
        return func(self, pk)
    return _wrapper
def update_valid(func):
    """Decorator: 404 unless the target row exists, then validate the
    PUT form data before running the handler."""
    def _wrapper(self, pk):
        exists = self.cursor.execute('SELECT id FROM logfile WHERE id="%d"' % pk)
        if not exists:
            return {'code': 404, 'msg': 'Update row not found'}
        problems, self.reqdata = argements_valid(self, pk)
        if problems:
            return dict(code=400, msg='Bad PUT param', error=problems)
        return func(self, pk)
    return _wrapper
def del_valid(func):
    """Decorator: 404 unless the row scheduled for deletion exists."""
    def _wrapper(self, pk):
        found = self.cursor.execute('SELECT id FROM logfile WHERE id="%d"' % pk)
        if not found:
            return dict(code=404, msg='Delete row not found')
        return func(self, pk)
    return _wrapper
class Handler(BaseRequestHandler):
    """CRUD endpoints for logfile records (GET/POST/PUT/DELETE)."""

    @permission()
    def get(self, pk=0):
        ''' Query logfile '''
        response_data = self._query(int(pk))
        self._write(response_data)

    @permission(role=2)
    def post(self):
        ''' Add logfile '''
        response_data = self._add()
        self._write(response_data)

    @permission(role=2)
    def put(self, pk=0):
        ''' Update logfile '''
        response_data = self._update(int(pk))
        self._write(response_data)

    @permission(role=2)
    def delete(self, pk=0):
        ''' Delete logfile '''
        response_data = self._del(int(pk))
        self._write(response_data)

    @query_valid
    def _query(self, pk):
        # pk == 0 means "list everything"; a non-zero pk selects one row
        fields = search_fields = ['id', 'name', 'host', 'path', 'comment', 'create_time']
        where, order, limit = self.select_sql_params(int(pk), fields, search_fields)
        self.cursor.execute(self.select_sql % (where, order, limit))
        results = self.cursor.dictfetchall()
        if limit:
            # paginated list responses also report the unpaginated total
            self.cursor.execute(self.total_sql % where)
            total = self.cursor.dictfetchone().get('total')
            return dict(code=200, msg='Query Successful', data=results, total=total)
        return dict(code=200, msg='Query Successful', data=results)

    @add_valid
    def _add(self):
        # insert the logfile row plus one logfile_host row per host, atomically
        try:
            with self.transaction(atomic=True):
                insert_arg = (self.reqdata['name'], self.reqdata['host'], self.reqdata['path'],
                              datetime.datetime.now().strftime('%Y-%m-%d %H:%M'), self.reqdata['comment'],
                              self.reqdata['monitor_choice'])
                self.cursor.execute(self.insert_sql, insert_arg)
                self.cursor.execute(self.last_insert_id_sql)
                insert = self.cursor.dictfetchone()
                insert_host_mp_args = [(insert['id'], host) for host in self.reqdata['hosts']]
                self.cursor.executemany(self.insert_host_mp_sql, insert_host_mp_args)
        except Exception as e:
            logger.error('Add logfile failed: %s' % str(e))
            return dict(code=500, msg='Add failed')
        else:
            return dict(code=200, msg='Add successful', data=insert)

    @update_valid
    def _update(self, pk):
        # rewrite the row and rebuild its host mappings from scratch
        try:
            with self.transaction(atomic=True):
                update_arg = (self.reqdata['name'], self.reqdata['host'], self.reqdata['path'],
                              self.reqdata['comment'], self.reqdata['monitor_choice'], pk)
                self.cursor.execute(self.update_sql, update_arg)
                delete_host_mp_arg = (pk,)
                self.cursor.execute(self.delete_host_mp_sql, delete_host_mp_arg)
                insert_host_mp_args = [(pk, host) for host in self.reqdata['hosts']]
                self.cursor.executemany(self.insert_host_mp_sql, insert_host_mp_args)
        except Exception as e:
            logger.error('Update logfile failed: %s' % str(e))
            return dict(code=500, msg='Update failed')
        else:
            return dict(code=200, msg='Update successful', data=dict(id=pk))

    @del_valid
    def _del(self, pk):
        # cascade delete: the row, its host mappings, and its monitor items
        try:
            with self.transaction(atomic=True):
                delete_arg = (pk,)
                self.cursor.execute(self.delete_sql, delete_arg)
                self.cursor.execute(self.delete_host_mp_sql, delete_arg)
                self.cursor.execute(self.delete_monitor_sql, delete_arg)
        except Exception as e:
            logger.error('Delete logfile failed: %s' % str(e))
            return dict(code=500, msg='Delete failed')
        else:
            return dict(code=200, msg='Delete successful')

    # parameterized statements used by the methods above
    insert_sql = \
        'INSERT INTO logfile (name, host, path, create_time, comment, monitor_choice) VALUES (%s, %s, %s, %s, %s, %s)'
    insert_host_mp_sql = 'INSERT INTO logfile_host (logfile_id, host) VALUES (%s, %s)'
    update_sql = 'UPDATE logfile SET name=%s, host=%s, path=%s, comment=%s, monitor_choice=%s WHERE id=%s'
    delete_sql = 'DELETE FROM logfile WHERE id=%s'
    delete_host_mp_sql = 'DELETE FROM logfile_host WHERE logfile_id=%s'
    delete_monitor_sql = 'DELETE FROM monitor_item WHERE logfile_id=%s'
    last_insert_id_sql = 'SELECT LAST_INSERT_ID() as id'
    # the %% escapes survive the later "%" interpolation of where/order/limit
    select_sql = '''
        SELECT
            id, name, host, path,
            date_format(create_time, "%%Y-%%m-%%d %%H:%%i:%%s") as create_time,
            comment, monitor_choice
        FROM
            logfile
        %s %s %s
    '''
    total_sql = 'SELECT count(*) as total FROM logfile %s'
3484252 | from __future__ import absolute_import
from enum import IntEnum
class SearchType(IntEnum):
    """Kind of entity a search operates on."""
    ISSUE = 0
    EVENT = 1
| StarcoderdataPython |
8072393 | import attr
# Inspection fixture: the inline <warning> markup encodes the IDE warning
# expected on __setattr__ when the attrs class is already frozen.
@attr.dataclass(frozen=True)
class A4:
    a: int = 1

    def <warning descr="'__setattr__' is ignored if the class already defines 'frozen' parameter">__setattr__</warning>(self, key, value):
        pass
# A4(1).b = 2
# Plain (non-attrs) base whose __setattr__ is inherited below; no warning
# markup is expected here.
class Base4:
    def __setattr__(self, key, value):
        pass
# Frozen attrs subclass of Base4: the inherited __setattr__ draws no warning.
@attr.dataclass(frozen=True)
class Derived4(Base4):
    d: int = 1
# Derived4(1).b = 2
# Inspection fixture: the inline <warning> markup encodes the IDE warning
# expected on __delattr__ when the attrs class is already frozen.
@attr.dataclass(frozen=True)
class A2:
    a: int = 1

    def <warning descr="'__delattr__' is ignored if the class already defines 'frozen' parameter">__delattr__</warning>(self, key):
        pass
# del A2(1).a
# Plain (non-attrs) base whose __delattr__ is inherited below; no warning
# markup is expected here.
class Base2:
    def __delattr__(self, key):
        pass
# Frozen attrs subclass of Base2: the inherited __delattr__ draws no warning.
@attr.dataclass(frozen=True)
class Derived2(Base2):
    d: int = 1
# del Derived2(1).d | StarcoderdataPython |
248987 | <reponame>witchtrash/suika-api
from typing import List, Optional, Literal
from fastapi import APIRouter, Depends, HTTPException, Query
from pydantic import BaseModel
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import asc, desc
from fastapi_pagination import Page
from fastapi_pagination.ext.sqlalchemy import paginate
from suika.core.db import get_db
from suika.models.product import Product
from suika.schemas.product import ProductResponse, ProductCollection
from suika.schemas.price import PriceResponse
router = APIRouter()
class ProductParams(BaseModel):
    """Query parameters accepted by the product list endpoint."""
    # sort direction and field are restricted to closed Literal sets
    sort_direction: Optional[Literal["asc", "desc"]] = Query("asc")
    sort_by: Optional[Literal["id", "name", "current_price", "abv"]] = Query("id")
    # free-text filter matched against product name and SKU
    filter: Optional[str] = Query(None)
@router.get(
    "/",
    response_model=ProductCollection,
    summary="Get products",
    response_description="Response containing a list of products",
)
async def get_products(
    db: Session = Depends(get_db),
    params: ProductParams = Depends(),
) -> Page[ProductResponse]:
    """
    Get a paginated list of products, optionally sorted and filtered.
    """
    # sort_by already defaults to "id" in ProductParams; this is a second guard
    sort_field = params.sort_by if params.sort_by is not None else "id"
    sort = desc(sort_field) if params.sort_direction == "desc" else asc(sort_field)
    query = db.query(Product).order_by(sort)

    if params.filter:
        # NOTE(review): name is matched as a substring but sku only as a
        # suffix (no trailing %) -- confirm whether that is intentional.
        query = query.filter(
            Product.name.like(f"%{params.filter}%")
            | Product.sku.like(f"%{params.filter}")
        )

    return paginate(query)
@router.get(
    "/{product_id}",
    response_model=ProductResponse,
    summary="Get product",
    response_description="Response containing a single product",
)
async def get_product(
    product_id: int,
    db: Session = Depends(get_db),
) -> ProductResponse:
    """Fetch one product by its primary key; raise 404 when absent."""
    match = db.query(Product).filter(Product.id == product_id).first()
    if match is None:
        raise HTTPException(status_code=404, detail="Product not found.")
    return match
@router.get(
    "/{product_id}/price",
    response_model=List[PriceResponse],
    summary="Get prices",
    response_description="Response containing historical "
    "pricing information for a given product",
)
async def get_prices(
    product_id: int,
    db: Session = Depends(get_db),
) -> List[PriceResponse]:
    """Return the historical prices recorded for one product; 404 when absent."""
    found = db.query(Product).filter(Product.id == product_id).first()
    if found is None:
        raise HTTPException(status_code=404, detail="Product not found.")
    return found.prices
| StarcoderdataPython |
8188886 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# directory containing this script
CWD = os.path.dirname(os.path.abspath(__file__))
import sys
# make the parent directory importable when this file runs as a script
sys.path.append(os.path.join(CWD, '..'))
from tap import Tap
from aims.database.models import *
class Args(Tap):
    # the Literal annotation restricts --task to these two pipelines
    task: Literal['qm_cv', 'md_npt']
    """The task of molecular simulation."""
def main(args: Args):
    """Print a summary of molecule counts and per-status job counts for
    the selected simulation task (qm_cv or md_npt)."""
    mols = session.query(Molecule)
    print('There are total %i molecules.' % mols.count())
    print('%i molecules have been selected through active learning.' % mols.filter_by(active=True).count())
    print('%i molecules have been rejected through active learning.' % mols.filter_by(inactive=True).count())
    print('%i molecules haven\'t been considered in active learning.' %
          mols.filter_by(active=False, inactive=False).count())
    if args.task == 'qm_cv':
        jobs = session.query(QM_CV)
        # Report molecules whose QM CV workflow hit FAILED without ever
        # reaching ANALYZED.
        for mol in session.query(Molecule).filter_by(active=True):
            if Status.ANALYZED not in mol.status_qm_cv and Status.FAILED in mol.status_qm_cv:
                print(f'{mol.id} failed.')
    elif args.task == 'md_npt':
        jobs = session.query(MD_NPT)
    else:
        return
    print('There are total %i jobs' % jobs.count())
    # Labels are matched to the integer status value i - 1, so 'FAILED'
    # corresponds to status -1, 'STARTED' to 0, and so on.
    for i, status in enumerate(['FAILED', 'STARTED', 'BUILD', 'PREPARED', 'SUBMITED', 'DONE', 'ANALYZED',
                                'NOT_CONVERGED', 'EXTENDED']):
        print('There are %i jobs in status %s.' % (jobs.filter_by(status=i-1).count(), status))
# Script entry point: parse the typed CLI arguments and print the summary.
if __name__ == '__main__':
    main(args=Args().parse_args())
| StarcoderdataPython |
1971401 | <filename>Examples/TheresaExp/Basic/Model.py
""" Helper functions for pre-/postprocessing """
from BoundaryConditions.Simulation.SimulationData import getSimData
from GenericModel.Design import generateGenericCell
from GenericModel.PARAMETER import PBTYPES_NOW
from SystemComponentsFast import TheresaSystem
import pandas as pd
def addTheresaSystem(cell, nSteps):
    """Attach a THERESA system model to ``cell`` and hand the cell back.

    NOTE(review): the first TheresaSystem constructor argument is
    hard-coded to 200; its meaning is not visible here -- confirm against
    the SystemComponentsFast bindings before changing it.
    """
    cell.add_theresa(TheresaSystem(200, nSteps))
    return cell
def getDefaultCellData(start, end):
    """ Return simulation data and default cell for HiL Simulation
    Arguments:
        start {str} -- Start Date of Simulation
        end {str} -- End Date of Simulation
    Returns:
        tuple -- nSteps, time, SLP, HWP, Weather, Solar, cell
    """
    region = 'East'
    # agents
    nSepBSLagents = 10
    pAgricultureBSLsep = 0.7
    # Building mix and participation fractions keyed by building type
    # (FSH/REH/SAH/BAH).  NOTE(review): the p* dictionaries hold fractions
    # in [0, 1]; the type abbreviations are presumably defined by the
    # GenericModel package -- confirm there.
    nBuildings = {'FSH': 500, 'REH': 500, 'SAH': 400, 'BAH': 150}
    pAgents = {'FSH': 1., 'REH': 1., 'SAH': 0.9, 'BAH': 0.75}
    pPHHagents = {'FSH': 0.9, 'REH': 0.9, 'SAH': 0.8, 'BAH': 1.}
    pAgriculture = {'FSH': 0.0, 'REH': 0.0, 'SAH': 0.0, 'BAH': 0.0}
    pDHN = {'FSH': 0.0, 'REH': 0.0, 'SAH': 0.25, 'BAH': 1.}
    pPVplants = 0.2
    # Heat pump penetration per efficiency class (classes 1-3 disabled).
    pHeatpumps = {'class_1': 0, 'class_2': 0,
                  'class_3': 0, 'class_4': 0.12,
                  'class_5': 0.27}
    pCHP = 0.02  # Fraction of electrical chp generation at demand
    nSteps, time, SLP, HWP, Weather, Solar = getSimData(start, end, region)
    cell = generateGenericCell(nBuildings, pAgents,
                               pPHHagents, pAgriculture,
                               pDHN, pPVplants, pHeatpumps, pCHP, PBTYPES_NOW,
                               nSepBSLagents, pAgricultureBSLsep,
                               region, nSteps)
    return nSteps, time, SLP, HWP, Weather, Solar, cell
def getSaveDataFrame():
    """Create the empty ``float64`` results frame used to record one
    simulation run; columns form a two-level (group, measurement) index."""
    layout = [
        ('Cell', ['Electrical Generation [MW]',
                  'Thermal Generation [MW]',
                  'Electrical Load [MW]', 'Thermal Load [MW]']),
        ('Environment', ['T [degC]', 'E diffuse [W/m^2]',
                         'E direct [W/m^2]', 'Solar elevation [deg]',
                         'Solar azimuth [deg]']),
        ('1B01 Measurements', ['Level [m]', 'Pressure [bar]',
                               'Temperature [degC]', 'Enthalpy [kJ/kg]',
                               'MassFlow_in [kg/h]', 'Pressure_in [bar]',
                               'Temperature_in [degC]',
                               'Enthalpy_in [kJ/kg]',
                               'MassFlow_out [kg/h]',
                               'Pressure_out [bar]',
                               'Temperature_out [degC]',
                               'Enthalpy_out [kJ/kg]',
                               'Heater State [%]']),
        ('Storage State', ['Energy stored [MWh]', 'Charge [%]']),
        ('Storage Balance', ['Thermal Energy Requested [MWh]',
                             'Thermal Energy Delivered [MWh]',
                             'Model Power Equivalence [MW]',
                             'Energy in [MWh]']),
        ('Storage Input: CHP', ['actuation [%]',
                                'Thermal Power Output [MW]',
                                'Thermal Energy Delivered [MWh]',
                                'Model Power Equivalence [MW]']),
        ('Storage Input: Boiler', ['actuation [%]',
                                   'Thermal Power Output [MW]',
                                   'Thermal Energy Delivered [MWh]',
                                   'Model Power Equivalence [MW]']),
    ]
    # Flatten (group, [names]) pairs into ordered (group, name) tuples.
    flat = [(group, name) for group, names in layout for name in names]
    return pd.DataFrame(columns=pd.MultiIndex.from_tuples(flat),
                        dtype='float64')
def saveParameter(saveLoc, cell, PLCparameter):
    """Write aggregate cell parameters and the PLC parameters to
    ``<saveLoc>parameter.csv`` (semicolon separated, no header).

    Arguments:
        saveLoc {str} -- output path prefix (should end with a separator)
        cell -- cell model exposing buildings, n_buildings,
                n_sep_bsl_agents, n_cells, eg and t_out_n
        PLCparameter {dict} -- parameters of the scaled thermal system
    """
    nAgents = 0
    nPV = 0
    for building in cell.buildings:
        nAgents += building.n_agents
        if building.pv:
            nPV += 1
    # Bug fix: guard against a cell without buildings -- the original
    # divided by cell.n_buildings unconditionally (ZeroDivisionError).
    pPV = nPV / cell.n_buildings * 100. if cell.n_buildings else 0.
    # NOTE(review): 'Nummber' typo kept on purpose; downstream consumers
    # may rely on this exact CSV label.
    CellParameter = {'Nummber of Buildings': cell.n_buildings,
                     'Number of Agents in Buildings': nAgents,
                     'Number of sep. Business Agents': cell.n_sep_bsl_agents,
                     'Number of Sub-Cells': cell.n_cells,
                     'Mean annual global irradiation [kWh/m^2]': cell.eg,
                     'Normed outside temperature [degC]': cell.t_out_n,
                     'Proportion of Buildings with PV [%]': pPV
                     }
    parameter = {'Cell': CellParameter,
                 'Scaled Thermal System THERESA': PLCparameter
                 }
    parameter = pd.DataFrame.from_dict(parameter,
                                       orient='index').stack().to_frame()
    parameter.to_csv(saveLoc + "parameter.csv", sep=';', header=False)
def saveStep(time, cellData, envData, Nodes, sDF):
sDF.loc[time, ('Cell',
'Electrical Generation [MW]')] = cellData['E gen'] * 1e-6
sDF.loc[time, ('Cell',
'Electrical Load [MW]')] = cellData['E load'] * 1e-6
sDF.loc[time, ('Cell',
'Thermal Generation [MW]')] = cellData['T gen'] * 1e-6
sDF.loc[time, ('Cell',
'Thermal Load [MW]')] = cellData['E load'] * 1e-6
sDF.loc[time, ('Environment', 'T [degC]')] = envData['T [degC]']
sDF.loc[time, ('Environment',
'E diffuse [W/m^2]')] = envData['E diffuse [W/m^2]']
sDF.loc[time, ('Environment',
'E direct [W/m^2]')] = envData['E direct [W/m^2]']
sDF.loc[time, ('Environment',
'Solar elevation [deg]')] = envData['Solar elevation [deg]']
sDF.loc[time, ('Environment',
'Solar azimuth [deg]')] = envData['Solar azimuth [deg]']
SG = Nodes['steamGen'].get_value()
sDF.loc[time, ('1B01 Measurements',
'Level [m]')] = SG.Level
sDF.loc[time, ('1B01 Measurements',
'Pressure [bar]')] = SG.Pressure
sDF.loc[time, ('1B01 Measurements',
'Temperature [degC]')] = SG.Temperature
sDF.loc[time, ('1B01 Measurements',
'Enthalpy [kJ/kg]')] = Nodes['h'].get_value()
sDF.loc[time, ('1B01 Measurements',
'MassFlow_in [kg/h]')] = SG.MassFlow_in
sDF.loc[time, ('1B01 Measurements',
'Pressure_in [bar]')] = SG.Pressure_in
sDF.loc[time, ('1B01 Measurements',
'Temperature_in [degC]')] = SG.Temperature_in
sDF.loc[time, ('1B01 Measurements',
'Enthalpy_in [kJ/kg]')] = Nodes['hIn'].get_value()
sDF.loc[time, ('1B01 Measurements',
'MassFlow_out [kg/h]')] = SG.MassFlow_out
sDF.loc[time, ('1B01 Measurements',
'Pressure_out [bar]')] = SG.Pressure_out
sDF.loc[time, ('1B01 Measurements',
'Temperature_out [degC]')] = SG.Temperature_out
sDF.loc[time, ('1B01 Measurements',
'Enthalpy_out [kJ/kg]')] = Nodes['hOut'].get_value()
sDF.loc[time, ('1B01 Measurements',
'Heater State [%]')] = SG.heater_proc
CHP = Nodes['CHP'].get_value()
sDF.loc[time, ('Storage Input: CHP',
'actuation [%]')] = CHP.actuation * 100.
sDF.loc[time, ('Storage Input: CHP',
'Thermal Power Output [MW]')] = CHP.power
sDF.loc[time, ('Storage Input: CHP',
'Thermal Energy Delivered [MWh]')] = CHP.E_Delivered
sDF.loc[time, ('Storage Input: CHP',
'Model Power Equivalence [MW]')] = CHP.P_Model
Boiler = Nodes['Boiler'].get_value()
sDF.loc[time, ('Storage Input: Boiler',
'actuation [%]')] = Boiler.actuation * 100.
sDF.loc[time, ('Storage Input: Boiler',
'Thermal Power Output [MW]')] = Boiler.power
sDF.loc[time, ('Storage Input: Boiler',
'Thermal Energy Delivered [MWh]')] = Boiler.E_Delivered
sDF.loc[time, ('Storage Input: Boiler',
'Model Power Equivalence [MW]')] = Boiler.P_Model
Storage = Nodes['steamGenModel'].get_value()
sDF.loc[time, ('Storage State',
'Energy stored [MWh]')] = Storage.stored
sDF.loc[time, ('Storage State',
'Charge [%]')] = Storage.charge
sDF.loc[time, ('Storage balance',
'Thermal Energy Requested [MWh]')] = Storage.E_requested
sDF.loc[time, ('Storage balance',
'Thermal Energy Delivered [MWh]')] = Storage.E_delivered
sDF.loc[time, ('Storage balance',
'Model Power Equivalence [MW]')] = Storage.P_equivalence
sDF.loc[time, ('Storage balance',
'Energy in [MWh]')] = Storage.E_in
return sDF
| StarcoderdataPython |
393446 | from __future__ import division
from mqc.mqc import MQC
from misc import au_to_K, call_name
import os, shutil, textwrap
import numpy as np
import pickle
class BOMD(MQC):
    """ Class for born-oppenheimer molecular dynamics (BOMD)

        :param object molecule: Molecule object
        :param object thermostat: Thermostat object
        :param integer istate: Electronic state
        :param double dt: Time interval
        :param integer nsteps: Total step of nuclear propagation
        :param string unit_dt: Unit of time step
        :param integer out_freq: Frequency of printing output
        :param integer verbosity: Verbosity of output
    """
    def __init__(self, molecule, thermostat=None, istate=0, dt=0.5, nsteps=1000, unit_dt="fs", out_freq=1, verbosity=0):
        # Initialize input values; BOMD propagates on a single adiabatic
        # state, so the surface-hopping related base-class arguments are
        # passed as None/False.
        super().__init__(molecule, thermostat, istate, dt, nsteps, None, None, None, \
            False, None, None, unit_dt, out_freq, verbosity)

    def run(self, qm, mm=None, output_dir="./", l_save_qm_log=False, l_save_mm_log=False, l_save_scr=True, restart=None):
        """ Run MQC dynamics according to BOMD

            :param object qm: QM object containing on-the-fly calculation infomation
            :param object mm: MM object containing MM calculation infomation
            :param string output_dir: Name of directory where outputs to be saved.
            :param boolean l_save_qm_log: Logical for saving QM calculation log
            :param boolean l_save_mm_log: Logical for saving MM calculation log
            :param boolean l_save_scr: Logical for saving scratch directory
            :param string restart: Option for controlling dynamics restarting
        """
        # Initialize PyUNIxMD
        base_dir, unixmd_dir, qm_log_dir, mm_log_dir =\
            self.run_init(qm, mm, output_dir, l_save_qm_log, l_save_mm_log, l_save_scr, restart)
        bo_list = [self.istate]
        qm.calc_coupling = False
        self.print_init(qm, mm, restart)
        # Fix: identity comparisons with None ('is'/'is not') replace the
        # previous '== None'/'!= None' (PEP 8).
        if (restart is None):
            # Calculate initial input geometry at t = 0.0 s
            self.istep = -1
            self.mol.reset_bo(qm.calc_coupling)
            qm.get_data(self.mol, base_dir, bo_list, self.dt, self.istep, calc_force_only=False)
            if (self.mol.l_qmmm and mm is not None):
                mm.get_data(self.mol, base_dir, bo_list, self.istep, calc_force_only=False)
            self.update_energy()
            self.write_md_output(unixmd_dir, self.istep)
            self.print_step(self.istep)
        elif (restart == "write"):
            # Reset initial time step to t = 0.0 s
            self.istep = -1
            self.write_md_output(unixmd_dir, self.istep)
            self.print_step(self.istep)
        elif (restart == "append"):
            # Set initial time step to last successful step of previous dynamics
            self.istep = self.fstep
        self.istep += 1

        # Main MD loop
        for istep in range(self.istep, self.nsteps):
            self.calculate_force()
            self.cl_update_position()
            self.mol.reset_bo(qm.calc_coupling)
            qm.get_data(self.mol, base_dir, bo_list, self.dt, istep, calc_force_only=False)
            if (self.mol.l_qmmm and mm is not None):
                mm.get_data(self.mol, base_dir, bo_list, istep, calc_force_only=False)
            self.calculate_force()
            self.cl_update_velocity()
            if (self.thermo is not None):
                self.thermo.run(self)
            self.update_energy()
            if ((istep + 1) % self.out_freq == 0):
                self.write_md_output(unixmd_dir, istep)
                self.print_step(istep)
            if (istep == self.nsteps - 1):
                self.write_final_xyz(unixmd_dir, istep)
            # Checkpoint after every step so an interrupted run can be
            # restarted from the last successful step (restart="append").
            self.fstep = istep
            restart_file = os.path.join(base_dir, "RESTART.bin")
            with open(restart_file, 'wb') as f:
                pickle.dump({'qm':qm, 'md':self}, f)

        # Delete scratch directory
        if (not l_save_scr):
            tmp_dir = os.path.join(unixmd_dir, "scr_qm")
            if (os.path.exists(tmp_dir)):
                shutil.rmtree(tmp_dir)
            if (self.mol.l_qmmm and mm is not None):
                tmp_dir = os.path.join(unixmd_dir, "scr_mm")
                if (os.path.exists(tmp_dir)):
                    shutil.rmtree(tmp_dir)

    def calculate_force(self):
        """ Routine to calculate the forces

            In BOMD the nuclear force is just the force of the current
            adiabatic state.
        """
        self.rforce = np.copy(self.mol.states[self.istate].force)

    def update_energy(self):
        """ Routine to update the energy of molecules in BOMD
        """
        # Update kinetic energy
        self.mol.update_kinetic()
        self.mol.epot = self.mol.states[self.istate].energy
        self.mol.etot = self.mol.epot + self.mol.ekin

    def print_init(self, qm, mm, restart):
        """ Routine to print the initial information of dynamics

            :param object qm: QM object containing on-the-fly calculation infomation
            :param object mm: MM object containing MM calculation infomation
            :param string restart: Option for controlling dynamics restarting
        """
        # Print initial information about molecule, qm, mm and thermostat
        super().print_init(qm, mm, restart)
        # Print dynamics information for start line
        dynamics_step_info = textwrap.dedent(f"""\
        {"-" * 118}
        {"Start Dynamics":>65s}
        {"-" * 118}
        """)
        # Print INIT for each step
        INIT = f" #INFO{'STEP':>8s}{'State':>7s}{'Kinetic(H)':>13s}{'Potential(H)':>15s}{'Total(H)':>13s}{'Temperature(K)':>17s}"
        dynamics_step_info += INIT
        print (dynamics_step_info, flush=True)

    def print_step(self, istep):
        """ Routine to print each steps infomation about dynamics

            :param integer istep: Current MD step
        """
        # Instantaneous temperature from kinetic energy: 2 * Ekin / ndof,
        # converted from atomic units to Kelvin.
        ctemp = self.mol.ekin * 2. / float(self.mol.ndof) * au_to_K
        # Print INFO for each step
        INFO = f" INFO{istep + 1:>9d}{self.istate:>5d} "
        INFO += f"{self.mol.ekin:14.8f}{self.mol.epot:15.8f}{self.mol.etot:15.8f}"
        INFO += f"{ctemp:13.6f}"
        print (INFO, flush=True)
| StarcoderdataPython |
6405001 | <reponame>cevirici/dominion-woodcutter
# -*- coding: utf-8 -*-
import os
from copy import deepcopy
from django.conf import settings
from .Card import *
from .Pred import *
# Global registries populated at import time from the bundled data files.
Cards = {}     # upper-cased simple name -> Card
CardList = []  # all cards, later sorted in place by Card.index
Preds = {}     # pred key (third data field) -> Pred
PredList = []  # all preds, later sorted in place by Pred.index
def empty(i, blockLengths, moves, state):
    # Placeholder parse callback: ignores every argument and contributes
    # no extra data.
    return {}
def staticWorth(val):
    """Build a worth callback that always reports the fixed value ``val``,
    regardless of game state or player."""
    def constant_worth(gS, player):
        return val
    return constant_worth
# Load card definitions from the bundled data file: hex index, then eight
# descriptive fields, then an optional static worth value.
with open(os.path.join(settings.STATIC_ROOT, "woodcutter/data/carddata.txt"), "r") as cardFile:
    for line in cardFile:
        t = line.strip().split(",")
        index = int(t[0], 16)
        # Bug fix: the previous `t_ptr = *t` was a SyntaxError (starred
        # expression outside a tuple/call); slice `t` directly instead.
        c = Card(index, t[1:9], empty)
        if len(t) > 9:
            c.worth = staticWorth(int(t[9]))
        Cards[t[1].upper()] = c
        CardList.append(c)
# Map upper-cased simple names to their position in CardList.
cardOrder = {card.simple_name.upper(): i for i, card in enumerate(CardList)}
def supplyOrder(card):
    """Sort key: the base supply cards come first in a fixed order, and
    every other card follows, ordered by its position in CardList."""
    base_order = {
        "COLONY": 0,
        "PLATINUM": 1,
        "PROVINCE": 2,
        "GOLD": 3,
        "DUCHY": 4,
        "SILVER": 5,
        "ESTATE": 6,
        "COPPER": 7,
        "CURSE": 8,
    }
    if card in base_order:
        return base_order[card]
    return cardOrder[card] + len(base_order)
# Order the card registry by the index parsed from the data file, then
# release the data-file handle.
CardList.sort(key=lambda c: c.index)
cardFile.close()
# Load predicate ("pred") definitions from the bundled data file; fields
# are '~'-separated: hex index, name, key.
with open(os.path.join(settings.STATIC_ROOT, "woodcutter/data/preddata.txt"), "r") as predFile:
    for line in predFile:
        fields = line.strip().split("~")
        pred = Pred(int(fields[0], 16), fields[1], empty, fields[2])
        Preds[fields[2]] = pred
        PredList.append(pred)
# Snapshot (deep copy) of the preds in file order, taken before the
# in-place sort by index.
predParseOrder = deepcopy(PredList)
PredList.sort(key=lambda p: p.index)
| StarcoderdataPython |
4955940 | <reponame>codyhan94/PMA-scheduler
import datetime
from flask.ext.wtf import Form
from wtforms import (Field, HiddenField, TextField,
TextAreaField, SubmitField, DateField, SelectField)
from wtforms.validators import Required, Length, Email
from flask.ext.wtf.html5 import EmailField
from ..utils import (USERNAME_LEN_MIN, USERNAME_LEN_MAX)
# Field length limits (characters) for form validation.
EMAIL_LEN_MIN = 4
EMAIL_LEN_MAX = 64
MESSAGE_LEN_MIN = 16
MESSAGE_LEN_MAX = 1024
TIMEZONE_LEN_MIN = 1
TIMEZONE_LEN_MAX = 64
# Timezone choices consumed by SelectOptgroupField: each key acts as an
# optgroup label, each value is a list of (UTC offset string, display
# text) pairs.  NOTE(review): "TZ1"/"TZ2" repeat entries from the full
# "TZ3" list -- presumably featured groups shown first; confirm intent.
TIMEZONES = {
    "TZ1": [("-8.00", "(GMT -8:00) Pacific Time (US & Canada)"),
            ("-7.00", "(GMT -7:00) Mountain Time (US & Canada)"),
            ("-6.00", "(GMT -6:00) Central Time (US & Canada), Mexico City"),
            ("-5.00", "(GMT -5:00) Eastern Time (US & Canada), Bogota, Lima")],
    "TZ2": [("8.00", "(GMT +8:00) Beijing, Perth, Singapore, Hong Kong")],
    "TZ3": [("-12.00", "(GMT -12:00) Eniwetok, Kwajalein"),
            ("-11.00", "(GMT -11:00) Midway Island, Samoa"),
            ("-10.00", "(GMT -10:00) Hawaii"),
            ("-9.00", "(GMT -9:00) Alaska"),
            ("-8.00", "(GMT -8:00) Pacific Time (US & Canada)"),
            ("-7.00", "(GMT -7:00) Mountain Time (US & Canada)"),
            ("-6.00", "(GMT -6:00) Central Time (US & Canada), Mexico City"),
            ("-5.00", "(GMT -5:00) Eastern Time (US & Canada), Bogota, Lima"),
            ("-4.00", "(GMT -4:00) Atlantic Time (Canada), Caracas, La Paz"),
            ("-3.50", "(GMT -3:30) Newfoundland"),
            ("-3.00", "(GMT -3:00) Brazil, Buenos Aires, Georgetown"),
            ("-2.00", "(GMT -2:00) Mid-Atlantic"),
            ("-1.00", "(GMT -1:00 hour) Azores, Cape Verde Islands"),
            ("0.00", "(GMT) Western Europe Time, London, Lisbon, Casablanca"),
            ("1.00", "(GMT +1:00 hour) Brussels, Copenhagen, Madrid, Paris"),
            ("2.00", "(GMT +2:00) Kaliningrad, South Africa"),
            ("3.00", "(GMT +3:00) Baghdad, Riyadh, Moscow, St. Petersburg"),
            ("3.50", "(GMT +3:30) Tehran"),
            ("4.00", "(GMT +4:00) Abu Dhabi, Muscat, Baku, Tbilisi"),
            ("4.50", "(GMT +4:30) Kabul"),
            ("5.00", "(GMT +5:00) Ekaterinburg, Islamabad, Karachi, Tashkent"),
            ("5.50", "(GMT +5:30) Bombay, Calcutta, Madras, New Delhi"),
            ("5.75", "(GMT +5:45) Kathmandu"),
            ("6.00", "(GMT +6:00) Almaty, Dhaka, Colombo"),
            ("7.00", "(GMT +7:00) Bangkok, Hanoi, Jakarta"),
            ("8.00", "(GMT +8:00) Beijing, Perth, Singapore, Hong Kong"),
            ("9.00", "(GMT +9:00) Tokyo, Seoul, Osaka, Sapporo, Yakutsk"),
            ("9.50", "(GMT +9:30) Adelaide, Darwin"),
            ("10.00", "(GMT +10:00) Eastern Australia, Guam, Vladivostok"),
            ("11.00", "(GMT +11:00) Magadan, Solomon Islands, New Caledonia"),
            ("12.00", "(GMT +12:00) Auckland, Wellington, Fiji, Kamchatka")]
}
# Placeholder text rendered inside the empty message box.
MESSAGE_PLACEHOLDER = """Please indicate how you would like us to contact you
for the conversation to launch your complimentary service package: we offer
conference calling via Webex and Go To Meeting, and telephony as well. Should
you choose to leave your email, we will use it only for contact in this
case."""
class SelectOptgroupField(SelectField):
    """
    Monkey-patched SelectField to make it support one-level optgroup.
    """
    # A really really dirty workaround, or we will get a "too many values to
    # unpack" error.
    # NOTE(review): returning early here skips SelectField's choice
    # validation entirely, so any submitted value is accepted -- confirm
    # that the submitted timezone is validated elsewhere.
    def pre_validate(self, form):
        return True
class TimeRangeSliderField(Field):
    # Marker field with no server-side behaviour of its own; presumably
    # rendered as a time-range slider by a custom widget/template.
    pass
class MakeAppointmentForm(Form):
    """Appointment-request form: contact details, preferred date/time
    range, timezone (grouped choices) and a free-text message."""
    next = HiddenField()
    name = TextField(u'Name',
                     [Required(),
                      Length(USERNAME_LEN_MIN, USERNAME_LEN_MAX)])
    time_range = TimeRangeSliderField(u'Time Range')
    start_time = HiddenField(u'start_time')
    end_time = HiddenField(u'end_time')
    email = EmailField(u'Email',
                       [Email(),
                        Length(EMAIL_LEN_MIN, EMAIL_LEN_MAX)])
    # Bug fix: pass the callable (no parentheses) so "today" is evaluated
    # per request.  `datetime.date.today()` was evaluated once at import
    # time, freezing the default date for a long-running process.
    date = DateField(u'Date',
                     [Required()],
                     default=datetime.date.today)
    timezone = SelectOptgroupField(u'Timezone',
                                   [Required(),
                                    Length(TIMEZONE_LEN_MIN,
                                           TIMEZONE_LEN_MAX)],
                                   choices=TIMEZONES)
    message = TextAreaField(u'Message',
                            [Required(),
                             Length(MESSAGE_LEN_MIN, MESSAGE_LEN_MAX)],
                            description={'placeholder': MESSAGE_PLACEHOLDER})
    submit = SubmitField('OK')
| StarcoderdataPython |
9661431 | import re
class MyTardisModelBase(object):
    """Base for client-side MyTardis model payloads: plain model fields
    live as instance attributes, extra metadata lives in parameter_sets."""
    def __init__(self):
        # List of MyTardisParameterSet instances attached to this model.
        self.parameter_sets = []
    def fields_to_dict(self, ignore_none=True):
        """
        Return the attributes of the class as a dictionary, ignoring
        the parameter_sets attribute and anything beginning with an underscore.
        These values represent the data for a MyTardis Django model,
        eg and Experiment, Dataset or DataFile, without the additional
        Parameters/ParameterNames.
        :param ignore_none: skip attributes whose value is None when True
        :type ignore_none: bool
        :rtype: dict
        """
        fields = {}
        for k, v in self.__dict__.items():
            if not k.startswith('_') and k != 'parameter_sets':
                if ignore_none:
                    if v is not None:
                        fields[k] = v
                else:
                    fields[k] = v
        return fields
    def package(self, ignore_none=True):
        # Combine the plain fields with the packaged parameter sets into
        # one request-ready dictionary.
        request_dict = self.fields_to_dict(ignore_none=ignore_none)
        request_dict['parameter_sets'] = \
            self.package_parameter_sets(ignore_none=ignore_none)
        return request_dict
    def to_json(self, ignore_none=True):
        # Serialize the packaged dict to JSON.
        import datetime, json
        # NOTE(review): the handler maps any non-date unserializable
        # object to None (JSON null) rather than raising -- confirm this
        # silent fallback is intended.
        date_handler = lambda obj: (
            obj.isoformat()
            if isinstance(obj, datetime.datetime)
            or isinstance(obj, datetime.date)
            else None
        )
        return json.dumps(self.package(ignore_none=ignore_none),
                          default=date_handler)
    @property
    def parameters(self):
        """
        Get just the first parameter set.
        :rtype: MyTardisParameterSet
        """
        # NOTE(review): returns an empty list (not None) when no sets
        # exist, despite the declared rtype -- presumably so callers can
        # iterate either way; confirm before changing.
        if len(self.parameter_sets) >= 1:
            return self.parameter_sets[0]
        else:
            return []
    @parameters.setter
    def parameters(self, value):
        """
        Assign a single parameter set. Deletes any existing parameter sets.
        :type value: MyTardisParameterSet
        """
        del self.parameter_sets[:]
        self.parameter_sets = [value]
    @parameters.deleter
    def parameters(self):
        """
        Deletes all parameter sets.
        """
        del self.parameter_sets[:]
    def package_parameter_sets(self, ignore_none=True):
        """
        Returns:
           [{'schema':'http://x', 'parameters': [{'name':'w', 'value': 'z'},
            {'schema':'http://y', 'parameters': [{'name':'a', 'value': 'b'},
           ]
        :param ignore_none: skip None-valued parameters when True
        :return: one packaged dict per attached parameter set
        :rtype: list[dict]
        """
        parameter_sets = [param_set.package_parameter_set(ignore_none=
                                                          ignore_none)
                          for param_set in self.parameter_sets]
        return parameter_sets
class MyTardisParameterSet(object):
    """A named set of parameters attached to a MyTardis model; public
    attributes are the parameter values, underscore-prefixed attributes
    carry schema metadata."""
    def __init__(self):
        # Schema namespace URI for this parameter set.
        self._namespace__schema = None # type: unicode
    def from_dict(self, d, existing_only=True, ignore_missing=True):
        """Populate parameter attributes from a dictionary.

        When existing_only is True, only attributes already defined on the
        instance are set; unknown keys raise KeyError unless
        ignore_missing is True.
        """
        for k, v in d.items():
            if existing_only:
                if hasattr(self, k):
                    setattr(self, k, v)
                else:
                    if not ignore_missing:
                        raise KeyError('Attribute %s not found in %s' %
                                       (k, self.__class__))
            else:
                setattr(self, k, v)
    def to_dict(self, ignore_none=True):
        """Return public (non-underscore) attributes as a dict, skipping
        None values when ignore_none is True."""
        params = {}
        for k, v in self.__dict__.items():
            if not k.startswith('_'):
                if ignore_none:
                    if v is not None:
                        params[k] = v
                else:
                    params[k] = v
        return params
    def package_parameter_list(self, ignore_none=True):
        """Return parameters as [{'name': ..., 'value': ...}, ...]."""
        pset = self.to_dict(ignore_none=ignore_none)
        parameter_list = [{u'name': k, u'value': v} for k, v in pset.items()]
        return parameter_list
    def package_parameter_set(self, ignore_none=True):
        """Return {'schema': <namespace>, 'parameters': [...]} for this set."""
        return {u'schema': self._namespace__schema,
                u'parameters': self.package_parameter_list(
                    ignore_none=ignore_none)
                }
    def to_schema(self):
        """Build a Django-fixture-style schema record from the
        underscore-prefixed ``*__schema`` attributes."""
        schema = {'pk': None, 'model': 'tardis_portal.schema', 'fields': {}}
        attributes = sorted(self.__dict__.items())
        for k, v in attributes:
            if k.startswith('_') and k.endswith('__schema'):
                # remove leading _ and __schema suffix
                kname = re.sub(re.escape('__schema')+'$', '', k[1:])
                # model and pk go into the top level, all the rest are
                # part of the nest 'fields' dictionary
                if kname in ['pk', 'model']:
                    schema[kname] = v
                else:
                    schema['fields'][kname] = v
        return [schema]
    def to_parameter_schema(self):
        """Collect the ``*__attr_schema`` attribute values, sorted by their
        repr for deterministic output."""
        param_schemas = []
        attributes = sorted(self.__dict__.items())
        for k, v in attributes:
            if k.startswith('_') and k.endswith('__attr_schema'):
                param_schemas.append(v)
        # param_schemas.sort() # not Python3 compatible
        # We sort the list of dictionaries so this output is deterministic
        param_schemas = sorted(param_schemas, key=lambda d: repr(d))
        return param_schemas
class Experiment(MyTardisModelBase):
    """Client-side counterpart of the MyTardis Experiment model; holds the
    plain fields, while parameter sets live on the base class."""
    def __init__(self, *args, **kwargs):
        super(Experiment, self).__init__(*args, **kwargs)
        self.title = None # type: unicode
        self.institution_name = None # type: unicode
        self.description = None # type: unicode
        self.start_time = None # type: datetime
        self.end_time = None # type: datetime
        self.created_time = None # type: datetime
        self.created_by = None # type: unicode
        self.handle = None # type: unicode
        self.locked = None # type: bool
        self.public_access = None # type: int
        self.license = None # type: unicode
class Dataset(MyTardisModelBase):
    """Client-side counterpart of the MyTardis Dataset model."""
    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)
        # URIs/identifiers of the experiments this dataset belongs to.
        self.experiments = [] # type: list[str]
        self.description = None # type: unicode
        self.immutable = None # type: bool
        self.instrument = None # type: unicode
class DataFile(MyTardisModelBase):
    """Client-side counterpart of the MyTardis DataFile model; currently
    adds no fields beyond the base class."""
    def __init__(self, *args, **kwargs):
        super(DataFile, self).__init__(*args, **kwargs)
        pass
| StarcoderdataPython |
6419067 | <reponame>aaabbb2021/Trescope<filename>Trescope4Python/library/src/trescope/controller/Label.py<gh_stars>100-1000
from trescope.controller import ControllerNode
class Label(ControllerNode):
    """
    Controller node showing a read-only piece of text (a label).
    """

    def __init__(self):
        super().__init__()
        self.__value = None
        self.__openIfLink = True

    def value(self, value: str):
        """
        Set the text shown by the label.

        :param value: value
        :return: self , for chain call
        """
        self.__value = value
        return self

    def openIfLink(self, openIfLink: bool):
        """
        Choose whether clicking a hyper-link label opens a new web page.

        :param openIfLink: open if link , default `True`
        :return: self , for chain call
        """
        self.__openIfLink = openIfLink
        return self

    def toDict(self):
        serialized = dict(super().toDict())
        serialized['type'] = 'Label'
        serialized['value'] = self.__value
        serialized['openIfLink'] = self.__openIfLink
        return serialized
| StarcoderdataPython |
8134421 | #!/usr/bin/env python
# encoding: utf-8
"""Simple throttle to wait for Solr to start on busy test servers"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import time
import requests
if __name__ == '__main__':
    # Poll Solr's ping handler until it answers 200 OK, giving up after
    # max_retries attempts (15 s apart).
    max_retries = 100
    retry_delay = 15
    status_url = 'http://localhost:8983/solr/core0/admin/ping'

    for retry_count in range(1, max_retries + 1):
        status_code = 0
        try:
            response = requests.get(status_url)
            status_code = response.status_code
            if status_code == 200:
                # SystemExit derives from BaseException, so the broad
                # handler below does not swallow it.
                sys.exit(0)
        except Exception as exc:
            print('Unhandled exception requesting %s: %s' % (status_url, exc), file=sys.stderr)

        print('Waiting {0} seconds for Solr to start (retry #{1}, status {2})'.format(
            retry_delay, retry_count, status_code), file=sys.stderr)
        time.sleep(retry_delay)

    print("Solr took too long to start (#%d retries)" % max_retries, file=sys.stderr)
    sys.exit(1)
| StarcoderdataPython |
1755439 | <filename>chat_downloader/sites/youtube.py
from .common import (
BaseChatDownloader, Chat, Timeout
)
from requests.exceptions import RequestException
from ..errors import (
NoChatReplay,
NoContinuation,
ParsingError,
VideoUnavailable,
LoginRequired,
VideoUnplayable,
InvalidParameter,
UnexpectedHTML
)
from urllib import parse
import json
import time
import re
from ..utils import (
try_get,
multi_get,
time_to_seconds,
seconds_to_time,
int_or_none,
get_colours,
try_get_first_key,
try_get_first_value,
remove_prefixes,
remove_suffixes,
camel_case_split,
ensure_seconds,
log,
attempts
)
from datetime import datetime
from base64 import b64decode
class YouTubeChatDownloader(BaseChatDownloader):
def __init__(self, **kwargs):
super().__init__(**kwargs)
_NAME = 'YouTube'
def __str__(self):
return 'youtube.com'
# return 'youtube.com'
_SITE_DEFAULT_PARAMS = {
'format': 'youtube',
}
# _DEFAULT_FORMAT = ''
# Regex provided by youtube-dl
_VALID_URL = r'''(?x)^
(
# http(s):// or protocol-independent URL
(?:https?://|//)
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
# v/ or embed/ or e/
(?:(?:v|embed|e)/(?!videoseries))
|(?: # or the v= param in all its forms
# preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?
(?:\?|\#!?) # the params delimiter ? or # or #!
# any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
(?:.*?[&;])??
v=
)
))
|(?:
youtu\.be # just youtu.be/xxxx
)/)
)? # all until now is optional -> you can pass the naked ID
# here is it! the YouTube video ID
(?P<id>[0-9A-Za-z_-]{11})
# if we found the ID, everything can follow
(?(1).+)?
$'''
_TESTS = [
# Get top live streams
# https://www.youtube.com/results?search_query&sp=CAMSAkAB
# OTHER:
# Japanese characters and lots of superchats
# https://www.youtube.com/watch?v=UlemRwXYWHg
# strange end times:
# https://www.youtube.com/watch?v=DzEbfQI4TPQ
# https://www.youtube.com/watch?v=7PPnCOhkxqo
# purchased a product linked to the YouTube channel merchandising
# https://youtu.be/y5ih7nqEoc4
# TESTING FOR CORRECT FUNCIONALITY
{
'name': 'Get chat messages from live chat replay',
'params': {
'url': 'https://www.youtube.com/watch?v=wXspodtIxYU',
'max_messages': 10
},
'expected_result': {
'message_types': ['text_message'],
'action_types': ['add_chat_item'],
'messages_condition': lambda messages: len(messages) > 0,
}
},
{
'name': 'Get superchat and ticker messages from live chat replay',
'params': {
'url': 'https://www.youtube.com/watch?v=UlemRwXYWHg',
'end_time': 20,
'message_groups': ['superchat', 'tickers']
},
'expected_result': {
'message_types': ['paid_message', 'ticker_paid_message_item', 'membership_item', 'ticker_sponsor_item', 'paid_sticker', 'ticker_paid_sticker_item'],
'action_types': ['add_chat_item', 'add_live_chat_ticker_item'],
'messages_condition': lambda messages: len(messages) > 0,
}
},
{
'name': 'Get all messages from live chat replay',
'params': {
'url': 'https://www.youtube.com/watch?v=97w16cYskVI',
'end_time': 50,
'message_types': ['all']
},
'expected_result': {
'message_types': ['viewer_engagement_message', 'paid_message', 'ticker_paid_message_item', 'text_message', 'paid_sticker', 'ticker_paid_sticker_item'],
'action_types': ['add_chat_item', 'add_live_chat_ticker_item'],
'messages_condition': lambda messages: len(messages) > 0,
}
},
{
'name': 'Get messages from top chat replay',
'params': {
'url': 'https://www.youtube.com/watch?v=zVCs9Cug_qM',
'start_time': 0,
'end_time': 20,
'chat_type': 'top'
},
'expected_result': {
'message_types': ['text_message'],
'action_types': ['add_chat_item'],
'messages_condition': lambda messages: len(messages) > 0,
}
},
{
'name': 'Chat replay with donations',
'params': {
'url': 'https://www.youtube.com/watch?v=Ih2WTyY62J4',
'start_time': 0,
'end_time': 40,
'message_groups': ['donations']
},
'expected_result': {
'message_types': ['donation_announcement'],
'action_types': ['add_chat_item'],
'messages_condition': lambda messages: len(messages) > 0,
}
},
{
# 874:24:05 current test
'name': 'Get chat messages from an unplayable stream.',
'params': {
'url': 'https://www.youtube.com/watch?v=V2Afni3S-ok',
'start_time': 10,
'end_time': 100,
},
'expected_result': {
'message_types': ['text_message'],
'action_types': ['add_chat_item'],
'messages_condition': lambda messages: len(messages) > 0,
}
},
# TESTING FOR ERRORS
{
'name': 'Video does not exist',
'params': {
'url': 'https://www.youtube.com/watch?v=xxxxxxxxxxx',
},
'expected_result': {
'error': VideoUnavailable,
}
},
{
'name': 'Members-only content',
'params': {
'url': 'https://www.youtube.com/watch?v=vprErlL1w2E',
},
'expected_result': {
'error': VideoUnplayable,
}
},
{
'name': 'Chat is disabled for this live stream',
'params': {
'url': 'https://www.youtube.com/watch?v=XWq5kBlakcQ',
},
'expected_result': {
'error': NoChatReplay,
}
},
{
'name': 'Live chat replay has been turned off for this video',
'params': {
'url': 'https://www.youtube.com/watch?v=7lGZvbasx6A',
},
'expected_result': {
'error': NoChatReplay,
}
},
{
'name': 'Video is private',
'params': {
'url': 'https://www.youtube.com/watch?v=ijFMXqa-N0c',
},
'expected_result': {
'error': LoginRequired,
}
},
{
'name': 'The uploader has not made this video available in your country.',
'params': {
'url': 'https://www.youtube.com/watch?v=sJL6WA-aGkQ',
},
'expected_result': {
'error': VideoUnplayable,
}
}
]
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
_YT_HOME = 'https://www.youtube.com'
_YOUTUBE_INIT_API_TEMPLATE = _YT_HOME + '/{}?continuation={}'
_YOUTUBE_CHAT_API_TEMPLATE = _YT_HOME + \
b64decode(
'L3lvdXR1YmVpL3YxL2xpdmVfY2hhdC9nZXRfe30/a2V5PUFJemFTeUFPX0ZKMlNscVU4UTRTVEVITEdDaWx3X1k5XzExcWNXOA==').decode()
_MESSAGE_GROUPS = {
'messages': [
'text_message' # normal message
],
'superchat': [
# superchat messages which appear in chat
'membership_item',
'paid_message',
'paid_sticker',
],
'tickers': [
# superchat messages which appear ticker (at the top)
'ticker_paid_sticker_item',
'ticker_paid_message_item',
'ticker_sponsor_item',
],
'banners': [
'banner',
'banner_header'
],
'donations': [
'donation_announcement'
],
'engagement': [
# message saying live chat replay is on
'viewer_engagement_message',
],
'purchases': [
'purchased_product_message' # product purchased
],
'mode_changes': [
'mode_change_message' # e.g. slow mode enabled
],
'deleted': [
'deleted_message'
],
'bans': [
'ban_user'
],
'placeholder': [
'placeholder_item' # placeholder
]
}
_MESSAGE_TYPES = ['all']
for group in _MESSAGE_GROUPS:
_MESSAGE_TYPES += _MESSAGE_GROUPS[group]
@ staticmethod
def parse_youtube_link(text):
if text.startswith(('/redirect', 'https://www.youtube.com/redirect')): # is a redirect link
info = dict(parse.parse_qsl(parse.urlsplit(text).query))
return info.get('q') or ''
elif text.startswith('//'):
return 'https:' + text
elif text.startswith('/'): # is a youtube link e.g. '/watch','/results'
return YouTubeChatDownloader._YT_HOME + text
else: # is a normal link
return text
@ staticmethod
def parse_navigation_endpoint(navigation_endpoint, default_text=''):
url = try_get(navigation_endpoint, lambda x: YouTubeChatDownloader.parse_youtube_link(
x['commandMetadata']['webCommandMetadata']['url'])) or default_text
return url
@ staticmethod
def parse_runs(run_info, parse_links=False):
""" Reads and parses YouTube formatted messages (i.e. runs). """
message_text = ''
runs = run_info.get('runs') or []
for run in runs:
if 'text' in run:
if parse_links and 'navigationEndpoint' in run: # is a link and must parse
# if something fails, use default text
message_text += YouTubeChatDownloader.parse_navigation_endpoint(
run['navigationEndpoint'], run['text'])
else: # is a normal message
message_text += run['text']
elif 'emoji' in run:
message_text += run['emoji']['shortcuts'][0]
else:
# unknown run
message_text += str(run)
return message_text
    @staticmethod
    def _parse_item(item, info=None):
        """Parse a single renderer dict into a flat info dict.

        *item* maps one renderer name to its payload; every payload key is
        remapped through _REMAPPING/_REMAP_FUNCTIONS into *info*, which is
        used as the starting point (mutated and returned).
        """
        if info is None:
            info = {}
        # info is starting point
        item_index = try_get_first_key(item)
        item_info = item.get(item_index)

        if not item_info:
            return info

        # Remap every raw YouTube key into our naming scheme.
        for key in item_info:
            BaseChatDownloader.remap(info, YouTubeChatDownloader._REMAPPING,
                                     YouTubeChatDownloader._REMAP_FUNCTIONS, key, item_info[key])

        # check for colour information
        for colour_key in YouTubeChatDownloader._COLOUR_KEYS:
            if colour_key in item_info:  # if item has colour information
                # Store as e.g. 'body_background_colour' -> hex string.
                info[camel_case_split(colour_key.replace('Color', 'Colour'))] = get_colours(
                    item_info[colour_key]).get('hex')

        item_endpoint = item_info.get('showItemEndpoint')
        if item_endpoint:  # has additional information in a nested renderer
            renderer = multi_get(
                item_endpoint, 'showLiveChatItemEndpoint', 'renderer')

            if renderer:
                info.update(YouTubeChatDownloader._parse_item(renderer))

        # amount is money with currency
        amount = info.get('amount')
        if amount:
            pass  # TODO split amount into currency type and amount (float)

        BaseChatDownloader.move_to_dict(info, 'author')

        # TODO determine if youtube glitch has occurred
        # round(time_in_seconds/timestamp) == 1
        time_in_seconds = info.get('time_in_seconds')
        time_text = info.get('time_text')

        if time_in_seconds is not None:

            if time_text is not None:
                # all information was provided
                # check if time_in_seconds is <= 0
                if time_in_seconds <= 0:
                    info['time_in_seconds'] = time_to_seconds(time_text)
                else:
                    # recreate time text from time in seconds
                    info['time_text'] = seconds_to_time(int(time_in_seconds))

        elif time_text is not None:  # doesn't have time in seconds, but has time text
            info['time_in_seconds'] = time_to_seconds(time_text)
        else:
            pass
            # has no current video time information
            # (usually live video or a sub-item)

        return info
_IMAGE_SIZE_REGEX = r'=s(\d+)'
# TODO move regex to inline where possible?
    @staticmethod
    def parse_badges(badge_items):
        """Parse a list of badge renderers.

        Returns a list of dicts with optional 'title', 'icon_name' and
        'icons' entries (sized images plus a full-resolution 'source').
        """
        badges = []
        for badge in badge_items:
            to_add = {}
            parsed_badge = YouTubeChatDownloader._parse_item(badge)

            title = parsed_badge.pop('tooltip', None)
            if title:
                to_add['title'] = title

            icon = parsed_badge.pop('icon', None)
            if icon:
                to_add['icon_name'] = icon.lower()

            badge_icons = parsed_badge.pop('badge_icons', None)
            if badge_icons:
                to_add['icons'] = []

                url = None
                for icon in badge_icons:
                    url = icon.get('url')
                    if url:
                        matches = re.search(
                            YouTubeChatDownloader._IMAGE_SIZE_REGEX, url)
                        if matches:
                            size = int(matches.group(1))
                            to_add['icons'].append(
                                BaseChatDownloader.create_image(url, size, size))
                # NOTE: `url` deliberately keeps its value from the last loop
                # iteration -- that URL minus the '=s<size>' suffix is used as
                # the full-resolution 'source' image.
                if url:
                    to_add['icons'].append(BaseChatDownloader.create_image(
                        url[0:url.index('=')], image_id='source'))

            badges.append(to_add)
        return badges
@ staticmethod
def parse_thumbnails(item):
# sometimes thumbnails come as a list
if isinstance(item, list):
item = item[0] # rebase
# TODO add source:
# https://yt3.ggpht.com/ytc/AAUvwnhBYeK7_iQTJbXe6kIMpMlCI2VsVHhb6GBJuYeZ=s32-c-k-c0xffffffff-no-rj-mo
# https://yt3.ggpht.com/ytc/AAUvwnhBYeK7_iQTJbXe6kIMpMlCI2VsVHhb6GBJuYeZ
thumbnails = item.get('thumbnails') or []
return list(map(lambda x: BaseChatDownloader.create_image(
x.get('url'),
x.get('width'),
x.get('height'),
), thumbnails))
@ staticmethod
def parse_action_button(item):
return {
'url': try_get(item, lambda x: YouTubeChatDownloader.parse_navigation_endpoint(x['buttonRenderer']['navigationEndpoint'])) or '',
'text': multi_get(item, 'buttonRenderer', 'text', 'simpleText') or ''
}
_REMAP_FUNCTIONS = {
'simple_text': lambda x: x.get('simpleText'),
'convert_to_int': lambda x: int_or_none(x),
'get_thumbnails': lambda x: YouTubeChatDownloader.parse_thumbnails(x),
'parse_runs': lambda x: YouTubeChatDownloader.parse_runs(x, True),
'parse_badges': lambda x: YouTubeChatDownloader.parse_badges(x),
'parse_icon': lambda x: x.get('iconType'),
'parse_action_button': lambda x: YouTubeChatDownloader.parse_action_button(x),
}
_REMAPPING = {
# 'youtubeID' : ('mapped_id', 'remapping_function')
'id': 'message_id',
'authorExternalChannelId': 'author_id',
'authorName': ('author_name', 'simple_text'),
# TODO author_display_name
'purchaseAmountText': ('amount', 'simple_text'),
'message': ('message', 'parse_runs'),
'timestampText': ('time_text', 'simple_text'),
'timestampUsec': ('timestamp', 'convert_to_int'),
'authorPhoto': ('author_images', 'get_thumbnails'),
'tooltip': 'tooltip',
'icon': ('icon', 'parse_icon'),
'authorBadges': ('author_badges', 'parse_badges'),
# stickers
'sticker': ('sticker_images', 'get_thumbnails'),
# ticker_paid_message_item
'fullDurationSec': ('ticker_duration', 'convert_to_int'),
'amount': ('amount', 'simple_text'),
# ticker_sponsor_item
'detailText': ('message', 'parse_runs'),
'customThumbnail': ('badge_icons', 'get_thumbnails'),
# membership_item
'headerSubtext': ('message', 'parse_runs'),
'sponsorPhoto': ('sponsor_icons', 'get_thumbnails'),
# ticker_paid_sticker_item
'tickerThumbnails': ('ticker_icons', 'get_thumbnails'),
# deleted messages
'deletedStateMessage': ('message', 'parse_runs'),
'targetItemId': 'target_message_id',
'externalChannelId': 'author_id',
# action buttons
'actionButton': ('action', 'parse_action_button'),
# addBannerToLiveChatCommand
'text': ('message', 'parse_runs'),
'viewerIsCreator': 'viewer_is_creator',
'targetId': 'target_message_id',
'isStackable': 'is_stackable',
# removeBannerForLiveChatCommand
'targetActionId': 'target_message_id',
# donation_announcement
'subtext': ('sub_message', 'parse_runs'),
# tooltip
'detailsText': ('message', 'parse_runs'),
}
_COLOUR_KEYS = [
# paid_message
'authorNameTextColor', 'timestampColor', 'bodyBackgroundColor',
'headerTextColor', 'headerBackgroundColor', 'bodyTextColor',
# paid_sticker
'backgroundColor', 'moneyChipTextColor', 'moneyChipBackgroundColor',
# ticker_paid_message_item
'startBackgroundColor', 'amountTextColor', 'endBackgroundColor',
# ticker_sponsor_item
'detailTextColor'
]
_STICKER_KEYS = [
# to actually ignore
'stickerDisplayWidth', 'stickerDisplayHeight', # ignore
# parsed elsewhere
'sticker',
]
_KEYS_TO_IGNORE = [
# to actually ignore
'contextMenuAccessibility', 'contextMenuEndpoint', 'trackingParams', 'accessibility',
'contextMenuButton',
# parsed elsewhere
'showItemEndpoint',
'durationSec',
# banner parsed elsewhere
'header', 'contents', 'actionId',
# tooltipRenderer
'dismissStrategy', 'suggestedPosition', 'promoConfig'
]
_KNOWN_KEYS = set(list(_REMAPPING.keys()) +
_COLOUR_KEYS + _STICKER_KEYS + _KEYS_TO_IGNORE)
# KNOWN ACTIONS AND MESSAGE TYPES
_KNOWN_ADD_TICKER_TYPES = {
'addLiveChatTickerItemAction': [
'liveChatTickerSponsorItemRenderer',
'liveChatTickerPaidStickerItemRenderer',
'liveChatTickerPaidMessageItemRenderer'
]
}
_KNOWN_ADD_ACTION_TYPES = {
'addChatItemAction': [
# message saying Live Chat replay is on
'liveChatViewerEngagementMessageRenderer',
'liveChatMembershipItemRenderer',
'liveChatTextMessageRenderer',
'liveChatPaidMessageRenderer',
'liveChatPlaceholderItemRenderer', # placeholder
'liveChatDonationAnnouncementRenderer',
'liveChatPaidStickerRenderer',
'liveChatModeChangeMessageRenderer', # e.g. slow mode enabled
# TODO find examples of:
# 'liveChatPurchasedProductMessageRenderer', # product purchased
# liveChatLegacyPaidMessageRenderer
# liveChatModerationMessageRenderer
# liveChatAutoModMessageRenderer
]
}
_KNOWN_REPLACE_ACTION_TYPES = {
'replaceChatItemAction': [
'liveChatPlaceholderItemRenderer',
'liveChatTextMessageRenderer'
]
}
# actions that have an 'item'
_KNOWN_ITEM_ACTION_TYPES = {
**_KNOWN_ADD_TICKER_TYPES, **_KNOWN_ADD_ACTION_TYPES}
# [message deleted] or [message retracted]
_KNOWN_REMOVE_ACTION_TYPES = {
'markChatItemsByAuthorAsDeletedAction': [ # TODO ban?
'banUser' # deletedStateMessage
],
'markChatItemAsDeletedAction': [
'deletedMessage' # deletedStateMessage
]
}
_KNOWN_ADD_BANNER_TYPES = {
'addBannerToLiveChatCommand': [
'liveChatBannerRenderer',
'liveChatBannerHeaderRenderer'
'liveChatTextMessageRenderer'
]
}
    _KNOWN_REMOVE_BANNER_TYPES = {
        'removeBannerForLiveChatCommand': [
            'removeBanner'  # targetActionId
        ]
    }

    _KNOWN_TOOLTIP_ACTION_TYPES = {
        'showLiveChatTooltipCommand': [
            'tooltipRenderer'
        ]
    }

    # Not come across yet (actions/commands)
    # search "livechat"
    # https://github.com/reachomk/ytvanced/tree/master/src/main/java/com/google/protos/youtube/api/innertube
    # addLiveChatTextMessageFromTemplateAction
    # liveChatMessageBuyFlowHeadingRenderer
    # liveChatPaidMessageFooterRenderer
    # liveChatProductButtonRenderer
    # liveChatPurchaseMessageEndpoint
    # removeChatItemAction
    # replaceLiveChatRendererAction
    # showLiveChatDialogAction
    # showLiveChatSurveyCommand

    # Not checked for
    # _KNOWN_IGNORE_ACTION_TYPES = {
    #     'authorBadges': [
    #         'liveChatAuthorBadgeRenderer'
    #     ],
    #     'showLiveChatItemEndpoint': [
    #         'liveChatPaidStickerRenderer',
    #         'liveChatPaidMessageRenderer',
    #         'liveChatMembershipItemRenderer'
    #     ]
    # }

    _KNOWN_POLL_ACTION_TYPES = {
    }

    # Actions that are recognised but deliberately skipped.
    _KNOWN_IGNORE_ACTION_TYPES = {
        # TODO add support for poll actions
        'showLiveChatActionPanelAction': [],
        'updateLiveChatPollAction': [],
        'closeLiveChatActionPanelAction': []
    }

    # Union of all recognised action types -> list of their renderer names.
    _KNOWN_ACTION_TYPES = {
        **_KNOWN_ITEM_ACTION_TYPES,
        **_KNOWN_REMOVE_ACTION_TYPES,
        **_KNOWN_REPLACE_ACTION_TYPES,
        **_KNOWN_ADD_BANNER_TYPES,
        **_KNOWN_REMOVE_BANNER_TYPES,
        **_KNOWN_TOOLTIP_ACTION_TYPES,
        **_KNOWN_POLL_ACTION_TYPES,
        **_KNOWN_IGNORE_ACTION_TYPES
    }

    _KNOWN_IGNORE_MESSAGE_TYPES = [
        'liveChatPlaceholderItemRenderer'
    ]

    _KNOWN_MESSAGE_TYPES = []
    for action in _KNOWN_ACTION_TYPES:
        _KNOWN_MESSAGE_TYPES += _KNOWN_ACTION_TYPES[action]

    _KNOWN_SEEK_CONTINUATIONS = [
        'playerSeekContinuationData'
    ]

    _KNOWN_CHAT_CONTINUATIONS = [
        'invalidationContinuationData', 'timedContinuationData',
        'liveChatReplayContinuationData', 'reloadContinuationData'
    ]

    _KNOWN_CONTINUATIONS = _KNOWN_SEEK_CONTINUATIONS + _KNOWN_CHAT_CONTINUATIONS
    def get_playlist_items(self, playlist_id):
        # TODO: not implemented yet -- placeholder for retrieving the videos
        # of a playlist.
        pass
def _get_initial_info(self, url):
html = self._session_get(url).text
yt = re.search(self._YT_INITIAL_DATA_RE, html)
yt_initial_data = json.loads(yt.group(1)) if yt else None
return html, yt_initial_data
def _get_initial_video_info(self, video_id):
""" Get initial YouTube video information. """
original_url = '{}/watch?v={}'.format(self._YT_HOME, video_id)
html, yt_initial_data = self._get_initial_info(original_url)
player_response = re.search(self._YT_INITIAL_PLAYER_RESPONSE_RE, html)
if not yt_initial_data:
raise ParsingError(
'Unable to parse video data. Please try again.')
player_response_info = json.loads(player_response.group(1))
playability_status = player_response_info.get('playabilityStatus')
status = playability_status.get('status')
adaptive_formats = multi_get(
player_response_info, 'streamingData', 'adaptiveFormats')
last_modified = try_get(
adaptive_formats, lambda x: float(x[0]['lastModified']))
details = {
'start_time': last_modified,
'visitor_data': multi_get(yt_initial_data, 'responseContext', 'webResponseContextExtensionData', 'ytConfigData', 'visitorData')
}
# Try to get continuation info
contents = yt_initial_data.get('contents') or {}
conversation_bar = multi_get(
contents, 'twoColumnWatchNextResults', 'conversationBar')
sub_menu_items = multi_get(conversation_bar, 'liveChatRenderer', 'header', 'liveChatHeaderRenderer',
'viewSelector', 'sortFilterSubMenuRenderer', 'subMenuItems') or {}
details['continuation_info'] = {
x['title']: x['continuation']['reloadContinuationData']['continuation']
for x in sub_menu_items
}
details['is_live'] = 'Live chat' in details['continuation_info']
error_screen = playability_status.get('errorScreen')
# Only raise an error if there is no continuation info. Sometimes you
# are able to view chat, but not the video (e.g. for very long livestreams)
if not details['continuation_info']:
if error_screen: # There is a error screen visible
error_reasons = {
'reason': '',
'subreason': '',
}
error_info = try_get_first_value(error_screen)
for error_reason in error_reasons:
text = error_info.get(error_reason) or {}
error_reasons[error_reason] = text.get('simpleText') or try_get(
text, lambda x: self.parse_runs(x)) or error_info.pop(
'itemTitle', '') or error_info.pop(
'offerDescription', '') or playability_status.get(error_reason) or ''
error_message = ''
for error_reason in error_reasons:
if error_reasons[error_reason]:
if isinstance(error_reasons[error_reason], str):
error_message += ' {}.'.format(
error_reasons[error_reason].rstrip('.'))
else:
error_message += str(error_reasons[error_reason])
error_message = error_message.strip()
if status == 'ERROR':
raise VideoUnavailable(error_message)
elif status == 'LOGIN_REQUIRED':
raise LoginRequired(error_message)
elif status == 'UNPLAYABLE':
raise VideoUnplayable(error_message)
else:
# print('UNKNOWN STATUS', status)
# print(playability_status)
error_message = '{}: {}'.format(status, error_message)
raise VideoUnavailable(error_message)
elif not contents:
raise VideoUnavailable(
'Unable to find initial video contents.')
else:
# Video exists, but you cannot view chat for some reason
error_message = try_get(conversation_bar, lambda x: self.parse_runs(
x['conversationBarRenderer']['availabilityMessage']['messageRenderer']['text'])) or \
'Video does not have a chat replay.'
raise NoChatReplay(error_message)
video_details = player_response_info.get('videoDetails')
details['title'] = video_details.get('title')
details['duration'] = int(video_details.get('lengthSeconds')) or None
return details
    def _get_chat_messages(self, initial_info, params):
        """Generator yielding one parsed chat message/action dict at a time.

        *initial_info* comes from _get_initial_video_info (continuation
        tokens, liveness, visitor data); *params* carries user options
        (timing window, message filters, retry/timeout behaviour).
        """
        initial_continuation_info = initial_info.get('continuation_info')
        # stream_start_time = initial_info.get('start_time')
        is_live = initial_info.get('is_live')
        visitor_data = initial_info.get('visitor_data')
        # duration = initial_info.get('duration')

        start_time = ensure_seconds(params.get('start_time'))
        end_time = ensure_seconds(params.get('end_time'))

        # Top chat replay - Some messages, such as potential spam, may not be visible
        # Live chat replay - All messages are visible
        chat_type = params.get('chat_type').title()  # Live or Top

        continuation_title = '{} chat'.format(chat_type)
        api_type = 'live_chat'
        if not is_live:
            continuation_title += ' replay'
            api_type += '_replay'

        continuation = initial_continuation_info.get(continuation_title)
        if not continuation:
            raise NoContinuation(
                'Initial continuation information could not be found for {}.'.format(continuation_title))

        init_page = self._YOUTUBE_INIT_API_TEMPLATE.format(
            api_type, continuation)

        # must run to get first few messages, otherwise might miss some
        html, yt_info = self._get_initial_info(init_page)

        continuation_url = self._YOUTUBE_CHAT_API_TEMPLATE.format(api_type)
        continuation_params = {
            'context': {
                'client': {
                    'visitorData': visitor_data,
                    'userAgent': self.get_session_headers('User-Agent'),
                    'clientName': 'WEB',
                    'clientVersion': '2.{}.01.00'.format(datetime.today().strftime('%Y%m%d'))
                }
            }
        }

        offset_milliseconds = (
            start_time * 1000) if isinstance(start_time, int) else None

        # force_no_timeout = params.get('force_no_timeout')

        max_attempts = params.get('max_attempts')
        retry_timeout = params.get('retry_timeout')

        messages_groups_to_add = params.get('message_groups') or []
        messages_types_to_add = params.get('message_types') or []

        invalid_groups = set(messages_groups_to_add) - \
            self._MESSAGE_GROUPS.keys()
        if 'all' not in messages_groups_to_add and invalid_groups:
            raise InvalidParameter(
                'Invalid groups specified: {}'.format(invalid_groups))

        self.check_for_invalid_types(
            messages_types_to_add, self._MESSAGE_TYPES)

        def debug_log(*items):
            # Log at debug level, optionally pausing (pause_on_debug).
            log(
                'debug',
                items,
                params.get('pause_on_debug')
            )

        timeout = Timeout(params.get('timeout'))
        inactivity_timeout = Timeout(params.get(
            'inactivity_timeout'), Timeout.INACTIVITY)
        message_count = 0
        first_time = True
        while True:
            info = None

            # Fetch the next chunk of chat data (with retries).
            for attempt_number in attempts(max_attempts):
                timeout.check_for_timeout()
                try:
                    # The first iteration reuses yt_info from the init page;
                    # later iterations POST the continuation token.
                    if not first_time:
                        continuation_params['continuation'] = continuation
                        if not is_live and offset_milliseconds is not None:
                            continuation_params['currentPlayerState'] = {
                                'playerOffsetMs': offset_milliseconds}
                        log('debug', 'Continuation: {}'.format(continuation))
                        yt_info = self._session_post(
                            continuation_url, json=continuation_params).json()

                    info = multi_get(
                        yt_info, 'continuationContents', 'liveChatContinuation')
                    if not info:
                        raise NoContinuation(
                            'Live stream ended.' if is_live else 'No continuation.')
                    break  # successful retrieve
                except (UnexpectedHTML, RequestException) as e:
                    self.retry(attempt_number, max_attempts, e, retry_timeout)
                    self.clear_cookies()
                    continue
                except NoContinuation:
                    # Live stream ended
                    return

            actions = info.get('actions') or []

            if actions:
                for action in actions:
                    data = {}

                    # if it is a replay chat item action, must re-base it
                    replay_chat_item_action = action.get(
                        'replayChatItemAction')
                    if replay_chat_item_action:
                        offset_time = replay_chat_item_action.get(
                            'videoOffsetTimeMsec')
                        if offset_time:
                            data['time_in_seconds'] = float(offset_time) / 1000
                        action = replay_chat_item_action['actions'][0]

                    action.pop('clickTrackingParams', None)
                    original_action_type = try_get_first_key(action)

                    data['action_type'] = camel_case_split(
                        remove_suffixes(original_action_type, ('Action', 'Command')))

                    original_message_type = None
                    original_item = {}

                    # We now parse the info and get the message
                    # type based on the type of action
                    if original_action_type in self._KNOWN_ITEM_ACTION_TYPES:
                        original_item = multi_get(
                            action, original_action_type, 'item')
                        original_message_type = try_get_first_key(
                            original_item)
                        data = self._parse_item(original_item, data)

                    elif original_action_type in self._KNOWN_REMOVE_ACTION_TYPES:
                        original_item = action
                        if original_action_type == 'markChatItemAsDeletedAction':
                            original_message_type = 'deletedMessage'
                        else:  # markChatItemsByAuthorAsDeletedAction
                            original_message_type = 'banUser'
                        data = self._parse_item(original_item, data)

                    elif original_action_type in self._KNOWN_REPLACE_ACTION_TYPES:
                        original_item = multi_get(
                            action, original_action_type, 'replacementItem')
                        original_message_type = try_get_first_key(
                            original_item)
                        data = self._parse_item(original_item, data)

                    elif original_action_type in self._KNOWN_TOOLTIP_ACTION_TYPES:
                        original_item = multi_get(
                            action, original_action_type, 'tooltip')
                        original_message_type = try_get_first_key(
                            original_item)
                        data = self._parse_item(original_item, data)

                    elif original_action_type in self._KNOWN_ADD_BANNER_TYPES:
                        original_item = multi_get(
                            action, original_action_type, 'bannerRenderer')
                        if original_item:
                            original_message_type = try_get_first_key(
                                original_item)

                            # A banner has both a header and contents; merge
                            # the parse of both into the one message.
                            header = original_item[original_message_type].get(
                                'header')
                            parsed_header = self._parse_item(header)
                            header_message = parsed_header.get('message')

                            contents = original_item[original_message_type].get(
                                'contents')
                            parsed_contents = self._parse_item(contents)

                            data.update(parsed_header)
                            data.update(parsed_contents)
                            data['header_message'] = header_message
                        else:
                            debug_log(
                                'No bannerRenderer item',
                                'Action type: {}'.format(
                                    original_action_type),
                                'Action: {}'.format(action),
                                'Parsed data: {}'.format(data)
                            )

                    elif original_action_type in self._KNOWN_REMOVE_BANNER_TYPES:
                        original_item = action
                        original_message_type = 'removeBanner'
                        data = self._parse_item(original_item, data)

                    elif original_action_type in self._KNOWN_IGNORE_ACTION_TYPES:
                        continue
                        # ignore these
                    else:
                        # not processing these
                        debug_log(
                            'Unknown action: {}'.format(
                                original_action_type),
                            action,
                            data
                        )

                    test_for_missing_keys = original_item.get(
                        original_message_type, {}).keys()
                    missing_keys = test_for_missing_keys - self._KNOWN_KEYS

                    if not data:  # TODO debug
                        debug_log(
                            'Parse of action returned empty results: {}'.format(
                                original_action_type),
                            action
                        )

                    if missing_keys:  # TODO debugging for missing keys
                        debug_log(
                            'Missing keys found: {}'.format(missing_keys),
                            'Message type: {}'.format(
                                original_message_type),
                            'Action type: {}'.format(original_action_type),
                            'Action: {}'.format(action),
                            'Parsed data: {}'.format(data)
                        )

                    if original_message_type:
                        # e.g. 'liveChatTextMessageRenderer' -> 'text_message'
                        new_index = remove_prefixes(
                            original_message_type, 'liveChat')
                        new_index = remove_suffixes(new_index, 'Renderer')
                        data['message_type'] = camel_case_split(new_index)

                        # TODO add option to keep placeholder items
                        if original_message_type in self._KNOWN_IGNORE_MESSAGE_TYPES:
                            continue
                            # skip placeholder items
                        elif original_message_type not in self._KNOWN_ACTION_TYPES[original_action_type]:
                            debug_log(
                                'Unknown message type "{}" for action "{}"'.format(
                                    original_message_type,
                                    original_action_type
                                ),
                                'New message type: {}'.format(
                                    data['message_type']),
                                'Action: {}'.format(action),
                                'Parsed data: {}'.format(data)
                            )
                    else:  # no type # can ignore message
                        debug_log(
                            'No message type',
                            'Action type: {}'.format(original_action_type),
                            'Action: {}'.format(action),
                            'Parsed data: {}'.format(data)
                        )
                        continue

                    # check whether to skip this message or not, based on its type
                    to_add = self.must_add_item(
                        data,
                        self._MESSAGE_GROUPS,
                        messages_groups_to_add,
                        messages_types_to_add
                    )

                    if not to_add:
                        continue

                    # if from a replay, check whether to skip this message or not, based on its time
                    if not is_live:
                        # assume message is at beginning if it does not have a time component
                        time_in_seconds = data.get('time_in_seconds', 0)

                        before_start = start_time is not None and time_in_seconds < start_time
                        after_end = end_time is not None and time_in_seconds > end_time

                        if first_time and before_start:
                            continue  # first time and invalid start time
                        elif before_start or after_end:
                            return  # while actually searching, if time is invalid

                    # try to reconstruct time in seconds from timestamp and stream start
                    # if data.get('time_in_seconds') is None and data.get('timestamp') is not None:
                    #     data['time_in_seconds'] = (data['timestamp'] - stream_start_time)/1e6
                    #     data['time_text'] = seconds_to_time(int(data['time_in_seconds']))

                    # valid timing, add
                    inactivity_timeout.reset()
                    message_count += 1
                    yield data

                log('debug', 'Total number of messages: {}'.format(message_count))

            elif not is_live:
                # no more actions to process in a chat replay
                break
            else:
                # otherwise, is live, so keep trying
                log('debug', 'No actions to process.')
                inactivity_timeout.check_for_timeout()

            # assume there are no more chat continuations
            no_continuation = True

            # parse the continuation information
            for cont in info.get('continuations') or []:

                continuation_key = try_get_first_key(cont)
                continuation_info = cont[continuation_key]

                if continuation_key in self._KNOWN_CHAT_CONTINUATIONS:
                    # set new chat continuation
                    # overwrite if there is continuation data
                    continuation = continuation_info.get('continuation')
                    # there is a chat continuation
                    no_continuation = False
                elif continuation_key in self._KNOWN_SEEK_CONTINUATIONS:
                    pass
                    # ignore these continuations
                else:
                    debug_log(
                        'Unknown continuation: {}'.format(
                            continuation_key),
                        cont
                    )

                # sometimes continuation contains timeout info
                sleep_duration = continuation_info.get('timeoutMs')
                if sleep_duration:  # and not actions:  # and not force_no_timeout:
                    # if there is timeout info, there were no actions and the user
                    # has not chosen to force no timeouts, then sleep.
                    # This is useful for streams with varying number of messages
                    # being sent per second. Timeouts help prevent 429 errors
                    # (caused by too many requests)
                    sleep_duration = min(sleep_duration,
                                         timeout.time_until_timeout_ms(),
                                         inactivity_timeout.time_until_timeout_ms()
                                         )
                    log('debug', 'Sleeping for {}ms.'.format(sleep_duration))
                    time.sleep(sleep_duration / 1000)

            if no_continuation:  # no continuation, end
                break

            if first_time:
                first_time = False
def get_chat_by_video_id(self, video_id, params):
""" Get chat messages for a YouTube video, given its ID. """
initial_info = self._get_initial_video_info(video_id)
title = initial_info.get('title')
duration = initial_info.get('duration')
start_time = initial_info.get('start_time')
is_live = initial_info.get('is_live')
return Chat(
self._get_chat_messages(initial_info, params),
title=title,
duration=duration,
is_live=is_live,
start_time=start_time
)
def get_chat(self,
**kwargs
):
# get video id
url = kwargs.get('url')
match = re.search(self._VALID_URL, url)
if match:
video_id = match.group('id')
if video_id: # normal youtube video
return self.get_chat_by_video_id(match.group('id'), kwargs)
# else: # TODO add profile, etc.
# pass
# else:
# pass
# Raise unsupported URL type
| StarcoderdataPython |
4908957 | <filename>main.py
"""Entry point: dispatch a pipeline stage named on the command line."""
import sys

import run

_USAGE = ("Selected functionality does not exist. Please select from the following:\n"
          "* scraping\n"
          "* cleaning\n"
          "* create_models\n"
          "* test_model\n"
          "* data_exploration\n"
          "* analyze_results\n\n"
          "Syntax: python main.py scraping")

if __name__ == '__main__':
    try:
        func = sys.argv[1]
        method = run.functions[func]
    except (IndexError, KeyError, TypeError):
        # BUG FIX: a missing dict key raises KeyError (not the TypeError the
        # original caught), and a missing argv entry raises IndexError; both
        # now print the usage message. TypeError is kept for compatibility.
        print(_USAGE)
    else:
        # Run outside the try so genuine TypeErrors inside the selected
        # function are no longer masked by the usage message.
        method()
| StarcoderdataPython |
164992 | <reponame>RafeyIqbalRahman/Quantum-Teleportation<filename>quantum_teleportation.py
# -*- coding: utf-8 -*-
"""quantum_teleportation

Jupyter/Colab export demonstrating a quantum-teleportation circuit with
Qiskit: builds the circuit, inspects its statevector and unitary, then
measures and plots the outcome counts.
"""
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
# BUG FIX: '!pip install qiskit' is IPython shell magic and a SyntaxError in
# a plain .py file; it is commented out so the module is valid Python.
# Install qiskit beforehand: pip install qiskit
# !pip install qiskit
from qiskit import *
# %matplotlib inline

# Create a Quantum Circuit acting on a quantum register of five qubits
circ = QuantumCircuit(5)

# Add an H gate on qubit 1, putting this qubit in superposition.
circ.h(1)
# Add a CX (CNOT) gate on control qubit 1 and target qubit 4, putting
# the qubits in a Bell state.
circ.cx(1, 4)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1, putting
# the qubits in a GHz state.
circ.cx(0, 1)
# Add an H gate on qubit 0, putting this qubit in superposition.
circ.h(0)
# Add a CX (CNOT) gate on control qubit 1 and target qubit 4, putting
# the qubits in a GHz state.
circ.cx(1, 4)
# Add a Z gate on qubit 4, flipping this qubit.
circ.z(4)

circ.draw()

# Import Aer
from qiskit import Aer

# Run the quantum circuit on a statevector simulator backend
backend = Aer.get_backend('statevector_simulator')

# Create a Quantum Program for execution
job = execute(circ, backend)

result = job.result()

outputstate = result.get_statevector(circ, decimals=3)
print(outputstate)

from qiskit.visualization import plot_state_city
plot_state_city(outputstate)

# Run the quantum circuit on a unitary simulator backend
backend = Aer.get_backend('unitary_simulator')
job = execute(circ, backend)
result = job.result()

# Show the results
print(result.get_unitary(circ, decimals=3))

# Create a Quantum Circuit with 5 classical bits for the measurement results
meas = QuantumCircuit(5, 5)
meas.barrier(range(3))
# map the quantum measurement to the classical bits
meas.measure(range(5), range(5))

# The Qiskit circuit object supports composition using
# the addition operator.
qc = circ + meas

# drawing the circuit
qc.draw()

# Use Aer's qasm_simulator
backend_sim = Aer.get_backend('qasm_simulator')

# Execute the circuit on the qasm simulator.
# We've set the number of repeats of the circuit
# to be 8192
job_sim = execute(qc, backend_sim, shots=8192)

# Grab the results from the job.
result_sim = job_sim.result()

counts = result_sim.get_counts(qc)
print(counts)

from qiskit.visualization import plot_histogram
plot_histogram(counts)
| StarcoderdataPython |
9663705 | <filename>reversing/constellations/solver/solver.py<gh_stars>1-10
# python3 solver.py ../files/constellations
import sys
import struct

# Zodiac names in the order the binary indexes them: index = base-12 digit.
SIGNS = [
    "Cancer",
    "Aquarius",
    "Pisces",
    "Aries",
    "Leo",
    "Virgo",
    "Capricorn",
    "Gemini",
    "Scorpio",
    "Sagittarius",
    "Libra",
    "Taurus",
]

binary = open(sys.argv[1], "rb").read()

# Read the 0xb1-entry table of (pointer, length) pairs starting at 0xc95e8
# and pull out each referenced string.
words = []
for idx in range(0xb1):
    entry = 0xc95e8 + idx * 0x10
    ptr, length = struct.unpack("<QQ", binary[entry:entry + 0x10])
    ptr -= 0x400000  # convert virtual address to file offset
    words.append(binary[ptr:ptr + length].decode())

# Each word is '_'-joined constellation names forming a base-12 number;
# 5 * value mod 256 is one flag character.
flag = ""
for word in words:
    value = 0
    for name in word.split("_"):
        value = value * 12 + SIGNS.index(name)
    flag += chr(5 * value % 256)
print(flag)
| StarcoderdataPython |
149720 | <filename>more_exercise/exercise2.py
# Read the headcount and per-head expense, then report whether the total
# stays within the 50000 budget.
student_count = int(input("Enter number of Students :"))
expense_per_student = int(input("Enter per student expense :"))

total_expense = student_count * expense_per_student
within_budget = total_expense < 50000
print("Ham kharche ke andar hai " if within_budget else "kharche se bahar hai ")
11245666 | <gh_stars>10-100
"""Rules that make sprites vanish."""
from . import abstract_rule
from .contact_rules import get_contact_indices
import abc
import numpy as np
class Vanish(abstract_rule.AbstractRule, metaclass=abc.ABCMeta):
    """Abstract base class for rules that make sprites vanish.

    Subclasses choose which sprites vanish by implementing
    _get_vanish_inds(); step() performs the actual removal.
    """

    def __init__(self, layer):
        """Constructor.

        Args:
            layer: String. Must be a key in the environment state. Sprites in
                this layer will be removed from the state if they are indexed
                by self._get_vanish_inds().
        """
        self._layer = layer

    @abc.abstractmethod
    def _get_vanish_inds(self, state):
        """Takes in state and returns an iterable of indices.

        Returned indices must index which elements of state[self._layer]
        should vanish.
        """
        pass

    def step(self, state, meta_state):
        """Remove sprites in specified indices of vanishing layer."""
        del meta_state
        vanish_inds = self._get_vanish_inds(state)
        # ROBUSTNESS FIX: pop from highest index to lowest so each removal
        # cannot shift an index that is still pending. The previous
        # running-offset compensation silently required _get_vanish_inds to
        # return indices in ascending order.
        for i in sorted(vanish_inds, reverse=True):
            state[self._layer].pop(i)
class VanishByFilter(Vanish):
    """Makes a sprite vanish based on a boolean function of itself."""

    def __init__(self, layer, filter_fn=None):
        """Constructor.

        Args:
            layer: String. Must be a key in the environment state. Sprites in
                this layer are removed when filter_fn returns True on them.
            filter_fn: None or function mapping a sprite to a bool saying
                whether it should vanish. None vanishes every sprite in the
                layer.
        """
        super(VanishByFilter, self).__init__(layer)
        self._filter_fn = (lambda _: True) if filter_fn is None else filter_fn

    def _get_vanish_inds(self, state):
        # Indices (ascending) of the sprites for which the predicate holds.
        mask = [self._filter_fn(sprite) for sprite in state[self._layer]]
        return np.flatnonzero(mask)
class VanishOnContact(Vanish):
    """Makes a sprite vanish if it is in contact with another sprite."""

    def __init__(self, vanishing_layer, contacting_layer):
        """Constructor.

        Args:
            vanishing_layer: String. Must be a key in the environment state.
                Sprites in this layer vanish when they contact a sprite in
                contacting_layer.
            contacting_layer: String. Must be a key in the environment state.
        """
        super(VanishOnContact, self).__init__(vanishing_layer)
        self._get_contact_indices = get_contact_indices(
            vanishing_layer, contacting_layer)

    def _get_vanish_inds(self, state):
        # Contact pairs are (vanishing_index, contacting_index); collect and
        # deduplicate the first components.
        contact_pairs = self._get_contact_indices(state)
        vanishing = [pair[0] for pair in contact_pairs]
        return np.unique(vanishing)
| StarcoderdataPython |
143839 | <gh_stars>0
import torch
import numpy as np
from pathlib import Path
import pickle
from pytorch_memlab import MemReporter
from pytorch_memlab.utils import readable_size as mem_to_str

# Module-level reporter shared by the memory-inspection helpers below.
reporter = MemReporter()
def convert(arg, device: torch.device = None):
    """Recursively convert numpy arrays inside *arg* to torch tensors.

    Tuples, lists and dicts are rebuilt with their elements converted;
    numpy arrays become tensors (moved to *device* when given); anything
    else is returned unchanged.

    Args:
        arg: Arbitrarily nested structure of tuples/lists/dicts/ndarrays.
        device: Optional torch.device for the resulting tensors.

    Returns:
        The same structure with every ndarray replaced by a tensor.
    """
    if isinstance(arg, tuple):
        # BUG FIX: device is now propagated into nested containers (it was
        # previously dropped, leaving nested tensors on the CPU default).
        return tuple(convert(item, device) for item in arg)
    elif isinstance(arg, list):
        return [convert(item, device) for item in arg]
    elif isinstance(arg, np.ndarray):
        tensor = torch.from_numpy(arg)
        return tensor if device is None else tensor.to(device=device)
    elif isinstance(arg, dict):
        return {k: convert(v, device) for k, v in arg.items()}
    else:
        return arg
def check_success(this_res, res):
    """Elementwise absolute difference between two tensors.

    Both inputs are detached and moved to float32 on the CPU first.

    Returns:
        (total_error as a numpy scalar, max_error float, mean_error float)
    """
    lhs = this_res.detach().to(dtype=torch.float32, device='cpu')
    rhs = res.detach().to(dtype=torch.float32, device='cpu')
    diff = torch.abs(lhs - rhs)
    return diff.sum().numpy(), torch.max(diff).item(), torch.mean(diff).item()
def _to_tensor(value):
    """Coerce numpy arrays and python scalars to tensors; pass tensors through."""
    if isinstance(value, np.ndarray):
        return torch.from_numpy(value)
    if isinstance(value, float) or isinstance(value, int):
        return torch.Tensor([value])
    return value


def _check_children(pairs, depth, tol_max, tol_mean):
    """Recurse into ``(key, (a_i, b_i))`` pairs, print per-child results,
    and return the aggregated (sum, max, mean) errors."""
    str_depth = ''.join(['--' for _ in range(depth)])
    errs = []
    max_errs = []
    mean_errs = []
    for child_key, (a_i, b_i) in pairs:
        err_i, max_err_i, mean_err_i = check_recursive(
            a_i, b_i, depth=depth + 1, key=child_key)
        errs.append(err_i)
        max_errs.append(max_err_i)
        mean_errs.append(mean_err_i)
        succ = (max_err_i < tol_max) and (mean_err_i < tol_mean)
        if succ:
            print(f'{str_depth}>{child_key}: success = {succ}')
        else:
            print(f'{str_depth}>{child_key}: success = {succ}:\t{err_i}\t{max_err_i}\t{mean_err_i}')
    return np.sum(errs), max(max_errs), np.mean(mean_errs)


def check_recursive(a, b, depth: int = 0, key=None, tol_max: float = 1e-3,
                    tol_mean=1e-3):
    """Recursively compare nested tuples/lists/dicts of tensors or arrays.

    Prints a per-node success line (indented ``--`` per depth level) and
    returns ``(total_err, max_err, mean_err)`` aggregated over leaves.

    Improvements over the previous version: the identical tuple/list and
    dict branches are factored into ``_check_children``; the dict branch no
    longer shadows the ``key`` parameter; and the leaf no longer calls
    ``check_success`` twice for the same pair.
    """
    if isinstance(a, tuple) or isinstance(a, list):
        return _check_children(list(enumerate(zip(a, b))), depth,
                               tol_max, tol_mean)
    if isinstance(a, dict):
        # Note: as before, keys of ``a`` drive iteration; a key missing
        # from ``b`` raises KeyError.
        return _check_children([(k, (a[k], b[k])) for k in a.keys()],
                               depth, tol_max, tol_mean)
    a = _to_tensor(a)
    b = _to_tensor(b)
    err, max_err, mean_err = check_success(a, b)
    str_depth = ''.join(['--' for _ in range(depth)])
    succ = (max_err < tol_max) and (mean_err < tol_mean)
    print(f'{str_depth}> success = {succ}:\t{err}\t{max_err}\t{mean_err}')
    return err, max_err, mean_err
def load_data(args, filename):
    """Load the pickled debug dump ``<filename>.pkl`` from ``args.debug_dir``.

    Depending on how many objects the dump holds, returns:
      * 4 entries: ``(fn_args1, fn_args2, params, result)`` — arrays converted;
      * 3 entries: ``(fn_args, params, result)`` — arrays converted;
      * 2 entries: ``(fn_args, result)`` — only ``fn_args`` converted
        (matching the previous behavior of this branch).

    Fixes: the ``filename`` argument was previously ignored in favor of a
    hard-coded literal inside the f-string, and the 3-tuple branch shadowed
    the ``args`` parameter.
    """
    with open(Path(args.debug_dir) / Path(f'{filename}.pkl'), 'rb') as f:
        data = pickle.load(f)
    if len(data) == 4:
        fnargs1, fnargs2, params, res = data
        return convert(fnargs1), convert(fnargs2), params, convert(res)
    if len(data) == 3:
        fn_args, params, res = data
        return convert(fn_args), params, convert(res)
    elif len(data) == 2:
        fn_args, res = data
        return convert(fn_args), res
def get_total_alloc():
    """Sum the memory of all tensors that the module-level ``reporter``
    currently sees on device ``cuda:0`` and return it in bytes.

    Element counts are accumulated too, but only the byte total is
    returned (matching the original behavior).
    """
    reporter.collect_tensor()
    reporter.get_stats()
    target_device = torch.device('cuda:0')
    total_mem = 0
    total_numel = 0
    for device, tensor_stats in reporter.device_tensor_stat.items():
        if device != target_device:
            continue
        for _name, _size, numel, mem in tensor_stats:
            total_mem += mem
            total_numel += numel
    return total_mem
11202906 | from allegro.proxy import load_from_file, filter_proxies, scrape_free_proxy_lists
import pytest
@pytest.mark.vcr()
def test_free_proxies():
    """Scraping the public proxy lists should yield exactly 50 entries."""
    scraped = scrape_free_proxy_lists()
    assert len(scraped) == 50
def test_proxies_from_file(monkeypatch, tmpdir):
    """load_from_file should return one entry per address line."""
    monkeypatch.chdir(tmpdir)
    addresses = [
        "192.168.3.11:8123",
        "192.168.127.12:8123",
        "192.168.3.11:54256",
        "172.16.17.32:8123",
        "172.16.58.3:8123",
        "192.168.3.11:45396",
        "192.168.3.11:9898",
        "192.168.127.12:8123",
        "172.16.58.3:9300",
        "192.168.127.12:80",
        "192.168.127.12:443",
        "172.16.58.3:80",
        "172.16.58.3:8123",
        "192.168.127.12:8123",
        "192.168.3.11:80",
        "172.16.17.32:80",
        "192.168.127.12:65103",
        "192.168.127.12:8123",
        "192.168.127.12:9999",
    ]
    with open("proxies.txt", "w") as handle:
        handle.write("\n".join(addresses))
    loaded = load_from_file("proxies.txt")
    assert len(loaded) == 19
@pytest.mark.vcr()
def test_check_proxies():
    """Filtering with a short timeout should drop at least one proxy."""
    candidates = scrape_free_proxy_lists()
    live = filter_proxies(candidates, timeout=1)
    assert len(live) != 50
| StarcoderdataPython |
4852479 | <filename>controllers/doctors_HISP.py
from bottle import route, view, request, response
from datetime import datetime
import sqlite3
from permissions import permissions
@route('/doctors_HISP')
@view('doctors_HISP')
def doctors_HISP():
    """Render the 'connect with your doctor' page with an empty message."""
    context = {
        'title': 'Connect with your doctor',
        'message': '',
        'year': datetime.now().year,
    }
    return context
@route('/doctors_HISP', method='POST')
@view('doctors_HISP')
def do_doctors_HISP():
    """Persist the submitted HISP server address for the logged-in user.

    Reads the ``hispAddress`` form field and the signed ``userid`` cookie,
    updates the user's ``server`` column, then renders the permissions view.
    """
    server = request.forms.get('hispAddress').strip()
    userid = request.get_cookie("userid", secret='teamfin')
    # TODO refactor the db out and pass in as an argument to sign_up method
    db = sqlite3.connect('database/jogrx.db')
    try:
        c = db.cursor()
        # Parameterized query: safe against SQL injection from form input.
        c.execute("UPDATE user SET server=? WHERE id=?", (server, int(userid)))
        db.commit()
        c.close()
    finally:
        # Fix: the connection was previously left open (only the cursor was
        # closed), leaking a sqlite3 handle per request.
        db.close()
    return permissions()
| StarcoderdataPython |
4890112 | <reponame>IBM/api-samples
#!/usr/bin/env python3
# This sample demonstrates how to use the siem endpoint in the
# REST API.
# For this scenario to work there must already be offenses on the system the
# sample is being run against. The scenario demonstrates the following
# actions:
# - How to get offenses.
# - How to filter the data that is returned with the fields parameter.
# - How to filter the data that is returned with the filter parameter.
# - How to page through the results using the range parameter.
# To view a list of the endpoints with the parameters they accept, you can view
# the REST API interactive help page on your deployment at
# https://<hostname>/api_doc. You can also retrieve a list of available
# endpoints with the REST API itself at the /api/help/endpoints endpoint.
import json
import os
import sys
import importlib
sys.path.append(os.path.realpath('../modules'))
client_module = importlib.import_module('RestApiClient')
SampleUtilities = importlib.import_module('SampleUtilities')
def main():
    """Walk through four ways of GETting offenses from the QRadar REST API.

    Demonstrates: a bare GET of ``siem/offenses``, field selection with the
    ``fields`` parameter, server-side filtering with the ``filter``
    parameter, and interactive paging via the ``Range`` header.  Exits the
    process with status 1 on any non-200 response.
    """
    # First we have to create our client
    client = client_module.RestApiClient(version='6.0')

    # -------------------------------------------------------------------------
    # Basic 'GET'
    # In this example we'll be using the GET endpoint of siem/offenses without
    # any parameters. This will print absolutely everything it can find, every
    # parameter of every offense.

    # Send in the request
    SampleUtilities.pretty_print_request(client,
                                         'siem/offenses',
                                         'GET')
    response = client.call_api('siem/offenses', 'GET')

    # Check if the success code was returned to ensure the call to the API was
    # successful.
    if (response.code != 200):
        print('Failed to retrieve the list of offenses')
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # Since the previous call had no parameters and response has a lot of text,
    # we'll just print out the number of offenses
    response_body = json.loads(response.read().decode('utf-8'))
    print('Number of offenses retrived: ' + str(len(response_body)))

    # -------------------------------------------------------------------------
    # Using the fields parameter with 'GET'
    # If you just print out the result of a call to the siem/offenses GET
    # endpoint there will be a lot of fields displayed which you have no
    # interest in. Here, the fields parameter will make sure the only the
    # fields you want are displayed for each offense.

    # Setting a variable for all the fields that are to be displayed
    # (backslash joins the two lines into one comma-separated string).
    fields = '''id,status,description,offense_type,offense_source,magnitude,\
source_network,destination_networks,assigned_to'''

    # Send in the request
    SampleUtilities.pretty_print_request(client, 'siem/offenses?fields=' +
                                         fields, 'GET')
    response = client.call_api('siem/offenses?fields=' + fields, 'GET')

    # Once again, check the response code
    if (response.code != 200):
        print('Failed to retrieve list of offenses')
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # This time we will print out the data itself
    SampleUtilities.pretty_print_response(response)

    # -------------------------------------------------------------------------
    # Using the filter parameter with 'GET'
    # Sometimes you'll want to narrow down your search to just a few offenses.
    # You can use the filter parameter to carefully select what is returned
    # after the call by the value of the fields.
    # Here we're only looking for OPEN offenses, as shown by the value of
    # 'status' being 'OPEN'

    # Send in the request
    SampleUtilities.pretty_print_request(
        client, 'siem/offenses?fields=' + fields + '&filter=status=OPEN',
        'GET')
    response = client.call_api(
        'siem/offenses?fields=' + fields + '&filter=status=OPEN', 'GET')

    # Always check the response code
    if (response.code != 200):
        print('Failed to retrieve list of offenses')
        SampleUtilities.pretty_print_response(response)
        sys.exit(1)

    # And output the data
    SampleUtilities.pretty_print_response(response)

    # -------------------------------------------------------------------------
    # Paging the 'GET' data using 'Range'
    # If you have a lot of offenses, then you may want to browse through them
    # just a few at a time. In that case, you can use the Range header to
    # limit the number of offenses shown in a single call.
    # In this example only OPEN offenses will be used.

    # Call the endpoint so that we can find how many OPEN offenses there are.
    response = client.call_api('siem/offenses?filter=status=OPEN', 'GET')
    num_of_open_offenses = len(json.loads(response.read().decode('utf-8')))

    # Copy the headers into our own variable
    range_header = client.get_headers().copy()

    # Set the starting point (indexing starts at 0)
    page_position = 0
    # and choose how many offenses you want to display at a time.
    offenses_per_page = 5

    # Looping here in order to repeatedly show 5 offenses at a time until we've
    # seen all of the OPEN offenses or exit character q is pressed
    input_string = ""
    while True:
        # Change the value for Range in the header in the format item=x-y
        range_header['Range'] = ('items=' + str(page_position) + '-' +
                                 str(page_position + offenses_per_page - 1))

        # Send in the request
        SampleUtilities.pretty_print_request(
            client, 'siem/offenses?fields=' + fields + '&filter=status=OPEN',
            'GET', headers=range_header)
        response = client.call_api(
            'siem/offenses?fields=' + fields + '&filter=status=OPEN', 'GET',
            headers=range_header)

        # As usual, check the response code
        if (response.code != 200):
            print('Failed to retrieve list of offenses')
            SampleUtilities.pretty_print_response(response)
            sys.exit(1)

        # Output the data
        SampleUtilities.pretty_print_response(response)

        # Check to see if all the offenses have been displayed
        if (page_position + offenses_per_page >= num_of_open_offenses):
            print('All offenses have been printed to the screen.')
            break
        else:
            # Wait for the user to display the next set or quit
            input_string = input(
                'Push enter to bring up the next ' + str(offenses_per_page) +
                ' offenses, or q to quit. ')
            # If the user entered the character 'q', quit.
            if (input_string == 'q'):
                break
            page_position += offenses_per_page
if __name__ == "__main__":
main()
| StarcoderdataPython |
6628394 | <reponame>kosslab-kr/Tizen-NN-Framework
# model
# Model definition for an element-wise ADD over two 2-element float32 vectors.
# (Model/Input/Int32Scalar/Output/Example are provided by the NN test harness.)
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{2}") # a vector of 2 float32s
i2 = Input("op2", "TENSOR_FLOAT32", "{2}") # another vector of 2 float32s
b0 = Int32Scalar("b0", 0) # an int32_t scalar bias
# NOTE(review): in NNAPI ADD the third operand is the fused activation code
# (0 == NONE); the 'bias' naming above looks inherited — verify.
i3 = Output("op3", "TENSOR_FLOAT32", "{2}")
model = model.Operation("ADD", i1, i2, b0).To(i3)

# Example 1. Input in operand 0,
input0 = {i1: # input 0
          [1.0, 2.0],
          i2: # input 1
          [3.0, 4.0]}

# Expected element-wise sum of the two inputs.
output0 = {i3: # output 0
           [4.0, 6.0]}

# Instantiate an example
Example((input0, output0))
| StarcoderdataPython |
4974839 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-21 10:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration for codenerix_geodata.

    Replaces the default model permissions on every geoname translation
    model (city/continent/country/province/region, EN and ES variants)
    with a tuple that adds the custom 'list' permission.
    """

    dependencies = [
        ('codenerix_geodata', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='citygeonameen',
            options={'default_permissions': ('add', 'change', 'delete', 'view', 'list')},
        ),
        migrations.AlterModelOptions(
            name='citygeonamees',
            options={'default_permissions': ('add', 'change', 'delete', 'view', 'list')},
        ),
        migrations.AlterModelOptions(
            name='continentgeonameen',
            options={'default_permissions': ('add', 'change', 'delete', 'view', 'list')},
        ),
        migrations.AlterModelOptions(
            name='continentgeonamees',
            options={'default_permissions': ('add', 'change', 'delete', 'view', 'list')},
        ),
        migrations.AlterModelOptions(
            name='countrygeonameen',
            options={'default_permissions': ('add', 'change', 'delete', 'view', 'list')},
        ),
        migrations.AlterModelOptions(
            name='countrygeonamees',
            options={'default_permissions': ('add', 'change', 'delete', 'view', 'list')},
        ),
        migrations.AlterModelOptions(
            name='provincegeonameen',
            options={'default_permissions': ('add', 'change', 'delete', 'view', 'list')},
        ),
        migrations.AlterModelOptions(
            name='provincegeonamees',
            options={'default_permissions': ('add', 'change', 'delete', 'view', 'list')},
        ),
        migrations.AlterModelOptions(
            name='regiongeonameen',
            options={'default_permissions': ('add', 'change', 'delete', 'view', 'list')},
        ),
        migrations.AlterModelOptions(
            name='regiongeonamees',
            options={'default_permissions': ('add', 'change', 'delete', 'view', 'list')},
        ),
    ]
| StarcoderdataPython |
8048482 | #!/usr/bin/env python
# coding: utf-8
# # section 5: Colloctions
#
# ### writer : <NAME> 1954128
# ### 6.Collections.deque :
#
#
# In[ ]:
from collections import deque
# Read the number of operations, apply each deque command in turn, then
# print the resulting contents space-separated.
op_count = int(input())
queue = deque()
for _ in range(op_count):
    parts = input().split()
    command = parts[0]
    if command == 'append':
        queue.append(parts[1])
    elif command == 'pop':
        queue.pop()
    elif command == 'appendleft':
        queue.appendleft(parts[1])
    else:
        # Any other command is treated as popleft (matches original logic).
        queue.popleft()
print(*queue)
#
| StarcoderdataPython |
1942464 | <reponame>rafaelscnunes/hackerrank
"""
Created on 13/nov/2019 with PyCharm
INPI - Instituto Nacional da Propriedade Industrial
@author: <NAME> - <EMAIL>
@title: hackerrank - test_plusminus.py
--------------------------------------------------------------------------------
*** Description of the module function ***
--------------------------------------------------------------------------------
"""
import unittest
import plusminus
class MyTestCase(unittest.TestCase):
    """Unit tests for ``plusminus.plusMinus``."""

    def setUp(self) -> None:
        pass

    def test_something(self):
        """The positive/negative/zero ratios should be 6-decimal formatted."""
        precision = '{:.6f}'
        expected = [precision.format(value)
                    for value in (0.500000, 0.333333, 0.166667)]
        self.assertEqual(expected,
                         plusminus.plusMinus([-4, 3, -9, 0, 4, 1], precision))

    def tearDown(self) -> None:
        pass
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3564901 | <reponame>DengBinbin/BuildingMachineLearningSystemsWithPython
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by <NAME> and <NAME>
# published by PACKT Publishing
#
# It is made available under the MIT License
from collections import defaultdict
from itertools import chain
from gzip import GzipFile
# Minimum number of baskets an itemset must appear in to count as frequent.
minsupport = 280

# Each line of retail.dat.gz is one basket: whitespace-separated item ids.
dataset = [[int(tok) for tok in line.strip().split()]
           for line in GzipFile('retail.dat.gz')]

counts = defaultdict(int)
for elem in chain(*dataset):
    counts[elem] += 1

# Only elements that have at least minsupport should be considered.
# `valid` is the set of all frequent single items.
valid = set(el for el, c in counts.items() if (c >= minsupport))

# Filter the dataset to contain only valid elements
# (This step is not strictly necessary, but will make the rest of the code
# faster as the itemsets will be smaller):
# For each basket, drop the items that are not frequent single items.
dataset = [[el for el in ds if (el in valid)] for ds in dataset]

# Convert to frozenset for fast processing
dataset = [frozenset(ds) for ds in dataset]

itemsets = [frozenset([v]) for v in valid]
freqsets = itemsets[:]
for i in range(2):
    print("At iteration {}, number of frequent baskets: {}".format(
        i, len(itemsets)))
    nextsets = []

    tested = set()
    # `it` is one current frequent itemset, `v` one frequent single item.
    for it in itemsets:
        for v in valid:
            # If the single item is not already in the itemset, merge them
            # to form a candidate of size len(it) + 1.
            if v not in it:
                # Create a new candidate set by adding v to it
                c = (it | frozenset([v]))
                # Check if we have tested it already:
                if c in tested:
                    continue
                tested.add(c)

                # Count support by looping over dataset
                # This step is slow.
                # Check `apriori.py` for a better implementation.
                # For each basket d, count 1 if it contains the candidate c;
                # keep c if the total exceeds the minimum support.
                support_c = sum(1 for d in dataset if d.issuperset(c))
                if support_c > minsupport:
                    nextsets.append(c)
    freqsets.extend(nextsets)
    itemsets = nextsets
    if not len(itemsets):
        break
print("Finished!")
def rules_from_itemset(itemset, dataset, minlift=1.):
    """Print association rules derived from one frequent ``itemset``.

    For every item in ``itemset``, the rule ``itemset - {item} -> {item}``
    is evaluated and printed when its lift exceeds ``minlift``.

    Args:
        itemset: frozenset of items (a frequent itemset, size >= 2).
        dataset: list of frozensets, one per transaction (basket).
        minlift: minimum lift threshold for a rule to be reported.

    Fix: removed leftover debug code (a ``print`` of the raw counts followed
    by ``import sys; sys.exit(1)``) that aborted the whole process on the
    first item, so the function could never actually report a rule.
    """
    nr_transactions = float(len(dataset))
    for item in itemset:
        consequent = frozenset([item])
        antecedent = itemset - consequent
        # base: number (then fraction) of transactions containing the consequent.
        base = 0.0
        # acount: antecedent count
        acount = 0.0
        # ccount: antecedent + consequent count (full itemset)
        ccount = 0.0
        # d is one basket, item one single item.
        for d in dataset:
            if item in d:
                base += 1
            if d.issuperset(itemset):
                ccount += 1
            if d.issuperset(antecedent):
                acount += 1
        base /= nr_transactions
        # NOTE(review): acount == 0 raises ZeroDivisionError; for itemsets
        # produced by the apriori pass above this cannot happen.
        p_y_given_x = ccount / acount
        lift = p_y_given_x / base
        if lift > minlift:
            print('Rule {0} -> {1} has lift {2}'
                  .format(antecedent, consequent, lift))
# Emit association rules for every frequent itemset with at least two items.
for itemset in freqsets:
    if len(itemset) > 1:
        rules_from_itemset(itemset, dataset, minlift=4.)
| StarcoderdataPython |
3380404 | <filename>deployutils/apps/django/themes.py<gh_stars>1-10
# Copyright (c) 2020, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import unicode_literals
import logging, os, re, shutil, subprocess, zipfile
from django.conf import settings as django_settings
from django.template.base import (Parser, NodeList, TemplateSyntaxError)
from django.template.backends.django import DjangoTemplates
from django.template.context import Context
from django.utils.encoding import force_text
from django_assets.templatetags.assets import assets
from jinja2.lexer import Lexer
from webassets import Bundle
from . import settings
from .compat import (DebugLexer, TokenType, do_static, get_html_engine,
six, urlparse, urlunparse)
from ...copy import shell_command
LOGGER = logging.getLogger(__name__)

# States for the token-stream state machines that locate {% assets %} blocks
# (used by ``_render_assets`` and the Jinja2 branch of ``install_templates``).
STATE_BLOCK_BEGIN = 1
STATE_ASSETS_BEGIN = 2
STATE_ASSETS_END = 3
STATE_BLOCK_CONTENT = 4
class URLRewriteWrapper(object):
    """File-like wrapper that prefixes '="/static' URLs with a path prefix.

    Every chunk written through ``write`` has occurrences of ``="/static``
    rewritten to ``="/<path_prefix>/static`` before being forwarded to the
    wrapped stream. With no prefix, text passes through unchanged.
    """

    def __init__(self, file_obj, path_prefix=None):
        self.wrapped = file_obj
        self.path_prefix = path_prefix

    def write(self, text):
        if not self.path_prefix:
            return self.wrapped.write(text)
        rewritten = text.replace(
            '="/static', '="/%s/static' % self.path_prefix)
        return self.wrapped.write(rewritten)
class Template(object):
    """Minimal template stand-in that only carries an ``engine`` attribute.

    Assigned to ``Context.template`` in ``AssetsParser.__init__`` so that
    template-tag rendering can find an engine.
    """

    def __init__(self, engine):
        self.engine = engine
class AssetsParser(Parser):
    """Django template parser that expands {% assets %} and {% static %} tags.

    Instead of building a node tree, ``parse_through`` streams tokens to
    ``dest_stream``: text/var/other-block tokens are re-emitted verbatim as
    template syntax, while ``assets`` and ``static`` tags are rendered to
    their concrete output.
    """

    def __init__(self, tokens, dest_stream,
                 libraries=None, builtins=None, origin=None):
        #pylint:disable=too-many-arguments
        super(AssetsParser, self).__init__(tokens,
            libraries=libraries, builtins=builtins, origin=origin)
        self.dest_stream = dest_stream
        self.context = Context()
        engine, _, _ = get_html_engine()
        # Give the context a template with an engine so tag rendering works.
        self.context.template = Template(engine)

    def parse_through(self, parse_until=None):
        """Stream all tokens to ``dest_stream``, rendering assets/static tags.

        Stops (returning an empty ``NodeList``) when a block command listed
        in ``parse_until`` is encountered.
        """
        if parse_until is None:
            parse_until = []
        nodelist = NodeList()
        while self.tokens:
            token = self.next_token()
            if six.PY2:
                contents = token.contents.encode('utf8')
            else:
                contents = token.contents
            if token.token_type == TokenType.TEXT:
                self.dest_stream.write(contents)
            elif token.token_type == TokenType.VAR:
                # Re-emit variables untouched as template syntax.
                self.dest_stream.write("{{%s}}" % contents)
            elif token.token_type == TokenType.BLOCK:
                try:
                    command = token.contents.split()[0]
                except IndexError:
                    self.empty_block_tag(token)
                if command in parse_until:
                    # put token back on token list so calling
                    # code knows why it terminated
                    self.prepend_token(token)
                    return nodelist
                if command == 'assets':
                    try:
                        # XXX This should work but for some reason debug does
                        # not get propagated.
                        # Lost in webassets.bundle.resolve_contents
                        token.contents += ' debug=False'
                        assets_string = str(
                            assets(self, token).render(self.context))
                        self.dest_stream.write(assets_string)
                    except TemplateSyntaxError as err:
                        if hasattr(self, 'error'):
                            raise self.error(token, err)
                        # Django < 1.8
                        if not self.compile_function_error(token, err):
                            raise
                elif command == 'static':
                    self.dest_stream.write(
                        do_static(self, token).render(self.context))
                else:
                    # Any other block tag is re-emitted verbatim.
                    self.dest_stream.write("{%% %s %%}" % contents)
            elif token.token_type == TokenType.COMMENT:
                pass
def _render_assets(tokens, env):
    """Render a Jinja2 ``{% assets %}`` token run into concrete asset URLs.

    ``tokens`` is the buffered token sequence of one assets block (tag plus
    body); ``env`` is the webassets environment.  The bundle name is read
    from the tag's string argument, its URLs are resolved with debug forced
    off, and each URL is substituted for the ``{{ ASSET_URL }}`` placeholder
    in the block body.  Returns the rendered text.
    """
    #pylint:disable=too-many-locals
    # Construct a bundle with the given options
    output = None
    filters = None
    depends = None
    bundle_kwargs = {
        'output': output,
        'filters': filters,
        'debug': False, # because of `Bundle.iterbuild`, this is useless.
        'depends': depends,
    }
    # Resolve bundle names.
    files = []
    state = None
    buffered_tokens = []
    # Small state machine: tag header -> collect body tokens until the
    # closing tag begins.
    for token in tokens:
        if state is None:
            if token[1] == 'block_begin':
                state = STATE_BLOCK_BEGIN
        elif state == STATE_BLOCK_BEGIN:
            if token[1] == 'name':
                # nothing to be done?
                pass
            elif token[1] == 'string':
                files = [token[2][1:-1]] # removes '"'.
            if token[1] == 'block_end':
                state = STATE_BLOCK_CONTENT
        elif state == STATE_BLOCK_CONTENT:
            if token[1] == 'block_begin':
                state = None
            else:
                buffered_tokens += [token]
    content = ''.join([token[2] for token in buffered_tokens]).strip()
    urls = []
    bundle_names = []
    for fname in files:
        try:
            # Known bundle: resolve its URLs with debug temporarily off.
            bundle = env[fname]
            debug = bundle.config.get('debug')
            bundle.config.update({'debug': False})
            with bundle.bind(env):
                urls += bundle.urls()
            bundle.config.update({'debug': debug})
        except KeyError:
            bundle_names.append(fname)
    if bundle_names:
        # Names not registered in the environment: wrap in an ad-hoc Bundle.
        bundle = Bundle(*bundle_names, **bundle_kwargs)
        # Retrieve urls (this may or may not cause a build)
        with bundle.bind(env):
            urls += bundle.urls()
    # For each url, execute the content of this template tag (represented
    # by the macro ```caller`` given to use by Jinja2).
    result = content
    for url in urls:
        look = re.match(r'(.*)({{\s*ASSET_URL.*}})(.*)', content)
        if look:
            # Strip query/fragment (e.g. cache-busting suffixes) from the URL.
            parts = urlparse(url)
            url = urlunparse((parts.scheme, parts.netloc, parts.path,
                None, None, None))
            result = look.group(1) + url + look.group(3)
        else:
            result = content
    return result
def get_template_search_path(app_name=None):
    """Build the ordered list of template directories to search.

    The app-specific multitier theme directory (when present on disk) comes
    first, followed by the directories declared in Django's ``TEMPLATES``
    setting, then any legacy ``TEMPLATE_DIRS``/``TEMPLATES_DIRS`` settings.
    """
    search_path = []
    if app_name:
        candidate = os.path.join(
            settings.MULTITIER_THEMES_DIR, app_name, 'templates')
        if os.path.isdir(candidate):
            search_path.append(candidate)
    # Django 1.8+ style configuration.
    for loader in getattr(django_settings, 'TEMPLATES', []):
        for dir_path in loader['DIRS']:
            if dir_path not in search_path:
                search_path.append(dir_path)
    # Settings used by previous Django versions.
    for field_name in ('TEMPLATE_DIRS', 'TEMPLATES_DIRS'):
        search_path.extend(getattr(django_settings, field_name, []))
    return search_path
def init_build_and_install_dirs(app_name, build_dir=None, install_dir=None):
    """Create a fresh per-app build directory and ensure the install dir.

    ``build_dir`` defaults to ``<cwd>/build`` and ``install_dir`` to the
    current directory. Any pre-existing ``<build_dir>/<app_name>`` tree is
    wiped. Returns ``(build_dir, install_dir)`` as absolute paths.
    """
    base_build = build_dir if build_dir else os.path.join(os.getcwd(), 'build')
    base_install = install_dir if install_dir else os.getcwd()
    resolved_build = os.path.join(
        os.path.normpath(os.path.abspath(base_build)), app_name)
    if os.path.exists(resolved_build):
        # Start from a clean slate for this app.
        shutil.rmtree(resolved_build)
    os.makedirs(resolved_build)
    resolved_install = os.path.normpath(os.path.abspath(base_install))
    if not os.path.isdir(resolved_install):
        os.makedirs(resolved_install)
    return resolved_build, resolved_install
def package_assets(app_name, build_dir):#pylint:disable=unused-argument
    """Rsync the collected Django static files into ``<build_dir>/public``.

    When ``STATIC_ROOT`` ends with the path components of ``STATIC_URL``,
    the copied source is adjusted so the app-name prefix appears in the
    destination layout.  Requires ``/usr/bin/rsync`` on both ends.
    """
    resources_dest = os.path.join(build_dir, 'public')
    # Copy local resources (not under source control) to resources_dest.
    excludes = ['--exclude', '*~', '--exclude', '.DS_Store',
                '--exclude', '.webassets-cache']
    app_static_root = django_settings.STATIC_ROOT
    assert app_static_root is not None and app_static_root
    # When app_static_root ends with the static_url, we will want
    # to insert the app_name prefix.
    static_root_parts = app_static_root.split(os.sep)
    root_parts_idx = len(static_root_parts)
    root_idx = len(app_static_root)
    found = False
    orig_static_url = django_settings.STATIC_URL
    orig_static_url_parts = orig_static_url.split('/')
    if not orig_static_url_parts[0]:
        # STATIC_URL starts with '/': drop the leading empty component.
        orig_static_url_parts = orig_static_url_parts[1:]
    if orig_static_url_parts[0] == app_name:
        orig_static_url_parts = orig_static_url_parts[1:]
    # Walk the URL components backwards, matching them against the tail of
    # STATIC_ROOT's path components.
    for url_part in reversed(orig_static_url_parts):
        found = True # With ``break`` later on to default to False
                     # when zero iteration.
        if url_part:
            root_parts_idx = root_parts_idx - 1
            root_idx = root_idx - len(static_root_parts[root_parts_idx]) - 1
            if url_part != static_root_parts[root_parts_idx]:
                found = False
                break
    if found:
        app_static_root = os.path.join(
            app_static_root[:root_idx], django_settings.STATIC_URL[1:-1])
        # static_url is required per-Django to start and ends with a '/'
        # (i.e. '/static/').
    # If we have a trailing '/', rsync will copy the content
    # of the directory instead of the directory itself.
    cmdline = (['/usr/bin/rsync']
        + excludes + ['-az', '--safe-links', '--rsync-path', '/usr/bin/rsync']
        + [app_static_root, resources_dest])
    LOGGER.info(' '.join(cmdline))
    shell_command(cmdline)
def package_theme(app_name, build_dir,
                  excludes=None, includes=None, path_prefix=None,
                  template_dirs=None):
    """
    Package resources and templates for a multi-tier environment
    into a zip file.

    Templates are pre-compiled into ``*build_dir*/*app_name*/templates``.
    Compilation means {% assets '*path*' %} and {% static '*path*' %} tags
    are replaced by their compiled expression.
    """
    #pylint:disable=too-many-locals,too-many-arguments
    templates_dest = os.path.join(build_dir, 'templates')
    # override STATIC_URL to prefix APP_NAME.
    # NOTE(review): this mutates the global Django settings for the rest of
    # the process; it is not restored afterwards.
    orig_static_url = django_settings.STATIC_URL
    if (app_name != settings.APP_NAME
        and not django_settings.STATIC_URL.startswith('/' + app_name)):
        django_settings.STATIC_URL = '/' + app_name + orig_static_url
    if not os.path.exists(templates_dest):
        os.makedirs(templates_dest)
    if template_dirs is None:
        template_dirs = get_template_search_path(app_name)
    for template_dir in template_dirs:
        # The first of template_dirs usually contains the most specialized
        # templates (ie. the ones we truely want to install).
        if (templates_dest
            and not os.path.samefile(template_dir, templates_dest)):
            install_templates(template_dir, templates_dest,
                excludes=excludes, includes=includes, path_prefix=path_prefix)
def fill_package(app_name, build_dir=None, install_dir=None):
    """
    Creates the theme package (.zip) from templates and optionally
    assets installed in the ``build_dir``.

    Returns the path of the created ``<install_dir>/<app_name>.zip``.
    """
    archive_path = os.path.join(install_dir, '%s.zip' % app_name)
    with zipfile.ZipFile(archive_path, 'w') as archive:
        fill_package_zip(archive, os.path.dirname(build_dir), prefix=app_name)
    return archive_path
def fill_package_zip(zip_file, srcroot, prefix=''):
    """Recursively add every file under ``<srcroot>/<prefix>`` to ``zip_file``.

    Archive names are the paths relative to ``srcroot`` (i.e. they keep the
    ``prefix`` component). Directories are descended into depth-first.
    """
    current_dir = os.path.join(srcroot, prefix)
    for entry in os.listdir(current_dir):
        rel_path = os.path.join(prefix, entry)
        abs_path = os.path.join(srcroot, rel_path)
        if os.path.isfile(abs_path):
            zip_file.write(abs_path, rel_path)
        elif os.path.isdir(abs_path):
            fill_package_zip(zip_file, srcroot, prefix=rel_path)
def install_templates(srcroot, destroot, prefix='', excludes=None,
                      includes=None, path_prefix=None):
    #pylint:disable=too-many-arguments,too-many-statements
    """
    Expand link to compiled assets all templates in *srcroot*
    and its subdirectories.

    Files matching an ``excludes`` pattern are skipped unless re-admitted
    by an ``includes`` pattern.  Existing files in *destroot* are never
    overwritten (specific theme files win over generic ones).  Django
    templates go through ``AssetsParser``; any other engine is treated as
    Jinja2 and processed with a token state machine that replaces
    {% assets %} blocks via ``_render_assets``.
    """
    #pylint: disable=too-many-locals
    if excludes is None:
        excludes = []
    if includes is None:
        includes = []
    if not os.path.exists(os.path.join(prefix, destroot)):
        os.makedirs(os.path.join(prefix, destroot))
    for pathname in os.listdir(os.path.join(srcroot, prefix)):
        pathname = os.path.join(prefix, pathname)
        excluded = False
        for pat in excludes:
            if re.match(pat, pathname):
                excluded = True
                break
        if excluded:
            # ``includes`` patterns can re-admit an excluded path.
            for pat in includes:
                if re.match(pat, pathname):
                    excluded = False
                    break
        if excluded:
            LOGGER.debug("skip %s", pathname)
            continue
        source_name = os.path.join(srcroot, pathname)
        dest_name = os.path.join(destroot, pathname)
        if os.path.isfile(source_name) and not os.path.exists(dest_name):
            # We don't want to overwrite specific theme files by generic ones.
            with open(source_name) as source:
                template_string = source.read()
            try:
                template_string = force_text(template_string)
                lexer = DebugLexer(template_string)
                tokens = lexer.tokenize()
                if not os.path.isdir(os.path.dirname(dest_name)):
                    os.makedirs(os.path.dirname(dest_name))
                engine, libraries, builtins = get_html_engine()
                if isinstance(engine, DjangoTemplates):
                    # Django engine: stream tokens through AssetsParser,
                    # rewriting static URLs with ``path_prefix`` on the way.
                    with open(dest_name, 'w') as dest:
                        parser = AssetsParser(tokens,
                            URLRewriteWrapper(dest, path_prefix),
                            libraries=libraries,
                            builtins=builtins,
                            origin=None)
                        parser.parse_through()
                else:
                    # Jinja2 engine: re-tokenize and replace {% assets %}
                    # blocks using the STATE_* machine below.
                    template_name = None
                    tokens = Lexer(engine.env).tokeniter(template_string,
                        template_name, filename=source_name)
                    buffered_tokens = []
                    state = None
                    with open(dest_name, 'w') as dest:
                        for token in tokens:
                            if state is None:
                                if token[1] == 'block_begin':
                                    state = STATE_BLOCK_BEGIN
                            elif state == STATE_BLOCK_BEGIN:
                                if token[1] == 'name':
                                    if token[2] == 'assets':
                                        state = STATE_ASSETS_BEGIN
                                    else:
                                        # Not an assets tag: flush it verbatim.
                                        buffered_tokens += [token]
                                        state = None
                            elif state == STATE_ASSETS_BEGIN:
                                if (token[1] == 'name'
                                    and token[2] == 'endassets'):
                                    state = STATE_ASSETS_END
                            elif state == STATE_ASSETS_END:
                                if token[1] == 'block_end':
                                    buffered_tokens += [token]
                                    state = None
                            if state is None:
                                if buffered_tokens:
                                    for tok in buffered_tokens:
                                        if (tok[1] == 'name' and
                                            tok[2] == 'assets'):
                                            dest.write(_render_assets(
                                                buffered_tokens,
                                                engine.env.assets_environment))
                                            buffered_tokens = []
                                            break
                                    if buffered_tokens:
                                        dest.write("%s" % ''.join([token[2]
                                            for token in buffered_tokens]))
                                        buffered_tokens = []
                                elif six.PY2:
                                    dest.write("%s" % token[2].encode('utf-8'))
                                else:
                                    dest.write("%s" % str(token[2]))
                            else:
                                buffered_tokens += [token]
                        if buffered_tokens:
                            dest.write("%s" % ''.join([
                                token[2] for token in buffered_tokens]))
                            buffered_tokens = []
                        dest.write("\n")
                # Diff source vs. produced file only to choose the log verb.
                cmdline = ['diff', '-u', source_name, dest_name]
                cmd = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
                lines = cmd.stdout.readlines()
                cmd.wait()
                # Non-zero error codes are ok here. That's how diff
                # indicates the files are different.
                if lines:
                    verb = 'compile'
                else:
                    verb = 'install'
                dest_multitier_name = dest_name.replace(destroot,
                    '*MULTITIER_TEMPLATES_ROOT*')
                LOGGER.debug("%s %s to %s", verb,
                    source_name.replace(
                        django_settings.BASE_DIR, '*APP_ROOT*'),
                    dest_multitier_name)
            except UnicodeDecodeError:
                LOGGER.warning("%s: Templates can only be constructed "
                    "from unicode or UTF-8 strings.", source_name)
        elif os.path.isdir(source_name):
            install_templates(srcroot, destroot, prefix=pathname,
                excludes=excludes, includes=includes, path_prefix=path_prefix)
11368675 | #!/usr/bin/python
# encoding:utf-8
# ##############################################################################
# The MIT License (MIT)
#
# Copyright (c) [2015] [dangdang]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ##############################################################################
"""
doc string
"""
import traceback
import time
import util
import urllib2
import json
import sys
class RPC(object):
    """HTTP/JSON RPC client for the POI compute/compare services.

    Requests are serialized to JSON and POSTed to endpoint URLs built
    from the configured host/port; the method-type constants below
    select the remote endpoint used by ``_call_method``.
    """
    ADD_POI = 1  # add a new POI
    CHK_POI_SINGLE = 6  # duplicate check for a single POI
    DEL_POI = 2  # delete a POI
    CHK_POI_INNER = 5  # duplicate check within the submitted batch
    NAME_ANA = 10  # name analysis
    ADDR_ANA = 11  # address analysis
    FEATURE_ANA = 12  # feature analysis
    def __init__(self, service_name='compute', host='127.0.0.1', port=8000):
        # NOTE(review): service_name is accepted but never used —
        # confirm whether any caller still relies on it.
        self.host = host
        self.port = port
    def feature_ana(self, **arg):
        """Feature analysis."""
        res = self._call_method(RPC.FEATURE_ANA, arg.get("detail_list", [arg]))
        return res
    def name_ana(self, **arg):
        """Name analysis."""
        res = self._call_method(RPC.NAME_ANA, arg.get("detail_list", [arg]))
        return res
    def addr_ana(self, **arg):
        """Address analysis."""
        res = self._call_method(RPC.ADDR_ANA, arg.get("detail_list", [arg]))
        return res
    def check_poi_inner(self, **arg):
        """Duplicate check within the submitted batch of POIs."""
        res = self._call_method(RPC.CHK_POI_INNER, arg.get("detail_list", [arg]))
        return res
    def del_poi(self, **arg):
        """Delete POI data."""
        res = self._call_method(RPC.DEL_POI, arg.get("detail_list", [arg]))
        return res
    def add_poi(self, **arg):
        """Add POI data."""
        res = self._call_method(RPC.ADD_POI, arg.get("detail_list", [arg]))
        return res
    def check_poi_single(self, **arg):
        """Duplicate check for a single POI."""
        res = self._call_method(RPC.CHK_POI_SINGLE, arg.get("detail_list", [arg]))
        return res
    def _call_method(self, method_type, detail_list):
        """Build the endpoint URL for *method_type*, wrap *detail_list*
        in the expected JSON envelope and POST it.

        Returns the raw response body, or "" for an unknown method type
        or on any request failure.
        """
        url = ""
        if method_type == RPC.ADD_POI:
            url = "http://%s:%s/ComputeService/add_poi" % (self.host, self.port)
        elif method_type == RPC.DEL_POI:
            url = "http://%s:%s/ComputeService/delete_poi" % (self.host, self.port)
        elif method_type == RPC.CHK_POI_SINGLE:
            url = "http://%s:%s/ComputeService/check_poi" % (self.host, self.port)
        elif method_type == RPC.CHK_POI_INNER:
            url = "http://%s:%s/ComputeService/check_poi_inner" % (self.host, self.port)
        # the compare/analysis endpoints listen on port + 2
        elif method_type == RPC.NAME_ANA:
            url = "http://%s:%s/CompareService/name_ana" % (self.host, self.port + 2)
        elif method_type == RPC.ADDR_ANA:
            url = "http://%s:%s/CompareService/addr_ana" % (self.host, self.port + 2)
        elif method_type == RPC.FEATURE_ANA:
            url = "http://%s:%s/CompareService/feature_ana" % (self.host, self.port + 2)
        else:
            return ""
        query_map = {}
        query_map["type"] = method_type
        query_map["list"] = []
        for detail in detail_list:
            query_map["list"].append(detail)
        # the analysis endpoints expect {"pack": [...]} instead of
        # {"type": ..., "list": [...]}
        if method_type == RPC.NAME_ANA or method_type == RPC.ADDR_ANA or \
                method_type == RPC.FEATURE_ANA:
            query_map_tmp = {}
            query_map_tmp["pack"] = query_map["list"]
            query_map = query_map_tmp
        try:
            query_json = json.dumps(query_map, ensure_ascii=False)
            res = self._send_to_remote(url, query_json)
            return res
        except Exception as e:
            print e
            return ""
    def _send_to_remote(self, url, pack):
        """POST *pack* (a JSON string) to *url* and return the body."""
        headers = {"Content-Type": "application/json"}
        req = urllib2.Request(url, pack, headers=headers)
        response = urllib2.urlopen(req)
        the_page = response.read()
        response.close()
        return the_page
if __name__ == "__main__":
    # Usage example kept (disabled) for reference: it exercises the
    # single-POI duplicate check against a remote compute host.
    pass
    #rpc = RPC(host='nj03-inf-bce-waimai-m12-127.nj03.baidu.com', port=8000)
    #es = Multi()
    #poi0 = {
    #    'id' : 54716711,
    #    'point_x' : 13515949,
    #    'point_y' : 3504194,
    #    'catalog' : 0,
    #    'name' : u'7080'.encode('GBK'),
    #    'address' : u'宁波市慈溪市观海卫路197号(东旺家俬北10米)'.encode('GBK'),
    #    'city' : u'宁波市'.encode('GBK'),
    #    'phone' : u'13805822921'.encode('GBK')
    #}
    #re_map = json.loads(rpc.check_poi_single(**poi0), encoding = "gbk")
    #print "poi0: ", json.dumps(re_map, indent = 2, ensure_ascii=False)
| StarcoderdataPython |
3416189 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 24 21:44:42 2019
@author: Ham
HackerRank Challenge: Capitalize!
You are asked to ensure that the first and last names of people
begin with a capital letter in their passports.
For example, alison heck should be capitalised correctly as Alison Heck.
Given a full name, your task is to capitalize the name appropriately.
Input Format
A single line of input containing the full name, S.
Constraints
The string consists of alphanumeric characters and spaces.
Note: in a word only the first character is capitalized.
Example 12abc when capitalized remains 12abc.
Output Format
Print the capitalized string, S.
Sample Input
<NAME>
Sample Output
<NAME>
"""
import re
# Complete the solve function below.
def solve(s):
    """Capitalize every word of *s*, preserving all delimiters.

    The string is split on runs of non-word characters; the capturing
    group in the pattern keeps those runs in the split result, so
    joining the capitalized pieces restores the original spacing and
    punctuation. ``str.capitalize`` upper-cases the first character of a
    piece (and lower-cases the rest); pieces starting with a digit, such
    as ``12abc``, are left unchanged.
    """
    pieces = re.split(r"(\W+)", s)
    return "".join(map(str.capitalize, pieces))
if __name__ == '__main__':
    # HackerRank harness boilerplate left commented out; locally we just
    # read one line from stdin and print the capitalized result.
    #fptr = open(os.environ['OUTPUT_PATH'], 'w')
    #s = input()
    result = solve(input())
    #fptr.write(result + '\n')
    #fptr.close()
    print(result)
| StarcoderdataPython |
6408128 | <filename>AsciiBot_Example/dots/world.py
# Char inheritance works similar to the Decorator Pattern
# TODO: make the iters have a default arg of self._data_array?
import os
from dots.vector import Pos
from .chars import *
class World(object):
    """The 2-D grid of Char objects an AsciiDots program runs on.

    Construction parses the raw program text into a map, assigns ids to
    declared warp chars, resolves library imports (``%!`` declarations,
    appending each library's rows to the map), upgrades operator and dot
    chars to their specialised Char subclasses, and finally links every
    warp char to its companion.
    """
    def __init__(self, env, world_map, program_dir):
        """
        Create a new world to do dots races !

        :param dots.environment.Env env: The environment for the program
        :param str world_map: The string representing the world.
        :param str program_dir: The directory of the program
        """
        self.env = env
        self.env.world = self
        self.program_dir = program_dir
        self.map = self.map_from_raw(world_map)
        # globally unique id source for warp chars, shared with every
        # imported library map appended below
        self._worldwide_warp_id_counter = 0
        self._setup_warps_for(self.map)
        self._import_libraries()
        self._setup_operators()
        self._connect_warps()
        self._update_class_of_dots()

    def get_coords_of_dots(self):
        """Yields the coordinates of every dot char in the world."""
        for y, line in enumerate(self.map):
            # '%'-prefixed lines are directives, not playfield
            if line and line[0] == '%':
                continue
            for x, char in enumerate(line):
                if char.isDot():
                    yield Pos(x, y)

    # ✓
    def get_char_at(self, pos: Pos):
        """Get the Char at the given position."""
        # NOTE: _data_array has to be accessed using y, x due to the way it is created
        return self.map[pos.row][pos.col]

    # ✓
    def does_loc_exist(self, loc: Pos):
        """True if this location exists on the map."""
        return 0 <= loc.row < len(self.map) and 0 <= loc.col < len(self.map[loc.row])

    def is_char_at(self, pos, char):
        """True iff *pos* exists on the map and the char there equals *char*."""
        if not self.does_loc_exist(pos):
            return False
        return self.get_char_at(pos) == char

    # NOTE: Hopefully done?
    def _import_libraries(self, map=None):
        """
        Import the libraries declared (via ``%!``) in a given map.

        :param map: The 2-D Char map to import libraries for. Defaults to the world map.
        """
        if map is None:
            map = self.map
        lib_filenames_for_chars = self._get_lib_files_by_alias(map)
        lib_chars = lib_filenames_for_chars.keys()
        self._update_class_of_lib_chars(map, lib_chars)
        # singleton libraries share one warp id per alias char; remember
        # the id handed out on first sight so later occurrences reuse it
        singleton_ids = {}
        for y, line in enumerate(map):
            if line and line[0] == '%':
                continue
            for x, char in enumerate(line):
                if char.isLibWarp() and char.get_id() is None:
                    if char not in singleton_ids:
                        this_warp_id = self._worldwide_warp_id_counter
                        map[y][x].set_id(this_warp_id)
                        self._worldwide_warp_id_counter += 1
                        if char.isSingletonLibWarp():
                            singleton_ids[char] = this_warp_id
                        if char in lib_filenames_for_chars:
                            filename = lib_filenames_for_chars[char]
                            # parse the library file and append its rows
                            # (done only once for singleton libs)
                            self._import_lib_file_with_warp_id(map, filename, this_warp_id,
                                                               is_singleton=char.isSingletonLibWarp())
                    else:
                        map[y][x].set_id(singleton_ids[char])

    # NOTE: Hopefully done?
    def _import_lib_file_with_warp_id(self, char_obj_array, filename, warp_id, is_singleton):
        """Load *filename*, mark its ``%^`` entry char as the warp with
        *warp_id*, recursively resolve the library's own imports, and
        append its rows to *char_obj_array*."""
        path = self._get_path_of_lib_file(filename)
        with open(path, 'r') as lib_file:
            lib_code = lib_file.read()
        lib_char_obj_array = self.map_from_raw(lib_code)
        exposed_char_str = None
        for y, char_list in enumerate(lib_char_obj_array):
            line = ''.join(char_list).rstrip()
            if line[:2] == '%+':
                # obsolete entry-point notation: explain the migration
                # and refuse to run
                print(('%+ notation has become replaced by a new notation\n' +
                       'you now define a single warp char as an entry point to your code using %$\n' +
                       'for this code, it is recommended that your replace\n\n' +
                       '%+{0}{1}{2}{3}\n\n' +
                       'with\n\n' +
                       '%^X `` make sure that X doesn\'t conflict with anything\n' +
                       '%${0}{1}{2}{3}\n\n' +
                       '  {3}\n' +
                       '  |\n' +
                       '{2}-X-{0}\n' +
                       '  |\n' +
                       '  {1}\n').format(*line[2:]))
                raise Exception('obsolete code (unable to run)')
            elif line[:2] == '%^':
                exposed_char_str = line[2]  # FIXME: This only allows exposing one char!
            elif len(line) > 0 and line[0] == '%':
                continue
            else:
                for x, char in enumerate(char_list):
                    if char == exposed_char_str:
                        if is_singleton:
                            lib_char_obj_array[y][x] = SingletonLibInnerWarpChar(char)
                        else:
                            lib_char_obj_array[y][x] = LibWarpChar(char)
                        lib_char_obj_array[y][x].set_id(warp_id)
        self._setup_warps_for(lib_char_obj_array)
        self._import_libraries(lib_char_obj_array)
        char_obj_array.extend(lib_char_obj_array)

    # ✓
    def _update_class_of_lib_chars(self, char_obj_array, lib_chars):
        """Replace every occurrence of a library alias char with the
        appropriate (singleton or plain) lib-warp Char subclass."""
        is_singleton_dict = self._get_dict_of_is_singleton_for_lib_chars_in(char_obj_array)
        for y, line in enumerate(char_obj_array):
            if line and line[0] == '%':
                continue
            for x, char in enumerate(line):
                if char in lib_chars:
                    if is_singleton_dict[char]:
                        char_obj_array[y][x] = SingletonLibOuterWarpChar(char)
                    else:
                        char_obj_array[y][x] = LibWarpChar(char)

    # ✓
    def _get_path_of_lib_file(self, filename):
        """Resolve *filename* against the program directory, then the
        bundled ``libs`` directories; raise if it is nowhere to be found."""
        dir_paths_to_try = [
            self.program_dir,
            os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libs'),
            os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'libs'),
        ]
        for dir_path in dir_paths_to_try:
            path = os.path.join(dir_path, filename)
            if os.path.isfile(path):
                return path
        raise RuntimeError('Native library "{}" cannot be found'.format(filename))

    # ✓✓
    @staticmethod
    def _get_lib_files_by_alias(map_):
        """
        Get the library files by the alias char they are bound to.

        :param List[List[Char]] map_: The map to scan for ``%!`` imports
        """
        filename_by_alias = {}
        for row in map_:
            # get back the string from the Char array and remove the trailing white spaces
            line = ''.join(row).rstrip()
            # if it's an import
            if line.startswith('%!'):
                # Retrieve the filename and the alias from the line
                pieces = line[2:].split(' ')
                filename = pieces[0]
                alias = pieces[1]
                # add it to the mapping
                filename_by_alias[alias] = filename
        return filename_by_alias

    # ✓
    def _get_dict_of_is_singleton_for_lib_chars_in(self, char_obj_array):
        """Map each ``%!``-declared alias char to whether it is a singleton
        import (currently always True — see the commented variant below)."""
        is_singleton_dict = {}
        for char_list in char_obj_array:
            line = ''.join(char_list).rstrip()
            if line[:2] == '%!':
                pieces = line[2:].split(' ')
                char = pieces[1]
                is_singleton_dict[char] = True
                # if len(pieces) >= 3 and pieces[2] == '&':
                #     is_singleton_dict[char] = True
                # else:
                #     is_singleton_dict[char] = False
        return is_singleton_dict

    # ✓
    def _connect_warps(self):
        """Point every warp char at the location of the companion char
        sharing its warp id (singleton inner warps are entry points and
        are only targets, never sources)."""
        for y, line in enumerate(self.map):
            if line and line[0] == '%':
                continue
            for x, char in enumerate(line):
                if char.isWarp() and not isinstance(char, SingletonLibInnerWarpChar):
                    warp_id = char.get_id()
                    companion_warp_loc = self._find_companion_warp_char_loc_of(char, warp_id, Pos(x, y))
                    if companion_warp_loc is not None:
                        self.map[y][x].set_dest_loc(companion_warp_loc)

    # ✓
    def _find_companion_warp_char_loc_of(self, orig_char, warp_id, orig_pos: Pos):
        """Return the position of another warp char carrying *warp_id*,
        or None. A singleton lib *outer* warp only matches the library's
        *inner* entry-point warp."""
        for y, line in enumerate(self.map):
            if line and line[0] == '%':
                continue
            for x, char in enumerate(line):
                if char.isWarp() and char.get_id() == warp_id and orig_pos != (x, y):
                    if isinstance(orig_char, SingletonLibOuterWarpChar):
                        if not isinstance(char, SingletonLibInnerWarpChar):
                            continue
                    return Pos(x, y)

    # ✓
    def _setup_warps_for(self, char_obj_array):
        """Turn the chars declared on ``%$`` lines into WarpChars and give
        every warp letter one id shared by all its occurrences in this array."""
        self._correct_class_of_warp_chars_in(char_obj_array)
        # {letter: id}
        assigned_ids_for_letters = {}
        for (x, y), char in self._char_obj_array_iter_with_coords(char_obj_array):
            if char.isWarp() and char.get_id() is None:
                if char in assigned_ids_for_letters:
                    char_obj_array[y][x].set_id(assigned_ids_for_letters[char])
                else:
                    this_id = self._worldwide_warp_id_counter
                    assigned_ids_for_letters[char] = this_id
                    char.set_id(this_id)
                    self._worldwide_warp_id_counter += 1

    # ✓
    def _correct_class_of_warp_chars_in(self, char_obj_array):
        """Replace every char named in a ``%$`` declaration with a WarpChar."""
        warp_list = self._get_warp_chars_list_from(char_obj_array)
        for y, line in enumerate(char_obj_array):
            for x, char in enumerate(line):
                if char in warp_list:
                    char_obj_array[y][x] = WarpChar(char)

    # TODO check if the char is inside of a ascii dots text string
    def _update_class_of_dots(self):
        """Replace '.' chars with DotChar. Scanning of a line stops once
        a second backtick is seen on it."""
        for y, char_list in enumerate(self.map):
            last_was_backtick = False
            for x, char in enumerate(char_list):
                if char == '`':
                    if not last_was_backtick:
                        last_was_backtick = True
                    else:
                        break
                if char == '.':
                    self.map[y][x] = DotChar(char)

    # ✓
    def _setup_operators(self):
        """Upgrade chars wrapped in {} or [] brackets to operator chars."""
        for y, line in enumerate(self.map):
            for x, char in enumerate(line):
                if 0 < x < len(line) - 1:
                    if line[x - 1] == '{' and line[x + 1] == '}':
                        self.map[y][x] = CurlyOperChar(char)
                    elif line[x - 1] == '[' and line[x + 1] == ']':
                        self.map[y][x] = SquareOperChar(char)

    # ✓
    def _get_warp_chars_list_from(self, char_obj_array):
        """Collect the chars declared as warps on ``%$`` lines."""
        warp_chars = []
        for char_list in char_obj_array:
            line = ''.join(char_list).rstrip()
            if line[:2] == '%$':
                string_with_chars = line[2:]
                string_with_chars = string_with_chars.rstrip()
                list_with_chars = list(string_with_chars)
                warp_chars.extend(list_with_chars)
        return warp_chars

    # ✓
    def _char_obj_array_iter(self, obj_array):
        """Iterate over every char of *obj_array*, row by row."""
        for char_list in obj_array:
            for char in char_list:
                yield char

    # ✓
    def _char_obj_array_iter_with_coords(self, obj_array):
        """Iterate over (Pos, char) pairs of *obj_array*."""
        for y, char_list in enumerate(obj_array):
            for x, char in enumerate(char_list):
                yield Pos(x, y), char

    # ✓✓
    @staticmethod
    def map_from_raw(raw_map: str):
        """
        Convert a code in a string to a usable map.

        This will suppress the comments and convert each chr of the string to the corresponding Char.
        Creates a 2D array accessible by map[row][col].

        :param str raw_map: The program as it is stored in files.
        """
        map = []
        # for each line
        for raw_line in raw_map.split('\n'):
            # removing the comments
            line = raw_line.partition('``')[0]
            # remove inline comments: text between single backticks is
            # blanked out but keeps its width so columns stay aligned
            new_line = ''
            inside_comment = False
            for char in line:
                if char == '`':
                    inside_comment = not inside_comment
                    new_line += ' '
                else:
                    new_line += ' ' if inside_comment else char
            line = new_line
            # Convert the str to a list of Char
            line = [Char(c) for c in line]
            # add each row to the map
            map.append(line)
        return map
| StarcoderdataPython |
11244877 | # -*- coding: utf-8 -*-
"""
Created on 2020.12.19
@author: MiniUFO
Copyright 2018. All rights reserved. Use is subject to license terms.
"""
#%%
# Open the GrADS dataset through its .ctl descriptor and take the first
# time step of vorticity and divergence.
from xgrads.xgrads import open_CtlDataset
ds = open_CtlDataset('D:/SOR.ctl')
vor = ds.vor[0].rename('vorticity')
div = ds.div[0].rename('divergence')
#%%
# Invert the Poisson equation for vorticity, keeping the iterates so the
# SOR relaxation can be animated (one loop per frame, 40 loops max).
# NOTE(review): `sf` is presumably the streamfunction — confirm against
# the xinvert documentation.
import numpy as np
from xinvert.xinvert import invert_Poisson_animated
sf = invert_Poisson_animated(vor, BCs=['extend', 'periodic'],
                             loop_per_frame=1, max_loop=40)
#%% plot vector
# Mask out undefined (zero-filled) winds, then draw the inverted field
# with the wind vectors on a cylindrical projection centred on 180°E.
import proplot as pplt
import xarray as xr
import numpy as np
u = ds.u.where(ds.u!=0)[0].load()
v = ds.v.where(ds.v!=0)[0].load()
m = np.hypot(u, v)  # wind speed (computed but not used in the plot below)
lat, lon = xr.broadcast(u.lat, u.lon)
fig, axes = pplt.subplots(nrows=1, ncols=1, figsize=(11, 6), sharex=3, sharey=3,
                          proj=pplt.Proj('cyl', lon_0=180))
fontsize = 16
axes.format(abc=True, abcloc='l', abcstyle='(a)', coast=True,
            lonlines=60, latlines=30, lonlabels='b', latlabels='l',
            grid=True, labels=False)
ax = axes[0]
# filled contours of the final inversion frame, with the vector field on top
ax.contourf(lon, lat, sf[0], levels=31, cmap='jet')
p = ax.quiver(lon.values, lat.values, u.values, v.values,
              width=0.0006, headwidth=12., headlength=15.)
              # headwidth=1, headlength=3, width=0.002)
ax.set_title('wind field', fontsize=fontsize)
# ax.colorbar(p, loc='r', label='', ticks=0.25, length=0.83)
# ax.set_xticklabels([0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360],
#                    fontsize=fontsize)
| StarcoderdataPython |
import os

# Where extracted examples land and how they are named.
EXAMPLES_DIR = "./examples/"
EXAMPLE_NAME_TEMPLATE = "example-policy{}.yml"
# A fenced yaml block is only written out when its content starts with
# one of these top-level keys.
VALID_BEGINNGINGS = ["scenarios", "config"]

# The docs tree lives next to this script's parent directory.
docs_root = os.path.dirname(os.path.realpath(__file__)) + "/../docs/"

# Collect every markdown file under the docs tree.
markdown_files = []
for dirpath, _dirnames, filenames in os.walk(docs_root):
    markdown_files.extend(dirpath + "/" + name
                          for name in filenames if name.endswith(".md"))

counter = 0
for md_path in markdown_files:
    with open(md_path) as handle:
        content = ""
        inside = False
        for line in handle.readlines():
            if line.startswith("```yaml"):
                # opening fence of a yaml block
                inside = True
            elif line.startswith("```"):
                # closing fence (or any non-yaml fence): flush the block
                # if it looks like a policy example
                if inside and any(content.startswith(beg) for beg in VALID_BEGINNGINGS):
                    output = EXAMPLES_DIR + EXAMPLE_NAME_TEMPLATE.format(counter)
                    print(output)
                    with open(output, "w") as policy_file:
                        policy_file.write(content)
                inside = False
                content = ""
                # the counter advances for every fence close, so example
                # numbering can legitimately have gaps
                counter += 1
            elif inside:
                content += line
| StarcoderdataPython |
4819358 | #!/usr/bin/python3
#
# Obtaining a node session key. Usually, the Boot
# Manager obtains it, then writes it to /etc/planetlab/session.
#
# <NAME> <<EMAIL>>
# Copyright (C) 2006 The Trustees of Princeton University
#
import os, sys
import getopt
from config import Config
from plcapi import PLCAPI
def main():
    """Authenticate to the PLC API as the Boot Manager would and print
    the resulting node session key to stdout."""
    # Defaults
    config = None
    node_id = None
    key = None
    # Help
    def usage():
        print("Usage: %s [OPTION]..." % sys.argv[0])
        print("Options:")
        print("        -f, --config=FILE       PLC configuration file (default: /etc/planetlab/plc_config)")
        print("        -n, --node-id=FILE      Node ID (or file)")
        print("        -k, --key=FILE          Node key (or file)")
        print("        --help                  This message")
        sys.exit(1)
    # Get options
    try:
        (opts, argv) = getopt.getopt(sys.argv[1:], "f:n:k:h",
                                     ["config=", "cfg=", "file=",
                                      "node=", "nodeid=", "node-id", "node_id",
                                      "key=",
                                      "help"])
    except getopt.GetoptError as err:
        print("Error: " + err.msg)
        usage()
    for (opt, optval) in opts:
        if opt == "-f" or opt == "--config" or opt == "--cfg" or opt == "--file":
            config = Config(optval)
        elif opt == "-n" or opt == "--node" or opt == "--nodeid" or opt == "--node-id" or opt == "--node_id":
            # Both -n and -k accept either a literal value or a path to a
            # file containing the value.
            if os.path.exists(optval):
                # NOTE(review): a node id read from a file stays a str,
                # while one given on the command line becomes an int —
                # confirm downstream PLCAPI accepts both forms.
                with open(optval) as optfile:
                    node_id = optfile.read().strip()
            else:
                node_id = int(optval)
        elif opt == "-k" or opt == "--key":
            if os.path.exists(optval):
                with open(optval) as optfile:
                    key = optfile.read().strip()
            else:
                key = optval
        else:
            usage()
    if config is None:
        config = Config()
    if node_id is None or \
       key is None:
        usage()
    # Authenticate as the Boot Manager would and get a session key
    plc = PLCAPI(config.plc_api_uri, config.cacert, (node_id, key))
    session = plc.BootGetNodeDetails()['session']
    # Re-authenticate with the session key and sanity-check it round-trips
    plc = PLCAPI(config.plc_api_uri, config.cacert, session)
    assert session == plc.GetSession()
    print(session)
print(session)
if __name__ == '__main__':
    # Script entry point: parse CLI options and print the session key.
    main()
| StarcoderdataPython |
12827713 | <reponame>ThomThio/CCCThree
class Currency(object):
    """A currency exchange quote from a single vendor.

    The class attributes double as defaults for instances. When a
    Currency is constructed without a vendor, only the ISO code is
    stored and the quote is flagged as unavailable.
    """
    iso = ""
    name = ""
    units = ""
    buying = ""
    selling = ""
    vendor = ""
    available = True

    def __init__(self, iso, name, units, buying, selling, vendor=None):
        """Create a quote; with no vendor, a placeholder marked unavailable.

        :param iso: ISO currency code (e.g. "USD")
        :param name: human-readable currency name
        :param units: unit count the rates refer to
        :param buying: vendor's buying rate
        :param selling: vendor's selling rate
        :param vendor: quote source; None marks the currency unavailable
        """
        if vendor is None:
            # No vendor: keep only the ISO code, leave the remaining
            # fields at their class defaults and mark unavailable.
            self.iso = iso
            self.available = False
        else:
            self.iso = iso
            self.name = name
            self.units = units
            self.buying = buying
            self.selling = selling
            self.vendor = vendor

    def _key(self):
        # The tuple of fields that defines a quote's identity; used by
        # both __eq__ and __hash__ so they stay consistent.
        return (self.iso, self.name, self.units, self.buying,
                self.selling, self.vendor)

    def __eq__(self, other):
        """Value equality over all quote fields.

        Returns NotImplemented for non-Currency operands (the previous
        implementation raised AttributeError there), letting Python fall
        back to identity comparison.
        """
        if not isinstance(other, Currency):
            return NotImplemented
        return self._key() == other._key()

    def __hash__(self):
        # Defining __eq__ alone would set __hash__ to None and make
        # instances unhashable; hash the same fields __eq__ compares so
        # equal quotes hash equally.
        return hash(self._key())

    def __repr__(self):
        return ("Currency(iso={0.iso!r}, name={0.name!r}, units={0.units!r}, "
                "buying={0.buying!r}, selling={0.selling!r}, "
                "vendor={0.vendor!r}, available={0.available!r})".format(self))
1753408 | <reponame>JamesonNetworks/dotfiles<filename>setup.py<gh_stars>0
#!/usr/bin/env python3
from sys import platform, argv
from shared.setup import main
# Announce, run the platform-specific setup, then the shared setup, and
# finish with any platform-specific after-steps.
print('Starting environment initialization script...')
print('Platform is ' + platform)

on_linux = platform in ("linux", "linux2")

if on_linux:
    from linux.setup import main as linuxMain
    linuxMain(args=argv)
elif platform == "darwin":
    from macos.setup import main as macMain
    macMain(args=argv)

main(args=argv)

if on_linux:
    from linux.setup import after as linuxAfter
    linuxAfter(args=argv)
| StarcoderdataPython |
3200754 | """Integration test cases for the ready route."""
from unittest.mock import Mock
from aiohttp.test_utils import TestClient
import pytest
from tests.test_data import (
altinn_catalog_turtle,
or_catalog_turtle,
seres_catalog_turtle,
)
@pytest.mark.integration
async def test_altinn(client: TestClient, mock_load_altinn_from_cache: Mock) -> None:
    """The /altinn endpoint should answer 200 with the Altinn catalog."""
    response = await client.get("/altinn")
    body = await response.content.read()
    assert response.status == 200
    assert body.decode() == altinn_catalog_turtle
@pytest.mark.integration
async def test_or(client: TestClient, mock_load_or_from_cache: Mock) -> None:
    """The /or endpoint should answer 200 with the OR catalog."""
    response = await client.get("/or")
    body = await response.content.read()
    assert response.status == 200
    assert body.decode() == or_catalog_turtle
@pytest.mark.integration
async def test_seres(client: TestClient, mock_load_seres_from_cache: Mock) -> None:
    """The /seres endpoint should answer 200 with the Seres catalog."""
    response = await client.get("/seres")
    body = await response.content.read()
    assert response.status == 200
    assert body.decode() == seres_catalog_turtle
| StarcoderdataPython |
12813518 | """
Last edited: February 20, 2020
|br| @author: FINE Developer Team (FZJ IEK-3)
"""
from FINE import utils
import FINE as fn
import numpy as np
def optimizeTSAmultiStage(esM,
                          declaresOptimizationProblem=True,
                          relaxIsBuiltBinary=False,
                          numberOfTypicalPeriods=30,
                          numberOfTimeStepsPerPeriod=24,
                          clusterMethod='hierarchical',
                          logFileName='',
                          threads=3,
                          solver='gurobi',
                          timeLimit=None,
                          optimizationSpecs='',
                          warmstart=False):
    """
    Call the optimize function for a temporally aggregated MILP (so the model has to include
    hasIsBuiltBinaryVariables in all or some components). Fix the binary variables and run it again
    without temporal aggregation. Furthermore, a LP with relaxed binary variables can be solved to
    obtain both, an upper and lower bound for the fully resolved MILP.

    **Required arguments:**

    :param esM: energy system model to which the component should be added. Used for unit checks.
    :type esM: EnergySystemModel instance from the FINE package

    **Default arguments:**

    :param declaresOptimizationProblem: states if the optimization problem should be declared (True) or not (False).

        (a) If true, the declareOptimizationProblem function is called and a pyomo ConcreteModel instance is built.
        (b) If false a previously declared pyomo ConcreteModel instance is used.

        NOTE(review): this argument is currently ignored — every internal
        optimize() call below passes declaresOptimizationProblem=True.

        |br| * the default value is True
    :type declaresOptimizationProblem: boolean

    :param relaxIsBuiltBinary: states if the optimization problem should be solved as a relaxed LP to get the lower
        bound of the problem.
        |br| * the default value is False
    :type relaxIsBuiltBinary: boolean

    :param numberOfTypicalPeriods: states the number of typical periods into which the time series data
        should be clustered. The number of time steps per period must be an integer multiple of the total
        number of considered time steps in the energy system.
        Note: Please refer to the tsam package documentation of the parameter noTypicalPeriods for more
        information.
        |br| * the default value is 30
    :type numberOfTypicalPeriods: strictly positive integer

    :param numberOfTimeStepsPerPeriod: states the number of time steps per period
        |br| * the default value is 24
    :type numberOfTimeStepsPerPeriod: strictly positive integer

    :param clusterMethod: states the method which is used in the tsam package for clustering the time series
        data. Options are for example 'averaging','k_means','exact k_medoid' or 'hierarchical'.
        Note: Please refer to the tsam package documentation of the parameter clusterMethod for more information.
        |br| * the default value is 'hierarchical'
    :type clusterMethod: string

    :param logFileName: logFileName is used for naming the log file of the optimization solver output
        if gurobi is used as the optimization solver.
        If the logFileName is given as an absolute path (e.g. logFileName = os.path.join(os.getcwd(),
        'Results', 'logFileName.txt')) the log file will be stored in the specified directory. Otherwise,
        it will be stored by default in the directory where the executing python script is called.
        |br| * the default value is an empty string ('')
    :type logFileName: string

    :param threads: number of computational threads used for solving the optimization (solver dependent
        input) if gurobi is used as the solver. A value of 0 results in using all available threads. If
        a value larger than the available number of threads are chosen, the value will reset to the maximum
        number of threads.
        |br| * the default value is 3
    :type threads: positive integer

    :param solver: specifies which solver should solve the optimization problem (which of course has to be
        installed on the machine on which the model is run).
        |br| * the default value is 'gurobi'
    :type solver: string

    :param timeLimit: if not specified as None, indicates the maximum solve time of the optimization problem
        in seconds (solver dependent input). The use of this parameter is suggested when running models in
        runtime restricted environments (such as clusters with job submission systems). If the runtime
        limitation is triggered before an optimal solution is available, the best solution obtained up
        until then (if available) is processed.
        |br| * the default value is None
    :type timeLimit: strictly positive integer or None

    :param optimizationSpecs: specifies parameters for the optimization solver (see the respective solver
        documentation for more information). Example: 'LogToConsole=1 OptimalityTol=1e-6'
        |br| * the default value is an empty string ('')
    :type optimizationSpecs: string

    :param warmstart: specifies if a warm start of the optimization should be considered
        (not always supported by the solvers).
        |br| * the default value is False
    :type warmstart: boolean

    Last edited: February 20, 2020
    |br| @author: FINE Developer Team (FZJ IEK-3)
    """
    lowerBound=None
    # Optional stage 0: solve the fully resolved problem as a relaxed LP
    # to obtain a lower bound for the MILP objective.
    if relaxIsBuiltBinary:
        esM.optimize(declaresOptimizationProblem=True, timeSeriesAggregation=False, relaxIsBuiltBinary=True,
                     logFileName='relaxedProblem', threads=threads, solver=solver, timeLimit=timeLimit,
                     optimizationSpecs=optimizationSpecs, warmstart=warmstart)
        lowerBound = esM.objectiveValue
    # Stage 1: cluster the time series and solve the aggregated MILP.
    esM.cluster(numberOfTypicalPeriods=numberOfTypicalPeriods, numberOfTimeStepsPerPeriod=numberOfTimeStepsPerPeriod,
                clusterMethod=clusterMethod, solver=solver, sortValues=True)
    esM.optimize(declaresOptimizationProblem=True, timeSeriesAggregation=True, relaxIsBuiltBinary=False,
                 logFileName='firstStage', threads=threads, solver=solver, timeLimit=timeLimit,
                 optimizationSpecs=optimizationSpecs, warmstart=warmstart)
    # Set the binary variables to the values resulting from the first optimization step
    fn.fixBinaryVariables(esM)
    # Stage 2: re-solve with full temporal resolution and the binaries
    # fixed; this yields an upper bound (a feasible MILP solution).
    esM.optimize(declaresOptimizationProblem=True, timeSeriesAggregation=False, relaxIsBuiltBinary=False,
                 logFileName='secondStage', threads=threads, solver=solver, timeLimit=timeLimit,
                 optimizationSpecs=optimizationSpecs, warmstart=False)
    upperBound = esM.objectiveValue
    # If stage 0 ran, report the optimality gap between the two bounds.
    if lowerBound is not None:
        delta = upperBound - lowerBound
        gap = delta/upperBound
        esM.lowerBound, esM.upperBound = lowerBound, upperBound
        esM.gap = gap
        print('The real optimal value lies between ' + str(round(lowerBound,2)) + ' and ' +
              str(round(upperBound,2)) + ' with a gap of ' + str(round(gap*100,2)) + '%.')
def fixBinaryVariables(esM):
    """
    Fix the binary design (isBuilt) variables of every component to the
    values obtained in the preceding optimization run.

    :param esM: energy system model whose components are updated in place
    :type esM: EnergySystemModel instance from the FINE package

    Last edited: February 20, 2020
    |br| @author: FINE Developer Team (FZJ IEK-3)
    """
    for modeling in esM.componentModelingDict.values():
        optimal = modeling.getOptimalValues('isBuiltVariablesOptimum')['values']
        if optimal is None:
            continue
        for compName in optimal.index.get_level_values(0).unique():
            # Round the relaxed values to integers (NaNs become -1) and
            # store them as the fixed isBuilt decision of the component.
            rounded = optimal.loc[compName].fillna(value=-1).round(decimals=0).astype(np.int64)
            modeling.componentsDict[compName].isBuiltFix = utils.preprocess2dimData(rounded, discard=False)
| StarcoderdataPython |
1829908 | <reponame>illidanlab/tensorpack
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train-atari.py
# Author: <NAME>
import argparse
import cv2
import gym
import multiprocessing as mp
import numpy as np
import pickle
import os
import six
import sys
import uuid
import tensorflow as tf
from six.moves import queue
from tensorpack import *
from tensorpack.tfutils.gradproc import MapGradient, SummaryGradient
from tensorpack.utils.concurrency import ensure_proc_terminate, start_proc_mask_signal
from tensorpack.utils.gpu import get_num_gpu
from tensorpack.utils.serialize import dumps
from atari_wrapper import FireResetEnv, FrameStack, LimitLength, MapState
from common import Evaluator, eval_model_multithread, play_n_episodes
from simulator import SimulatorMaster, SimulatorProcess, TransitionExperience
import random
if six.PY3:
    # concurrent.futures.CancelledError only exists on Python 3; on
    # Python 2 fall back to a broad Exception alias so code catching
    # CancelledError still works.
    from concurrent import futures
    CancelledError = futures.CancelledError
else:
    CancelledError = Exception
import settings
IMAGE_SIZE = (84, 84)  # input frames are resized to 84x84
FRAME_HISTORY = 4  # number of stacked past frames fed to the network
GAMMA = 0.99  # reward discount factor
STATE_SHAPE = IMAGE_SIZE + (3, )  # H, W, RGB channels of one frame
BATCH_SIZE = 128  # mini-batch size used when slicing episodes
def process_rewards(env_name, rewards, gamma=0.99):
    """Turn raw per-step rewards into per-episode discounted returns.

    Rewards are first clipped to [-1, 1]. For Pong-like environments the
    running return is reset whenever a non-zero reward occurs (each
    point ends a rally, so credit must not leak across rallies); for all
    other environments the usual discounted sum is accumulated over the
    whole episode. The two code paths of the original were identical
    except for that reset rule, so they are merged here.

    :param env_name: gym environment id; names starting with "Pong" get
        the reset-at-score rule.
    :param rewards: list with one 1-D array of per-step rewards per episode.
    :param gamma: discount factor (default 0.99, matching the module GAMMA).
    :return: list of (T, 1) float arrays of discounted returns, aligned
        with the input episodes.
    """
    reset_on_score = env_name.startswith("Pong")
    discounted_rewards = []
    for rs in rewards:
        rs = np.clip(rs, -1, 1)
        discounted_r = np.zeros((len(rs), 1))
        R = 0
        # walk the episode backwards, propagating the discounted return
        for t in reversed(range(len(rs))):
            if reset_on_score and rs[t] != 0:
                # a point was scored: start a fresh return from here
                R = rs[t]
            else:
                R = R * gamma + rs[t]
            discounted_r[t] = R
        discounted_rewards.append(discounted_r)
    return discounted_rewards
def find_available_data(args):
    """List the ids of expert-data files available for ``args.env``.

    Files are expected to be named ``batch_<id>.npz`` inside the
    directory configured in ``settings.expert_data_path``.

    :param args: parsed CLI args; only ``args.env`` is used here.
    :return: list of id strings extracted from the file names.
    """
    # filename = os.path.join(settings.expert_data_path, "batch_{}.npz").format(args.expert_data_id)
    pathdir = settings.expert_data_path[args.env]
    file_ids = []
    for fname in os.listdir(pathdir):
        if fname.endswith(".npz"):
            # Bug fix: the original used str.strip(".npz"), which strips
            # *characters* from both ends rather than removing the
            # extension (e.g. it would eat a leading 'z'); slice the
            # suffix off instead.
            stem = fname[:-len(".npz")]
            file_ids.append(stem.split("_")[1])
    return file_ids
def get_player(train=False, dumpdir=None):
    """Build the preprocessed Atari environment.

    Optionally records episodes to *dumpdir*; during training the
    episode length is capped at 60k steps.
    """
    env = gym.make(ENV_NAME)
    if dumpdir:
        env = gym.wrappers.Monitor(env, dumpdir, video_callable=lambda _: True)
    # fixed preprocessing pipeline: fire-to-start, resize, 4-frame stack
    pipeline = [
        FireResetEnv,
        lambda e: MapState(e, lambda im: cv2.resize(im, IMAGE_SIZE)),
        lambda e: FrameStack(e, 4),
    ]
    for wrap in pipeline:
        env = wrap(env)
    return LimitLength(env, 60000) if train else env
class SupervisedModel(ModelDesc):
    def inputs(self):
        """Declare the model's input tensors: the stacked uint8 frames,
        the action taken, and the discounted future reward."""
        # NUM_ACTIONS is a module-level global that must be set (from the
        # environment's action space) before the model is built.
        assert NUM_ACTIONS is not None
        return [tf.TensorSpec((None,) + STATE_SHAPE + (FRAME_HISTORY, ), tf.uint8, 'state'),
                tf.TensorSpec((None,), tf.int64, 'action'),
                tf.TensorSpec((None,), tf.float32, 'futurereward'),
                ]
    def _get_NN_prediction(self, state):
        """Build the convolutional policy head.

        :param state: uint8 tensor of shape (batch, H, W, channel, history).
        :return: unnormalized action logits of shape (batch, NUM_ACTIONS).
        """
        assert state.shape.rank == 5  # Batch, H, W, Channel, History
        state = tf.transpose(state, [0, 1, 2, 4, 3])  # swap channel & history, to be compatible with old models
        # merge the history into the channel axis: (B, H, W, C * history)
        image = tf.reshape(state, [-1] + list(STATE_SHAPE[:2]) + [STATE_SHAPE[2] * FRAME_HISTORY])
        image = tf.cast(image, tf.float32) / 255.0  # normalize pixels to [0, 1]
        with argscope(Conv2D, activation=tf.nn.relu):
            l = Conv2D('conv0', image, 32, 5)
            l = MaxPooling('pool0', l, 2)
            l = Conv2D('conv1', l, 32, 5)
            l = MaxPooling('pool1', l, 2)
            l = Conv2D('conv2', l, 64, 4)
            l = MaxPooling('pool2', l, 2)
            l = Conv2D('conv3', l, 64, 3)
            l = FullyConnected('fc0', l, 512)
            l = PReLU('prelu', l)
            logits = FullyConnected('fc-pi', l, NUM_ACTIONS)  # unnormalized policy
        return logits
    def build_graph(self, resume=False):
        """Construct the TF1 training graph and session for behavior cloning.

        The objective is the cross-entropy between the policy logits and
        the expert action. All graph handles (placeholders, loss, train
        op, summaries, saver, writer) are stashed in ``self.handler``
        and the session in ``self.sess``.

        NOTE(review): unlike tensorpack's ModelDesc.build_graph(*inputs),
        this signature only takes ``resume`` — the class appears to be
        driven manually here; confirm no tensorpack trainer calls it.

        :param resume: when True, restore weights from the checkpoint
            directory configured in settings; otherwise initialize fresh.
        """
        ## create graph, session
        tf.reset_default_graph()
        sess = tf.Session()
        # placeholders for a mini-batch of expert transitions
        action = tf.placeholder(dtype=tf.int64, shape=(None,1))
        state = tf.placeholder(dtype=tf.uint8, shape= (None,) + STATE_SHAPE + (FRAME_HISTORY, ) )
        futurereward = tf.placeholder(dtype=tf.float32, shape=(None,1))
        logits = self._get_NN_prediction(state)
        policy = tf.nn.softmax(logits, name='policy')
        log_probs = tf.log(policy + 1e-6)  # NOTE(review): computed but unused below
        one_hot_actions = tf.one_hot(action, NUM_ACTIONS)
        one_hot_actions = tf.reshape(one_hot_actions, [-1, NUM_ACTIONS])
        # supervised (behavior cloning) loss against the expert actions
        policy_loss = tf.losses.softmax_cross_entropy(
            one_hot_actions, # one-hot-labels
            logits, # logits
        )
        # mean probability assigned to the expert action — a confidence
        # diagnostic only, not part of the loss
        confience_a_given_s = tf.reduce_mean(
            tf.reduce_sum(
                policy * one_hot_actions, 1)
        )
        cost = policy_loss
        lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
        optimizer_op = tf.train.AdamOptimizer(lr, epsilon=1e-3).minimize(cost)
        # Create a summary to monitor cost tensor
        tf.summary.scalar("loss", cost)
        # Create a summary to monitor confidence tensor
        tf.summary.scalar("mean_pi_a_given_s", confience_a_given_s)
        # Merge all summaries into a single op
        merged = tf.summary.merge_all()
        ## load parameter, or init parameter
        saver = tf.compat.v1.train.Saver()
        if resume:
            print('loading and building pretrained policy')
            #saver.restore(sess, tf.train.latest_checkpoint(settings.supervised_model_checkpoint[self.args.env]))
            checkpoint = settings.supervised_model_checkpoint[self.args.env]
            saver.restore(sess, tf.train.latest_checkpoint(checkpoint))
            print('loaded and built successfully')
        else:
            init = tf.global_variables_initializer()
            sess.run(init)
            print('model initialized successfully')
        # self.args must be assigned by the caller before build_graph runs
        writer = tf.compat.v1.summary.FileWriter(self.args.supervised_model_path, sess.graph)
        # expose every handle the training loop needs
        results = {}
        results["cost"] = cost
        results["policy"] = policy
        results["logits"] = logits
        results["merged"] = merged
        results["writer"] = writer
        results["actions_ph"] = action
        results["futurereward_ph"] = futurereward
        results["states_ph"] = state
        results["optimizer"] = optimizer_op
        results["saver"] = saver
        self.handler = results
        self.sess = sess
self.sess = sess
def train(self, file_ids, epoches=1, initial_episode=0):
    """Run supervised training over the given expert-demonstration files.

    Args:
        file_ids: ids of the expert ``.npz`` batch files to train on.
        epoches: number of passes over ``file_ids``.
        initial_episode: episode counter to resume TensorBoard steps from.
    """
    episode_index = initial_episode
    for epoch in range(epoches):
        for file_id in file_ids:
            try:
                states, actions, rewards = self.load_data(file_id=file_id)
            except AttributeError:
                logger.info("Skipping file {}".format(file_id))
                continue
            rewards = process_rewards(self.args.env, rewards)  # get discounted rewards
            ## start training
            # Bug fix: the original iterated ``range(episodes)`` with an
            # undefined name (NameError); each file holds len(states) episodes.
            for e in range(len(states)):
                episode_index += 1
                # get each episode
                print("File id = {}, Episode id ={}".format(file_id, episode_index))
                e_state, e_action, e_reward = states[e], actions[e], rewards[e]
                # state steps should be 1 more than action/reward steps
                stride = BATCH_SIZE
                pos, frame_size = 0, len(e_action)
                # Mini-batch sweep over the episode's frames.
                while True:
                    end = frame_size if pos + stride >= frame_size else pos + stride
                    batch_x = np.reshape(e_state[pos:end], (-1,) + STATE_SHAPE + (FRAME_HISTORY,))
                    batch_y = np.reshape(e_action[pos:end], (-1, 1))
                    batch_r = np.reshape(e_reward[pos:end], (-1, 1))
                    _, loss_val, tf_summary = self.sess.run(
                        [
                            self.handler["optimizer"],
                            self.handler["cost"],
                            self.handler["merged"],
                        ],
                        feed_dict={
                            self.handler["states_ph"]: batch_x,
                            self.handler["futurereward_ph"]: batch_r,
                            self.handler["actions_ph"]: batch_y
                        }
                    )
                    pos = end
                    ## release memory space for each mini-batch
                    del batch_x, batch_y, batch_r
                    if pos >= frame_size:
                        # end of episode
                        break
                information = "Update Episode {:2d}, Episode Length {:5d}, Running Loss {:.4f}".format(episode_index, frame_size, loss_val)
                logger.info(information)
                self.handler["writer"].add_summary(tf_summary, episode_index)
                ## save session and episode index after every episode
                self.handler["saver"].save(self.sess, os.path.join(settings.supervised_model_checkpoint[self.args.env], "checkpoint.ckpt"))
                # ``with`` guarantees the step file is closed even on error
                # (the original used open/close without a finally).
                with open(os.path.join(settings.supervised_model_checkpoint[self.args.env], "step.p"), "wb") as fp:
                    pickle.dump(episode_index, fp)
            del states, actions, rewards
def load_data(self, file_id):
    """Load one expert-demonstration batch file for the current env."""
    data_dir = settings.expert_data_path[self.args.env]
    archive_path = os.path.join(data_dir, "batch_{}.npz".format(file_id))
    archive = np.load(archive_path, allow_pickle=True)
    return archive["observations"], archive["actions"], archive["rewards"]
def evaluate(self):
    """Roll out the current policy for the configured number of episodes."""
    self.env = get_player()
    # Bug fix: use ``self.args`` instead of the module-level ``args`` so this
    # method also works when the class is used outside the __main__ script.
    for episode in range(self.args.expert_episode):
        score = self._evaluate_one_episode()
        logger.info("Episode {} Player Score: {:.1f}".format(episode, score))
def _evaluate_one_episode(self):
    """Play one episode with the learned policy; return the total reward."""
    ob = self.env.reset()
    isOver = False
    sum_r = 0
    while not isOver:
        # get prediction
        ob = np.expand_dims(ob, 0) # add a batch dimension of 1
        policy = self.sess.run(
            [
                self.handler["policy"],
            ],
            feed_dict={
                self.handler["states_ph"]:ob,
            }
        )
        ## get actions based on prediction
        # ``policy`` is a 1-element list holding a (1, NUM_ACTIONS) array;
        # np.argmax flattens it, so this picks the greedy action index.
        act = np.argmax(policy)
        if random.random() < 0.01: # epsilon-greedy exploration (eps = 1%)
            spc = self.env.action_space
            act = spc.sample()
        ## step with the environment
        ob, r, isOver, info = self.env.step(act)
        if self.args.render:
            self.env.render()
        sum_r += r
        if isOver:
            return sum_r
def train(args):
    """Train the supervised model from expert data, resuming if requested.

    Args:
        args: parsed command-line namespace (env, resume, paths, epochs).
    """
    assert tf.test.is_gpu_available(), "Training requires GPUs!"
    logger.set_logger_dir(args.supervised_model_path)
    # setup model
    # (removed the unused local ``num_gpu`` the original assigned here)
    model = SupervisedModel()
    model.args = args
    model.build_graph(resume=args.resume)
    # training model using loaded expert data
    file_ids = find_available_data(args)
    # Resume the episode counter so TensorBoard summaries keep a
    # monotonically increasing step across restarts.
    step_file = os.path.join(settings.supervised_model_checkpoint[args.env], "step.p")
    if args.resume and os.path.exists(step_file):
        with open(step_file, 'rb') as f:
            initial_episode = pickle.load(f)
    else:
        initial_episode = 0
    model.train(file_ids, epoches=args.train_epochs, initial_episode=initial_episode)
def generate_expert_demonstration(args):
    """Restore the trained supervised model and evaluate it as an expert."""
    logger.info("Loaded Model Path: {}".format(args.expert_model_path))
    expert = SupervisedModel()
    expert.args = args
    expert.build_graph(resume=True)
    expert.evaluate()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', help='env', default="Pong-v0", type=str)
    parser.add_argument('--num_gpu', help='Number of GPUs', default=1, type=int)
    parser.add_argument('--task', help='task to perform',
                        choices=['eval', 'train'], default='train')
    # Bug fix: ``type=bool`` (and bare string values) are broken in argparse,
    # since bool("False") is True -- any supplied value enabled the option.
    # The boolean options are now proper on/off flags: pass e.g. ``--render``
    # (with no value) to enable.
    parser.add_argument('--resume', help='If Resume Training Supervised Model', action='store_true')
    parser.add_argument('--supervised_model_path', help='supervised-model log path', default="/mnt/research/judy/reward_shaping/sanity/supervised-atari-{}", type=str)
    parser.add_argument('--train_epochs', help='Number of epoches to train using expert data', default=1000, type=int)
    parser.add_argument('--expert_model_path', help='Model used to evaluate expert demonstration', default=None)
    parser.add_argument('--expert_data_id', help='file id to save expert data', default=1, type=int)
    parser.add_argument('--expert_episode', help='number of expert episodes to eval', default=10, type=int)
    parser.add_argument('--expert_save', help='If save episodes', action='store_true')
    parser.add_argument('--render', help='If render the environment', action='store_true')
    args = parser.parse_args()
    ENV_NAME = args.env
    NUM_ACTIONS = get_player().action_space.n
    logger.info("Environment: {}, number of actions: {}".format(ENV_NAME, NUM_ACTIONS))
    # Fill the env name into the log-path template.
    args.supervised_model_path = args.supervised_model_path.format(ENV_NAME)
    if not args.expert_model_path:
        args.expert_model_path = settings.supervised_model_checkpoint[args.env]
    if args.task == 'eval':
        generate_expert_demonstration(args)
    elif args.task == "train":
        logger.info("Logger/Model Path: {}".format(args.supervised_model_path))
        train(args)
| StarcoderdataPython |
4856087 | <filename>Stone_Paper_Scissors.py
import random

# The three tools, in menu order; BEATS maps each tool to the tool it defeats.
TOOL_NAMES = ["stone", "paper", "scissor"]
BEATS = {"stone": "scissor", "paper": "stone", "scissor": "paper"}

tie = 0
tries = 0
score_ur = 0
score_comp = 0

print(TOOL_NAMES)
start_input = input("Do you want to start the game?\n")
if start_input in ("yes", "yup", "s"):
    score_final = int(input("How many points Do you want to play for?🤔\n"))
    print("Your final score is:", score_final)
    # Play rounds until either side reaches the target score.
    # Bug fix: the original duplicated the 9 outcome branches and twice
    # incremented ``tries`` by 17 instead of 1.
    while score_ur < score_final and score_comp < score_final:
        random_ins = random.choice(TOOL_NAMES)
        print("What do you want to prefer Enter numbers")
        print("1-stone")
        print("2-paper")
        try:
            user_ins_num = int(input("3-scissor\n"))
        except ValueError:
            user_ins_num = 0  # non-numeric input falls through to the error path
        if not 1 <= user_ins_num <= 3:
            print("You have to enter a vaild number between 1 to 3 to select the tool you need")
            print("I am sorry!")
            print("Some thing is wrong")
            continue
        user_ins = TOOL_NAMES[user_ins_num - 1]
        print(random_ins + " vs " + user_ins)
        tries += 1
        if random_ins == user_ins:
            print("It is a tie! Try again")
            tie += 1
        elif BEATS[user_ins] == random_ins:
            score_ur += 1
            print("Good!!!")
        else:
            score_comp += 1
            print("Oops!!!")
        print("Your score 👇🏻")
        print(score_ur)
        print("Computer score 👇🏻")
        print(score_comp)
    # Announce the winner once a side reaches the target.
    if score_ur >= score_final:
        print("You have won the match 😃")
    else:
        print("Computer has won the match 😰😥")
| StarcoderdataPython |
75501 | import pytest
from app.api.services import abr_service
from app.api.business.errors import AbrError
import requests
import mock
from mock import patch
class TestAbrService():
    """Tests for ``abr_service``: ABR XML parsing and HTTP error propagation.

    NOTE(review): the ``@mock.patch(...call_abr_api)`` decorators inject a
    MagicMock as the second positional argument of each test; most tests name
    that argument after an unrelated helper and never use the mock itself --
    the payloads actually come from the ``mocked_*`` helper methods below.
    """
    # --- canned ABR XML payloads ------------------------------------------
    def mocked_find_business_by_abn(self):
        # Well-formed response with state, postcode and organisation name.
        data = '<ABR><response><stateCode>NSW</stateCode><postcode>2750</postcode>'\
               '<organisationName>yay</organisationName></response></ABR>'
        return data
    def mocked_payload_exception(self):
        # Exception element carrying both a code and a description.
        data = '<ABR><response><exception><exceptionDescription>Search text is not a '\
               'valid ABN or ACN</exceptionDescription><exceptionCode>WEBSERVICES</exceptionCode>'\
               '</exception></response></ABR>'
        return data
    def mocked_payload_exception_with_no_description(self):
        # Exception with a code but no description element.
        data = '<ABR><response><exception><exceptionCode>WEBSERVICES</exceptionCode>'\
               '</exception></response></ABR>'
        return data
    def mocked_payload_exception_with_no_code(self):
        # Exception with a description but no code element.
        data = '<ABR><response><exception><exceptionDescription>Search text is not a '\
               'valid ABN or ACN</exceptionDescription>'\
               '</exception></response></ABR>'
        return data
    def mocked_payload_exception_with_no_code_and_no_description(self):
        # Response with no exception element at all.
        data = '<ABR><response></response></ABR>'
        return data
    # --- parsing tests ----------------------------------------------------
    @mock.patch("app.api.services.abr_service.call_abr_api")
    def test_abr_response_can_be_parsed(self, mocked_find_business_by_abn):
        expected_parsed_data = {'state': 'NSW', 'organisation_name': 'yay', 'postcode': '2750'}
        data = abr_service.get_data(self.mocked_find_business_by_abn())
        assert data == expected_parsed_data
    @mock.patch("app.api.services.abr_service.call_abr_api")
    def test_abr_exception_can_be_parsed(self, mocked_payload_exception):
        expected_msg = 'WEBSERVICES: Search text is not a valid ABN or ACN'
        result = abr_service.get_abr_exception(self.mocked_payload_exception())
        assert result == expected_msg
    @mock.patch("app.api.services.abr_service.call_abr_api")
    def test_abr_exception_can_be_parsed_with_no_exception_desc(self, mocked_payload_exception_with_no_description):
        expected_msg = 'WEBSERVICES: No exception description found'
        result = abr_service.get_abr_exception(self.mocked_payload_exception_with_no_description())
        assert result == expected_msg
    @mock.patch("app.api.services.abr_service.call_abr_api")
    def test_abr_exception_can_be_parsed_with_no_exception_code(self, mocked_payload_exception_with_no_code):
        expected_msg = 'No exception code found: Search text is not a valid ABN or ACN'
        result = abr_service.get_abr_exception(self.mocked_payload_exception_with_no_code())
        assert result == expected_msg
    @mock.patch("app.api.services.abr_service.call_abr_api")
    def test_abr_exception_parsed_with_no_ex_code_desc(self, mocked_payload_exception_with_no_code_and_no_description):
        expected_msg = None
        result = abr_service.get_abr_exception(self.mocked_payload_exception_with_no_code_and_no_description())
        assert result == expected_msg
    # --- error-propagation tests ------------------------------------------
    # Each test patches call_abr_api itself with a side_effect and checks the
    # configured exception (and its message) is raised to the caller.
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_connecton_error_exception_raised(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.ConnectionError()
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.ConnectionError):
            abr_service.call_abr_api(url)
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_ssl_error_exception_raised(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.SSLError()
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.SSLError):
            abr_service.call_abr_api(url)
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_http_error_exception_raised(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.HTTPError()
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.HTTPError):
            abr_service.call_abr_api(url)
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_proxy_error_exception_raised(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.ProxyError()
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.ProxyError):
            abr_service.call_abr_api(url)
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_http_exception_message(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.HTTPError('HTTP Error')
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.HTTPError) as ex_info:
            abr_service.call_abr_api(url)
        assert str(ex_info.value) == 'HTTP Error'
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_proxy_exception_message(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.ProxyError('Proxy Error')
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.ProxyError) as ex_msg:
            abr_service.call_abr_api(url)
        assert str(ex_msg.value) == 'Proxy Error'
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_ssl_exception_message(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.SSLError('SSL Error')
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.SSLError) as ex_msg:
            abr_service.call_abr_api(url)
        assert str(ex_msg.value) == 'SSL Error'
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_exception_message(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.RequestException('Unexpected request error')
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.RequestException) as ex_msg:
            abr_service.call_abr_api(url)
        assert str(ex_msg.value) == 'Unexpected request error'
| StarcoderdataPython |
6545730 | # -*- coding: utf-8 -*-
# Package metadata (author/email values were anonymized by the dataset export).
__version__ = '0.0.1a'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
1843569 | <reponame>Hydraverse/hypy<gh_stars>1-10
from hydra.test import Test
# noinspection PyUnresolvedReferences
from hydra.test.app import *
Test.main()
| StarcoderdataPython |
3311453 | from django.db import models
from django.contrib.auth.models import User
import datetime as dt
from PIL import Image
# Create your models here.
class Profile(models.Model):
    """
    A user's profile: bio text, linked auth user and avatar image.
    """
    bio = models.CharField(max_length=70)
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    profile_pic = models.ImageField(default = 'default.jpg',upload_to='profiles/')
    def __str__(self):
        """
        function returns informal representations of the models' objects
        """
        return f'{self.user.username} Profile'
    def save_profile(self):
        """
        Save the profile, shrinking the avatar to at most 300x300 px.
        """
        super().save()
        # Bug fix: the image field is ``profile_pic``; the original code
        # referenced a non-existent ``self.image`` attribute and raised
        # AttributeError on every save.
        img = Image.open(self.profile_pic.path)
        if img.height > 300 or img.width > 300:
            output_size = (300,300)
            img.thumbnail(output_size)
            img.save(self.profile_pic.path)
    def update_profile(self, using=None, fields=None, **kwargs):
        """
        Refresh this profile's fields from the database (deferred fields
        are refreshed together with any requested fields).
        """
        if fields is not None:
            fields = set(fields)
            deferred_fields = self.get_deferred_fields()
            if fields.intersection(deferred_fields):
                fields = fields.union(deferred_fields)
        super().refresh_from_db(using, fields, **kwargs)
    def delete_profile(self):
        """
        method deletes saved profile
        """
        self.delete()
    @classmethod
    def get_user_by_profile(cls,user_search):
        """Return profiles whose username contains ``user_search``."""
        profile = cls.objects.filter(user__username__icontains=user_search)
        return profile
    @classmethod
    def get_profile_by_id(cls, id):
        """
        methods gets and returns a profile with a given id
        """
        profile = Profile.objects.get(pk=id)
        return profile
#Image class kwa lms
class Post(models.Model):
    """
    A user's image post with caption, publication date and like counter.
    """
    # NOTE(review): author is nullable, but __str__ below dereferences
    # self.author.username -- confirm posts always have an author.
    author = models.ForeignKey(User, on_delete=models.CASCADE, default=None, null=True, blank=True)
    image = models.ImageField(blank=True,upload_to='posts/')
    image_name = models.CharField(max_length=30, blank=True)
    caption = models.CharField(max_length=255)
    pub_date = models.DateTimeField(auto_now_add=True, null=True)
    likes = models.IntegerField(default=0)  # denormalized like counter
    def __str__(self):
        return f'{self.author.username} post'
    @classmethod
    def display_posts(cls):
        """Return all posts (newest first, per Meta.ordering)."""
        posts = cls.objects.all()
        return posts
    @classmethod
    def get_post(cls,pk):
        """Return the post with the given primary key."""
        posts = cls.objects.get(pk=pk)
        return posts
    class Meta:
        # Newest posts first.
        ordering = ["-pk"]
    def save_post(self):
        """
        method saves added post object
        """
        self.save()
    def update_post(self, using=None, fields=None, **kwargs):
        """
        Refresh this post's fields from the database (deferred fields are
        refreshed together with any requested fields).
        """
        if fields is not None:
            fields = set(fields)
            deferred_fields = self.get_deferred_fields()
            if fields.intersection(deferred_fields):
                fields = fields.union(deferred_fields)
        super().refresh_from_db(using, fields, **kwargs)
    def delete_post(self):
        """
        method deletes saved post object
        """
        self.delete()
class Comment(models.Model):
    """
    A user's comment on a post.
    """
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    body = models.TextField(max_length=500, blank=False)
    def __str__(self):
        return self.body
    def save_comment(self):
        """
        method saves added comment
        """
        self.save()
    def delete_comment(self):
        """
        method deletes saved comment
        """
        self.delete()
    @classmethod
    def get_comment(cls,id):
        """Return all comments on the post with the given pk."""
        # Bug fix: the FK field is ``post`` -- the original filtered on a
        # non-existent ``image`` field.
        comments = cls.objects.filter(post__pk=id)
        return comments
    @classmethod
    def get_post_comment(cls,pk):
        """Return the comments attached to the post with the given pk."""
        # Bug fixes: ``Post.get_single_post`` and the ``Comments`` name do
        # not exist (they are ``Post.get_post`` and ``Comment``), and the FK
        # column is ``post_id``, not ``image_id``.
        post = Post.get_post(pk)
        comments = list(cls.objects.filter(post_id=post.id).all())
        return comments
class Follow(models.Model):
    """One 'follower follows followed' relationship between two profiles."""
    # Reverse accessors: profile.following -> Follow rows where the profile
    # is the follower; profile.followers -> rows where it is followed.
    follower = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='following')
    followed = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='followers')
    def __str__(self):
        # (removed dataset-export residue that was fused onto this line and
        # made the original a syntax error)
        return f'{self.follower} Follow'
9667422 | <gh_stars>0
from mailchimp3.baseapi import BaseApi
class Folder(BaseApi):
    """File Manager folder endpoints of the MailChimp v3 API."""
    def __init__(self, *args, **kwargs):
        # All helpers below build URLs under this endpoint path.
        super(Folder, self).__init__(*args, **kwargs)
        self.endpoint = 'file-manager/folders'
    def all(self):
        """
        returns all folders in the File Manager.
        """
        return self._mc_client._get(url=self.endpoint)
    def get(self, folder_id):
        """
        returns a specific folder's information.
        """
        return self._mc_client._get(url=self._build_path(folder_id))
    def update(self, folder_id, data):
        """
        updates a folder's information.
        """
        return self._mc_client._patch(url=self._build_path(folder_id), data=data)
    def delete(self, folder_id):
        """
        removes a folder from the File Manager.
        """
        return self._mc_client._delete(url=self._build_path(folder_id))
| StarcoderdataPython |
3322995 | <reponame>PacktPublishing/Tkinter-GUI-Application-Development-Projects<gh_stars>10-100
from tkinter import *
def show_event_details(event):
    """Print a summary of a Tk event: name, keysym, type, widget, position, time."""
    # Human-readable names for the numeric Tk event types this demo binds to.
    event_name = {"2": "KeyPress", "4": "ButtonPress", "6": "Motion", "9": "FocusIn"}
    print('=' * 50)
    # Robustness fix: .get() avoids a KeyError if the handler is ever bound
    # to an event type missing from the table above.
    print("EventName=" + event_name.get(str(event.type), "Unknown"))
    print("EventKeySymbol=" + str(event.keysym))
    print("EventType=" + str(event.type))
    print("EventWidgetId=" + str(event.widget))
    print("EventCoordinate (x,y)=(" + str(event.x) + "," + str(event.y) + ")")
    print("Time:", str(event.time))
# Build the demo window: each widget is bound to a different set of events,
# all routed to show_event_details.
root = Tk()
button = Button(root, text="Button Bound to: \n Keyboard Enter & Mouse Click") #create button
button.pack(pady=5,padx=4)
button.focus_force()  # give the button initial keyboard focus
button.bind("<Button-1>", show_event_details) #bind button to mouse click
button.bind("<Return>", show_event_details)#bind button to Enter Key
Label(text="Entry is Bound to Mouseclick \n, FocusIn and Keypress Event").pack()
entry = Entry(root) #creating entry widget
entry.pack()
#binding entry widget to mouse click and focus in
entry.bind("<Button-1>", show_event_details) # left mouse click
entry.bind("<Button-2>", show_event_details) # middle mouse button (right-click is <Button-3> on most platforms)
entry.bind("<FocusIn>", show_event_details)
#binding entry widget alphabets and numbers from keyboard
# NOTE(review): this literal was anonymized by the dataset export ('<KEY>');
# it was presumably a string of alphanumeric characters bound one by one.
alpha_num_keys = '<KEY>'
for key in alpha_num_keys:
    entry.bind("<KeyPress-%s>"%key, show_event_details)
#binding entry widget to named keysyms (modifiers, navigation, keypad)
keysyms = ['Alt_L', 'Alt_R','BackSpace', 'Cancel', 'Caps_Lock','Control_L',
           'Control_R','Delete', 'Down', 'End', 'Escape', 'Execute','F1',
           'F2', 'Home', 'Insert', 'Left','Linefeed','KP_0','KP_1','KP_2',
           'KP_3','KP_4','KP_5','KP_6','KP_7','KP_8','KP_9','KP_Add',
           'KP_Decimal','KP_Divide']
for i in keysyms:
    entry.bind("<KeyPress-%s>"%i, show_event_details)
#binding Canvas widget to Motion Event
Label(text="Canvas Bound to Motion Event\n(Hover over the area \nto see motion event )").pack()
canvas = Canvas(root, background='white',width=100, height=30)
canvas.pack()
canvas.bind('<Motion>', show_event_details)
Label(text="Entry Widget Bound to \n<Any KeyPress>").pack()
entry_1 = Entry(root) #creating entry widget
entry_1.pack(pady=7)
#binding entry_1 to every key press
entry_1.bind("<Any KeyPress>", show_event_details)
root.mainloop()
| StarcoderdataPython |
11249294 | """
The simplest of fabfiles.
Two commands: host_type and diskspace.
"""
from fabric.api import run
def host_type():
    # Report the remote kernel name via fabric's remote ``run``.
    run('uname -s')
def diskspace():
    # Show remote filesystem usage.
    run('df')
| StarcoderdataPython |
12855992 | """season.py: Generates random NJBA season data."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, University of Delaware, CISC 637 Database Systems"
__email__ = "<EMAIL>"
from datetime import timedelta
import calendar
import csv
'''
Steps to run this project:
1. Create a virtual env and activate source
    virtualenv -p python3 .
    ./bin/activate
2. Install names PyPi Module - https://pypi.org/project/names/
    pip install names
3. Run the project
    python3 generate-seasons.py
'''
numOfSeasons = 50
seasonType = ["Pre", "Regular", "Post"]
# Running season id; NOTE(review): shadows the ``id`` builtin.
id = 1
# Weeks start on Sunday so weekday-indexed lookups below are consistent.
cal = calendar.Calendar(firstweekday = calendar.SUNDAY)
year = 2019 # Start Year
# month = 10 # October
# month2 = 4 # April
# month3 = 6 # June
# Write one CSV row per season: id, start-date, end-date, seasonType.
with open('data/seasons2.csv', mode = 'w') as season_file:
    season_writer = csv.writer(season_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
    for j in range(numOfSeasons):
        for index in range(len(seasonType)):
            # id, start-date, end-date, start-year, end-year, seasonType
            # Create the season list
            season = []
            # monthcal = cal.monthdatescalendar(year,month)
            # Pick the calendar month each season type starts in.
            if (seasonType[index] == "Pre"):
                monthcal = cal.monthdatescalendar(year, 9)
            elif (seasonType[index] == "Regular"):
                monthcal = cal.monthdatescalendar(year, 10)
            else:
                monthcal = cal.monthdatescalendar(year + 1, 4)
            # ID
            season.append(id)
            if (seasonType[index] == "Pre"):
                # Pre Season
                # Start date is 4th Saturday of every September
                start_date = [day for week in monthcal for day in week if \
                              day.weekday() == calendar.SATURDAY][3]
                # Start date
                season.append(start_date)
                # End date is the Monday before the 3rd Tuesday of October
                monthcal = cal.monthdatescalendar(year, 10)
                end_date = [day for week in monthcal for day in week if \
                            day.weekday() == calendar.TUESDAY][2]
                end_date = end_date - timedelta(days = 1)
                # End date
                season.append(end_date)
            if (seasonType[index] == "Regular"):
                # Regular Season
                # Start date is 3rd Tuesday of every October
                start_date = [day for week in monthcal for day in week if \
                              day.weekday() == calendar.TUESDAY][2]
                # Start date
                season.append(start_date)
                # End date is 2nd Wednesday of every April
                monthcal2 = cal.monthdatescalendar(year + 1, 4)
                end_date = [day for week in monthcal2 for day in week if \
                            day.weekday() == calendar.WEDNESDAY][1]
                # End date
                season.append(end_date)
            if (seasonType[index] == "Post"):
                # Post Season
                # Start date is the day after the 2nd Wednesday of April
                # (i.e. the 2nd Thursday).
                # NOTE(review): monthcal2 is assigned in the "Regular" branch
                # above, so this relies on seasonType order Pre->Regular->Post.
                start_date = [day for week in monthcal2 for day in week if \
                              day.weekday() == calendar.WEDNESDAY][1]
                start_date = start_date + timedelta(days = 1)
                # Start date
                season.append(start_date)
                # End date is 3rd Thursday of every June
                monthcal = cal.monthdatescalendar(year + 1, 6)
                end_date = [day for week in monthcal for day in week if \
                            day.weekday() == calendar.THURSDAY][2]
                # End date
                season.append(end_date)
            # # Year Abbreviation
            # abbr = str(year + 1)
            # season.append(str(year) + "-" + str(year + 1))
            # seasonType
            season.append(seasonType[index])
            id += 1
            season_writer.writerow(season)
        year += 1
| StarcoderdataPython |
1761554 | import os
def clear():
    """Clear the terminal using the platform's command.

    Uses ``cls`` on Windows (os.name == "nt") and ``clear`` elsewhere.
    (Also removes dataset-export residue fused onto the original's last
    line, which made it a syntax error.)
    """
    os.system("cls" if os.name == "nt" else "clear")
3516459 | from datetime import datetime, timedelta
class BlindsAndAntes:
    """Holds the tournament's blind/ante schedule."""
    def __init__(self):
        # The blinds and antes are defined by:
        #   start minute (offset from game start)
        #   [smallBlind, bigBlind]
        #   ante
        # e.g. [40, [50, 100], 10] means: from minute 40 on, blinds are
        # 50/100 with a 10 ante.
        self.blindsAndAntes = [[ 0, [10, 20], 0], \
                               [20, [20, 40], 0], \
                               [40, [50, 100], 10], \
                               [60, [100, 200], 20] ]
    def getBlindsAndAntes(self):
        """Return the raw schedule list."""
        return self.blindsAndAntes

# The TexasHoldemGameDefinition encapsulates the
# characteristics of a Texas Hold'em game
class TexasHoldemGameDefinition:
    def __init__(self):
        # Texas hold'em has four betting rounds.
        self.numBettingRounds = 4
        # numCardsPerBettingRound holds [hole cards, board cards] dealt on
        # each betting round:
        #   round 0 (pre-flop) [2, 0]
        #   round 1 (flop)     [0, 3]
        #   round 2 (turn)     [0, 1]
        #   round 3 (river)    [0, 1]
        self.numCardsPerBettingRound = [[2, 0], [0, 3], [0, 1], [0, 1]]
        # Blinds and antes as a function of elapsed time.
        self.blindsAndAntes = BlindsAndAntes()
    def getNumBettingRounds(self):
        return self.numBettingRounds
    # Returns the current blinds and antes structure.
    # Not recommended: the internal structure of blindsAndAntes may change
    # and break your code; prefer getCurrentBlindsAndAntes().
    def getBlindsAndAntes(self):
        return self.blindsAndAntes.getBlindsAndAntes()
    def getCurrentBlindsAndAntes(self, timeGameStart):
        """Return [[small, big], ante, levelIndex] for the current time.

        Args:
            timeGameStart: datetime at which the game started.
        """
        schedule = self.blindsAndAntes.getBlindsAndAntes()
        # Hoisted out of the loop so every interval is compared against the
        # same instant.
        currentTime = datetime.now()
        # Find the schedule interval that bounds the current time.
        # Bug fix: the original iterated range(len(schedule[0]) - 1), the
        # length of the first *entry* (3), so the last level interval was
        # never checked.
        for i in range(len(schedule) - 1):
            timeIntervalStart = timeGameStart + timedelta(minutes=schedule[i][0])
            timeIntervalEnd = timeGameStart + timedelta(minutes=schedule[i + 1][0])
            if timeIntervalStart < currentTime <= timeIntervalEnd:
                return [schedule[i][1], schedule[i][2], i]
        # Past the last scheduled level: blinds/antes are maxed out.
        # Bug fix: the original took len() of the BlindsAndAntes object
        # itself, which raised TypeError.
        maxIndex = len(schedule) - 1
        return [schedule[maxIndex][1], schedule[maxIndex][2], maxIndex]
    # This function returns the number of cards that are dealt per
    # betting round.
    # Input: bettingRound
    # Output: a list of [numHoleCards, numBoardCards] for this betting
    # round.
    def getNumCardsPerBettingRound(self, bettingRound):
        return self.numCardsPerBettingRound[bettingRound]
| StarcoderdataPython |
5164840 | """
VCD (Video Content Description) library v4.3.1
Project website: http://vcd.vicomtech.org
Copyright (C) 2021, Vicomtech (http://www.vicomtech.es/),
(Spain) all rights reserved.
VCD is a Python library to create and manage VCD content version 4.3.1.
VCD is distributed under MIT License. See LICENSE.
"""
import unittest
import os
import vcd.core as core
import vcd.types as types
import uuid
import re
class TestBasic(unittest.TestCase):
    # Create some basic content, without time information, and do some basic search
    def test_uid_types(self):
        """UIDs may be passed as ints or stringified ints; VCD returns strings."""
        # 1.- Create a VCD instance
        vcd = core.VCD()
        # We can add elements and get UIDs as strings
        uid0 = vcd.add_object("Mike", "Person")
        self.assertEqual(isinstance(uid0, str), True)
        self.assertEqual(uid0, "0")
        # We can also specify which UID we will like our elements to have
        # We can use integers and stringified integers
        # Response is always string
        uid1 = vcd.add_object(name="George", semantic_type="Person", uid=1)  # integer
        uid2 = vcd.add_object(name="Susan", semantic_type="Person", uid="2")  # stringified integer
        self.assertEqual(vcd.has(core.ElementType.object, uid1), True)
        self.assertEqual(vcd.has(core.ElementType.object, uid2), True)
        self.assertEqual(uid1, "1")
        self.assertEqual(uid2, "2")
        # In general, the user can use integers or stringified integers for all public functions
        vcd.add_object_data(2, types.boolean("checked", True))
        vcd.add_object_data("2", types.boolean("double-checked", True))
        # Same happens with ontology uids
        ont_uid_0 = vcd.add_ontology(ontology_name="http://www.vicomtech.org/viulib/ontology")
        self.assertEqual(isinstance(ont_uid_0, str), True)
        uid3 = vcd.add_object(name="Mark", semantic_type="#Pedestrian", ont_uid=ont_uid_0)
        uid4 = vcd.add_object(name="Rose", semantic_type="#Pedestrian", ont_uid=0)
        self.assertEqual(vcd.get_object(uid3)['ontology_uid'], '0')
        self.assertEqual(vcd.get_object(uid4)['ontology_uid'], '0')
        # All returned UIDs are strings, and when written into JSON as well
        #print(vcd.stringify(False))
        self.assertEqual(vcd.stringify(False), '{"vcd":{"metadata":{"schema_version":"4.3.1"},"objects":{"0":{"name":"Mike","type":"Person"},"1":{"name":"George","type":"Person"},"2":{"name":"Susan","type":"Person","object_data":{"boolean":[{"name":"checked","val":true},{"name":"double-checked","val":true}]},"object_data_pointers":{"checked":{"type":"boolean","frame_intervals":[]},"double-checked":{"type":"boolean","frame_intervals":[]}}},"3":{"name":"Mark","type":"#Pedestrian","ontology_uid":"0"},"4":{"name":"Rose","type":"#Pedestrian","ontology_uid":"0"}},"ontologies":{"0":"http://www.vicomtech.org/viulib/ontology"}}}')
    def test_uuid_usage_explicit_1(self):
        """Passing a UUID as uid switches VCD to UUID mode for later elements."""
        vcd = core.VCD()
        uuid1 = str(uuid.uuid4())
        # Adding an object and specifying its uid to be a previously defined UUID, from this call on VCD uses UUID
        uid1 = vcd.add_object(name='marcos', semantic_type='person', uid=uuid1)
        object = vcd.get_object(uid1)
        self.assertEqual(object['name'], 'marcos')
        uid2 = vcd.add_object(name='orti', semantic_type='person')
        # Subsequent auto-generated uids must also be canonical UUID strings.
        matches = bool(re.match(r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", uid2))
        self.assertEqual(matches, True)
        #print(vcd.stringify(False))
    def test_uuid_usage_explicit_2(self):
        """set_use_uuid(True) makes VCD generate UUIDs from the start."""
        vcd = core.VCD()
        # We can ask VCD to use UUIDs
        vcd.set_use_uuid(True)
        uid1 = vcd.add_object("marcos", "person")
        object = vcd.get_object(uid1)
        self.assertEqual(object['name'], 'marcos')
        matches = bool(re.match(r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", uid1))
        self.assertEqual(matches, True)
        uid2 = vcd.add_object('orti', 'person')
        matches = bool(re.match(r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", uid2))
        self.assertEqual(matches, True)
        #print(vcd.stringify(False))
# Script entry point: announce the file being run, then hand control to unittest.
if __name__ == '__main__':  # This changes the command-line entry point to call unittest.main()
    print("Running " + os.path.basename(__file__))
    unittest.main()
1628496 | <reponame>mateusnr/hackerrank-solutions
# Python 2 script (raw_input / print statement): reads three integers from
# stdin, one per line, then prints a**b followed by a**b mod m.
a = int(raw_input())
b = int(raw_input())
m = int(raw_input())
# pow(a, b, m) computes the modular power without materialising the huge a**b.
print pow(a,b)
print pow(a,b,m)
| StarcoderdataPython |
8009045 | import os
import unittest
from pprint import pprint
from flask import current_app
from config import config
from project import create_app
class TestDevelopmentConfig(unittest.TestCase):
    """Checks for the 'development' application configuration."""

    def setUp(self):
        # Fresh app configured for development before each test.
        self.app = create_app('development')

    def test_app_is_development(self):
        # Development mode must not flip the testing switch.
        self.assertFalse(self.app.config['TESTING'])
        self.assertFalse(current_app is None)
        expected_uri = os.environ.get('SQLALCHEMY_DATABASE_URI')
        self.assertEqual(self.app.config['SQLALCHEMY_DATABASE_URI'], expected_uri)
class TestTestingConfig(unittest.TestCase):
    """Checks for the 'testing' application configuration."""

    def setUp(self):
        # Fresh app configured for testing before each test.
        self.app = create_app('testing')

    def test_app_is_testing(self):
        self.assertTrue(self.app.config['TESTING'])
        expected_uri = os.environ.get('TEST_DATABASE_URL')
        self.assertEqual(self.app.config['SQLALCHEMY_DATABASE_URI'], expected_uri)
# Allow running this test module directly: python <module>.py
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1603097 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-11 13:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: BarcodeSet.short_name is now indexed and
    # unique per project (enforced by unique_together) instead of the previous
    # arrangement from migration 0002.

    dependencies = [
        ("projectroles", "0006_add_remote_projects"),
        ("barcodes", "0002_auto_20181211_1413"),
    ]

    operations = [
        migrations.AlterField(
            model_name="barcodeset",
            name="short_name",
            field=models.CharField(
                db_index=True,
                help_text="Short, unique identifier of barcode adapter set",
                max_length=100,
            ),
        ),
        # Uniqueness of short_name is scoped to the owning project.
        migrations.AlterUniqueTogether(
            name="barcodeset", unique_together=set([("project", "short_name")])
        ),
    ]
| StarcoderdataPython |
1700045 | """
Visualize map with COVID-19 cases
"""
from os.path import join
import logging
import numpy as np
from bokeh.plotting import figure
from bokeh.models import DateSlider
from bokeh.models import (
CustomJS,
GeoJSONDataSource,
HoverTool,
Legend,
LinearColorMapper,
Select,
GroupFilter,
CDSView,
Button,
Label
)
from bokeh.layouts import column, row
from bokeh.io import curdoc
from bokeh.palettes import Purples
from bokeh.themes import Theme
from database import DataBase
from utilities import cwd
from sql import (
US_MAP_PIVOT_VIEW_TABLE,
OPTIONS_TABLE
)
from nytimes import (
LEVELS_TABLE,
DATES_TABLE
)
from wrangler import STATE_MAP_TABLE
# Module-level logger; INFO keeps map-building progress visible on the console.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
class Map:
    """
    Map Layout Class
    """
    # Builds a bokeh choropleth of COVID-19 cases per US county, with state
    # outlines, a hover tool, a legend, a state selector, a date slider and a
    # play/pause animation button.  All interactivity is implemented with
    # client-side CustomJS callbacks so the served document needs no server
    # round-trips after load.

    def __init__(self, **kwargs):
        # Remaining **kwargs (e.g. plot_width/plot_height) are forwarded to figure().
        self.palette = kwargs.pop('palette')

        # init metadata dictionary
        self.meta = dict()

        # get data and metadata from database
        _db = DataBase()
        self.counties = _db.get_geotable(US_MAP_PIVOT_VIEW_TABLE)
        self.meta['levels'] = _db.get_table(LEVELS_TABLE)
        self.meta['dates'] = _db.get_table(DATES_TABLE, parse_dates=['date'])
        self.meta['options'] = _db.get_table(OPTIONS_TABLE)
        _cols = ['state_id', 'geometry']
        self.states = _db.get_geotable(STATE_MAP_TABLE, columns=_cols)
        _db.close()

        # format metadata
        self.meta['levels'] = list(self.meta['levels']['level'])
        self.meta['dates'] = list(self.meta['dates']['date'])
        _id, _state = self.meta['options']['id'], self.meta['options']['state']
        # options become (id, state-name) pairs consumed by the Select widget
        self.meta['options'] = list(zip(_id, _state))

        # init plot
        self.plot = figure(match_aspect=True, toolbar_location='right',
                           tools="box_zoom, wheel_zoom, pan, reset, save",
                           name='maps', **kwargs)

        # hide axes
        self.plot.axis.visible = False

        # init class variables
        self.controls = dict()
        # GeoJSON sources shared by the glyphs and the JS callbacks below
        self.srcs = dict(counties=GeoJSONDataSource(geojson=self.counties.to_json()),
                         states=GeoJSONDataSource(geojson=self.states.to_json()))

        # build map
        self.plot_map()
        log.debug('map init')

    def __add_counties(self):
        """Add county patches to figure
        """
        # build county colors and line parameters
        # field 'm' holds the color-level index (0-9) per county
        _color_mapper = LinearColorMapper(palette=self.palette, low=0, high=9)
        _color = dict(field='m', transform=_color_mapper)
        _params = dict(line_color='darkgrey',
                       fill_color=_color, line_width=0.5)
        _params['name'] = 'counties'

        # add counties to plot
        self.plot.patches(xs='xs', ys='ys',
                          source=self.srcs['counties'], **_params)
        log.debug('patches added')

    def __add_states(self):
        """Add state lines to figure
        """
        # build state colors and line parameters
        _params = dict(line_color='darkgrey', line_width=0.5, name='states')

        # add state to plot
        self.plot.multi_line(
            xs='xs', ys='ys', source=self.srcs['states'], **_params)
        log.debug('state lines added')

    def __add_label(self):
        """ Add date label for animation
        """
        # Large translucent date stamp shown while the animation runs; text is
        # filled in by the play-button callback.
        self.controls['label'] = Label(x=0.35 * self.plot.plot_width,
                                       y=0.01 * self.plot.plot_height,
                                       x_units='screen', y_units='screen',
                                       text='', render_mode='css',
                                       text_font_size=f"{0.10*self.plot.plot_height}px",
                                       text_color='#eeeeee')
        self.plot.add_layout(self.controls['label'])
        log.debug('label added')

    def __add_hover(self):
        """Add hove tool to figure
        """
        # Hover only over county patches; c/d/pop come from the counties source.
        _hover = HoverTool(renderers=self.plot.select('counties'),
                           tooltips=[('County', '@name'),
                                     ('Cases', '@c{0,0}'),
                                     ('Deaths', '@d{0,0}'),
                                     ('Population', '@pop{0,0}')])
        self.plot.add_tools(_hover)
        log.debug('hover tool added')

    def __add_legend(self):
        """Add legend to plot
        """
        _levels = self.meta['levels']

        # names for custom legend
        # Build "low to high" range labels from consecutive level boundaries;
        # the last finite level becomes "N+".
        _names = []
        for _level, _lead in zip(_levels, _levels[1:] + [np.nan]):
            if _level == 0:
                _names.append(f'{_level:,.0f}')
            elif not np.isinf(_lead):
                _names.append(f'{_level:,.0f} to {_lead:,.0f}')
            else:
                _names.append(f'{_level:,.0f}+')
                break

        # quad parameters
        # Invisible quads act as legend swatches only (never drawn on the map).
        _params = dict(top=0, bottom=0, left=0, right=0, fill_color=None,
                       visible=False)
        _items = []
        for i in reversed(range(len(self.palette))):
            _params['fill_color'] = self.palette[i]
            _items += [(_names[i], [self.plot.quad(**_params)])]

        # add lagend to plot
        self.plot.add_layout(Legend(items=_items, location='bottom_right'))
        self.plot.x_range.only_visible = True
        self.plot.y_range.only_visible = True
        log.debug('legend added added')

    def add_select(self):
        """Build select control
        """
        # select control
        self.controls['select'] = Select(value='a', options=self.meta['options'],
                                         max_width=self.plot.plot_width-35)

        # map views
        # The filter group is rewritten client-side by the callback; the
        # initial '12' value here is just a placeholder.
        _filter = GroupFilter(column_name='state_id', group='12')
        _counties_on = CDSView(source=self.srcs['counties'], filters=[_filter])
        _counties_off = CDSView(source=self.srcs['counties'], filters=[])
        _states_on = CDSView(source=self.srcs['states'], filters=[_filter])
        _states_off = CDSView(source=self.srcs['states'], filters=[])

        _args = dict(counties_src=self.srcs['counties'], states_src=self.srcs['states'],
                     counties_glyph=self.plot.select('counties')[0],
                     states_glyph=self.plot.select(
                         'states')[0], filter=_filter,
                     counties_view_on=_counties_on, states_view_on=_states_on,
                     counties_view_off=_counties_off, states_view_off=_states_off)
        # '00' selects the whole country (unfiltered views); any other value
        # filters both layers to the chosen state_id.
        _callback = CustomJS(args=_args,
                             code="""
                             if (cb_obj.value != '00'){
                                 console.log(cb_obj.value);
                                 filter.group = cb_obj.value;
                                 counties_glyph.view = counties_view_on;
                                 states_glyph.view = states_view_on;
                             }
                             else{
                                 console.log(cb_obj.value);
                                 counties_glyph.view = counties_view_off;
                                 states_glyph.view = states_view_off;
                             }
                             counties_src.change.emit();
                             states_src.change.emit();
                             """)
        self.controls['select'].js_on_change('value', _callback)
        log.debug('select control added')

    def add_slider(self):
        """Build slider
        """
        self.controls['slider'] = DateSlider(start=self.meta['dates'][-1].date(),
                                             end=self.meta['dates'][0].date(),
                                             value=self.meta['dates'][0].date(),
                                             width=self.plot.plot_width-40-84,
                                             title='Reported Date')

        # Per-day values are stored in pivoted columns c<N>/d<N>/m<N>; the
        # callback copies the selected day's columns into the displayed
        # c/d/m columns.
        _callback = CustomJS(args=dict(source=self.srcs['counties'],
                                       date=self.controls['slider']),
                             code="""
                             // javascript code
                             var data = source.data;
                             var cur_day = data['day'];

                             // from DateSlider
                             var day = Math.floor((date.end - date.value)/(1000*60*60*24));

                             // create column names
                             var ci = 'c'.concat(day.toString());
                             var di = 'd'.concat(day.toString());
                             var mi = 'm'.concat(day.toString());

                             // change data
                             if (cur_day[0] != day){
                                 for (var i=0; i < cur_day.length; i++){
                                     data['c'][i] = data[ci][i];
                                     data['d'][i] = data[di][i];
                                     data['m'][i] = data[mi][i];
                                     cur_day[0] = day;
                                 }
                             }
                             source.change.emit();
                             """)
        self.controls['slider'].js_on_change('value', _callback)
        log.debug('slider added')

    def add_button(self):
        """Build animation button
        """
        self.controls['button'] = Button(label='► Play', width=80, height=60)

        # Toggles between Play/Pause; while playing, a JS interval advances the
        # date slider one day every 750 ms (wrapping at the end) and paints the
        # current date into the on-map label.
        _callback = CustomJS(args=dict(button=self.controls['button'],
                                       slider=self.controls['slider'],
                                       label=self.controls['label']),
                             code="""
                             function fDate(ms){
                                 const months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
                                                 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
                                 var d = new Date(ms);
                                 var date = d.getDate();
                                 if (date < 10){
                                     date = '0' + date;
                                 }
                                 return `${date} ${months[d.getMonth()]} ${d.getFullYear()}`
                             };

                             var increment_slider = function(){
                                 if (button.label == '► Play'){
                                     label.text = ""
                                     clearInterval(interval);
                                 }
                                 else{
                                     // update slider value
                                     var temp = slider.value;
                                     temp = temp + 1000*60*60*24;
                                     if (temp > slider.end){
                                         temp = slider.start;
                                     }
                                     slider.value = temp;

                                     // add date label to graph
                                     var d = new Date(temp + 1000*60*60*24);
                                     label.text = fDate(d)
                                 }
                             };

                             if (button.label == '► Play'){
                                 button.label = '❚❚ Pause';
                                 var interval = setInterval(increment_slider, 750, slider);
                             }
                             else{
                                 button.label = '► Play';
                                 clearInterval(interval);
                             };
                             """)
        self.controls['button'].js_on_click(_callback)
        log.debug('button added')

    def plot_map(self):
        """ Build map elements
        """
        # Order matters: glyphs first so hover/legend can find them by name.
        self.__add_counties()
        self.__add_states()
        self.__add_hover()
        self.__add_label()
        self.__add_legend()
        self.add_select()
        self.add_slider()
        self.add_button()
# Stand-alone mode: `bokeh serve` imports this module under a name starting
# with 'bokeh_app', so this guard lets the Map class be exercised in isolation.
if __name__[:9] == 'bokeh_app':
    print('unit testing...')

    # unit test module in stand alone mode
    PALETTE = list(reversed(Purples[8]))
    PLOT = Map(plot_width=800, plot_height=400, palette=PALETTE)
    LAYOUT = column(PLOT.controls['select'],
                    PLOT.plot,
                    row(PLOT.controls['slider'], PLOT.controls['button']))

    curdoc().add_root(LAYOUT)
    curdoc().title = 'maps'
    curdoc().theme = Theme(filename=join(cwd(), "theme.yaml"))
| StarcoderdataPython |
228771 |
import urllib.request, json
from .models import Sources, Articles, News
# Getting api key
api_key = None
# Getting the news and article base url
news_base_url = None
article_base_url = None
source_base_url = None
# NOTE(review): search_base_url is read by search_news() but configure_request()
# never assigns it — confirm the intended config key and wire it up at startup.
search_base_url = None
def configure_request(app):
    """
    Bind the module-level API settings from the Flask app config.

    Must be called once at startup, before any of the request helpers
    (get_news, get_sources, get_articles, search_news) are used.

    :param app: the Flask application (only app.config is read)
    """
    global api_key, news_base_url, article_base_url, source_base_url, search_base_url
    api_key = app.config['NEWS_API_KEY']
    news_base_url = app.config['NEWS_API_BASE_URL']
    article_base_url = app.config['ARTICLE_API_BASE_URL']
    source_base_url = app.config['SOURCE_API_BASE_URL']
    # Bug fix: search_base_url was never configured, so search_news() always
    # formatted the initial None.  .get() keeps apps without this key working.
    # NOTE(review): confirm 'SEARCH_API_BASE_URL' matches the key in config.py.
    search_base_url = app.config.get('SEARCH_API_BASE_URL')
def process_results(news_list):
    """
    Convert raw article dicts from the API into News model objects.

    Articles without an image URL are skipped.

    :param news_list: list of article dictionaries from the API response
    :return: list of News objects
    """
    news_results = []
    for item in news_list:
        image_url = item.get('urlToImage')
        if not image_url:
            # Nothing to display without an image — skip this article.
            continue
        news_results.append(News(item.get('author'),
                                 item.get('title'),
                                 item.get('description'),
                                 image_url,
                                 item.get('publishedAt')))
    return news_results
def get_news():
    """
    Fetch the latest articles from the news API.

    :return: list of News objects, or None when the response had no articles
    """
    get_news_url = news_base_url.format(api_key)
    print(get_news_url)
    with urllib.request.urlopen(get_news_url) as url:
        get_news_data = url.read()
        get_news_response = json.loads(get_news_data)

        news_results = None

        if get_news_response['articles']:
            news_results_list = get_news_response['articles']
            # Bug fix: this endpoint returns *articles*, so they must be parsed
            # with process_results (building News objects), not process_sources.
            news_results = process_results(news_results_list)

    return news_results
def get_sources(category):
    """
    Fetch news sources for a category and return them as Sources objects.

    :param category: source category, e.g. 'business'
    :return: list of Sources objects, or None when the response is empty
    """
    get_sources_url = source_base_url.format(category, api_key)
    print(get_sources_url)

    with urllib.request.urlopen(get_sources_url) as response:
        payload = json.loads(response.read())

    sources_results = None
    if payload['sources']:
        sources_results = process_sources(payload['sources'])

    return sources_results
def process_sources(sources_list):
    """
    Transform raw source dicts from the API into Sources model objects.

    :param sources_list: list of source dictionaries
    :return: list of Sources objects
    """
    sources_results = []
    for sources_item in sources_list:
        # Renamed 'id' to avoid shadowing the builtin.
        source_id = sources_item.get('id')
        name = sources_item.get('name')
        description = sources_item.get('description')
        url = sources_item.get('url')
        # Removed a leftover per-item debug print(sources_item) that spammed
        # stdout on every request.  category/language/country exist in the raw
        # dicts but the Sources model only stores id, name, description, url.
        sources_object = Sources(source_id, name, description, url)
        sources_results.append(sources_object)

    return sources_results
def get_articles(id):
    """
    Fetch the articles published by a given news source.

    :param id: identifier of the source
    :return: list of Articles objects, or None when the response is empty
    """
    get_articles_url = article_base_url.format(id, api_key)
    print(get_articles_url)

    with urllib.request.urlopen(get_articles_url) as response:
        payload = json.loads(response.read())

    articles_results = None
    if payload['articles']:
        articles_results = process_articles(payload['articles'])

    return articles_results
def process_articles(articles_list):
    """
    Process the articles result and transform the dicts into Articles objects.

    :param articles_list: list of dictionaries that contain articles
    :return: list of Articles objects
    """
    fields = ('author', 'title', 'description', 'url',
              'urlToImage', 'publishedAt', 'content')
    # One Articles object per raw dict, pulling the fields in constructor order.
    return [Articles(*[item.get(field) for field in fields])
            for item in articles_list]
def search_news(keyword):
    """
    Search the news API for articles matching a keyword.

    :param keyword: the search term
    :return: list of Articles objects (empty when nothing matched)
    """
    url = search_base_url.format(keyword, api_key)
    # Bug fix: the request previously opened the raw template string
    # search_base_url instead of the formatted url, so the keyword and API key
    # were never sent to the service.
    with urllib.request.urlopen(url) as response:
        data = json.loads(response.read())
        articles = []
        if data['articles']:
            articles_list = data['articles']
            articles = process_articles(articles_list)

    return articles
| StarcoderdataPython |
3531437 | <filename>src/movement.py
#!/usr/bin/env python3
# coding: utf-8
import sfml as sf
from .helper import time
from .types import *
class Movement:
    """
    Linear interpolation movement between two given positions.
    """

    def __init__(self, vec, duration=1):
        # Constant velocity needed to cover `vec` in `duration` seconds.
        self.speed = Vec(*vec) / duration
        self.duration = duration
        self.time = 0
        self.start_pos = None

    def __repr__(self):
        return '<Movement {} ending in {}>'.format(self.speed, self.duration)

    @classmethod
    def link(cls, start, dest, duration=None):
        """
        Creates a Movement object from two points.

        :param start: The starting position
        :param dest: The destination
        :param duration: The duration of the movement
        :return: A Movement object
        """
        delta = Vec(*dest) - Vec(*start)
        return cls(delta) if duration is None else cls(delta, duration)

    def __bool__(self):
        # Truthy while some displacement is still left to travel.
        remaining = self.duration - self.time
        return bool(self.speed * remaining)

    @property
    def end_pos(self):
        return self.start_pos + self.speed * self.duration

    def apply(self, pos, dt):
        """
        Time-dependent movement of the given position. Returns the remaining
        movement to achieve to finish the movement.

        :param pos: The position to move
        :param dt: The time between the current and the previous frame
        """
        if self.start_pos is None:
            # Capture the origin lazily on the first frame.
            self.start_pos = pos
        if self.time < self.duration:
            self.time += dt
        elif self.time > self.duration:
            # Clamp any overshoot from the last increment.
            self.time = self.duration
        return self.start_pos + self.speed * self.time

    def terminate(self):
        """
        Stop the movement.
        """
        self.duration = 0
        self.speed = Vec(0, 0)

    def copy(self):
        clone = Movement(Vec(0, 0))
        clone.speed = self.speed.copy()
        clone.duration = self.duration
        return clone
idle = Movement((0, 0)) | StarcoderdataPython |
6432988 | from spikeextractors import RecordingExtractor
import numpy as np
class CommonReferenceRecording(RecordingExtractor):
    """Lazy re-referencing wrapper around another RecordingExtractor.

    Subtracts, per timestamp, either the median ('median'), the mean
    ('average'), or the trace of chosen channels ('single') — optionally
    computed group-wise when `groups` is given.
    """

    preprocessor_name = 'CommonReference'
    installed = True  # check at class level if installed or not
    # GUI parameter descriptors consumed by the spiketoolkit preprocessor UI.
    preprocessor_gui_params = [
        {'name': 'reference', 'type': 'str', 'value': 'median', 'default': 'median',
         'title': "Reference type ('median', 'average', or 'single')"},
        {'name': 'groups', 'type': 'int_list_list', 'value': None, 'default': None, 'title': "List of int lists containins the channels for splitting the reference, \
            The CMR, CAR, or referencing with respect to single channels are applied group-wise. It is useful when dealing with different channel groups, e.g. multiple tetrodes."},
        {'name': 'ref_channels', 'type': 'int_list', 'value': None, 'default': None, 'title': "If no 'groups' are specified, all channels are referenced to 'ref_channels'. \
            If 'groups' is provided, then a list of channels to be applied to each group is expected. If 'single' reference, a list of one channel is expected."},
        {'name': 'dtype', 'type': 'dtype', 'value': None, 'default': None,
         'title': "Traces dtype. If None, dtype is maintained."},
        {'name': 'verbose', 'type': 'bool', 'value': False, 'default': False,
         'title': "If True, then the function will be verbose"}
    ]
    installation_mesg = ""  # err

    def __init__(self, recording, reference='median', groups=None, ref_channels=None, dtype=None, verbose=False):
        # Validate inputs up front; re-referencing itself happens lazily in
        # get_traces().
        if not isinstance(recording, RecordingExtractor):
            raise ValueError("'recording' must be a RecordingExtractor")
        if reference != 'median' and reference != 'average' and reference != 'single':
            raise ValueError("'reference' must be either 'median' or 'average'")
        self._recording = recording
        self._ref = reference
        self._groups = groups
        if self._ref == 'single':
            assert ref_channels is not None, "With 'single' reference, provide 'ref_channels'"
            if self._groups is not None:
                # One reference channel per group.
                assert len(ref_channels) == len(self._groups), "'ref_channel' and 'groups' must have the " \
                                                               "same length"
            else:
                # Accept either a single int or a one-element list.
                if isinstance(ref_channels, (list, np.ndarray)):
                    assert len(ref_channels) == 1, "'ref_channel' with no 'groups' can be int or a list of one element"
                else:
                    assert isinstance(ref_channels, (int, np.integer)), "'ref_channels' must be int"
                    ref_channels = [ref_channels]
        self._ref_channel = ref_channels
        if dtype is None:
            self._dtype = recording.get_dtype()
        else:
            self._dtype = dtype
        self.verbose = verbose
        RecordingExtractor.__init__(self)
        self.copy_channel_properties(recording=self._recording)

    def get_sampling_frequency(self):
        # Delegated to the wrapped recording.
        return self._recording.get_sampling_frequency()

    def get_num_frames(self):
        return self._recording.get_num_frames()

    def get_channel_ids(self):
        return self._recording.get_channel_ids()

    def get_traces(self, channel_ids=None, start_frame=None, end_frame=None):
        """Return re-referenced traces for the requested channels/frames.

        The reference is computed from the full channel set (or each group),
        then the requested channel_ids are sliced from the result.
        """
        if start_frame is None:
            start_frame = 0
        if end_frame is None:
            end_frame = self.get_num_frames()
        if channel_ids is None:
            channel_ids = self.get_channel_ids()
        if isinstance(channel_ids, (int, np.integer)):
            channel_ids = [channel_ids]

        if self._ref == 'median':
            if self._groups is None:
                if self.verbose:
                    print('Common median reference using all channels')
                traces = self._recording.get_traces(start_frame=start_frame, end_frame=end_frame)
                traces = traces - np.median(traces, axis=0, keepdims=True)
                return traces[channel_ids].astype(self._dtype)
            else:
                # Drop group members not present in the recording.
                new_groups = []
                for g in self._groups:
                    new_chans = []
                    for chan in g:
                        if chan in self._recording.get_channel_ids():
                            new_chans.append(chan)
                    new_groups.append(new_chans)
                if self.verbose:
                    print('Common median in groups: ', new_groups)
                traces = np.vstack(np.array([self._recording.get_traces(channel_ids=split_group,
                                                                        start_frame=start_frame, end_frame=end_frame)
                                             - np.median(self._recording.get_traces(channel_ids=split_group,
                                                                                    start_frame=start_frame,
                                                                                    end_frame=end_frame),
                                                         axis=0, keepdims=True) for split_group in new_groups]))
                return traces[channel_ids].astype(self._dtype)
        elif self._ref == 'average':
            # NOTE(review): this message prints even when groups are used,
            # unlike the 'median' branch — confirm intended verbosity.
            if self.verbose:
                print('Common average reference using all channels')
            if self._groups is None:
                traces = self._recording.get_traces(start_frame=start_frame, end_frame=end_frame)
                traces = traces - np.mean(traces, axis=0, keepdims=True)
                return traces[channel_ids].astype(self._dtype)
            else:
                new_groups = []
                for g in self._groups:
                    new_chans = []
                    for chan in g:
                        if chan in self._recording.get_channel_ids():
                            new_chans.append(chan)
                    new_groups.append(new_chans)
                if self.verbose:
                    print('Common average in groups: ', new_groups)
                traces = np.vstack(np.array([self._recording.get_traces(channel_ids=split_group,
                                                                        start_frame=start_frame, end_frame=end_frame)
                                             - np.mean(self._recording.get_traces(channel_ids=split_group,
                                                                                  start_frame=start_frame,
                                                                                  end_frame=end_frame),
                                                       axis=0, keepdims=True) for split_group in new_groups]))
                return traces[channel_ids].astype(self._dtype)
        elif self._ref == 'single':
            if self._groups is None:
                if self.verbose:
                    print('Reference to channel', self._ref_channel)
                traces = self._recording.get_traces(channel_ids=channel_ids, start_frame=start_frame,
                                                    end_frame=end_frame) \
                         - self._recording.get_traces(channel_ids=self._ref_channel, start_frame=start_frame,
                                                      end_frame=end_frame)
                return traces.astype(self._dtype)
            else:
                new_groups = []
                for g in self._groups:
                    new_chans = []
                    for chan in g:
                        if chan in self._recording.get_channel_ids():
                            new_chans.append(chan)
                    new_groups.append(new_chans)
                if self.verbose:
                    print('Reference', new_groups, 'to channels', self._ref_channel)
                # Each group is referenced to its paired channel in _ref_channel.
                traces = np.vstack(np.array([self._recording.get_traces(channel_ids=split_group,
                                                                        start_frame=start_frame, end_frame=end_frame)
                                             - self._recording.get_traces(channel_ids=[ref], start_frame=start_frame,
                                                                          end_frame=end_frame)
                                             for (split_group, ref) in zip(new_groups, self._ref_channel)]))
                return traces.astype(self._dtype)
def common_reference(recording, reference='median', groups=None, ref_channels=None, dtype=None, verbose=False):
    '''
    Re-references the recording extractor traces.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor to be re-referenced
    reference: str
        One of 'median', 'average', or 'single'.
        'median': common median reference (CMR) — the median of the selected
        channels is removed for each timestamp.
        'average': common average reference (CAR) — the mean of the selected
        channels is removed for each timestamp.
        'single': the selected channel(s) is removed from all channels.
    groups: list
        List of lists containing the channels for splitting the reference.
        The CMR, CAR, or single-channel referencing is then applied group-wise,
        which is useful with distinct channel groups (e.g. multiple tetrodes).
    ref_channels: list or int
        Without 'groups', all channels are referenced to 'ref_channels'.
        With 'groups', one reference channel per group is expected.
        For 'single' reference, a one-element list or an int is expected.
    dtype: str
        dtype of the returned traces. If None, dtype is maintained
    verbose: bool
        If True, output is verbose

    Returns
    -------
    referenced_recording: CommonReferenceRecording
        The re-referenced recording extractor object
    '''
    referenced_recording = CommonReferenceRecording(recording=recording,
                                                    reference=reference,
                                                    groups=groups,
                                                    ref_channels=ref_channels,
                                                    dtype=dtype,
                                                    verbose=verbose)
    return referenced_recording
| StarcoderdataPython |
4879249 | # Copyright The IETF Trust 2019-2020, All Rights Reserved
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-10-01 04:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the optional
    # remind_days_unconfirmed_assignments setting to ReviewTeamSettings.

    dependencies = [
        ('review', '0016_add_remind_days_open_reviews'),
    ]

    operations = [
        migrations.AddField(
            model_name='reviewteamsettings',
            name='remind_days_unconfirmed_assignments',
            field=models.PositiveIntegerField(blank=True, help_text="To send a periodic email reminder to reviewers of review assignments that are not accepted yet, enter the number of days between these reminders. Clear the field if you don't want these reminders to be sent.", null=True, verbose_name='Periodic reminder of not yet accepted or rejected review assignments to reviewer every X days'),
        ),
    ]
| StarcoderdataPython |
1667001 | <filename>calvin/runtime/north/plugins/port/queue/fanout_mapped_fifo.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.north.plugins.port.queue.common import QueueFull
from calvin.runtime.north.plugins.port.queue.fanout_base import FanoutBase
from calvin.utilities import calvinlogger
_log = calvinlogger.get_logger(__name__)
class FanoutMappedFIFO(FanoutBase):

    """
    A FIFO which route tokens based on a mapping to peers
    """
    # Python 2 module (note the print statement below).  Each peer (reader)
    # has its own circular FIFO of size N inherited from FanoutBase; incoming
    # tokens carry a {selector: value} payload and are routed to the peer that
    # `mapping` assigns to that selector.

    def __init__(self, port_properties, peer_port_properties):
        super(FanoutMappedFIFO, self).__init__(port_properties, peer_port_properties)
        self._type = "dispatch:mapped"

    def _state(self):
        # Persist the selector->peer mapping along with the base queue state.
        state = super(FanoutMappedFIFO, self)._state()
        state['mapping'] = self.mapping
        return state

    def _set_state(self, state):
        super(FanoutMappedFIFO, self)._set_state(state)
        self.mapping = state['mapping']

    def _set_port_mapping(self, mapping):
        # Every reader must appear exactly once as a mapping target.
        if not set(mapping.values()) == set(self.readers):
            print mapping, self.readers
            raise Exception("Illegal port mapping dictionary")
        self.mapping = mapping

    def _unwrap_data(self, data):
        # data is a Token whose value is wrapped in a {selector:value} dict
        mapped_value = data.value
        select, value = mapped_value.popitem()
        data.value = value
        peer = self.mapping[select]
        return data, peer

    def write(self, data, metadata):
        # print data, metadata
        # metadata is port_id of containing port
        data, peer = self._unwrap_data(data)
        if not self.slots_available(1, peer):
        # if not slots_available:
            raise QueueFull()
        # Write token in peer's FIFO
        write_pos = self.write_pos[peer]
        #_log.debug("WRITE2 %s %s %d\n%s" % (metadata, peer, write_pos, str(map(str, self.fifo[peer]))))
        self.fifo[peer][write_pos % self.N] = data
        self.write_pos[peer] = write_pos + 1
        return True

    def slots_available(self, length, metadata):
        # print "slots_available", length, metadata
        # Sometimes metadata = id of the outport owning this queue (called from @condition?)
        # Darn. That means that we can only check for the case where EVERY sub-queue has at least 'length' slots free...
        # Oh well, such is life.
        if metadata in self.readers:
            return self.write_pos[metadata] - self.read_pos[metadata] < self.N - length
        return all(self.write_pos[r] - self.read_pos[r] < self.N - length for r in self.readers)
| StarcoderdataPython |
1882223 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
import pytest
from resilient_sdk.util import constants
from resilient_sdk.util import package_file_helpers as package_helpers
from resilient_sdk.util.sdk_validate_issue import SDKValidateIssue
def mock_warning_issue():
    """Return a sample SDKValidateIssue at WARN severity with all fields set."""
    issue = SDKValidateIssue(
        "this issue failed",
        "description of failed issue",
        SDKValidateIssue.SEVERITY_LEVEL_WARN,
        "here's a solution"
    )
    return issue
def mock_issue_with_defaults():
    """Return an issue built with only name and description (default severity)."""
    return SDKValidateIssue("name is name", "a simple description")
def mock_debug_issue():
    """Return a sample SDKValidateIssue at DEBUG severity."""
    return SDKValidateIssue("bugged", "descr", SDKValidateIssue.SEVERITY_LEVEL_DEBUG, "can't fix")
def test_sdk_validate_issue_warning():
    """WARN issues log at warning level and sort between DEBUG and CRITICAL."""
    issue = mock_warning_issue()
    assert issue.get_logging_level() == constants.VALIDATE_LOG_LEVEL_WARNING
    # Ordering: DEBUG sorts after WARN, CRITICAL (defaults) sorts before it.
    assert issue < mock_debug_issue()
    assert issue > mock_issue_with_defaults()
    assert package_helpers.COLORS["WARNING"] in issue.error_str()
def test_sdk_validate_issue_with_defaults():
    """Default-constructed issues are CRITICAL and render with CRITICAL color."""
    issue = mock_issue_with_defaults()
    assert issue.severity == SDKValidateIssue.SEVERITY_LEVEL_CRITICAL
    assert issue.get_logging_level() == constants.VALIDATE_LOG_LEVEL_CRITICAL
    assert package_helpers.COLORS["CRITICAL"] in issue.error_str()
def test_sdk_validate_issue_debug():
    """DEBUG issues log at debug level and render with the PASS color."""
    issue = mock_debug_issue()
    assert issue.severity == SDKValidateIssue.SEVERITY_LEVEL_DEBUG
    assert issue.get_logging_level() == constants.VALIDATE_LOG_LEVEL_DEBUG
    assert package_helpers.COLORS["PASS"] in issue.error_str()
| StarcoderdataPython |
9607959 | <gh_stars>1-10
from setuptools import setup
def readme():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as handle:
        return handle.read()
# Package metadata for PyPI; long_description is loaded from README.rst via
# readme().  The console script 'mousetrap' maps to mousetrap.mousetrap:main.
setup(name='mousetrap',
      version='3.0.5',
      description='An X11 utility that hides the mouse pointer after a specified interval of time',
      long_description=readme(),
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: X11 Applications',
          'Intended Audience :: End Users/Desktop',
          'License :: OSI Approved :: MIT License',
          'Operating System :: Unix',
          'Topic :: Utilities'
      ],
      keywords='hide mouse cursor pointer x11 xlib',
      url='https://github.com/eazar001/mousetrap',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['mousetrap'],
      entry_points={"console_scripts" : ["mousetrap = mousetrap.mousetrap:main",]},
      install_requires=['python-xlib'],
      include_package_data=True,
      zip_safe=False)
| StarcoderdataPython |
1857904 | <reponame>StonyBrookNLP/dire
from typing import List, Dict
from dataclasses import dataclass, field
from collections import defaultdict
from copy import deepcopy
from metrics import hotpotqa_eval, BaseMetric
@dataclass
class LabelPredictionInstance:
    """Gold and predicted supporting facts for a single question."""
    # default_factory=list replaces the original `lambda: deepcopy([])`:
    # deep-copying a freshly created empty list is a no-op, and default_factory
    # already guarantees each instance gets its own list.
    label_supporting_facts: List = field(default_factory=list)
    predicted_supporting_facts: List = field(default_factory=list)
class SupportingFactsMetric(BaseMetric):
    """Precision/recall/F1/EM over predicted supporting facts (HotpotQA)."""

    def __init__(self) -> None:
        self.prediction_store = defaultdict(LabelPredictionInstance)
        self.score_store = defaultdict(dict)

    def compute_question_scores(self, group: LabelPredictionInstance) -> Dict[str, float]:
        """Score one question's predicted supporting facts against the gold ones."""
        predicted = group.predicted_supporting_facts
        gold = group.label_supporting_facts
        f1, precision, recall = hotpotqa_eval.sp_f1(predicted, gold)
        em = hotpotqa_eval.sp_em(predicted, gold)
        return {"f1": f1, "em": em,
                "precision": precision, "recall": recall}

    def store_prediction(self,
                         predicted_supporting_facts: List,
                         label_supporting_facts: List,
                         question_id: str) -> None:
        # defaultdict creates the per-question instance on first access.
        entry = self.prediction_store[question_id]
        entry.label_supporting_facts = label_supporting_facts
        entry.predicted_supporting_facts = predicted_supporting_facts

    def reset(self):
        """Drop all stored predictions and scores."""
        self.prediction_store = defaultdict(LabelPredictionInstance)
        self.score_store = defaultdict(dict)
| StarcoderdataPython |
4910568 | <filename>sl1m/constants_and_tools.py
import numpy as np
from sl1m.tools.obj_to_constraints import load_obj, as_inequalities, rotate_inequalities, inequalities_to_Inequalities_object
from numpy import array, asmatrix, matrix, zeros, ones
from numpy import array, dot, stack, vstack, hstack, asmatrix, identity, cross, concatenate
from numpy.linalg import norm
import numpy as np
from scipy.spatial import ConvexHull
from .qp import solve_lp
#~ import eigenpy
#from curves import bezier3
from random import random as rd
from random import randint as rdi
from numpy import squeeze, asarray
# Module-level constants shared by the sl1m helpers below.
Id = array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])  # 3x3 identity matrix
g = array([0.,0.,-9.81])        # gravity vector (m/s^2)
g6 = array([0.,0.,-9.81,0.,0.,0.])  # gravity extended to a 6D wrench
mu = 0.45                       # friction coefficient
x = array([1.,0.,0.])           # world x-axis
z = array([0.,0.,1.])           # world z-axis (up)
zero3 = zeros(3)                # 3D zero vector
eps =0.000001                   # small tolerance used in numeric comparisons
#### surface to inequalities ####
def convert_surface_to_inequality(s):
    """Convert a planar surface (3xN matrix of vertices, one per column) into
    its halfspace representation (A, b) via surfacePointsToIneq.

    The surface normal is taken from the first three vertices and flipped, if
    necessary, so that it points upwards (positive z component).
    """
    # TODO does normal orientation matter ?
    # it will for collisions
    n = cross(s[:, 1] - s[:, 0], s[:, 2] - s[:, 0])
    if n[2] <= 0.:
        # Vectorized negation replaces the original element-wise loop.
        n = -n
    n /= norm(n)
    return surfacePointsToIneq(s, n)
def replace_surfaces_with_ineq_in_phaseData(phase):
    """Replace each raw surface of `phase["S"]` with its (A, b) inequality form."""
    converted = [convert_surface_to_inequality(surface) for surface in phase["S"]]
    phase["S"] = converted
def replace_surfaces_with_ineq_in_problem(pb):
    """Convert every surface of every phase of the problem `pb` in place.

    Uses a plain loop instead of the original list comprehension, which was
    built purely for its side effects (an anti-pattern); behavior is identical.
    """
    for phase in pb["phaseData"]:
        replace_surfaces_with_ineq_in_phaseData(phase)
def ineqQHull(hull):
    """Extract the halfspace form A x <= b from a scipy ConvexHull.

    scipy stores each facet as [normal, offset] with normal.x + offset <= 0,
    hence the sign flip on the last column.
    """
    equations = hull.equations
    A = equations[:, :-1]
    b = -equations[:, -1]
    return A, b
def vectorProjection(v, n):
    """Project the direction of `v` onto the plane orthogonal to `n`.

    Both inputs are normalized internally; the returned vector is unit length.
    """
    v_hat = v / norm(v)
    n_hat = n / norm(n)
    tangent = v_hat - np.dot(v_hat, n_hat) * n_hat
    return tangent / norm(tangent)
def addHeightConstraint(K, k, val):
    """Append a minimum-height constraint (z >= val, i.e. -z <= -val) to the
    inequality system (K, k) and return the augmented pair."""
    K_aug = vstack([K, -z])
    k_aug = concatenate([k, -ones(1) * val]).reshape((-1,))
    return K_aug, k_aug
def default_transform_from_pos_normal_(transform, pos, normal):
    """Build a 4x4 homogeneous transform at `pos` whose z-axis is aligned with
    `normal` and whose x-axis follows the heading of `transform` projected
    onto the surface plane.

    Composes two Rodrigues rotations: rot1 aligns the world x-axis with the
    projected heading, rot2 aligns the world z-axis with the surface normal.
    NOTE(review): if the heading is near-parallel to its projection
    (abs(c) > 0.99), the heading rotation is skipped entirely and `transform`
    is returned with `pos` appended — confirm this shortcut is intended.
    """
    # Align the x-axis of the foot to the root heading direction.
    f = x
    xp = np.dot(transform, x).tolist()
    t = vectorProjection(xp, normal)
    v = np.cross(f, t)
    c = np.dot(f, t)
    if abs(c) > 0.99 :
        # Heading already (anti-)aligned: keep the input rotation as-is.
        return vstack( [hstack([transform,pos.reshape((-1,1))]), [ 0. , 0. , 0. , 1. ] ] )
    else:
        # Rodrigues rotation taking f onto t (u is computed but unused here).
        u = v/norm(v)
        h = (1. - c)/(1. - c**2)
        vx, vy, vz = v
        rot1 =array([[c + h*vx**2, h*vx*vy - vz, h*vx*vz + vy],
                     [h*vx*vy+vz, c+h*vy**2, h*vy*vz-vx],
                     [h*vx*vz - vy, h*vy*vz + vx, c+h*vz**2]])
    # Align the z-axis of the foot to the surface normal.
    f = z
    t = array(normal)
    t = t / norm(t)
    v = np.cross(f, t)
    c = np.dot(f, t)
    if abs(c) > 0.99 :
        # Normal (anti-)parallel to +z: no tilt needed.
        # NOTE(review): the antiparallel case (c ~ -1) also falls here and
        # yields the identity — confirm that is acceptable.
        rot2 = identity(3)
    else:
        u = v/norm(v)
        h = (1. - c)/(1. - c**2)
        vx, vy, vz = v
        rot2 =array([[c + h*vx**2, h*vx*vy - vz, h*vx*vz + vy],
                     [h*vx*vy+vz, c+h*vy**2, h*vy*vz-vx],
                     [h*vx*vz - vy, h*vy*vz + vx, c+h*vz**2]])
    rot = np.dot(rot1,rot2)
    return vstack( [hstack([rot,pos.reshape((-1,1))]), [ 0. , 0. , 0. , 1. ] ] )
def default_transform_from_pos_normal(pos, normal):
    """Return the 4x4 homogeneous transform placing a frame at `pos` with its
    z-axis rotated onto `normal` (Rodrigues rotation from +z).

    If the normal is (anti-)parallel to +z (|cos| > 0.99) the rotation is the
    identity, matching the original behavior.
    """
    f = array([0., 0., 1.])
    t = array(normal)
    t = t / norm(t)
    v = np.cross(f, t)
    c = np.dot(f, t)
    if abs(c) > 0.99:
        rot = identity(3)
    else:
        # Rodrigues formula taking f onto t.  The unused local `u` from the
        # original implementation has been removed.
        h = (1. - c) / (1. - c ** 2)
        vx, vy, vz = v
        rot = array([[c + h * vx ** 2, h * vx * vy - vz, h * vx * vz + vy],
                     [h * vx * vy + vz, c + h * vy ** 2, h * vy * vz - vx],
                     [h * vx * vz - vy, h * vy * vz + vx, c + h * vz ** 2]])
    return vstack([hstack([rot, pos.reshape((-1, 1))]), [0., 0., 0., 1.]])
# last is equality
def surfacePointsToIneq(S, normal):
    """Convert a surface (3xN vertex matrix `S`, one vertex per column) with
    normal `normal` into inequality form (A, b).

    The vertices are rotated into the surface frame, their 2D convex hull is
    taken, and the hull's halfspaces are rotated back.  One final row encodes
    the plane constraint n . x <= n . S[:,0] (treated downstream as the
    equality row — see the comment above the function).
    """
    n = array(normal)
    tr = default_transform_from_pos_normal(array([0., 0., 0.]), n)
    trinv = tr.copy()
    trinv[:3, :3] = tr[:3, :3].T
    trpts = [tr[:3, :3].dot(s)[:2] for s in S.T]
    hull = ConvexHull(array(trpts))
    A, b = ineqQHull(hull)
    # Pad A with a zero z-column before rotating back to 3D.
    A = hstack([A, zeros((A.shape[0], 1))])
    ine = inequalities_to_Inequalities_object(A, b)
    ine = rotate_inequalities(ine, trinv)
    # Plane constraint given by the first vertex and the normal.
    d = array([n.dot(S[:, 0])])
    # The original code first built a two-sided (n, -n) system and then
    # immediately overwrote it with the one-sided version below; the dead
    # assignments have been removed.
    A = vstack([ine.A, n])
    b = concatenate([ine.b, d]).reshape((-1,))
    return A, b
############ BENCHMARKING ###############
# Prefer time.perf_counter (Python >= 3.3); fall back to the legacy
# time.clock on very old interpreters.  Note time.clock was removed in
# Python 3.8, so the fallback only matters for old runtimes.
try:
    from time import perf_counter as clock
except ImportError:
    from time import clock
def timMs(t1, t2):
    """Elapsed time between two clock() readings `t1` and `t2`, in milliseconds."""
    elapsed_seconds = t2 - t1
    return elapsed_seconds * 1000.
| StarcoderdataPython |
3428944 | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A storage for persisting results and managing stage."""
import abc
from typing import Iterable, List
import tensorflow.compat.v2 as tf
class ModelContainer:
  """A container for a model and its metadata.

  Containers are ordered by `score` only; `model` and `metrics` do not
  participate in comparisons.
  """

  def __init__(self, score: float, model: "tf.keras.Model",
               metrics: "List[float]"):
    self.score = score
    self.model = model
    self.metrics = metrics

  def __eq__(self, other) -> bool:
    # Return NotImplemented for foreign types instead of raising
    # AttributeError, so comparisons against non-containers degrade
    # gracefully (Python then falls back to identity for ==).
    if not isinstance(other, ModelContainer):
      return NotImplemented
    return self.score == other.score

  def __lt__(self, other) -> bool:
    if not isinstance(other, ModelContainer):
      return NotImplemented
    return self.score < other.score
class Storage(abc.ABC):
  """A storage for persisting results and managing state."""

  @abc.abstractmethod
  def save_model(self, model_container: ModelContainer):
    """Stores a model and its metadata."""
    # TODO: How do we enforce that save_model is called only once per
    # model?

  @abc.abstractmethod
  def get_models(self) -> Iterable[tf.keras.Model]:
    """Returns all stored models."""

  @abc.abstractmethod
  def get_best_models(self, num_models: int = 1) -> Iterable[tf.keras.Model]:
    """Returns the top `num_models` stored models in descending order."""

  @abc.abstractmethod
  def get_model_metrics(self) -> Iterable[Iterable[float]]:
    """Returns the metrics for all stored models."""
| StarcoderdataPython |
379336 | from HPC_Task import Task, Workloads
from HPC_Cluster import Cluster
import os
import math
import json
import time
import sys
import random
from random import shuffle
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import scipy.signal
import gym
from gym import spaces
from gym.spaces import Box, Discrete
from gym.utils import seeding
# define MAX queue Size
MAX_QUEUE_SIZE = 512            # queue slots visible to the agent per step
# define MLP Size
MLP_SIZE = 1024                 # hidden-layer width for the policy network
MAX_WAIT_TIME = 8 * 60 * 60  # wait time is 8 hours.
MAX_RUN_TIME = 8 * 60 * 60  # runtime is 8 hours
# each task has three features: wait_time, cost , runtime, machine states,
TASK_FEATURES = 4               # per-task observation features
DEBUG = False
TASK_SEQUENCE_SIZE = 512        # number of tasks scheduled per episode
def combined_shape(length, shape=None):
    """Prepend a batch dimension `length` to `shape`.

    Returns (length,) when shape is None, (length, shape) for a scalar
    shape, and (length, *shape) for a tuple/list shape.
    """
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)
def placeholder(dim=None):
    """Float32 TF1 placeholder with a leading batch dimension of None."""
    batch_shape = combined_shape(None, dim)
    return tf.placeholder(dtype=tf.float32, shape=batch_shape)
def placeholders(*args):
    """Create one float32 placeholder per dimension spec in `args`."""
    return list(map(placeholder, args))
def placeholder_from_space(space):
    """Placeholder matching a gym space: Box -> float32, Discrete -> int32."""
    if isinstance(space, Box):
        return placeholder(space.shape)
    if isinstance(space, Discrete):
        return tf.placeholder(dtype=tf.int32, shape=(None,))
    raise NotImplementedError
def placeholders_from_spaces(*args):
    """Create one placeholder per gym space in `args`."""
    return list(map(placeholder_from_space, args))
def get_vars(scope=''):
    """Return the trainable variables whose name contains `scope`."""
    return [var for var in tf.trainable_variables() if scope in var.name]
def count_vars(scope=''):
    """Total number of scalar parameters across variables matching `scope`."""
    matched = get_vars(scope)
    return sum(np.prod(var.shape.as_list()) for var in matched)
def discount_cumsum(x, discount):
    """Discounted cumulative sums: out[i] = sum_{j>=i} discount**(j-i) * x[j].

    Implemented as an IIR filter over the reversed sequence.
    """
    reversed_sums = scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)
    return reversed_sums[::-1]
class HPC_Environment(gym.Env):
    """Gym environment for reinforcement-learning HPC batch-job scheduling.

    The agent observes up to MAX_QUEUE_SIZE queued tasks (TASK_FEATURES
    normalized numbers each) and picks the index of the task to schedule
    next.  An episode covers TASK_SEQUENCE_SIZE tasks drawn from a workload
    trace; the final reward compares the RL schedule's accumulated
    `task_score` against the best of several classic heuristics
    (SJF, smallest-first, FCFS, F1-F4).
    """

    def __init__(self):
        super(HPC_Environment, self).__init__()
        print("Initialize")
        # One discrete action per visible queue slot; flat observation vector.
        self.action_space = spaces.Discrete(MAX_QUEUE_SIZE)
        self.observation_space = spaces.Box(low=0.0, high=1.0,
                                            shape=(TASK_FEATURES * MAX_QUEUE_SIZE,),
                                            dtype=np.float32)
        self.task_queue = []            # tasks waiting to be scheduled
        self.running_tasks = []         # tasks currently holding machines
        self.visible_tasks = []         # queue slice exposed in observations
        self.pairs = []                 # [task, *features] per visible slot
        self.current_timestamp = 0      # simulated wall clock
        self.start = 0                  # index of the episode's first task
        self.next_arriving_task_idx = 0
        self.last_task_in_batch = 0
        self.num_task_in_batch = 0
        self.start_idx_last_reset = 0
        self.loads = None               # Workloads trace (set in my_init)
        self.cluster = None             # Cluster model (set in my_init)
        self.bsld_algo_dict = {}
        self.scheduled_rl = {}          # task_id -> score for the RL schedule
        self.penalty = 0
        self.pivot_task = False
        self.scheduled_scores = []      # heuristic baseline totals
        self.enable_preworkloads = False
        self.pre_workloads = []

    def my_init(self, workload_file='', sched_file=''):
        """Load the workload trace and build the matching cluster model."""
        print("loading from dataset:", workload_file)
        self.loads = Workloads(workload_file)
        self.cluster = Cluster("Cluster", self.loads.max_nodes, self.loads.max_procs / self.loads.max_nodes)
        self.penalty_task_score = TASK_SEQUENCE_SIZE * self.loads.max_exec_time / 10

    def seed(self, seed=None):
        """Seed the environment's RNG (gym API)."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def f1_score(self, task):
        """Heuristic priority F1 (lower is scheduled first)."""
        submit_time = task.submit_time
        request_processors = task.request_number_of_processors
        request_time = task.request_time
        return (np.log10(request_time) * request_processors + 870 * np.log10(submit_time))

    def f2_score(self, task):
        """Heuristic priority F2 (lower is scheduled first)."""
        submit_time = task.submit_time
        request_processors = task.request_number_of_processors
        request_time = task.request_time
        return (np.sqrt(request_time) * request_processors + 25600 * np.log10(submit_time))

    def f3_score(self, task):
        """Heuristic priority F3 (lower is scheduled first)."""
        submit_time = task.submit_time
        request_processors = task.request_number_of_processors
        request_time = task.request_time
        return (request_time * request_processors + 6860000 * np.log10(submit_time))

    def f4_score(self, task):
        """Heuristic priority F4 (lower is scheduled first)."""
        submit_time = task.submit_time
        request_processors = task.request_number_of_processors
        request_time = task.request_time
        return (request_time * np.sqrt(request_processors) + 530000 * np.log10(submit_time))

    def sjf_score(self, task):
        """Shortest-job-first key: requested runtime, then arrival time."""
        request_time = task.request_time
        submit_time = task.submit_time
        return (request_time, submit_time)

    def smallest_score(self, task):
        """Smallest-job-first key: requested processors, then arrival time."""
        request_processors = task.request_number_of_processors
        submit_time = task.submit_time
        return (request_processors, submit_time)

    def wfp_score(self, task):
        """WFP3 priority.  NOTE(review): uses `scheduled_time`, which is -1
        until the task is actually scheduled — confirm this is only called
        on scheduled tasks."""
        submit_time = task.submit_time
        request_processors = task.request_number_of_processors
        request_time = task.request_time
        waiting_time = task.scheduled_time - task.submit_time
        return -np.power(float(waiting_time) / request_time, 3) * request_processors

    def uni_score(self, task):
        """UNICEF-style priority.  Same `scheduled_time` caveat as wfp_score."""
        submit_time = task.submit_time
        request_processors = task.request_number_of_processors
        request_time = task.request_time
        waiting_time = task.scheduled_time - task.submit_time
        return -(waiting_time + 1e-15) / (np.log2(request_processors + 1e-15) * request_time)

    def fcfs_score(self, task):
        """First-come-first-served key: arrival time."""
        submit_time = task.submit_time
        return submit_time

    def gen_preworkloads(self, size):
        """Pre-fill the cluster with up to `size` synthetic running tasks so
        episodes do not always start from an empty machine."""
        running_task_size = size
        for i in range(running_task_size):
            _task = self.loads[self.start - i - 1]
            req_num_of_processors = _task.request_number_of_processors
            runtime_of_task = _task.request_time
            task_tmp = Task()
            # Negative ids mark synthetic pre-workload tasks.
            task_tmp.task_id = (-1 - i)
            task_tmp.request_number_of_processors = req_num_of_processors
            task_tmp.run_time = runtime_of_task
            if self.cluster.can_allocated(task_tmp):
                self.running_tasks.append(task_tmp)
                # Pretend the task started some random time in the past.
                task_tmp.scheduled_time = max(0, (self.current_timestamp - random.randint(0, max(runtime_of_task, 1))))
                task_tmp.allocated_machines = self.cluster.allocate(task_tmp.task_id, task_tmp.request_number_of_processors)
                self.pre_workloads.append(task_tmp)
            else:
                break

    def refill_preworkloads(self):
        """Re-install the previously generated pre-workload tasks after a
        cluster reset so every baseline run sees the same initial load."""
        for _task in self.pre_workloads:
            self.running_tasks.append(_task)
            _task.allocated_machines = self.cluster.allocate(_task.task_id, _task.request_number_of_processors)

    def reset(self):
        """Start a new episode at a random trace offset and pre-compute the
        heuristic baselines.  Returns (observation, critic observation)."""
        self.cluster.reset()
        self.loads.reset()
        self.task_queue = []
        self.running_tasks = []
        self.visible_tasks = []
        self.pairs = []
        self.current_timestamp = 0
        self.start = 0
        self.next_arriving_task_idx = 0
        self.last_task_in_batch = 0
        self.num_task_in_batch = 0
        self.scheduled_rl = {}
        self.penalty = 0
        self.pivot_task = False
        self.scheduled_scores = []
        task_sequence_size = TASK_SEQUENCE_SIZE
        self.pre_workloads = []
        # Pick a random window of the trace for this episode.
        self.start = self.np_random.randint(task_sequence_size, (self.loads.size() - task_sequence_size - 1))
        self.start_idx_last_reset = self.start
        self.num_task_in_batch = task_sequence_size
        self.last_task_in_batch = self.start + self.num_task_in_batch
        self.current_timestamp = self.loads[self.start].submit_time
        self.task_queue.append(self.loads[self.start])
        self.next_arriving_task_idx = self.start + 1
        if self.enable_preworkloads:
            self.gen_preworkloads(task_sequence_size + self.np_random.randint(task_sequence_size))
        # Baseline totals for each heuristic over the same window; each call
        # schedules the whole sequence and then restores the initial state.
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.sjf_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.smallest_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.fcfs_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.f1_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.f2_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.f3_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.f4_score).values()))
        return self.build_observation(), self.build_critic_observation()

    def reset_for_test(self, num, start):
        """Reset for evaluation on a window of `num` tasks.  NOTE(review):
        the `start` argument is ignored — a random offset is drawn instead;
        confirm whether deterministic starts were intended."""
        self.cluster.reset()
        self.loads.reset()
        self.task_queue = []
        self.running_tasks = []
        self.visible_tasks = []
        self.pairs = []
        self.current_timestamp = 0
        self.start = 0
        self.next_arriving_task_idx = 0
        self.last_task_in_batch = 0
        self.num_task_in_batch = 0
        self.scheduled_rl = {}
        self.penalty = 0
        self.pivot_task = False
        self.scheduled_scores = []
        task_sequence_size = num
        self.start = self.np_random.randint(task_sequence_size, (self.loads.size() - task_sequence_size - 1))
        self.start_idx_last_reset = self.start
        self.num_task_in_batch = task_sequence_size
        self.last_task_in_batch = self.start + self.num_task_in_batch
        self.current_timestamp = self.loads[self.start].submit_time
        self.task_queue.append(self.loads[self.start])
        self.next_arriving_task_idx = self.start + 1

    def moveforward_for_resources_backfill_greedy(self, task, scheduled_logs):
        """Advance simulated time until `task` fits, backfilling smaller
        queued tasks that finish before `task`'s earliest possible start.
        Scores of backfilled tasks are written into `scheduled_logs`."""
        assert not self.cluster.can_allocated(task)
        # Estimate the earliest time enough processors will be free.
        earliest_start_time = self.current_timestamp
        self.running_tasks.sort(key=lambda running_task: (running_task.scheduled_time + running_task.request_time))
        free_processors = self.cluster.free_node * self.cluster.num_procs_per_node
        for running_task in self.running_tasks:
            free_processors += len(running_task.allocated_machines) * self.cluster.num_procs_per_node
            earliest_start_time = (running_task.scheduled_time + running_task.request_time)
            if free_processors >= task.request_number_of_processors:
                break
        while not self.cluster.can_allocated(task):
            # backfill tasks
            self.task_queue.sort(key=lambda _j: self.fcfs_score(_j))
            task_queue_iter_copy = list(self.task_queue)
            for _j in task_queue_iter_copy:
                # Only backfill if _j finishes before the blocked task could start.
                if self.cluster.can_allocated(_j) and (self.current_timestamp + _j.request_time) < earliest_start_time:
                    assert _j.scheduled_time == -1
                    _j.scheduled_time = self.current_timestamp
                    _j.allocated_machines = self.cluster.allocate(_j.task_id, _j.request_number_of_processors)
                    self.running_tasks.append(_j)
                    score = (self.task_score(_j) / self.num_task_in_batch)
                    scheduled_logs[_j.task_id] = score
                    self.task_queue.remove(_j)
            # Advance to the next event: a task arrival or a resource release.
            assert self.running_tasks
            self.running_tasks.sort(key=lambda running_task: (running_task.scheduled_time + running_task.run_time))
            next_resource_release_time = (self.running_tasks[0].scheduled_time + self.running_tasks[0].run_time)
            next_resource_release_machines = self.running_tasks[0].allocated_machines
            if self.next_arriving_task_idx < self.last_task_in_batch \
                    and self.loads[self.next_arriving_task_idx].submit_time <= next_resource_release_time:
                self.current_timestamp = max(self.current_timestamp, self.loads[self.next_arriving_task_idx].submit_time)
                self.task_queue.append(self.loads[self.next_arriving_task_idx])
                self.next_arriving_task_idx += 1
            else:
                self.current_timestamp = max(self.current_timestamp, next_resource_release_time)
                self.cluster.release(next_resource_release_machines)
                self.running_tasks.pop(0)

    def schedule_curr_sequence_reset(self, score_fn):
        """Schedule the whole current sequence with heuristic `score_fn`,
        then restore the episode's initial state.  Returns {task_id: score}."""
        scheduled_logs = {}
        while True:
            self.task_queue.sort(key=lambda j: score_fn(j))
            task_for_scheduling = self.task_queue[0]
            if not self.cluster.can_allocated(task_for_scheduling):
                self.moveforward_for_resources_backfill_greedy(task_for_scheduling, scheduled_logs)
            assert task_for_scheduling.scheduled_time == -1
            task_for_scheduling.scheduled_time = self.current_timestamp
            task_for_scheduling.allocated_machines = self.cluster.allocate(task_for_scheduling.task_id,
                                                                           task_for_scheduling.request_number_of_processors)
            self.running_tasks.append(task_for_scheduling)
            score = (self.task_score(task_for_scheduling) / self.num_task_in_batch)
            scheduled_logs[task_for_scheduling.task_id] = score
            self.task_queue.remove(task_for_scheduling)
            not_empty = self.moveforward_for_task()
            if not not_empty:
                break
        # Roll the environment back so the RL agent replays the same window.
        self.cluster.reset()
        self.loads.reset()
        self.task_queue = []
        self.running_tasks = []
        self.visible_tasks = []
        self.pairs = []
        self.current_timestamp = self.loads[self.start].submit_time
        self.task_queue.append(self.loads[self.start])
        self.last_task_in_batch = self.start + self.num_task_in_batch
        self.next_arriving_task_idx = self.start + 1
        if self.enable_preworkloads:
            self.refill_preworkloads()
        return scheduled_logs

    def build_critic_observation(self):
        """Flat vector of (submit, runtime, processors) triples, normalized,
        for every task in the episode window — for the critic network."""
        vector = np.zeros(TASK_SEQUENCE_SIZE * 3, dtype=float)
        earlist_task = self.loads[self.start_idx_last_reset]
        earlist_submit_time = earlist_task.submit_time
        pairs = []
        for i in range(self.start_idx_last_reset, self.last_task_in_batch + 1):
            task = self.loads[i]
            submit_time = task.submit_time - earlist_submit_time
            request_processors = task.request_number_of_processors
            request_time = task.request_time
            normalized_submit_time = min(float(submit_time) / float(MAX_WAIT_TIME), 1.0 - 1e-5)
            normalized_run_time = min(float(request_time) / float(self.loads.max_exec_time), 1.0 - 1e-5)
            normalized_request_nodes = min(float(request_processors) / float(self.loads.max_procs), 1.0 - 1e-5)
            pairs.append([normalized_submit_time, normalized_run_time, normalized_request_nodes])
        for i in range(TASK_SEQUENCE_SIZE):
            vector[i * 3:(i + 1) * 3] = pairs[i]
        return vector

    def build_observation(self):
        """Build the agent's observation: TASK_FEATURES normalized numbers for
        each of up to MAX_QUEUE_SIZE visible tasks.  When the queue overflows,
        candidates are drawn from several heuristic orderings."""
        vector = np.zeros((MAX_QUEUE_SIZE) * TASK_FEATURES, dtype=float)
        self.task_queue.sort(key=lambda task: self.fcfs_score(task))
        # NOTE(review): this first fill of visible_tasks is discarded and
        # rebuilt below — it appears to be dead code.
        self.visible_tasks = []
        for i in range(0, MAX_QUEUE_SIZE):
            if i < len(self.task_queue):
                self.visible_tasks.append(self.task_queue[i])
            else:
                break
        self.visible_tasks.sort(key=lambda j: self.fcfs_score(j))
        self.visible_tasks = []
        if len(self.task_queue) <= MAX_QUEUE_SIZE:
            for i in range(0, len(self.task_queue)):
                self.visible_tasks.append(self.task_queue[i])
        else:
            # Queue overflow: interleave candidates picked by several
            # heuristics so diverse tasks stay visible.
            visible_f1 = []
            f1_index = 0
            self.task_queue.sort(key=lambda task: self.f1_score(task))
            for i in range(0, MAX_QUEUE_SIZE):
                visible_f1.append(self.task_queue[i])
            visible_f2 = []
            f2_index = 0
            self.task_queue.sort(key=lambda task: self.f2_score(task))
            for i in range(0, MAX_QUEUE_SIZE):
                visible_f2.append(self.task_queue[i])
            visible_sjf = []
            sjf_index = 0
            self.task_queue.sort(key=lambda task: self.sjf_score(task))
            for i in range(0, MAX_QUEUE_SIZE):
                visible_sjf.append(self.task_queue[i])
            visible_small = []
            small_index = 0
            self.task_queue.sort(key=lambda task: self.smallest_score(task))
            for i in range(0, MAX_QUEUE_SIZE):
                visible_small.append(self.task_queue[i])
            visible_random = []
            random_index = 0
            shuffled = list(self.task_queue)
            shuffle(shuffled)
            for i in range(0, MAX_QUEUE_SIZE):
                visible_random.append(shuffled[i])
            index = 0
            while index < MAX_QUEUE_SIZE:
                # NOTE(review): f1_task/f2_task are fetched but never used,
                # and random_task reads visible_sjf instead of visible_random
                # — both look like oversights; confirm before relying on the
                # candidate mix.
                f1_task = visible_f1[f1_index]
                f1_index += 1
                f2_task = visible_f2[f2_index]
                f2_index += 1
                sjf_task = visible_sjf[sjf_index]
                sjf_index += 1
                small_task = visible_small[small_index]
                small_index += 1
                random_task = visible_sjf[random_index]
                random_index += 1
                if (not sjf_task in self.visible_tasks) and index < MAX_QUEUE_SIZE:
                    self.visible_tasks.append(sjf_task)
                    index += 1
                if (not small_task in self.visible_tasks) and index < MAX_QUEUE_SIZE:
                    self.visible_tasks.append(small_task)
                    index += 1
                if (not random_task in self.visible_tasks) and index < MAX_QUEUE_SIZE:
                    self.visible_tasks.append(random_task)
                    index += 1
        # Build the per-slot feature vectors; empty slots get [0, 1, 1, 0].
        self.pairs = []
        add_skip = False
        for i in range(0, MAX_QUEUE_SIZE):
            if i < len(self.visible_tasks) and i < (MAX_QUEUE_SIZE):
                task = self.visible_tasks[i]
                submit_time = task.submit_time
                request_processors = task.request_number_of_processors
                request_time = task.request_time
                wait_time = self.current_timestamp - submit_time
                normalized_wait_time = min(float(wait_time) / float(MAX_WAIT_TIME), 1.0 - 1e-5)
                normalized_run_time = min(float(request_time) / float(self.loads.max_exec_time), 1.0 - 1e-5)
                normalized_request_nodes = min(float(request_processors) / float(self.loads.max_procs), 1.0 - 1e-5)
                if self.cluster.can_allocated(task):
                    can_schedule_now = 1.0 - 1e-5
                else:
                    can_schedule_now = 1e-5
                self.pairs.append(
                    [task, normalized_wait_time, normalized_run_time, normalized_request_nodes, can_schedule_now])
            else:
                self.pairs.append([None, 0, 1, 1, 0])
        for i in range(0, MAX_QUEUE_SIZE):
            vector[i * TASK_FEATURES:(i + 1) * TASK_FEATURES] = self.pairs[i][1:]
        return vector

    def moveforward_for_resources_backfill(self, task):
        """Same event loop as the greedy variant, but backfill scores are
        recorded into `self.scheduled_rl` (the RL schedule's log)."""
        assert not self.cluster.can_allocated(task)
        earliest_start_time = self.current_timestamp
        self.running_tasks.sort(key=lambda running_task: (running_task.scheduled_time + running_task.request_time))
        free_processors = self.cluster.free_node * self.cluster.num_procs_per_node
        for running_task in self.running_tasks:
            free_processors += len(running_task.allocated_machines) * self.cluster.num_procs_per_node
            earliest_start_time = (running_task.scheduled_time + running_task.request_time)
            if free_processors >= task.request_number_of_processors:
                break
        while not self.cluster.can_allocated(task):
            self.task_queue.sort(key=lambda _j: self.fcfs_score(_j))
            task_queue_iter_copy = list(self.task_queue)
            for _j in task_queue_iter_copy:
                if self.cluster.can_allocated(_j) and (self.current_timestamp + _j.request_time) < earliest_start_time:
                    assert _j.scheduled_time == -1
                    _j.scheduled_time = self.current_timestamp
                    _j.allocated_machines = self.cluster.allocate(_j.task_id, _j.request_number_of_processors)
                    self.running_tasks.append(_j)
                    score = (self.task_score(_j) / self.num_task_in_batch)
                    self.scheduled_rl[_j.task_id] = score
                    self.task_queue.remove(_j)
            assert self.running_tasks
            self.running_tasks.sort(key=lambda running_task: (running_task.scheduled_time + running_task.run_time))
            next_resource_release_time = (self.running_tasks[0].scheduled_time + self.running_tasks[0].run_time)
            next_resource_release_machines = self.running_tasks[0].allocated_machines
            if self.next_arriving_task_idx < self.last_task_in_batch \
                    and self.loads[self.next_arriving_task_idx].submit_time <= next_resource_release_time:
                self.current_timestamp = max(self.current_timestamp, self.loads[self.next_arriving_task_idx].submit_time)
                self.task_queue.append(self.loads[self.next_arriving_task_idx])
                self.next_arriving_task_idx += 1
            else:
                self.current_timestamp = max(self.current_timestamp, next_resource_release_time)
                self.cluster.release(next_resource_release_machines)
                self.running_tasks.pop(0)

    def skip_for_resources(self):
        """Advance one simulation event (arrival or release) without
        scheduling anything.  Always returns False (episode not done)."""
        assert self.running_tasks
        self.running_tasks.sort(key=lambda running_task: (running_task.scheduled_time + running_task.run_time))
        next_resource_release_time = (self.running_tasks[0].scheduled_time + self.running_tasks[0].run_time)
        next_resource_release_machines = self.running_tasks[0].allocated_machines
        if self.next_arriving_task_idx < self.last_task_in_batch and self.loads[
                self.next_arriving_task_idx].submit_time <= next_resource_release_time:
            self.current_timestamp = max(self.current_timestamp, self.loads[self.next_arriving_task_idx].submit_time)
            self.task_queue.append(self.loads[self.next_arriving_task_idx])
            self.next_arriving_task_idx += 1
        else:
            self.current_timestamp = max(self.current_timestamp, next_resource_release_time)
            self.cluster.release(next_resource_release_machines)
            self.running_tasks.pop(0)
        return False

    def moveforward_for_task(self):
        """Advance time until at least one task is queued.  Returns True when
        the queue is non-empty, False when the batch is exhausted."""
        if self.task_queue:
            return True
        if self.next_arriving_task_idx >= self.last_task_in_batch:
            assert not self.task_queue
            return False
        while not self.task_queue:
            if not self.running_tasks:
                # Nothing running: the only possible event is an arrival.
                next_resource_release_time = sys.maxsize
                next_resource_release_machines = []
            else:
                self.running_tasks.sort(key=lambda running_task: (running_task.scheduled_time + running_task.run_time))
                next_resource_release_time = (self.running_tasks[0].scheduled_time + self.running_tasks[0].run_time)
                next_resource_release_machines = self.running_tasks[0].allocated_machines
            if self.loads[self.next_arriving_task_idx].submit_time <= next_resource_release_time:
                self.current_timestamp = max(self.current_timestamp, self.loads[self.next_arriving_task_idx].submit_time)
                self.task_queue.append(self.loads[self.next_arriving_task_idx])
                self.next_arriving_task_idx += 1
                return True
            else:
                self.current_timestamp = max(self.current_timestamp, next_resource_release_time)
                self.cluster.release(next_resource_release_machines)
                self.running_tasks.pop(0)

    def task_score(self, task_for_scheduling):
        """Bounded-slowdown-style score of a scheduled task (lower is better).

        NOTE(review): `tf.random.normal(run_time)` is almost certainly a bug —
        it treats run_time as a tensor shape, returns a TF tensor, and is
        nondeterministic; the commented-out line suggests the intended cost
        was `math.log(run_time)`.  Confirm and fix upstream.
        """
        COST = tf.random.normal(task_for_scheduling.run_time)
        #COST = math.log(task_for_scheduling.run_time)
        _tmp = COST * max(1.0, (float(
            task_for_scheduling.scheduled_time - task_for_scheduling.submit_time + task_for_scheduling.run_time)
                                /
                                max(task_for_scheduling.run_time, 10)))
        return _tmp

    def has_only_one_task(self):
        """True when exactly one task is queued."""
        if len(self.task_queue) == 1:
            return True
        else:
            return False

    def skip_schedule(self):
        """Action on an empty slot: advance one event without scheduling.
        Returns (done, reward) — always (False, 0)."""
        next_resource_release_time = sys.maxsize
        next_resource_release_machines = []
        if self.running_tasks:
            self.running_tasks.sort(key=lambda running_task: (running_task.scheduled_time + running_task.run_time))
            next_resource_release_time = (self.running_tasks[0].scheduled_time + self.running_tasks[0].run_time)
            next_resource_release_machines = self.running_tasks[0].allocated_machines
        if self.next_arriving_task_idx >= self.last_task_in_batch and not self.running_tasks:
            # Nothing left to arrive or finish; flag the pivot once.
            if not self.pivot_task:
                self.pivot_task = True
                return False, 0
            else:
                return False, 0
        if self.next_arriving_task_idx < self.last_task_in_batch and self.loads[
                self.next_arriving_task_idx].submit_time <= next_resource_release_time:
            self.current_timestamp = max(self.current_timestamp, self.loads[self.next_arriving_task_idx].submit_time)
            self.task_queue.append(self.loads[self.next_arriving_task_idx])
            self.next_arriving_task_idx += 1
        else:
            self.current_timestamp = max(self.current_timestamp, next_resource_release_time)
            self.cluster.release(next_resource_release_machines)
            self.running_tasks.pop(0)
        return False, 0

    def schedule(self, task_for_scheduling):
        """Schedule the chosen task (backfilling first if it does not fit),
        record its score, and advance.  Returns True when the episode ends."""
        if not self.cluster.can_allocated(task_for_scheduling):
            self.moveforward_for_resources_backfill(task_for_scheduling)
        assert task_for_scheduling.scheduled_time == -1
        task_for_scheduling.scheduled_time = self.current_timestamp
        task_for_scheduling.allocated_machines = self.cluster.allocate(task_for_scheduling.task_id,
                                                                       task_for_scheduling.request_number_of_processors)
        self.running_tasks.append(task_for_scheduling)
        score = (self.task_score(task_for_scheduling) / self.num_task_in_batch)
        self.scheduled_rl[task_for_scheduling.task_id] = score
        self.task_queue.remove(task_for_scheduling)
        not_empty = self.moveforward_for_task()
        if not_empty:
            return False
        else:
            return True

    def valid(self, a):
        """Return the task in visible slot a[0] (None for an empty slot)."""
        action = a[0]
        return self.pairs[action][0]

    def step(self, a):
        """Gym step: schedule the task in slot `a` (or skip if the slot is
        empty).  Returns [obs, reward, done, info]; the terminal reward is the
        negated RL total, with the baseline gap in the info slot."""
        task_for_scheduling = self.pairs[a][0]
        if not task_for_scheduling:
            done, _ = self.skip_schedule()
        else:
            task_for_scheduling = self.pairs[a][0]
            done = self.schedule(task_for_scheduling)
        if not done:
            obs = self.build_observation()
            return [obs, 0, False, 0]
        else:
            rl_total = sum(self.scheduled_rl.values())
            best_total = min(self.scheduled_scores)
            rwd2 = (best_total - rl_total)
            rwd = -rl_total
            return [None, rwd, True, rwd2]

    def step_for_test(self, a):
        """Like step(), but the terminal payload is the raw RL total and no
        baseline comparison is made."""
        task_for_scheduling = self.pairs[a][0]
        if not task_for_scheduling:
            done, _ = self.skip_schedule()
        else:
            task_for_scheduling = self.pairs[a][0]
            done = self.schedule(task_for_scheduling)
        if not done:
            obs = self.build_observation()
            return [obs, 0, False, None]
        else:
            rl_total = sum(self.scheduled_rl.values())
            return [None, rl_total, True, None]
if __name__ == '__main__':
    # Smoke-test driver: run 100 episodes always taking action 0 and print
    # the terminal reward of each.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--workload', type=str, default='./Dataset/synthetic_small.swf')
    args = parser.parse_args()
    current_dir = os.getcwd()
    workload_file = os.path.join(current_dir, args.workload)
    env = HPC_Environment()
    env.my_init(workload_file=workload_file, sched_file=workload_file)
    env.seed(0)
    for _ in range(100):
        _, r = env.reset(), 0
        while True:
            _, r, d, _ = env.step(0)
            if d:
                print("HPC Reward:", r)
                break
| StarcoderdataPython |
6626642 | #!/usr/bin/env python
"""
update compile_commands.json file used in unit test framework
"""
import argparse
import os
import sys
import re
import json
def main(argv):
    """Rewrite every entry's 'directory' field in a compile_commands.json
    to the current working directory.

    Args:
        argv: command-line arguments (excluding the program name).

    Returns None on success; exits the process with a message when the
    file does not exist.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'json_file',
        type=str,
        nargs='?',
        default='compile_commands.json',
        help='compile_commands.json file to update')
    # BUG FIX: parse the argv passed by the caller instead of the implicit
    # sys.argv, so main() honors its parameter and is testable.
    args = parser.parse_args(argv)

    json_file = os.path.abspath(args.json_file)
    if not os.path.exists(json_file):
        sys.exit('compile_commands.json file does not exist!')

    with open(json_file, "r") as file:
        compdbs = json.load(file)

    cwd = os.getcwd()
    for compdb in compdbs:
        # update directory
        compdb['directory'] = cwd

    # write compdbs back to compile_commands.json
    with open(json_file, "w") as file:
        file.write(json.dumps(compdbs, indent=4))
if __name__ == '__main__':
    # Forward main()'s return value (None on success) as the exit status.
    sys.exit(main(sys.argv[1:]))
| StarcoderdataPython |
9753598 | <filename>write_me/stp_info.py
"""Transform setup.py into a python dictionary."""
import ast
from write_me.list_files import get_setup_file
from write_me.git_description import get_git_description
from write_me.project_data import get_project_url
# Module-level cache of key/value pairs parsed out of setup.py.
setup_parsed = {}
# Keys harvested from setup.py lines.  NOTE: 'author=' deliberately keeps
# the '=' so that it matches 'author=' but not 'author_email=' during the
# startswith() line matching below.
setup_keys = ['version',
              'description',
              'author_email',
              'packages',
              'author=']
def parse_authors():
    """Turn the raw ``setup_parsed['author']`` string into a list of authors.

    Handles both a comma-separated list of quoted names and a single name;
    quotes (single and double) and surrounding whitespace are stripped.
    """
    author_string = setup_parsed['author']
    if ',' in author_string:
        author_list = author_string.split(',')
        # BUG FIX: the original built a double-quote-stripped list and then
        # immediately discarded it, so double quotes were never removed.
        # Chain both replacements instead.
        cleaned = [author.replace('"', '').replace("'", "") for author in author_list]
        return [author.strip() for author in cleaned]
    author_string = author_string.replace("'", "")
    author_string = author_string.replace('"', '')
    return [author_string.strip()]
def parse_setup_py():
    """Convert needed info from setup.py into a dict.

    Scans the first setup.py found for the keys in `setup_keys`, handling
    single-line values, single-line lists, and multi-line lists (tracked via
    the `appending` flag).  Missing keys are filled with placeholder values
    afterwards.  Returns the module-level `setup_parsed` dict.
    """
    project_dict = get_project_url()
    setup_files = get_setup_file()
    if not setup_files:
        # No setup.py at all: return a dict of placeholders.
        setup_parsed['version'] = "YOUR VERSION HERE"
        setup_parsed['description'] = get_git_description()
        setup_parsed['author_email'] = "YOUR EMAIL HERE"
        setup_parsed['packages'] = "YOUR PACKAGES HERE"
        setup_parsed['author'] = [project_dict['project_user']]
        return setup_parsed
    with open(setup_files[0], 'r') as sf:
        create_list = []       # accumulates items of a multi-line list value
        appending = False      # True while inside a multi-line list value
        for line in sf:
            line = line.strip()
            line = line.rstrip(',')
            if not appending:
                for key in setup_keys:
                    if line.startswith(key):
                        try:
                            k, v = line.split('=')
                            if v.startswith('['):
                                if v.endswith(']'):
                                    # Complete single-line list literal.
                                    v = ast.literal_eval(v)
                                    setup_parsed[k] = v
                                    continue
                                else:
                                    # List continues on following lines.
                                    appending = True
                                    v = v.lstrip('[')
                                    create_list.append(v.strip("'"))
                                    continue
                            else:
                                setup_parsed[k] = v.strip("'")
                                continue
                        # NOTE(review): bare except — presumably guarding the
                        # single-'=' split() unpacking (ValueError), but it
                        # also hides any other failure; consider narrowing.
                        except:
                            setup_parsed[key] = "NO INFO FOUND"
                            continue
                    else:
                        continue
            else:
                if line.endswith(']'):
                    # Final line of the multi-line list.
                    appending = False
                    line = line.rstrip(']')
                    create_list.append(line.strip("'"))
                    # `key` is still bound from the loop that started the
                    # list; drop the '=' marker from 'author='.
                    if key == "author=":
                        key = key.replace("=", "")
                    setup_parsed[key] = create_list
                else:
                    create_list.append(line.strip("'"))
    # Post-processing and placeholder defaults.
    if 'packages' in setup_parsed:
        if setup_parsed['packages'] == 'find_packages()':
            setup_parsed['packages'] = ''
    if 'author' in setup_parsed:
        if isinstance(setup_parsed['author'], str):
            setup_parsed['author'] = parse_authors()
    if 'author' not in setup_parsed:
        # get from author from setup_data dict instead.
        setup_parsed['author'] = [project_dict['project_user']]
    if 'author_email' not in setup_parsed:
        setup_parsed['author_email'] = "YOUR EMAIL HERE"
    if 'version' not in setup_parsed:
        setup_parsed['version'] = "YOUR VERSION HERE"
    if 'description' not in setup_parsed:
        setup_parsed['description'] = get_git_description()
    if 'packages' not in setup_parsed:
        setup_parsed['packages'] = "YOUR PACKAGES HERE"
    return setup_parsed
if __name__ == '__main__':  # pragma no cover
    # Manual smoke test: parse the local setup.py and show the result.
    print(parse_setup_py())
| StarcoderdataPython |
1637727 | <reponame>TuongL94/MasterThesis
# -*- coding: utf-8 -*-
"""
Created on Thur Feb 15 11:22:22 2018
@author: <NAME>
"""
import single_nn_model_mnist as sm
import numpy as np
import tensorflow as tf
import os
import utilities as util
def batch_mnist(batch_size, counter, train_labels, train_data):
    """Return the next mini-batch of MNIST images and labels.

    Slices ``batch_size`` consecutive samples starting at index ``counter``
    from the 4-D image array and the label vector.
    """
    start, stop = counter, counter + batch_size
    return train_data[start:stop, :, :, :], train_labels[start:stop]
def main(unused_argv):
    """Train (or resume training of) the single-network MNIST model.

    Loads MNIST, builds a fresh graph or restores the checkpoint found in
    ``output_dir``, then runs mini-batch gradient descent with momentum,
    writing TensorBoard summaries every 100 iterations and saving a final
    checkpoint.
    """
    # Load mnist training and eval data and perform necessary data reshape
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    mnist_train_data = util.reshape_grayscale_data(mnist.train.images) # Returns np.array
    mnist_train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    output_dir = "/tmp/single_mnist_model/" # directory where the model will be saved
    nbr_of_training_images = 55000 # number of images to use from the training data set
    # parameters for training
    batch_size = 100
    train_iter = 20000
    learning_rate = 0.001
    momentum = 0.99
    image_dims = np.shape(mnist_train_data)
    placeholder_dims = [batch_size, image_dims[1], image_dims[2], image_dims[3]]
    # parameters for evaluation
    batch_size_test = 10000
    tf.reset_default_graph()
    # if models exists use the existing one otherwise create a new one
    if not os.path.exists(output_dir + ".meta"):
        print("No previous model exists, creating a new one.")
        is_model_new = True
        # create placeholders
        data,label,test_data = sm.placeholder_inputs(placeholder_dims,batch_size_test)
        train_output = sm.inference(data)
        test_output = sm.inference(test_data)
        # One-hot encode the integer labels (10 digit classes) for the loss.
        onehot_labels = tf.one_hot(indices = tf.cast(label, tf.int32), depth = 10)
        onehot_labels = tf.squeeze(onehot_labels)
        loss = tf.losses.softmax_cross_entropy(onehot_labels = onehot_labels, logits = train_output)
        # Stash the key tensors in collections so a restored graph can find them.
        tf.add_to_collection("loss",loss)
        tf.add_to_collection("train_output",train_output)
        tf.add_to_collection("test_output",test_output)
        saver = tf.train.Saver()
    else:
        print("Using existing model in the directory " + output_dir)
        is_model_new = False
        saver = tf.train.import_meta_graph(output_dir + ".meta")
        g = tf.get_default_graph()
        data = g.get_tensor_by_name("data:0")
        label = g.get_tensor_by_name("label:0")
        loss = tf.get_collection("loss")[0]
        train_output = tf.get_collection("train_output")[0]
    with tf.Session() as sess:
        if is_model_new:
            train_op = sm.training(loss, learning_rate, momentum)
            sess.run(tf.global_variables_initializer()) # initialize all trainable parameters
            tf.add_to_collection("train_op",train_op)
        else:
            saver.restore(sess, tf.train.latest_checkpoint(output_dir))
            train_op = tf.get_collection("train_op")[0]
        # Summaries: conv kernels/biases as histograms, first-layer filters as images.
        graph = tf.get_default_graph()
        conv1_layer = graph.get_tensor_by_name("conv_layer_1/kernel:0")
        nbr_of_filters_conv1 = sess.run(tf.shape(conv1_layer)[-1])
        conv2_layer = graph.get_tensor_by_name("conv_layer_2/kernel:0")
        hist_conv1 = tf.summary.histogram("hist_conv1", conv1_layer)
        hist_conv2 = tf.summary.histogram("hist_conv2", conv2_layer)
        # Transpose to (filters, h, w, channels) for tf.summary.image, then back.
        conv1_layer = tf.transpose(conv1_layer, perm = [3,0,1,2])
        filter1 = tf.summary.image('Filter_1', conv1_layer, max_outputs=nbr_of_filters_conv1)
        conv1_layer = tf.transpose(conv1_layer, perm = [1,2,3,0])
        # conv2_layer = tf.transpose(conv2_layer, perm = [3,0,1,2])
        # filter2 = tf.summary.image('Filter_2', conv2_layer, max_outputs=32)
        bias_conv1 = graph.get_tensor_by_name("conv_layer_1/bias:0")
        hist_bias1 = tf.summary.histogram("hist_bias1", bias_conv1)
        bias_conv2 = graph.get_tensor_by_name("conv_layer_2/bias:0")
        hist_bias2 = tf.summary.histogram("hist_bias2", bias_conv2)
        summary_op = tf.summary.scalar('training_loss', loss)
        x_image = tf.summary.image('input', data)
        summary_op = tf.summary.merge([summary_op, x_image, filter1, hist_conv1, hist_conv2, hist_bias1, hist_bias2])
        # Summary setup
        writer = tf.summary.FileWriter(output_dir + "/summary", graph=tf.get_default_graph())
        counter = 0
        for i in range(1,train_iter + 1):
            b_data, b_labels = batch_mnist(batch_size, counter, mnist_train_labels, mnist_train_data)
            _,loss_value,train_o,summary = sess.run([train_op, loss, train_output,summary_op],feed_dict={data:b_data, label:b_labels})
            if i % 100 == 0:
                print("Iteration %d: loss = %.5f" % (i, loss_value))
                writer.add_summary(summary, i)
            # Wrap the batch pointer so we keep cycling over the training set.
            counter = (counter + batch_size) % nbr_of_training_images
        save_path = tf.train.Saver().save(sess,output_dir)
        print("Trained model saved in path: %s" % save_path)
if __name__ == "__main__":
    # tf.app.run parses flags and then calls main(unused_argv).
    tf.app.run()
| StarcoderdataPython |
5010265 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from zabbix_api import ZabbixAPI

# Connect to the Zabbix frontend and log in.
# NOTE(review): server URL and the default Admin credentials are hard-coded;
# move them to configuration before real use.
zapi = ZabbixAPI(server="http://192.168.25.3")
zapi.login("Admin", "zabbix")

# Fetch numeric-float history ("history": 0) for one item over a one-second
# window (time_from == time_till).
history = zapi.history.get({
    "itemids": [
        29262
    ],
    "history": 0,
    "output": "extend",
    "time_from": "1575216457",
    "time_till": "1575216457"
})

# Python 2 print statement: emits "<clock> -  <value>" per sample.
for x in history:
    print x["clock"], "- ", x["value"]
| StarcoderdataPython |
9791018 | <filename>atcoder/abc195/d.py
#☆𝒐𝒎𝒂𝒋𝒊𝒏𝒂𝒊☆#
import sys
import math
import itertools
from functools import lru_cache
from collections import deque
sys.setrecursionlimit(10000000)
# Fast input: read one stripped line from stdin per call.
input=lambda : sys.stdin.readline().rstrip()
'''''✂'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# n items, m boxes, q queries; each item is a (size, value) pair
# (item[j][0] is compared against box capacity, item[j][1] is summed).
n,m,q=map(int,input().split())
item=[]
for i in range(n):
    item.append(tuple(map(int,input().split())))
# Greedy order: highest value first, ties broken by larger size.
item=sorted(item, key=lambda x:(-x[1],-x[0]))
x=list(map(int,input().split()))
for _ in range(q):
    ans=0
    a,b=map(int,input().split())
    # Boxes that remain after removing boxes a..b (1-indexed), smallest first.
    ub=sorted(x[:a-1]+x[b:])
    used=[False for i in range(n)]
    for i in range(len(ub)):
        for j in range(n):
            # Place the most valuable unused item that still fits this box.
            if item[j][0]<=ub[i] and not used[j]:
                ans+=item[j][1]
                used[j]=True
                break
    print(ans)
| StarcoderdataPython |
287309 | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
import midisw.gtk3
##########################################################################
class Layer(Gtk.VBox):
    """One MIDI layer strip: port/channel selection, dial parameters and a
    piano-style note-range selector, stacked vertically."""
    PIANO_WIDTH = 480   # pixel width of the note-range selector
    PIANO_HEIGHT = 32   # pixel height of the note-range selector
    def __init__(self, **kwargs):
        """Build the layer's widget tree (packing order defines the layout)."""
        super().__init__(**kwargs)
        self.portinfof = Gtk.HBox()
        self.pack_start(self.portinfof, False, False, 0)
        # dismissbutton: destroys this layer when clicked
        self.dismiss_button = Gtk.Button()
        self.dismiss_button.set_label("X")
        self.dismiss_button.connect("clicked", self.on_dismiss_button_clicked)
        self.portinfof.pack_start(self.dismiss_button, False, False, 0)
        # port selection/portprofile information
        self.portsel = midisw.gtk3.PortSelector(port_type="jack/midi/input")
        self.portinfof.pack_start(self.portsel, True, True, 0)
        self.portlabel = Gtk.Label()
        self.portlabel.set_text("port-information")
        self.portinfof.pack_start(self.portlabel, True, True, 0)
        # channel spinner: MIDI channels 0..15, wrapping at the ends
        self.chf = Gtk.VBox()
        self.chlabel = Gtk.Label()
        self.chlabel.set_text("channel")
        self.chadj = Gtk.Adjustment(value=0, lower=0, upper=15, step_increment=1)
        self.ch = Gtk.SpinButton()
        self.ch.set_adjustment(self.chadj)
        self.ch.set_wrap(True)
        self.ch.set_size_request(32,16)
        self.chf.add(self.chlabel)
        self.chf.add(self.ch)
        self.portinfof.pack_start(self.chf, False, False, 2)
        #
        # port parameters: one dial per controller with its initial value
        #
        params={"volume":96, "pan": 64,
                "cutoff": 64, "resonance": 32,
                "reverb": 64, "chorus": 64}
        self.portparamf = Gtk.HBox()
        self.pack_start(self.portparamf, False, False, 0)
        self.param_widget={}
        for k in params.keys():
            # NOTE(review): the chained `self.volume =` aliases every dial in
            # turn, so after the loop self.volume is the *last* dial created —
            # looks like a leftover; verify before relying on self.volume.
            self.param_widget[k] = self.volume = midisw.gtk3.DialWithSpin(label=k, initial_value=params[k])
            self.param_widget[k].set_size_request(64,128)
            self.portparamf.pack_start(self.param_widget[k], True, True, 0)
        # tone-select
        #
        # range selection
        #
        self.piano = midisw.gtk3.PianoNoteRangeSelector()
        self.piano.set_size_request(self.PIANO_WIDTH,self.PIANO_HEIGHT)
        self.pack_start(self.piano, False,False,0)
    def on_dismiss_button_clicked(self, widget):
        """Remove this layer from its parent by destroying the widget."""
        self.destroy()
##########################################################################
class LayerStack(Gtk.VBox):
    """Scrollable vertical stack of Layer strips with an 'add layer' button."""
    WIDTH = 640    # requested widget width in pixels
    HEIGHT = 768   # requested widget height in pixels
    def __init__(self, **kwargs):
        """Build the button + scrolled container holding the layers."""
        super().__init__(**kwargs)
        self.add_button = Gtk.Button()
        self.add_button.set_label("Create New Layer")
        self.add_button.connect("clicked", self.on_add_button_clicked)
        self.pack_start(self.add_button, False, True, 0)
        self.scrollw = Gtk.ScrolledWindow()
        self.pack_start(self.scrollw, True, True, 0)
        self.layervbox = Gtk.VBox()
        self.scrollw.add(self.layervbox)
        self.set_size_request(self.WIDTH, self.HEIGHT)
    def on_add_button_clicked(self, widget):
        """Append a fresh Layer to the stack and make it visible."""
        new_layer = Layer()
        self.layervbox.pack_start(new_layer, False, False, 0)
        # show_all is needed so the newly created child widgets are realized.
        self.show_all()
##########################################################################
if __name__=="__main__":
    # Demo entry point: a top-level window holding a single LayerStack.
    top = Gtk.Window()
    layer_stack = LayerStack()
    top.add(layer_stack)
    top.connect("destroy", Gtk.main_quit)
    top.show_all()
    Gtk.main()
    pass  # no-op; kept from the original
| StarcoderdataPython |
1651374 | <filename>src/alfred3/exceptions.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Das Modul definiert alle Exceptions des Frameworks
"""
class AlfredError(Exception):
    """Base class for all exceptions raised by the framework.

    (Translated from the original German docstring.)
    """
class ValidationError(AlfredError):
    """Raised when a validation check fails."""
class AbortMove(AlfredError):
    """Raised to abort an ongoing move."""
class MoveError(AlfredError):
    """Raised when a move cannot be carried out."""
class SavingAgentRunException(AlfredError):
    """Raised when a saving-agent run fails."""
class SavingAgentException(AlfredError):
    """Raised for saving-agent related errors."""
class SessionTimeout(AlfredError):
    """Raised when a session times out."""
class AllSlotsFull(AlfredError):
    """Raised when no free slot is available."""
class AllConditionsFull(AllSlotsFull):
    """Raised when no free condition is available (special case of full slots)."""
class SlotInconsistency(AlfredError):
    """Raised when slot data is found to be inconsistent."""
class ConditionInconsistency(SlotInconsistency):
    """Raised when condition data is found to be inconsistent."""
| StarcoderdataPython |
11205718 | <reponame>jason-neal/Starfish
#!/usr/bin/env python
#Use flot JavaScript plotting library to visualize a single order plot
import numpy as np
import json
import jinja2
def np_to_json(arr0, arr1):
    """Serialize two parallel arrays into flot's ``[[x, y], ...]`` JSON form."""
    pairs = np.array([arr0, arr1]).T  # pair up x- and y-values
    return json.dumps(pairs.tolist())
def order_json(wl, fl, sigma, mask, flm, cheb):
'''
Given the quantities from a fit, create the JSON necessary for flot.
'''
residuals = fl - flm
# create the three lines necessary for the plot, in JSON
# data = [[wl0, fl0], [wl1, fl1], ...]
# model = [wl0, flm0], [wl1, flm1], ...]
# residuals = [[wl0, residuals0], [wl1, residuals1], ...]
plot_data = {"data":np_to_json(wl[mask], fl[mask]), "model":np_to_json(wl, flm), "residuals": np_to_json(wl[mask],
residuals[mask]), "sigma": np_to_json(wl[mask], sigma[mask]), "cheb": np_to_json(wl, cheb) }
return plot_data
def render_template(base, plot_data):
templateLoader = jinja2.FileSystemLoader(searchpath=base + "templates")
# An environment provides the data necessary to read and
# parse our templates. We pass in the loader object here.
templateEnv = jinja2.Environment(loader=templateLoader)
template = templateEnv.get_template('flot_plot.jinja')
templateVars = {"base": base}
templateVars.update(plot_data)
#Render plot using plot_data
outputText = template.render(templateVars)
f = open('index_flot.html', 'w')
f.write(outputText)
f.close()
def main():
#Use argparse to determine if we've specified a config file
import argparse
parser = argparse.ArgumentParser(prog="flot_model.py", description="Plot the model and residuals using flot.")
parser.add_argument("json", help="*.json file describing the model.")
parser.add_argument("params", help="*.yaml file specifying run parameters.")
# parser.add_argument("-o", "--output", help="*.html file for output")
args = parser.parse_args()
import json
import yaml
if args.json: #
#assert that we actually specified a *.json file
if ".json" not in args.json:
import sys
sys.exit("Must specify a *.json file.")
if args.params: #
#assert that we actually specified a *.yaml file
if ".yaml" in args.params:
yaml_file = args.params
f = open(args.params)
config = yaml.load(f)
f.close()
else:
import sys
sys.exit("Must specify a *.yaml file.")
yaml_file = args.params
from StellarSpectra.model import Model
from StellarSpectra.spectrum import DataSpectrum
from StellarSpectra.grid_tools import TRES, HDF5Interface
#Figure out what the relative path is to base
import StellarSpectra
base = StellarSpectra.__file__[:-26]
myDataSpectrum = DataSpectrum.open(base + config['data'], orders=config['orders'])
myInstrument = TRES()
myHDF5Interface = HDF5Interface(base + config['HDF5_path'])
myModel = Model.from_json(args.json, myDataSpectrum, myInstrument, myHDF5Interface)
for model in myModel.OrderModels:
#If an order has regions, read these out from model_final.json
region_dict = model.get_regions_dict()
print("Region dict", region_dict)
#loop through these to determine the wavelength of each
wl_regions = [value["mu"] for value in region_dict.values()]
#Make vertical markings at the location of the wl_regions.
#Get the data, sigmas, and mask
wl, fl, sigma, mask = model.get_data()
#Get the model flux
flm = model.get_spectrum()
#Get chebyshev
cheb = model.get_Cheb()
name = "Order {}".format(model.order)
plot_data = order_json(wl, fl, sigma, mask, flm, cheb)
plot_data.update({"wl_regions":wl_regions})
print(plot_data['wl_regions'])
render_template(base, plot_data)
#Get the covariance matrix
# S = model.get_Cov()
if __name__=="__main__":
    # Script entry point.
    main()
| StarcoderdataPython |
256465 | import os
from starlette.background import BackgroundTask
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Route
from gsheet_service import (
service,
sheet_service,
)
# NOTE(review): abspath(__name__) resolves the *module name string* against
# the current working directory, not this file's location — `__file__` was
# probably intended.  Verify callers before changing.
BASE_DIR = os.path.dirname(os.path.abspath(__name__))
# Shared service handle; checked for truthiness before use in the
# startup/shutdown hooks below.
service_api = sheet_service.service_api
async def bg_task():
    """Background task scheduled after mutating requests: drop the cached
    sheet database so subsequent reads see fresh data."""
    print("Clearing db cache")
    await sheet_service.clear_database()
    print("cleared db cache")
async def fetch_groups(request: Request):
    """Fetch sheet groups; 400 with the error message on failure."""
    payload = await request.json()
    result = await sheet_service.fetch_groups(**payload)
    if not result.error:
        return JSONResponse({"status": True, "data": result.data})
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
async def read_row(request: Request):
    """Read a single row; 400 with the error message on failure."""
    payload = await request.json()
    result: service.Result = await sheet_service.read_row(**payload)
    if not result.error:
        return JSONResponse({"status": True, "data": result.data})
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
async def read_sheetnames(request: Request):
    """List sheet names; 400 with the error message on failure."""
    payload = await request.json()
    result: service.Result = await sheet_service.read_sheetnames(**payload)
    if not result.error:
        return JSONResponse({"status": True, "data": result.data})
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
async def create_new_sheet(request: Request):
    """Create a sheet; on success schedule a cache clear in the background."""
    payload = await request.json()
    result: service.Result = await sheet_service.new_sheet(**payload)
    if not result.error:
        return JSONResponse(
            {"status": True, "data": result.data},
            background=BackgroundTask(bg_task),
        )
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
async def edit_existing_sheet(request: Request):
    """Edit a sheet; on success schedule a cache clear in the background."""
    payload = await request.json()
    result: service.Result = await sheet_service.edit_sheet(**payload)
    if not result.error:
        return JSONResponse(
            {"status": True, "data": result.data},
            background=BackgroundTask(bg_task),
        )
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
# async def secrets(request: Request):
# return JSONResponse(service.config)
async def update_existing(request: Request):
    """Update an existing row, then invalidate its cache entry.

    On success the response carries a background task that deletes the
    cached key for the updated row (link/sheet/key/value read from the
    request payload) after the response has been sent.
    """
    data = await request.json()
    result: service.Result = await sheet_service.update_existing(**data)
    if result.error:
        return JSONResponse({"status": False, "msg": result.error}, status_code=400)
    async def remove_row():
        # Runs after the response is sent; drops the stale cached row.
        print("Clearing row cache")
        await sheet_service.delete_key(
            link=data["link"], sheet=data["sheet"], key=data["key"], value=data["value"]
        )
        print("cleared row cache")
    task = BackgroundTask(remove_row)
    return JSONResponse({"status": True, "data": result.data}, background=task)
async def read_last(request: Request):
    """Read the last row; 400 with the error message on failure."""
    payload = await request.json()
    result: service.Result = await sheet_service.read_last(**payload)
    if not result.error:
        return JSONResponse({"status": True, "data": result.data})
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
async def add_new(request: Request):
    """Append a row; on success schedule a cache clear in the background."""
    payload = await request.json()
    result: service.Result = await sheet_service.add_new(**payload)
    if not result.error:
        return JSONResponse(
            {"status": True, "data": result.data},
            background=BackgroundTask(bg_task),
        )
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
async def clear_all_rows(request: Request):
    """Delete every row; on success schedule a cache clear in the background."""
    payload = await request.json()
    result: service.Result = await sheet_service.clear_all_rows(**payload)
    if not result.error:
        return JSONResponse(
            {"status": True, "data": result.data},
            background=BackgroundTask(bg_task),
        )
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
async def add_multiple_rows(request: Request):
    """Append several rows; on success schedule a cache clear in the background."""
    payload = await request.json()
    result: service.Result = await sheet_service.add_multiple_rows(**payload)
    if not result.error:
        return JSONResponse(
            {"status": True, "data": result.data},
            background=BackgroundTask(bg_task),
        )
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
async def read_new_row(request: Request):
    """Read a row via sheet_service.read_new_row; 400 on failure.

    NOTE(review): ``read_new_row`` is defined twice in this module with
    identical bodies; the definition executed last wins the module-level
    name used by the "/read-new-single" route.
    """
    data = await request.json()
    result: service.Result = await sheet_service.read_new_row(**data)
    if result.error:
        return JSONResponse({"status": False, "msg": result.error}, status_code=400)
    return JSONResponse({"status": True, "data": result.data})
async def read_referenced_cell(request: Request):
    """Resolve referenced cells; 400 with the error message on failure."""
    payload = await request.json()
    result: service.Result = await sheet_service.read_referenced_cell(**payload)
    if not result.error:
        return JSONResponse({"status": True, "data": result.data})
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
async def read_new_row(request: Request):
    """Read a row via sheet_service.read_new_row; 400 on failure.

    NOTE(review): duplicate definition — an identical ``read_new_row``
    appears earlier in this module; this later definition is the one the
    "/read-new-single" route actually binds.
    """
    data = await request.json()
    result: service.Result = await sheet_service.read_new_row(**data)
    if result.error:
        return JSONResponse({"status": False, "msg": result.error}, status_code=400)
    return JSONResponse({"status": True, "data": result.data})
async def clear_db(request: Request):
    """Clear the cache database; the request body is not read."""
    result = await sheet_service.clear_database()
    if not result.error:
        return JSONResponse({"status": True, "data": result.data})
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
async def delete_key(request: Request):
    """Delete a cached record; 400 with the error message on failure."""
    payload = await request.json()
    result: service.Result = await sheet_service.delete_key(**payload)
    if not result.error:
        return JSONResponse({"status": True, "data": result.data})
    return JSONResponse({"status": False, "msg": result.error}, status_code=400)
# URL table mapping the API endpoints to the handlers defined above.
# NOTE(review): "/read-new-single" binds whichever of the two identical
# read_new_row definitions executes last at import time.
routes = [
    Route("/read-single", read_row, methods=["POST"]),
    Route("/read-new-single", read_new_row, methods=["POST"]),
    Route("/read-referenced-cells", read_referenced_cell, methods=["POST"]),
    Route("/read-sheetnames", read_sheetnames, methods=["POST"]),
    Route("/update", update_existing, methods=["POST"]),
    Route("/add", add_new, methods=["POST"]),
    Route("/add-sheet", create_new_sheet, methods=["POST"]),
    Route("/edit-sheet", edit_existing_sheet, methods=["POST"]),
    Route("/read-last", read_last, methods=["POST"]),
    Route("/fetch-groups", fetch_groups, methods=["POST"]),
    Route("/clear-db", clear_db, methods=["GET"]),
    Route("/delete-record", delete_key, methods=["POST"]),
    Route("/clear-all-rows", clear_all_rows, methods=["POST"]),
    Route("/add-multiple-rows", add_multiple_rows, methods=["POST"]),
]
async def on_startup_task():
    """Open the backing database connection at application startup."""
    if service_api:
        await service_api.db_action("connect")
async def on_shutdown_task():
    """Close the backing database connection at application shutdown."""
    if service_api:
        await service_api.db_action("disconnect")
# Lifecycle hook lists consumed by the Starlette application.
on_startup = [on_startup_task]
on_shutdown = [on_shutdown_task]
3405627 | # necessary imports
import socket
import logging
import threading
import time
import RPi.GPIO as GPIO
import camera_stream
# setup the pins to use the BCM mode and disable GPIO warnings
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Globals shared between the control, UDP and ultrasonic threads.
message = ""     # last received UDP command character
distance = 0.0   # latest ultrasonic distance measurement (metres)
speed = 0        # current drive speed, -100..100
ultra_trig = 22 # ultrasonic trigger pin number
ultra_echo = 23 # ultrasonic echo pin number
# Drive pin setups: two 50 Hz PWM channels, one per direction.
GPIO.setup(18, GPIO.OUT) # setup the forward drive motor (pin 18)
fpwm = GPIO.PWM(18, 50)
fpwm.start(0)
GPIO.setup(17, GPIO.OUT) # setup the backward drive motor (pin 17)
bpwm = GPIO.PWM(17, 50)
bpwm.start(0)
# Servo setup: 50 Hz PWM; duty cycle = angle/18 + 2 maps angle to pulse width.
GPIO.setup(24, GPIO.OUT) # setup the servo motor (pin 24)
servo = GPIO.PWM(24, 50)
angle = 150      # steering angle, clamped to 125..175 elsewhere
servo.start(angle/18 + 2)
# main thread (interpreting messages, driving, steering)
def thread_function(name):
    """Main control loop: translate received key commands into drive/steer calls."""
    global message, distance, speed, angle
    time.sleep(2)  # give the other threads time to start up
    while True:
        print(distance)
        if message:
            # w/s adjust speed in steps of 10 (clamped to +/-100);
            # a/d adjust the steering angle in steps of 2.5 (clamped to 125..175).
            if message in ('w', 'W') and speed < 100:
                speed += 10
            elif message in ('s', 'S') and speed > -100:
                speed -= 10
            elif message in ('d', 'D') and angle > 125:
                angle -= 2.5
            elif message in ('a', 'A') and angle < 175:
                angle += 2.5
            drive(speed)
            steer(angle)
            message = ""  # consume the command so it is applied only once
        time.sleep(0.1)
# ultrasonic sensor thread
def ultrasonic_distance(name):
    """Continuously measure distance and publish it via the global ``distance``.

    Fires a ~10 us trigger pulse, waits for the echo edges and converts
    the pulse width to metres using the speed of sound (343 m/s, halved
    for the round trip).
    """
    global ultra_trig, ultra_echo, distance
    GPIO.setup(ultra_trig, GPIO.OUT)
    GPIO.setup(ultra_echo, GPIO.IN)
    GPIO.output(ultra_trig, 0)
    MAX_PULSE = 23200.0/58.0  # NOTE(review): computed but never used (timeout intended?)
    while True:
        GPIO.output(ultra_trig, 1)
        time.sleep(0.001*0.01)  # 10 microsecond trigger pulse
        GPIO.output(ultra_trig, 0)
        t1 = 0
        t2 = 0
        # NOTE(review): these busy-wait loops have no timeout and will hang
        # the thread if no echo edge ever arrives.
        while GPIO.input(ultra_echo) == 0:
            pass
        t1 = int(round(time.time() * 1000*1000))  # echo rise time, microseconds
        while GPIO.input(ultra_echo) == 1:
            pass
        t2 = int(round(time.time() * 1000*1000))  # echo fall time, microseconds
        width = ((t2 - t1)*343/2)/(1000*1000)  # pulse width -> metres
        distance = width
        time.sleep(0.06)
# UPD thread (message handling)
def UDP_Thread(name):
    """Receive drive commands over UDP into the shared global ``message``."""
    global message
    # Resolve our own address via mDNS hostname and listen on port 5005.
    UDP_IP = socket.gethostbyname('raspberrypi.local')
    UDP_PORT = 5005
    print("IP: {}:{}".format(UDP_IP, UDP_PORT))
    sock = socket.socket(socket.AF_INET, # Internet
                         socket.SOCK_DGRAM) # UDP
    sock.bind((UDP_IP, UDP_PORT))
    while True:
        data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes
        message = data.decode('utf-8')
        print("received message: %s" % data)
# drive the robot with a given speed
def drive(speed):
    """Apply *speed* (-100..100) to the drive motors with obstacle guarding.

    Positive speed drives the forward PWM channel, negative the backward
    one, zero stops both.
    """
    global distance, fpwm, bpwm
    print('driving with speed {} and distance {}'.format(speed, distance))
    # Obstacle guard: stop forward motion under 0.2 m, halve speed under 0.4 m.
    if distance < 0.2 and speed > 0:
        speed = 0
    elif distance < 0.4:
        speed /= 2
    if speed > 0:
        fpwm.ChangeDutyCycle(speed)
        bpwm.ChangeDutyCycle(0)
    elif speed < 0:
        fpwm.ChangeDutyCycle(0)
        bpwm.ChangeDutyCycle(abs(speed))
    else:
        fpwm.ChangeDutyCycle(0)
        bpwm.ChangeDutyCycle(0)
        #print('not driving!')
# steer the robot with a given angle
def steer(angle):
    """Point the steering servo at *angle*, clamped to the safe 125..175 range."""
    global servo
    clamped = min(max(angle, 125), 175)
    # duty cycle = angle/18 + 2 maps the angle onto the servo pulse width
    servo.ChangeDutyCycle(clamped / 18 + 2)
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
x = threading.Thread(target=thread_function, args=(1,)) # start main thread
x.start()
comms = threading.Thread(target=UDP_Thread, args=(2,)) # start communication thread
comms.start()
ultra = threading.Thread(target=ultrasonic_distance, args=(3,)) # start ultrasonic thread
ultra.start()
cam = threading.Thread(target=camera_stream.Camera.camera_server, args=(4,)) # start camera thread
cam.start()
print('threads started')
| StarcoderdataPython |
12858901 | import base64
import json
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_NONE,
SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD)
from twisted.web.http_headers import Headers
from twisted.internet.defer import inlineCallbacks, fail, succeed
from vxsandbox.resources.http import (
HttpClientContextFactory, HttpClientPolicyForHTTPS, make_context_factory,
HttpClientResource)
from vxsandbox.resources.tests.utils import ResourceTestCaseBase
class DummyResponse(object):
    """Minimal stand-in for a twisted.web response, starting with empty headers."""
    def __init__(self):
        self.headers = Headers({})
class DummyHTTPClient(object):
    """Fake HTTP client that records requests and replays a canned result."""
    def __init__(self):
        # Deferred returned by the next request(); set via fail_next/succeed_next.
        self._next_http_request_result = None
        # Every (args, kwargs) pair passed to request(), in order.
        self.http_requests = []
    def set_agent(self, agent):
        """Remember the Agent so tests can inspect its context factory."""
        self.agent = agent
    def get_context_factory(self):
        """Return the TLS context factory buried inside the captured Agent."""
        # We need to dig around inside our Agent to find the context factory.
        # Since this involves private attributes that have changed a few times
        # recently, we need to try various options.
        if hasattr(self.agent, "_contextFactory"):
            # For Twisted 13.x
            return self.agent._contextFactory
        elif hasattr(self.agent, "_policyForHTTPS"):
            # For Twisted 14.x
            return self.agent._policyForHTTPS
        elif hasattr(self.agent, "_endpointFactory"):
            # For Twisted 15.0.0 (and possibly newer)
            return self.agent._endpointFactory._policyForHTTPS
        else:
            raise NotImplementedError(
                "I can't find the context factory on this Agent. This seems"
                " to change every few versions of Twisted.")
    def fail_next(self, error):
        """Make the next request() errback with *error*."""
        self._next_http_request_result = fail(error)
    def succeed_next(self, body, code=200, headers={}):
        """Make the next request() succeed with a DummyResponse carrying *body*."""
        # The mutable default is safe here: *headers* is only read, never mutated.
        default_headers = {
            'Content-Length': str(len(body)),
        }
        default_headers.update(headers)
        response = DummyResponse()
        response.code = code
        for header, value in default_headers.items():
            response.headers.addRawHeader(header, value)
        response.content = lambda: succeed(body)
        self._next_http_request_result = succeed(response)
    def request(self, *args, **kw):
        """Record the call and return the pre-arranged Deferred."""
        self.http_requests.append((args, kw))
        return self._next_http_request_result
class TestHttpClientResource(ResourceTestCaseBase):
resource_cls = HttpClientResource
    @inlineCallbacks
    def setUp(self):
        """Create the resource under test and patch in the dummy HTTP client."""
        super(TestHttpClientResource, self).setUp()
        yield self.create_resource({})
        self.dummy_client = DummyHTTPClient()
        self.patch(self.resource_cls,
                   'http_client_class', self.get_dummy_client)
    def get_dummy_client(self, agent):
        """Factory handed to the resource; captures the Agent on the dummy client."""
        self.dummy_client.set_agent(agent)
        return self.dummy_client
    def http_request_fail(self, error):
        # Arrange for the next dummy HTTP request to errback with *error*.
        self.dummy_client.fail_next(error)
    def http_request_succeed(self, body, code=200, headers={}):
        # Arrange for the next dummy HTTP request to succeed with *body*.
        self.dummy_client.succeed_next(body, code, headers)
    def assert_not_unicode(self, arg):
        # The HTTP layer must be handed byte strings, never unicode (Python 2).
        self.assertFalse(isinstance(arg, unicode))
    def get_context_factory(self):
        # Delegate to the dummy client, which digs the factory out of the Agent.
        return self.dummy_client.get_context_factory()
    def get_context(self, context_factory=None):
        """Extract an OpenSSL context from old- or new-style context factories."""
        if context_factory is None:
            context_factory = self.get_context_factory()
        if hasattr(context_factory, 'creatorForNetloc'):
            # This context_factory is a new-style IPolicyForHTTPS
            # implementation, so we need to get a context from through its
            # client connection creator. The creator could either be a wrapper
            # around a ClientContextFactory (in which case we treat it like
            # one) or a ClientTLSOptions object (which means we have to grab
            # the context from a private attribute).
            creator = context_factory.creatorForNetloc('example.com', 80)
            if hasattr(creator, 'getContext'):
                return creator.getContext()
            else:
                return creator._ctx
        else:
            # This context_factory is an old-style WebClientContextFactory and
            # will build us a context object if we ask nicely.
            return context_factory.getContext('example.com', 80)
    def assert_http_request(self, url, method='GET', headers=None, data=None,
                            timeout=None, files=None):
        """Assert that exactly one HTTP request matching these arguments was made.

        Also checks that nothing unicode leaked through to the HTTP layer
        (method, body and header keys/values must be byte strings).
        """
        timeout = (timeout if timeout is not None
                   else self.resource.timeout)
        args = (method, url,)
        kw = dict(headers=headers, data=data,
                  timeout=timeout, files=files)
        # Destructuring a one-element list asserts exactly one request was made.
        [(actual_args, actual_kw)] = self.dummy_client.http_requests
        # NOTE: Files are handed over to treq as file pointer-ish things
        # which in our case are `StringIO` instances.
        actual_kw_files = actual_kw.get('files')
        if actual_kw_files is not None:
            actual_kw_files = actual_kw.pop('files', None)
            kw_files = kw.pop('files', {})
            for name, file_data in actual_kw_files.items():
                kw_file_data = kw_files[name]
                file_name, content_type, sio = file_data
                self.assertEqual(
                    (file_name, content_type, sio.getvalue()),
                    kw_file_data)
        self.assertEqual((actual_args, actual_kw), (args, kw))
        self.assert_not_unicode(actual_args[0])
        self.assert_not_unicode(actual_kw.get('data'))
        headers = actual_kw.get('headers')
        if headers is not None:
            for key, values in headers.items():
                self.assert_not_unicode(key)
                for value in values:
                    self.assert_not_unicode(value)
    def test_make_context_factory_no_method_verify_none(self):
        """VERIFY_NONE and no SSL method yields a factory that skips verification."""
        context_factory = make_context_factory(verify_options=VERIFY_NONE)
        self.assertIsInstance(context_factory, HttpClientContextFactory)
        self.assertEqual(context_factory.verify_options, VERIFY_NONE)
        self.assertEqual(context_factory.ssl_method, None)
        self.assertEqual(
            self.get_context(context_factory).get_verify_mode(), VERIFY_NONE)
    def test_make_context_factory_no_method_verify_peer(self):
        """VERIFY_PEER enables peer verification; factory type varies by Twisted."""
        # This test's behaviour depends on the version of Twisted being used.
        context_factory = make_context_factory(verify_options=VERIFY_PEER)
        context = self.get_context(context_factory)
        self.assertEqual(context_factory.ssl_method, None)
        self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE)
        if HttpClientPolicyForHTTPS is None:
            # We have Twisted<14.0.0
            self.assertIsInstance(context_factory, HttpClientContextFactory)
            self.assertEqual(context_factory.verify_options, VERIFY_PEER)
            self.assertEqual(context.get_verify_mode(), VERIFY_PEER)
        else:
            self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)
    def test_make_context_factory_no_method_verify_peer_or_fail(self):
        """Combined VERIFY_PEER|FAIL_IF_NO_PEER_CERT flags reach the context."""
        # This test's behaviour depends on the version of Twisted being used.
        context_factory = make_context_factory(
            verify_options=(VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT))
        context = self.get_context(context_factory)
        self.assertEqual(context_factory.ssl_method, None)
        self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE)
        if HttpClientPolicyForHTTPS is None:
            # We have Twisted<14.0.0
            self.assertIsInstance(context_factory, HttpClientContextFactory)
            self.assertEqual(
                context_factory.verify_options,
                VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT)
            self.assertEqual(
                context.get_verify_mode(),
                VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT)
        else:
            self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)
    def test_make_context_factory_no_method_no_verify(self):
        """Default call produces the version-appropriate factory with no options."""
        # This test's behaviour depends on the version of Twisted being used.
        context_factory = make_context_factory()
        self.assertEqual(context_factory.ssl_method, None)
        if HttpClientPolicyForHTTPS is None:
            # We have Twisted<14.0.0
            self.assertIsInstance(context_factory, HttpClientContextFactory)
            self.assertEqual(context_factory.verify_options, None)
        else:
            self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)
    def test_make_context_factory_sslv3_no_verify(self):
        """An explicit SSL method is stored on the factory unchanged."""
        # This test's behaviour depends on the version of Twisted being used.
        context_factory = make_context_factory(ssl_method=SSLv3_METHOD)
        self.assertEqual(context_factory.ssl_method, SSLv3_METHOD)
        if HttpClientPolicyForHTTPS is None:
            # We have Twisted<14.0.0
            self.assertIsInstance(context_factory, HttpClientContextFactory)
            self.assertEqual(context_factory.verify_options, None)
        else:
            self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)
@inlineCallbacks
def test_handle_get(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('get',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='GET')
@inlineCallbacks
def test_handle_post(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('post',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='POST')
@inlineCallbacks
def test_handle_patch(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('patch',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='PATCH')
@inlineCallbacks
def test_handle_head(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('head',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='HEAD')
@inlineCallbacks
def test_handle_delete(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('delete',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='DELETE')
@inlineCallbacks
def test_handle_put(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('put',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='PUT')
    @inlineCallbacks
    def test_failed_get(self):
        """A failing HTTP request surfaces as success=False with the reason."""
        self.http_request_fail(ValueError("HTTP request failed"))
        reply = yield self.dispatch_command('get',
                                            url='http://www.example.com')
        self.assertFalse(reply['success'])
        self.assertEqual(reply['reason'], "HTTP request failed")
        self.assert_http_request('http://www.example.com', method='GET')
@inlineCallbacks
def test_null_url(self):
    """Dispatching without a URL must fail with an explanatory reason."""
    response = yield self.dispatch_command('get')
    self.assertFalse(response['success'])
    self.assertEqual(response['reason'], "No URL given")
@inlineCallbacks
def test_https_request(self):
    """HTTPS GET succeeds and uses the expected context-factory type.

    This test's behaviour depends on the version of Twisted being used.
    """
    self.http_request_succeed("foo")
    response = yield self.dispatch_command('get',
                                           url='https://www.example.com')
    self.assertTrue(response['success'])
    self.assertEqual(response['body'], "foo")
    self.assert_http_request('https://www.example.com', method='GET')
    factory = self.get_context_factory()
    self.assertEqual(factory.ssl_method, None)
    if HttpClientPolicyForHTTPS is not None:
        self.assertIsInstance(factory, HttpClientPolicyForHTTPS)
    else:
        self.assertIsInstance(factory, HttpClientContextFactory)
        self.assertEqual(factory.verify_options, None)
@inlineCallbacks
def test_https_request_verify_none(self):
    """The VERIFY_NONE option must be applied to the SSL context."""
    self.http_request_succeed("foo")
    response = yield self.dispatch_command(
        'get', url='https://www.example.com',
        verify_options=['VERIFY_NONE'])
    self.assertTrue(response['success'])
    self.assertEqual(response['body'], "foo")
    self.assert_http_request('https://www.example.com', method='GET')
    ssl_context = self.get_context()
    self.assertEqual(ssl_context.get_verify_mode(), VERIFY_NONE)
@inlineCallbacks
def test_https_request_verify_peer_or_fail(self):
    """VERIFY_PEER + VERIFY_FAIL_IF_NO_PEER_CERT select strict verification.

    This test's behaviour depends on the version of Twisted being used.
    """
    self.http_request_succeed("foo")
    response = yield self.dispatch_command(
        'get', url='https://www.example.com',
        verify_options=['VERIFY_PEER', 'VERIFY_FAIL_IF_NO_PEER_CERT'])
    self.assertTrue(response['success'])
    self.assertEqual(response['body'], "foo")
    self.assert_http_request('https://www.example.com', method='GET')
    ssl_context = self.get_context()
    # We don't control verify mode in newer Twisted.
    self.assertNotEqual(ssl_context.get_verify_mode(), VERIFY_NONE)
    if HttpClientPolicyForHTTPS is None:
        self.assertEqual(
            ssl_context.get_verify_mode(),
            VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT)
@inlineCallbacks
def test_handle_post_files(self):
    """File attachments are base64-decoded and forwarded with the POST."""
    self.http_request_succeed('')
    payload = json.dumps({'foo': 'bar'})
    response = yield self.dispatch_command(
        'post', url='https://www.example.com', files={
            'foo': {
                'file_name': 'foo.json',
                'content_type': 'application/json',
                'data': base64.b64encode(payload),
            }
        })
    self.assertTrue(response['success'])
    self.assert_http_request(
        'https://www.example.com', method='POST', files={
            'foo': ('foo.json', 'application/json', payload),
        })
@inlineCallbacks
def test_data_limit_exceeded_using_head_method(self):
    """HEAD requests succeed even when Content-Length exceeds the limit."""
    limit = self.resource.DEFAULT_DATA_LIMIT
    self.http_request_succeed('', headers={
        'Content-Length': str(limit + 1),
    })
    response = yield self.dispatch_command(
        'head', url='https://www.example.com')
    self.assertTrue(response['success'])
    self.assertEqual(response['body'], "")
    self.assert_http_request('https://www.example.com', method='HEAD')
@inlineCallbacks
def test_data_limit_exceeded_using_header(self):
    """A Content-Length header above the limit causes the GET to fail."""
    limit = self.resource.DEFAULT_DATA_LIMIT
    self.http_request_succeed('', headers={
        'Content-Length': str(limit + 1),
    })
    response = yield self.dispatch_command(
        'get', url='https://www.example.com')
    self.assertFalse(response['success'])
    self.assertEqual(
        response['reason'],
        'Received %d bytes, maximum of %s bytes allowed.' % (
            limit + 1, limit))
@inlineCallbacks
def test_data_limit_exceeded_inferred_from_body(self):
    """A body longer than the limit is rejected even without a header."""
    limit = self.resource.DEFAULT_DATA_LIMIT
    self.http_request_succeed('1' * (limit + 1))
    response = yield self.dispatch_command(
        'get', url='https://www.example.com')
    self.assertFalse(response['success'])
    self.assertEqual(
        response['reason'],
        'Received %d bytes, maximum of %s bytes allowed.' % (
            limit + 1, limit))
@inlineCallbacks
def test_https_request_method_default(self):
    """Without an explicit ssl_method the factory's ssl_method stays None."""
    self.http_request_succeed("foo")
    response = yield self.dispatch_command(
        'get', url='https://www.example.com')
    self.assertTrue(response['success'])
    self.assertEqual(response['body'], "foo")
    self.assert_http_request('https://www.example.com', method='GET')
    self.assertEqual(self.get_context_factory().ssl_method, None)
@inlineCallbacks
def test_https_request_method_SSLv3(self):
    """ssl_method='SSLv3' selects SSLv3_METHOD on the context factory."""
    self.http_request_succeed("foo")
    response = yield self.dispatch_command(
        'get', url='https://www.example.com', ssl_method='SSLv3')
    self.assertTrue(response['success'])
    self.assertEqual(response['body'], "foo")
    self.assert_http_request('https://www.example.com', method='GET')
    self.assertEqual(self.get_context_factory().ssl_method, SSLv3_METHOD)
@inlineCallbacks
def test_https_request_method_SSLv23(self):
    """ssl_method='SSLv23' selects SSLv23_METHOD on the context factory."""
    self.http_request_succeed("foo")
    response = yield self.dispatch_command(
        'get', url='https://www.example.com', ssl_method='SSLv23')
    self.assertTrue(response['success'])
    self.assertEqual(response['body'], "foo")
    self.assert_http_request('https://www.example.com', method='GET')
    self.assertEqual(self.get_context_factory().ssl_method, SSLv23_METHOD)
@inlineCallbacks
def test_https_request_method_TLSv1(self):
    """ssl_method='TLSv1' selects TLSv1_METHOD on the context factory."""
    self.http_request_succeed("foo")
    response = yield self.dispatch_command(
        'get', url='https://www.example.com', ssl_method='TLSv1')
    self.assertTrue(response['success'])
    self.assertEqual(response['body'], "foo")
    self.assert_http_request('https://www.example.com', method='GET')
    self.assertEqual(self.get_context_factory().ssl_method, TLSv1_METHOD)
| StarcoderdataPython |
38204 | import utils
import os
import json
def getjsondata(path):
    """Load and parse a JSON file.

    Relative paths are resolved against the directory containing this
    module (not the process working directory), so callers can pass
    paths like './conf.json' regardless of where the program was started.

    :param path: absolute or module-relative path to a JSON file.
    :returns: the parsed JSON value.
    :raises OSError: if the file cannot be opened.
    :raises ValueError: if the file does not contain valid JSON.
    """
    if not os.path.isabs(path):
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), path)
    # Context manager guarantees the handle is closed even if parsing
    # fails; the original code leaked the open file object.
    with open(path) as f:
        return json.load(f)
def getconfig():
    """Return the parsed contents of the conf.json shipped next to this module."""
    config_path = './conf.json'
    return getjsondata(config_path)
11262518 | # Copyright (c) 2020 <NAME>
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
import time
import json
from binascii import hexlify, unhexlify
from moneysocket.utl.third_party.lightning_payencode.lnaddr import lndecode
MSATOSHIS_PER_BTC = 100000000000
class Bolt11(object):
    """Parse a BOLT 11 payment request (invoice) into a dictionary.

    WARNING: this is kind of primitive and doesn't properly parse all the
    valid tags in
    https://github.com/lightningnetwork/lightning-rfc/blob/master/11-payment-encoding.md

    All operations are stateless, so every method is a @staticmethod.
    (The originals omitted both ``self`` and the decorator, which only
    worked when called through the class object and raised TypeError when
    called on an instance.)
    """

    @staticmethod
    def tags_by_name(name, tags):
        """Return the value of every tag in *tags* whose name equals *name*.

        :param name: single-character tag name (e.g. 'd', 'r', 'x').
        :param tags: iterable of (name, value, ...) tag tuples as produced
            by lndecode.
        """
        return [t[1] for t in tags if t[0] == name]

    @staticmethod
    def dump(bolt11):
        """Decode *bolt11* and pretty-print its fields to stdout.

        Intended for debugging; returns nothing.
        """
        a = lndecode(bolt11)
        print(a.__dict__)
        print("Signed with public key:", hexlify(a.pubkey.serialize()))
        print("Currency:", a.currency)
        print("Payment hash:", hexlify(a.paymenthash))
        if a.amount:
            print("Amount:", a.amount)
        print("Timestamp: {} ({})".format(a.date, time.ctime(a.date)))
        for r in Bolt11.tags_by_name('r', a.tags):
            print("Route: ", end='')
            for step in r:
                print("{}/{}/{}/{}/{} ".format(hexlify(step[0]), hexlify(step[1]), step[2], step[3], step[4]), end='')
            print('')
        fallback = Bolt11.tags_by_name('f', a.tags)
        if fallback:
            print("Fallback:", fallback[0])
        description = Bolt11.tags_by_name('d', a.tags)
        if description:
            print("Description:", description[0])
        dhash = Bolt11.tags_by_name('h', a.tags)
        if dhash:
            print("Description hash:", hexlify(dhash[0]))
        expiry = Bolt11.tags_by_name('x', a.tags)
        if expiry:
            print("Expiry (seconds):", expiry[0])
        for t in [t for t in a.tags if t[0] not in 'rdfhx']:
            print("UNKNOWN TAG {}: {}".format(t[0], hexlify(t[1])))

    @staticmethod
    def iter_attributes(bolt11):
        """Decode *bolt11* and yield (key, value) pairs of its attributes.

        Yields payee, currency, payment_hash, created_at, description and
        expiry always; msatoshi/amount_msat only when the invoice carries
        an amount.  Missing description defaults to "" and missing expiry
        to 3600 seconds.
        """
        a = lndecode(bolt11)
        yield "payee", a.pubkey.serialize().hex()
        yield "currency", a.currency
        yield "payment_hash", a.paymenthash.hex()
        if a.amount:
            msat = int(a.amount * MSATOSHIS_PER_BTC)
            yield "msatoshi", msat
            yield "amount_msat", "%dmsat" % msat
        yield "created_at", a.date
        description = Bolt11.tags_by_name('d', a.tags)
        if description:
            yield "description", description[0]
        else:
            yield "description", ""
        expiry = Bolt11.tags_by_name('x', a.tags)
        if expiry:
            yield "expiry", expiry[0]
        else:
            yield "expiry", 3600  # default if not specified

    @staticmethod
    def to_dict(bolt11):
        """Decode *bolt11* and return its attributes as a dictionary."""
        return {key: value for key, value in Bolt11.iter_attributes(bolt11)}
| StarcoderdataPython |
270219 | <gh_stars>0
# Generated from .\seedot.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .seedotParser import seedotParser
else:
from seedotParser import seedotParser
# This class defines a complete generic visitor for a parse tree produced by seedotParser.
class seedotVisitor(ParseTreeVisitor):
    """Generic visitor for parse trees produced by seedotParser.

    NOTE: this file was auto-generated by ANTLR from seedot.g4 -- do not
    edit it by hand; regenerate it instead.  Every visit method simply
    delegates to ``visitChildren``; subclasses override only the methods
    for the grammar rules they care about.
    """

    # Visit a parse tree produced by seedotParser#bop1.
    def visitBop1(self, ctx:seedotParser.Bop1Context):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#init.
    def visitInit(self, ctx:seedotParser.InitContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#bop2.
    def visitBop2(self, ctx:seedotParser.Bop2Context):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#decl.
    def visitDecl(self, ctx:seedotParser.DeclContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#splice.
    def visitSplice(self, ctx:seedotParser.SpliceContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#index.
    def visitIndex(self, ctx:seedotParser.IndexContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#sum.
    def visitSum(self, ctx:seedotParser.SumContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#reshape.
    def visitReshape(self, ctx:seedotParser.ReshapeContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#float.
    def visitFloat(self, ctx:seedotParser.FloatContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#reverse.
    def visitReverse(self, ctx:seedotParser.ReverseContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#cond.
    def visitCond(self, ctx:seedotParser.CondContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#int.
    def visitInt(self, ctx:seedotParser.IntContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#transp.
    def visitTransp(self, ctx:seedotParser.TranspContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#paren.
    def visitParen(self, ctx:seedotParser.ParenContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#func.
    def visitFunc(self, ctx:seedotParser.FuncContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#loop.
    def visitLoop(self, ctx:seedotParser.LoopContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#uop.
    def visitUop(self, ctx:seedotParser.UopContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#convolution.
    def visitConvolution(self, ctx:seedotParser.ConvolutionContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#let.
    def visitLet(self, ctx:seedotParser.LetContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#id.
    def visitId(self, ctx:seedotParser.IdContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#funcCall.
    def visitFuncCall(self, ctx:seedotParser.FuncCallContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#maxpool.
    def visitMaxpool(self, ctx:seedotParser.MaxpoolContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#name.
    def visitName(self, ctx:seedotParser.NameContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#leftSplice.
    def visitLeftSplice(self, ctx:seedotParser.LeftSpliceContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#addOp.
    def visitAddOp(self, ctx:seedotParser.AddOpContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#binOp.
    def visitBinOp(self, ctx:seedotParser.BinOpContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#specialFunc.
    def visitSpecialFunc(self, ctx:seedotParser.SpecialFuncContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by seedotParser#intConstList.
    def visitIntConstList(self, ctx:seedotParser.IntConstListContext):
        return self.visitChildren(ctx)
del seedotParser | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.