id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4844470 | import numpy as np
from scipy.signal import find_peaks
from .utils import compute_probs
from .utils import probs2cfs
def pick_arrivals(cf):
    """Find prominent local peaks (arrival picks) in a characteristic function.

    A sample qualifies as a pick when its height exceeds 30x the median
    absolute deviation of the CF, it is at least 40 samples away from any
    other pick, and its prominence is at least half the CF maximum.

    Args:
        cf: 1-D numpy array, the characteristic function.

    Returns:
        (peaks, confidences): sample indices of the picks and their relative
        prominences (normalized to sum to 1), or (np.nan, np.nan) when no
        peak qualifies.
    """
    prom = cf.max()
    # Median absolute deviation computed by hand (equivalent to
    # scipy.stats.median_abs_deviation on a 1-D array).
    median = np.median(cf)
    mad = np.median(abs(cf - median))
    # BUG FIX: the original ran find_peaks twice in a loop whose threshold
    # relaxation ('prom /= 2') was commented out, so the second iteration was
    # identical to the first; a single call is behaviorally equivalent.
    peaks, properties = find_peaks(x=cf, height=30 * mad,
                                   distance=40, prominence=prom / 2)
    if peaks.size > 0:
        peak_prom = properties['prominences']
        confidences = peak_prom / peak_prom.sum()
        return peaks, confidences
    return (np.nan, np.nan)
def pick_probs(waveform, fs, wl, model, transform, g=0.1, batch_size=None):
    """Pick P and S arrivals directly from the model's probability traces.

    Args:
        waveform: input seismic waveform (3 channels).
        fs: sampling frequency in Hz.
        wl: window length in seconds.
        model: trained picking model.
        transform: preprocessing transform applied to each window.
        g: sliding-window step in seconds (default 0.1).
        batch_size: optional inference batch size.

    Returns:
        dict with pick times in seconds ('p', 's'), pick confidences
        ('p_conf', 's_conf') and the raw probability traces for P, S and
        noise ('prob_p', 'prob_s', 'prob_n').
    """
    probs = compute_probs(model, transform, waveform,
                          shape=[3, fs * wl],
                          step=[1, int(g * fs)],
                          batch_size=batch_size)
    # Find prominent local peaks directly on the P/S probability traces
    # (probs[1] = P, probs[2] = S, probs[0] = noise).
    peaks_p, confidences_p = pick_arrivals(probs[1])
    peaks_s, confidences_s = pick_arrivals(probs[2])
    # Peak index -> time: each step is g seconds; the +5 looks like a
    # half-window offset correction -- TODO confirm against wl.
    return {
        'p': peaks_p * g + 5,
        's': peaks_s * g + 5,
        'p_conf': confidences_p,
        's_conf': confidences_s,
        'prob_p': probs[1],
        'prob_s': probs[2],
        'prob_n': probs[0],
    }
def pick(waveform, fs, wl, model, transform, g=0.1, batch_size=None):
    """Pick P and S arrivals from characteristic functions of model output.

    Like pick_probs(), but peaks are searched on characteristic functions
    derived from the probability traces (via probs2cfs) instead of on the
    raw traces themselves.

    Args:
        waveform: input seismic waveform (3 channels).
        fs: sampling frequency in Hz.
        wl: window length in seconds.
        model: trained picking model.
        transform: preprocessing transform applied to each window.
        g: sliding-window step in seconds (default 0.1).
        batch_size: optional inference batch size.

    Returns:
        dict with pick times in seconds ('p', 's'), pick confidences
        ('p_conf', 's_conf') and the characteristic functions
        ('cf_p', 'cf_s').
    """
    probs = compute_probs(model, transform, waveform,
                          shape=[3, fs * wl],
                          step=[1, int(g * fs)],
                          batch_size=batch_size)
    # Derive characteristic functions from the probability traces.
    cf_p, cf_s = probs2cfs(probs)
    # Find prominent local peaks on the characteristic functions.
    peaks_p, confidences_p = pick_arrivals(cf_p)
    peaks_s, confidences_s = pick_arrivals(cf_s)
    # Peak index -> time: each step is g seconds; the +5 looks like a
    # half-window offset correction -- TODO confirm against wl.
    return {
        'p': peaks_p * g + 5,
        's': peaks_s * g + 5,
        'p_conf': confidences_p,
        's_conf': confidences_s,
        'cf_p': cf_p,
        'cf_s': cf_s,
    }
| StarcoderdataPython |
3303396 | """
CISCO SAMPLE CODE LICENSE
Version 1.0
Copyright (c) 2020 Cisco and/or its affiliates
These terms govern this Cisco example or demo source code and its
associated documentation (together, the "Sample Code"). By downloading,
copying, modifying, compiling, or redistributing the Sample Code, you
accept and agree to be bound by the following terms and conditions (the
"License"). If you are accepting the License on behalf of an entity, you
represent that you have the authority to do so (either you or the entity,
"you"). Sample Code is not supported by Cisco TAC and is not tested for
quality or performance. This is your only license to the Sample Code and
all rights not expressly granted are reserved.
"""
# -*- mode: python; python-indent: 4 -*-
import ncs
from ncs.application import Service
from ncs.dp import Action
# ---------------
# ACTIONS EXAMPLE
# ---------------
class PartialSyncFrom(Action):
    """NSO action that runs a partial sync-from for one device config path."""

    @Action.action
    def cb_action(self, uinfo, name, kp, input, output, trans):
        """Execute /devices/partial-sync-from for input.device and input.xpath.

        Opens a fresh MAAPI write transaction as 'admin', invokes the
        built-in partial-sync-from action, and copies the device's sync
        result string into output.result.
        """
        self.log.info('action name: ', name)
        self.log.info('action input.device: ', input.device)
        self.log.info('action input.xpath: ', input.xpath)
        with ncs.maapi.Maapi() as m:
            with ncs.maapi.Session(m, "admin", 'python', groups=['ncsadmin']):
                with m.start_write_trans() as trans_write:
                    root = ncs.maagic.get_root(trans_write)
                    self.log.info("Executing a partial sync from device {}...".format(input.device))
                    # Build the action input: a single keypath restricted to
                    # the requested device config subtree.
                    input_params = root.devices.partial_sync_from.get_input()
                    input_params.path = ["/devices/device[name='" + input.device + "']/config/" + input.xpath]
                    partial_sync_output = root.devices.partial_sync_from(input_params)
                    self.log.info(
                        "Output of partial sync-from: {}".format(partial_sync_output.sync_result[input.device].result))
                    output.result = partial_sync_output.sync_result[input.device].result
# ---------------------------------------------
# COMPONENT THREAD THAT WILL BE STARTED BY NCS.
# ---------------------------------------------
class Main(ncs.application.Application):
    """NCS application component started by NSO; registers the action."""

    def setup(self):
        # The Application base class sets up logging for us; it is
        # accessible through 'self.log' (an ncs.log.Log instance).
        self.log.info('Main RUNNING')
        # Register the action callback class under its action point.
        self.register_action('partial-sync-from-action', PartialSyncFrom)
        # Registering a callback makes the base class create a daemon for
        # the action point; once setup() returns, all registrations are
        # considered done and the application is 'started'.

    def teardown(self):
        # Called when NCS goes down, packages are reloaded, or an error
        # occurs -- nothing to clean up here beyond logging.
        self.log.info('Main FINISHED')
| StarcoderdataPython |
import pyglet

# Alternative sketch windows, kept commented for quick switching while
# developing:
#from sketches.OrthoWindow import OrthoWindow as MainWindow
#from sketches.ChunkWindow import ChunkWindow as MainWindow
#from sketches.ProjectionWindow import ProjectionWindow as MainWindow
from sketches.WorldWindow import WorldWindow as MainWindow
#from sketches.RenderGraphTestWindow import RenderGraphTestWindow as MainWindow

# platform = pyglet.window.get_platform()
# display = platform.get_default_display()

# Request an OpenGL 3.0, double-buffered context.
gl_config = pyglet.gl.Config(
    major_version=3,
    minor_version=0,
    double_buffer=True,
)
main_window = MainWindow(config=gl_config, resizable=True)
pyglet.app.run()
| StarcoderdataPython |
1883868 | # -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
# (c) 1998-2022 all rights reserved
# framework
import merlin
# the GNU compiler suite
class Suite(merlin.component,
            family="merlin.compilers.gnu", implements=merlin.protocols.compiler):
    """
    The GNU compiler suite
    """
# end of file
| StarcoderdataPython |
3492533 | <gh_stars>0
import time
import importlib
from pip._internal import main as pipmain
import data.global_variables as global_var
# FUNCTION - Installs the given Module in Python
def install(package):
    """Install *package* by invoking pip in-process.

    NOTE(review): pip._internal is not a public API and may break across
    pip versions; 'subprocess.run([sys.executable, "-m", "pip", ...])' is
    the supported alternative.
    """
    pipmain(['install', package])
# Reads Requirements from Requirements.txt
def read_requirements():
    """Return the list of requirement strings from the requirements file.

    The file path comes from data.global_variables.requirements_path.
    """
    with open(global_var.requirements_path) as f:
        requirements = f.read().splitlines()
    return requirements
# Start
if __name__ == '__main__':
    print("Script Started...")
    # Record the start time so total execution time can be reported.
    start_time = time.time()
    # BUG FIX: read_requirements() was called twice (first result discarded);
    # read the requirements list once.
    dependencies = read_requirements()
    # Install any requirement that cannot currently be imported.
    try:
        for module in dependencies:
            # find_spec works for python >= 3.4; None means "not installed".
            # NOTE(review): assumes each requirement name equals its import
            # name, which is not true for every package.
            module_check = importlib.util.find_spec(module)
            if module_check is None:
                install(module)
        print("Dependencies resolved...")
    except ImportError:
        print("There was an issue in resolving dependencies!")
    # Print time taken to execute the script.
    print("CUSTOM INFO : --- Script Execution Time: %s seconds ---" % (time.time() - start_time))
| StarcoderdataPython |
266350 | # -*- coding: utf-8 -*-
import json
import os
from urlparse import urljoin
import requests
import rethinkdb as r
from requests.auth import HTTPDigestAuth
from requests.packages.urllib3.exceptions import (InsecureRequestWarning,
SNIMissingWarning,
InsecurePlatformWarning)
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(SNIMissingWarning)
# Load shared settings from ../config.json, resolved relative to this file.
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../config.json')
with open(config_file) as fr:
    config = json.loads(fr.read())
def test_rethinkdb():
    """Check that RethinkDB accepts connections with the configured settings."""
    rdb = r.connect(host=config['rethinkdb']['host'],
                    port=config['rethinkdb']['port'],
                    db=config['rethinkdb']['db'],
                    timeout=3)
    rdb.close()
def test_moloch():
    """Check that the Moloch viewer API answers connections.json with HTTP 200."""
    moloch_api_auth = HTTPDigestAuth(config['moloch']['username'], config['moloch']['password'])
    moloch_api_url = urljoin(config['moloch']['viewer'], 'connections.json')
    try:
        req = requests.get(moloch_api_url, auth=moloch_api_auth, verify=False, timeout=5)
        if req.status_code != 200:
            raise AssertionError
    except (requests.ConnectionError, AssertionError):
        raise AssertionError('Error connecting to Moloch on {0}'.format(config['moloch']['viewer']))
def test_api():
    """Check that the forwarder API answers with HTTP 200."""
    try:
        req = requests.get(config['forwarder']['url'], verify=False, timeout=5)
        if req.status_code != 200:
            raise AssertionError
    except (requests.ConnectionError, AssertionError):
        raise AssertionError('Error connecting to API on {0}'.format(config['forwarder']['url']))
def test_amqp():
    """Placeholder: AMQP connectivity check is not implemented yet."""
    pass
# Run all connectivity checks in sequence when this file is executed.
test_rethinkdb()
test_moloch()
test_api()
test_amqp()
| StarcoderdataPython |
51279 | import gym
import gym_sokoban
import torch
import numpy as np
import random
import time
from utilities.channelConverter import hwc2chw
from experts.utils import get_distance
from external_actions import get_astar_action
import warnings
warnings.simplefilter("ignore", UserWarning)
def test_the_agent(agent, data_path, USE_CUDA, eval_num, args=None, display=False, deter=False, Variable=None):
    """Evaluate a Sokoban agent for eval_num episodes and return the solve rate.

    An episode counts as solved when it ends before env.max_steps and the
    board is still solvable (get_distance != -1).

    Args:
        agent: agent exposing select_action(state, test=1, determinisitc=...).
        data_path: path to the level files for Curriculum-Sokoban-v2.
        USE_CUDA: move states to GPU before inference when True.
        eval_num: number of evaluation episodes.
        args, Variable: unused; kept for call-site compatibility.
        display: print the board and expert action after each step.
        deter: use deterministic action selection.

    Returns:
        float: fraction of solved episodes.
    """
    solved = []
    rewards = []
    # v2 environment samples a level from data_path on each reset.
    env = gym.make('Curriculum-Sokoban-v2', data_path=data_path)
    solved_maps = []
    unsolved_maps = []
    for episode in range(eval_num):
        episode_reward = 0
        state = env.reset()
        if display:
            print('#### Start ####')
            print(env.room_state)
            print('{} steps towards the goal state'.format(get_distance(env.room_state)))
            time.sleep(1)
        state = hwc2chw(state, test=True)
        if USE_CUDA:
            state = state.cuda()
        # First step outside the loop so 'done' is initialized.
        action = agent.select_action(state.unsqueeze(0), test=1, determinisitc=deter)
        next_state, reward, done, _ = env.step(action.item())
        episode_reward += reward
        next_state = hwc2chw(next_state, test=True)
        if display:
            print('#### action taken ####')
            print('taken action is {}, expert action is {}'.format(action.item(), get_astar_action(env.room_state)))
            print(env.room_state)
            print('{} steps towards the goal state'.format(get_distance(env.room_state)))
            time.sleep(1)
        # BUG FIX: the step counter used to be named 'i', shadowing the
        # episode loop variable; renamed for clarity.
        steps = 1
        while not done:
            state = next_state
            if USE_CUDA:
                state = state.cuda()
            with torch.no_grad():
                action = agent.select_action(state.unsqueeze(0), test=1, determinisitc=deter)
            if display:
                print('#### action taken ####')
                print('taken action is {}, expert action is {}'.format(action.item(), get_astar_action(env.room_state)))
                print(env.room_state)
                print('{} steps towards the goal state'.format(get_distance(env.room_state)))
                time.sleep(1)
            next_state, reward, done, _ = env.step(action.item())
            # get_distance == -1 means the box configuration became
            # unsolvable; abandon the episode.
            if get_distance(env.room_state) == -1:
                if display:
                    print('The game is unsolvable now')
                    time.sleep(2)
                break
            episode_reward += reward
            next_state = hwc2chw(next_state, test=True)
            steps += 1
        if steps < env.max_steps and get_distance(env.room_state) != -1:
            solved.append(1)
            solved_maps.append(env.selected_map)
        else:
            unsolved_maps.append(env.selected_map)
        rewards.append(episode_reward)
    return np.sum(solved) / eval_num
| StarcoderdataPython |
1775762 | default_app_config='accounting.apps.AccountingConfig'
| StarcoderdataPython |
11395132 | <gh_stars>10-100
# Main interface to the SmugMug web service.
from . import smugmug_oauth
import base64
import collections
import hashlib
import heapq
import io
import json
import math
import os
import re
import requests
import threading
import time
# SmugMug service endpoints.
API_ROOT = 'https://api.smugmug.com'
API_UPLOAD = 'https://upload.smugmug.com/'
# Page where a developer can request an API key.
API_REQUEST = 'https://api.smugmug.com/api/developer/apply'
# Matches the '?...start=<n>' page-offset portion of a paginated URI so it
# can be templated with a new start index.
PAGE_START_RE = re.compile(r'(\?.*start=)[0-9]+')
class Error(Exception):
    """Base class for all exceptions of this module."""


class InvalidArgumentError(Error):
    """Error raised when an invalid argument is specified."""


class NotLoggedInError(Error):
    """Error raised if the user is not logged in."""


class RemoteDataError(Error):
    """Error raised when the remote structure is incompatible with SmugCLI."""


class UnexpectedResponseError(Error):
    """Error raised when encountering unexpected data returned by SmugMug."""


class InterruptedError(Error):
    """Error raised when a network operation is interrupted.

    NOTE(review): shadows the builtin InterruptedError within this module;
    callers importing it must use the module-qualified name.
    """
class ChildCacheGarbageCollector(object):
    """Garbage collector for clearing nodes' children caches.

    Because multiple threads can process the same nodes in parallel, nodes
    and their children are cached so each node is only fetched once from the
    server. The cache must eventually be cleared, otherwise the JSON of the
    whole SmugMug account could end up in memory after a full sync. This
    collector keeps the number of nodes holding a children cache under a
    threshold, discarding the least-recently-visited nodes first.
    """

    def __init__(self, max_nodes):
        self._max_nodes = max_nodes
        self._oldest = []  # min-heap of [age_index, node]; oldest visit first.
        self._nodes = {}   # node -> its (mutable) heap entry.
        self._mutex = threading.Lock()
        self._DELETED = '__DELETED__'  # NOTE(review): unused sentinel, kept as-is.
        self._age_index = 0  # monotonically increasing visit counter.

    def set_max_children_cache(self, max_nodes):
        """Set the maximum number of children caches to keep in memory.

        As a rule of thumb, this should be proportional to the number of
        threads processing the tree.

        Args:
            max_nodes: int, number of nodes allowed to keep a children cache.
        """
        self._max_nodes = max_nodes

    def _get_next_age_index(self):
        # Not thread-safe on its own; callers must hold self._mutex.
        age_index = self._age_index
        self._age_index += 1
        return age_index

    def visited(self, node):
        """Record a node as just visited; clear the oldest caches if over limit.

        Args:
            node: Node object to mark as visited.
        """
        with self._mutex:
            if node in self._nodes:
                # Refresh the node's age in place, then restore heap order.
                self._nodes[node][0] = self._get_next_age_index()
                heapq.heapify(self._oldest)
            else:
                new_entry = [self._get_next_age_index(), node]
                self._nodes[node] = new_entry
                heapq.heappush(self._oldest, new_entry)

            # Evict least-recently-visited nodes until under the limit.
            while len(self._nodes) > self._max_nodes:
                node_to_clear = heapq.heappop(self._oldest)[1]
                node_to_clear.reset_cache()
                del self._nodes[node_to_clear]
class NodeList(object):
    """Lazy, paginated list of nodes returned by the SmugMug API.

    The first page comes from the initial reply; further pages are fetched
    on demand in __getitem__ and cached for the lifetime of the list.
    """

    def __init__(self, smugmug, json, parent):
        self._smugmug = smugmug
        self._parent = parent
        response = json['Response']
        locator = response['Locator']
        page_info = response['Pages']
        self._page_size = page_info['Count']
        self._total_size = page_info['Total']
        num_pages = int(math.ceil(float(self._total_size) / self._page_size)
                        if self._page_size else 0)
        self._pages = [None] * num_pages
        if num_pages:
            self._pages[0] = response[locator]
        # Template URI with '%d' in place of the 'start=' offset.
        self._uri = PAGE_START_RE.sub(r'\1%d', response['Uri'])

    def __len__(self):
        return self._total_size

    def __getitem__(self, item):
        if item < 0 or item >= self._total_size:
            raise IndexError
        page_index = int(item / self._page_size)
        if self._pages[page_index] is None:
            # Fetch and cache the page containing `item` (1-based offsets).
            new_page_uri = self._uri % (page_index * self._page_size + 1)
            json = self._smugmug.get_json(new_page_uri)
            response = json['Response']
            locator = response['Locator']
            self._pages[page_index] = response[locator]
        return Node(self._smugmug,
                    self._pages[page_index][item - page_index * self._page_size],
                    self._parent)
class Node(object):
    """A single SmugMug API node (folder, album or image).

    Wraps the node's JSON and offers HTTP helpers that resolve endpoint
    URIs from the node's 'Uris' section. Child lookups are cached per node;
    the cache is trimmed by the SmugMug instance's garbage collector.
    """

    def __init__(self, smugmug, json, parent=None):
        self._smugmug = smugmug
        self._json = json
        self._parent = parent
        self._child_nodes_by_name = None  # lazy name -> [Node] cache.
        self._lock = threading.Lock()

    @property
    def json(self):
        return self._json

    @property
    def name(self):
        # Images carry 'FileName'; folders and albums carry 'Name'.
        return self._json.get('FileName') or self._json['Name']

    @property
    def path(self):
        """Slash-joined path from the root node down to this node."""
        if self._parent is not None:
            return os.path.join(self._parent.path, self.name)
        else:
            return self.name

    def get(self, url_name, **kwargs):
        """GET the endpoint named `url_name` in this node's Uris."""
        uri = self.uri(url_name)
        return self._smugmug.get(uri, parent=self, **kwargs)

    def post(self, uri_name, data=None, json=None, **kwargs):
        uri = self.uri(uri_name)
        return self._smugmug.post(uri, data, json, **kwargs)

    def patch(self, uri_name, data=None, json=None, **kwargs):
        uri = self.uri(uri_name)
        return self._smugmug.patch(uri, data, json, **kwargs)

    def delete(self, **kwargs):
        uri = self._json.get('Uri')
        return self._smugmug.delete(uri, **kwargs)

    def upload(self, uri_name, filename, data, progress_fn=None, headers=None):
        uri = self.uri(uri_name)
        return self._smugmug.upload(uri, filename, data, progress_fn, headers)

    def uri(self, url_name):
        """Return the URI registered under `url_name`, or raise."""
        uri = self._json.get('Uris', {}).get(url_name, {}).get('Uri')
        if not uri:
            raise UnexpectedResponseError('Node does not have a "%s" uri.' % url_name)
        return uri

    def __getitem__(self, key):
        return self._json[key]

    def __contains__(self, key):
        return key in self._json

    def __eq__(self, other):
        return self._json == other

    def __ne__(self, other):
        return self._json != other

    def __hash__(self):
        # Identity hash: equality compares JSON content, but the children
        # cache / garbage collector need identity-based hashing.
        return id(self)

    def get_children(self, params=None):
        """Return this node's children (AlbumImages when Type is Album)."""
        if 'Type' not in self._json:
            raise UnexpectedResponseError('Node does not have a "Type" attribute.')
        params = params or {}
        params = {
            'start': params.get('start', 1),
            'count': params.get('count', self._smugmug.config.get('page_size', 1000))}
        if self._json['Type'] == 'Album':
            return self.get('Album').get('AlbumImages', params=params)
        else:
            return self.get('ChildNodes', params=params)

    def _get_child_nodes_by_name(self):
        # Build (once) a name -> [Node] index of the children, and register
        # the visit so the garbage collector can trim stale caches.
        # Callers must hold self._lock.
        if self._child_nodes_by_name is None:
            self._child_nodes_by_name = collections.defaultdict(list)
            for child in self.get_children():
                self._child_nodes_by_name[child.name].append(child)
        self._smugmug.garbage_collector.visited(self)
        return self._child_nodes_by_name

    def _create_child_node(self, name, params):
        # Create a child node named `name`; callers must hold self._lock.
        if self._json['Type'] != 'Folder':
            raise InvalidArgumentError(
                'Nodes can only be created in folders.\n'
                '"%s" is of type "%s".' % (self.name, self._json['Type']))
        if name in self._get_child_nodes_by_name():
            raise InvalidArgumentError('Node %s already exists in folder %s' % (
                name, self.name))
        remote_name = name.strip()
        node_params = {
            'Name': remote_name,
            'Privacy': 'Public',
            'SortDirection': 'Ascending',
            'SortMethod': 'Name',
        }
        node_params.update(params or {})

        print('Creating %s "%s".' % (params['Type'], os.path.join(self.path,
                                                                  remote_name)))
        response = self.post('ChildNodes', data=sorted(node_params.items()))
        if response.status_code != 201:
            raise UnexpectedResponseError(
                'Error creating node "%s".\n'
                'Server responded with status code %d: %s.' % (
                    name, response.status_code, response.json()['Message']))

        node_json = response.json().get('Response', {}).get('Node')
        if not node_json:
            raise UnexpectedResponseError('Cannot resolve created node JSON')
        node = Node(self._smugmug, node_json, parent=self)
        # A freshly created node has no children yet.
        node._child_nodes_by_name = {}
        self._smugmug.garbage_collector.visited(node)
        self._get_child_nodes_by_name()[name] = [node]

        if node['Type'] == 'Album':
            # Albums default to sorting their images by capture time.
            response = node.patch('Album', json={'SortMethod': 'DateTimeOriginal'})
            if response.status_code != 200:
                print('Failed setting SortMethod on Album "%s".' % name)
                print('Server responded with status code %d: %s.' % (
                    response.status_code, response.json()['Message']))
        return node

    def get_child(self, name):
        """Return the unique child named `name`, or None if absent."""
        with self._lock:
            match = self._get_child_nodes_by_name().get(name)
            if not match:
                return None
            if len(match) > 1:
                raise RemoteDataError(
                    'Multiple remote nodes matches "%s" in node "%s".' % (name, self.name))
            return match[0]

    def get_or_create_child(self, name, params):
        """Return the unique child named `name`, creating it if missing."""
        with self._lock:
            match = self._get_child_nodes_by_name().get(name)
            if not match:
                return self._create_child_node(name, params)
            if len(match) > 1:
                raise RemoteDataError(
                    'Multiple remote nodes matches "%s" in node "%s".' % (name, self.name))
            return match[0]

    def reset_cache(self):
        """Drop the children cache; called by the garbage collector."""
        with self._lock:
            self._child_nodes_by_name = None
def Wrapper(smugmug, json, parent=None):
    """Wrap an API reply: paginated replies become a NodeList, others a Node."""
    response = json['Response']
    if 'Pages' in response:
        return NodeList(smugmug, json, parent)
    else:
        locator = response['Locator']
        endpoint = response[locator]
        return Node(smugmug, endpoint, parent)
class StreamingUpload(object):
    """File-like wrapper around bytes that reports upload progress.

    The progress callback receives a percentage (0-100) and may return a
    truthy value to abort the transfer.

    NOTE(review): the chunk_size parameter is accepted but never used.
    """

    def __init__(self, data, progress_fn, chunk_size=1 << 13):
        self._data = io.BytesIO(data)
        self._len = len(data)
        self._progress_fn = progress_fn
        self._progress = 0

    def __len__(self):
        return self._len

    def read(self, n=-1):
        chunk = self._data.read(n)
        self._progress += len(chunk)
        if self._progress_fn:
            aborting = self._progress_fn(100 * self._progress / self._len)
            if aborting:
                raise InterruptedError('File transfer interrupted.')
        return chunk

    def tell(self):
        return self._data.tell()

    def seek(self, offset, whence=0):
        # NOTE(review): seeking does not rewind the progress counter.
        self._data.seek(offset, whence)
class SmugMug(object):
    """Main interface to the SmugMug web service (OAuth + JSON REST).

    All HTTP traffic goes through one requests.Session; when a
    `requests_sent` list is supplied, every (request, response) pair is
    appended to it (used by tests).
    """

    def __init__(self, config, requests_sent=None):
        self._config = config
        self._smugmug_oauth = None   # lazily created SmugMugOAuth service.
        self._oauth = None           # lazily created request auth object.
        self._user_root_node = None  # cached root node of the logged-in user.
        self._session = requests.Session()
        self._requests_sent = requests_sent
        self._garbage_collector = ChildCacheGarbageCollector(8)

    @property
    def config(self):
        return self._config

    @property
    def garbage_collector(self):
        return self._garbage_collector

    @property
    def service(self):
        """OAuth service object; requires 'api_key' in the config."""
        if not self._smugmug_oauth:
            if 'api_key' in self.config:
                self._smugmug_oauth = smugmug_oauth.SmugMugOAuth(self.config['api_key'])
            else:
                print('No API key provided.')
                print('Please request an API key at %s' % API_REQUEST)
                print('and run "smugcli.py login"')
                raise NotLoggedInError
        return self._smugmug_oauth

    @property
    def oauth(self):
        """Per-request OAuth signer; requires a stored access token."""
        if not self._oauth:
            if self.service and 'access_token' in self.config:
                self._oauth = self.service.get_oauth(self.config['access_token'])
            else:
                print('User not logged in. Please run the "login" command')
                raise NotLoggedInError
        return self._oauth

    def login(self, api_key):
        """Store the API key and run the interactive access-token exchange."""
        self.config['api_key'] = api_key
        self.config['access_token'] = self.service.request_access_token()

    def logout(self):
        """Drop all credentials and cached session state."""
        for key in ('api_key', 'access_token', 'authuser', 'authuser_uri'):
            if key in self.config:
                del self.config[key]
        # BUG FIX: the original assigned self._service (a nonexistent
        # attribute), leaving the cached OAuth objects alive after logout.
        self._smugmug_oauth = None
        self._oauth = None
        self._session = None

    def get_auth_user(self):
        """Return (and cache) the nickname of the authenticated user."""
        if not 'authuser' in self.config:
            self.config['authuser'] = self.get('/api/v2!authuser')['NickName']
        return self.config['authuser']

    def get_user_uri(self, user):
        """Return the root-node URI of the given user nickname."""
        return self.get('/api/v2/user/%s' % user).uri('Node')

    def get_auth_user_uri(self):
        if not 'authuser_uri' in self._config:
            self.config['authuser_uri'] = self.get_user_uri(self.get_auth_user())
        return self.config['authuser_uri']

    def get_auth_user_root_node(self):
        if self._user_root_node is None:
            self._user_root_node = self.get(self.get_auth_user_uri())
        return self._user_root_node

    def get_root_node(self, user):
        """Return the root node for `user` (cached for the logged-in user)."""
        if user == self.get_auth_user():
            return self.get_auth_user_root_node()
        else:
            return self.get(self.get_user_uri(user))

    def get_json(self, path, **kwargs):
        """GET `path` relative to the API root and return the decoded JSON."""
        req = requests.Request('GET', API_ROOT + path,
                               headers={'Accept': 'application/json'},
                               auth=self.oauth,
                               **kwargs).prepare()
        resp = self._session.send(req)
        if self._requests_sent is not None:
            self._requests_sent.append((req, resp))
        resp.raise_for_status()
        return resp.json()

    def get(self, path, parent=None, **kwargs):
        """GET `path` and wrap the reply as a Node or NodeList."""
        reply = self.get_json(path, **kwargs)
        return Wrapper(self, reply, parent)

    def post(self, path, data=None, json=None, **kwargs):
        req = requests.Request('POST',
                               API_ROOT + path,
                               data=data,
                               json=json,
                               headers={'Accept': 'application/json'},
                               auth=self.oauth,
                               **kwargs).prepare()
        resp = self._session.send(req)
        if self._requests_sent is not None:
            self._requests_sent.append((req, resp))
        return resp

    def patch(self, path, data=None, json=None, **kwargs):
        req = requests.Request('PATCH',
                               API_ROOT + path,
                               data=data, json=json,
                               headers={'Accept': 'application/json'},
                               auth=self.oauth,
                               **kwargs).prepare()
        resp = self._session.send(req)
        if self._requests_sent is not None:
            self._requests_sent.append((req, resp))
        return resp

    def delete(self, path, data=None, json=None, **kwargs):
        # NOTE(review): data/json parameters are accepted but not forwarded.
        req = requests.Request('DELETE',
                               API_ROOT + path,
                               auth=self.oauth,
                               headers={'Accept': 'application/json'},
                               **kwargs).prepare()
        resp = self._session.send(req)
        if self._requests_sent is not None:
            self._requests_sent.append((req, resp))
        return resp

    def upload(self, uri, filename, data, progress_fn=None,
               additional_headers=None):
        """Upload `data` as `filename` to the album at `uri`."""
        headers = {'Content-Length': str(len(data)),
                   'Content-MD5': base64.b64encode(hashlib.md5(data).digest()),
                   'X-Smug-AlbumUri': uri,
                   'X-Smug-FileName': filename,
                   'X-Smug-ResponseType': 'JSON',
                   'X-Smug-Version': 'v2'}
        headers.update(additional_headers or {})
        req = requests.Request('POST',
                               API_UPLOAD,
                               data=StreamingUpload(data, progress_fn),
                               headers=headers,
                               auth=self.oauth).prepare()
        resp = self._session.send(req)
        if self._requests_sent is not None:
            self._requests_sent.append((req, resp))
        return resp
class FakeSmugMug(SmugMug):
    """SmugMug variant for tests: small page size, no OAuth service."""

    def __init__(self, config=None):
        config = config or {}
        # Use a small page size so pagination is exercised in tests.
        config['page_size'] = 10
        super(FakeSmugMug, self).__init__(config)

    @property
    def service(self):
        return None

    @property
    def oauth(self):
        return None
| StarcoderdataPython |
9693530 | from . import BaseExtractor
class Layarkaca21(BaseExtractor):
    """Extractor for the layarkaca21 movie site."""

    tag = "movie"
    host = "http://149.56.24.226/"

    def extract_meta(self, id: str) -> dict:
        """Scrape all metadata from the item's detail page.

        Args:
            id: URL path after the host.
        """
        raw = self.session.get(f"{self.host}/{id}")
        soup = self.soup(raw)

        img = soup.find(class_="img-thumbnail")
        meta = self.MetaSet()
        meta["judul"] = img["alt"]
        meta["image"] = "https:" + img["src"]

        content = soup.find(class_="content")
        for div in content.findAll("div"):
            # Each metadata row has an <h2> label and one or more <h3> values;
            # skip the uploader rows ("Oleh", "Diunggah").
            if (k := div.h2) and (k := k.text) and k not in ["Oleh", "Diunggah"]:
                value = ", ".join(h3.text for h3 in div.findAll("h3"))
                meta.add(k, value, split=k not in ["Imdb", "Diterbitkan"])

        if (block := soup.find("blockquote")):
            # Strip the label markup, keep only the synopsis text.
            block.strong.decompose()
            block.span.decompose()
            meta["sinopsis"] = block.text
        return meta

    def extract_data(self, id: str) -> dict:
        """Collect all download links for the given slug.

        Args:
            id: URL path (slug) after the host.

        Returns:
            dict: mapping of title -> {quality: url}.
        """
        raw = self.session.post("http://dl.sharemydrive.xyz/verifying.php",
                                headers={
                                    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                                    "Accept": "*/*",
                                    "X-Requested-With": "XMLHttpRequest"
                                },
                                params={"slug": id},
                                data={"slug": id}
                                )
        soup = self.soup(raw)

        tb = soup.find("tbody")
        result = {}
        for tr in tb.findAll("tr"):
            title = tr.find("strong").text
            result[title] = {}
            for td in tr.findAll("td")[1:]:
                if (a := td.a):
                    # The last CSS class encodes the mirror name
                    # (e.g. 'link-gdrive' -> 'gdrive').
                    result[title][a["class"][-1].split("-")[-1]] = a["href"]
        return result

    def search(self, query: str, page: int = 1) -> list:
        """Search items matching the given query.

        Args:
            query: search keywords.
            page: web page index (currently unused by the site query).
        """
        raw = self.session.get(self.host,
                               params={"s": query})
        soup = self.soup(raw)

        r = []
        for item in soup.findAll(class_="search-item"):
            a = item.a
            extra = {"genre": [], "star": [], "country": [],
                     "size": [""], "quality": [""], "year": [""]}
            # Category links encode '<field>/<value>' in their href.
            for tag in item.find(class_="cat-links").findAll("a"):
                name, it = self.re.findall(r"/([^/]+)/([^/]+)", str(tag))[0]
                extra[name].insert(0, it)
            # '<label>: <value>' paragraphs; map site labels to field names.
            for p in filter(lambda x: x.strong is not None, item.findAll("p")):
                np, vl = self.re.findall(
                    r"^([^:]+):\s+(.+)", p.text.strip())[0]
                np = "star" if np == "Bintang" else "director" if np == "Sutradara" else np
                extra[np] = self.re.split(r"\s*,\s*", vl) if "," in vl else vl
            extra["id"] = self.re.search(
                r"\w/([^/]+)", a["href"]).group(1)
            result = {
                "title": (item.find("h2").text or a.img["alt"]).strip(),
            }
            result.update(extra)
            r.append(result)
        return r
| StarcoderdataPython |
1948011 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ... import TestUnitBase
class TestVBAExtractor(TestUnitBase):
    """Tests for the VBA macro extraction unit."""

    def test_maldoc(self):
        # Known malicious document sample; the extracted VBA source must
        # contain the payload URL (literal split to avoid AV signatures
        # on this source file).
        data = self.download_sample('4bdc8e660ff4fb05e5b6c0a2dd70c537817f46ac3270d779fdddc8e459829c08')
        unit = self.load()
        code = list(data | unit)
        self.assertIn(B'http://109.94.209'B'.91/12340.txt', code[0])

    def test_do_not_extract_plaintext(self):
        # Plain text is not an Office document; the unit must yield nothing.
        data = b"some plaintext data"
        unit = self.load()
        self.assertEqual(bytes(data | unit), b'')
| StarcoderdataPython |
8103600 | import numpy as np
from scipy.stats import rankdata
from functools import partial
from nptyping import Array
from sklearn.metrics import pairwise_distances
from sklearn.base import BaseEstimator, TransformerMixin
class BoostedSURF(BaseEstimator, TransformerMixin):
    """sklearn compatible implementation of the boostedSURF algorithm

    Granizo-Mackenzie, Moore et al.
    Boosted Spatially Uniform ReliefF Algorithm for Genome-Wide Genetic
    Analysis.
    """

    def __init__(self, n_features_to_select=10, phi=5,
                 dist_func=lambda w, x1, x2: np.sum(w * np.logical_xor(x1, x2), 1),
                 learned_metric_func=None):
        self.n_features_to_select = n_features_to_select  # number of features to keep.
        self.phi = phi  # recompute distance weights every phi iterations.
        self.dist_func = dist_func  # weighted distance between examples.
        self.learned_metric_func = learned_metric_func  # optional learned metric.

    def fit(self, data, target):
        """
        Rank features using the BoostedSURF feature selection algorithm.

        Args:
            data : Array[np.float64] -- matrix of examples
            target : Array[np.int] -- vector of target values of examples

        Returns:
            self
        """
        if self.learned_metric_func != None:
            self.rank, self.weights = self._boostedSURF(
                data, target, self.phi, self.dist_func,
                learned_metric_func=self.learned_metric_func(data, target))
        else:
            self.rank, self.weights = self._boostedSURF(data, target, self.phi, self.dist_func)
        return self

    def transform(self, data):
        """
        Perform feature selection using computed feature ranks.

        Args:
            data : Array[np.float64] -- matrix of examples on which to perform feature selection

        Returns:
            Array[np.float64] -- result of performing feature selection
        """
        msk = self.rank <= self.n_features_to_select  # Keep the top-ranked features.
        return data[:, msk]

    def fit_transform(self, data, target):
        """
        Compute ranks of features and perform feature selection.

        Args:
            data : Array[np.float64] -- matrix of examples on which to perform feature selection
            target : Array[np.int] -- vector of target values of examples

        Returns:
            Array[np.float64] -- result of performing feature selection
        """
        self.fit(data, target)
        return self.transform(data)

    def _boostedSURF(self, data, target, phi, dist_func, **kwargs):
        """Compute feature scores using the boostedSURF algorithm.

        Args:
            data : Array[np.float64] -- matrix containing examples' data as rows
            target : Array[np.int] -- example target values
            phi: int -- number of iterations between distance-weight updates
            dist_func : callable evaluating weighted distances between examples
                (accepts weights plus two examples or matrices of examples).
            **kwargs: may contain 'learned_metric_func' mapping to a function
                that accepts a distance function and indices of two training
                examples and returns their distance in the learned space.

        Returns:
            (Array[np.int], Array[np.float64]) -- feature ranks, feature scores
        """
        # Initialize distance weights and the weighted distance function.
        dist_weights = np.ones(data.shape[1], dtype=np.float)
        dist_func_w = partial(dist_func, dist_weights)

        # Initialize feature scores.
        weights = np.zeros(data.shape[1], dtype=np.int)

        for idx in np.arange(data.shape[0]):
            # Periodically refresh distance weights from the current scores.
            if np.mod(idx, phi) == 0:
                dist_weights = np.maximum(weights, np.ones(data.shape[1], dtype=np.float))
                dist_func_w = partial(dist_func, dist_weights)
                if 'learned_metric_func' in kwargs:
                    dist_func_w_learned = partial(kwargs['learned_metric_func'], dist_func_w)

            # Distances from the current example to all others.
            if 'learned_metric_func' in kwargs:
                dists = dist_func_w_learned(idx, np.arange(data.shape[0]))
            else:
                dists = dist_func_w(data[idx, :], data)

            # Near/far thresholds: mean +/- half a standard deviation.
            t_next = np.mean(dists[np.arange(data.shape[0]) != idx])
            sigma_nxt = np.std(dists[np.arange(data.shape[0]) != idx])
            thresh_near = t_next - sigma_nxt / 2.0
            thresh_far = t_next + sigma_nxt / 2.0

            # Masks of near and far examples (self excluded from near).
            msk_close = dists < thresh_near
            msk_close[idx] = False
            msk_far = dists > thresh_far

            examples_close = data[msk_close, :]
            target_close = target[msk_close]
            examples_far = data[msk_far, :]
            target_far = target[msk_far]

            # Features that differ for near examples / agree for far examples.
            features_close = data[idx, :] != examples_close
            features_far = data[idx, :] == examples_far

            # Same-class masks for near and far examples.
            msk_same_close = target_close == target[idx]
            msk_same_far = target_far == target[idx]

            ### WEIGHTS UPDATE ###
            # Near, same class, differing feature -> penalty.
            wu_close_penalty = np.sum(features_close[msk_same_close, :], 0)
            # Near, different class, differing feature -> reward.
            wu_close_reward = np.sum(features_close[np.logical_not(msk_same_close), :], 0)
            # Far, same class, agreeing feature -> penalty.
            wu_far_penalty = np.sum(features_far[msk_same_far, :], 0)
            # Far, different class, agreeing feature -> reward.
            wu_far_reward = np.sum(features_far[np.logical_not(msk_same_far), :], 0)
            weights = weights - (wu_close_penalty + wu_far_penalty) + (wu_close_reward + wu_far_reward)
            ### /WEIGHTS UPDATE ###

        # Rank features by decreasing score (rank 1 = best).
        rank = rankdata(-weights, method='ordinal')
        return rank, weights
| StarcoderdataPython |
1931358 | #!/usr/bin/env python2
import json
import thread
import psycopg2
import websocket
import api
import config
def on_message(ws, message):
    """Handle one incoming websocket notification.

    Parses the JSON payload; for account-history notifications (object ids
    starting with "2.9.") it resolves the related account and operation via
    the api module and inserts one row into the ``ops`` table.  Any other
    message shape is silently ignored.
    """
    j = json.loads(message)
    con = None
    try:
        # Non-matching payloads raise KeyError/IndexError here and are skipped.
        id_ = j["params"][1][0][0]["id"]
        if id_[:4] == "2.9.":
            data = api._get_object(id_)
            account_id = data[0]["account"]
            data_a = api._account(account_id)
            account_name = data_a[0]["name"]
            data2 = api._get_object(data[0]['operation_id'])
            block_num = data2[0]["block_num"]
            op_type = data2[0]["op"][0]
            trx_in_block = data2[0]["trx_in_block"]
            op_in_trx = data2[0]["op_in_trx"]
            con = psycopg2.connect(**config.POSTGRES)
            cur = con.cursor()
            query = "INSERT INTO ops (oh, ath, block_num, trx_in_block, op_in_trx, datetime, account_id, op_type, account_name) VALUES(%s, %s, %s, %s, %s, NOW(), %s, %s, %s)"
            print(query)
            cur.execute(query, (id_, data[0]['operation_id'], str(block_num), str(trx_in_block), str(op_in_trx), account_id, str(op_type), account_name))
            con.commit()
    except Exception:
        # Best-effort: malformed/unrelated messages and transient DB/API
        # errors are deliberately ignored so the subscription keeps running.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        pass
    finally:
        # The original version leaked one DB connection per matching
        # message; always close it.
        if con is not None:
            con.close()
def on_error(ws, error):
    """websocket-client error callback: log the error and keep running."""
    print(error)
def on_close(ws):
    """websocket-client close callback: note that the connection ended."""
    print("### closed ###")
def on_open(ws):
    """On connect, send the subscription request from a worker thread."""
    # Fire-and-forget so the open callback returns immediately.
    subscribe = lambda *args: ws.send('{"method": "set_subscribe_callback", "params": [5, true], "id": 6}')
    thread.start_new_thread(subscribe, ())
if __name__ == "__main__":
    # Echo websocket frames to stdout for debugging.
    websocket.enableTrace(True)
    ws = websocket.WebSocketApp(config.WEBSOCKET_URL,
                            on_message=on_message,
                            on_error=on_error,
                            on_close=on_close)
    # The open callback is attached after construction; the remaining
    # callbacks are passed as keyword arguments above.
    ws.on_open = on_open
    ws.run_forever()
| StarcoderdataPython |
217197 | <gh_stars>0
""" Module with functionalities for blocking based on a dictionary of records,
where a blocking function must return a dictionary with block identifiers
as keys and values being sets or lists of record identifiers in that block.
"""
# =============================================================================
def noBlocking(rec_dict):
    """Trivial "blocking" that places every record into one single block.

    Parameter Description:
       rec_dict : Dictionary that holds the record identifiers as keys and
                  corresponding list of record values

    Returns a dictionary with a single entry, 'all_rec', whose value is the
    list of all record identifiers.
    """
    print("Run 'no' blocking:")
    print(' Number of records to be blocked: '+str(len(rec_dict)))
    print('')
    # Iterating a dictionary yields its keys, i.e. the record identifiers.
    return {'all_rec': list(rec_dict)}
# -----------------------------------------------------------------------------
def simpleBlocking(rec_dict, blk_attr_list):
    """Block records by concatenating the values of selected attributes.

    Parameter Description:
       rec_dict      : Dictionary that holds the record identifiers as keys
                       and corresponding list of record values
       blk_attr_list : List of blocking key attributes to use

    Returns a dictionary mapping each blocking key value (BKV) to the list
    of identifiers of the records that share that BKV.

    Examples:
      Blocking on 'postcode' gives blocks such as
        {'2000': [rec1_id, rec2_id, ...], '2600': [rec4_id, ...], ...}
      while blocking on 'postcode' and 'gender' gives
        {'2000f': [...], '2000m': [...], '2600f': [...], ...}
    """
    print('Run simple blocking:')
    print(' List of blocking key attributes: '+str(blk_attr_list))
    print(' Number of records to be blocked: '+str(len(rec_dict)))
    print('')
    block_dict = {}
    for (rec_id, rec_values) in rec_dict.items():
        # The BKV is simply the selected attribute values joined together.
        rec_bkv = ''.join(rec_values[attr] for attr in blk_attr_list)
        # setdefault creates the block the first time this BKV is seen.
        block_dict.setdefault(rec_bkv, []).append(rec_id)
    return block_dict
# -----------------------------------------------------------------------------
def phoneticBlocking(rec_dict, blk_attr_list):
    """Block records on concatenated Soundex encodings of attribute values.

    Parameter Description:
       rec_dict      : Dictionary that holds the record identifiers as keys
                       and corresponding list of record values
       blk_attr_list : List of blocking key attributes to use

    Returns a dictionary mapping each blocking key value to the list of
    record identifiers in that block.
    """
    # Soundex digit for each consonant group; vowels plus h/w/y (and any
    # character not listed, such as punctuation) contribute nothing.
    sndx_groups = {}
    for letters, digit in (('bfpv', '1'), ('cgjkqsxz', '2'), ('dt', '3'),
                           ('l', '4'), ('mn', '5'), ('r', '6')):
        for letter in letters:
            sndx_groups[letter] = digit
    print('Run phonetic blocking:')
    print(' List of blocking key attributes: '+str(blk_attr_list))
    print(' Number of records to be blocked: '+str(len(rec_dict)))
    print('')
    block_dict = {}
    for (rec_id, rec_values) in rec_dict.items():
        rec_bkv = ''
        for attr in blk_attr_list:
            attr_val = rec_values[attr]
            if (attr_val == ''):
                rec_bkv += 'z000'  # Often used as Soundex code for empty values
                continue
            attr_val = attr_val.lower()
            sndx_val = attr_val[0]  # The first letter is kept as-is
            for c in attr_val[1:]:
                digit = sndx_groups.get(c)
                # Skip unmapped characters and immediately repeated digits.
                if digit is not None and sndx_val[-1] != digit:
                    sndx_val += digit
            if (len(sndx_val) < 4):
                sndx_val += '000'  # Ensure enough digits
            rec_bkv += sndx_val[:4]  # A Soundex code is four characters long
        # Insert the blocking key value and record into blocking dictionary.
        block_dict.setdefault(rec_bkv, []).append(rec_id)
    return block_dict
# -----------------------------------------------------------------------------
def slkBlocking(rec_dict, fam_name_attr_ind, giv_name_attr_ind,
                dob_attr_ind, gender_attr_ind):
    """Build blocks using the statistical linkage key (SLK-581).

    The SLK-581 approach is used in real-world linkage applications:
    http://www.aihw.gov.au/WorkArea/DownloadAsset.aspx?id=60129551915
    A SLK-581 blocking key is based on the concatenation of:
    - 3 letters of family name (2nd, 3rd and 5th, padded with '2'/'9')
    - 2 letters of given name (2nd and 3rd, padded with '2'/'9')
    - Date of birth (as ddmmyyyy)
    - Sex (1 = male, 2 = female, 9 = other/unknown)

    Parameter Description:
       rec_dict          : Dictionary that holds the record identifiers as
                           keys and corresponding list of record values
       fam_name_attr_ind : Index of the family name (last name) attribute
       giv_name_attr_ind : Index of the given name (first name) attribute
       dob_attr_ind      : Index of the date of birth attribute (dd/mm/yyyy)
       gender_attr_ind   : Index of the gender (sex) attribute

    Returns a dictionary with blocking key values as its keys and lists of
    record identifiers as its values (one list for each block).
    """
    block_dict = {}
    print('Run SLK-581 blocking:')
    print(' Number of records to be blocked: '+str(len(rec_dict)))
    print('')
    for (rec_id, rec_values) in rec_dict.items():
        rec_bkv = ''  # Initialise the blocking key value for this record
        # Family name component.
        fam_name = rec_values[fam_name_attr_ind]
        if (fam_name == ''):
            rec_bkv += '999'
        else:
            # Remove non-letter characters before selecting positions.
            # (Bug fix: the cleaned value was previously assigned to a
            # misspelled variable, 'fam_nam', and then never used.)
            fam_name = fam_name.replace('-', '').replace(',', '').replace('_', '')
            if (len(fam_name) >= 5):
                rec_bkv += (fam_name[1]+fam_name[2]+fam_name[4])
            elif (len(fam_name) >= 3):
                rec_bkv += (fam_name[1]+fam_name[2]+'2')
            elif (len(fam_name) >= 2):
                rec_bkv += (fam_name[1]+'22')
            # NOTE: a family name shorter than two letters contributes
            # nothing, matching the original implementation.
        # Given name component.
        giv_name = rec_values[giv_name_attr_ind]
        if (giv_name == ''):
            rec_bkv += '99'
        else:
            # Same bug fix as above for 'giv_nam'.
            giv_name = giv_name.replace('-', '').replace(',', '').replace('_', '')
            if (len(giv_name) >= 3):
                rec_bkv += (giv_name[1]+giv_name[2])
            elif (len(giv_name) >= 2):
                rec_bkv += (giv_name[1]+'2')
        # Date of birth: normalise dd/mm/yyyy to ddmmyyyy with zero padding.
        dob_list = rec_values[dob_attr_ind].split('/')
        if (len(dob_list[0]) < 2):
            dob_list[0] = '0' + dob_list[0]  # Leading zero for days < 10
        if (len(dob_list[1]) < 2):
            dob_list[1] = '0' + dob_list[1]  # Leading zero for months < 10
        dob = ''.join(dob_list)  # Create: ddmmyyyy
        assert len(dob) == 8, dob
        rec_bkv += dob
        # Gender component.
        gender = rec_values[gender_attr_ind].lower()
        if (gender == 'm'):
            rec_bkv += '1'
        elif (gender == 'f'):
            rec_bkv += '2'
        else:
            rec_bkv += '9'
        # Insert the blocking key value and record into blocking dictionary.
        block_dict.setdefault(rec_bkv, []).append(rec_id)
    return block_dict
# -----------------------------------------------------------------------------
# Extra task: Implement canopy clustering based blocking as described in
# the Data Matching book
# -----------------------------------------------------------------------------
def printBlockStatistics(blockA_dict, blockB_dict):
    """Calculate and print basic statistics about the generated blocks."""
    print('Statistics of the generated blocks:')
    # One size entry per block in each dataset.
    block_sizeA_list = [len(rec_id_list) for rec_id_list in blockA_dict.values()]
    block_sizeB_list = [len(rec_id_list) for rec_id_list in blockB_dict.values()]
    print('Dataset A number of blocks generated: %d' % (len(blockA_dict)))
    print(' Minimum block size: %d' % (min(block_sizeA_list)))
    print(' Average block size: %.2f' % \
          (float(sum(block_sizeA_list)) / len(block_sizeA_list)))
    print(' Maximum block size: %d' % (max(block_sizeA_list)))
    print('')
    print('Dataset B number of blocks generated: %d' % (len(blockB_dict)))
    print(' Minimum block size: %d' % (min(block_sizeB_list)))
    print(' Average block size: %.2f' % \
          (float(sum(block_sizeB_list)) / len(block_sizeB_list)))
    print(' Maximum block size: %d' % (max(block_sizeB_list)))
    print('')
# -----------------------------------------------------------------------------
# End of program.
| StarcoderdataPython |
6443414 | <gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['DomainZoneRedirectionArgs', 'DomainZoneRedirection']
@pulumi.input_type
class DomainZoneRedirectionArgs:
    # NOTE: generated by the Pulumi Terraform bridge (tfgen) — see the file
    # header; regenerate rather than editing by hand.
    def __init__(__self__, *,
                 target: pulumi.Input[str],
                 type: pulumi.Input[str],
                 zone: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 keywords: Optional[pulumi.Input[str]] = None,
                 subdomain: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a DomainZoneRedirection resource.
        :param pulumi.Input[str] target: The value of the redirection
        :param pulumi.Input[str] type: The type of the redirection, with values:
        :param pulumi.Input[str] zone: The domain to add the redirection to
        :param pulumi.Input[str] description: A description of this redirection
        :param pulumi.Input[str] keywords: Keywords to describe this redirection
        :param pulumi.Input[str] subdomain: The name of the redirection
        :param pulumi.Input[str] title: Title of this redirection
        """
        # target, type and zone are required; the rest are set only when given.
        pulumi.set(__self__, "target", target)
        pulumi.set(__self__, "type", type)
        pulumi.set(__self__, "zone", zone)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if keywords is not None:
            pulumi.set(__self__, "keywords", keywords)
        if subdomain is not None:
            pulumi.set(__self__, "subdomain", subdomain)
        if title is not None:
            pulumi.set(__self__, "title", title)
    @property
    @pulumi.getter
    def target(self) -> pulumi.Input[str]:
        """
        The value of the redirection
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: pulumi.Input[str]):
        pulumi.set(self, "target", value)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of the redirection, with values:
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def zone(self) -> pulumi.Input[str]:
        """
        The domain to add the redirection to
        """
        return pulumi.get(self, "zone")
    @zone.setter
    def zone(self, value: pulumi.Input[str]):
        pulumi.set(self, "zone", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of this redirection
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def keywords(self) -> Optional[pulumi.Input[str]]:
        """
        Keywords to describe this redirection
        """
        return pulumi.get(self, "keywords")
    @keywords.setter
    def keywords(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "keywords", value)
    @property
    @pulumi.getter
    def subdomain(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the redirection
        """
        return pulumi.get(self, "subdomain")
    @subdomain.setter
    def subdomain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subdomain", value)
    @property
    @pulumi.getter
    def title(self) -> Optional[pulumi.Input[str]]:
        """
        Title of this redirection
        """
        return pulumi.get(self, "title")
    @title.setter
    def title(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "title", value)
@pulumi.input_type
class _DomainZoneRedirectionState:
    # NOTE: generated by the Pulumi Terraform bridge (tfgen); all fields are
    # optional because state is used for lookup/filtering, not construction.
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 keywords: Optional[pulumi.Input[str]] = None,
                 subdomain: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 zone: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering DomainZoneRedirection resources.
        :param pulumi.Input[str] description: A description of this redirection
        :param pulumi.Input[str] keywords: Keywords to describe this redirection
        :param pulumi.Input[str] subdomain: The name of the redirection
        :param pulumi.Input[str] target: The value of the redirection
        :param pulumi.Input[str] title: Title of this redirection
        :param pulumi.Input[str] type: The type of the redirection, with values:
        :param pulumi.Input[str] zone: The domain to add the redirection to
        """
        if description is not None:
            pulumi.set(__self__, "description", description)
        if keywords is not None:
            pulumi.set(__self__, "keywords", keywords)
        if subdomain is not None:
            pulumi.set(__self__, "subdomain", subdomain)
        if target is not None:
            pulumi.set(__self__, "target", target)
        if title is not None:
            pulumi.set(__self__, "title", title)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if zone is not None:
            pulumi.set(__self__, "zone", zone)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of this redirection
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def keywords(self) -> Optional[pulumi.Input[str]]:
        """
        Keywords to describe this redirection
        """
        return pulumi.get(self, "keywords")
    @keywords.setter
    def keywords(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "keywords", value)
    @property
    @pulumi.getter
    def subdomain(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the redirection
        """
        return pulumi.get(self, "subdomain")
    @subdomain.setter
    def subdomain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subdomain", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """
        The value of the redirection
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)
    @property
    @pulumi.getter
    def title(self) -> Optional[pulumi.Input[str]]:
        """
        Title of this redirection
        """
        return pulumi.get(self, "title")
    @title.setter
    def title(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "title", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the redirection, with values:
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def zone(self) -> Optional[pulumi.Input[str]]:
        """
        The domain to add the redirection to
        """
        return pulumi.get(self, "zone")
    @zone.setter
    def zone(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "zone", value)
class DomainZoneRedirection(pulumi.CustomResource):
    # NOTE: generated by the Pulumi Terraform bridge (tfgen); regenerate
    # rather than editing by hand.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 keywords: Optional[pulumi.Input[str]] = None,
                 subdomain: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 zone: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a OVH domain zone redirection.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_ovh as ovh
        # Add a redirection to a sub-domain
        test = ovh.DomainZoneRedirection("test",
            subdomain="test",
            target="http://www.ovh",
            type="visiblePermanent",
            zone="testdemo.ovh")
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: A description of this redirection
        :param pulumi.Input[str] keywords: Keywords to describe this redirection
        :param pulumi.Input[str] subdomain: The name of the redirection
        :param pulumi.Input[str] target: The value of the redirection
        :param pulumi.Input[str] title: Title of this redirection
        :param pulumi.Input[str] type: The type of the redirection, with values:
        :param pulumi.Input[str] zone: The domain to add the redirection to
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DomainZoneRedirectionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a OVH domain zone redirection.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_ovh as ovh
        # Add a redirection to a sub-domain
        test = ovh.DomainZoneRedirection("test",
            subdomain="test",
            target="http://www.ovh",
            type="visiblePermanent",
            zone="testdemo.ovh")
        ```
        :param str resource_name: The name of the resource.
        :param DomainZoneRedirectionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either an args object or
        # individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(DomainZoneRedirectionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 keywords: Optional[pulumi.Input[str]] = None,
                 subdomain: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 zone: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DomainZoneRedirectionArgs.__new__(DomainZoneRedirectionArgs)
            __props__.__dict__["description"] = description
            __props__.__dict__["keywords"] = keywords
            __props__.__dict__["subdomain"] = subdomain
            # target, type and zone are required unless looking up by URN.
            if target is None and not opts.urn:
                raise TypeError("Missing required property 'target'")
            __props__.__dict__["target"] = target
            __props__.__dict__["title"] = title
            if type is None and not opts.urn:
                raise TypeError("Missing required property 'type'")
            __props__.__dict__["type"] = type
            if zone is None and not opts.urn:
                raise TypeError("Missing required property 'zone'")
            __props__.__dict__["zone"] = zone
        super(DomainZoneRedirection, __self__).__init__(
            'ovh:index/domainZoneRedirection:DomainZoneRedirection',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            description: Optional[pulumi.Input[str]] = None,
            keywords: Optional[pulumi.Input[str]] = None,
            subdomain: Optional[pulumi.Input[str]] = None,
            target: Optional[pulumi.Input[str]] = None,
            title: Optional[pulumi.Input[str]] = None,
            type: Optional[pulumi.Input[str]] = None,
            zone: Optional[pulumi.Input[str]] = None) -> 'DomainZoneRedirection':
        """
        Get an existing DomainZoneRedirection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: A description of this redirection
        :param pulumi.Input[str] keywords: Keywords to describe this redirection
        :param pulumi.Input[str] subdomain: The name of the redirection
        :param pulumi.Input[str] target: The value of the redirection
        :param pulumi.Input[str] title: Title of this redirection
        :param pulumi.Input[str] type: The type of the redirection, with values:
        :param pulumi.Input[str] zone: The domain to add the redirection to
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _DomainZoneRedirectionState.__new__(_DomainZoneRedirectionState)
        __props__.__dict__["description"] = description
        __props__.__dict__["keywords"] = keywords
        __props__.__dict__["subdomain"] = subdomain
        __props__.__dict__["target"] = target
        __props__.__dict__["title"] = title
        __props__.__dict__["type"] = type
        __props__.__dict__["zone"] = zone
        return DomainZoneRedirection(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        A description of this redirection
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def keywords(self) -> pulumi.Output[Optional[str]]:
        """
        Keywords to describe this redirection
        """
        return pulumi.get(self, "keywords")
    @property
    @pulumi.getter
    def subdomain(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the redirection
        """
        return pulumi.get(self, "subdomain")
    @property
    @pulumi.getter
    def target(self) -> pulumi.Output[str]:
        """
        The value of the redirection
        """
        return pulumi.get(self, "target")
    @property
    @pulumi.getter
    def title(self) -> pulumi.Output[Optional[str]]:
        """
        Title of this redirection
        """
        return pulumi.get(self, "title")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the redirection, with values:
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def zone(self) -> pulumi.Output[str]:
        """
        The domain to add the redirection to
        """
        return pulumi.get(self, "zone")
| StarcoderdataPython |
5153514 | # PYTHONPATH must be set to openschc/src
import binascii
import pprint
"""
This is a no_compression test, ie the packet is sent no compressed with a
aligned ruleID of 8 bits.
"""
from gen_rulemanager import RuleManager
from compr_parser import Parser
from compr_core import Compressor
enable_debug_print = True
"""
6 # IPv6 version
00 # DiffServ
12345 # Flow Label
0039 # Payload Length
11 # Next Header (UDP)
33 # Hop Limit
2001 1222 8905 0470 0000 0000 0000 0057 # Device address
2001 41d0 57d7 3100 0000 0000 0000 0401 # Application address
1634 # Device port
1633 # Application port
0039 # UDL length
7a6e # UDP checksum
5102 # CoAP Header NON TLK=1
00a0 # Message ID
20 # Token
b4 74656d70                              # Option Path /temp
d1ea 02 # Option Server No Response (4&5 allowed)
ff # end option
981f19074b210503010500220622200600250301220401030300220304010122030a05 # CBOR
"""
# Raw IPv6/UDP/CoAP packet as a hex string; the field layout is described
# in the comment block above.  Spaces are stripped before unhexlifying.
packet_src = "6 00 12345 0039 11 33 20011222890504700000000000000057 200141d057d731000000000000000401\
 1634 1633 0039 7a6e\
 5102 00a0 20 b474656d70 d1ea02 ff\
 981f19074b210503010500220622200600250301220401030300220304010122030a05".replace (" ", "")
packet = binascii.unhexlify(packet_src)
print (packet)
# SCHC rule: no compression, with an 8-bit aligned rule ID.
rule_default_8bits = {
    "RuleID": 8,
    "RuleIDLength" : 8,
    "NoCompression" : []
}
RM = RuleManager()
RM.Add(dev_info=rule_default_8bits)
RM.Print()
r = RM.FindNoCompressionRule()
print (r)
class debug_protocol:
    # Minimal stand-in for the protocol object Compressor expects: only a
    # _log callable is needed.  It is used via the class itself, so no
    # 'self' parameter is declared; *arg receives every positional argument.
    def _log(*arg):
        print(*arg)
# Send the packet uncompressed under the no-compression rule and display it.
C = Compressor(debug_protocol)
result = C.no_compress (r, packet)
result.display()
result.display(format="bin")
364231 | <reponame>laetitia123/akikatest<gh_stars>0
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
import datetime as dt
class Akika(models.Model):
    """A news post with a title, body text and auto-set publication time."""
    title = models.CharField(max_length=60)
    post = models.TextField()
    # Set once, automatically, when the row is first created.
    pub_date = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.title

    @classmethod
    def todays_news(cls):
        """Return posts published today (delegates to days_news)."""
        # DRY fix: this previously duplicated the filter in days_news.
        return cls.days_news(dt.date.today())

    @classmethod
    def days_news(cls, date):
        """Return posts whose publication date equals *date*."""
        return cls.objects.filter(pub_date__date=date)
class NewsLetterRecipients(models.Model):
    """Newsletter subscriber: a display name and a contact email address."""
    name = models.CharField(max_length=30)
    email = models.EmailField(max_length=30)
class Room(models.Model):
    """A chat room, addressed by its unique slug label."""
    name = models.TextField()
    label = models.SlugField(unique=True)
    def __unicode__(self):
        # NOTE(review): __unicode__ is only used under Python 2; on
        # Python 3 the default __str__ is used instead — confirm the target
        # runtime and consider renaming to __str__.
        return self.label
class Message(models.Model):
    """A chat message posted to a Room by a handle at a timestamp."""
    room = models.ForeignKey(
        Room, related_name='messages', on_delete=models.CASCADE,)
    handle = models.TextField()
    message = models.TextField()
    # Indexed so per-room, time-ordered queries stay fast.
    timestamp = models.DateTimeField(default=timezone.now, db_index=True)
    def __unicode__(self):
        # NOTE(review): __unicode__ is Python-2-only; under Python 3 this is
        # never called — consider renaming to __str__.
        return '[{timestamp}] {handle}: {message}'.format(**self.as_dict())
    @property
    def formatted_timestamp(self):
        # NOTE(review): the '%-d' / '%-I' (no-zero-padding) strftime codes
        # are glibc extensions and fail on Windows — confirm the deployment
        # platform.
        return self.timestamp.strftime('%b %-d %-I:%M %p')
    def as_dict(self):
        """Return the message as a plain dict for templates/serialisation."""
        return {'handle': self.handle, 'message': self.message, 'timestamp': self.formatted_timestamp}
| StarcoderdataPython |
5199361 | <filename>main.py
import sys
sys.path.append('./lib')
import _G, const, util, Input
def start():
    """Initialise globals, focus the target app window, then run the loop.

    Exits the process immediately when the application window handle is not
    found (``_G.AppHwnd == 0``).
    """
    util.init()
    if _G.AppHwnd == 0:
        print("App not found, aborting")
        return exit()
    util.activate_window(_G.AppHwnd)
    # Loop until update_input() clears the 'running' flag (F9).
    while _G.Flags['running']:
        main_loop()
def main_loop():
    """One scheduler tick: wait, poll input, run the script on schedule."""
    util.uwait(_G.UpdateDuration, False)
    update_basic()
    _G.CurScriptTick += 1
    # The script itself runs only once every ScriptUpdateTime ticks,
    # and is skipped entirely while paused (F8).
    if _G.CurScriptTick >= _G.ScriptUpdateTime:
        _G.CurScriptTick = 0
        if not _G.Flags['paused']:
            update_script()
    Input.clear_cache()
def update_basic():
    """Per-tick housekeeping that runs even while the script is paused."""
    update_input()
def update_input():
    """Poll the keyboard and handle the global hotkeys."""
    Input.update()
    # F8 toggles pause on/off.
    if Input.is_trigger(Input.keymap.kF8, True):
        _G.Flags['paused'] ^= True
        print(f"Paused: {_G.Flags['paused']}")
    # F9 stops the main loop and therefore the program.
    elif Input.is_trigger(Input.keymap.kF9, True):
        _G.Flags['running'] = False
def update_script():
    """The scheduled script action; currently only refreshes the app rect."""
    util.get_app_rect()
start() | StarcoderdataPython |
11392901 | import py, sys
from pypy.interpreter.astcompiler import codegen, astbuilder, symtable, optimize
from pypy.interpreter.pyparser import pyparse
from pypy.interpreter.pyparser.test import expressions
from pypy.interpreter.pycode import PyCode
from pypy.interpreter.pyparser.error import SyntaxError, IndentationError
from pypy.tool import stdlib_opcode as ops
def compile_with_astcompiler(expr, mode, space):
    """Parse *expr* with PyPy's parser and compile the AST to a code object.

    mode is the compile mode string (e.g. 'exec' or 'eval'); space is the
    PyPy object space.
    """
    p = pyparse.PythonParser(space)
    info = pyparse.CompileInfo("<test>", mode)
    cst = p.parse_source(expr, info)
    ast = astbuilder.ast_from_node(space, cst, info)
    return codegen.compile_ast(space, ast, info)
def generate_function_code(expr, space):
    """Compile the first statement of *expr* (a function definition) and
    return the code generator together with its basic blocks in post order.
    """
    p = pyparse.PythonParser(space)
    info = pyparse.CompileInfo("<test>", 'exec')
    cst = p.parse_source(expr, info)
    ast = astbuilder.ast_from_node(space, cst, info)
    function_ast = optimize.optimize_ast(space, ast.body[0], info)
    # NOTE(review): the optimized AST computed above is immediately
    # discarded by the reassignment below — confirm whether optimization
    # is meant to be applied here.
    function_ast = ast.body[0]
    symbols = symtable.SymtableBuilder(space, ast, info)
    generator = codegen.FunctionCodeGenerator(
        space, 'function', function_ast, 1, symbols, info)
    blocks = generator.first_block.post_order()
    generator._resolve_block_targets(blocks)
    return generator, blocks
class TestCompiler:
"""These tests compile snippets of code and check them by
running them with our own interpreter. These are thus not
completely *unit* tests, but given that our interpreter is
pretty stable now it is the best way I could find to check
the compiler.
"""
def run(self, source):
import sys
source = str(py.code.Source(source))
space = self.space
code = compile_with_astcompiler(source, 'exec', space)
# 2.7 bytecode is too different, the standard `dis` module crashes
# on older cpython versions
if sys.version_info >= (2, 7):
print
code.dump()
w_dict = space.newdict()
code.exec_code(space, w_dict, w_dict)
return w_dict
def check(self, w_dict, evalexpr, expected):
# for now, we compile evalexpr with CPython's compiler but run
# it with our own interpreter to extract the data from w_dict
co_expr = compile(evalexpr, '<evalexpr>', 'eval')
space = self.space
pyco_expr = PyCode._from_code(space, co_expr)
w_res = pyco_expr.exec_host_bytecode(w_dict, w_dict)
res = space.str_w(space.repr(w_res))
if not isinstance(expected, float):
noL = lambda expr: expr.replace('L', '')
assert noL(res) == noL(repr(expected))
else:
# Float representation can vary a bit between interpreter
# versions, compare the numbers instead.
assert eval(res) == expected
def simple_test(self, source, evalexpr, expected):
w_g = self.run(source)
self.check(w_g, evalexpr, expected)
st = simple_test
def error_test(self, source, exc_type):
py.test.raises(exc_type, self.simple_test, source, None, None)
def test_issue_713(self):
func = "def f(_=2): return (_ if _ else _) if False else _"
yield self.st, func, "f()", 2
def test_long_jump(self):
func = """def f(x):
y = 0
if x:
%s return 1
else:
return 0""" % (" y += 1\n" * 6700,)
yield self.st, func, "f(1)", 1
yield self.st, func, "f(0)", 0
def test_argtuple(self):
yield (self.simple_test, "def f( x, (y,z) ): return x,y,z",
"f((1,2),(3,4))", ((1,2),3,4))
yield (self.simple_test, "def f( x, (y,(z,t)) ): return x,y,z,t",
"f(1,(2,(3,4)))", (1,2,3,4))
yield (self.simple_test, "def f(((((x,),y),z),t),u): return x,y,z,t,u",
"f(((((1,),2),3),4),5)", (1,2,3,4,5))
def test_constants(self):
    # Every sample constant from the `expressions` module must
    # round-trip through our compiler with the value CPython gives it.
    for c in expressions.constants:
        yield (self.simple_test, "x="+c, "x", eval(c))

def test_neg_sys_maxint(self):
    # -sys.maxint-1 is the most negative int; the compiler must fold
    # the unary minus and keep the result an int, not promote to long.
    import sys
    stmt = "x = %s" % (-sys.maxint-1)
    self.simple_test(stmt, "type(x)", int)
def test_tuple_assign(self):
    # Unpacking into tuple and list targets of various arities,
    # including repeated names and the invalid empty-tuple target.
    yield self.error_test, "() = 1", SyntaxError
    yield self.simple_test, "x,= 1,", "x", 1
    yield self.simple_test, "x,y = 1,2", "x,y", (1, 2)
    yield self.simple_test, "x,y,z = 1,2,3", "x,y,z", (1, 2, 3)
    yield self.simple_test, "x,y,z,t = 1,2,3,4", "x,y,z,t", (1, 2, 3, 4)
    yield self.simple_test, "x,y,x,t = 1,2,3,4", "x,y,t", (3, 2, 4)
    yield self.simple_test, "[] = []", "1", 1
    yield self.simple_test, "[x]= 1,", "x", 1
    yield self.simple_test, "[x,y] = [1,2]", "x,y", (1, 2)
    yield self.simple_test, "[x,y,z] = 1,2,3", "x,y,z", (1, 2, 3)
    yield self.simple_test, "[x,y,z,t] = [1,2,3,4]", "x,y,z,t", (1, 2, 3,4)
    yield self.simple_test, "[x,y,x,t] = 1,2,3,4", "x,y,t", (3, 2, 4)
def test_tuple_assign_order(self):
decl = py.code.Source("""
class A:
def __getattr__(self, name):
global seen
seen += name
return name
def __setattr__(self, name, value):
global seen
seen += '%s=%s' % (name, value)
seen = ''
a = A()
""")
decl = str(decl) + '\n'
yield self.st, decl+"a.x,= a.a,", 'seen', 'ax=a'
yield self.st, decl+"a.x,a.y = a.a,a.b", 'seen', 'abx=ay=b'
yield self.st, decl+"a.x,a.y,a.z = a.a,a.b,a.c", 'seen', 'abcx=ay=bz=c'
yield self.st, decl+"a.x,a.y,a.x,a.t = a.a,a.b,a.c,a.d", 'seen', \
'abcdx=ay=bx=ct=d'
yield self.st, decl+"[a.x] = [a.a]", 'seen', 'ax=a'
yield self.st, decl+"[a.x,a.y] = a.a,a.b", 'seen', 'abx=ay=b'
yield self.st, decl+"[a.x,a.y,a.z] = [a.a,a.b,a.c]", 'seen', \
'abcx=ay=bz=c'
yield self.st, decl+"[a.x,a.y,a.x,a.t] = a.a,a.b,a.c,a.d", 'seen', \
'abcdx=ay=bx=ct=d'
def test_binary_operator(self):
    # Each binary/boolean/comparison operator must agree with CPython's
    # eval() on the same expression (constant-folded or not).
    for operator in ['+', '-', '*', '**', '/', '&', '|', '^', '//',
                     '<<', '>>', 'and', 'or', '<', '>', '<=', '>=',
                     'is', 'is not']:
        expected = eval("17 %s 5" % operator)
        yield self.simple_test, "x = 17 %s 5" % operator, "x", expected
        expected = eval("0 %s 11" % operator)
        yield self.simple_test, "x = 0 %s 11" % operator, "x", expected

def test_compare(self):
    # Chained comparison: must short-circuit to False at the first
    # failing link (2 >= 3) without evaluating the rest.
    yield self.st, "x = 2; y = 5; y; h = 1 < x >= 3 < x", "h", False

def test_augmented_assignment(self):
    # In-place operators must produce the same value as the plain form.
    for operator in ['+', '-', '*', '**', '/', '&', '|', '^', '//',
                     '<<', '>>']:
        expected = eval("17 %s 5" % operator)
        yield self.simple_test, "x = 17; x %s= 5" % operator, "x", expected
def test_subscript(self):
    # dict indexing: plain key, one-element-tuple key, store, and del.
    yield self.simple_test, "d={2:3}; x=d[2]", "x", 3
    yield self.simple_test, "d={(2,):3}; x=d[2,]", "x", 3
    yield self.simple_test, "d={}; d[1]=len(d); x=d[len(d)]", "x", 0
    yield self.simple_test, "d={}; d[1]=3; del d[1]", "len(d)", 0
def test_attribute(self):
yield self.simple_test, """
class A:
pass
a1 = A()
a2 = A()
a1.bc = A()
a1.bc.de = a2
a2.see = 4
a1.bc.de.see += 3
x = a1.bc.de.see
""", 'x', 7
def test_slice(self):
decl = py.code.Source("""
class A(object):
def __getitem__(self, x):
global got
got = x
def __setitem__(self, x, y):
global set
set = x
def __delitem__(self, x):
global deleted
deleted = x
a = A()
""")
decl = str(decl) + '\n'
testcases = ['[:]', '[:,9]', '[8,:]',
'[2:]', '[2:,9]', '[8,2:]',
'[:2]', '[:2,9]', '[8,:2]',
'[4:7]', '[4:7,9]', '[8,4:7]',
'[::]', '[::,9]', '[8,::]',
'[2::]', '[2::,9]', '[8,2::]',
'[:2:]', '[:2:,9]', '[8,:2:]',
'[4:7:]', '[4:7:,9]', '[8,4:7:]',
'[::3]', '[::3,9]', '[8,::3]',
'[2::3]', '[2::3,9]', '[8,2::3]',
'[:2:3]', '[:2:3,9]', '[8,:2:3]',
'[4:7:3]','[4:7:3,9]','[8,4:7:3]',
]
class Checker(object):
def __getitem__(self, x):
self.got = x
checker = Checker()
for testcase in testcases:
exec "checker" + testcase
yield self.st, decl + "a" + testcase, "got", checker.got
yield self.st, decl + "a" + testcase + ' = 5', "set", checker.got
yield self.st, decl + "del a" + testcase, "deleted", checker.got
def test_funccalls(self):
decl = py.code.Source("""
def f(*args, **kwds):
kwds = kwds.items()
kwds.sort()
return list(args) + kwds
""")
decl = str(decl) + '\n'
yield self.st, decl + "x=f()", "x", []
yield self.st, decl + "x=f(5)", "x", [5]
yield self.st, decl + "x=f(5, 6, 7, 8)", "x", [5, 6, 7, 8]
yield self.st, decl + "x=f(a=2, b=5)", "x", [('a',2), ('b',5)]
yield self.st, decl + "x=f(5, b=2, *[6,7])", "x", [5, 6, 7, ('b', 2)]
yield self.st, decl + "x=f(5, b=2, **{'a': 8})", "x", [5, ('a', 8),
('b', 2)]
def test_listmakers(self):
yield (self.st,
"l = [(j, i) for j in range(10) for i in range(j)"
+ " if (i+j)%2 == 0 and i%3 == 0]",
"l",
[(2, 0), (4, 0), (5, 3), (6, 0),
(7, 3), (8, 0), (8, 6), (9, 3)])
def test_genexprs(self):
yield (self.st,
"l = list((j, i) for j in range(10) for i in range(j)"
+ " if (i+j)%2 == 0 and i%3 == 0)",
"l",
[(2, 0), (4, 0), (5, 3), (6, 0),
(7, 3), (8, 0), (8, 6), (9, 3)])
def test_comparisons(self):
    # `in`/`not in`, `is`/`is not` against the singletons, and each of
    # those wrapped in `not` -- exercising the compiler's handling of
    # comparison opcodes and boolean negation.
    yield self.st, "x = 3 in {3: 5}", "x", True
    yield self.st, "x = 3 not in {3: 5}", "x", False
    yield self.st, "t = True; x = t is True", "x", True
    yield self.st, "t = True; x = t is False", "x", False
    yield self.st, "t = True; x = t is None", "x", False
    yield self.st, "n = None; x = n is True", "x", False
    yield self.st, "n = None; x = n is False", "x", False
    yield self.st, "n = None; x = n is None", "x", True
    yield self.st, "t = True; x = t is not True", "x", False
    yield self.st, "t = True; x = t is not False", "x", True
    yield self.st, "t = True; x = t is not None", "x", True
    yield self.st, "n = None; x = n is not True", "x", True
    yield self.st, "n = None; x = n is not False", "x", True
    yield self.st, "n = None; x = n is not None", "x", False
    yield self.st, "x = not (3 in {3: 5})", "x", False
    yield self.st, "x = not (3 not in {3: 5})", "x", True
    yield self.st, "t = True; x = not (t is True)", "x", False
    yield self.st, "t = True; x = not (t is False)", "x", True
    yield self.st, "t = True; x = not (t is None)", "x", True
    yield self.st, "n = None; x = not (n is True)", "x", True
    yield self.st, "n = None; x = not (n is False)", "x", True
    yield self.st, "n = None; x = not (n is None)", "x", False
    yield self.st, "t = True; x = not (t is not True)", "x", True
    yield self.st, "t = True; x = not (t is not False)", "x", False
    yield self.st, "t = True; x = not (t is not None)", "x", False
    yield self.st, "n = None; x = not (n is not True)", "x", False
    yield self.st, "n = None; x = not (n is not False)", "x", False
    yield self.st, "n = None; x = not (n is not None)", "x", True

def test_multiexpr(self):
    # Chained assignment: both targets get the same computed value.
    yield self.st, "z = 2+3; x = y = z", "x,y,z", (5,5,5)
def test_imports(self):
import os
yield self.st, "import sys", "sys.__name__", "sys"
yield self.st, "import sys as y", "y.__name__", "sys"
yield (self.st, "import sys, os",
"sys.__name__, os.__name__", ("sys", "os"))
yield (self.st, "import sys as x, os.path as y",
"x.__name__, y.__name__", ("sys", os.path.__name__))
yield self.st, 'import os.path', "os.path.__name__", os.path.__name__
yield (self.st, 'import os.path, sys',
"os.path.__name__, sys.__name__", (os.path.__name__, "sys"))
yield (self.st, 'import sys, os.path as osp',
"osp.__name__, sys.__name__", (os.path.__name__, "sys"))
yield (self.st, 'import os.path as osp',
"osp.__name__", os.path.__name__)
yield (self.st, 'from os import path',
"path.__name__", os.path.__name__)
yield (self.st, 'from os import path, sep',
"path.__name__, sep", (os.path.__name__, os.sep))
yield (self.st, 'from os import path as p',
"p.__name__", os.path.__name__)
yield (self.st, 'from os import *',
"path.__name__, sep", (os.path.__name__, os.sep))
yield (self.st, '''
class A(object):
def m(self):
from __foo__.bar import x
try:
A().m()
except ImportError, e:
msg = str(e)
''', "msg", "No module named __foo__")
def test_if_stmts(self):
yield self.st, "a = 42\nif a > 10: a += 2", "a", 44
yield self.st, "a=5\nif 0: a=7", "a", 5
yield self.st, "a=5\nif 1: a=7", "a", 7
yield self.st, "a=5\nif a and not not (a<10): a=7", "a", 7
yield self.st, """
lst = []
for a in range(10):
if a < 3:
a += 20
elif a > 3 and a < 8:
a += 30
else:
a += 40
lst.append(a)
""", "lst", [20, 21, 22, 43, 34, 35, 36, 37, 48, 49]
yield self.st, """
lst = []
for a in range(10):
b = (a & 7) ^ 1
if a or 1 or b: lst.append('A')
if a or 0 or b: lst.append('B')
if a and 1 and b: lst.append('C')
if a and 0 and b: lst.append('D')
if not (a or 1 or b): lst.append('-A')
if not (a or 0 or b): lst.append('-B')
if not (a and 1 and b): lst.append('-C')
if not (a and 0 and b): lst.append('-D')
if (not a) or (not 1) or (not b): lst.append('A')
if (not a) or (not 0) or (not b): lst.append('B')
if (not a) and (not 1) and (not b): lst.append('C')
if (not a) and (not 0) and (not b): lst.append('D')
""", "lst", ['A', 'B', '-C', '-D', 'A', 'B', 'A', 'B', '-C',
'-D', 'A', 'B', 'A', 'B', 'C', '-D', 'B', 'A', 'B',
'C', '-D', 'B', 'A', 'B', 'C', '-D', 'B', 'A', 'B',
'C', '-D', 'B', 'A', 'B', 'C', '-D', 'B', 'A', 'B',
'C', '-D', 'B', 'A', 'B', 'C', '-D', 'B', 'A', 'B',
'-C', '-D', 'A', 'B']
def test_docstrings(self):
for source, expected in [
('''def foo(): return 1''', None),
('''class foo: pass''', None),
('''foo = lambda: 4''', None),
('''foo = lambda: "foo"''', None),
('''def foo(): 4''', None),
('''class foo: "foo"''', "foo"),
('''def foo():
"""foo docstring"""
return 1
''', "foo docstring"),
('''def foo():
"""foo docstring"""
a = 1
"""bar"""
return a
''', "foo docstring"),
('''def foo():
"""doc"""; print 1
a=1
''', "doc"),
('''
class Foo(object): pass
foo = Foo()
exec "'moduledoc'" in foo.__dict__
''', "moduledoc"),
]:
yield self.simple_test, source, "foo.__doc__", expected
def test_in(self):
    # Membership in list and tuple literals, with both constant and
    # non-constant elements (the latter defeat constant folding).
    yield self.st, "n = 5; x = n in [3,4,5]", 'x', True
    yield self.st, "n = 5; x = n in [3,4,6]", 'x', False
    yield self.st, "n = 5; x = n in [3,4,n]", 'x', True
    yield self.st, "n = 5; x = n in [3,4,n+1]", 'x', False
    yield self.st, "n = 5; x = n in (3,4,5)", 'x', True
    yield self.st, "n = 5; x = n in (3,4,6)", 'x', False
    yield self.st, "n = 5; x = n in (3,4,n)", 'x', True
    yield self.st, "n = 5; x = n in (3,4,n+1)", 'x', False
def test_for_loops(self):
yield self.st, """
total = 0
for i in [2, 7, 5]:
total += i
""", 'total', 2 + 7 + 5
yield self.st, """
total = 0
for i in (2, 7, 5):
total += i
""", 'total', 2 + 7 + 5
yield self.st, """
total = 0
for i in [2, 7, total+5]:
total += i
""", 'total', 2 + 7 + 5
yield self.st, "x = sum([n+2 for n in [6, 1, 2]])", 'x', 15
yield self.st, "x = sum([n+2 for n in (6, 1, 2)])", 'x', 15
yield self.st, "k=2; x = sum([n+2 for n in [6, 1, k]])", 'x', 15
yield self.st, "k=2; x = sum([n+2 for n in (6, 1, k)])", 'x', 15
yield self.st, "x = sum(n+2 for n in [6, 1, 2])", 'x', 15
yield self.st, "x = sum(n+2 for n in (6, 1, 2))", 'x', 15
yield self.st, "k=2; x = sum(n+2 for n in [6, 1, k])", 'x', 15
yield self.st, "k=2; x = sum(n+2 for n in (6, 1, k))", 'x', 15
def test_closure(self):
decl = py.code.Source("""
def make_adder(n):
def add(m):
return n + m
return add
""")
decl = str(decl) + "\n"
yield self.st, decl + "x = make_adder(40)(2)", 'x', 42
decl = py.code.Source("""
def f(a, g, e, c):
def b(n, d):
return (a, c, d, g, n)
def f(b, a):
return (a, b, c, g)
return (a, g, e, c, b, f)
A, G, E, C, B, F = f(6, 2, 8, 5)
A1, C1, D1, G1, N1 = B(7, 3)
A2, B2, C2, G2 = F(1, 4)
""")
decl = str(decl) + "\n"
yield self.st, decl, 'A,A1,A2,B2,C,C1,C2,D1,E,G,G1,G2,N1', \
(6,6 ,4 ,1 ,5,5 ,5 ,3 ,8,2,2 ,2 ,7 )
decl = py.code.Source("""
def f((a, b)):
def g((c, d)):
return (a, b, c, d)
return g
x = f((1, 2))((3, 4))
""")
decl = str(decl) + "\n"
yield self.st, decl, 'x', (1, 2, 3, 4)
def test_closure_error(self):
source = """if 1:
def f(a):
del a
def x():
a
"""
with py.test.raises(SyntaxError) as excinfo:
self.run(source)
msg = excinfo.value.msg
assert msg == "Can't delete variable used in nested scopes: 'a'"
def test_try_except_finally(self):
yield self.simple_test, """
try:
x = 5
try:
if x > 2:
raise ValueError
finally:
x += 1
except ValueError:
x *= 7
""", 'x', 42
def test_while_loop(self):
yield self.simple_test, """
comments = [42]
comment = '# foo'
while comment[:1] == '#':
comments[:0] = [comment]
comment = ''
""", 'comments', ['# foo', 42]
yield self.simple_test, """
while 0:
pass
else:
x = 1
""", "x", 1
def test_return_lineno(self):
# the point of this test is to check that there is no code associated
# with any line greater than 4.
# The implict return will have the line number of the last statement
# so we check that that line contains exactly the implicit return None
yield self.simple_test, """\
def ireturn_example(): # line 1
global b # line 2
if a == b: # line 3
b = a+1 # line 4
else: # line 5
if 1: pass # line 6
import dis
co = ireturn_example.func_code
linestarts = list(dis.findlinestarts(co))
addrreturn = linestarts[-1][0]
x = [addrreturn == (len(co.co_code) - 4)]
x.extend([lineno for addr, lineno in linestarts])
""", 'x', [True, 3, 4, 6]
def test_type_of_constants(self):
yield self.simple_test, "x=[0, 0L]", 'type(x[1])', long
yield self.simple_test, "x=[(1,0), (1,0L)]", 'type(x[1][1])', long
yield self.simple_test, "x=['2?-', '2?-']", 'id(x[0])==id(x[1])', True
def test_pprint(self):
# a larger example that showed a bug with jumps
# over more than 256 bytes
decl = py.code.Source("""
def _safe_repr(object, context, maxlevels, level):
typ = type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = id(object)
if maxlevels and level > maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
for k, v in object.iteritems():
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % ', '.join(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = id(object)
if maxlevels and level > maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % ', '.join(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
""")
decl = str(decl) + '\n'
g = {}
exec decl in g
expected = g['_safe_repr']([5], {}, 3, 0)
yield self.st, decl + 'x=_safe_repr([5], {}, 3, 0)', 'x', expected
def test_mapping_test(self):
decl = py.code.Source("""
class X(object):
reference = {1:2, "key1":"value1", "key2":(1,2,3)}
key, value = reference.popitem()
other = {key:value}
key, value = reference.popitem()
inmapping = {key:value}
reference[key] = value
def _empty_mapping(self):
return {}
_full_mapping = dict
def assertEqual(self, x, y):
assert x == y
failUnlessRaises = staticmethod(raises)
def assert_(self, x):
assert x
def failIf(self, x):
assert not x
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) #workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
#Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = self.other.keys()[0]
self.failUnlessRaises(KeyError, lambda:d[knownkey])
#len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
#has_key
for k in self.reference:
self.assert_(d.has_key(k))
self.assert_(k in d)
for k in self.other:
self.failIf(d.has_key(k))
self.failIf(k in d)
#cmp
self.assertEqual(cmp(p,p), 0)
self.assertEqual(cmp(d,d), 0)
self.assertEqual(cmp(p,d), -1)
self.assertEqual(cmp(d,p), 1)
#__non__zero__
if p: self.fail("Empty mapping must compare to False")
if not d: self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assert_(hasattr(iter, 'next'))
self.assert_(hasattr(iter, '__iter__'))
x = list(iter)
self.assert_(set(x)==set(lst)==set(ref))
check_iterandlist(d.iterkeys(), d.keys(), self.reference.keys())
check_iterandlist(iter(d), d.keys(), self.reference.keys())
check_iterandlist(d.itervalues(), d.values(), self.reference.values())
check_iterandlist(d.iteritems(), d.items(), self.reference.items())
#get
key, value = d.iteritems().next()
knownkey, knownvalue = self.other.iteritems().next()
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.failIf(knownkey in d)
return 42
""")
decl = str(decl) + '\n'
yield self.simple_test, decl + 'r = test_read(X())', 'r', 42
def test_stack_depth_bug(self):
decl = py.code.Source("""
class A:
def initialize(self):
# install all the MultiMethods into the space instance
if isinstance(mm, object):
def make_boundmethod(func=func):
def boundmethod(*args):
return func(self, *args)
r = None
""")
decl = str(decl) + '\n'
yield self.simple_test, decl, 'r', None
def test_indentation_error(self):
source = py.code.Source("""
x
y
""")
try:
self.simple_test(source, None, None)
except IndentationError as e:
assert e.msg == 'unexpected indent'
else:
raise Exception("DID NOT RAISE")
def test_no_indent(self):
source = py.code.Source("""
def f():
xxx
""")
try:
self.simple_test(source, None, None)
except IndentationError as e:
assert e.msg == 'expected an indented block'
else:
raise Exception("DID NOT RAISE")
def test_indent_error_filename(self):
source = py.code.Source("""
def f():
x
y
""")
try:
self.simple_test(source, None, None)
except IndentationError as e:
assert e.filename == '<test>'
else:
raise Exception("DID NOT RAISE")
def test_kwargs_last(self):
    # Positional argument after a keyword argument is a syntax error.
    py.test.raises(SyntaxError, self.simple_test, "int(base=10, '2')",
                   None, None)

def test_crap_after_starargs(self):
    # A second *args in the same call is rejected by the (py2) grammar.
    source = "call(*args, *args)"
    py.test.raises(SyntaxError, self.simple_test, source, None, None)

def test_not_a_name(self):
    # A keyword-argument name must be an identifier, not a literal.
    source = "call(a, b, c, 3=3)"
    py.test.raises(SyntaxError, self.simple_test, source, None, None)

def test_assignment_to_call_func(self):
    # A function call is not a valid assignment target.
    source = "call(a, b, c) = 3"
    py.test.raises(SyntaxError, self.simple_test, source, None, None)

def test_augassig_to_sequence(self):
    # Augmented assignment to a tuple target is a syntax error.
    source = "a, b += 3"
    py.test.raises(SyntaxError, self.simple_test, source, None, None)
def test_broken_setups(self):
source = """if 1:
try:
break
finally:
pass
"""
py.test.raises(SyntaxError, self.simple_test, source, None, None)
def test_unpack_singletuple(self):
source = """if 1:
l = []
for x, in [(1,), (2,)]:
l.append(x)
"""
self.simple_test(source, 'l', [1, 2])
def test_unpack_wrong_stackeffect(self):
source = """if 1:
l = [1, 2]
a, b = l
a, b = l
a, b = l
a, b = l
a, b = l
a, b = l
"""
code = compile_with_astcompiler(source, 'exec', self.space)
assert code.co_stacksize == 2
def test_stackeffect_bug3(self):
source = """if 1:
try: pass
finally: pass
try: pass
finally: pass
try: pass
finally: pass
try: pass
finally: pass
try: pass
finally: pass
try: pass
finally: pass
"""
code = compile_with_astcompiler(source, 'exec', self.space)
assert code.co_stacksize == 3
def test_stackeffect_bug4(self):
source = """if 1:
with a: pass
with a: pass
with a: pass
with a: pass
with a: pass
with a: pass
"""
code = compile_with_astcompiler(source, 'exec', self.space)
assert code.co_stacksize == 4
def test_stackeffect_bug5(self):
source = """if 1:
a[:]; a[:]; a[:]; a[:]; a[:]; a[:]
a[1:]; a[1:]; a[1:]; a[1:]; a[1:]; a[1:]
a[:2]; a[:2]; a[:2]; a[:2]; a[:2]; a[:2]
a[1:2]; a[1:2]; a[1:2]; a[1:2]; a[1:2]; a[1:2]
"""
code = compile_with_astcompiler(source, 'exec', self.space)
assert code.co_stacksize == 3
def test_stackeffect_bug6(self):
source = """if 1:
{1}; {1}; {1}; {1}; {1}; {1}; {1}
"""
code = compile_with_astcompiler(source, 'exec', self.space)
assert code.co_stacksize == 1
def test_stackeffect_bug7(self):
source = '''def f():
for i in a:
return
'''
code = compile_with_astcompiler(source, 'exec', self.space)
def test_lambda(self):
    yield self.st, "y = lambda x: x", "y(4)", 4

def test_backquote_repr(self):
    # Python 2 backquote syntax `x` is equivalent to repr(x).
    yield self.st, "x = None; y = `x`", "y", "None"
def test_deleting_attributes(self):
test = """if 1:
class X():
x = 3
del X.x
try:
X.x
except AttributeError:
pass
else:
raise AssertionError("attribute not removed")"""
yield self.st, test, "X.__name__", "X"
def test_lots_of_loops(self):
    # Smoke test: a thousand loops must compile without hitting any
    # internal limit (e.g. jump offsets); only compilation is checked.
    source = "for x in y: pass\n" * 1000
    compile_with_astcompiler(source, 'exec', self.space)
def test_assign_to_empty_list_1(self):
source = """if 1:
for i in range(5):
del []
[] = ()
[] = []
[] = [] = []
ok = 1
"""
self.simple_test(source, 'ok', 1)
def test_assign_to_empty_list_2(self):
source = """if 1:
for i in range(5):
try: [] = 1, 2, 3
except ValueError: pass
else: raise AssertionError
try: [] = a = 1
except TypeError: pass
else: raise AssertionError
try: [] = _ = iter(['foo'])
except ValueError: pass
else: raise AssertionError
try: [], _ = iter(['foo']), 1
except ValueError: pass
else: raise AssertionError
ok = 1
"""
self.simple_test(source, 'ok', 1)
@py.test.mark.parametrize('expr, result', [
("f1.__doc__", None),
("f2.__doc__", 'docstring'),
("f2()", 'docstring'),
("f3.__doc__", None),
("f3()", 'bar'),
("C1.__doc__", None),
("C2.__doc__", 'docstring'),
("C3.field", 'not docstring'),
("C4.field", 'docstring'),
("C4.__doc__", 'docstring'),
("C4.__doc__", 'docstring'),
("__doc__", None),])
def test_remove_docstring(self, expr, result):
source = '"module_docstring"\n' + """if 1:
def f1():
'docstring'
def f2():
'docstring'
return 'docstring'
def f3():
'foo'
return 'bar'
class C1():
'docstring'
class C2():
__doc__ = 'docstring'
class C3():
field = 'not docstring'
class C4():
'docstring'
field = 'docstring'
"""
code_w = compile_with_astcompiler(source, 'exec', self.space)
code_w.remove_docstrings(self.space)
dict_w = self.space.newdict();
code_w.exec_code(self.space, dict_w, dict_w)
self.check(dict_w, expr, result)
def test_assert_skipping(self):
space = self.space
mod = space.getbuiltinmodule('__pypy__')
w_set_debug = space.getattr(mod, space.wrap('set_debug'))
space.call_function(w_set_debug, space.w_False)
source = """if 1:
assert False
"""
try:
self.run(source)
finally:
space.call_function(w_set_debug, space.w_True)
def test_dont_fold_equal_code_objects(self):
    # Constant caching must not merge constants that compare equal but
    # differ in type (1 vs 1.0) or in float sign (0.0 vs -0.0).
    yield self.st, "f=lambda:1;g=lambda:1.0;x=g()", 'type(x)', float
    yield (self.st, "x=(lambda: (-0.0, 0.0), lambda: (0.0, -0.0))[1]()",
           'repr(x)', '(0.0, -0.0)')
class AppTestCompiler:
def setup_class(cls):
cls.w_maxunicode = cls.space.wrap(sys.maxunicode)
def test_docstring_not_loaded(self):
import StringIO, dis, sys
ns = {}
exec "def f():\n 'hi'" in ns
f = ns["f"]
save = sys.stdout
sys.stdout = output = StringIO.StringIO()
try:
dis.dis(f)
finally:
sys.stdout = save
assert "0 ('hi')" not in output.getvalue()
def test_print_to(self):
exec """if 1:
from StringIO import StringIO
s = StringIO()
print >> s, "hi", "lovely!"
assert s.getvalue() == "hi lovely!\\n"
s = StringIO()
print >> s, "hi", "lovely!",
assert s.getvalue() == "hi lovely!"
""" in {}
def test_assert_with_tuple_arg(self):
try:
assert False, (3,)
except AssertionError as e:
assert str(e) == "(3,)"
# BUILD_LIST_FROM_ARG is PyPy specific
@py.test.mark.skipif('config.option.runappdirect')
def test_build_list_from_arg_length_hint(self):
hint_called = [False]
class Foo(object):
def __length_hint__(self):
hint_called[0] = True
return 5
def __iter__(self):
for i in range(5):
yield i
l = [a for a in Foo()]
assert hint_called[0]
assert l == list(range(5))
def test_unicode_in_source(self):
import sys
d = {}
exec '# -*- coding: utf-8 -*-\n\nu = u"\xf0\x9f\x92\x8b"' in d
if sys.maxunicode > 65535 and self.maxunicode > 65535:
expected_length = 1
else:
expected_length = 2
assert len(d['u']) == expected_length
class TestOptimizations:
def count_instructions(self, source):
    """Compile `source` into a function and return a dict mapping each
    opcode to the number of times it appears across all basic blocks."""
    code, blocks = generate_function_code(source, self.space)
    instrs = []
    for block in blocks:
        instrs.extend(block.instructions)
    print instrs
    counts = {}
    for instr in instrs:
        counts[instr.opcode] = counts.get(instr.opcode, 0) + 1
    return counts
def test_elim_jump_to_return(self):
source = """def f():
return true_value if cond else false_value
"""
counts = self.count_instructions(source)
assert ops.JUMP_FORWARD not in counts
assert ops.JUMP_ABSOLUTE not in counts
assert counts[ops.RETURN_VALUE] == 2
def test_const_fold_subscr(self):
source = """def f():
return (0, 1)[0]
"""
counts = self.count_instructions(source)
assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
source = """def f():
return (0, 1)[:2]
"""
# Just checking this doesn't crash out
self.count_instructions(source)
def test_const_fold_unicode_subscr(self, monkeypatch):
source = """def f():
return u"abc"[0]
"""
counts = self.count_instructions(source)
if 0: # xxx later?
assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
# getitem outside of the BMP should not be optimized
source = """def f():
return u"\U00012345"[0]
"""
counts = self.count_instructions(source)
assert counts == {ops.LOAD_CONST: 2, ops.BINARY_SUBSCR: 1,
ops.RETURN_VALUE: 1}
source = """def f():
return u"\U00012345abcdef"[3]
"""
counts = self.count_instructions(source)
assert counts == {ops.LOAD_CONST: 2, ops.BINARY_SUBSCR: 1,
ops.RETURN_VALUE: 1}
monkeypatch.setattr(optimize, "MAXUNICODE", 0xFFFF)
source = """def f():
return u"\uE01F"[0]
"""
counts = self.count_instructions(source)
if 0: # xxx later?
assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
monkeypatch.undo()
# getslice is not yet optimized.
# Still, check a case which yields the empty string.
source = """def f():
return u"abc"[:0]
"""
counts = self.count_instructions(source)
assert counts == {ops.LOAD_CONST: 2, ops.SLICE+2: 1,
ops.RETURN_VALUE: 1}
def test_remove_dead_code(self):
source = """def f(x):
return 5
x += 1
"""
counts = self.count_instructions(source)
assert counts == {ops.LOAD_CONST:1, ops.RETURN_VALUE: 1}
def test_remove_dead_jump_after_return(self):
source = """def f(x, y, z):
if x:
return y
else:
return z
"""
counts = self.count_instructions(source)
assert counts == {ops.LOAD_FAST: 3,
ops.POP_JUMP_IF_FALSE: 1,
ops.RETURN_VALUE: 2}
def test_remove_dead_yield(self):
source = """def f(x):
return
yield 6
"""
counts = self.count_instructions(source)
assert counts == {ops.LOAD_CONST:1, ops.RETURN_VALUE: 1}
#
space = self.space
w_generator = space.appexec([], """():
d = {}
exec '''def f(x):
return
yield 6
''' in d
return d['f'](5)
""")
assert 'generator' in space.str_w(space.repr(w_generator))
def test_list_comprehension(self):
source = "def f(): [i for i in l]"
source2 = "def f(): [i for i in l for j in l]"
source3 = "def f(): [i for i in l if i]"
counts = self.count_instructions(source)
assert ops.BUILD_LIST not in counts
assert counts[ops.BUILD_LIST_FROM_ARG] == 1
counts = self.count_instructions(source2)
assert counts[ops.BUILD_LIST] == 1
assert ops.BUILD_LIST_FROM_ARG not in counts
counts = self.count_instructions(source3)
assert counts[ops.BUILD_LIST] == 1
assert ops.BUILD_LIST_FROM_ARG not in counts
def test_folding_of_list_constants(self):
for source in (
# in/not in constants with BUILD_LIST should be folded to a tuple:
'a in [1,2,3]',
'a not in ["a","b","c"]',
'a in [None, 1, None]',
'a not in [(1, 2), 3, 4]',
):
source = 'def f(): %s' % source
counts = self.count_instructions(source)
assert ops.BUILD_LIST not in counts
assert ops.LOAD_CONST in counts
def test_folding_of_set_constants(self):
for source in (
# in/not in constants with BUILD_SET should be folded to a frozenset:
'a in {1,2,3}',
'a not in {"a","b","c"}',
'a in {None, 1, None}',
'a not in {(1, 2), 3, 4}',
'a in {1, 2, 3, 3, 2, 1}',
):
source = 'def f(): %s' % source
counts = self.count_instructions(source)
assert ops.BUILD_SET not in counts
assert ops.LOAD_CONST in counts
def test_dont_fold_huge_powers(self):
for source in (
"2 ** 3000", # not constant-folded: too big
"(-2) ** 3000",
):
source = 'def f(): %s' % source
counts = self.count_instructions(source)
assert ops.BINARY_POWER in counts
for source in (
"2 ** 2000", # constant-folded
"2 ** -3000",
"1.001 ** 3000",
"1 ** 3000.0",
):
source = 'def f(): %s' % source
counts = self.count_instructions(source)
assert ops.BINARY_POWER not in counts
| StarcoderdataPython |
6634844 |
import json
import subprocess
import csv
import operator
import ld_helpers
import pprint
import mpu_helpers
# Sections the default linker layout always places in RAM / flash;
# every other section is treated as a hexbox per-compartment section.
DEFAULT_RAM_SECTIONS = ['.data', '.bss', '._user_heap_stack',
                        '.stack', '.hexbox_rt_ram']
DEFAULT_FLASH_SECTIONS = ['.isr_vector', '.rodata', '.ARM.extab',
                          '.ARM', '.text', '.hexbox_rt_code']
DEFAULT_STACK_REGION = '.stack'

# Target memory map: 1 MiB flash at 0x08000000, 128 KiB RAM at 0x20000000
# (matches an STM32F4-class part -- TODO confirm against the board config).
FLASH_ADDR = 0x08000000
FLASH_SIZE = 1*1024*1024
RAM_ADDR = 0x20000000
RAM_SIZE = 128*1024

# Example `arm-none-eabi-size -A -d` section table that parse_size_data
# below is written to consume (name, size, decimal address):
'''
.isr_vector 392 134217728
.text 10536 134218120
.rodata 448 134228656
.ARM.extab 24 134229104
.ARM 200 134229128
.data 1096 536870912
.hexbox_text_0 512688 134229328
.hexbox_text_1 196 134742016
.hexbox_text_2 938 134742212
.ccmram 0 268435456
.bss 72 536872008
._user_heap_stack 1536 536872080
'''
def parse_size_data(size_output_str):
    """Parse `arm-none-eabi-size -A -d` output into a section table.

    Returns {section_name: {'size': int, 'addr': int}}.  Sections named
    ``<base>_bss`` / ``<base>_data`` are folded into ``<base>`` (sizes
    accumulate; the address of the first occurrence is kept).  Non-empty
    sections smaller than 32 bytes are rounded up to 32 bytes, the
    minimum MPU region granularity.  Parsing stops at the first line
    that does not have exactly three fields (end of the section table).
    """
    lines = size_output_str.split('\n')
    size_data = {}
    for line in lines[2:]:  # skip the two header lines `size` emits
        fields = line.split()
        if len(fields) != 3:
            break  # first malformed/blank line ends the section table
        name = fields[0]
        # Fold per-compartment bss/data sections back into their base name.
        if name.endswith("_bss"):
            name = name[:-4]
        elif name.endswith("_data"):
            name = name[:-5]
        size = int(fields[1])
        if 0 < size < 32:
            size = 32  # round tiny sections up to MPU granularity
        if name in size_data:
            size_data[name]['size'] += size
        else:
            size_data[name] = {'size': size, 'addr': int(fields[2])}
    return size_data
def get_section_sizes(object_filename):
    """Run `arm-none-eabi-size -A -d` on `object_filename` and return the
    parsed section table (see parse_size_data)."""
    command = ['arm-none-eabi-size', '-A', '-d', object_filename]
    output = subprocess.check_output(command)
    return parse_size_data(output)
def get_default_ram_sections(size_data):
    """Total size of the fixed (non-compartment) RAM sections.

    Consistent with get_default_flash_sections: a default section that
    the linker optimized out of the final binary is skipped instead of
    raising KeyError.
    """
    size = 0
    for name in DEFAULT_RAM_SECTIONS:
        try:
            size += size_data[name]['size']
        except KeyError:
            print("Default section not present in final binary")
    return size
def get_default_flash_sections(size_data):
    """Total size of the fixed (non-compartment) flash sections.

    Default sections that were optimized out of the final binary are
    skipped (logged) rather than treated as an error.
    """
    size = 0
    for name in DEFAULT_FLASH_SECTIONS:
        try:
            size += size_data[name]['size']
        except KeyError:
            # Parenthesized form works on both Python 2 and 3; the
            # redundant `pass` after the print was removed.
            print("Default section not present in final binary")
    return size
def create_flash_linker_string(flash_sorted_sections):
    '''
    Build the linker-script text for every flash region, allocating each
    power-of-two-sized region downward from the top of flash.  Returns
    the text and a dict mapping region name -> {'addr', 'size'}.
    '''
    flash_regions = {}
    parts = list(ld_helpers.get_hexbox_rt_code_region())
    next_top = FLASH_ADDR + FLASH_SIZE
    for name, raw_size in flash_sorted_sections:
        region_size = ld_helpers.next_power_2(raw_size)
        next_top -= region_size
        flash_regions[name] = {'addr': next_top, 'size': region_size}
        parts.extend(ld_helpers.set_code_sections(name, next_top, region_size))
    return "\n".join(parts), flash_regions
def create_ram_linker_string(ram_sorted_sections):
    '''
    Build the linker-script text for every RAM region, allocating each
    power-of-two-sized region downward from the top of RAM.  Returns
    the text and a dict mapping region name -> {'addr', 'size'}.
    '''
    ram_regions = {}
    parts = list(ld_helpers.get_hexbox_rt_data_region())
    next_top = RAM_ADDR + RAM_SIZE
    for name, raw_size in ram_sorted_sections:
        region_size = ld_helpers.next_power_2(raw_size)
        next_top -= region_size
        ram_regions[name] = {'addr': next_top, 'size': region_size}
        parts.extend(ld_helpers.set_ram_sections(name, next_top, region_size))
    return "\n".join(parts), ram_regions
def get_sorted_sections(size_data):
    '''
    Partition the non-default sections into hexbox flash and RAM groups and
    return each group as a list of (name, aligned_size), largest first.
    '''
    flash_sections = {}
    ram_sections = {}
    for name, data in size_data.items():
        addr = data['addr']
        aligned_size = ld_helpers.next_power_2(data['size'])
        in_flash = FLASH_ADDR <= addr < (FLASH_ADDR + FLASH_SIZE)
        in_ram = RAM_ADDR <= addr < (RAM_ADDR + RAM_SIZE)
        if in_flash and name not in DEFAULT_FLASH_SECTIONS:
            flash_sections[name] = aligned_size
        elif in_ram and name not in DEFAULT_RAM_SECTIONS:
            ram_sections[name] = aligned_size
    by_size = operator.itemgetter(1)
    flash_sorted = sorted(flash_sections.items(), key=by_size, reverse=True)
    ram_sorted = sorted(ram_sections.items(), key=by_size, reverse=True)
    return (flash_sorted, ram_sorted)
def get_default_region(policy, size_data, size=8):
    '''
    Install the catch-all "__hexbox_default" MPU configuration into the
    policy (size slots), pre-populated with the shared base regions.
    '''
    policy["MPU_CONFIG"]["__hexbox_default"] = {
        "Attrs": [0] * size,
        "Addrs": [0] * size
    }
    get_base_mpu_regions(policy["MPU_CONFIG"]["__hexbox_default"], size_data)
def get_mpu_config(policy,ld_regions,size_data):
'''
Builds the mpu configs for each of the compartments, these are assigned to
regions MPU_R3-MPU_R6
'''
size = policy["NUM_MPU_REGIONS"]
get_default_region(policy,size_data)
for region in ld_regions:
#for comp_name,comp_data in policy['Compartments'].items():
print region
if not policy["Compartments"].has_key(region):
continue
print "Getting MPU Config: ", region
comp_data = policy["Compartments"][region]
MPU_config = policy['MPU_CONFIG'][region]
get_base_mpu_regions(MPU_config, size_data)
#Code Region
size = ld_regions[region]['size']
addr = ld_regions[region]['addr']
# This compartment code region
mpu_helpers.encode_mpu_region(MPU_config, 3,addr,size,'FLASH-XR')
n = 4
comp_data = policy["Compartments"][region]
for data_name in comp_data['Data']:
size = ld_regions[data_name]['size']
addr = ld_regions[data_name]['addr']
mpu_helpers.encode_mpu_region(MPU_config,n,addr,size,'RAM-RW')
n += 1
for p_data in comp_data['Peripherals']:
size = 2**p_data['Size']
addr = p_data['Addr']
mpu_helpers.encode_mpu_region(MPU_config,n,addr,size,'PER-RW')
n += 1
for i in range(n,8):
mpu_helpers.disable_region(MPU_config,n)
#if n >8:
# print "To many regions in policy file for compartment: ",comp_data
# quit(-1)
def get_base_mpu_regions(MPU_config,size_data):
    '''
    Program the three MPU regions shared by every compartment:
      0: the whole 4GB address space as 'RAM-RO' (the extra [0,7] argument is
         presumably subregion control -- TODO confirm against mpu_helpers)
      1: the default flash sections, execute + read
      2: the stack region, read/write
    The unused default-RAM size computation (whose consumer was commented
    out) has been removed.
    '''
    default_flash_size = get_default_flash_sections(size_data)
    flash_pow_size = ld_helpers.next_power_2(default_flash_size)
    mpu_helpers.encode_mpu_region(MPU_config,0,0,4*1024*1024*1024,'RAM-RO',[0,7])
    mpu_helpers.encode_mpu_region(MPU_config,1,FLASH_ADDR,flash_pow_size,'FLASH-XR')
    stack_pwr_2 = ld_helpers.next_power_2(size_data[DEFAULT_STACK_REGION]['size'])
    stack_addr = size_data[DEFAULT_STACK_REGION]['addr']
    mpu_helpers.encode_mpu_region(MPU_config,2,stack_addr,stack_pwr_2,'RAM-RW')
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-o','--object',dest='object_filename',required=True,
        help='Object File')
    parser.add_argument('-t','--templater',dest='template',required=True,
        help='Template linker script')
    parser.add_argument('-l','--linker_script',dest='ld_script', required=True,
        help='Name of output filename')
    parser.add_argument('-p','--policy',dest='policy_file', required = True,
        help = "Policy file used to derive the permissions for each region")
    parser.add_argument('-f','--final_policy',dest='final_policy', required = True,
        help = "Copy of Policy file with the permissions specified")
    args = parser.parse_args()
    # Measure every section in the object file and total the default
    # (non-hexbox) flash and RAM sections.
    size_data = get_section_sizes(args.object_filename)
    default_ram_size = get_default_ram_sections(size_data)
    default_flash_size = get_default_flash_sections(size_data)
    flash_pow_size = ld_helpers.next_power_2(default_flash_size);
    ram_pow_size = ld_helpers.next_power_2(default_ram_size);
    (flash_sorted_sections , ram_sorted_sections) = get_sorted_sections(size_data)
    # Generate the per-compartment region strings and write the linker script.
    text_str,flash_regions = create_flash_linker_string(flash_sorted_sections)
    data_str,ram_regions = create_ram_linker_string(ram_sorted_sections)
    ld_helpers.write_linker(args.template, args.ld_script, text_str, data_str)
    # Annotate the policy with the derived MPU configuration and write the
    # final policy back out as pretty-printed JSON.
    with open(args.policy_file,'rb') as in_policy:
        policy = json.load(in_policy)
    ld_regions = flash_regions.copy()
    ld_regions.update(ram_regions)
    get_mpu_config(policy,ld_regions,size_data)
    with open(args.final_policy,'wb') as out_file:
        json.dump(policy,out_file,sort_keys=True, indent=4, separators=(',', ': '))
| StarcoderdataPython |
9691271 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uses ontology universe to validate parsed instance data."""
from __future__ import print_function
from validate import connection
from validate import field_translation
from validate import link
from yamlformat.validator import findings_lib
# Marker string accepted in place of a full translation body
# (see _ValidateTranslation).
TRANSLATION_COMPLIANT = 'COMPLIANT'

# Top-level keys of an entity instance YAML block.
ID_KEY = 'id'
TYPE_KEY = 'type'
LINKS_KEY = 'links'
TRANSLATION_KEY = 'translation'
CONNECTIONS_KEY = 'connections'

# Keys used inside a field translation block.
PRESENT_VALUE = 'present_value'
POINTS = 'points'
UNITS_KEY = 'units'
UNIT_VALUES_KEY = 'unit_values'
VALUES_KEY = 'values'
STATES_KEY = 'states'
# TODO(nkilmer): move parsing and validation logic in this class into subclasses
class EntityInstance(findings_lib.Findings):
  """Uses information from the generated ontology universe to validate
  an entity instance. An entity instance is composed of at least an id and a
  type. For example: {'id': 'FACILITIES/12345', 'type': 'FACILITIES/BUILDING'}.

  Each optional YAML key (translation, connections, links) is parsed into a
  structured attribute in __init__ and checked by the corresponding
  _Validate* method from IsValidEntityInstance.

  Args:
    entity_yaml: parsed instance YAML file formatted as dictionary
  """

  def __init__(self, entity_yaml):
    super().__init__()

    # Attributes default to None when their key is absent; validation
    # reports on the required ones later.
    self.id = None
    if ID_KEY in entity_yaml.keys():
      self.id = entity_yaml[ID_KEY]

    self.namespace, self.type_name = None, None
    if TYPE_KEY in entity_yaml.keys():
      self.namespace, self.type_name = self._ParseTypeString(
          entity_yaml[TYPE_KEY])

    self.translation = None
    if TRANSLATION_KEY in entity_yaml.keys():
      self.translation = self._ParseTranslation(
          entity_yaml[TRANSLATION_KEY])

    self.connections = None
    if CONNECTIONS_KEY in entity_yaml.keys():
      self.connections = self._ParseConnections(
          entity_yaml[CONNECTIONS_KEY])

    self.links = None
    if LINKS_KEY in entity_yaml.keys():
      self.links = self._ParseLinks(entity_yaml[LINKS_KEY])

  def _ParseTypeString(self, type_str):
    """Parses an entity type string into a namespace and type name.

    Args:
      type_str: entity type string from YAML, expected as 'NAMESPACE/TYPE'

    Returns:
      Type namespace string
      Type name string

    Raises:
      TypeError: if the string does not contain exactly one '/'.
    """
    type_parse = type_str.split('/')

    if len(type_parse) == 1:
      print('Type improperly formatted, a namespace is missing: '
            , type_str)
      raise TypeError('Type improperly formatted, a namespace is missing: '
                      , type_str)

    if len(type_parse) > 2:
      print('Type improperly formatted: ', type_str)
      raise TypeError('Type improperly formatted: ', type_str)

    return type_parse[0], type_parse[1]

  def _ParseTranslation(self, translation_body):
    """Parses YAML defining the translation of an entity's points.

    Args:
      translation_body: YAML body for the entity translation; may be a plain
        string (compliance marker), which is returned unchanged

    Returns:
      A dictionary from field names to FieldTranslation instances, or the
      raw string when a compliance marker was supplied
    """
    if isinstance(translation_body, str):
      return translation_body

    translation = {}
    # TODO(b/176094783): reuse the tuple from the ontology validator
    for std_field_name in translation_body.keys():
      # String-valued entries carry no translation body to parse.
      if isinstance(translation_body[std_field_name], str):
        continue

      # TODO(b/176097512): Manually defined non UDMI translations should be
      # accepted by the validator
      ft = translation_body[std_field_name]
      # Strip the UDMI path decorations to recover the raw point name.
      raw_field_name = str(ft[PRESENT_VALUE])\
        .replace(PRESENT_VALUE, '')\
        .replace(POINTS, '')\
        .replace('.', '')

      # Units may appear under either 'units.values' or 'unit_values'.
      units = dict()
      if UNITS_KEY in ft.keys():
        units = ft[UNITS_KEY][VALUES_KEY]
      elif UNIT_VALUES_KEY in ft.keys():
        units = ft[UNIT_VALUES_KEY]

      states = dict()
      if STATES_KEY in ft.keys():
        states = ft[STATES_KEY]

      translation[std_field_name] = field_translation.FieldTranslation(
          std_field_name, raw_field_name, units, states)

    return translation

  def _ParseConnections(self, connections_body):
    """Parses YAML defining the connections between an entity and other
    entities, which are the sources of the connections.

    Args:
      connections_body: YAML body for the entity connections, mapping
        source entity name to connection type

    Returns:
      A set of Connection instances
    """
    connections = set()

    for source_entity, connection_type in connections_body.items():
      connections.add(connection.Connection(connection_type, source_entity))

    return connections

  def _ParseLinks(self, links_body):
    """Parses YAML defining the links between the fields of this entity and
    other source entities.

    Args:
      links_body: YAML body for the entity links, mapping source entity name
        to a field map

    Returns:
      A set of Link instances
    """
    links = set()

    for source_entity, field_map in links_body.items():
      links.add(link.Link(source_entity, field_map))

    return links

  def _ValidateType(self, universe):
    """Uses information from the generated ontology universe to validate
    an entity's type.

    Args:
      universe: ConfigUniverse generated from the ontology

    Returns:
      Returns boolean for validity of entity type.
    """
    if universe.GetEntityTypeNamespace(self.namespace) is None:
      print('Invalid namespace: ', self.namespace)
      return False

    entity_type = universe.GetEntityType(self.namespace, self.type_name)
    if entity_type is None:
      print('Invalid entity type: ', self.type_name)
      return False
    elif entity_type.is_abstract:
      print('Abstract types cannot be instantiated: ', self.type_name)
      return False

    return True

  def _ValidateTranslation(self, universe):
    """Uses information from the generated ontology universe to validate
    an entity's translation if it exists.

    Checks each translated field against the entity type's fields, its units
    map, and its states map, then verifies that every required field of the
    type appears in the translation.

    Args:
      universe: ConfigUniverse generated from the ontology

    Returns:
      Returns boolean for validity of entity translation, defaults to True if
      translation is not specified.
    """
    if self.translation is None:
      return True

    # A plain string is only valid when it is the compliance marker.
    if isinstance(self.translation, str):
      if self.translation == TRANSLATION_COMPLIANT:
        return True
      else:
        print('Invalid translation compliance string: ', self.translation)
        return False

    is_valid = True
    entity_type = universe.GetEntityType(self.namespace, self.type_name)
    type_fields = entity_type.GetAllFields()
    found_fields = set()

    for field_name, ft in self.translation.items():
      # TODO(charbull): the key in the dictionary all_fields_dict starts
      # with `/`, needs to be cleaned
      key_field_name = '/' + field_name
      if key_field_name not in type_fields.keys():
        print('Field {0} is not defined on the type'.format(field_name))
        is_valid = False
        continue
      found_fields.add(key_field_name)

      valid_units = universe.GetUnitsMapByMeasurement(field_name)
      if valid_units:
        for unit in ft.units.keys():
          if unit not in valid_units:
            print('Field {0} has an invalid unit: {1}'.format(field_name, unit))
            is_valid = False

      valid_states = universe.GetStatesByField(field_name)
      if valid_states:
        for state in ft.states.keys():
          if state not in valid_states:
            print('Field {0} has an invalid state: {1}'
                  .format(field_name, state))
            is_valid = False

    # Every non-optional field of the type must have been translated.
    for field_name, field in type_fields.items():
      if not field.optional and field_name not in found_fields:
        print('Required field {0} is missing from translation'
              .format(field_name))
        is_valid = False

    return is_valid

  def _ValidateConnections(self, universe):
    """Uses information from the generated ontology universe to validate
    an entity's connections. Connections are not currently generated in the
    ontology universe, so this code assumes the contents are a set.

    Args:
      universe: ConfigUniverse generated from the ontology

    Returns:
      Returns boolean for validity of entity connections.
    """
    # TODO(nkilmer): validate existence of connection source entities
    if universe.connections_universe is None or self.connections is None:
      return True

    is_valid = True
    for conn_inst in self.connections:
      if conn_inst.ctype not in universe.connections_universe:
        print('Invalid connection type: {0}'.format(conn_inst.ctype))
        is_valid = False

    return is_valid

  def _ValidateLinks(self, universe, entity_instances):
    """Uses information from the generated ontology universe to validate
    the links key of an entity.

    Checks that every link's source entity exists, that both sides of each
    field mapping are defined on their respective types, that units and
    states match across the link, and that all required target fields are
    covered.

    Args:
      universe: ConfigUniverse generated from the ontology
      entity_instances: dictionary containing all instances

    Returns:
      Returns boolean for validity of links key, defaulting to True if the
      key is not present.
    """
    if self.links is None:
      return True

    is_valid = True
    entity_type = universe.GetEntityType(self.namespace, self.type_name)
    type_fields = entity_type.GetAllFields()
    found_fields = set()

    for link_inst in self.links:
      if link_inst.source not in entity_instances.keys():
        print('Invalid link source entity name: {0}'.format(link_inst.source))
        is_valid = False
        continue

      src_entity = entity_instances.get(link_inst.source)
      src_namespace = src_entity.namespace
      src_type_name = src_entity.type_name
      src_entity_type = universe.GetEntityType(src_namespace,
                                               src_type_name)

      for source_field, target_field in link_inst.field_map.items():
        # assumes that the namespace is '' for now
        if not entity_type.HasField('/' + target_field):
          print('Invalid link target field: ', target_field)
          is_valid = False
          continue
        if not src_entity_type.HasField('/' + source_field):
          print('Invalid link source field: ', source_field)
          is_valid = False
          continue

        found_fields.add('/' + target_field)

        if not self._ValidateLinkUnitsMatch(universe,
                                            source_field, target_field):
          is_valid = False
          continue

        if not self._ValidateLinkStatesMatch(universe,
                                             source_field, target_field):
          is_valid = False
          continue

    # Every non-optional field of this entity's type must be linked.
    for field_name, field in type_fields.items():
      if not field.optional and field_name not in found_fields:
        print('Required field {0} is missing from links'.format(field_name))
        is_valid = False

    return is_valid

  def _ValidateLinkUnitsMatch(self, universe, source_field, target_field):
    """Validates that units match between linked source and target fields."""
    source_units = universe.GetUnitsMapByMeasurement(source_field)
    target_units = universe.GetUnitsMapByMeasurement(target_field)
    if source_units != target_units:
      print('Unit mismatch in link from {0} to {1}'\
            .format(source_field, target_field))
      return False
    return True

  def _ValidateLinkStatesMatch(self, universe, source_field, target_field):
    """Validates that states match between linked source and target fields."""
    source_states = universe.GetStatesByField(source_field)
    target_states = universe.GetStatesByField(target_field)
    if source_states != target_states:
      print('State mismatch in link from {0} to {1}'
            .format(source_field, target_field))
      return False
    return True

  def IsValidEntityInstance(self, universe=None, entity_instances=None):
    """Uses the generated ontology universe to validate an entity.

    Runs all checks even after the first failure so every problem is
    printed; without a universe only the id/type presence checks run.

    Args:
      universe: ConfigUniverse generated from the ontology
      entity_instances: dictionary containing all entity instances

    Returns:
      True if the entity is valid
    """
    is_valid = True

    if self.id is None:
      print('Required field not specified: id')
      is_valid = False

    if self.namespace is None or self.type_name is None:
      print('Required field not specified: type')
      is_valid = False

    if universe is None:
      return is_valid

    if not self._ValidateType(universe):
      is_valid = False

    if not self._ValidateTranslation(universe):
      is_valid = False

    if not self._ValidateConnections(universe):
      is_valid = False

    if not self._ValidateLinks(universe, entity_instances):
      is_valid = False

    return is_valid
| StarcoderdataPython |
5153702 | <filename>screen/screen.py
from screens.call_screen import CallScreen
from screens.inital_call_screen import InitialCallScreen
from screens.middle_screen import MiddleScreen
class Screen(object):
    """Top-level screen container: owns every sub-screen and tracks the active one."""

    def __init__(self):
        self.initial_call_screen = InitialCallScreen(self)
        self.call_screen = CallScreen(self)
        self.middle_screen = MiddleScreen(self)
        # The UI starts on the initial call screen.
        self.state = self.initial_call_screen

    def change_state(self, action):
        """Delegate *action* to the active sub-screen; returns the next
        screen initials or an error message (per the sub-screen contract)."""
        return self.state.change_state(action)
| StarcoderdataPython |
1858965 | <reponame>weeb-poly/syncplay-proxy<filename>syncplay/ep_proxy.py
import os
import logging
from twisted.internet.endpoints import TCP4ServerEndpoint, SSL4ServerEndpoint
from syncplay.server import SyncplayProxyWSFactory
from syncplay.server import SyncplayProxyTCPFactory
from twisted.internet import reactor
# from autobahn.twisted.choosereactor import install_reactor
# reactor = install_reactor()
def setupTCPFactory(factory, port: int) -> None:
    """Listen for plain-TCP Syncplay connections on the given IPv4 port."""
    setupEndpoint(TCP4ServerEndpoint(reactor, port), factory, "IPv4", "TCP")
def setupWSFactory(factory, port: int) -> None:
    """Listen for WebSocket connections, over TLS when the factory carries SSL options."""
    if factory.options is None:
        endpoint4 = TCP4ServerEndpoint(reactor, port)
    else:
        endpoint4 = SSL4ServerEndpoint(reactor, port, factory.options)
    setupEndpoint(endpoint4, factory, "IPv4", "WS")
def setupEndpoint(endpoint, factory, addrType: str, connType: str) -> None:
    """Start listening on *endpoint* with *factory*; log (rather than crash)
    when listening fails.

    Args:
        endpoint: a twisted server endpoint
        factory: the protocol factory to serve
        addrType: address family label used in the log message (e.g. "IPv4")
        connType: connection type label used in the log message (e.g. "TCP")
    """
    def listenFailed(e):
        # This errback receives a twisted Failure, not an active exception,
        # so logging.exception() would attach a bogus "NoneType: None"
        # traceback here. Log the failure at error level instead.
        logging.error(e)
        logging.error(f"{addrType} listening failed ({connType}).")
    endpoint.listen(factory).addErrback(listenFailed)
def main():
    """Read proxy settings from the environment and start the configured listeners.

    SYNCPLAY_TCP_PORT / SYNCPLAY_WS_PORT enable the respective listener when
    set; SYNCPLAY_HOST is the upstream server; SYNCPLAY_TLS_PATH is optional.
    """
    env = os.environ.get
    tcport = env('SYNCPLAY_TCP_PORT', None)
    wsport = env('SYNCPLAY_WS_PORT', None)
    host = env('SYNCPLAY_HOST', 'syncplay.pl:8997')
    tls = env('SYNCPLAY_TLS_PATH')

    if tcport is not None:
        setupTCPFactory(SyncplayProxyTCPFactory(tcport, host, tls), int(tcport))
    if wsport is not None:
        setupWSFactory(SyncplayProxyWSFactory(wsport, host, tls), int(wsport))

    reactor.run()
# Script entry point: start the proxy when run directly.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1932063 | <filename>spotjuk/src/schemas/create_database.py
import os
# Remove any stale database file so the schema below is created from scratch.
# Bug fix: the unconditional os.remove crashed with FileNotFoundError on a
# fresh checkout where the database does not exist yet.
try:
    os.remove('../database.db')
except FileNotFoundError:
    pass  # first run: nothing to delete

import sqlite3

conn = sqlite3.connect('../database.db')
cursor = conn.cursor()

# Create the categories table used by the application.
cursor.execute("""
CREATE TABLE categories (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
    categorie VARCHAR(20) NOT NULL,
    cover VARCHAR(50) NOT NULL
);
""")
print("Tabela criada com sucesso!")

# Disconnect from the database (conn.close() follows immediately after).
conn.close() | StarcoderdataPython |
8145045 | import datetime
from Bearing import Bearing
class Sensors:
    """Snapshot of all boat sensor readings (currently stubbed with fixed values).

    setAllValues() re-reads every sensor; the get* accessors return the
    values captured at that time.
    """

    def __init__(self):
        self.setAllValues()

    # ---------------------------------------------------------------- getters
    def getWindDirection(self):
        return self.__windDirection

    def getCompassBearing(self):
        return self.__compassCourse

    def getWinkelgesch(self):
        # "Winkelgeschwindigkeit" = angular velocity.
        return self.__winkelgesch

    def getBattery(self):
        # "Akku" = battery.
        return self.__Akku

    def getPosition(self):
        return self.__Position

    def getDateTime(self):
        return self.__DateTime

    def getCourseMadeGood(self):
        return self.__courseMadeGood

    def getSpeedOverGround(self):
        return self.__speedOverGround

    # ------------------------------------------------------- capture readings
    def setAllValues(self):
        """Read every sensor once and cache the results on the instance."""
        self.__windDirection = self.__setWindDirection()
        self.__compassCourse = self.__setCompassBearing()
        self.__winkelgesch = self.__setWinkelgesch()
        self.__Akku = self.__setBattery()
        self.__Position = self.__setPosition()
        self.__courseMadeGood = self.__setCourseMadeGood()
        self.__speedOverGround = self.__setSpeedOverGround()
        self.__DateTime = self.__setDateTime()

    # --------------------------------------- individual sensor reads (stubs)
    def __setWindDirection(self):
        return Bearing(0)

    def __setCompassBearing(self):
        return Bearing(40)

    def __setWinkelgesch(self):
        return 2

    def __setBattery(self):
        return 100

    def __setPosition(self):
        return [53.570110, 9.674878]

    def __setDateTime(self):
        return datetime.datetime.now()

    def __setCourseMadeGood(self):
        return Bearing(45)

    def __setSpeedOverGround(self):
        return 1.8
| StarcoderdataPython |
233917 | n1 = int(input('Input an integer number: '))
n2 = int(input('Input another integer number: '))
# Report the sum of the two integers read above (n1 comes from the previous line).
print(f'The sum between {n1} and {n2} is equal to: {n1 + n2}')
| StarcoderdataPython |
106936 | #! /opt/conda/bin/python3
""" File containing keras callback class to collect runstats of the training process """
# Copyright 2018 FAU-iPAT (http://ipat.uni-erlangen.de/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Any, Dict, List
from keras.callbacks import Callback # type: ignore
import keras.backend as backend # type: ignore
_TRunStats = Dict[str, List[float]] # pylint: disable=invalid-name
class RunStatsCallback(Callback):
    """
    Callback class to log runstats of the keras model

    This class stores the default history of the optimizer plus some
    additional values. Those include a value for each epoch for:
    - Time required
    - Base learning rate of the optimizer

    At the end of the training those values can be accessed via the
    runstats property.
    """

    def __init__(self) -> None:
        """
        Class initialization
        """
        super(RunStatsCallback, self).__init__()
        self._runstats = {}  # type: _TRunStats
        self._epoch_time_start = 0.0
        # Allocate epoch and progress info tensors once; `progress` is the
        # symbolic ratio epoch/max_epochs, so it tracks the variables as
        # they are updated during training.
        self._epoch = backend.variable(0.0, dtype='float32', name='RunStatsCallbackEpoch')
        self._max_epochs = backend.variable(1.0, dtype='float32', name='RunStatsCallbackMaxEpochs')
        self._progress = self._epoch / self._max_epochs

    def on_train_begin(self, logs: Dict[str, Any] = None) -> None:
        """
        Callback method to setup at beginning of training

        :param logs: Log data from keras
        """
        self._runstats = {'time': [], 'lr': []}
        # Fall back to 1.0 if keras did not report an epoch count so the
        # progress ratio stays well-defined.
        epochs = self.params['epochs'] if self.params['epochs'] else 1.0
        backend.set_value(self._max_epochs, epochs)
        backend.set_value(self._epoch, 0.0)

    def on_epoch_begin(self, epoch: int, logs: Dict[str, Any] = None) -> None:
        """
        Callback method called at beginning of each epoch

        :param epoch: Epoch to be started
        :param logs: Log data from keras
        """
        self._epoch_time_start = time.time()
        backend.set_value(self._epoch, epoch)

    def on_epoch_end(self, epoch: int, logs: Dict[str, Any] = None) -> None:
        """
        Callback method called at the end of each epoch

        :param epoch: Epoch to be ended
        :param logs: Log data from keras
        """
        backend.set_value(self._epoch, epoch+1)
        # Store default history data
        if logs:
            for name in logs:
                if name not in self._runstats:
                    self._runstats[name] = []
                self._runstats[name].append(logs[name])
        # Additionally store time required
        self._runstats['time'].append(time.time() - self._epoch_time_start)
        # Additionally store base learning rate of the optimizer
        # (not every optimizer exposes `lr`, hence the AttributeError guard)
        try:
            learning_rate = self.model.optimizer.lr
            self._runstats['lr'].append(backend.get_value(learning_rate))
        except AttributeError:
            pass

    @property
    def runstats(self) -> _TRunStats:
        """
        runstats property

        :return: runstats dictionary (metric name -> per-epoch values)
        """
        return self._runstats

    @property
    def progress(self):
        """
        Progress tensor property

        :return: progress tensor (epoch / max_epochs)
        """
        return self._progress
| StarcoderdataPython |
1843524 | from dagster import solid
from dagster.core.execution.context.compute import AbstractComputeExecutionContext
from hca_manage.check import CheckManager
from hca_manage.common import ProblemCount
from hca_orchestration.contrib.dagster import short_run_id
@solid(
    required_resource_keys={'data_repo_client', 'hca_dataset_operation_config', 'hca_manage_config'}
)
def post_import_validate(context: AbstractComputeExecutionContext) -> ProblemCount:
    """
    Checks if the target dataset has any rows with duplicate IDs or null file references.
    """
    manager = CheckManager(
        environment=context.resources.hca_manage_config.gcp_env,
        project=context.resources.hca_manage_config.google_project_name,
        dataset=context.resources.hca_dataset_operation_config.dataset_name,
        data_repo_client=context.resources.data_repo_client,
        snapshot=False,
    )
    return manager.check_for_all()
@solid(
    required_resource_keys={'slack', 'hca_dataset_operation_config'}
)
def notify_slack_of_egress_validation_results(
        context: AbstractComputeExecutionContext,
        validation_results: ProblemCount
) -> str:
    """Post the post-import validation summary to Slack and return the message text."""
    message = construct_validation_message(
        validation_results,
        context.resources.hca_dataset_operation_config.dataset_name,
        short_run_id(context.run_id),
    )
    context.resources.slack.send_message(message)
    return message
def construct_validation_message(validation_results: ProblemCount, dataset_name: str, run_id: str) -> str:
    """Render the post-validation result for *dataset_name* as a Slack-ready string.

    Returns a one-line pass message when no problems were found, otherwise a
    multi-line breakdown of every problem counter.
    """
    if not validation_results.has_problems():
        return f"HCA dataset {dataset_name} has passed post-validation."

    lines = [
        f"Problems identified in post-validation for HCA dataset {dataset_name}:",
        f"Run ID: {run_id}",
        f"Duplicate lines found: {validation_results.duplicates}",
        f"Null file references found: {validation_results.null_file_refs}",
        f"Dangling project references found: {validation_results.dangling_project_refs}",
        f"Empty links table: {validation_results.empty_links_count}",
        f"Empty projects table: {validation_results.empty_projects_count}",
    ]
    return "\n".join(lines)
| StarcoderdataPython |
133489 | '''
We'll put utility functions here - timing decorators, exiting functions, and maths stuff are here atm.
<NAME>
28/10/2019
'''
#------------------------------------------------------------------
import time
from math import sqrt
import sys
from ast import literal_eval as lit
import random
from pathlib import Path
import pandas as pd
import pickle as pkl
from util.message import message
#------------------------------------------------------------------
TIMING_INFO = True
#------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
# system functions
#-----------------------------------------------------------------------------------------
def exit(code):
    '''
    Terminate the program: 0 signals failure, 1 signals success; any other
    int is logged as an unknown status. Note this intentionally shadows the
    builtin exit for callers of this module.
    '''
    if not isinstance(code, int):
        # Typo fix in the logged message ("interger" -> "integer").
        message.logError('Exit code must be an integer.')
        exit(0)  # recurse once with a failure status
    if code == 0:
        message.logError('Exiting program with failure status.')
    elif code == 1:
        # On success, dump collected timing info before leaving.
        if TIMING_INFO is True:
            showTiming()
        message.logDebug('Exiting program with success status.')
    else:
        message.logError('Exiting program with unknown error status ('+str(code)+')')
    sys.exit()
#-----------------------------------------------------------------------------------------
# pickle functions
#-----------------------------------------------------------------------------------------
def save(save_this, file_name):
    '''Pickle *save_this* to *file_name* (binary), closing the file even on error.'''
    with open(file_name, 'wb') as output:
        pkl.dump(save_this, output)
def load(file_name):
    '''Unpickle and return the object stored in *file_name*, closing the file even on error.'''
    with open(file_name, 'rb') as pkl_file:
        return pkl.load(pkl_file)
#-----------------------------------------------------------------------------------------
# timing functions
#-----------------------------------------------------------------------------------------
def timeit(method):
    '''
    Use as a decorator on methods to print timing information for that call
    to the log file. Timing is only collected while the module-level
    TIMING_INFO flag is True.
    '''
    from functools import wraps  # local import keeps the module import block unchanged

    @wraps(method)  # preserve the wrapped function's name/docstring
    def timed(*args, **kw):
        if TIMING_INFO is True:
            ts = time.time()
        result = method(*args, **kw)
        if TIMING_INFO is True:
            te = time.time()
            message.logTiming(method, te-ts)
        return result
    return timed
def showTiming():
    '''
    Write a per-method summary (mean, sigma, total) of all timing info
    collected in the message class to the log file.
    '''
    if TIMING_INFO is not True:
        return
    message.logDebug("Showing average timing info for method instances:", "utilities::showTiming")
    for k, v in message.timing.items():
        message.logDebug("{0:.2f} (sigma={1:.2f}, total={2:.2f}): {3}".format(mean(v), stdEstimate(v), sum(v), k))
#-----------------------------------------------------------------------------------------
# maths functions
#-----------------------------------------------------------------------------------------
def mean(x):
    '''
    Arithmetic mean of a list of numbers, always returned as a float.
    '''
    return sum(x) / float(len(x))

def stdEstimate(x):
    '''
    Estimate of the standard deviation (population form, dividing by N)
    of a list of numbers.
    '''
    centre = mean(x)
    squared_devs = [(v - centre) * (v - centre) for v in x]
    return sqrt((1. / len(x)) * sum(squared_devs))
#-----------------------------------------------------------------------------------------
# list functions
#-----------------------------------------------------------------------------------------
def randomSample(lst, num_sample):
    """Return num_sample elements drawn at random (without replacement) from lst."""
    checks = (
        (lst, list, "lst must be a list instance."),
        (num_sample, int, "num_sample must be an int instance."),
    )
    for value, expected_type, error_text in checks:
        if not isinstance(value, expected_type):
            message.logError(error_text, "utilities::randomSample")
            exit(0)
    return random.sample(lst, num_sample)
#-----------------------------------------------------------------------------------------
# string functions
#-----------------------------------------------------------------------------------------
def parseStr(str):
    """
    Convert a JSON-style literal string into the equivalent Python literal,
    mapping null/false/true onto None/False/True before evaluating.

    NOTE(review): the parameter shadows the builtin ``str``; kept as-is to
    preserve the call interface.
    """
    normalized = str.replace("null", "None").replace("false", "False").replace("true", "True")
    return lit(normalized)
#-----------------------------------------------------------------------------------------
# date functions
#-----------------------------------------------------------------------------------------
def dateCheck(date, start_date, end_date):
    """
    Will check if :param:`date` is between :param:`start_date` and
    :param:`end_date` (inclusive).

    All three arguments are "Y-M-D" strings. Components are compared as
    integers — a bug fix: the original compared them as strings, so
    non-zero-padded dates (e.g. month "9" vs "10") compared incorrectly.
    Zero-padded ISO dates behave exactly as before.

    :return: Boolean.
    """
    date = [int(part) for part in date.split("-")]
    start_date = [int(part) for part in start_date.split("-")]
    if not _dateCheck(start_date, date):
        return False
    end_date = [int(part) for part in end_date.split("-")]
    return _dateCheck(date, end_date)
def _dateCheck(date_1, date_2):
"""
Will return True if date_1 is before or equal to date_2.
Date params are lists with 3 elements, year, month, day.
"""
if date_1[0] < date_2[0]:
return True
if date_1[0] > date_2[0]:
return False
if date_1[1] < date_2[1]:
return True
if date_1[1] > date_2[1]:
return False
if date_1[2] < date_2[2]:
return True
if date_1[2] > date_2[2]:
return False
return True
def merge_events(api_events: list, pd_events: pd.DataFrame) :
    """Append the spreadsheet-sourced events in *pd_events* (converted to the
    API event dict shape) onto *api_events* and return the combined list.

    Side effect: writes "expanded_events.csv" into the working directory.
    """
    # Round-trip through CSV before renaming. NOTE(review): the original
    # comment said "I don't know why this is necessary" — presumably it
    # normalises dtypes/indices; confirm before removing.
    pd_events.to_csv("expanded_events.csv", index=False)
    pd_events = pd.read_csv("expanded_events.csv")
    # Map the spreadsheet column names onto the API's event field names.
    pd_events = pd_events.rename(columns={"Date": "startDate",
                "TimeStart": "startTimeString",
                "TimeEnd": "endTimeString",
                "Description": "description",
                "Category": "genre",
                "Title": "title",
                "Location": "venue",
                "URL": "webLink"
                })
    pd_events['headline'] = pd_events['title']
    # Normalise the start date to YYYY-MM-DD and reuse it as the end date
    # (these rows carry no separate end date).
    pd_events['startDate'] = pd.to_datetime(pd_events['startDate'])
    pd_events['startDate'] = pd_events['startDate'].dt.strftime('%Y-%m-%d')
    pd_events["endDate"] = pd_events["startDate"]
    pd_events["image"] = ""  #"https://theparkcentre.org.uk/wp/wp-content/uploads/2017/07/cropped-logo-small-1.png"
    # One dict per row, matching the API event shape; genre becomes a
    # single-element list and image a {"url": ...} mapping.
    hardcoded_list_of_dicts = list( pd_events.T.to_dict().values() )
    for event in hardcoded_list_of_dicts :
        event["genre"] = [event["genre"]]
        event["image"] = {"url": event["image"]}
    return api_events + hardcoded_list_of_dicts
def get_project_root() -> Path:
"""Returns project root folder."""
return Path(__file__).parent.parent | StarcoderdataPython |
3500024 | '''
CLI entry-point for salt-api
'''
# Import python libs
import sys
import logging
# Import salt libs
import salt.utils.verify
from salt.utils.parsers import (
ConfigDirMixIn,
DaemonMixIn,
LogLevelMixIn,
MergeConfigMixIn,
OptionParser,
OptionParserMeta,
PidfileMixin)
# Import salt-api libs
import saltapi.client
import saltapi.config
import saltapi.version
log = logging.getLogger(__name__)
class SaltAPI(OptionParser, ConfigDirMixIn, LogLevelMixIn, PidfileMixin,
        DaemonMixIn, MergeConfigMixIn):
    '''
    The cli parser object used to fire up the salt api system.

    The mixins contribute option parsing, config-dir handling, log level
    setup, pidfile management, daemonization and config merging; the
    metaclass (Python 2 style __metaclass__) wires them together.
    '''
    __metaclass__ = OptionParserMeta

    VERSION = saltapi.version.__version__

    # ConfigDirMixIn config filename attribute
    _config_filename_ = 'master'
    # LogLevelMixIn attributes
    _default_logging_logfile_ = '/var/log/salt/api'

    def setup_config(self):
        # Load the api configuration from the resolved config file path.
        return saltapi.config.api_config(self.get_config_file_path())

    def run(self):
        '''
        Run the api
        '''
        self.parse_args()

        try:
            if self.config['verify_env']:
                logfile = self.config['log_file']
                # Only verify file-backed logs; tcp/udp/file:// targets are
                # syslog-style and have no local file to check.
                if logfile is not None and not logfile.startswith('tcp://') \
                        and not logfile.startswith('udp://') \
                        and not logfile.startswith('file://'):
                    # Logfile is not using Syslog, verify
                    salt.utils.verify.verify_files(
                        [logfile], self.config['user']
                    )
        except OSError as err:
            log.error(err)
            sys.exit(err.errno)

        self.setup_logfile_logger()
        client = saltapi.client.SaltAPIClient(self.config)
        # Daemonize/set pidfile before handing control to the client loop.
        self.daemonize_if_required()
        self.set_pidfile()
        client.run()
| StarcoderdataPython |
12808033 | <reponame>hellysmile/aiohttp_request
import contextvars
import typing
from functools import partial
from aiohttp import web
from werkzeug.local import LocalProxy
__version__ = '0.0.1'
# Context variable holding the aiohttp request for the currently running handler.
ctx = contextvars.ContextVar('request')  # type: contextvars.ContextVar
class ThreadContext:
    """Callable wrapper that runs ``fn`` inside a snapshot of the current context.

    The :mod:`contextvars` context is copied at construction time, so the
    wrapped callable later observes the context variables as they were at
    that moment — useful when handing work off to another thread.
    """
    __slots__ = ('_ctx', '_fn')
    def __init__(self, fn: typing.Callable) -> None:
        self._fn = fn
        # Snapshot the caller's context variables right away.
        self._ctx = contextvars.copy_context()
    def __call__(self, *args, **kwargs):
        # Context.run forwards positional/keyword arguments itself, which
        # is equivalent to running functools.partial(fn, *args, **kwargs).
        return self._ctx.run(self._fn, *args, **kwargs)
def middleware_factory() -> typing.Callable:
    """Build an aiohttp middleware exposing the current request via ``ctx``.

    The middleware binds each incoming :class:`web.Request` to the
    ``request`` context variable for the duration of its handler, so code
    anywhere below the handler can reach it through :func:`get_request`.

    Fix: the return annotation was ``typing.Awaitable``, but the factory
    returns the middleware *callable*, not an awaitable.
    """
    @web.middleware
    async def middleware(
        request: web.Request,
        handler: typing.Callable,
    ) -> web.Response:
        token = ctx.set(request)  # type: contextvars.Token
        try:
            return await handler(request)
        finally:
            # Always unbind, even when the handler raised.
            ctx.reset(token)
    return middleware
def get_request() -> web.Request:
    """Return the request bound to the current task's context."""
    current_request = ctx.get()
    return current_request
# Proxy that resolves to the active request on every attribute access.
grequest = LocalProxy(get_request)
| StarcoderdataPython |
380311 | <filename>she-process.py
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# pip install pandas openpyxl jinja2 faker
import pandas as pd
import pathlib
from jinja2 import Template
from faker import Faker
import sys
# In[2]:
# Name pool: one column per language code ('en', 'es', ...), read from out.csv.
df_names = pd.read_csv('out.csv')
# print(df_names[:4])
# Smoke-test sample (fixed seed so notebook output stays reproducible).
names = df_names['en'].sample(n=1, random_state=1)
# print(names.values)
# print(names.values[0])
# In[3]:
# goal: nested lists of form: [index, suffix, birthDate, centre...]
# for now, suffix = name and suffix
# Faker instance used to generate random birth dates below.
fake = Faker()
def genfsh(lang, obs):
    """Generate ``obs`` FHIR Shorthand patient files for language ``lang``.

    For each observation a random name is drawn from the language column of
    ``df_names``, a random adult birth date is generated with Faker, and
    ``she-template.fsh`` is rendered into ``input/fsh/<lang><i>.fsh``.

    Supported language codes: en, es, fr, ar, zh, ru.  An unsupported code
    leaves ``orgname``/``centre`` unbound and raises ``NameError`` at render
    time, exactly as before (the module-level caller guards against this).
    """
    # The organisation/centre labels depend only on the language, so resolve
    # them once up front instead of re-testing on every loop iteration
    # (the original recomputed this if-chain for each observation).
    if lang == 'en':
        orgname = "Government Hospital"
        centre = 'Vaccination Site'
    if lang == 'es':
        orgname = "Hospital del Gobierno"
        centre = "Sitio de vacunación"
    if lang == 'fr':
        orgname = "Hôpital du gouvernement"
        centre = "Site de vaccination"
    if lang == 'ar':
        orgname = "<NAME>"
        centre = "موقع التطعيم"
    if lang == 'zh':
        orgname = "政府医院"
        centre = "疫苗接种现场"
    if lang == 'ru':
        orgname = "Государственная больница"
        centre = "Сайт вакцинации"
    # The template never changes either: read and compile it once
    # (the original re-read and re-compiled it per observation).
    path = pathlib.Path('she-template.fsh')
    t = Template(path.read_text())
    for i in range(obs):
        tempname = df_names[lang].sample(n=1)
        tempname = tempname.values[0]
        suffix = lang + str(i)
        name = tempname + str(i)
        # Random adult birth date (between 15 and 80 years ago).
        x = fake.date_between(start_date='-80y', end_date='-15y')
        birthDate = str(x)
        identifier = lang + str(9999) + str(i)
        # this prints oddly bc of the mix of rtl-ltr langs?
        print(lang, suffix, name, birthDate, identifier)
        # render through jinja2 and write one .fsh file per patient
        msg = t.render(
            suffix=suffix,
            name=name,
            birthDate=birthDate,
            identifier=identifier,
            orgname=orgname,
            centre=centre
        )
        path_out = pathlib.Path(f"input/fsh/{suffix}.fsh")
        path_out.write_text(msg)
# In[4]:
print("command:", str(sys.argv))
langs = ['en', 'es', 'fr', 'ar', 'zh', 'ru']
# Guard against missing CLI arguments: the original indexed sys.argv
# unconditionally and crashed with IndexError when run without both a
# language code and an observation count.  Any incomplete or unknown
# invocation now falls back to the same default branch as before.
if len(sys.argv) > 2 and sys.argv[1] in langs:
    genfsh(sys.argv[1], int(sys.argv[2]))
else:
    genfsh('ar', 100)
# In[ ]:
| StarcoderdataPython |
3562866 | <gh_stars>0
# Copyright 2018-2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from pysmapi.smapi import *
class Profile_Lock_Query_DM(Request):
    """SMAPI Profile_Lock_Query_DM request/response wrapper.

    Decodes the lock information structure returned by z/VM SMAPI: the
    lock type, who holds the profile lock, and the per-device lock list.
    """
    def __init__(self,
                 **kwargs):
        super(Profile_Lock_Query_DM, self).__init__(**kwargs)
        # Response values
        self._locked_type = ""
        self._profile_locked_by = ""
        self._locked_dev_array = []
    @property
    def locked_type(self):
        return self._locked_type
    @locked_type.setter
    def locked_type(self, value):
        self._locked_type = value
    @property
    def profile_locked_by(self):
        return self._profile_locked_by
    @profile_locked_by.setter
    def profile_locked_by(self, value):
        self._profile_locked_by = value
    @property
    def locked_dev_array(self):
        return self._locked_dev_array
    @locked_dev_array.setter
    def locked_dev_array(self, value):
        self._locked_dev_array = value
    def unpack(self, buf):
        """Decode the SMAPI response buffer into this object's fields."""
        offset = 0
        # lock_info_structure_length (int4)
        slen, = struct.unpack("!I", buf[offset:offset + 4])
        offset += 4
        # locked_type (string,5-6,char26)
        # profile_locked_by (string,0-8,char42)
        if slen > 0:
            self._locked_type, _, self._profile_locked_by = \
                b2s(buf[offset:offset + slen]).partition(" ")
            offset += slen
        # locked_dev_array_length (int4)
        alen, = struct.unpack("!I", buf[offset:offset + 4])
        offset += 4
        # locked_dev_array
        self._locked_dev_array = []
        while alen > 0:
            entry = Obj()
            self._locked_dev_array.append(entry)
            # dev_address (string,1-4,char16)
            # dev_locked_by (string,1-8,char42)
            # BUG FIX: the original left "entry.dev_address," as a bare
            # no-op expression and assigned the whole 2-tuple from
            # struct.unpack to entry.dev_locked_by; it also passed a str
            # (via b2s) to struct.unpack, which requires bytes.  Unpack
            # the raw bytes first, then decode each field.
            dev_address, dev_locked_by = struct.unpack(
                "4s8s", buf[offset:offset + 12])
            entry.dev_address = b2s(dev_address)
            entry.dev_locked_by = b2s(dev_locked_by)
            offset += 12
            alen -= 12
| StarcoderdataPython |
1788524 | <filename>routemaster/logging/__init__.py
"""Logging plugin subsystem."""
from routemaster.logging.base import BaseLogger
from routemaster.logging.plugins import (
PluginConfigurationException,
register_loggers,
)
from routemaster.logging.split_logger import SplitLogger
from routemaster.logging.python_logger import PythonLogger
__all__ = (
'BaseLogger',
'SplitLogger',
'PythonLogger',
'register_loggers',
'PluginConfigurationException',
)
| StarcoderdataPython |
265428 | # Runners group
# Move every runner into our group; pop() empties `runners` back-to-front.
runners = ['harry', 'ron', 'harmoine']
our_group = ['mukul']
while runners:
    athlete = runners.pop()
    print("Adding user: " + athlete.title())
    our_group.append(athlete)
print("That's our group:- ")
# BUG FIX: the original wrote `for our_group in our_group`, which shadowed
# the list with each string and, after the loop, destroyed the list binding.
for member in our_group:
    print(member.title() + " from harry potter!")
# Interactive poll: collect "dream vacation" answers until someone says 'no'.
Dream_vacation = {}
polling_active = True
while polling_active:
    name = input("\nWhat is your name?")
    location = input("Which is your dream vacation ?")
    Dream_vacation[name] = location
    repeat = input("would you like to let another person respond? (yes/ no)")
    # Any answer other than 'no' keeps the poll open.
    polling_active = repeat != 'no'
# Show every collected response.
print("\n ---- Poll Results ---- ")
for name, location in Dream_vacation.items():
    print(name + " ok you want to go " + location.title() + " !")
331416 | <reponame>itzpc/Google-MLCC-NITJ
def main():
    """Print the message matching the hard-coded menu choice."""
    choice = 'z'
    responses = {
        'a': "You chose 'a'.",
        'b': "You chose 'b'.",
        'c': "You chose 'c'.",
    }
    # Unknown choices fall through to the invalid-choice message.
    print(responses.get(choice, "Invalid choice."))
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6401396 | from pyppeteer import launch
async def screengrab(url):
    """Render *url* in headless Chromium and save a 1080p shot to temp.png."""
    viewport = {"width": 1920, "height": 1080, "deviceScaleFactor": 1}
    browser = await launch({"slowMo": 5}, args=["--no-sandbox"])
    tab = await browser.newPage()
    await tab.setViewport(viewport)
    # Wait until the network is (almost) idle before capturing.
    await tab.goto(url, waitUntil="networkidle2")
    await tab.screenshot({"path": "temp.png"})
    await browser.close()
| StarcoderdataPython |
6699701 | from typing import List, Iterator
import os
import logging
from ..module import SafeFilenameModule
logger = logging.getLogger(__name__)
class PackageScanner:
    """
    Scans a package for all the Python modules within it.
    Usage:
        package = SafeFilenameModule('mypackage', '/path/to/mypackage/__init__.py')
        scanner = PackageScanner(package)
        modules = scanner.scan_for_modules()
    """
    def __init__(self, package: SafeFilenameModule) -> None:
        self.package = package
    def scan_for_modules(self) -> List[SafeFilenameModule]:
        """
        Returns:
            List of the modules contained in the package.
        """
        package_directory = os.path.dirname(self.package.filename)
        return [
            SafeFilenameModule(
                self._module_name_from_filename(path, package_directory),
                path,
            )
            for path in self._get_python_files_inside_package(package_directory)
        ]
    def _get_python_files_inside_package(self, directory: str) -> Iterator[str]:
        """
        Yield the full filenames of all Python files under ``directory``.
        Subtrees that are not Python packages (no ``__init__.py``) and
        hidden/ignored directories are pruned.
        """
        for root, subdirs, filenames in os.walk(directory):
            if '__init__.py' not in filenames:
                # Not a package: prune this whole subtree from the walk.
                subdirs[:] = []
                continue
            # Drop hidden/ignored directories in place so os.walk skips them.
            subdirs[:] = [d for d in subdirs if not self._should_ignore_dir(d)]
            for filename in filenames:
                if self._is_python_file(filename):
                    yield os.path.join(root, filename)
    def _should_ignore_dir(self, directory: str) -> bool:
        # TODO: make this configurable.
        # Skip hidden directories and directories that look like Django migrations.
        return directory.startswith('.') or directory == 'migrations'
    def _is_python_file(self, filename: str) -> bool:
        """
        Given a filename (no path component), return whether it's a Python file.
        """
        return filename.endswith('.py') and not filename.startswith('.')
    def _module_name_from_filename(self, filename_and_path: str, package_directory: str) -> str:
        """
        Translate a file path inside the package into an absolute module
        name suitable for importing.
        """
        package_name = os.path.basename(package_directory)
        # Strip the package prefix, the leading separator and the '.py' suffix.
        relative = filename_and_path[len(package_directory) + 1:-3]
        parts = [package_name, *relative.split(os.sep)]
        # '<pkg>.__init__' is just '<pkg>'.
        if parts[-1] == '__init__':
            del parts[-1]
        return '.'.join(parts)
| StarcoderdataPython |
1629836 | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def detail_url(recipe_id):
    """Return the detail URL for the recipe with ``recipe_id``."""
    return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='meat'):
    """Create and return a sample Tag owned by ``user``."""
    return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='paper'):
    """Create and return a sample Ingredient owned by ``user``."""
    return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
    """Create and return a sample Recipe; ``params`` override the defaults."""
    default = {
        "title": "sample recipe",
        "time_minutes": 12,
        "price": 12.00
    }
    default.update(params)
    return Recipe.objects.create(user=user, **default)
class PublicAPITest(TestCase):
    """Tests for the recipe API that don't require authentication."""
    def setUp(self):
        # Unauthenticated client.
        self.client = APIClient()
    def test_login_required(self):
        """Test that any request without authentication fails"""
        res = self.client.get(RECIPES_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateAPITest(TestCase):
    """Tests for the recipe API that require an authenticated user."""
    def setUp(self):
        # Authenticated client; every request below runs as self.user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            email='<EMAIL>',
            password='<PASSWORD>',
            name='<NAME>')
        self.client.force_authenticate(user=self.user)
    def test_retrieve_recipes(self):
        """Test that retrieving recipes is successful"""
        sample_recipe(self.user)
        sample_recipe(self.user)
        res = self.client.get(RECIPES_URL)
        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_limited_to_user(self):
        """Test that requests are limited to authenticated user"""
        user2 = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        # Another user's recipe must not appear in the response.
        sample_recipe(user=user2, title='Stake')
        sample_recipe(self.user)
        user_recipes = Recipe.objects.filter(user=self.user)
        serializer = RecipeSerializer(user_recipes, many=True)
        res = self.client.get(RECIPES_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)
    def test_view_recipe_detail(self):
        """Test viewing of recipe detail"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        serializer = RecipeDetailSerializer(recipe)
        url = detail_url(recipe.id)
        res = self.client.get(url)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_create_basic_recipe(self):
        """Test that basic recipe is created successfully"""
        payload = {
            'title': "chocolate cake",
            'time_minutes': 30,
            'price': 7.00
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # Every posted field must be stored on the created recipe.
        recipe = Recipe.objects.get(id=res.data['id'])
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))
    def test_create_recipes_with_tags(self):
        """Test creating recipe with tags"""
        tag1 = sample_tag(self.user, name='vegan')
        tag2 = sample_tag(self.user, name='barbra')
        payload = {
            'title': 'Lala',
            'time_minutes': 7,
            'price': 23.50,
            'tags': [tag1.id, tag2.id]
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)
    def test_create_recipe_with_ingredient(self):
        """Test create recipe with ingredient"""
        ingredient1 = sample_ingredient(self.user, 'paper')
        ingredient2 = sample_ingredient(self.user, 'salt')
        payload = {
            'title': 'Maya',
            'time_minutes': 7,
            'price': 23.50,
            'ingredients': [ingredient1.id, ingredient2.id]
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)
    def test_partial_update_recipe(self):
        """Test updating recipe with patch method"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Asman')
        payload = {
            'title': 'Chicken soup',
            'tags': [new_tag.id]
        }
        url = detail_url(recipe.id)
        self.client.patch(url, payload)
        # PATCH replaces only the submitted fields; the tag set is swapped.
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 1)
        self.assertIn(new_tag, tags)
    def test_full_update_recipe(self):
        """Test updating recipe with put method"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Spageti with meat',
            'time_minutes': 30,
            'price': 100.00,
        }
        url = detail_url(recipe.id)
        self.client.put(url, payload)
        # PUT replaces the whole object: omitted tags are cleared.
        recipe.refresh_from_db()
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))
        self.assertEqual(len(recipe.tags.all()), 0)
    def test_filter_recipes_by_tag(self):
        """Test filtering recipes by tag names"""
        recipe1 = sample_recipe(user=self.user, title='recipe1')
        recipe2 = sample_recipe(user=self.user, title='recipe2')
        recipe3 = sample_recipe(user=self.user, title='recipe3')
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        res = self.client.get(
            RECIPES_URL,
            {'tags': f"{tag1.id}, {tag2.id}"}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        # Only recipes carrying one of the requested tags are returned.
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
    def test_filter_recipes_by_ingredients(self):
        """Test filtering recipes by ingredient"""
        recipe1 = sample_recipe(user=self.user, title='recipe1')
        recipe2 = sample_recipe(user=self.user, title='recipe2')
        recipe3 = sample_recipe(user=self.user, title='recipe3')
        ingredient1 = sample_ingredient(user=self.user, name='paper')
        ingredient2 = sample_ingredient(user=self.user, name='salt')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        res = self.client.get(
            RECIPES_URL,
            {'ingredients': f"{ingredient1.id},{ingredient2.id}"}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
| StarcoderdataPython |
def console(outfile,highlighter):
    """Print each line of *outfile* and scan its words for 'TAB' markers.

    NOTE(review): the bare ``highlighter`` expression below is a no-op —
    it references the callable without invoking it.  The intent was
    presumably something like ``highlighter(word)``; confirm the expected
    signature against the caller before changing behaviour.
    """
    with open(outfile, 'r') as fin:
        for line in fin:
            print(line)
            for word in line.split():
                if 'TAB' in word:
                    highlighter  # no-op; see NOTE(review) in the docstring
1808627 | from datetime import datetime
from functools import lru_cache
from houdini.data import db
class Penguin(db.Model):
    """Gino model for a player account and all of its persisted state."""
    __tablename__ = 'penguin'
    # --- Account / identity columns ---
    id = db.Column(db.Integer, primary_key=True, server_default=db.text("nextval('\"penguin_id_seq\"'::regclass)"))
    username = db.Column(db.String(12), nullable=False, unique=True)
    nickname = db.Column(db.String(30), nullable=False)
    password = db.Column(db.CHAR(60), nullable=False)
    email = db.Column(db.String(255), nullable=False, index=True)
    registration_date = db.Column(db.DateTime, nullable=False, server_default=db.text("now()"))
    active = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    safe_chat = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    last_paycheck = db.Column(db.DateTime, nullable=False, server_default=db.text("now()"))
    minutes_played = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
    moderator = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    stealth_moderator = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    character = db.Column(db.ForeignKey('character.id', ondelete='CASCADE', onupdate='CASCADE'))
    igloo = db.Column(db.ForeignKey('penguin_igloo_room.id', ondelete='CASCADE', onupdate='CASCADE'))
    coins = db.Column(db.Integer, nullable=False, server_default=db.text("500"))
    # --- Equipped item slots (all reference item.id) ---
    color = db.Column(db.ForeignKey('item.id', ondelete='CASCADE', onupdate='CASCADE'))
    head = db.Column(db.ForeignKey('item.id', ondelete='CASCADE', onupdate='CASCADE'))
    face = db.Column(db.ForeignKey('item.id', ondelete='CASCADE', onupdate='CASCADE'))
    neck = db.Column(db.ForeignKey('item.id', ondelete='CASCADE', onupdate='CASCADE'))
    body = db.Column(db.ForeignKey('item.id', ondelete='CASCADE', onupdate='CASCADE'))
    hand = db.Column(db.ForeignKey('item.id', ondelete='CASCADE', onupdate='CASCADE'))
    feet = db.Column(db.ForeignKey('item.id', ondelete='CASCADE', onupdate='CASCADE'))
    photo = db.Column(db.ForeignKey('item.id', ondelete='CASCADE', onupdate='CASCADE'))
    flag = db.Column(db.ForeignKey('item.id', ondelete='CASCADE', onupdate='CASCADE'))
    permaban = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    # --- Stampbook customisation ---
    book_modified = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    book_color = db.Column(db.SmallInteger, nullable=False, server_default=db.text("1"))
    book_highlight = db.Column(db.SmallInteger, nullable=False, server_default=db.text("1"))
    book_pattern = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    book_icon = db.Column(db.SmallInteger, nullable=False, server_default=db.text("1"))
    # --- EPF (agent) progression ---
    agent_status = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    field_op_status = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    career_medals = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
    agent_medals = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
    last_field_op = db.Column(db.DateTime, nullable=False, server_default=db.text("now()"))
    com_message_read_date = db.Column(db.DateTime, nullable=False, server_default=db.text("now()"))
    # --- Card-Jitsu progression ---
    ninja_rank = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    ninja_progress = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    fire_ninja_rank = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    fire_ninja_progress = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    water_ninja_rank = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    water_ninja_progress = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    ninja_matches_won = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
    fire_matches_won = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
    water_matches_won = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
    # --- Puffle-related flags ---
    rainbow_adoptability = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    has_dug = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    puffle_handler = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    nuggets = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    walking = db.Column(db.ForeignKey('penguin_puffle.id', ondelete='CASCADE', onupdate='CASCADE'))
    opened_playercard = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    special_wave = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    special_dance = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    special_snowball = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    map_category = db.Column(db.SmallInteger, nullable=False, server_default=db.text("0"))
    status_field = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
    # --- Parental-control timer ---
    timer_active = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    timer_start = db.Column(db.Time, nullable=False, server_default=db.text("'00:00:00'::time without time zone"))
    timer_end = db.Column(db.Time, nullable=False, server_default=db.text("'23:59:59'::time without time zone"))
    timer_total = db.Column(db.Interval, nullable=False, server_default=db.text("'01:00:00'::interval"))
    grounded = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    # --- Per-language nickname approval / rejection flags ---
    approval_en = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    approval_pt = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    approval_fr = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    approval_es = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    approval_de = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    approval_ru = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    rejection_en = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    rejection_pt = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    rejection_fr = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    rejection_es = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    rejection_de = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    rejection_ru = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    def __init__(self, *args, **kwargs):
        # In-memory relationship caches, populated elsewhere at load time;
        # they are not database columns.
        self.inventory = None
        self.permissions = None
        self.igloos = None
        self.igloo_rooms = None
        self.furniture = None
        self.flooring = None
        self.locations = None
        self.stamps = None
        self.cards = None
        self.puffles = None
        self.puffle_items = None
        self.buddies = None
        self.buddy_requests = None
        self.character_buddies = None
        self.ignore = None
        super().__init__(*args, **kwargs)
    # NOTE(review): lru_cache on instance methods/properties keys the cache
    # on `self` and keeps every instance alive for the cache's lifetime
    # (flake8-bugbear B019); consider functools.cached_property — confirm
    # against how model instances are recycled before changing.
    @lru_cache()
    def safe_nickname(self, language_bitmask):
        # Fall back to the anonymous "P<id>" name when the nickname has not
        # been approved for the given language.
        return self.nickname if self.approval & language_bitmask else "P" + str(self.id)
    async def status_field_set(self, field_bitmask):
        # Set the given bit (persists only when it was previously unset).
        if (self.status_field & field_bitmask) == 0:
            await self.update(status_field=self.status_field ^ field_bitmask).apply()
    def status_field_get(self, field_bitmask):
        # Test whether the given bit is set.
        return (self.status_field & field_bitmask) != 0
    @property
    @lru_cache()
    def age(self):
        # Account age in days.
        return (datetime.now() - self.registration_date).days
    @property
    @lru_cache()
    def approval(self):
        # Pack the per-language approval booleans into one bitmask
        # (bit layout: ru de 0 es fr pt en).
        return int(f'{self.approval_ru * 1}{self.approval_de * 1}0{self.approval_es * 1}'
                   f'{self.approval_fr * 1}{self.approval_pt * 1}{self.approval_en * 1}', 2)
    @property
    @lru_cache()
    def rejection(self):
        # Same bit layout as `approval`, for the rejection flags.
        return int(f'{self.rejection_ru * 1}{self.rejection_de * 1}0{self.rejection_es * 1}'
                   f'{self.rejection_fr * 1}{self.rejection_pt * 1}{self.rejection_en * 1}', 2)
class ActivationKey(db.Model):
    """Pending account-activation keys, one or more per penguin."""
    __tablename__ = 'activation_key'
    penguin_id = db.Column(db.ForeignKey('penguin.id', ondelete='CASCADE', onupdate='CASCADE'), primary_key=True,
                           nullable=False)
    activation_key = db.Column(db.CHAR(255), primary_key=True, nullable=False)
class PenguinMembership(db.Model):
    """Membership periods for a penguin, keyed by start timestamp."""
    __tablename__ = 'penguin_membership'
    penguin_id = db.Column(db.ForeignKey('penguin.id', ondelete='CASCADE', onupdate='CASCADE'), primary_key=True,
                           nullable=False)
    start = db.Column(db.DateTime, primary_key=True, nullable=False)
    expires = db.Column(db.DateTime)
    # *_aware flags track which start/expiry notifications were shown.
    start_aware = db.Column(db.Boolean, server_default=db.text("false"))
    expires_aware = db.Column(db.Boolean, server_default=db.text("false"))
    expired_aware = db.Column(db.Boolean, server_default=db.text("false"))
class Login(db.Model):
    """One row per login session (hashed IP plus minutes played)."""
    __tablename__ = 'login'
    id = db.Column(db.Integer, primary_key=True, server_default=db.text("nextval('\"login_id_seq\"'::regclass)"))
    penguin_id = db.Column(db.ForeignKey('penguin.id', ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
    date = db.Column(db.DateTime, nullable=False, server_default=db.text("now()"))
    ip_hash = db.Column(db.CHAR(255), nullable=False)
    minutes_played = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
class EpfComMessage(db.Model):
    """EPF communicator messages, attributed to a character."""
    __tablename__ = 'epf_com_message'
    message = db.Column(db.Text, nullable=False)
    character_id = db.Column(db.ForeignKey('character.id', ondelete='RESTRICT', onupdate='CASCADE'), nullable=False)
    date = db.Column(db.DateTime, nullable=False, server_default=db.text("now()"))
class CfcDonation(db.Model):
    """Coins For Change donations made by a penguin to a charity."""
    __tablename__ = 'cfc_donation'
    penguin_id = db.Column(db.ForeignKey('penguin.id', ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
    coins = db.Column(db.Integer, nullable=False)
    charity = db.Column(db.Integer, nullable=False)
    date = db.Column(db.DateTime, nullable=False, server_default=db.text("now()"))
| StarcoderdataPython |
11258102 | # Generated by Django 2.0.4 on 2018-05-10 05:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Lock table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Lock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lock_name', models.CharField(max_length=32, unique=True, verbose_name='Lock Name')),
                ('worker_name', models.CharField(max_length=32, verbose_name='Worker Name')),
                ('lock_time', models.DateTimeField(verbose_name='Lock Time')),
                ('expire_time', models.DateTimeField(verbose_name='Expired Time')),
            ],
            options={
                'verbose_name': 'Lock',
                'verbose_name_plural': 'Locks',
            },
        ),
    ]
| StarcoderdataPython |
378929 | import inspect
import pytest
from botocore.stub import Stubber
from AwAws.SharedResources.parameters import Parameters
# set up some simple responses from SSM
@pytest.fixture
def ssm_get_response():
    """Canned SSM GetParameter response for /alpha/hostname."""
    response = {
        'Parameter': {
            'Type': 'String',
            'Name': '/alpha/hostname',
            'Value': 'some.hostname.com'
        }
    }
    return response
@pytest.fixture
def ssm_get_all_response():
    """Canned SSM GetParametersByPath response with two parameters."""
    response = {
        'Parameters': [
            {
                'Type': 'String',
                'Name': '/alpha/hostname',
                'Value': 'some.hostname.com'
            },
            {
                'Type': 'SecureString',
                'Name': '/alpha/password',
                'Value': '<PASSWORD>'
            },
        ]
    }
    return response
@pytest.fixture
def ssm_put_response():
    """Canned SSM PutParameter response (version number only)."""
    response = {
        'Version': 1234
    }
    return response
def test_init():
    """A fresh Parameters object exposes the expected default attributes."""
    params = Parameters(service='test_service')
    # BUG FIX: the original evaluated inspect.isclass(Parameters) and
    # discarded the result; the assert was clearly intended.
    assert inspect.isclass(Parameters)
    assert params.service == 'test_service'
    assert params.region_name is None
    assert params.role_arn is None
    assert params.tmp_file_loc == '/tmp/awaws_ssm_params'
    assert params.ssm is None
def test_get_ssm():
    """get_ssm() attaches a botocore SSM client."""
    params = Parameters(service='test_service')
    params.get_ssm()
    assert str(type(params.ssm)) == "<class 'botocore.client.SSM'>"
def test_get_param(ssm_get_response):
    """get_param returns the full Parameter dict from SSM."""
    params = Parameters(service='alpha')
    params.get_ssm()
    with Stubber(params.ssm) as stubber:
        stubber.add_response('get_parameter', ssm_get_response)
        assert params.get_param('hostname')['Value'] == 'some.hostname.com'
def test_get_param_value(ssm_get_response):
    """get_param_value returns just the parameter's value string."""
    params = Parameters(service='alpha')
    params.get_ssm()
    with Stubber(params.ssm) as stubber:
        stubber.add_response('get_parameter', ssm_get_response)
        assert params.get_param_value('hostname') == 'some.hostname.com'
def test_get_fail():
    """A client error during get_parameter surfaces as RuntimeError."""
    params = Parameters(service='omega')
    params.get_ssm()
    with Stubber(params.ssm) as stubber:
        stubber.add_client_error('get_parameter')
        with pytest.raises(RuntimeError):
            params.get_param('hostname')
def test_get_all(ssm_get_all_response):
    """get_all maps short parameter names to their Parameter dicts."""
    params = Parameters(service='test_service')
    params.get_ssm()
    with Stubber(params.ssm) as stubber:
        stubber.add_response('get_parameters_by_path', ssm_get_all_response)
        my_params = params.get_all()
        assert my_params['hostname']['Value'] == 'some.hostname.com'
        assert my_params['password']['Value'] == '<PASSWORD>'
def test_put_param(ssm_put_response):
    """put_param returns the new parameter version, secure or not."""
    params = Parameters(service='alpha')
    params.get_ssm()
    with Stubber(params.ssm) as stubber:
        # One stubbed response per expected put_parameter call.
        stubber.add_response('put_parameter', ssm_put_response)
        stubber.add_response('put_parameter', ssm_put_response)
        assert params.put_param('param1', 'test1.mongo.name') == 1234
        assert params.put_param('param2', 'test2.mongo.name', secure=True) == 1234
def test_put_fail():
    """A client error during put_parameter surfaces as RuntimeError."""
    params = Parameters(service='omega')
    params.get_ssm()
    with Stubber(params.ssm) as stubber:
        stubber.add_client_error('put_parameter')
        with pytest.raises(RuntimeError):
            params.put_param('a', 'b')
def test_fully_qualified_parameter_name():
    """Parameter names are namespaced as /<service>/<name>."""
    params = Parameters(service='beta')
    assert params.fully_qualified_parameter_name('hello') == '/beta/hello'
| StarcoderdataPython |
5061447 | <reponame>mn3711698/wrobot
# -*- coding: utf-8 -*-
##############################################################################
# Author:QQ173782910
##############################################################################
"""admin/vi/BASE_TPL.py"""
from basic.VIEW_TOOL import cVIEWS
class cBASE_TPL(cVIEWS):
    """View endpoints for file uploads and advertisement-type management."""
    def goPartUpload(self):
        # General file upload; respond with the stored file's URL.
        return self.jsons({'url': self.dl.Upload()})
    def goPartPem_upload(self):
        # PEM certificate upload; respond with the stored file's URL.
        return self.jsons({'url': self.dl.Pem_upload()})
    def goPartSave_type(self):
        # Add an advertisement type.
        result = self.dl.save_type()
        return self.jsons(result)
    def goPartSave_ctype(self):
        # Add an advertisement sub-type.
        result = self.dl.save_ctype()
        return self.jsons(result)
    def goPartDel_qiniu_pic(self):
        # Delete a picture from Qiniu storage.
        result = self.dl.del_qiniu_pic()
        return self.jsons(result)
| StarcoderdataPython |
11367086 | <reponame>MatthewTsan/Leetcode
# Definition for singly-linked list.
class ListNode:
    """A node of a singly-linked list."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
        """Remove the n-th node from the end and return the list head.

        Edge behaviour kept from the original implementation:
        * ``n == 0`` or an empty list returns the list unchanged;
        * ``n`` greater than the list length returns the list unchanged;
        * ``n`` equal to the length removes the head node.
        """
        if n == 0 or not head:
            return head
        # Advance a lead pointer n nodes ahead of the start.
        lead = head
        for _ in range(n):
            if not lead:
                # Fewer than n nodes: nothing to remove.
                return head
            lead = lead.next
        if not lead:
            # Exactly n nodes: the head itself is the n-th from the end.
            return head.next
        # Walk lead to the last node; trail then sits just before the target.
        trail = head
        while lead.next:
            lead = lead.next
            trail = trail.next
        trail.next = trail.next.next
        return head
| StarcoderdataPython |
6611489 | # -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import os
class KhronosOpenCLCLHPPConan(ConanFile):
    """Conan recipe packaging the Khronos OpenCL C++ bindings (header-only)."""
    name = "khronos-opencl-clhpp"
    version = "20190207"
    description = "OpenCL Host API C++ bindings"
    topics = ("conan", "opencl", "header-only", "opencl-headers", "clhpp", "khronos")
    url = "https://github.com/bincrafters/conan-khronos-opencl-clhpp"
    homepage = "https://github.com/KhronosGroup/OpenCL-CLHPP"
    author = "Bincrafters <<EMAIL>>"
    license = "MIT"
    exports = ["LICENSE.md"]
    exports_sources = ["CMakeLists.txt"]
    generators = "cmake"
    no_copy_source = True
    # Custom attributes for Bincrafters recipe conventions
    _source_subfolder = "source_subfolder"
    _build_subfolder = "build_subfolder"
    requires = (
        "khronos-opencl-headers/20190412@bincrafters/stable",
        "khronos-opencl-icd-loader/20190412@bincrafters/stable"
    )
    def source(self):
        # Fetch a pinned upstream commit, verify its checksum, and normalize
        # the extracted directory name.
        source_url = "https://github.com/KhronosGroup/OpenCL-CLHPP"
        commit = "97a643f3bcb583fcbfb2a616d9b52790389514bc"
        sha256 = "46157b36bed68e661cc73d4794829b0a06005ca9dda512dc7e30a376bee33557"
        tools.get("{0}/archive/{1}.tar.gz".format(source_url, commit), sha256=sha256)
        extracted_dir = "OpenCL-CLHPP-" + commit
        os.rename(extracted_dir, self._source_subfolder)
    def _configure_cmake(self):
        # Headers only: skip docs, examples and tests.
        cmake = CMake(self)
        cmake.definitions["BUILD_DOCS"] = False
        cmake.definitions["BUILD_EXAMPLES"] = False
        cmake.definitions["BUILD_TESTS"] = False
        cmake.configure(build_folder=self._build_subfolder)
        return cmake
    def build(self):
        cmake = self._configure_cmake()
        cmake.build()
    def package(self):
        self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
    def package_id(self):
        # Header-only: one package id for all settings.
        self.info.header_only() | StarcoderdataPython |
6481739 | <gh_stars>0
from objects import experiments, outputtable, computationalresource
import json
import itertools
import copy
import os
import lxml.etree as etree
import sqlite3 as lite
import sys
import subprocess
import datetime
import time
# Module-level experiment state, populated by the setUp* helpers below.
modelsAndAlgorithmNames_global = []  # NOTE(review): never populated in this file -- confirm it is filled elsewhere
baseParamsDict_global = {}  # loaded from baseExperimentParameters.json by createBaseParamsDict()
computationalResource_global = None  # set by createDefaultComputationalResource()
outputtable_global = None  # 'mainTable' handle, set by createOutputTables()
outputtable_relerror_global = None  # 'relerrorTable' handle, set by createOutputTables()
databaseName_global = '../db/facpro-results.db'  # SQLite results database
databaseTables = {'BaseExpr', 'RelError'}  # tables managed by setUpDatabase()
def remove_old_solution_files():
    """Delete stale solver artifacts (*.mst, *.sol) from the ../output/ directory."""
    output_dir = "../output/"
    for entry in os.listdir(output_dir):
        if entry.endswith((".mst", ".sol")):
            os.remove(os.path.join(output_dir, entry))
def setUp(mode = 'debug', resetParams = False):
    """Initialize the experiment infrastructure.

    Creates the output-table handles, the default computational resource for
    *mode*, and loads the base parameter dictionary.  When resetParams is
    True, stale solver files are deleted first.
    """
    if resetParams:
        remove_old_solution_files()
    createOutputTables()
    createDefaultComputationalResource(mode)
    createBaseParamsDict()
def setUpDatabase(dropExisting = False):
    """Create the BaseExpr and RelError tables in the SQLite results database.

    When dropExisting is True, the tables listed in databaseTables are dropped
    first.  On a sqlite error the transaction is rolled back and the process
    exits.  (Python 2 only: uses the 'except Error, e' syntax.)
    """
    con = None
    try:
        con = lite.connect(databaseName_global)
        c = con.cursor()
        if dropExisting:
            for tableName in databaseTables:
                c.execute('drop table if exists ' + tableName)
        # Create table
        c.execute(getCreateTableString_BaseExpr())
        c.execute(getCreateTableString_RelError())
        con.commit()
    except lite.Error, e:
        if con:
            con.rollback()
        print "Error %s:" % e.args[0]
        sys.exit(1)
    finally:
        if con:
            con.close()
def getCreateTableString_BaseExpr():
    """Build the CREATE TABLE statement for the BaseExpr results table.

    Column names and SQL types come from ../db/databaseTableColumns.json:
    the meta columns, the columns shared by all tables, and the
    BaseExpr-specific columns, in that order.
    """
    schema = json.loads(open('../db/databaseTableColumns.json').read())
    typesDict = schema['types']
    column_defs = []
    for name in schema['metaColumns']:
        column_defs.append(name + ' ' + typesDict[name])
    for category in schema['columnsInAllTables']:
        for name in schema['columnsInAllTables'][category]:
            column_defs.append(name + ' ' + typesDict[name])
    for name in schema['baseTableColumns']:
        column_defs.append(name + ' ' + typesDict[name])
    return 'CREATE TABLE BaseExpr(' + ', '.join(column_defs) + ');'
def getCreateTableString_RelError():
    """Build the CREATE TABLE statement for the RelError results table.

    Mirrors getCreateTableString_BaseExpr but appends the
    relErrorTableColumns instead of the baseTableColumns.
    """
    schema = json.loads(open('../db/databaseTableColumns.json').read())
    typesDict = schema['types']
    column_defs = []
    for name in schema['metaColumns']:
        column_defs.append(name + ' ' + typesDict[name])
    for category in schema['columnsInAllTables']:
        for name in schema['columnsInAllTables'][category]:
            column_defs.append(name + ' ' + typesDict[name])
    for name in schema['relErrorTableColumns']:
        column_defs.append(name + ' ' + typesDict[name])
    return 'CREATE TABLE RelError(' + ', '.join(column_defs) + ');'
def createOutputTables():
    """Create the 'mainTable' and 'relerrorTable' OutputTable handles backed
    by the results database and store them in module globals."""
    global outputtable_global, outputtable_relerror_global
    outputtable_global = outputtable.OutputTable(databaseName = databaseName_global, tableName = 'mainTable')
    outputtable_relerror_global = outputtable.OutputTable(databaseName = databaseName_global, tableName='relerrorTable')
def createDefaultComputationalResource(mode = 'debug'):
    """Create the module-global computational resource.

    'debug' mode uses the shadow-debug queue; anything else uses
    shadow-unsponsored.
    """
    global computationalResource_global
    resource_name = 'shadow-debug' if mode == 'debug' else 'shadow-unsponsored'
    computationalResource_global = computationalresource.createComputationalResource(resource_name)
def createBaseParamsDict():
    """Load baseExperimentParameters.json into the baseParamsDict_global module global."""
    global baseParamsDict_global
    # Use a context manager so the file handle is closed promptly
    # (the original left the handle to the garbage collector).
    with open('baseExperimentParameters.json') as param_file:
        baseParamsDict_global = json.loads(param_file.read())
def flatten_two_level_nested_dict(dict):
    """Merge the inner dicts of a two-level nested dict into one flat dict.

    Inner keys appearing under several outer keys are resolved in favor of
    the later outer key.  (The parameter name shadows the builtin 'dict';
    kept unchanged for interface compatibility.)
    """
    flattened = {}
    for inner in dict.values():
        flattened.update(inner)
    return flattened
def cardProductOfDictionaries(paramsDict):
    """Expand a dict of value-lists into the Cartesian product of its values.

    Scalar values are first wrapped in single-element lists (this mutates
    paramsDict in place).  Returns a list of plain dicts, one per value
    combination.  NOTE(review): relies on itertools.izip and
    dict.itervalues, so this function is Python 2 only.
    """
    for key in paramsDict:
        if not isinstance(paramsDict[key], list):
            paramsDict[key] = [paramsDict[key]]
    return list(dict(itertools.izip(paramsDict, x)) for x in itertools.product(*paramsDict.itervalues()))
def createParamsDictsForExprmnts(baseParamsDict, rangesOfParametersToVary, group_def = None):
    '''Return a list of parameter dictionaries, one per experiment.

    The base parameters are overridden with the ranges to vary, expanded via
    cardProductOfDictionaries, and finally re-grouped into the two-level
    structure described by group_def.
    NOTE(review): group_def is iterated unconditionally below, so passing
    group_def=None raises TypeError -- confirm all callers supply it.
    '''
    if group_def is not None:
        # Collapse the grouped base dict so overrides can be applied flat.
        baseParamsDict = flatten_two_level_nested_dict(baseParamsDict)
    newParamsDict = copy.deepcopy(baseParamsDict)
    for paramName in rangesOfParametersToVary.keys():
        newParamsDict[paramName] = rangesOfParametersToVary[paramName]
    list_of_flattened_dicts = cardProductOfDictionaries(newParamsDict)
    list_of_unflattened_dicts = []
    # Re-nest each flat combination according to group_def's key groups.
    for flattened_dict in list_of_flattened_dicts:
        unflattened_dict = {}
        for key in group_def:
            unflattened_dict[key] = {}
            for subkey in group_def[key]:
                unflattened_dict[key][subkey] = flattened_dict[subkey]
        list_of_unflattened_dicts.append(unflattened_dict)
    return list_of_unflattened_dicts
def getFilenameForExprParamsDict(rangesOfParametersToVary, paramsDict):
    """Build the experiment-parameters filename from the varied parameters.

    Produces '../exprFiles/ExprParams_base[_<name>-<value>...].json', taking
    the value of each varied parameter from paramsDict (values must already
    be strings).
    """
    suffix = ''.join('_' + name + '-' + paramsDict[name]
                     for name in rangesOfParametersToVary.keys())
    return '../exprFiles/ExprParams_base' + suffix + '.json'
def runExperimentsForExperimentBatch(ranges_of_params_to_vary, experimentName,
                                     modelsAndAlgs = modelsAndAlgorithmNames_global, baseParamsDict = None,
                                     runTheExperiments = False, localMode = False):
    """Build an experiment batch script and (unless localMode) push and run it remotely.

    NOTE(review): runTheExperiments is accepted but never used in this body.
    NOTE(review): the remote invocation always runs run_experiments_for_Test.sh,
    not the script derived from experimentName -- confirm this is intended.
    NOTE(review): the modelsAndAlgs default binds the (empty) global list at
    definition time, the classic mutable/default-argument gotcha.
    """
    group_def = json.loads(open('../db/databaseTableColumns.json').read())['columnsInAllTables']
    if baseParamsDict is None:
        params_dicts_for_exprs = createParamsDictsForExprmnts(baseParamsDict_global,
                                                              ranges_of_params_to_vary, group_def)
    else:
        params_dicts_for_exprs = createParamsDictsForExprmnts(baseParamsDict, ranges_of_params_to_vary, group_def)
    print "paramsDictsForExperiments", params_dicts_for_exprs
    exprBatch = experiments.OptimizationExperimentBatch(computationalResource_global,
                                                        '../exprBatchScripts/run_experiments_for_' + experimentName + '.sh')
    # One experiment per (parameter combination, inference model) pair.
    for paramsDict in params_dicts_for_exprs:
        for infModelName in modelsAndAlgs:
            scriptCall = 'python ' + '../src/models/run_facpro.py'
            exprBatch.addOptimizationExperiment(experiments.OptimizationExperiment(scriptCall,
                computationalResource_global, outputtable_global, experimentName,
                parametersDictionary = paramsDict, paramsThatChanged = ranges_of_params_to_vary.keys()))
    exprBatch.writeBatchScript()
    if not localMode:
        # Sync the code to the cluster head node, then kick off the batch via ssh.
        print "syncing files"
        os.system('rsync -av --exclude ~/PycharmProjects/wnopt_cavs3/exprBatchScripts/rsync-exclude-list.txt '
                  '~/PycharmProjects/wnopt_cavs3 hmedal@shadow-login:/work/hmedal/code/')
        os.system('ssh hmedal@shadow-login chmod a+x /work/hmedal/code/wnopt_cavs3/exprBatchScripts/*.sh')
        result = subprocess.check_output('ssh hmedal@shadow-login "cd /work/hmedal/code/wnopt_cavs3/exprBatchScripts; '
                                         './run_experiments_for_Test.sh"', shell = True) # note to self: output appears in exprBatchScripts
        print "result ", result
        # Append a timestamped record of the scheduled jobs.
        with open('../log/jobs_scheduled.log', 'a') as f:
            ts = time.time()
            f.write(datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') + '\n')
            f.write(result + '\n') # python will convert \n to os.linesep
            f.close() # you can omit in most cases as the destructor will call it
def run_experiments_for_RunTime_Table(runTheExperiments = False):
    """Schedule the run-time table experiments (datasets x channels x jam budgets)."""
    rangesOfParametersToVary = {'datasetName': ['grid-7x7',
                                'berkeley', 'grid-8x8', 'grid-9x9'], 'numChannels' : [1,2], 'jamBudget' : [1,3]}
    runExperimentsForExperimentBatch(rangesOfParametersToVary, 'RunTime',
                                     modelsAndAlgs=modelsAndAlgorithmNames_global,
                                     runTheExperiments = runTheExperiments)
    # Optionally run the generated batch script on the cluster over ssh.
    if runTheExperiments:
        os.system('ssh hmedal@shadow-login "cd /work/hmedal/code/wnopt_cavs/exprBatchScripts; '
                  './run_experiments_for_RunTime.sh"')
def run_experiments_for_HeatMap_Figure(runTheExperiments = False):
    """Build the heat-map experiments (one per parameter combo and interference model).

    NOTE(review): no group_def is passed, yet createParamsDictsForExprmnts
    iterates group_def unconditionally, so this call raises TypeError.
    NOTE(review): the batch is assembled but writeBatchScript() is never
    called and runTheExperiments is unused -- confirm this is intentional.
    """
    rangesOfParametersToVary = {'dataset': ['grid-7x7', 'berkeley'], 'numChannels': [1, 2], 'numJammers': [1, 3]}
    paramsDictsForExperiments = createParamsDictsForExprmnts(baseParamsDict_global, rangesOfParametersToVary)
    exprBatch = experiments.OptimizationExperimentBatch(computationalResource_global,
                                                        '../exprBatchScripts/run_experiments_for_HeatMap_Figure.sh')
    for paramsDict in paramsDictsForExperiments:
        for infModelName in ['none', 'semi-additive', 'capture', 'protocol', 'interferenceRangeA',
                             'interferenceRangeB']:
            paramsDict['interferenceApproximateModel'] = infModelName
            paramsDict['interferenceTrueModel'] = 'additive'
            scriptCall = 'python ' + '../src/models/relerror.py'
            exprBatch.addOptimizationExperiment(experiments.OptimizationExperiment(scriptCall,
                computationalResource_global, outputtable_global, 'HeatMap', parametersDictionary=paramsDict))
def run_experiments_for_NumNodes_Table():
    """Schedule the node-count table experiments over the four datasets."""
    rangesOfParametersToVary = {'dataset': ['grid-7x7', 'berkeley', 'grid-8x8', 'grid-9x9']}
    runExperimentsForExperimentBatch(rangesOfParametersToVary, 'NumNodes')
def run_experiments_for_NumChannels_Table():
    """Schedule the channel-count table experiments (1-3 channels)."""
    rangesOfParametersToVary = {'dataset': ['grid-7x7', 'berkeley'], 'numChannels' : [1,2,3]}
    runExperimentsForExperimentBatch(rangesOfParametersToVary, 'NumChannels')
def run_experiments_for_NumJammerLocations_Table_2D():
    """Schedule the jammer-location experiments on the 2D datasets."""
    rangesOfParametersToVary = {'dataset': ['grid-7x7', 'berkeley'], 'numJammerLocations': [9, 16, 25]}
    runExperimentsForExperimentBatch(rangesOfParametersToVary, 'NumJammerLocations_2D')
def run_experiments_for_NumJammerLocations_Table_3D():
    """Schedule the jammer-location experiments on the 3D datasets."""
    rangesOfParametersToVary = {'dataset': ['grid_5x5x5', 'berkeley_3d'], 'numJammerLocations': [27, 64, 125]}
    runExperimentsForExperimentBatch(rangesOfParametersToVary, 'NumJammerLocations_3D')
def run_experiments_for_NumJammerLocations_Table():
    """Schedule both the 2D and 3D jammer-location experiment batches."""
    run_experiments_for_NumJammerLocations_Table_2D()
    run_experiments_for_NumJammerLocations_Table_3D()
def run_experiments_for_NumJammers_Table():
    """Schedule the jammer-count table experiments (1-5 jammers)."""
    rangesOfParametersToVary = {'dataset': ['grid-7x7', 'berkeley'], 'numJammers': [1,2,3,4,5]}
    runExperimentsForExperimentBatch(rangesOfParametersToVary, 'NumJammers')
# Script entry point: initialize the infrastructure and schedule every batch.
# Database creation is off by default (flip setUpDB to recreate the tables).
if __name__ == "__main__":
    setUpDB = False
    setUp()
    if setUpDB:
        setUpDatabase()
    run_experiments_for_RunTime_Table()
    run_experiments_for_HeatMap_Figure()
    run_experiments_for_NumNodes_Table()
    run_experiments_for_NumChannels_Table()
    run_experiments_for_NumJammerLocations_Table()
    run_experiments_for_NumJammers_Table() | StarcoderdataPython |
1946747 | <filename>doc/week1/w1d3/json.py
import urllib
import simplejson as json # sudo pip install simplejson
# Query the BOLD Systems taxonomy API for the genus Danaus and print the
# representative image URL of each matched name.  Python 2 only:
# urllib.urlopen and the print statement.  ('representitive_image' is the
# API's own spelling of the key.)
url = "http://www.boldsystems.org/index.php/API_Tax/TaxonSearch?taxName=Danaus"
response = urllib.urlopen(url)
data = json.loads(response.read())
if data['top_matched_names']:
    for name in data['top_matched_names']:
        if name['representitive_image']:
            print name['representitive_image']['image'] | StarcoderdataPython |
def print_left_perimeter(root):
    """Print the left boundary of a binary tree, top-down, one value per line.

    Descends left when possible, otherwise right, and stops at the first
    leaf; the leaf's own value is intentionally NOT printed (leaves are
    handled separately in a full boundary traversal).
    (Fix: the original def line was corrupted by a dataset artifact; also
    replaced '!= None' with the idiomatic 'is not None'.)
    """
    node = root
    while node is not None:
        value = node.data
        if node.left is not None:
            node = node.left
        elif node.right is not None:
            node = node.right
        else:  # leaf node -- excluded from the boundary
            break
        print(str(value) + " ")
def print_right_perimeter(root):
    """Print the right boundary of a binary tree, bottom-up, one value per line.

    Descends right when possible, otherwise left, stopping at the first leaf
    (whose value is intentionally not printed); values are pushed on a stack
    so they come out bottom-up.
    Fix: the left-descent branch read 'root.leftt' (typo), which raised
    AttributeError whenever a right-boundary node had only a left child.
    """
    r_values = [] # stack for right side values
    while root != None:
        curr_val = root.data
        if root.right != None:
            root = root.right
        elif root.left != None:
            root = root.left
        else: # leaf node -- excluded from the boundary
            break
        r_values.append(curr_val)
    while len(r_values) != 0:
        print(str(r_values.pop()) + " ")
| StarcoderdataPython |
4862425 | #!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is not supported by Google
#
"""Simple web server for testing the Authz
usage:
./authz.py
--debug Show debug logs
--use_ssl Start with SSL using ssl.crt and ssl.key in PEM format;
the ssl.key must not be password protected
--port= Set the listener port of the cherrypy webserver
(default 28081)
--saml_issuer= set the SAML_issuer ID parameter
Default is set to authz.py
--backend= *Required* Set the authz database backend to use. Should be
specified in the form <module>.<class>. If only the class is
provided, then the script will look for the class in the
list of globals.
This script runs a web server on port 28081 that allows you to test the
Authz SPI on the Google Search Appliance.
The authz.py script is essentially a SAML policy server which authorizes users
and then returns the result in compliance with the SAML 2.0 specification.
It is not designed to handle full production load but rather just as a
proof-of-concept.
This script comes with two authz backends: DictAuthzBackend and
FileAuthzBackend.
DictAuthzBackend: Simply determines a user's access to a resource as defined
in a hardcoded dictionary. See self.authz_db variable in DictAuthzBackend
FileAuthzBackend: Reads access rules from the file 'acl.txt' in the current
directory. The acl.txt file is a text file in the following format:
resource1|user1,user2
resource2|user2
(etc.)
It's very simple to write custom authorization backends: simply implement the
AuthzBackend interface and provide the authorize(username, resource) method (see
the documentation for AuthzBackend for more details). To use the custom
backend, run authz.py with the --backend option specifiying the module and class
name of the backend.
SETUP and RUN configuration.
After startup, you can view the configured authz backend by visiting
http://idp.yourdomain.com:28081/ (if idp.yourdomain.com is where authz.py
is running)
In the GSA admin interface, go to:
Serving-->Access Control
Authorization service URL:
http://idp.yourdomain.com:28081/authz
This script requires the cherrypy v3 to be installed (v2 gives an error since
quickstart is not available).
http://www.cherrypy.org/
Also see:
http://www.google.com/support/enterprise/static/gsa/docs/admin/72/gsa_doc_set/authn_authz_spi/authn_authz_spi.html
"""
import datetime
import getopt
import random
import sys
import time
import xml.dom.minidom
import cherrypy
from xml.sax.saxutils import escape
from socket import gethostname
class AuthZ(object):
  """CherryPy application implementing the GSA SAML 2.0 authorization (SPI) service.

  Authorization decisions for each AuthzDecisionQuery are delegated to the
  pluggable authz_backend (an AuthzBackend implementation).
  """
  def __init__(self, authz_backend, protocol, debug_flag, saml_issuer):
    self.authz_backend = authz_backend
    self.protocol = protocol
    self.debug_flag = debug_flag
    self.saml_issuer = saml_issuer
    log ('--------------------------------')
    log ('-----> Starting authz.py <------')
    log ('--------------------------------')
  # Main landing page
  def index(self):
    """Landing page showing which authz backend is configured."""
    return ('<html><head><title>Authz Landing Page</title></head>'
            '<body><center>'
            '<h3>Landing page for SPI authz.py</h3>'
            '<p>Authz backend: %s</p>'
            '</center></body></html>') % self.authz_backend.__class__.__name__
  index.exposed = True
  # Public page
  def public(self):
    """Unauthenticated sample page."""
    return 'Anyone can view this page. No authentication is required"'
  public.exposed = True
  # Generates SAML 2.0 IDs randomly.
  def getrandom_samlID(self):
    """Return a random 160-bit hex ID prefixed with a letter (SAML IDs may
    not start with a digit).  The [2:-1] slice strips the '0x' prefix and
    the Python 2 long-literal 'L' suffix."""
    # Generate a randomID starting with an character
    return random.choice('abcdefghijklmnopqrstuvwxyz') + hex(random.getrandbits(160))[2:-1]
  #
  def get_saml_namespace(self, xmldoc, tagname):
    """Return (protocol-prefix, assertion-prefix, elements) for *tagname*,
    trying the 'samlp'/'saml' then 'saml2p'/'saml2' prefixes."""
    a = xmldoc.getElementsByTagName('samlp:%s' % tagname)
    if a != []:
      return ("samlp", "saml", a)
    a = xmldoc.getElementsByTagName('saml2p:%s' % tagname)
    if a != []:
      return ("saml2p", "saml2", a)
    log("exotic namespace")
    # TODO get the name space and return it
    return ("", "", [])
  # The SAML Authorization service (/authz) called by the GSA to query to see
  # if individual URLs are authorized for a given user. Both the username
  # and the URL to check for is passed in
  def authz(self):
    """Handle a (possibly batched) SAML AuthzDecisionQuery and return the
    SOAP-wrapped SAML response with one decision per query."""
    authz_request = cherrypy.request.body.read()
    xmldoc = xml.dom.minidom.parseString(authz_request)
    if self.debug_flag:
      log('----------- AUTHZ BEGIN -----------')
      log('AUTHZ Request: %s' % xmldoc.toprettyxml())
    now = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
    # The request should look something like
    # request = ("<soapenv:Envelope xmlns:soapenv=\
    # "http://schemas.xmlsoap.org/soap/envelope/\"
    # "xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"
    # "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">"
    # "<soapenv:Body>"
    # "<samlp:AuthzDecisionQuery ID=
    # \"dbppegngllhegcobmfponljblmfhjjiglbkbmmco\"
    # "IssueInstant=\"2008-07-09T15:22:55Z\""
    # "Resource=\"secure.yourdomain.com/protected/page2.html\"
    # Version=\"2.0\"
    # "xmlns:saml=\"urn:oasis:names:tc:SAML:2.0:assertion\""
    # "xmlns:samlp=\"urn:oasis:names:tc:SAML:2.0:protocol\">"
    # "<saml:Issuer>google.com</saml:Issuer>"
    # "<saml:Subject>"
    # "<saml:NameID>user1</saml:NameID>"
    # "</saml:Subject>"
    # "<saml:Action Namespace=\"urn:oasis:names:tc:SAML:1.0:
    # action:ghpp\">GET</saml:Action>"
    # "</samlp:AuthzDecisionQuery>"
    # "<samlp:AuthzDecisionQuery ID=
    # \"eeppegngllhegcobmfabcnljblmfhrrniglbkbeed\"
    # "IssueInstant=\"2008-07-09T15:22:55Z\""
    # "Resource=\"secure.yourdomain.com/protected/page3.html\"
    # Version=\"2.0\"
    # "xmlns:saml=\"urn:oasis:names:tc:SAML:2.0:assertion\""
    # "xmlns:samlp=\"urn:oasis:names:tc:SAML:2.0:protocol\">"
    # "<saml:Issuer>google.com</saml:Issuer>"
    # "<saml:Subject>"
    # "<saml:NameID>user1</saml:NameID>"
    # "</saml:Subject>"
    # "<saml:Action Namespace=\"urn:oasis:names:tc:SAML:1.0:
    # action:ghpp\">GET</saml:Action>"
    # "</samlp:AuthzDecisionQuery>"
    # "</soapenv:Body>"
    # "</soapenv:Envelope>")
    saml_id = None
    resource = None
    username = None
    # parse out the SAML AuthzRequest
    # If we don't know *for sure* if a user is allowed or not, send back an
    # indeterminate. if the URL does *not* exist in the built in self.authz_db,
    # then we don't know if the user is allowed
    # or not for sure, then send back an Indeterminate.
    # GSA 6.0+ can request batch authorization where there can be multiple
    # samlp:AuthzDecisionQuery in one request for several URLs
    # which means we have to respond back in one response for each request.
    # If we're using the internal authz system, parse out the inbound
    # request for the URI and use that to check against the local authz db
    (spprefix, sprefix, samlp) = self.get_saml_namespace(xmldoc, 'AuthzDecisionQuery')
    log('using prefix: %s and %s' % (spprefix, sprefix))
    response = ('<soapenv:Envelope '
                'xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">'
                '<soapenv:Body>')
    for node in samlp:
      if (node.nodeName == "%s:AuthzDecisionQuery" % spprefix):
        saml_id = node.attributes["ID"].value
        resource = node.attributes["Resource"].value
        samlName = node.getElementsByTagName("%s:NameID" % sprefix)
        for n_issuer in samlName:
          cnode= n_issuer.childNodes[0]
          if cnode.nodeType == node.TEXT_NODE:
            username = cnode.nodeValue
      # the SAML response and assertion are unique random ID numbers
      # back in the string with the decision.
      rand_id_saml_resp = self.getrandom_samlID()
      rand_id_saml_assert = self.getrandom_samlID()
      decision = self.authz_backend.authorize(username, resource)
      if self.debug_flag:
        log ('authz ID: %s' %(saml_id))
        log ('authz Resource: %s' %(resource))
        log ('authz samlName %s' %(username))
        log ('authz decision for resource %s [%s]' % (resource, decision))
      response = response + ('<samlp:Response '
                             'xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" '
                             'xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" '
                             'ID="%s" Version="2.0" IssueInstant="%s">'
                             '<samlp:Status><samlp:StatusCode '
                             'Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>'
                             '</samlp:Status><saml:Assertion Version="2.0" '
                             'ID="%s" IssueInstant="%s">'
                             '<saml:Issuer>%s</saml:Issuer><saml:Subject>'
                             '<saml:NameID>%s</saml:NameID></saml:Subject>'
                             '<saml:AuthzDecisionStatement Resource="%s" '
                             'Decision="%s"><saml:Action '
                             'Namespace="urn:oasis:names:tc:SAML:1.0:action:ghpp">'
                             'GET</saml:Action></saml:AuthzDecisionStatement>'
                             '</saml:Assertion></samlp:Response>') % (rand_id_saml_resp,now,
                                                                      rand_id_saml_assert,
                                                                      now, self.saml_issuer, username,
                                                                      escape(resource), decision)
    response += '</soapenv:Body></soapenv:Envelope>'
    if self.debug_flag:
      xmldoc = xml.dom.minidom.parseString(response)
      log('authz response %s' %(xmldoc.toprettyxml()))
      log('----------- AUTHZ END -----------')
    return response
  authz.exposed = True
class AuthzBackend(object):
  """Interface for pluggable authorization backends used by AuthZ."""
  def authorize(self, username, resource):
    """Checks if a user is authorized to view a resource.
    Args:
      username: The username (string).
      resource: The resource URL (string).
    Returns:
      A string indicating the result. 'Permit' if the user is permitted to
      access the resource, 'Deny' if not, or 'Indeterminate' if unknown
      (authorization will be passed on to a fallback mechanism).
    """
    # Fix: the original signature omitted 'self', so calling the method on an
    # instance raised TypeError instead of the intended NotImplementedError.
    raise NotImplementedError('authorize() not implemented')
class DictAuthzBackend(AuthzBackend):
  """Authorization backend that answers from a hard-coded in-memory dict."""
  def __init__(self):
    # Map of resource URL -> comma-separated string of permitted usernames.
    self.authz_db = {'http://secure.yourdomain.com/protected/page1':
                     'user1,user2,gsa1',
                     'http://secure.yourdomain.com/protected/page2':
                     'user1,gsa1',
                     'http://secure.yourdomain.com/protected/page3':
                     'user2,gsa1',
                     'http://secure.yourdomain.com/protected/page4':
                     'user1,user2,gsa1',
                     'http://meow2.mtv.corp.google.com:8000/blah-allowed.html':'user2'}
  def authorize(self, username, resource):
    """'Permit'/'Deny' for known resources, 'Indeterminate' for unknown ones."""
    allowed_users = self.authz_db.get(resource)
    if allowed_users is None:
      log('Resource not found in database: %s ' %(resource))
      return 'Indeterminate'
    arr_users = allowed_users.split(",")
    log('Allowed users for Resource: %s %s ' %(resource, arr_users))
    if username in arr_users:
      return 'Permit'
    return 'Deny'
class FileAuthzBackend(AuthzBackend):
  """Authorization backend that loads ACL rules from 'acl.txt'.

  File format: one rule per line, 'resource|user1,user2,...'.
  """
  def __init__(self):
    self.db = {}
    # Fixes: the handle was never closed if a line failed to parse (now a
    # 'with' block), and the variable shadowed the builtin 'input'.
    with open('acl.txt', 'r') as acl_file:
      for line in acl_file:
        fields = line.strip().split('|')
        resource, users = fields[0], fields[1]
        if resource in self.db:
          log('Warning: resource %s duplicated in file authz backend input' %
              resource)
        self.db[resource] = users.split(',')
  def authorize(self, username, resource):
    """'Permit'/'Deny' for known resources, 'Indeterminate' for unknown ones."""
    if resource not in self.db:
      return 'Indeterminate'
    if username in self.db[resource]:
      return 'Permit'
    return 'Deny'
def log(msg):
  """Print *msg* to stdout with a timestamp prefix."""
  # Fix: the original was "print ('[%s] %s') % (...)", which only works as a
  # Python 2 print statement; on Python 3 it applies % to print()'s None
  # return value and raises TypeError.  Formatting inside the parentheses
  # keeps the Python 2 output identical and also works on Python 3.
  print('[%s] %s' % (datetime.datetime.now(), msg))
# -------
# Main
# -------------
def main():
  """Parse command-line flags and start the CherryPy authz server.

  NOTE(review): the default saml_issuer here is "authn.py", while the module
  docstring says the default is authz.py -- confirm which is intended.
  (Python 2 only: uses print statements.)
  """
  # Default listen port
  cherrypy.server.socket_port = 28081
  cherrypy.server.socket_host = '0.0.0.0'
  protocol = "http"
  debug_flag = False
  saml_issuer = "authn.py"
  backend = None
  def usage():
    print ('\nUsage: authz.py --debug --use_ssl '
           '--port=<port> --saml_issuer=<issuer> --backend=<backend>\n')
  try:
    opts, args = getopt.getopt(sys.argv[1:], None,
                               ["debug", "use_ssl", "port=", "saml_issuer=",
                                "backend="])
  except getopt.GetoptError:
    usage()
    sys.exit(1)
  # Screen logging is off unless --debug is given.
  cherrypy.config.update({'global':{'log.screen': False}})
  for opt, arg in opts:
    if opt == "--debug":
      debug_flag = True
      cherrypy.config.update({'global':{'log.screen': True}})
    if opt == "--saml_issuer":
      saml_issuer = arg
    if opt == "--use_ssl":
      # Expects ssl.crt / ssl.key (PEM, unencrypted key) in the CWD.
      protocol = "https"
      cherrypy.config.update({"global": {
          "server.ssl_certificate": "ssl.crt",
          "server.ssl_private_key": "ssl.key",}})
    if opt == "--port":
      port = int(arg)
      cherrypy.config.update({"global": {"server.socket_port": port}})
    if opt == "--backend":
      backend = arg
  # try to import the backend class
  if not backend:
    print '\n--backend= required. Please see documentation.\n'
    usage()
    sys.exit(1)
  modulename, _, classname = backend.rpartition('.')
  if not modulename:
    # if no module name, then try to get it from globals
    backend_class = globals()[classname]
  else:
    # otherwise, import that module and get it from there
    module = __import__(modulename)
    backend_class = getattr(module, classname)
  cherrypy.quickstart(AuthZ(backend_class(), protocol, debug_flag,
                            saml_issuer))
# Script entry point.
if __name__ == '__main__':
  main()
| StarcoderdataPython |
11365847 | from collections import defaultdict
class Solution:
def canPair(self, arr, k):
# Code here
for i in range(len(arr)):
arr[i] = arr[i] % k
dict_1 = defaultdict(lambda: 0)
# print(arr)
for i in arr:
if i == 0:
if dict_1[i] > 0:
dict_1[i] -= 1
else:
dict_1[i] += 1
elif dict_1[k-i] > 0:
dict_1[k-i] -= 1
else:
dict_1[i] += 1
# print(dict_1)
for i in dict_1.values():
if i > 0:
return False
return True
#{
# Driver Code Starts
#Initial Template for Python 3
# Driver: reads T test cases from stdin, each as "n k" followed by n ints,
# and prints "True"/"False" per case.
if __name__ == '__main__':
    T = int(input())
    for i in range(T):
        n, k = input().split()
        n = int(n)
        k = int(k)
        nums = list(map(int, input().split()))
        ob = Solution()
        ans = ob.canPair(nums, k)
        if(ans):
            print("True")
        else:
            print("False")
# } Driver Code Ends
| StarcoderdataPython |
295133 | # RUN: test-parser.sh %s
# RUN: test-output.sh %s
# NOTE(review): lit/FileCheck-style regression test -- the trailing PARSER-*
# and OUTPUT-* comments are check directives matched against the tool's
# output; any code change must keep them in sync.
x = 1 # PARSER-LABEL:x = 1i
y = 2 # PARSER-NEXT:y = 2i
print("Start") # PARSER-NEXT:print("Start")
# OUTPUT-LABEL: Start
if x == 1: # PARSER-NEXT:if (x == 1i):
    if y == 3: # PARSER-NEXT: if (y == 3i):
        print("A") # PARSER-NEXT: print("A")
else: # PARSER-NEXT:else:
    print("C") # PARSER-NEXT: print("C")
print("D") # PARSER-NEXT:print("D")
# OUTPUT-NEXT: D
if x == 1: # PARSER-NEXT:if (x == 1i):
    if y == 3: # PARSER-NEXT: if (y == 3i):
        print("A") # PARSER-NEXT: print("A")
    else: # PARSER-NEXT: else:
        print("B") # PARSER-NEXT: print("B")
else: # PARSER-NEXT:else:
    print("C") # PARSER-NEXT: print("C")
print("D") # PARSER-NEXT:print("D")
# OUTPUT-NEXT: B
# OUTPUT-NEXT: D
if x == 1: # PARSER-NEXT:if (x == 1i):
    if y == 3: # PARSER-NEXT: if (y == 3i):
        print("A") # PARSER-NEXT: print("A")
    else: # PARSER-NEXT: else:
        print("B") # PARSER-NEXT: print("B")
print("D") # PARSER-NEXT:print("D")
# OUTPUT-NEXT: B
# OUTPUT-NEXT: D
if x == 1: # PARSER-NEXT:if (x == 1i):
    if y == 3: # PARSER-NEXT: if (y == 3i):
        print("A") # PARSER-NEXT: print("A")
    else: # PARSER-NEXT: else:
        print("X") # PARSER-NEXT: print("X")
        if y == 2: # PARSER-NEXT: if (y == 2i):
            print("B") # PARSER-NEXT: print("B")
        else: # PARSER-NEXT: else:
            print("E") # PARSER-NEXT: print("E")
else: # PARSER-NEXT:else:
    print("C") # PARSER-NEXT: print("C")
print("D") # PARSER-NEXT:print("D")
# OUTPUT-NEXT: X
# OUTPUT-NEXT: B
# OUTPUT-NEXT: D
| StarcoderdataPython |
5029193 | <reponame>mfleming99/COVID-QA<filename>covid_nlp/language/ms_translate.py
# -*- coding: utf-8 -*-
import os, requests, uuid, json
import sys
import pandas as pd
class MSTranslator():
    """Minimal client for the Microsoft (Azure) Translator REST API v3."""
    def __init__(self, key = None, endpoint = None, lang = None):
        # Fall back to the AZURE_TRANSLATE_KEY environment variable
        # (raises KeyError if neither a key nor the variable is set).
        if key:
            self.azure_key = key
        else:
            self.azure_key = os.environ['AZURE_TRANSLATE_KEY']
        self.azure_endpoint = endpoint
        self.lang = lang
        # The target language is fixed per instance via the query string.
        self.url = f"{self.azure_endpoint}/translate?api-version=3.0&to={self.lang}"
        self.headers = {
            'Ocp-Apim-Subscription-Key': self.azure_key,
            'Content-type': 'application/json',
            'X-ClientTraceId': str(uuid.uuid4())
        }
    def translate(self, text):
        """Translate *text* to self.lang; returns "" if the API returns nothing."""
        body = [{'text': text.strip()}]
        request = requests.post(self.url, headers = self.headers, json = body)
        response = request.json()
        trans_text = ""
        if len(response) > 0:
            trans_text = response[0]['translations'][0]['text']
        return trans_text
def main():
    """Translate the FAQ CSV's question/answer columns to Arabic and write
    the result to MT_<lang>_<original filename> in the current directory."""
    lang = "ar"
    azure_endpoint = "https://api.cognitive.microsofttranslator.com/"
    ms_translator = MSTranslator(endpoint = azure_endpoint, lang = lang)
    faq_file = "../../data/faqs/faq_covidbert.csv"
    df = pd.read_csv(faq_file)
    # One API call per cell; slow for large FAQs but simple.
    df[f'question_{lang}'] = df.apply(lambda x: ms_translator.translate(x.question), axis=1)
    df[f'answer_{lang}'] = df.apply(lambda x: ms_translator.translate(x.answer), axis=1)
    faq_filename = os.path.basename(faq_file)
    df.to_csv(f"MT_{lang}_{faq_filename}")
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
9720246 | <filename>model/data_analysis.py
import csv
import argparse
from matplotlib import pyplot as plt
import numpy as np
# Script that creates a histogram of the years that citations come from.
# This can be used for data analysis of a test set.
# The examples must be labeled with a "Year" column.
# Usage: python3 data_analysis.py examples.csv
parser = argparse.ArgumentParser(description='Train CSV file with Anystyle and get accuracy.')
parser.add_argument('examples', metavar='examples.csv', type=str,
                    help='All examples as a CSV file.')
args = parser.parse_args()
# Put the command line variables into python variables
all_examples_file_name = args.examples
# Open the examples file (NOTE(review): the handle is never closed; harmless
# for a short script, but a with-block would be cleaner)
all_examples_file = open(all_examples_file_name, "r", newline='\n', encoding="utf-8")
csv_reader = csv.DictReader(all_examples_file)
# NOTE(review): this counter is never updated or used
amount_of_examples_in_range = 0
all_years = []
# Collect all parseable years; 300000 entries are skipped (presumably a
# "missing year" sentinel -- confirm with the data producer)
for row in csv_reader:
    try:
        year = int(row["Year"])
        if year != 300000:
            all_years.append(year)
    except ValueError:
        pass
all_years.sort()
data = np.array(all_years)
n, bins, patches = plt.hist(x=data, bins='auto', color='#0504aa',
                            alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Year')
plt.ylabel('Amount of citations')
plt.title('Years of citations')
# NOTE(review): leftover annotation from a matplotlib example -- draws the
# literal text "mu=15, b=3" at data coordinates (23, 45); probably unintended.
plt.text(23, 45, r'$\mu=15, b=3$')
maxfreq = n.max()
# Set a clean upper y-axis limit.
plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)
plt.show() | StarcoderdataPython |
6454987 | <reponame>xiangshiyin/coding-challenge
class Solution:
def isValidSerialization(self, preorder: str) -> bool:
nodes = preorder.split(',')
slots = 1
for node in nodes:
slots -= 1
if slots < 0:
return False
if node.isnumeric():
slots += 2
return slots == 0
| StarcoderdataPython |
class Робочи_дни():
    """School-week timetable printer (text in Ukrainian/Russian).

    Each method prints the schedule table for one weekday; the single
    parameter is the instance passed by the caller and is otherwise unused.
    Fix: the class header line was corrupted by a dataset artifact and is
    restored here; the printed tables are unchanged.
    NOTE(review): Cer prints the same table as Viv, and Cha the same as Pon
    -- confirm whether that duplication is intended.
    """
    def Pon(pon):
        # Monday
        print(""" Урок : Година : Оцынка
        Физика 08:00 8
        Инф.мат 08:50 10
        Укр.лит 09:30 11
        Укр.мова 10:15 5
        Химия 12:00 7
        """)
    def Viv(der):
        # Tuesday
        print(""" Урок : Година : Оцынка
        Фыз.кул 08:00 12
        Алгебра 08:50 7
        Геомет 09:30 4
        Географ 10:15 6
        Муз.мист 12:00 11
        """)
    def Cer(cer):
        # Wednesday
        print(""" Урок : Година : Оцынка
        Фыз.кул 08:00 12
        Алгебра 08:50 7
        Геомет 09:30 4
        Географ 10:15 6
        Муз.мист 12:00 11
        """)
    def Cha(cha):
        # Thursday
        print(""" Урок : Година : Оцынка
        Физика 08:00 8
        Инф.мат 08:50 10
        Укр.лит 09:30 11
        Укр.мова 10:15 5
        Химия 12:00 7
        """)
    def Pat(pat):
        # Friday
        print(""" Урок : Година : Оцынка
        Фыз.кул 08:00 12
        Алгебра 08:50 7
        Геомет 09:30 4
        Географ 10:15 6
        Муз.мист 12:00 11
        """)
# Weekday lookup: prompt for a day name and print its timetable.
Понедилок = Робочи_дни()
Вывторок = Робочи_дни()
Середа = Робочи_дни()
Четверг = Робочи_дни()
Пятниця = Робочи_дни()
z = input("Введите учебние дны: ")
# Fix: the schedule methods print the table themselves and return None, so
# the original's print(Робочи_дни.Pon(...)) emitted a stray "None" line.
# The comparisons are mutually exclusive, hence the elif chain.
if z == "Понедилок":
    Робочи_дни.Pon(Понедилок)
elif z == "Вывторок":
    Робочи_дни.Viv(Вывторок)
elif z == "Середа":
    Робочи_дни.Cer(Середа)
elif z == "Четверг":
    Робочи_дни.Cha(Четверг)
elif z == "Пятниця":
    Робочи_дни.Pat(Пятниця)
| StarcoderdataPython |
393786 | <filename>plugins/rapid7_insightops/komand_rapid7_insightops/connection/connection.py
import komand
from .schema import ConnectionSchema
# Custom imports below
class Connection(komand.Connection):
    """Rapid7 InsightOps connection: stores the API key and region-specific URLs."""
    def __init__(self):
        super(self.__class__, self).__init__(input=ConnectionSchema())
        self.api_key = None
        self.insighturl = None
        # NOTE(review): declared here as 'postopsdataurl', but connect() below
        # assigns 'postdataurl' -- likely a typo; confirm which attribute the
        # plugin's actions actually read.
        self.postopsdataurl = None
    def connect(self, params):
        """Cache the API key and build the region-specific REST / ingestion URLs."""
        self.api_key = params["api_key"].get("secretKey")
        self.insighturl = "https://{}.rest.logs.insight.rapid7.com".format(params["region"])
        self.postdataurl = "https://{}.js.logs.insight.rapid7.com/v1/noformat/".format(params["region"])
        self.logger.info("Connect: Connecting..")
| StarcoderdataPython |
1630267 | <filename>registry/app.py
"""The app module, containing the app factory function."""
import logging
import sys
from flask import Flask, render_template
from registry import batch, commands, donor, public, user
from registry.extensions import (
bcrypt,
csrf_protect,
db,
debug_toolbar,
login_manager,
migrate,
)
from registry.utils import template_globals
def create_app(config_object="registry.settings"):
    """Application factory (see
    http://flask.pocoo.org/docs/patterns/appfactories/).

    :param config_object: dotted path of the configuration object to load.
    :return: the fully wired :class:`flask.Flask` application.
    """
    root_package = __name__.partition(".")[0]
    app = Flask(root_package)
    app.config.from_object(config_object)
    app.context_processor(template_globals)
    register_extensions(app)
    register_blueprints(app)
    register_commands(app)
    configure_logger(app)

    @app.errorhandler(404)
    def page_not_found(e):
        # Serve the project's friendly 404 page instead of Flask's default.
        return render_template("404.html"), 404

    @app.template_filter("format_time")
    def format_time(date, format="%d.%m.%Y %H:%M:%S"):
        # Jinja filter: render a datetime for display in templates.
        return date.strftime(format)

    return app
def register_extensions(app):
    """Register Flask extensions on the application."""
    # Extensions that follow the plain init_app(app) protocol, in the same
    # order as before.
    for extension in (bcrypt, db, csrf_protect, login_manager, debug_toolbar):
        extension.init_app(app)
    # Flask-Migrate additionally needs the SQLAlchemy handle.
    migrate.init_app(app, db)
    return None
def register_blueprints(app):
    """Register Flask blueprints on the application."""
    for blueprint in (
        public.views.blueprint,
        user.views.blueprint,
        donor.views.blueprint,
        batch.views.blueprint,
    ):
        app.register_blueprint(blueprint)
    return None
def register_commands(app):
    """Register Click commands on the application's CLI."""
    for command in (commands.create_user, commands.install_test_data):
        app.cli.add_command(command)
def configure_logger(app):
    """Attach a stdout handler to the app logger (once)."""
    stdout_handler = logging.StreamHandler(sys.stdout)
    if app.logger.handlers:
        # A handler is already attached (e.g. by a reloader); leave it alone.
        return
    app.logger.addHandler(stdout_handler)
| StarcoderdataPython |
1676630 | #!/usr/bin/env python
class SkipObject:
    """Iterable wrapper that yields every other item of a sequence."""

    def __init__(self, wrapped):
        """Remember the sequence to skip over."""
        self.wrapped = wrapped

    def __iter__(self):
        """Hand out a fresh iterator so nested loops stay independent."""
        return SkipIterator(self.wrapped)
class SkipIterator:
    """Iterator that returns items 0, 2, 4, ... of a sequence."""

    def __init__(self, wrapped):
        self.wrapped = wrapped
        self.offset = 0  # next index to return; advances by 2

    def __next__(self):
        # Bug fix: the original test was ``offset > len(wrapped)``, which let
        # an even-length sequence fall through to an IndexError on the final
        # step; ``>=`` stops exactly at the end of the sequence.
        if self.offset >= len(self.wrapped):
            raise StopIteration
        item = self.wrapped[self.offset]
        self.offset += 2
        return item
# Demo: SkipObject yields items 0, 2, 4, ... of the wrapped sequence.
alpha = 'abcde'
skipper = SkipObject(alpha)
# Manual protocol use: prints 'a', 'c', 'e'.
I = iter(skipper)
print(next(I))
print(next(I))
print(next(I))
# Each for-loop obtains its own SkipIterator, so nesting works independently.
for x in skipper:
    for y in skipper:
        print(x+y, end=' ')
print()
| StarcoderdataPython |
11341566 | from direct.showbase.PythonUtil import POD
class QuestRewardStruct(POD):
    # POD ("plain old data") base generates accessors from DataSet: keys are
    # the field names, values their defaults.
    # NOTE(review): field semantics (reward type/amount/quest id) inferred
    # from the names only — verify against the quest system that consumes
    # this struct.
    DataSet = {'rewardType': None,'amount': None,'questId': None}
11254869 | <reponame>probcomp/hierarchical-irm
# Copyright 2021 MIT Probabilistic Computing Project
# Apache License, Version 2.0, refer to LICENSE.txt
from scipy.io import loadmat
# Animals as a single binary relation"
# has: Animals x Features -> {0,1}
# Load the 50-animals binary feature matrix (rows: animals, cols: features).
x = loadmat('50animalbindat.mat')
# Unwrap MATLAB cell-array strings into plain Python lists.
features = [y[0][0] for y in x['features'].T]
animals = [y[0][0] for y in x['names'].T]
data = x['data']
# Binary-relation encoding: a single "has" relation over (feature, animal).
with open('animals.binary.schema', 'w') as f:
    f.write('bernoulli has feature animal\n')
with open('animals.binary.obs', 'w') as f:
    for i, animal in enumerate(animals):
        for j, feature in enumerate(features):
            value = int(data[i,j])
            # Animal names are used as identifiers, so strip spaces.
            a = animal.replace(' ', '')
            f.write('%d has %s %s\n' % (value, feature, a))
# Unary encoding: one relation per feature, applied to each animal.
with open('animals.unary.schema', 'w') as f:
    for feature in features:
        f.write('bernoulli %s animal\n' % (feature,))
with open('animals.unary.obs', 'w') as f:
    for j, feature in enumerate(features):
        for i, animal in enumerate(animals):
            value = data[i,j]
            a = animal.replace(' ', '')
            f.write('%d %s %s\n' % (value, feature, a))
6492559 | <reponame>linuxfood/pyobjc-framework-Cocoa-test<filename>PyObjCTest/test_nswindowrestoration.py
import AppKit
import objc
from PyObjCTools.TestSupport import TestCase, min_os_level
class RestorationHelper(AppKit.NSObject):
    # Minimal NSWindowRestoration conformer; exists only so the test below
    # can introspect the selector's block-argument metadata.
    def restoreWindowWithIdentifier_state_completionHandler_(self, a, b, c):
        pass
class TestNSWindowRestoration(TestCase):
    # Verifies PyObjC bridge metadata (block signatures, BOOL mapping,
    # constants) for the NSWindowRestoration protocol and related AppKit API.
    @min_os_level("10.7")
    def testProtocol10_7(self):
        # The protocol must be registered with the Objective-C runtime.
        objc.protocolNamed("NSWindowRestoration")
        # Completion handler (arg 2) is a block taking (window, error).
        self.assertArgIsBlock(
            RestorationHelper.restoreWindowWithIdentifier_state_completionHandler_,
            2,
            b"v@@",
        )
    @min_os_level("10.7")
    def testMethods10_7(self):
        # Restoration entry points must expose BOOL results and block args.
        self.assertResultIsBOOL(
            AppKit.NSApplication.restoreWindowWithIdentifier_state_completionHandler_
        )
        self.assertArgIsBlock(
            AppKit.NSApplication.restoreWindowWithIdentifier_state_completionHandler_,
            2,
            b"v@@",
        )
        self.assertArgIsBOOL(AppKit.NSWindow.setRestorable_, 0)
        self.assertResultIsBOOL(AppKit.NSWindow.isRestorable)
        self.assertArgIsBlock(
            AppKit.NSDocument.restoreDocumentWindowWithIdentifier_state_completionHandler_,
            2,
            b"v@@",
        )
    @min_os_level("10.7")
    def testConstants10_7(self):
        # Notification name constant must bridge to a Python string.
        self.assertIsInstance(
            AppKit.NSApplicationDidFinishRestoringWindowsNotification, str
        )
| StarcoderdataPython |
9740554 | import webbrowser, os
import json
import boto3
import io
from io import BytesIO
import sys
file_name = sys.argv[1]  # path to the input image (first CLI argument)
# get the results
# Module-level Textract client, shared by the functions below.
client = boto3.client(
    service_name='textract',
    region_name= 'us-east-1'
)
def get_table_html_results(file_name):
    """Run Textract table analysis on an image file and return HTML markup
    for every detected table (or a "no table" notice)."""
    with open(file_name, 'rb') as image_file:
        image_bytes = bytearray(image_file.read())
        print('Image loaded', file_name)
    # Analyze the raw image bytes via the module-level Textract client.
    response = client.analyze_document(Document={'Bytes': image_bytes}, FeatureTypes=["TABLES"])
    blocks = response['Blocks']
    # Index every block by id, and collect the TABLE blocks separately.
    blocks_map = {block['Id']: block for block in blocks}
    table_blocks = [block for block in blocks if block['BlockType'] == "TABLE"]
    if not table_blocks:
        return "<b> NO Table FOUND </b>"
    pieces = []
    for index, table in enumerate(table_blocks, start=1):
        pieces.append(generate_table_html(table, blocks_map, index))
        pieces.append('<hr>\n\n')
    return ''.join(pieces)
def get_rows_columns_map(table_result, blocks_map):
    """Map a Textract TABLE block to {row_index: {col_index: cell_text}}."""
    rows = {}
    for relationship in table_result['Relationships']:
        if relationship['Type'] != 'CHILD':
            continue
        for child_id in relationship['Ids']:
            cell = blocks_map[child_id]
            if cell['BlockType'] != 'CELL':
                continue
            # setdefault creates the row dict on first sight of its index.
            row = rows.setdefault(cell['RowIndex'], {})
            row[cell['ColumnIndex']] = get_text(cell, blocks_map)
    return rows
def get_text(result, blocks_map):
    """Concatenate the WORD children of a Textract block, each followed by a
    single space; returns '' when the block has no relationships."""
    pieces = []
    for relationship in result.get('Relationships', []):
        if relationship['Type'] != 'CHILD':
            continue
        for child_id in relationship['Ids']:
            word = blocks_map[child_id]
            if word['BlockType'] == 'WORD':
                pieces.append(word['Text'] + ' ')
    return ''.join(pieces)
def generate_table_html(table_result, blocks_map, table_index):
    """Render one Textract table as a collapsible Bootstrap card.

    The first table starts expanded; all subsequent tables start collapsed.
    """
    rows = get_rows_columns_map(table_result, blocks_map)
    is_first_table = table_index == 1
    table_id = 'Table_' + str(table_index)
    # Card header with the collapse toggle button.
    area_expanded = 'true' if is_first_table else 'false'
    colapse_show = 'show' if is_first_table else ''
    table_html = '<div class="card"><div class="card-header" id="headingOne">' \
                 '<h5 class="mb-0">' \
                 '<button class="btn btn-link" data-toggle="collapse" data-target="#{}" ' \
                 'aria-expanded="{}" aria-controls="collapseOne">{}</button>' \
                 '</h5>' \
                 '</div>'.format(table_id, area_expanded, table_id)
    table_html += '<div id="{}" class="collapse {}" data-parent="#accordion">' \
                  '<div class="card-body">' \
                  '<table class="myTable table table-hover">\n'.format(table_id, colapse_show)
    for row_index, cols in rows.items():
        table_html += '<tr row="{}">\n'.format(row_index)
        for col_index, text in cols.items():
            table_html += '<td col="{}">{}</td>\n'.format(col_index, text)
        # Bug fix: close each row with </tr>; the original emitted a second
        # opening <tr>, producing malformed HTML.
        table_html += '</tr>\n'
    table_html += '</table> </div> </div></div>\n'
    return table_html
# Build the HTML for every table found in the input image.
table_html = get_table_html_results(file_name)
template_file = 'template.html'
output_file = 'output.html'
# replace content: fill the template's placeholders line by line.
with open(template_file, "rt") as fin:
    with open(output_file, "wt") as fout:
        for line in fin:
            fout.write(line
                .replace('[[REPLACE_TITLE]]', 'Table Results for ' + file_name)
                .replace('[[REPLACE_TABLE]]', table_html))
# show the results in the default browser.
webbrowser.open('file://' + os.path.realpath(output_file))
4904260 | # Generated by Django 3.2.9 on 2021-11-16 11:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: wires courseregistration's foreign
    # keys to the faculty/academicInfo apps and its many-to-many to students.
    initial = True
    dependencies = [
        ('academicInfo', '0001_initial'),
        ('faculty', '0001_initial'),
        ('student', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='courseregistration',
            name='faculty',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='faculty.faculty'),
        ),
        migrations.AddField(
            model_name='courseregistration',
            name='registration',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='academicInfo.registration'),
        ),
        migrations.AddField(
            model_name='courseregistration',
            name='students',
            field=models.ManyToManyField(to='student.Student'),
        ),
    ]
| StarcoderdataPython |
5010732 | <filename>tests/actions/test_store_metric_action.py
from great_expectations.core import ExpectationSuiteValidationResult, ExpectationValidationResult, \
ExpectationConfiguration
from great_expectations.core.metric import ValidationMetricIdentifier
from great_expectations.data_context.types.resource_identifiers import ValidationResultIdentifier, \
ExpectationSuiteIdentifier
from great_expectations.validation_operators.actions import StoreMetricsAction
def test_StoreMetricsAction(basic_in_memory_data_context_for_validation_operator):
    """StoreMetricsAction should persist the requested suite-level statistics
    into the metrics store, keyed by run id and expectation suite."""
    action = StoreMetricsAction(
        data_context=basic_in_memory_data_context_for_validation_operator,
        requested_metrics={
            "*": [
                "statistics.evaluated_expectations",
                "statistics.successful_expectations"
            ]
        },
        target_store_name="metrics_store"
    )
    # First validation result: suite "foo" with 5 evaluated / 3 successful.
    validation_result = ExpectationSuiteValidationResult(
        success=False,
        meta={
            "expectation_suite_name": "foo",
            "run_id": "bar"
        },
        statistics={
            "evaluated_expectations": 5,
            "successful_expectations": 3
        }
    )
    # Run the action and store our metrics
    action.run(validation_result, ValidationResultIdentifier.from_object(validation_result), data_asset=None)
    # Second result: suite "foo.warning" with different statistics.
    validation_result = ExpectationSuiteValidationResult(
        success=False,
        meta={
            "expectation_suite_name": "foo.warning",
            "run_id": "bar"
        },
        statistics={
            "evaluated_expectations": 8,
            "successful_expectations": 4
        }
    )
    action.run(validation_result, ValidationResultIdentifier.from_object(validation_result), data_asset=None)
    # Both suites' metrics must be retrievable under their own identifiers.
    assert basic_in_memory_data_context_for_validation_operator.stores["metrics_store"].get(ValidationMetricIdentifier(
        run_id="bar",
        expectation_suite_identifier=ExpectationSuiteIdentifier("foo"),
        metric_name="statistics.evaluated_expectations",
        metric_kwargs_id=None
    )) == 5
    assert basic_in_memory_data_context_for_validation_operator.stores["metrics_store"].get(ValidationMetricIdentifier(
        run_id="bar",
        expectation_suite_identifier=ExpectationSuiteIdentifier("foo"),
        metric_name="statistics.successful_expectations",
        metric_kwargs_id=None
    )) == 3
    assert basic_in_memory_data_context_for_validation_operator.stores["metrics_store"].get(ValidationMetricIdentifier(
        run_id="bar",
        expectation_suite_identifier=ExpectationSuiteIdentifier("foo.warning"),
        metric_name="statistics.evaluated_expectations",
        metric_kwargs_id=None
    )) == 8
    assert basic_in_memory_data_context_for_validation_operator.stores["metrics_store"].get(ValidationMetricIdentifier(
        run_id="bar",
        expectation_suite_identifier=ExpectationSuiteIdentifier("foo.warning"),
        metric_name="statistics.successful_expectations",
        metric_kwargs_id=None
    )) == 4
def test_StoreMetricsAction_column_metric(basic_in_memory_data_context_for_validation_operator):
    """StoreMetricsAction should also store column-scoped expectation metrics
    (here: the unexpected_count for provider_id uniqueness)."""
    action = StoreMetricsAction(
        data_context=basic_in_memory_data_context_for_validation_operator,
        requested_metrics={
            "*": [
                {
                    "column": {
                        "provider_id": ["expect_column_values_to_be_unique.result.unexpected_count"]
                    }
                },
                "statistics.evaluated_expectations",
                "statistics.successful_expectations"
            ]
        },
        target_store_name="metrics_store"
    )
    # Validation result containing one column-level expectation result with
    # unexpected_count == 7 for column "provider_id".
    validation_result = ExpectationSuiteValidationResult(
        success=False,
        meta={
            "expectation_suite_name": "foo",
            "run_id": "bar"
        },
        results=[
            ExpectationValidationResult(
                meta={},
                result={
                    "element_count": 10,
                    "missing_count": 0,
                    "missing_percent": 0.0,
                    "unexpected_count": 7,
                    "unexpected_percent": 0.0,
                    "unexpected_percent_nonmissing": 0.0,
                    "partial_unexpected_list": []
                },
                success=True,
                expectation_config=ExpectationConfiguration(
                    expectation_type="expect_column_values_to_be_unique",
                    kwargs={
                        "column": "provider_id",
                        "result_format": "BASIC"
                    }
                ),
                exception_info=None
            )
        ],
        statistics={
            "evaluated_expectations": 5,
            "successful_expectations": 3
        }
    )
    action.run(validation_result, ValidationResultIdentifier.from_object(validation_result), data_asset=None)
    # The column metric is keyed by its metric_kwargs_id ("column=provider_id").
    assert basic_in_memory_data_context_for_validation_operator.stores["metrics_store"].get(ValidationMetricIdentifier(
        run_id="bar",
        expectation_suite_identifier=ExpectationSuiteIdentifier("foo"),
        metric_name="expect_column_values_to_be_unique.result.unexpected_count",
        metric_kwargs_id="column=provider_id"
    )) == 7
| StarcoderdataPython |
8067855 | <reponame>usrl-uofsc/WaterSampling
import RPi.GPIO as GPIO
from time import sleep
class Bottle:
    """Water-level sensor on a single GPIO pin.

    The fill flag latches: once the sensor pin reads low, the bottle is
    reported full until the flag is explicitly reset via the setter.
    """

    def __init__(self, pin):
        """Configure *pin* (BCM numbering) as a pulled-down input."""
        self.pin = pin
        self.__isfull = False
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

    @property
    def is_full(self):
        """Latch and return the fill state; a low pin marks the bottle full."""
        if not GPIO.input(self.pin):
            self.__isfull = True
        return self.__isfull

    @is_full.setter
    def is_full(self, value):
        """Force the latched state; anything other than True clears it."""
        self.__isfull = (value == True)
| StarcoderdataPython |
3376686 | import argparse
from google_drive_backup import GoogleDriveBackupCreator
# Parse the single positional argument: the directory to back up.
parser = argparse.ArgumentParser()
parser.add_argument("dir", help="Directory name to backup")
args = parser.parse_args()
# Upload the requested directory to Google Drive.
gdrive_backup_creator = GoogleDriveBackupCreator()
gdrive_backup_creator.backup(args.dir)
| StarcoderdataPython |
5113060 | """
"""
import json
import os
from openeo_odc.map_to_odc import map_to_odc
from openeo_pg_parser.translate import translate_process_graph
from openeo_pg_parser.validate import validate_processes
def test_job():
    """Create a xarray/opendatacube job based on an openEO process graph
    and compare the generated Python file against a checked-in reference."""
    # Set input parameters
    tests_folder = os.path.dirname(os.path.abspath(__file__))
    process_graph_json = os.path.join(tests_folder, "process_graphs/evi.json")
    process_defs = json.load(open(
        os.path.join(tests_folder, 'backend_processes.json')
    ))['processes']
    odc_env = 'default'
    odc_url = 'tcp://xx.yyy.zz.kk:8786'  # dummy Dask scheduler address
    graph = translate_process_graph(process_graph_json,
                                    process_defs).sort(by='result')
    # Check if process graph is valid
    validate_processes(graph, process_defs)
    nodes = map_to_odc(graph, odc_env, odc_url)
    # Write to disk
    with open("evi_odc.py", "w") as f:
        for node in nodes:
            f.write(nodes[node])
    # Check it matches the reference file
    f_name = "evi_odc"
    with open(f_name + ".py") as f:
        this_file = f.readlines()
    with open(os.path.join(tests_folder, f"ref_jobs/{f_name}_ref.py")) as f:
        ref_file = f.readlines()
    assert this_file == ref_file
    # Clean up
    os.remove(f_name + ".py")
| StarcoderdataPython |
11317254 | # import libraries here
import numpy as np
import cv2
def count_blood_cells(image_path):
    """
    Takes the path to a photograph and returns the number of red blood cells,
    the number of white blood cells, and whether the patient has leukemia,
    based on the ratio of the cell counts.
    This procedure is called automatically from the main procedure; that part
    of the code does not need to be changed or implemented.
    :param image_path: <String> path to the input photograph.
    :return: <int> number of counted red blood cells,
             <int> number of counted white blood cells,
             <bool> whether the patient has leukemia (True or False)
    """
    red_blood_cell_count = 0
    white_blood_cell_count = 0
    has_leukemia = None
    # Work on the normalized green channel, stretched to the full 0-255 range.
    cvimg = cv2.imread(image_path)
    greenimg = cvimg[:, :, 1].astype('float64')
    greenimg *= (255.0 / greenimg.max())
    greenimg = greenimg.astype('uint8')
    # Pass 1 (high offset 62): threshold that isolates the darker white cells.
    adabingreenimg = cv2.adaptiveThreshold(greenimg, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 535, 62)
    invadabingreenimg = 255 - adabingreenimg
    img, contours, hierarchy = cv2.findContours(invadabingreenimg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Fill every contour so each cell becomes one solid blob.
    for contour in contours:
        cv2.fillPoly(invadabingreenimg, pts=[contour], color=255)
    # Morphological opening removes small noise before counting.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    oinvadabingreenimg = cv2.morphologyEx(invadabingreenimg, cv2.MORPH_OPEN, kernel, iterations=3)
    _, whitecellscontours, _ = cv2.findContours(oinvadabingreenimg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    white_blood_cell_count = len(whitecellscontours)
    # Pass 2 (offset 0): threshold that captures all cells (red + white).
    adabingreenimg = cv2.adaptiveThreshold(greenimg, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 535, 0)
    invadabingreenimg = 255 - adabingreenimg
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    einvadabingreenimg = cv2.morphologyEx(invadabingreenimg, cv2.MORPH_ERODE, kernel, iterations=1)
    img, contours, hierarchy = cv2.findContours(einvadabingreenimg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        cv2.fillPoly(einvadabingreenimg, pts=[contour], color=255)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    oeinvadabingreenimg = cv2.morphologyEx(einvadabingreenimg, cv2.MORPH_OPEN, kernel, iterations=3)
    _, cellscontours, _ = cv2.findContours(oeinvadabingreenimg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Red cells = all detected cells minus the white cells from pass 1.
    red_blood_cell_count = len(cellscontours) - white_blood_cell_count
    # Heuristic: a low red-cell fraction (< 92.5%) suggests leukemia.
    has_leukemia = True if red_blood_cell_count/(red_blood_cell_count+white_blood_cell_count) < 0.925 else False
    # If 2-4 nearby white-cell blobs cluster within 16% of the image size,
    # treat them as fragments of a single white cell.
    if white_blood_cell_count > 1 and white_blood_cell_count <= 4:
        m = cv2.moments(whitecellscontours[0])
        cX = int(m["m10"] / m["m00"])
        cY = int(m["m01"] / m["m00"])
        center = [cX, cY]
        canMergeCells = True
        for c in whitecellscontours:
            m = cv2.moments(c)
            cX = int(m["m10"] / m["m00"])
            cY = int(m["m01"] / m["m00"])
            if abs(center[0] - cX) < 0.16 * greenimg.shape[1] and abs(center[1] - cY) < 0.16 * greenimg.shape[0]:
                center = [(center[0] + cX) / 2, (center[1] + cY) / 2]
            else:
                canMergeCells = False
                break
        if canMergeCells:
            white_blood_cell_count = 1
            has_leukemia = False
    # Final overrides: many white cells with few reds => leukemia; a single
    # white cell => healthy. NOTE(review): thresholds appear tuned to the
    # training images — confirm before reuse on other data.
    has_leukemia = True if white_blood_cell_count > 4 and red_blood_cell_count <= 1800 else has_leukemia
    has_leukemia = False if white_blood_cell_count == 1 else has_leukemia
    return red_blood_cell_count, white_blood_cell_count, has_leukemia
| StarcoderdataPython |
159632 | <filename>python/StringCalculator/StringCalculator.py
class StringCalculator:
    """String Calculator kata: sums the numbers in a delimited string.

    Supports ',' and '\\n' delimiters by default, single custom delimiters
    ("//;\\n1;2"), bracketed multi-char/multiple delimiters ("//[***]\\n1***2"),
    ignores numbers greater than 1000, and raises NegativeException when the
    input contains negative numbers.
    """

    def __init__(self):
        self.string = ''            # the (normalized) input being processed
        self.list_of_numbers = []   # parsed numbers for the current add() call
        self.delimiter = ','        # active delimiter for the current add() call

    def add(self, string_of_numbers):
        """Parse *string_of_numbers* and return the sum of its numbers.

        :raises NegativeException: if any number in the input is negative.
        """
        # Bug fix: reset per-call state so repeated add() calls on the same
        # instance do not accumulate previous numbers or leak a custom
        # delimiter from an earlier input.
        self.list_of_numbers = []
        self.delimiter = ','
        self.validate_input(string_of_numbers)
        self.process_delimiter()
        self.generate_list_of_numbers()
        return sum(self.list_of_numbers)

    def generate_list_of_numbers(self):
        """Split self.string on the active delimiter into self.list_of_numbers."""
        negatives = []
        for str_number in self.string.split(self.delimiter):
            number = int(str_number)
            if number < 0:
                negatives.append(str_number)
            if number > 1000:
                # Kata rule: numbers greater than 1000 are ignored.
                number = 0
            self.list_of_numbers.append(number)
        if negatives:
            raise NegativeException("Negatives not allowed: " + ",".join(negatives))

    def process_delimiter(self):
        """Extract any custom delimiter header and normalize '\\n' to the delimiter."""
        if self.string.startswith('//'):
            has_bracket_delimiter = self.string.find('[')
            if has_bracket_delimiter == -1:
                # "//<char>\n..." — single custom one-character delimiter.
                self.delimiter = self.string[2]
            else:
                # "//[d1][d2]\n..." — replace each declared delimiter with ','.
                delimiters = self.string[3:self.string.find('\n')-1]
                delimiters = delimiters.replace('[', self.delimiter)
                delimiters = delimiters.replace(']', '')
                for item in delimiters.split(self.delimiter):
                    self.string = self.string.replace(item, self.delimiter)
            # Drop the "//..." header line.
            self.string = self.string[self.string.find('\n')+1:]
        self.string = self.string.replace('\n', self.delimiter)

    def validate_input(self, string_of_numbers):
        """Store the input, mapping the empty string to "0" (sum 0)."""
        self.string = string_of_numbers
        if self.string == "":
            self.string = "0"
class NegativeException(Exception):
    """Raised by StringCalculator when the input contains negative numbers."""
    pass
1804904 | #******************************************************************************
# (C) 2018, <NAME>, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Utilities - System Module *
#******************************************************************************
import os
#############
# constants #
#############
# ANSI SGR escape sequences used for colored console logging: black text on
# bright green / yellow / red backgrounds, plus a reset to default colors.
ESCAPE_STR = "\033"
LOG_STR = ""
INFO_STR = ESCAPE_STR + "[30;102m"
WARNING_STR = ESCAPE_STR + "[30;103m"
ERROR_STR = ESCAPE_STR + "[30;101m"
RESET_STR = ESCAPE_STR + "[39;49m"
###########
# classes #
###########
# =============================================================================
class Error(Exception):
  """module specific exception, raised by the SYS utilities so that callers
  can catch SYS errors selectively"""
  pass
# =============================================================================
class Logger(object):
  """interface for logger implementations; subclasses must override the four
  _log* methods (setColorLogging is an optional no-op hook)"""
  # ---------------------------------------------------------------------------
  def setColorLogging(self, enable):
    """enables/disables color logging; default implementation does nothing"""
    pass
  # ---------------------------------------------------------------------------
  def _log(self, message, subsystem):
    """logs a plain message; must be overridden"""
    raise Error("Logger interface method _log() not implemented")
  # ---------------------------------------------------------------------------
  def _logInfo(self, message, subsystem):
    """logs an info message; must be overridden"""
    raise Error("Logger interface method _logInfo() not implemented")
  # ---------------------------------------------------------------------------
  def _logWarning(self, message, subsystem):
    """logs a warning message; must be overridden"""
    raise Error("Logger interface method _logWarning() not implemented")
  # ---------------------------------------------------------------------------
  def _logError(self, message, subsystem):
    """logs an error message; must be overridden"""
    raise Error("Logger interface method _logError() not implemented")
# =============================================================================
class DefaultLogger(Logger):
  """simple logger that writes to an optional log file, delegates to
  registered child loggers (per subsystem or a default child), or prints"""
  # ---------------------------------------------------------------------------
  def __init__(self):
    """defines the printout format"""
    self.childDefaultLogger = None
    self.childLoggers = {}
    self.logFile = None
    self.setColorLogging(False)
  # ---------------------------------------------------------------------------
  def setColorLogging(self, enable):
    """enables/disables color logging"""
    if enable:
      self.logStr = LOG_STR
      self.infoStr = INFO_STR
      self.warningStr = WARNING_STR
      self.errorStr = ERROR_STR
      self.resetStr = RESET_STR
    else:
      self.logStr = ""
      self.infoStr = "INFO: "
      self.warningStr = "WARNING: "
      self.errorStr = "ERROR: "
      self.resetStr = ""
    # delegate also to child loggers
    if self.childDefaultLogger != None:
      self.childDefaultLogger.setColorLogging(enable)
    for childLogger in self.childLoggers.values():
      childLogger.setColorLogging(enable)
  # ---------------------------------------------------------------------------
  def _emit(self, methodName, prefix, message, subsystem):
    """shared sink for the four logging methods (the originals were four
    copies of this logic): write to the log file if one is open, otherwise
    delegate to a child logger (subsystem-specific first, then the default
    child), otherwise print; methodName names the Logger method to invoke
    on a child so subclassed children keep their own formatting"""
    if subsystem == None:
      line = prefix + message + self.resetStr
    else:
      line = prefix + "[" + subsystem + "] " + message + self.resetStr
    if self.logFile != None:
      self.logFile.write(line + "\n")
      self.logFile.flush()
    elif subsystem in self.childLoggers:
      getattr(self.childLoggers[subsystem], methodName)(message, subsystem)
    elif self.childDefaultLogger != None:
      getattr(self.childDefaultLogger, methodName)(message, subsystem)
    else:
      print(line)
  # ---------------------------------------------------------------------------
  def _log(self, message, subsystem):
    """logs a plain message"""
    self._emit("_log", self.logStr, message, subsystem)
  # ---------------------------------------------------------------------------
  def _logInfo(self, message, subsystem):
    """logs an info message"""
    self._emit("_logInfo", self.infoStr, message, subsystem)
  # ---------------------------------------------------------------------------
  def _logWarning(self, message, subsystem):
    """logs a warning message"""
    self._emit("_logWarning", self.warningStr, message, subsystem)
  # ---------------------------------------------------------------------------
  def _logError(self, message, subsystem):
    """logs an error message"""
    self._emit("_logError", self.errorStr, message, subsystem)
  # ---------------------------------------------------------------------------
  def registerChildLogger(self, childLogger, subsystem=None):
    """
    registers a child logger,
    either for a specific subsystem or as replacement for default logs
    """
    if subsystem == None:
      self.childDefaultLogger = childLogger
    else:
      self.childLoggers[subsystem] = childLogger
  # ---------------------------------------------------------------------------
  def unregisterChildLogger(self, subsystem=None):
    """
    unregisters a child logger,
    either for a specific subsystem or for default logs
    """
    if subsystem == None:
      self.childDefaultLogger = None
    else:
      del self.childLoggers[subsystem]
  # ---------------------------------------------------------------------------
  def enableFileLogging(self, fileName):
    """enables logging to a file; on failure falls back to console logging"""
    if self.logFile != None:
      self.logFile.close()
    try:
      self.logFile = open(fileName, "w")
    except:
      self.logFile = None
      LOG_WARNING("Can not open log file " + fileName)
  # ---------------------------------------------------------------------------
  def disableFileLogging(self):
    """disables logging to a file"""
    if self.logFile != None:
      self.logFile.close()
      self.logFile = None
# =============================================================================
class Configuration(object):
  """configuration manager: holds name/value pairs and exposes them as
  read-only attributes"""
  # ---------------------------------------------------------------------------
  def __init__(self):
    """set an empty configuration dictionary"""
    self.configDictionary = {}
  # ---------------------------------------------------------------------------
  def setDefaults(self, defaults):
    """set default values of configuration variables;
    defaults is a sequence of (name, defaultValue) pairs and each default
    can be overruled by an environment variable of the same name"""
    global s_logger
    LOG_INFO("configuration variables")
    for default in defaults:
      configVar = default[0]
      # the default value can be overruled by an environment variable
      configVal = os.getenv(configVar)
      if configVal == None:
        configVal = default[1]
        LOG(configVar + " = " + configVal + " (default)")
      else:
        LOG(configVar + " = " + configVal + " (env)")
      # special handling of configuration variables inside the SYS module
      if configVar == "SYS_COLOR_LOG":
        s_logger.setColorLogging(configVal == "1")
      self.configDictionary[configVar] = configVal
  # ---------------------------------------------------------------------------
  def __getattr__(self, name):
    """read access to configuration variables"""
    # fall back to the configuration dictionary for unknown attributes
    if name in self.configDictionary:
      return self.configDictionary[name]
    raise AttributeError("configuration variable not found")
####################
# global variables #
####################
# configuration is a singleton, shared by the whole process
s_configuration = Configuration()
# logger is a singleton; the LOG* wrappers below always go through it
s_logger = DefaultLogger()
#############
# functions #
#############
def LOG(message, subsystem=None):
  """convenience wrapper for plain logging via the singleton logger"""
  s_logger._log(message, subsystem)
def LOG_INFO(message, subsystem=None):
  """convenience wrapper for info logging via the singleton logger"""
  s_logger._logInfo(message, subsystem)
def LOG_WARNING(message, subsystem=None):
  """convenience wrapper for warning logging via the singleton logger"""
  s_logger._logWarning(message, subsystem)
def LOG_ERROR(message, subsystem=None):
  """convenience wrapper for error logging via the singleton logger"""
  s_logger._logError(message, subsystem)
| StarcoderdataPython |
6425952 | <reponame>jalmquist/aleph
"""collection languages
Revision ID: <KEY>
Revises: 9<PASSWORD>
Create Date: 2017-06-14 10:13:23.270229
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '9be0f89c9088'
def upgrade():
    # Add the nullable languages array column to the collection table.
    op.add_column('collection', sa.Column('languages', postgresql.ARRAY(sa.Unicode()), nullable=True)) # noqa
def downgrade():
    # Reverse the upgrade by dropping the column it added.
    # NOTE(review): the original body was ``pass``, leaving this migration
    # irreversible; confirm no deployment relies on downgrade being a no-op.
    op.drop_column('collection', 'languages')
| StarcoderdataPython |
5043660 | # Generated by Django 3.0.8 on 2020-09-02 18:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: make Author.date_of_birth optional and label
    # it "Born" in forms/admin.
    dependencies = [
        ('catalog', '0008_auto_20200902_1625'),
    ]
    operations = [
        migrations.AlterField(
            model_name='author',
            name='date_of_birth',
            field=models.DateField(blank=True, null=True, verbose_name='Born'),
        ),
    ]
| StarcoderdataPython |
34669 | # Copyright (c) 2018 The Regents of the University of Michigan
# and the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Functions for caching data for MORF jobs.
"""
import os
import subprocess
import shutil
from urllib.parse import urlparse
import logging
from morf.utils.docker import load_docker_image
from morf.utils.log import set_logger_handlers, execute_and_log_output
from morf.utils.s3interface import sync_s3_bucket_cache
module_logger = logging.getLogger(__name__)
def make_course_session_cache_dir_fp(job_config, bucket, data_dir, course, session):
    """Build the local cache path for one course session:
    <job_config.cache_dir>/<bucket>/<data_dir>/<course>/<session>."""
    return os.path.join(job_config.cache_dir, bucket, data_dir, course, session)
def update_raw_data_cache(job_config):
    """
    Update the raw data cache using the parameters in job_config; if job_config contains multiple raw data buckets, cache all of them.
    :param job_config: MorfJobConfig object.
    :return: None
    """
    # Each bucket is cached into its own named directory within the job cache.
    for raw_bucket in job_config.raw_data_buckets:
        sync_s3_bucket_cache(job_config, raw_bucket)
    return
def update_proc_data_cache(job_config):
    """Refresh the local processed-data cache for job_config.

    Assumes job_config contains only a single proc_data_bucket.

    :param job_config: MorfJobConfig object; expected to define proc_data_bucket.
    :return: None
    """
    bucket_name = getattr(job_config, "proc_data_bucket", None)
    sync_s3_bucket_cache(job_config, bucket_name)
def fetch_from_cache(job_config, cache_file_path, dest_dir):
    """Fetch a file from the local cache for job_config into dest_dir, if it exists.

    :param job_config: MorfJobConfig object; may define cache_dir.
    :param cache_file_path: string, relative path to file in cache (this is identical
        to the directory path in s3; e.g. "/bucket/path/to/somefile.csv").
    :param dest_dir: absolute path of directory to fetch file into (created if needed).
    :return: path to fetched file (string); None if no cache is configured or the
        file does not exist in the cache.
    """
    logger = set_logger_handlers(module_logger, job_config)
    logger.info("fetching file {} from cache".format(cache_file_path))
    cache_dir = getattr(job_config, "cache_dir", None)
    # Guard before joining: the previous version called
    # os.path.join(None, cache_file_path) when cache_dir was missing, which
    # raised TypeError before the existence check could run.
    if cache_dir is not None:
        abs_cache_file_path = os.path.join(cache_dir, cache_file_path)
    else:
        abs_cache_file_path = None
    if abs_cache_file_path is not None and os.path.exists(abs_cache_file_path):
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        dest_fp = shutil.copy(abs_cache_file_path, dest_dir)
    else:
        logger.warning("file {} does not exist in cache".format(abs_cache_file_path))
        dest_fp = None
    return dest_fp
def docker_cloud_login(job_config):
    """Log in to Docker Cloud with the credentials stored in job_config.

    NOTE(review): the password is interpolated into the command line, which
    exposes it to process listings; `docker login --password-stdin` would be
    safer — confirm before changing, since execute_and_log_output also logs
    the command.

    :param job_config: MorfJobConfig object.
    :return: None
    """
    logger = set_logger_handlers(module_logger, job_config)
    login_cmd = "docker login --username={} --password={}".format(
        job_config.docker_cloud_username, job_config.docker_cloud_password)
    execute_and_log_output(login_cmd, logger)
def docker_cloud_push(job_config, image_uuid):
    """Tag a Docker image with the job's morf_id and push it to Docker Cloud.

    :param job_config: MorfJobConfig object
    :param image_uuid: Docker image uuid
    :return: the "repo:tag" path (string) the image was pushed to
    """
    logger = set_logger_handlers(module_logger, job_config)
    repo_and_tag = "{}:{}".format(job_config.docker_cloud_repo, job_config.morf_id)
    # First tag the image locally with the morf_id, then push the tagged image.
    for shell_cmd in ("docker tag {} {}".format(image_uuid, repo_and_tag),
                      "docker push {}".format(repo_and_tag)):
        execute_and_log_output(shell_cmd, logger)
    return repo_and_tag
def cache_to_docker_hub(job_config, dir, image_name):
    """Load a Docker image from disk and push it to the MORF Docker Hub repo.

    :param job_config: MorfJobConfig object.
    :param dir: directory containing the image archive (parameter name kept for
        backward compatibility although it shadows the builtin ``dir``).
    :param image_name: name of the image to load.
    :return: the "repo:tag" path (string) the image was pushed to
    """
    logger = set_logger_handlers(module_logger, job_config)
    loaded_image_uuid = load_docker_image(dir, job_config, logger, image_name)
    docker_cloud_login(job_config)
    return docker_cloud_push(job_config, loaded_image_uuid)
| StarcoderdataPython |
8141383 | #!/usr/bin/env python
from nodes import RootNode, FilterNode, HamlNode, create_node
from optparse import OptionParser
import sys
VALID_EXTENSIONS=['haml', 'hamlpy']
class Compiler:
    """Compiles HAML source text into HTML (or a debug tree of nodes)."""

    def process(self, raw_text, options=None):
        """Compile a complete HAML document given as one string.

        :param raw_text: HAML source text
        :param options: optional options object with a ``debug_tree`` attribute
        :return: rendered HTML string (or the debug tree when options.debug_tree)
        """
        split_text = raw_text.split('\n')
        return self.process_lines(split_text, options)

    def process_lines(self, haml_lines, options=None):
        """Compile a HAML document given as a list of source lines."""
        root = RootNode()
        line_iter = iter(haml_lines)
        haml_node = None
        for line_number, line in enumerate(line_iter):
            node_lines = line

            if root.parent_of(HamlNode(line)).should_treat_children_as_multiline():
                # Multi-line HAML: keep consuming lines until the braces balance out.
                if line.count('{') - line.count('}') == 1:
                    start_multiline = line_number  # For exception handling

                    while line.count('{') - line.count('}') != -1:
                        try:
                            # next() builtin instead of line_iter.next(): works on
                            # Python 2.6+ AND Python 3 (the old form broke on 3.x).
                            line = next(line_iter)
                        except StopIteration:
                            raise Exception('No closing brace found for multi-line HAML beginning at line %s' % (start_multiline + 1))
                        node_lines += line

            # Blank lines are attached to the previous node as extra newlines.
            if haml_node is not None and len(node_lines.strip()) == 0:
                haml_node.newlines += 1
            else:
                haml_node = create_node(node_lines)
                if haml_node:
                    root.add_node(haml_node)

        if options and options.debug_tree:
            return root.debug_tree()
        else:
            return root.render()
def convert_files():
    """Command-line entry point: compile a HAML file to HTML.

    Usage: ``hamlpy [options] infile [outfile]``. Output is written to stdout
    when no outfile is given.
    """
    import sys
    import codecs

    parser = OptionParser()
    parser.add_option("-d", "--debug-tree", dest="debug_tree",
                      action="store_true", help="Print the generated tree instead of the HTML")
    (options, args) = parser.parse_args()

    if len(args) < 1:
        # Parenthesized single-argument print works on both Python 2 and 3;
        # the former bare print statements were Python 2 only.
        print("Specify the input file as the first argument.")
    else:
        infile = args[0]
        haml_lines = codecs.open(infile, 'r', encoding='utf-8').read().splitlines()
        compiler = Compiler()
        output = compiler.process_lines(haml_lines, options=options)

        if len(args) == 2:
            outfile = codecs.open(args[1], 'w', encoding='utf-8')
            outfile.write(output)
        else:
            print(output)
# Script entry point: compile the HAML file named on the command line.
if __name__ == '__main__':
    convert_files()
| StarcoderdataPython |
149651 | <gh_stars>1-10
import boto3
def fetchDynamoClient(peer):
    """Return a boto3 DynamoDB Table resource for the peer's table name."""
    client_kwargs = _getClientConfig(peer)
    return boto3.resource('dynamodb', **client_kwargs).Table(peer['name'])
def fetchKinesisClient(peer):
    """Return a boto3 Kinesis client configured from the peer's config block."""
    return boto3.client('kinesis', **_getClientConfig(peer))
def fetchSSMClient(peer):
    """Return a boto3 SSM client configured from the peer's config block."""
    return boto3.client('ssm', **_getClientConfig(peer))
def setupKinesisArgs(peer, actionName, xargs, xkwargs):
    """Inject the stream name into Kinesis call kwargs where the API needs it.

    ``get_records`` never takes a stream name, and ``list_shards`` must not mix
    ``StreamName`` with a pagination ``NextToken``; every other action gets
    ``StreamName`` set from the peer definition.

    :param peer: dict describing the peer; ``peer['name']`` is the stream name.
    :param actionName: boto3 Kinesis client method name being invoked.
    :param xargs: positional call arguments (unused; kept for interface parity).
    :param xkwargs: keyword call arguments, mutated in place.
    """
    skip = (actionName == 'get_records'
            or (actionName == 'list_shards' and 'NextToken' in xkwargs))
    if not skip:
        xkwargs['StreamName'] = peer['name']
def setupParameterArgs(peer, actionName, xargs, xkwargs):
    """Inject the SSM parameter name (the peer's name) into the call kwargs.

    :param peer: dict describing the peer; ``peer['name']`` is the parameter name.
    :param actionName: boto3 SSM client method name (unused here).
    :param xargs: positional call arguments (unused; kept for interface parity).
    :param xkwargs: keyword call arguments, mutated in place.
    """
    xkwargs.update(Name=peer['name'])
def _getClientConfig(peer):
kwargs = {}
peerConfig = peer.get('config')
if peerConfig:
region = peerConfig.get('region')
if region:
kwargs['region_name'] = region
credentials = peerConfig.get('credentials')
if credentials:
key = credentials.get('accessKeyId')
if key:
kwargs['aws_access_key_id'] = key
secret = credentials.get('secretAccessKey')
if secret:
kwargs['aws_secret_access_key'] = secret
return kwargs | StarcoderdataPython |
1892120 | <filename>aws_networkacl.py
#!/usr/bin/env python
import jmespath
import argparse
import csv
import sys
from terminaltables import SingleTable
from aws_queries import query, NETWORKACL
from aws_info import vpc_info
def network_acls(environment, table_flag=False):
    """Fetch and print all network ACL entries for an AWS environment.

    Output goes to stdout, either as CSV rows (default) or as a bordered
    terminal table when table_flag is True.

    :param environment: AWS environment name passed through to query()/vpc_info().
    :param table_flag: render a terminal table instead of CSV when True.
    :return: None
    """
    field_names = ['Environment', 'ID', 'Name', 'VPC(ID/Name)', 'Rule Number', 'Port Range', 'Protocol', 'Action', 'CIDR']
    network_acl_json = query('describe-network-acls', environment)
    network_acl_data = jmespath.search(NETWORKACL, network_acl_json)
    vpc2name = vpc_info(environment)  # vpc id -> human-readable name
    # One output row per ACL entry; unnamed VPCs fall back to '-'.
    csv_data = [
        [environment,
         network_acl['id'],
         network_acl['tag'],
         '{vpc_id}/{name}'.format(
             vpc_id=network_acl['vpcId'],
             name=vpc2name.get(network_acl['vpcId'], '-')
         ),
         network_acl_entry['ruleNum'],
         network_acl_entry['portRange'],
         network_acl_entry['protocol'],
         network_acl_entry['action'],
         network_acl_entry['cidr']
         ]
        for network_acl in network_acl_data
        for network_acl_entry in network_acl['entries']
    ]
    csv_data.insert(0, field_names)
    if table_flag == False:
        csv_writer = csv.writer(sys.stdout)
        for line in csv_data:
            csv_writer.writerow(line)
    else:
        table = SingleTable(csv_data)
        table.inner_row_border = True
        # Parenthesized print works on Python 2 and 3; the previous bare
        # `print table.table` statement was Python 2 only.
        print(table.table)
###############################################################################
## http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml - protocol integer definitions
# Script entry point: print the network ACL table for the environment given
# as the first command-line argument.
if __name__=="__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('environment',nargs=1)  # nargs=1 yields a one-element list
    args = parser.parse_args()
    ENVIRONMENT=args.environment[0]
    network_acls(ENVIRONMENT,table_flag=True)
| StarcoderdataPython |
9758014 | <reponame>dbobrenko/AsynQ-Learning
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import threading as th
import time
from datetime import datetime
from six.moves import range # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from asyncrl.agent import QlearningAgent, AgentSummary
from asyncrl.environment import GymWrapperFactory
# Seed so the per-thread minimum-epsilon sampling below is reproducible.
random.seed(201)
# Distribution of per-thread minimum-epsilon values (0.1: 40%; 0.01: 30%; 0.5: 30%)
EPS_MIN_SAMPLES = 4 * [0.1] + 3 * [0.01] + 3 * [0.5]
# Configurations
tf.app.flags.DEFINE_integer("threads", 8, "Number of threads to use")
tf.app.flags.DEFINE_boolean("use_cpu", False, "Use CPU or GPU for training (by default is GPU)")
# Training settings
tf.app.flags.DEFINE_integer("total_frames", 40000000, "Total frames (across all threads)")
tf.app.flags.DEFINE_integer("update_interval", 40000, "Update target network after X frames")
tf.app.flags.DEFINE_float("eps_steps", 4000000.0, "Decrease epsilon over X frames")
tf.app.flags.DEFINE_float("eps_start", 1.0, "Starting epsilon (initial exploration chance)")
tf.app.flags.DEFINE_float("gamma", 0.99, "Gamma discount factor")
tf.app.flags.DEFINE_integer("tmax", 5, "Maximum batch size")
tf.app.flags.DEFINE_integer("action_repeat", 4, "Applies last action to X next frames")
tf.app.flags.DEFINE_integer("memory_len", 4, "Memory length - number of stacked input images")
# Environment settings
tf.app.flags.DEFINE_string("env", 'SpaceInvaders-v0', "Environment name (available all OpenAI Gym environments)")
tf.app.flags.DEFINE_boolean("render", False, "Render frames? Significantly slows down training process")
tf.app.flags.DEFINE_integer("width", 84, "Screen image width")
tf.app.flags.DEFINE_integer("height", 84, "Screen image height")
# Logging
tf.app.flags.DEFINE_integer("test_iter", 3, "Number of test iterations. Used for logging.")
tf.app.flags.DEFINE_string("logdir", 'logs/', "Path to the directory used for checkpoints and loggings")
tf.app.flags.DEFINE_integer("log_interval", 80000, "Log and checkpoint every X frame")
# Evaluation
tf.app.flags.DEFINE_boolean("eval", False, "Disables training, evaluates agent's performance")
tf.app.flags.DEFINE_string("evaldir", 'eval/', "Path to the evaluation logging")
tf.app.flags.DEFINE_integer("eval_iter", 5, "Number of evaluation episodes")
# Optimizer
tf.app.flags.DEFINE_float("lr", 1e-4, "Starting learning rate")
FLAGS = tf.app.flags.FLAGS
# Hide all GPUs for current process if CPU was chosen
if FLAGS.use_cpu:
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Shared mutable state across worker threads:
# per-thread epsilon values (written by train_async_dqn, read for logging)
# and the flag that tells run() the training loop has ended.
global_epsilons = [0.] * FLAGS.threads
training_finished = False
def update_epsilon(frames, eps_steps, eps_min):
    """Linearly anneal the exploration epsilon based on the current frame.

    Epsilon decays from FLAGS.eps_start down to ``eps_min`` over ``eps_steps``
    frames and is clamped at ``eps_min`` afterwards.

    :param frames: (float) current frame number
    :param eps_steps: (int) total number of epsilon annealing steps
    :param eps_min: (float) minimum allowed epsilon
    :return: (float) annealed epsilon
    """
    decay_fraction = frames / eps_steps
    annealed = FLAGS.eps_start - decay_fraction * (FLAGS.eps_start - eps_min)
    return max(annealed, eps_min)
def evaluate():
    """Evaluate the current agent greedily and record a video of its performance.

    Restores the latest checkpoint from FLAGS.logdir; prints an error and
    returns early when no checkpoint directory or checkpoint is found. The
    average reward over FLAGS.eval_iter episodes is printed at the end.
    """
    envwrap = GymWrapperFactory.make(FLAGS.env,
                                     actrep=FLAGS.action_repeat,
                                     memlen=FLAGS.memory_len,
                                     w=FLAGS.width,
                                     h=FLAGS.height)
    with tf.Session() as sess:
        agent = QlearningAgent(session=sess,
                               action_size=envwrap.action_size,
                               h=FLAGS.height,
                               w=FLAGS.width,
                               channels=FLAGS.memory_len,
                               opt=tf.train.AdamOptimizer(FLAGS.lr))
        sess.run(tf.initialize_all_variables())
        if not os.path.exists(FLAGS.logdir):
            print('ERROR! No', FLAGS.logdir, 'folder found!')
            return
        ckpt = tf.train.latest_checkpoint(FLAGS.logdir)
        if ckpt is not None:
            tf.train.Saver().restore(sess, ckpt)
            # Keep the target network in sync with the restored online network.
            agent.update_target()
            print('Session was restored from %s' % ckpt)
        else:
            print('ERROR! No checkpoint found at', FLAGS.logdir)
            return
        # The gym monitor records a video of the evaluation run.
        envwrap.env.monitor.start(os.path.join(FLAGS.evaldir, FLAGS.env))
        total_reward = 0
        for _ in range(FLAGS.eval_iter):
            s = envwrap.reset()
            terminal = False
            while not terminal:
                reward_per_action = agent.predict_rewards(s)
                # Act greedily on the predicted per-action rewards.
                s, r, terminal, info = envwrap.step(np.argmax(reward_per_action), test=True)
                total_reward += r
            envwrap.render()
        envwrap.env.monitor.close()
        print('Evaluation finished.')
        print('Average reward per episode: %0.4f' % (total_reward / FLAGS.eval_iter))
def test(agent, env, episodes):
    """Tests agent's performance (greedy policy) on given number of games.

    :param agent: agent to test
    :param env: environment
    :param episodes: (int) number of testing episodes
    :return: tuple (list of per-episode total rewards, list of the maximum
        Q-value observed at every step across all episodes)
    :type agent: agent.QlearningAgent
    :type env: environment.GymWrapper"""
    ep_rewards = []
    ep_q = []
    for _ in range(episodes):
        ep_reward = 0
        s = env.reset()
        terminal = False
        while not terminal:
            reward_per_action = agent.predict_rewards(s)
            # Act greedily w.r.t. the predicted per-action rewards.
            s, r, terminal, info = env.step(np.argmax(reward_per_action), test=True)
            ep_q.append(np.max(reward_per_action))
            ep_reward += r
        ep_rewards.append(ep_reward)
    return ep_rewards, ep_q
def train_async_dqn(agent, env, sess, agent_summary, saver, thread_idx=0):
    """Starts Asynchronous one/n-step Q-Learning.
    Can be used as a worker for threading.Thread
    :param agent: learning (asynchronous) agent
    :param env: environment
    :param sess: tensorflow session
    :param agent_summary: object used for summary tensorboard logging
    :param saver: tensorflow session saver
    :param thread_idx: (int) thread index. Thread with index=0 used for target network update and logging
    :type agent: agent.QlearningAgent
    :type env: environment.GymWrapper
    :type sess: tensorflow.Session
    :type agent_summary: agent.AgentSummary
    :type saver: tensorflow.train.Saver"""
    global global_epsilons
    # Each worker samples its own minimum epsilon (per the async DQN paper).
    eps_min = random.choice(EPS_MIN_SAMPLES)
    epsilon = update_epsilon(agent.frame, FLAGS.eps_steps, eps_min)
    print('Thread: %d. Sampled min epsilon: %f' % (thread_idx, eps_min))
    last_logging = agent.frame
    last_target_update = agent.frame
    terminal = True
    # Training loop:
    while agent.frame < FLAGS.total_frames:
        batch_states, batch_rewards, batch_actions = [], [], []
        if terminal:
            terminal = False
            screen = env.reset_random()
        # Batch update loop:
        while not terminal and len(batch_states) < FLAGS.tmax:
            # Increment shared frame counter
            agent.frame_increment()
            batch_states.append(screen)
            # Exploration vs Exploitation, E-greedy action choose
            if random.random() < epsilon:
                action_index = random.randrange(agent.action_size)
            else:
                reward_per_action = agent.predict_rewards(screen)
                # Choose an action index with maximum expected reward
                action_index = np.argmax(reward_per_action)
            # Execute an action and receive new state, reward for action
            screen, reward, terminal, _ = env.step(action_index)
            reward = np.clip(reward, -1, 1)
            # one-step Q-Learning: add discounted expected future reward
            if not terminal:
                reward += FLAGS.gamma * agent.predict_target(screen)
            batch_rewards.append(reward)
            batch_actions.append(action_index)
        # Apply asynchronous gradient update to shared agent
        agent.train(np.vstack(batch_states), batch_actions, batch_rewards)
        # Anneal epsilon
        epsilon = update_epsilon(agent.frame, FLAGS.eps_steps, eps_min)
        global_epsilons[thread_idx] = epsilon  # Logging
        # Logging and target network update: only thread 0 performs these,
        # so the shared target network and checkpoints have a single writer.
        if thread_idx == 0:
            if agent.frame - last_target_update >= FLAGS.update_interval:
                last_target_update = agent.frame
                agent.update_target()
            if agent.frame - last_logging >= FLAGS.log_interval and terminal:
                last_logging = agent.frame
                saver.save(sess, os.path.join(FLAGS.logdir, "sess.ckpt"), global_step=agent.frame)
                print('Session saved to %s' % FLAGS.logdir)
                episode_rewards, episode_q = test(agent, env, episodes=FLAGS.test_iter)
                avg_r = np.mean(episode_rewards)
                avg_q = np.mean(episode_q)
                avg_eps = np.mean(global_epsilons)
                print("%s. Avg.Ep.R: %.4f. Avg.Ep.Q: %.2f. Avg.Eps: %.2f. T: %d" %
                      (str(datetime.now())[11:19], avg_r, avg_q, avg_eps, agent.frame))
                agent_summary.write_summary({
                    'total_frame_step': agent.frame,
                    'episode_avg_reward': avg_r,
                    'avg_q_value': avg_q,
                    'epsilon': avg_eps
                })
    # Signal run() that the total frame budget has been exhausted.
    global training_finished
    training_finished = True
    print('Thread %d. Training finished. Total frames: %s' % (thread_idx, agent.frame))
def run(worker):
    """Launches worker asynchronously in 'FLAGS.threads' threads.

    Builds one environment per thread, a single shared agent/session, restores
    the latest checkpoint if present, then starts the worker threads and keeps
    the main thread alive (optionally rendering) until training finishes.

    :param worker: worker function (e.g. train_async_dqn)"""
    print('Starting %s threads.' % FLAGS.threads)
    processes = []
    envs = []
    for _ in range(FLAGS.threads):
        env = GymWrapperFactory.make(FLAGS.env,
                                     actrep=FLAGS.action_repeat,
                                     memlen=FLAGS.memory_len,
                                     w=FLAGS.width,
                                     h=FLAGS.height)
        envs.append(env)
    with tf.Session() as sess:
        agent = QlearningAgent(session=sess,
                               action_size=envs[0].action_size,
                               h=FLAGS.height,
                               w=FLAGS.width,
                               channels=FLAGS.memory_len,
                               opt=tf.train.AdamOptimizer(FLAGS.lr))
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=2)
        sess.run(tf.global_variables_initializer())
        if not os.path.exists(FLAGS.logdir):
            os.makedirs(FLAGS.logdir)
        ckpt = tf.train.latest_checkpoint(FLAGS.logdir)
        if ckpt is not None:
            saver.restore(sess, ckpt)
            # Keep the target network in sync with the restored online network.
            agent.update_target()
            print('Restoring session from %s' % ckpt)
        summary = AgentSummary(FLAGS.logdir, agent, FLAGS.env)
        for i in range(FLAGS.threads):
            processes.append(th.Thread(target=worker, args=(agent, envs[i], sess, summary, saver, i,)))
        # Daemon threads so the process can exit if the main thread dies.
        for p in processes:
            p.daemon = True
            p.start()
        # Main thread idles (optionally rendering) until a worker sets
        # the global training_finished flag.
        while not training_finished:
            if FLAGS.render:
                for i in range(FLAGS.threads):
                    envs[i].render()
            time.sleep(.01)
        for p in processes:
            p.join()
def main():
    """Entry point: evaluate a trained agent when --eval is set, else train."""
    if FLAGS.eval:
        evaluate()
    else:
        run(train_async_dqn)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8121358 | import networkx as nx
import topology as tp
import matplotlib.pyplot as plt
import selection
def greedy_partition(g, max_hop, max_cluster_size, node_sequence):
unvisited_nodes = list(g.nodes())
cluster_id = 1
cluster_dict = {}
ndcount = 0
while len(unvisited_nodes) > 0:
stnode = node_sequence[ndcount]
ndcount += 1
if len(unvisited_nodes) == len(list(g.nodes())):
cluster_dict[str(cluster_id)] = []
cluster_dict[str(cluster_id)].append(stnode)
else:
ccount = 1
cluster_sizes = []
cluster_ids = []
while ccount <= cluster_id:
ccnodes = cluster_dict[str(ccount)]
dcsn = tp.include_node_into_cluster(g, new_node=stnode, other_nodes=ccnodes, max_hop=max_hop)
if dcsn:
cluster_sizes.append(len(ccnodes))
cluster_ids.append(ccount)
ccount += 1
if len(cluster_ids) <= 0:
cluster_id += 1
cluster_dict[str(cluster_id)] = []
cluster_dict[str(cluster_id)].append(stnode)
else:
cluster_sizes.sort(reverse=True)
k = 0
not_added = True
while k < len(cluster_sizes):
if cluster_sizes[k] <= max_cluster_size:
chosen_cluster = cluster_ids[k]
cluster_dict[str(chosen_cluster)].append(stnode)
not_added = False
break
k += 1
if not_added:
chosen_cluster = cluster_ids[-1]
cluster_dict[str(chosen_cluster)].append(stnode)
unvisited_nodes.remove(stnode)
result = []
for key,val in cluster_dict.items():
result.append(val)
return result
# def greedy_partition(g, max_hop, max_cluster_size):
#
# all_result = []
# is_converged = False
# run_no = 1
# while not is_converged:
# print("Run Number: {}".format(run_no))
# reslt = partition_single_run(g, max_hop=max_hop, max_cluster_size=max_cluster_size)
# j = 0
# while j < len(all_result):
# dcsn = smt.check_all_clusters_similar(reslt, all_result[j])
# if dcsn:
# print("final result: {}".format(reslt))
# is_converged = True
# break
# else:
# all_result.append(reslt)
# run_no += 1
if __name__ == "__main__":
nous = 10
p = 0.2
g = nx.read_yaml('graphs/node_{}_p_{}.yaml'.format(str(nous), str(p).replace(".", "")))
### MAX_HOP 1
## [[2, 0], [1, 6], [3, 7], [5], [9], [4], [8]]
### MAX_HOP 2
## [[2, 0, 1, 3, 5], [6, 9], [7, 4, 8]]
### MAX_HOP 3
## [[2, 0, 1, 3, 5, 6, 9], [7, 4, 8]]
node_seq = selection.generate_node_selection_sequence(g)
all_lists = greedy_partition(g, max_hop=2, max_cluster_size=10, node_sequence=node_seq)
print(all_lists)
color_map = []
color_list = ['b', 'g', 'r', 'c', 'm', 'y', 'grey', 'w', 'orange', 'tomato', 'coral', 'maroon']
for node in g:
j = 0
while node not in all_lists[j]:
j = j + 1
color_map.append(color_list[j])
print(color_map)
nx.draw_kamada_kawai(g, node_color=color_map, with_labels=True)
plt.show() | StarcoderdataPython |
46733 | import argparse
import copy
import datetime
import re
import shlex
from typing import Union
import time
import discord
from discord.ext import commands
class Arguments(argparse.ArgumentParser):
    """ArgumentParser that raises instead of calling sys.exit() on parse errors."""
    def error(self, message):
        # argparse's default error() prints usage and exits the process; raising
        # lets a bot command handler catch and report the problem instead.
        raise RuntimeError(message)
# discord.py extension entry point: register the Moderation cog on load.
def setup(bot):
    bot.add_cog(Moderation(bot))
def is_owner(ctx):
    """Command check: True only when the invoking user owns the guild.

    Compares the author's id to guild.owner_id. The previous version compared
    the Member object itself to the id int, which is always False in
    discord.py, so the check could never pass.
    """
    return ctx.author.id == ctx.guild.owner_id
# Moderation cog. NOTE: no class docstring on purpose — discord.py uses cog
# docstrings as user-visible help text, which we don't want to change here.
class Moderation(commands.Cog):
    def __init__(self, bot):
        # Keep a reference to the bot for command re-dispatch in sudo().
        self.bot = bot
    @commands.command(hidden=True)
    @commands.check(is_owner)
    async def sudo(self, ctx, who: Union[discord.Member, discord.User], *, command: str):
        """Run a command as another user."""
        # Shallow-copy the invoking message and rewrite author/content so the
        # command dispatcher processes it as if `who` had typed it.
        msg = copy.copy(ctx.message)
        msg.author = who
        msg.content = ctx.prefix + command
        new_ctx = await self.bot.get_context(msg, cls=type(ctx))
        await self.bot.invoke(new_ctx)
1946494 | # -*- coding:utf-8 -*-
import urllib.request
import urllib.parse
import http.cookiejar
from bs4 import BeautifulSoup
def test_bs():
    """Fetch the shanbay team members page and pretty-print its parsed HTML."""
    page_html = urllib.request.urlopen('http://www.shanbay.com/team/members/').read()
    print(BeautifulSoup(page_html).prettify())
def login():
    """Log in to shanbay.com with hard-coded credentials and print the responses.

    NOTE(review): credentials, CSRF token and session cookie are hard-coded in
    this function — they should be moved to configuration/secrets storage.
    """
    # Cookie jar + handler so the session cookie set at login is reused.
    cookie = http.cookiejar.CookieJar()
    cookie_handler = urllib.request.HTTPCookieProcessor(cookie)
    # Browser-like headers captured from a real login request.
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate',
'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Content-Length': '99',
'Content-Type': 'application/x-www-form-urlencoded',
'Cookie': 'pgv_pvi=7034558464; pgv_si=s7130073088; jiathis_rdc=%7B%22http%3A//www.shanbay.com/vocabtest/result/%22%3A-39112541%2C%22http%3A//www.shanbay.com/vocabtest/share/%3Fvocab%3D9000%22%3A%22163%7C1408710333623%22%7D; language_code=zh-CN; sessionid=dckzcprofzz6xvlyk4gb6woyxycvsanj; csrftoken=<KEY>; __utmt=1; __utma=183787513.1933368942.1408332161.1414587127.1414589766.31; __utmb=183787513.1.10.1414589766; __utmc=183787513; __utmz=183787513.1408709798.10.2.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided)',
'Host': 'www.shanbay.com',
'Origin': 'http://www.shanbay.com',
'Referer': 'http://www.shanbay.com/accounts/login/',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36'}
    headers_new = {}  # unused; kept from the original implementation
    post_data_origin = {'csrfmiddlewaretoken': '<KEY>',
'username': 'ibluecoffee',
'password': '<PASSWORD>'}
    # URL-encode and UTF-8 encode the login form body.
    post_data = urllib.parse.urlencode(post_data_origin)
    binary_data = post_data.encode('utf-8')
    req = urllib.request.Request(url='http://www.shanbay.com/accounts/login/', data=binary_data, headers=headers)
    opener = urllib.request.build_opener(cookie_handler)
    response = opener.open(req)
    page = response.read()
    print(page)
    # Pretty-print the returned HTML for manual inspection.
    soup = BeautifulSoup(page)
    pre = soup.prettify()
    print(pre)
    return
# Script entry point: perform the login flow.
if __name__ == '__main__':
    login()
| StarcoderdataPython |
3544429 | <gh_stars>0
import RPi.GPIO as GPIO  # Import Raspberry Pi GPIO library
from time import sleep   # Import the sleep function from the time module

# Physical (BOARD-numbered) pins driving the LEDs; previously the same
# setup/output calls were copy-pasted five times per pin.
LED_PINS = [12, 22, 24, 26, 32]

GPIO.setwarnings(False)   # Ignore warning for now
GPIO.setmode(GPIO.BOARD)  # Use physical pin numbering
for pin in LED_PINS:
    GPIO.setup(pin, GPIO.OUT, initial=GPIO.LOW)

try:
    # Blink all LEDs together, one second on / one second off, until
    # interrupted (Ctrl-C).
    while True:
        GPIO.output(LED_PINS, GPIO.HIGH)  # GPIO.output accepts a list of channels
        sleep(1)
        GPIO.output(LED_PINS, GPIO.LOW)
        sleep(1)
except KeyboardInterrupt:
    pass
finally:
    GPIO.cleanup()  # Release the GPIO pins on exit
9717891 | <filename>src/fleetctrl/planning/VehiclePlan.py
# -------------------------------------------------------------------------------------------------------------------- #
# standard distribution imports
# -----------------------------
import logging
from os import startfile
# additional module imports (> requirements)
# ------------------------------------------
import numpy as np
from abc import abstractmethod, ABCMeta
from typing import List, Dict, Tuple
# src imports
# -----------
from src.simulation.Legs import VehicleRouteLeg, VehicleChargeLeg
from src.simulation.Vehicles import SimulationVehicle
from src.fleetctrl.planning.PlanRequest import PlanRequest
from src.routing.NetworkBase import NetworkBase
# -------------------------------------------------------------------------------------------------------------------- #
# global variables
# ----------------
from src.misc.globals import *
LOG = logging.getLogger(__name__)
LARGE_INT = 100000000
# =================================================================================================================== #
# ========= PLAN STOP CLASSES ======================================================================================= #
# =================================================================================================================== #
# class PlanStop:
# def __init__(self, position, boarding_dict, max_trip_time_dict, earliest_pickup_time_dict, latest_pickup_time_dict,
# change_nr_pax, planned_arrival_time=None, planned_departure=None, planned_arrival_soc=None,
# locked=False, charging_power=0, started_at=None,
# existing_vcl=None, charging_unit_id=None):
class PlanStopBase(metaclass=ABCMeta):
""" this abstract class defines all methods a PlanStop-Class has to implement
this class corresponds to one spatiotemporal action a vehicle is planned to do during a vehicle plan
a vehicle plan thereby consists of an temporal ordered list of PlanStops which are performed one after another
vehicles are moving between these different plan stops.
"""
@abstractmethod
def get_pos(self) -> tuple:
"""returns network position of this plan stop
:return: network position tuple """
pass
@abstractmethod
def get_state(self) -> G_PLANSTOP_STATES:
""" returns the state of the planstop
:return: plan stop state"""
@abstractmethod
def get_list_boarding_rids(self) -> list:
"""returns list of all request ids boarding at this plan stop
:return: list of boarding rids"""
@abstractmethod
def get_list_alighting_rids(self) -> list:
""" returns list of all request ids alighting at this plan stop
:return: list of alighting rids"""
@abstractmethod
def get_earliest_start_time(self) -> float:
""" this function evaluates all time constraints and returns the
earliest start time for the PlanStop
:return: (float) earliest start time """
pass
@abstractmethod
def get_latest_start_time(self, pax_infos : dict) -> float:
""" this function evaluates all time constraints and returns the
latest start time of the Plan Stop.
if maximum trip time constraints are applied, infos about boarding times are need to evaluate the
latest drop off time constraints
:param pax_infos: (dict) from corresponding vehicle plan rid -> list (boarding_time, deboarding time) (only boarding time needed)
:return: (float) latest start time"""
pass
@abstractmethod
def get_duration_and_earliest_departure(self) -> tuple:
""" returns a tuple of planned stop duration and absolute earliest departure time at stop
:return: (stop duration, earliest departure time) | None if not given"""
@abstractmethod
def get_started_at(self) -> float:
""" this function returns the time this plan stop started at; None if not started by the vehicle yet
:return: float of time or None"""
@abstractmethod
def get_change_nr_pax(self) -> int:
""" get the change of person occupancy after this plan stop
:return: change number pax (difference between boarding and deboarding persons)"""
@abstractmethod
def get_change_nr_parcels(self) -> int:
""" get the change of parcel occupancy after this plan stop
:return: change number parcels (difference between boarding and deboarding parcels)"""
@abstractmethod
def get_departure_time(self, start_time : float) -> float:
""" this function returns the time the vehicle leaves the plan stop if it is started at start_time
:param start_time: time the plan stop has been started
:return: time vehicle is supposed to leave"""
@abstractmethod
def get_charging_power(self) -> float:
""" returns the charging power at this plan stop
:return: charging power"""
@abstractmethod
def get_charging_unit_and_vcl(self):
""" returns the charging unit and the existing vcl
:return: tuple (charging_unit_id, vcl); None if not existent"""
@abstractmethod
def get_boarding_time_constraint_dicts(self) -> Tuple[Dict, Dict, Dict, Dict]:
""" returns a tuple of all boarding constraints dicts (rid -> time constraint)
:return: dict earliest_boarding_time, latest_boarding_times, max_travel_times, latest_arrival_times"""
@abstractmethod
def get_planned_arrival_and_departure_time(self) -> Tuple[float, float]:
""" returns time of arrival and departure planned within the plan
:return: tuple of planned arrival time and planned departure time"""
@abstractmethod
def get_planned_arrival_and_departure_soc(self) -> Tuple[float, float]:
"""returns the planned soc when arriving at plan stop
:return: planned soc at start and end of charging process"""
@abstractmethod
def is_locked(self) -> bool:
"""test for lock
:return: bool True, if plan stop is locked"""
@abstractmethod
def is_locked_end(self) -> bool:
""" ths for end lock
:return: bool True, if plan stop is locked at end of plan stop (no insertion after this possible)"""
@abstractmethod
def is_infeasible_locked(self) -> bool:
""" this if planstop is locked due to infeasible time constraints
:return: True, if infeasible locked"""
@abstractmethod
def is_inactive(self) -> bool:
""" this function evaluates if this is an inactive PlanStop (i.e. undefined duration and no tasks)
:return: (bool) True if inactive, else False """
pass
@abstractmethod
def is_empty(self) -> bool:
""" tests if nothing has to be done here and its just a routing target marker (i.e. reloc target)
:return: (bool)"""
@abstractmethod
def set_locked(self, locked : bool):
""" sets the locked state of the plan stop
:param locked: True, if this plan stop should be locked"""
@abstractmethod
def set_infeasible_locked(self, infeasible_locked : bool):
""" sets infeasible locked state if time constraints can no longer be fullfilled
:param infeasible_locked: True, if infeasible locked state applied"""
@abstractmethod
def set_started_at(self, start_time : float):
"""this function sets the time when the plan stop has been started by a vehicle
:param start_time: float; simulation time when vehicle started the plan stop"""
@abstractmethod
def set_planned_arrival_and_departure_time(self, arrival_time : float, departure_time : float):
""" set the planned arrival and departure time at plan stop
:param arrival_time: time of vehicle arrival
:param departure_time: planned time of departure"""
@abstractmethod
def set_duration_and_earliest_end_time(self, duration : float=None, earliest_end_time : float=None):
""" can be used to reset duration and earliest end time of the plan stop (ignored if None)
:param duration: new duration of plan stop
:param earliest_end_time: new earliest end time of plan stop"""
@abstractmethod
def set_planned_arrival_and_departure_soc(self, arrival_soc : float, departure_soc : float):
""" set the planned soc at arrival and departure at plan stop
:param arrival soc: soc of vehicle at arrival
:param departure_soc: soc at end of charging process"""
@abstractmethod
def set_charging_attributes(self, charging_unit_id : int, existing_vcl : VehicleChargeLeg, power : float):
""" this methods sets entries for charging tasks when defined later on
:param charging_unit_id: id of charging uit
:param existing_vcl: corresponding vehicle charge leg
:param power: power of charging"""
@abstractmethod
def update_rid_boarding_time_constraints(self, rid, new_earliest_pickup_time : float=None, new_latest_pickup_time : float=None):
""" this method can be used to update boarding time constraints a request in this plan stop (if given)
:param rid: request id
:param new_earliest_pickup_time: new earliest pick up time constraint of rid
:param new_latest_pickup_time: new latest pick up time constraint of rid"""
@abstractmethod
def update_rid_alighting_time_constraints(self, rid, new_maxmium_travel_time : float=None, new_latest_dropoff_time : float=None):
""" this method can be used to update alighting time constraints a request in this plan stop (if given)
:param rid: request id
:param new_maxmium_travel_time: new maximum travel time constraint of rid
:param new_latest_dropoff_time: new latest dropoff time constraint of rid"""
    @abstractmethod
    def copy(self):
        """ returns a copy of this plan stop
        (concrete classes copy the constraint dicts but share request-id keys/values)
        :return: PlanStop copy
        """
        pass
class PlanStop(PlanStopBase):
    """One spatiotemporal action a vehicle is planned to perform during a vehicle plan.

    A vehicle plan consists of a temporally ordered list of PlanStops which are performed
    one after another; vehicles move between these plan stops. This is the most general
    plan stop class; subclasses specialize it for boarding-only stops, routing targets
    and charging processes."""
    def __init__(self, position, boarding_dict=None, max_trip_time_dict=None, latest_arrival_time_dict=None, earliest_pickup_time_dict=None, latest_pickup_time_dict=None,
                 change_nr_pax=0, change_nr_parcels=0, earliest_start_time=None, latest_start_time=None, duration=None, earliest_end_time=None,
                 locked=False, locked_end=False, charging_power=0, existing_vcl=None, charging_unit_id=None, planstop_state : G_PLANSTOP_STATES=G_PLANSTOP_STATES.MIXED):
        """
        :param position: network position (3 tuple) of the position this PlanStops takes place (target for routing)
        :param boarding_dict: dictionary with entries +1 -> list of request ids that board the vehicle there; -1 -> list of requests that alight the vehicle there
        :param max_trip_time_dict: dictionary request_id -> maximum trip time of all requests alighting at this stop to check max trip time constraint
        :param latest_arrival_time_dict: dictionary request_id -> absolute latest arival time of all requests alighting at this stop to check latest arrival time constraint
        :param earliest_pickup_time_dict: dictionary request_id -> earliest pickup time of all requests boarding at this stop to check earliest pickup time constraint
        :param latest_pickup_time_dict: dictionary request_id -> latest pickup time of all requests boarding at this top to check latest pickup time constraint
        :param change_nr_pax: (int) change of number of passengers at this point: number people boarding - number people alighting to check capacity constraint
        :param change_nr_parcels: (int) change of number of parcels at this point: number boarding parcels - number alighting parcels to check capacity constraint
        :param earliest_start_time: (float) absolute earliest start time this plan stop is allowed to start
        :param latest_start_time: (float) absolute latest start time this plan stop is allowed to start
        :param duration: (float) minimum duration this plan stops takes at this location
        :param earliest_end_time: (float) absolute earliest time a vehicle is allowed to leave at this plan stop
        :param locked: (bool) false by default; if true this planstop can no longer be unassigned from vehicleplan and has to be fullfilled. currently only working when also all planstops before this planstop are locked, too
        :param locked_end: (bool) false by default; if true, no planstops can be added after this planstop in the assignment algorithm and it cannot be removed by the assignemnt algorithm (insertions before are possible!)
        :param charging_power: optional (float); if given the vehicle is charged with this power (TODO unit!) while at this stop
        :param existing_vcl: optional reference to VehicleChargeLeg TODO (is this a VCL or the ID?)
        :param charging_unit_id: optional (int) charging unit it the vehicle is supposed to charge at, defined in ChargingInfrastructure
        :param planstop_state: used to characterize the planstop state (task to to there)

        NOTE: the dict parameters default to None; a fresh empty dict is created per
        instance in that case. (Previously they were mutable default arguments that were
        stored directly on the instance and mutated later, e.g. by
        update_rid_boarding_time_constraints -> all stops created without explicit dicts
        shared one dict object.)
        """
        self.pos = position
        self.state = planstop_state
        # +1: [rids] for boarding | -1: [rids] for alighting
        self.boarding_dict = boarding_dict if boarding_dict is not None else {}
        self.locked = locked
        self.locked_end = locked_end
        # charging attributes (may also be set later via set_charging_attributes)
        self.charging_power = charging_power
        self.charging_unit_id = charging_unit_id
        self.existing_vcl = existing_vcl
        # parameters that define capacity constraints
        self.change_nr_pax = change_nr_pax
        self.change_nr_parcels = change_nr_parcels
        # parameters that define time constraints
        self.max_trip_time_dict = max_trip_time_dict if max_trip_time_dict is not None else {}  # deboarding rid -> max_trip_time constraint
        self.latest_arrival_time_dict = latest_arrival_time_dict if latest_arrival_time_dict is not None else {}  # deboarding rid -> latest_arrival_time constraint
        self.earliest_pickup_time_dict = earliest_pickup_time_dict if earliest_pickup_time_dict is not None else {}  # boarding rid -> earliest pickup time
        self.latest_pickup_time_dict = latest_pickup_time_dict if latest_pickup_time_dict is not None else {}  # boarding rid -> latest pickup time
        # defensive type checks: all constraint containers must be plain dicts
        for constraint_dict in (self.boarding_dict, self.max_trip_time_dict, self.latest_arrival_time_dict,
                                self.earliest_pickup_time_dict, self.latest_pickup_time_dict):
            if type(constraint_dict) != dict:
                raise TypeError
        # constraints independent from boarding processes
        self.direct_earliest_start_time = earliest_start_time
        self.direct_latest_start_time = latest_start_time
        self.direct_duration = duration
        self.direct_earliest_end_time = earliest_end_time
        if duration is not None:
            int(self.direct_duration)  # early sanity check: duration must be numeric
        # constraints (will be computed in update travel time by evaluating the whole plan)
        self._latest_start_time = None
        self._earliest_start_time = None
        # planning properties (will be set during evaluation of whole plan)
        self._planned_arrival_time = None
        self._planned_departure_time = None
        self._planned_arrival_soc = None
        self._planned_departure_soc = None
        self.started_at = None  # is only set in update_plan
        self.infeasible_locked = False

    def get_pos(self) -> tuple:
        """returns network position of this plan stop
        :return: network position tuple """
        return self.pos

    def get_state(self) -> G_PLANSTOP_STATES:
        """:return: plan stop state (task performed at this stop)"""
        return self.state

    def get_list_boarding_rids(self) -> list:
        """returns list of all request ids boarding at this plan stop
        :return: list of boarding rids"""
        return self.boarding_dict.get(1, [])

    def get_list_alighting_rids(self) -> list:
        """ returns list of all request ids alighting at this plan stop
        :return: list of alighting rids"""
        return self.boarding_dict.get(-1, [])

    def copy(self):
        """ this function returns the copy of a plan stop
        (constraint dicts are shallow-copied; planning attributes are transferred)
        :return: PlanStop copy
        """
        cp_ps = PlanStop(self.pos, boarding_dict=self.boarding_dict.copy(), max_trip_time_dict=self.max_trip_time_dict.copy(),
                         latest_arrival_time_dict=self.latest_arrival_time_dict.copy(), earliest_pickup_time_dict=self.earliest_pickup_time_dict.copy(),
                         latest_pickup_time_dict=self.latest_pickup_time_dict.copy(), change_nr_pax=self.change_nr_pax,
                         change_nr_parcels=self.change_nr_parcels,  # BUGFIX: was not forwarded before -> copies lost parcel capacity changes
                         earliest_start_time=self.direct_earliest_start_time, latest_start_time=self.direct_latest_start_time,
                         duration=self.direct_duration, earliest_end_time=self.direct_earliest_end_time, locked=self.locked, locked_end=self.locked_end,
                         charging_power=self.charging_power, existing_vcl=self.existing_vcl, charging_unit_id=self.charging_unit_id,
                         planstop_state=self.state)  # BUGFIX: was not forwarded before -> copies silently reverted to MIXED state
        cp_ps._planned_arrival_time = self._planned_arrival_time
        cp_ps._planned_departure_time = self._planned_departure_time
        cp_ps._planned_arrival_soc = self._planned_arrival_soc
        cp_ps._planned_departure_soc = self._planned_departure_soc
        cp_ps.started_at = self.started_at
        cp_ps.infeasible_locked = self.infeasible_locked  # BUGFIX: flag was lost on copy before
        return cp_ps

    def get_earliest_start_time(self) -> float:
        """ this function evaluates all time constraints and returns the
        earliest start time for the PlanStop (max of direct constraint and
        earliest pickup times of boarding requests; -1 if unconstrained)
        :return: (float) earliest start time """
        self._earliest_start_time = -1
        if self.direct_earliest_start_time is not None and self.direct_earliest_start_time > self._earliest_start_time:
            self._earliest_start_time = self.direct_earliest_start_time
        if len(self.earliest_pickup_time_dict.values()) > 0:
            ept = np.floor(max(self.earliest_pickup_time_dict.values()))
            if ept > self._earliest_start_time:
                self._earliest_start_time = ept
        #LOG.debug("get earliest start time: {}".format(str(self)))
        return self._earliest_start_time

    def get_latest_start_time(self, pax_infos : dict) -> float:
        """ this function evaluates all time constraints and returns the
        latest start time of the Plan Stop (min over direct constraint, latest pickup,
        maximum trip time and latest arrival constraints; LARGE_INT if unconstrained).
        if maximum trip time constraints are applied, infos about boarding times are needed to evaluate the
        latest drop off time constraints
        :param pax_infos: (dict) from corresponding vehicle plan rid -> list (boarding_time, deboarding time) (only boarding time needed)
        :return: (float) latest start time"""
        self._latest_start_time = LARGE_INT
        if self.direct_latest_start_time is not None and self.direct_latest_start_time < self._latest_start_time:
            self._latest_start_time = self.direct_latest_start_time
        if len(self.latest_pickup_time_dict.values()) > 0:
            la = np.ceil(min(self.latest_pickup_time_dict.values()))
            if la < self._latest_start_time:
                self._latest_start_time = la
        if len(self.max_trip_time_dict.values()) > 0:
            # latest drop-off = boarding time + maximum trip time, per alighting request
            la = np.ceil(min((pax_infos[rid][0] + self.max_trip_time_dict[rid] for rid in self.boarding_dict.get(-1, []))))
            if la < self._latest_start_time:
                self._latest_start_time = la
        if len(self.latest_arrival_time_dict.values()) > 0:
            la = np.ceil(min(self.latest_arrival_time_dict.values()))
            if la < self._latest_start_time:
                self._latest_start_time = la
        #LOG.debug("get latest start time: {}".format(str(self)))
        return self._latest_start_time

    def get_started_at(self) -> float:
        """:return: (float) simulation time the stop was started at (None if not started)"""
        return self.started_at

    def get_change_nr_pax(self) -> int:
        """:return: (int) change of on-board passengers at this stop"""
        return self.change_nr_pax

    def get_change_nr_parcels(self) -> int:
        """:return: (int) change of on-board parcels at this stop"""
        return self.change_nr_parcels

    def get_departure_time(self, start_time: float) -> float:
        """ this function returns the time the vehicle leaves the plan stop if it is started at start_time
        :param start_time: time the plan stop has been started
        :return: time vehicle is supposed to leave"""
        departure_time = start_time
        if self.direct_duration is not None:
            departure_time = start_time + self.direct_duration
        if self.direct_earliest_end_time is not None and departure_time < self.direct_earliest_end_time:
            departure_time = self.direct_earliest_end_time
        return departure_time

    def get_duration_and_earliest_departure(self) -> tuple:
        """:return: tuple (minimum duration, absolute earliest end time); entries may be None"""
        return self.direct_duration, self.direct_earliest_end_time

    def get_charging_power(self) -> float:
        """:return: (float) charging power at this stop (0 if no charging)"""
        return self.charging_power

    def get_charging_unit_and_vcl(self):
        """:return: tuple (charging unit id, VehicleChargeLeg); entries may be None"""
        return self.charging_unit_id, self.existing_vcl

    def get_boarding_time_constraint_dicts(self) -> Tuple[Dict, Dict, Dict, Dict]:
        """:return: tuple of (earliest pickup, latest pickup, max trip time, latest arrival) dicts"""
        return self.earliest_pickup_time_dict, self.latest_pickup_time_dict, self.max_trip_time_dict, self.latest_arrival_time_dict

    def get_planned_arrival_and_departure_time(self) -> Tuple[float, float]:
        """:return: tuple (planned arrival time, planned departure time); None if not evaluated yet"""
        return self._planned_arrival_time, self._planned_departure_time

    def get_planned_arrival_and_departure_soc(self) -> Tuple[float, float]:
        """:return: tuple (planned arrival soc, planned departure soc); None if not evaluated yet"""
        return self._planned_arrival_soc, self._planned_departure_soc

    def is_inactive(self) -> bool:
        """ this function evaluates if this is an inactive PlanStop (i.e. undefined duration and no tasks)
        :return: (bool) True if inactive, else False """
        # departure after a virtual start at 0 exceeding LARGE_INT implies an unbounded stop
        if self.get_departure_time(0) > LARGE_INT:
            return True
        else:
            return False

    def is_locked(self) -> bool:
        """:return: (bool) True if the stop can no longer be unassigned"""
        return self.locked

    def is_locked_end(self) -> bool:
        """:return: (bool) True if no stops may be appended after this one"""
        return self.locked_end

    def is_infeasible_locked(self) -> bool:
        """:return: (bool) True if the stop is locked although time-infeasible"""
        return self.infeasible_locked

    def set_locked(self, locked: bool):
        """:param locked: (bool) new locked state"""
        self.locked = locked

    def set_infeasible_locked(self, infeasible_locked: bool):
        """:param infeasible_locked: (bool) new infeasible-locked state"""
        self.infeasible_locked = infeasible_locked

    def set_started_at(self, start_time: float):
        """:param start_time: (float) simulation time the stop actually started"""
        self.started_at = start_time

    def set_planned_arrival_and_departure_soc(self, arrival_soc: float, departure_soc: float):
        """ set the planned soc at arrival and departure at plan stop
        :param arrival_soc: soc of vehicle at arrival
        :param departure_soc: soc at end of charging process"""
        self._planned_arrival_soc = arrival_soc
        self._planned_departure_soc = departure_soc

    def set_planned_arrival_and_departure_time(self, arrival_time: float, departure_time: float):
        """ set the planned arrival and departure time at plan stop
        :param arrival_time: time of vehicle arrival
        :param departure_time: planned time of departure"""
        self._planned_arrival_time = arrival_time
        self._planned_departure_time = departure_time

    def set_duration_and_earliest_end_time(self, duration: float = None, earliest_end_time: float = None):
        """ reset duration and earliest end time of the plan stop (None entries are ignored)
        :param duration: new duration of plan stop
        :param earliest_end_time: new earliest end time of plan stop"""
        if duration is not None:
            self.direct_duration = duration
        if earliest_end_time is not None:
            self.direct_earliest_end_time = earliest_end_time

    def set_charging_attributes(self, charging_unit_id: int, existing_vcl: VehicleChargeLeg, power: float):
        """ sets entries for charging tasks when defined later on
        :param charging_unit_id: id of charging unit
        :param existing_vcl: corresponding vehicle charge leg
        :param power: power of charging"""
        self.charging_unit_id = charging_unit_id
        self.existing_vcl = existing_vcl
        self.charging_power = power

    def update_rid_boarding_time_constraints(self, rid, new_earliest_pickup_time: float = None, new_latest_pickup_time: float = None):
        """ update boarding time constraints of a request at this plan stop (None entries are ignored)
        :param rid: request id
        :param new_earliest_pickup_time: new earliest pick up time constraint of rid
        :param new_latest_pickup_time: new latest pick up time constraint of rid"""
        if new_earliest_pickup_time is not None:
            self.earliest_pickup_time_dict[rid] = new_earliest_pickup_time
        if new_latest_pickup_time is not None:
            self.latest_pickup_time_dict[rid] = new_latest_pickup_time

    def update_rid_alighting_time_constraints(self, rid, new_maxmium_travel_time: float = None, new_latest_dropoff_time: float = None):
        """ update alighting time constraints of a request at this plan stop (None entries are ignored)
        :param rid: request id
        :param new_maxmium_travel_time: new maximum travel time constraint of rid
        :param new_latest_dropoff_time: new latest dropoff time constraint of rid"""
        if new_maxmium_travel_time is not None:
            self.max_trip_time_dict[rid] = new_maxmium_travel_time
        if new_latest_dropoff_time is not None:
            self.latest_arrival_time_dict[rid] = new_latest_dropoff_time

    def __str__(self):
        return f"PS: {self.pos} bd {self.boarding_dict} earl dep {self._earliest_start_time} latest arr " \
               f"{self._latest_start_time} eta {self._planned_arrival_time}"

    def is_empty(self) -> bool:
        """ tests if nothing has to be done here and its just a routing target marker (i.e. reloc target)
        :return: (bool)"""
        if self.change_nr_pax == 0 and len(self.boarding_dict.get(1, [])) == 0 and len(self.boarding_dict.get(-1, [])) == 0 and self.charging_power == 0: #and len(self.planned_departure) == 0
            return True
        else:
            return False
class BoardingPlanStop(PlanStop):
    """ this class can be used to generate a plan stop where only boarding processes take place """
    def __init__(self, position, boarding_dict=None, max_trip_time_dict=None, latest_arrival_time_dict=None,
                 earliest_pickup_time_dict=None, latest_pickup_time_dict=None, change_nr_pax=0, change_nr_parcels=0, duration=None, locked=False):
        """
        :param position: network position (3 tuple) of the position this PlanStops takes place (target for routing)
        :param boarding_dict: dictionary with entries +1 -> list of request ids that board the vehicle there; -1 -> list of requests that alight the vehicle there
        :param max_trip_time_dict: dictionary request_id -> maximum trip time of all requests alighting at this stop to check max trip time constraint
        :param latest_arrival_time_dict: dictionary request_id -> absolute latest arival time of all requests alighting at this stop to check latest arrival time constraint
        :param earliest_pickup_time_dict: dictionary request_id -> earliest pickup time of all requests boarding at this stop to check earliest pickup time constraint
        :param latest_pickup_time_dict: dictionary request_id -> latest pickup time of all requests boarding at this top to check latest pickup time constraint
        :param change_nr_pax: (int) change of number of passengers at this point: number people boarding - number people alighting to check capacity constraint
        :param change_nr_parcels: (int) change of number of parcels at this point: number boarding parcels - number alighting parcels to check capacity constraint
        :param duration: (float) minimum duration this plan stops takes at this location
        :param locked: (bool) false by default; if true this planstop can no longer be unassigned from vehicleplan and has to be fullfilled. currently only working when also all planstops before this planstop are locked, too

        NOTE: the dict parameters default to None; a fresh empty dict is created per call.
        (Previously they were mutable default arguments which PlanStop stores directly and
        mutates later -> all boarding stops created without explicit dicts shared state.)
        """
        super().__init__(position,
                         boarding_dict=boarding_dict if boarding_dict is not None else {},
                         max_trip_time_dict=max_trip_time_dict if max_trip_time_dict is not None else {},
                         latest_arrival_time_dict=latest_arrival_time_dict if latest_arrival_time_dict is not None else {},
                         earliest_pickup_time_dict=earliest_pickup_time_dict if earliest_pickup_time_dict is not None else {},
                         latest_pickup_time_dict=latest_pickup_time_dict if latest_pickup_time_dict is not None else {},
                         change_nr_pax=change_nr_pax, change_nr_parcels=change_nr_parcels,
                         earliest_start_time=None, latest_start_time=None,
                         duration=duration, earliest_end_time=None, locked=locked,
                         charging_power=0, existing_vcl=None, charging_unit_id=None, planstop_state=G_PLANSTOP_STATES.BOARDING)
class RoutingTargetPlanStop(PlanStop):
    """Plan stop that schedules a pure routing target for a vehicle, i.e. the only
    task is to drive to the given position (e.g. repositioning)."""
    def __init__(self, position, earliest_start_time=None, latest_start_time=None, duration=None, earliest_end_time=None, locked=False, locked_end=False, planstop_state=G_PLANSTOP_STATES.REPO_TARGET):
        """
        :param position: network position (3 tuple) this plan stop takes place at (routing target)
        :param earliest_start_time: (float) absolute earliest time the stop may start
        :param latest_start_time: (float) absolute latest time the stop may start
        :param duration: (float) minimum duration spent at this location
        :param earliest_end_time: (float) absolute earliest time the vehicle may leave
        :param locked: (bool) if True, the stop cannot be unassigned from the vehicle plan anymore
            (only effective if all preceding stops are locked as well)
        :param locked_end: (bool) if True, the assignment algorithm may not append stops after
            this one nor remove it (insertions before remain possible)
        :param planstop_state: (G_PLANSTOP_STATES) state of the stop; expected to be one of
            REPO_TARGET, INACTIVE or RESERVATION
        """
        super().__init__(
            position,
            boarding_dict={}, max_trip_time_dict={}, latest_arrival_time_dict={},
            earliest_pickup_time_dict={}, latest_pickup_time_dict={},
            change_nr_pax=0,
            earliest_start_time=earliest_start_time,
            latest_start_time=latest_start_time,
            duration=duration,
            earliest_end_time=earliest_end_time,
            locked=locked, locked_end=locked_end,
            charging_power=0, existing_vcl=None, charging_unit_id=None,
            planstop_state=planstop_state,
        )
class ChargingPlanStop(PlanStop):
    """Plan stop that schedules a charging-only process (no boarding, no pax change)."""
    def __init__(self, position, earliest_start_time=None, latest_start_time=None, duration=None,
                 earliest_end_time=None, locked=False, locked_end=False, charging_power=0, existing_vcl=None, charging_unit_id=None):
        """
        :param position: network position (3 tuple) this plan stop takes place at (routing target)
        :param earliest_start_time: (float) absolute earliest time the stop may start
        :param latest_start_time: (float) absolute latest time the stop may start
        :param duration: (float) minimum duration spent at this location
        :param earliest_end_time: (float) absolute earliest time the vehicle may leave
        :param locked: (bool) if True, the stop cannot be unassigned from the vehicle plan anymore
            (only effective if all preceding stops are locked as well)
        :param locked_end: (bool) if True, the assignment algorithm may not append stops after
            this one nor remove it (insertions before remain possible)
        :param charging_power: (float) power the vehicle charges with while at this stop (TODO unit!)
        :param existing_vcl: optional reference to a VehicleChargeLeg (TODO: VCL or its ID?)
        :param charging_unit_id: (int) id of the charging unit defined in ChargingInfrastructure
        """
        super().__init__(
            position,
            boarding_dict={}, max_trip_time_dict={}, latest_arrival_time_dict={},
            earliest_pickup_time_dict={}, latest_pickup_time_dict={},
            change_nr_pax=0,
            earliest_start_time=earliest_start_time,
            latest_start_time=latest_start_time,
            duration=duration,
            earliest_end_time=earliest_end_time,
            locked=locked, locked_end=locked_end,
            charging_power=charging_power,
            existing_vcl=existing_vcl,
            charging_unit_id=charging_unit_id,
            planstop_state=G_PLANSTOP_STATES.CHARGING,
        )
class VehiclePlan:
""" this class is used to plan tasks for a vehicle and evaluates feasiblity of time constraints of this plan
a Vehicle mainly consists of two parts:
- a vehicle this plan is assigned to, and therefore the current state of the vehicle
- an ordered list of PlanStops defining the tasks the vehicle is supposed to perform (vehicles move from one plan stop to another)"""
def __init__(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, list_plan_stops : List[PlanStopBase], copy: bool =False, external_pax_info : dict = {}):
"""
:param veh_obj: corresponding simulation vehicle reference
:param sim_time: current simulation time
:param routing_engine: reference to routing engine
:param list_plan_stops: ordered list of plan stops to perform
:param copy: optional; set if an init is set for creation of a copy of the plan (only for internal use)
:param external_pax_info: optional; dictionary of allready computed pax info (only for internal use)
"""
self.list_plan_stops = list_plan_stops
self.utility = None
# pax info:
# rid -> [start_boarding, end_alighting] where start_boarding can be in past or planned
# rid -> [start_boarding_time] in case only boarding is planned
self.pax_info = external_pax_info
self.vid = None
self.feasible = None
self.structural_feasible = True # indicates if plan is in line with vehicle state ignoring time constraints
if not copy:
self.vid = veh_obj.vid
self.feasible = self.update_tt_and_check_plan(veh_obj, sim_time, routing_engine, keep_feasible=True)
def __str__(self):
return "veh plan for vid {} feasible? {} : {} | pax info {}".format(self.vid, self.feasible,
[str(x) for x in self.list_plan_stops],
self.pax_info)
def copy(self):
"""
creates a copy
"""
tmp_VehiclePlan = VehiclePlan(None, None, None, [ps.copy() for ps in self.list_plan_stops], copy=True)
tmp_VehiclePlan.vid = self.vid
tmp_VehiclePlan.utility = self.utility
tmp_VehiclePlan.pax_info = self.pax_info.copy()
tmp_VehiclePlan.feasible = True
return tmp_VehiclePlan
def is_feasible(self) -> bool:
""" this method can be used to check of plan is feasible
:return: (bool) True if feasible"""
return self.feasible
def is_structural_feasible(self) -> bool:
""" indicates if stop order is feasible with current vehicles state (ignoring time constraints)
:return: (bool) True if structural feasible """
return self.structural_feasible
def get_pax_info(self, rid) -> list:
""" this function returns passenger infos regarding planned boarding and alighting time for this plan
:param rid: request id involved in this plan
:return: list with maximally length 2; first entry planned boarding time; second entry planned alighting time; None if no information found"""
return self.pax_info.get(rid)
def get_involved_request_ids(self) -> list:
""" get a list of all request ids that are scheduled in this plan
:return: list of request ids"""
return list(self.pax_info.keys())
def set_utility(self, utility_value : float):
""" this method is used to set the utility (cost function value) of this plan
:param utility_value: float of utility value"""
self.utility = utility_value
def get_utility(self) -> float:
""" returns the utility value of the plan (None if not set yet)
:return: utility value (cost function value) or None"""
return self.utility
def add_plan_stop(self, plan_stop : PlanStopBase, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, return_copy : bool=False, position : tuple=None):
"""This method adds a plan stop to an existing vehicle plan. After that, it updates the plan.
:param plan_stop: new plan stop
:param veh_obj: simulation vehicle instance
:param sim_time: current simulation time
:param routing_engine: routing engine
:param return_copy: controls whether the current plan is changed or a changed copy will be returned
:param position: position in list_plan_stops in which the plan stop should be added
:return: None (return_copy=False) or VehiclePlan instance (return_copy=True)
"""
if return_copy:
new_veh_plan = self.copy()
else:
new_veh_plan = self
if position is None:
new_veh_plan.list_plan_stops.append(plan_stop)
else:
new_veh_plan.list_plan_stops.insert(position, plan_stop)
new_veh_plan.update_tt_and_check_plan(veh_obj, sim_time, routing_engine, keep_feasible=True)
    def update_plan(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, list_passed_VRLs : List[VehicleRouteLeg]=None, keep_time_infeasible : bool=True) -> bool:
        """This method checks whether the simulation vehicle passed some of the planned stops and removes them from the
        plan after passing. It returns the feasibility of the plan.

        Three steps are performed:
        1) compare the passed vehicle route legs against the first plan stops: stops that were
           served as planned are removed; any mismatch between planned and actual boarding /
           alighting marks the plan as structurally infeasible and aborts immediately;
        2) compare the currently active route leg with the first remaining plan stop and lock
           the stop if the vehicle already started serving it;
        3) re-evaluate all travel times and time constraints of the remaining plan.

        :param veh_obj: vehicle object to which plan is applied
        :param sim_time: current simulation time
        :param routing_engine: reference to routing engine
        :param list_passed_VRLs: list of passed VRLs
        :param keep_time_infeasible: if True full evaluation of feasiblity even though infeasibility of time constraints have been found
        :return: is_feasible returns True if all
        """
        # 1) check if list_passed_VRLs invalidates the plan or removes some stops
        # LOG.debug("update_plan")
        self.feasible = True
        if list_passed_VRLs is None:
            list_passed_VRLs = []
        # LOG.debug(str(self))
        # LOG.debug([str(x) for x in list_passed_VRLs])
        # LOG.debug([str(x) for x in self.list_plan_stops])
        # pax_info may be keyed by sub-rid tuples; map the plain rid (first tuple entry)
        # back to the full sub-rid key so VRL request ids can be compared against plan stops
        key_translator = {sub_rid[0]: sub_rid for sub_rid in self.pax_info.keys() if type(sub_rid) == tuple}
        if list_passed_VRLs and self.list_plan_stops:
            for vrl in list_passed_VRLs:
                # driving / idle legs do not correspond to a plan stop -> nothing to compare
                if vrl.status in G_DRIVING_STATUS or vrl.status in G_LAZY_STATUS:
                    continue
                # if vrl.status in G_LAZY_STATUS:
                #     # waiting part should not be part of the vehicle plan
                #     continue
                # if vrl.status in G_DRIVING_STATUS or vrl.status in G_LAZY_STATUS:
                #     if vrl.destination_pos == self.list_plan_stops[0].get_pos() and self.list_plan_stops[0].is_empty():
                #         # LOG.info("jumped ps {} becouse of vrl {}".format(self.list_plan_stops[0], vrl))
                #         self.list_plan_stops = self.list_plan_stops[1:]
                #         continue
                if vrl.destination_pos == self.list_plan_stops[0].get_pos():
                    # plan infeasible as soon as other people board the vehicle
                    rid_boarded_at_stop = set([key_translator.get(rq.get_rid_struct(), rq.get_rid_struct())
                                               for rq in vrl.rq_dict.get(1, [])])
                    if not rid_boarded_at_stop == set(self.list_plan_stops[0].get_list_boarding_rids()):
                        # LOG.debug(" -> wrong boarding")
                        self.feasible = False
                        self.structural_feasible = False
                        return False
                    # other people alighting should not be possible. keep check nevertheless
                    rid_alighted_at_stop = set([key_translator.get(rq.get_rid_struct(), rq.get_rid_struct()) for rq in
                                                vrl.rq_dict.get(-1, [])])
                    if not rid_alighted_at_stop == set(self.list_plan_stops[0].get_list_alighting_rids()):
                        # LOG.debug(" -> wrong alighting")
                        self.feasible = False
                        self.structural_feasible = False
                        return False
                    # stop was served exactly as planned -> remove stop from plan
                    self.list_plan_stops = self.list_plan_stops[1:]
                else:
                    # stop leg at an unplanned position:
                    # plan infeasible as soon as anybody boarded or alighted the vehicle
                    if vrl.rq_dict.get(1) or vrl.rq_dict.get(-1):
                        # LOG.debug(" -> unplanned boarding step")
                        self.feasible = False
                        self.structural_feasible = False
                        return False
        # 2) check for current boarding processes and check if current stop should be locked
        if veh_obj.assigned_route and self.list_plan_stops:
            ca = veh_obj.assigned_route[0]  # currently active route leg
            if not ca.status in G_DRIVING_STATUS and not ca.status in G_LAZY_STATUS:
                # vehicle is currently at a stop -> it must match the first planned stop exactly
                if ca.destination_pos == self.list_plan_stops[0].get_pos():
                    rid_boarding_at_stop = set(
                        [key_translator.get(rq.get_rid_struct(), rq.get_rid_struct()) for rq in ca.rq_dict.get(1, [])])
                    if not rid_boarding_at_stop == set(self.list_plan_stops[0].get_list_boarding_rids()):
                        # LOG.debug(" -> current boarding states is wrong!")
                        self.feasible = False
                        self.structural_feasible = False
                        return False
                    rid_deboarding_at_stop = set(
                        [key_translator.get(rq.get_rid_struct(), rq.get_rid_struct()) for rq in ca.rq_dict.get(-1, [])])
                    if not rid_deboarding_at_stop == set(self.list_plan_stops[0].get_list_alighting_rids()):
                        # LOG.debug(" -> current deboarding states is wrong!")
                        self.feasible = False
                        self.structural_feasible = False
                        return False
                else:
                    # LOG.debug(" -> infeasible planned stop")
                    self.feasible = False
                    self.structural_feasible = False
                    return False
            # active leg is locked and targets the first planned stop -> lock the plan stop, too
            if ca.locked and ca.destination_pos == self.list_plan_stops[0].get_pos():
                # LOG.debug(" -> LOCK!")
                self.list_plan_stops[0].set_locked(True)
                # LOG.verbose("set starting time: {}".format(veh_obj.cl_start_time))
                # vehicle is already serving the stop -> record its actual start time
                if not ca.status in G_DRIVING_STATUS and not ca.status in G_LAZY_STATUS:  # TODO #
                    self.list_plan_stops[0].set_started_at(veh_obj.cl_start_time)
        # 3) update planned attributes (arrival_time, arrival_soc, departure)
        # LOG.debug("after update plan:")
        # LOG.debug(str(self))
        # LOG.debug(f"currently ob: {veh_obj.pax}")
        self.feasible = self.update_tt_and_check_plan(veh_obj, sim_time, routing_engine,
                                                      keep_feasible=keep_time_infeasible)
        return self.feasible
def return_intermediary_plan_state(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, stop_index : int) -> dict:
""" this function evalutes the future vehicle state after it would have performed the next stop_index plan stops of the vehicle plan
and returns a dictionary specifing the vehicle state
:param veh_obj: reference the vehicle object
:param sim_time: simulation time
:param routing_engine: routing engine reference
:param stop_index: index of list plan stops of vehicle plan until the state is evaluated
:return: dictionary specifying the future vehicle state"""
c_pos = veh_obj.pos
c_soc = veh_obj.soc
c_time = sim_time
if self.list_plan_stops[0].is_locked(): # set time at start_time of boarding process
boarding_startet = self.list_plan_stops[0].get_started_at()
if boarding_startet is not None:
c_time = boarding_startet
key_translator = {sub_rid[0]: sub_rid for sub_rid in self.pax_info.keys() if type(sub_rid) == tuple}
c_pax = {key_translator.get(rq.get_rid_struct(), rq.get_rid_struct()): 1 for rq in veh_obj.pax}
nr_pax = veh_obj.get_nr_pax_without_currently_boarding() # sum([rq.nr_pax for rq in veh_obj.pax])
nr_parcels = veh_obj.get_nr_parcels_without_currently_boarding()
self.pax_info = {}
for rq in veh_obj.pax:
rid = key_translator.get(rq.get_rid_struct(), rq.get_rid_struct())
self.pax_info[rid] = [rq.pu_time]
# for pstop in self.list_plan_stops[:stop_index + 1]:
for i, pstop in enumerate(self.list_plan_stops[:stop_index + 1]):
if c_pos != pstop.get_pos():
_, tt, tdist = routing_engine.return_travel_costs_1to1(c_pos, pstop.get_pos())
c_pos = pstop.get_pos()
c_time += tt
c_soc -= veh_obj.compute_soc_consumption(tdist)
if c_pos == pstop.get_pos():
last_c_time = c_time
last_c_soc = c_soc
earliest_time = pstop.get_earliest_start_time()
if c_time < earliest_time:
c_time = earliest_time
# LOG.debug(f"c_time 3 {c_time}")
# update pax and check max. passenger constraint
nr_pax += pstop.get_change_nr_pax()
nr_parcels += pstop.get_change_nr_parcels()
for rid in pstop.get_list_boarding_rids():
if self.pax_info.get(rid):
continue
self.pax_info[rid] = [c_time]
c_pax[rid] = 1
for rid in pstop.get_list_alighting_rids():
self.pax_info[rid].append(c_time)
try:
del c_pax[rid]
except KeyError:
LOG.warning(f"update_tt_and_check_plan(): try to remove a rid that is not on board!")
# set departure time
c_time = pstop.get_departure_time(c_time)
pstop.set_planned_arrival_and_departure_time(last_c_time, c_time)
# set charge
if pstop.get_charging_power() > 0: # TODO # is charging now in waiting included as planned here?
c_soc += veh_obj.compute_soc_charging(pstop.get_charging_power(), c_time - last_c_time)
c_soc = max(c_soc, 1.0)
pstop.set_planned_arrival_and_departure_soc(last_c_soc, c_soc)
return {"stop_index": stop_index, "c_pos": c_pos, "c_soc": c_soc, "c_time": c_time, "c_pax": c_pax,
"pax_info": self.pax_info.copy(), "c_nr_pax": nr_pax, "c_nr_parcels" : nr_parcels}
def update_tt_and_check_plan(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, init_plan_state : dict=None, keep_feasible : bool=False):
    """This method updates the planning properties of all PlanStops of the Plan according to the new vehicle
    position and checks if it is still feasible.

    It simulates the plan stop by stop, tracking position, time, state of charge (SOC) and
    on-board passengers/parcels, and records planned arrival/departure times and SOCs on each stop.

    :param veh_obj: vehicle object to which plan is applied
    :param sim_time: current simulation time
    :param routing_engine: reference to routing engine
    :param init_plan_state: optional pre-computed simulation state; requires the keys
        "stop_index", "c_pos", "c_soc", "c_time", "c_pax", "c_nr_pax", "c_nr_parcels" and "pax_info"
        (simulation then resumes after "stop_index" instead of from the vehicle state)
    :param keep_feasible: useful flag to keep assigned VehiclePlans for simulations with dynamic travel times;
        if True, infeasible stops are not skipped but locked so the plan stays assigned
    :return: True if all time-window, capacity and SOC constraints are met, else False
    """
    # TODO # think about update of duration of VehicleChargeLegs
    is_feasible = True
    if len(self.list_plan_stops) == 0:
        self.pax_info = {}
        return is_feasible
    infeasible_index = -1  # lock all plan stops until last infeasible stop if vehplan is forced to stay feasible
    if init_plan_state is not None:
        # resume simulation from a previously stored state
        start_stop_index = init_plan_state["stop_index"] + 1
        c_pos = init_plan_state["c_pos"]
        c_soc = init_plan_state["c_soc"]
        c_time = init_plan_state["c_time"]
        c_pax = init_plan_state["c_pax"].copy()
        c_nr_pax = init_plan_state["c_nr_pax"]
        c_nr_parcels = init_plan_state["c_nr_parcels"]
        self.pax_info = {}
        for k, v in init_plan_state["pax_info"].items():
            self.pax_info[k] = v.copy()
    else:
        # start from the current vehicle state; tuple rids map sub-request ids to their base id
        key_translator = {sub_rid[0]: sub_rid for sub_rid in self.pax_info.keys() if type(sub_rid) == tuple}
        self.pax_info = {}
        start_stop_index = 0
        c_pos = veh_obj.pos
        c_soc = veh_obj.soc
        c_time = sim_time
        if self.list_plan_stops[0].is_locked(): # set time at start_time of boarding process
            boarding_started = self.list_plan_stops[0].get_started_at()
            if boarding_started is not None:
                c_time = boarding_started
        c_pax = {key_translator.get(rq.get_rid_struct(), rq.get_rid_struct()): 1 for rq in veh_obj.pax}
        c_nr_pax = veh_obj.get_nr_pax_without_currently_boarding()  # sum([rq.nr_pax for rq in veh_obj.pax])
        c_nr_parcels = veh_obj.get_nr_parcels_without_currently_boarding()
        for rq in veh_obj.pax:
            rid = key_translator.get(rq.get_rid_struct(), rq.get_rid_struct())
            self.pax_info[rid] = [rq.pu_time]
    for i, pstop in enumerate(self.list_plan_stops[start_stop_index:], start=start_stop_index):
        pstop_pos = pstop.get_pos()
        if c_pos != pstop_pos:
            if not is_feasible and not keep_feasible:
                # plan already infeasible and we are allowed to abort early
                break
            _, tt, tdist = routing_engine.return_travel_costs_1to1(c_pos, pstop_pos)
            c_pos = pstop_pos
            c_time += tt
            c_soc -= veh_obj.compute_soc_consumption(tdist)
            if c_soc < 0:
                # vehicle would run out of charge on the way to this stop
                is_feasible = False
                infeasible_index = i
        if c_pos == pstop_pos:
            last_c_time = c_time
            last_c_soc = c_soc
            earliest_time = pstop.get_earliest_start_time()
            if c_time < earliest_time:
                c_time = earliest_time
            # update pax and check max. passenger constraint
            c_nr_pax += pstop.get_change_nr_pax()
            c_nr_parcels += pstop.get_change_nr_parcels()
            for rid in pstop.get_list_boarding_rids():
                if i == 0 and self.pax_info.get(rid):
                    # rid is already boarding at the currently active stop
                    continue
                self.pax_info[rid] = [c_time]
                c_pax[rid] = 1
            for rid in pstop.get_list_alighting_rids():
                self.pax_info[rid].append(c_time)
                try:
                    del c_pax[rid]
                except KeyError:
                    LOG.warning(f"update_tt_and_check_plan(): try to remove a rid that is not on board!")
                    LOG.warning(f"{self}")
                    is_feasible = False
                    infeasible_index = i
                    raise EnvironmentError
            latest_time = pstop.get_latest_start_time(self.pax_info)
            if c_time > latest_time:
                # arrival after latest allowed start time
                is_feasible = False
                infeasible_index = i
            if c_nr_pax > veh_obj.max_pax or c_nr_parcels > veh_obj.max_parcels:
                # capacity constraint violated
                is_feasible = False
                infeasible_index = i
            # set departure time
            c_time = pstop.get_departure_time(c_time)
            pstop.set_planned_arrival_and_departure_time(last_c_time, c_time)
            # set charge
            if pstop.get_charging_power() > 0:  # TODO # is charging now in waiting included as planned here?
                c_soc += veh_obj.compute_soc_charging(pstop.get_charging_power(), c_time - last_c_time)
                # BUGFIX: cap the SOC at 100% after charging; the previous max(c_soc, 1.0)
                # erroneously *raised* the SOC to at least full at every charging stop
                c_soc = min(c_soc, 1.0)
            pstop.set_planned_arrival_and_departure_soc(last_c_soc, c_soc)
    if keep_feasible and not is_feasible:
        # lock all stops up to (and including) the last infeasible one so the plan stays assigned
        for i, p_stop in enumerate(self.list_plan_stops):
            if i > infeasible_index:
                break
            p_stop.set_infeasible_locked(True)
    return is_feasible
def build_VRL(self, veh_obj : SimulationVehicle, prq_db : dict, charging_management=None) -> List[VehicleRouteLeg]:
    """This method builds VRL for simulation vehicles from a given Plan. Since the vehicle could already have the
    VRL with the correct route, the route from veh_obj.assigned_route[0] will be used if destination positions
    are matching

    For each plan stop, first a driving leg is appended if the vehicle is not already at the
    stop position, then a stop leg whose status reflects what happens there
    (boarding / charging / out-of-service / planned stop / repositioning target / idle).

    :param veh_obj: vehicle object to which plan is applied
    :param prq_db: reference to PlanRequest database
    :param charging_management: reference to charging management
    :return: VRL according to the given plan
    """
    list_vrl = []
    c_pos = veh_obj.pos
    for pstop in self.list_plan_stops:
        # boarding_dict maps +1 -> boarding requests, -1 -> alighting requests
        boarding_dict = {1: [], -1: []}
        if len(pstop.get_list_boarding_rids()) > 0 or len(pstop.get_list_alighting_rids()) > 0:
            boarding = True
            for rid in pstop.get_list_boarding_rids():
                boarding_dict[1].append(prq_db[rid])
            for rid in pstop.get_list_alighting_rids():
                boarding_dict[-1].append(prq_db[rid])
        else:
            boarding = False
        if pstop.get_charging_power() > 0:
            charging = True
        else:
            charging = False
        # a departure time beyond LARGE_INT marks an (indefinitely) inactive/depot stop
        if pstop.get_departure_time(0) > LARGE_INT:
            inactive = True
        else:
            inactive = False
        # a non-zero departure time marks a planned stop; otherwise the stop is a repositioning target
        if pstop.get_departure_time(0) != 0:
            planned_stop = True
            repo_target = False
        else:
            planned_stop = False
            repo_target = True
        if c_pos != pstop.get_pos():
            # driving vrl
            if boarding:
                status = VRL_STATES.ROUTE
            elif charging:
                status = VRL_STATES.TO_CHARGE
            elif inactive:
                status = VRL_STATES.TO_DEPOT
            else:
                # repositioning
                status = VRL_STATES.REPOSITION
            # use empty boarding dict for this VRL, but do not overwrite boarding_dict!
            list_vrl.append(VehicleRouteLeg(status, pstop.get_pos(), {1: [], -1: []}, locked=pstop.is_locked()))
            c_pos = pstop.get_pos()
        # stop vrl
        if boarding and charging:
            status = VRL_STATES.BOARDING_WITH_CHARGING
        elif boarding:
            status = VRL_STATES.BOARDING
        elif charging:
            status = VRL_STATES.CHARGING
        elif inactive:
            status = VRL_STATES.OUT_OF_SERVICE
        elif planned_stop:
            status = VRL_STATES.PLANNED_STOP
        elif repo_target:
            status = VRL_STATES.REPO_TARGET
        else:
            # TODO # after ISTTT: add other states if necessary; for now assume vehicle idles
            status = VRL_STATES.IDLE
        if status == VRL_STATES.BOARDING_WITH_CHARGING:
            # reuse an already scheduled VehicleChargeLeg if one exists for this stop
            create_new_vcl = True
            charging_unit_id, existing_vcl = pstop.get_charging_unit_and_vcl()
            if existing_vcl is not None and charging_unit_id is not None:
                # use existing VCL if it already has been scheduled
                existing_vcl = charging_management.get_vcl(charging_unit_id, existing_vcl)
                if existing_vcl is not None:
                    create_new_vcl = False
                    # TODO # check schedule and adapt existing_vcl if necessary?
                    list_vrl.append(existing_vcl)
            if create_new_vcl:
                # schedule new VCL if there was no vcl assigned
                new_vcl, power, charging_unit_id, vcl_id = charging_management.create_new_vcl(veh_obj, pstop)
                pstop.set_charging_attributes(charging_unit_id, vcl_id, power)
                list_vrl.append(new_vcl)
        elif status != VRL_STATES.IDLE:
            dur, edep = pstop.get_duration_and_earliest_departure()
            earliest_start_time = pstop.get_earliest_start_time()
            if edep is not None:
                # NOTE(review): departure_time is assigned but never used below — looks unfinished
                LOG.warning("absolute earliest departure not implementen in build VRL!")
                departure_time = edep
            else:
                departure_time = -LARGE_INT
            if dur is not None:
                stop_duration = dur
            else:
                stop_duration = 0
            list_vrl.append(VehicleRouteLeg(status, pstop.get_pos(), boarding_dict, pstop.get_charging_power(),
                                            duration=stop_duration, earliest_start_time=earliest_start_time,
                                            locked=pstop.is_locked()))
    return list_vrl
def get_dedicated_rid_list(self) -> list:
    """Return all request-ids that are part of this vehicle plan.

    :return: list of rids (the keys of the internal pax_info dictionary)
    """
    rid_list = list(self.pax_info)
    return rid_list
def update_prq_hard_constraints(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, prq : PlanRequest, new_lpt : float, new_ept : float=None,
                                keep_feasible : bool=False):
    """Tighten/relax the pick-up time window of a request within this plan.

    Updates the earliest/latest pick-up time constraints on every PlanStop at
    which the given request boards, then re-simulates the plan to check feasibility.

    :param veh_obj: simulation vehicle
    :param sim_time: current simulation time
    :param routing_engine: routing engine
    :param prq: PlanRequest whose constraints are adapted
    :param new_lpt: new latest pick-up time constraint
    :param new_ept: new earliest pick-up time constraint, not set if None
    :param keep_feasible: forwarded to update_tt_and_check_plan
    :return: True if the plan is still feasible after the update
    :rtype: bool
    """
    rid = prq.get_rid_struct()
    for plan_stop in self.list_plan_stops:
        if rid in plan_stop.get_list_boarding_rids():
            plan_stop.update_rid_boarding_time_constraints(new_latest_pickup_time=new_lpt,
                                                           new_earliest_pickup_time=new_ept)
    return self.update_tt_and_check_plan(veh_obj, sim_time, routing_engine, keep_feasible=keep_feasible)
def copy_and_remove_empty_planstops(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase):
    """Return a copy of this vehicle plan without empty plan stops.

    A stop is kept when something happens there (it is not empty) or when it is
    locked (regular lock or locked end stop); all other stops are dropped.
    If anything was dropped, the copy is re-simulated to refresh its timing info.

    :param veh_obj: vehicle object
    :param sim_time: simulation time
    :param routing_engine: routing engine
    :return: vehicle plan without empty planstops
    :rtype: vehicleplan
    """
    new_plan = self.copy()
    kept_stops = [ps for ps in new_plan.list_plan_stops
                  if not ps.is_empty() or ps.is_locked() or ps.is_locked_end()]
    if len(kept_stops) != len(new_plan.list_plan_stops):
        new_plan.list_plan_stops = kept_stops
        new_plan.update_tt_and_check_plan(veh_obj, sim_time, routing_engine)
    return new_plan
1768919 | <reponame>frangiz/AdventOfCode2018
"""--- Day 10: The Stars Align ---"""
import helpers
class Point():
    """A light point with position (x, y) and per-second velocity (dx, dy)."""

    def __init__(self, x, y, dx, dy):
        self.x = x
        self.y = y
        self.dx = dx
        self.dy = dy

    def __repr__(self):
        return '({}, {} -> {}, {})'.format(self.x, self.y, self.dx, self.dy)

    def tick(self):
        """Advance the point one second along its velocity vector."""
        self.x += self.dx
        self.y += self.dy
def parse(puzzle_input):
    """Parse puzzle lines of the form 'position=< x, y> velocity=< dx, dy>' into Points."""
    points = []
    for line in puzzle_input:
        position_part, velocity_part = line[len('position=<'):].strip().split('> velocity=<')
        x, y = (int(value) for value in position_part.split(','))
        dx, dy = (int(value) for value in velocity_part[:-1].split(','))
        points.append(Point(x, y, dx, dy))
    return points
def part_a(puzzle_input):
    """
    Calculate the answer for part_a.

    Simulates the moving points and prints every frame whose vertical extent is
    small enough (< 20 rows) to plausibly contain the message; the message must
    be read from stdout.

    Args:
        puzzle_input (list): Formatted as the provided input from the website.

    Returns:
        string: The answer for part_a.
    """
    points = parse(puzzle_input)
    for _ in range(100_000):
        min_x = min(p.x for p in points)
        max_x = max(p.x for p in points)
        min_y = min(p.y for p in points)
        max_y = max(p.y for p in points)
        if abs(max_y - min_y) < 20:
            # precompute occupied coordinates once per frame: O(1) membership per
            # grid cell instead of scanning all points for every cell
            occupied = {(p.x, p.y) for p in points}
            for y in range(min_y, max_y + 1):
                for x in range(min_x, max_x + 1):
                    if (x, y) in occupied:
                        print('#', end='')
                    else:
                        print('.', end='')
                print('')
            print('-' * 50)
        for p in points:
            p.tick()
    return str(0)
def part_b(puzzle_input):
    """
    Calculate the answer for part_b.

    Same simulation as part_a, but additionally prints the number of elapsed
    seconds before each candidate frame; the answer must be read from stdout.

    Args:
        puzzle_input (list): Formatted as the provided input from the website.

    Returns:
        string: The answer for part_b.
    """
    points = parse(puzzle_input)
    time = 0
    for _ in range(100_000):
        min_x = min(p.x for p in points)
        max_x = max(p.x for p in points)
        min_y = min(p.y for p in points)
        max_y = max(p.y for p in points)
        if abs(max_y - min_y) < 20:
            print(time)
            # precompute occupied coordinates once per frame: O(1) membership per
            # grid cell instead of scanning all points for every cell
            occupied = {(p.x, p.y) for p in points}
            for y in range(min_y, max_y + 1):
                for x in range(min_x, max_x + 1):
                    if (x, y) in occupied:
                        print('#', end='')
                    else:
                        print('.', end='')
                print('')
            print('-' * 50)
        for p in points:
            p.tick()
        time += 1
    return str(0)
def solve(puzzle_input):
    """Returs the answer for both parts."""
    answers = {
        'a': part_a(puzzle_input),
        'b': part_b(puzzle_input),
    }
    return answers
| StarcoderdataPython |
8010734 | <filename>DataGenerators/prepare_dataset.py
# this file is used to convert '.mat' files to one '.npz' file
# for human3.6m dataset, it also select the 17 joints from the whole 32
# input: some '.mat' files in './data/NAME/'
# output: one '.npz' file in same path
import numpy as np
import os
from scipy.io import loadmat
from glob import glob
from arguments import parse_args
from data_utils import *
# Resolve dataset-specific metadata and collect all .mat files to convert.
args = parse_args()
print('Loading dataset...')
metadata = suggest_metadata(args.dataset)
num_joints = metadata['num_joints']  # total number of joints stored in the .mat files
keypoints = metadata['keypoints']    # indices of the joints to keep (e.g. 17 of 32 for human3.6m)
dataset_path = 'data/' + args.dataset
file_list = glob(dataset_path + '/*.mat')
output_filename = 'data/data_3d_' + args.dataset
print('Preparing dataset...')
output = {}
for f in file_list:
    # double splitext strips a possible compound extension; the remaining basename is the action name
    action = os.path.splitext(os.path.splitext(os.path.basename(f))[0])[0]
    hf = loadmat(f)
    # reshape flat data to (frames, joints, 3) and select only the wanted joints
    positions = hf['data'].reshape(-1,num_joints,3)[:,keypoints,:]
    output[action] = positions.astype('float32')
print('Saving...')
# store all actions in one compressed archive under the key 'positions_3d'
np.savez_compressed(output_filename, positions_3d=output)
print('Done.')
5151038 | <reponame>slegroux/NeMo
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import hashlib
import json
import os
import shutil
import tarfile
import tempfile
from typing import Any, Dict, Optional, Union
import torch
from megatron import mpu
from megatron.checkpointing import get_checkpoint_version, set_checkpoint_version
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.utilities import rank_zero_only
from transformers import TRANSFORMERS_CACHE
from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
from nemo.collections.nlp.modules import BertModule, MegatronBertEncoder
from nemo.collections.nlp.modules.common.megatron.megatron_utils import compute_model_parallel_rank
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.collections.nlp.parts.nlp_overrides import NLPCheckpointConnector
from nemo.core.classes import ModelPT
from nemo.core.classes.exportable import Exportable
from nemo.utils import AppState, logging
from nemo.utils.exp_manager import configure_checkpointing
from nemo.utils.get_rank import is_global_rank_zero
__all__ = ['NLPModel']
NEMO_NLP_TMP = os.path.join(os.path.dirname(str(TRANSFORMERS_CACHE)), "nemo_nlp_tmp")
os.makedirs(NEMO_NLP_TMP, exist_ok=True)
class NLPModel(ModelPT, Exportable):
    """Base class for NLP Models.

    Adds NLP-specific behaviour on top of ModelPT: registering encoder/tokenizer
    artifacts into the .nemo archive and saving/restoring megatron
    model-parallel checkpoints (one shard per model-parallel rank).
    """

    def __init__(self, cfg: DictConfig, trainer: Trainer = None):
        super().__init__(cfg, trainer)
        self.set_world_size(trainer)

    @rank_zero_only
    def register_bert_model(self):
        """Adds encoder config to .nemo archive.
        """
        # check if there is an encoder, warn if not
        if self.bert_model is None:
            raise ValueError('Instantiate self.bert_model before registering it.')
        else:
            # get encoder config and create source for artifact
            if isinstance(self.bert_model, MegatronBertEncoder):
                pretrained_model_name = self.bert_model._model_name
                encoder_config_path = pretrained_model_name + '_encoder_config.json'
                encoder_config_src = os.path.join(NEMO_NLP_TMP, encoder_config_path)
                config_for_json = OmegaConf.to_container(self.bert_model.config)
                with open(encoder_config_src, 'w', encoding='utf-8') as f:
                    f.write(json.dumps(config_for_json, indent=2, sort_keys=True) + '\n')
                self.register_artifact(encoder_config_path, encoder_config_src)
                self.cfg.language_model.config_file = encoder_config_path
            elif isinstance(self.bert_model, BertModule):
                # HuggingFace Transformer Config
                pretrained_model_name = self.bert_model.name_or_path
                # Some HF names have "/" in them so we replace with _
                pretrained_model_name = pretrained_model_name.replace("/", "_")
                encoder_config_path = pretrained_model_name + '_encoder_config.json'
                encoder_config_src = os.path.join(NEMO_NLP_TMP, encoder_config_path)
                self.bert_model.config.to_json_file(encoder_config_src)  # name requested by jarvis team
                self.register_artifact(encoder_config_path, encoder_config_src)
                self.cfg.language_model.config_file = encoder_config_path
            else:
                logging.info(
                    f'Registering BERT model config for {self.bert_model} is not yet supported. Please override this method if needed.'
                )

    def setup_tokenizer(self, cfg: DictConfig):
        """Instantiates tokenizer based on config and registers tokenizer artifacts.

           If model is being restored from .nemo file then the tokenizer.vocab_file will
           be used (if it exists).

           Otherwise, we will use the vocab file provided in the config (if it exists).

           Finally, if no vocab file is given (this happens frequently when using HF),
           we will attempt to extract the vocab from the tokenizer object and then register it.

        Args:
            cfg (DictConfig): Tokenizer config
        """
        vocab_file = None
        if self._is_model_being_restored():
            # NOTE(review): this checks for a literal relative path 'tokenizer.vocab_file'
            # in the current working directory (the extracted .nemo archive) — confirm
            if os.path.exists('tokenizer.vocab_file'):
                # model is being restored from .nemo file so tokenizer.vocab_file has precedence
                vocab_file = self.register_artifact(config_path='tokenizer.vocab_file', src='tokenizer.vocab_file')
                cfg.vocab_file = vocab_file
            # tokenizer.vocab_file is added to the config file and registered as artifact for .nemo file
            # during training but this file is missing for load_from_checkpoint() method call
            # it's safer to use restore_from .nemo file
            elif cfg.vocab_file and not os.path.exists(cfg.vocab_file):
                logging.warning(
                    f'tokenizer.vocab_file not found at {cfg.vocab_file}. It is recommended to use restore_from() method with .nemo file.'
                )
            else:
                vocab_file = self.register_artifact(config_path='tokenizer.vocab_file', src=cfg.vocab_file)
        elif cfg.vocab_file:
            # use vocab file from config
            vocab_file = self.register_artifact(config_path='tokenizer.vocab_file', src=cfg.vocab_file)
        tokenizer = get_tokenizer(
            tokenizer_name=cfg.tokenizer_name,
            vocab_file=vocab_file,
            special_tokens=OmegaConf.to_container(cfg.special_tokens) if cfg.special_tokens else None,
            tokenizer_model=self.register_artifact(config_path='tokenizer.tokenizer_model', src=cfg.tokenizer_model),
        )
        self.tokenizer = tokenizer

        if vocab_file is None:
            # when there is no vocab file we try to get the vocab from the tokenizer and register it
            self._register_vocab_from_tokenizer(vocab_file_config_path='tokenizer.vocab_file', cfg=cfg)

    @rank_zero_only
    def _register_vocab_from_tokenizer(
        self,
        vocab_file_config_path: str = 'tokenizer.vocab_file',
        vocab_dict_config_path: str = 'tokenizer_vocab_dict.json',
        cfg: DictConfig = None,
    ):
        """Creates vocab file from tokenizer if vocab file is None.

        Args:
            vocab_file_config_path: path to the vocab_file in the config
            vocab_dict_config_path: path to the vocab_dict in the config
            cfg: tokenizer config
        """
        if self.tokenizer is None:
            raise ValueError('Instantiate self.tokenizer before registering vocab from it.')
        else:
            if isinstance(self.tokenizer, AutoTokenizer):
                # extract vocab from tokenizer
                vocab_dict = self.tokenizer.tokenizer.get_vocab()

                # for fast and slow tokenizer vocabularies compatibility
                vocab_dict = dict(sorted(vocab_dict.items(), key=lambda item: item[1]))

                # get hash of vocab_dict to create a unique directory to write vocab_dict and vocab_file
                m = hashlib.md5()
                if 'tokenizer_name' in cfg:
                    if cfg.tokenizer_name is not None:
                        # different pretrained models with the same vocab will have different hash
                        m.update(cfg.tokenizer_name.encode())
                # get string representation of vocab_dict
                vocab_dict_str = json.dumps(vocab_dict, sort_keys=True).encode()
                m.update(vocab_dict_str)
                vocab_dict_hash = m.hexdigest()

                hash_path = os.path.join(NEMO_NLP_TMP, vocab_dict_hash)
                os.makedirs(hash_path, exist_ok=True)

                vocab_json_src = os.path.join(hash_path, vocab_dict_config_path)

                with open(vocab_json_src, 'w', encoding='utf-8') as f:
                    f.write(json.dumps(vocab_dict, indent=2, sort_keys=True) + '\n')
                self.register_artifact(config_path=vocab_dict_config_path, src=vocab_json_src)

                # create vocab file: one token per line, in id order
                vocab_file_src = os.path.join(hash_path, vocab_file_config_path)
                with open(vocab_file_src, 'w', encoding='utf-8') as f:
                    for key in vocab_dict:
                        f.write(key + '\n')

                cfg.vocab_file = vocab_file_src
                self.register_artifact(config_path=vocab_file_config_path, src=vocab_file_src)
            else:
                logging.info(
                    f'Registering tokenizer vocab for {self.tokenizer} is not yet supported. Please override this method if needed.'
                )

    def _clip_gradients(self, optimizer, clip_val=None):
        """ Override of PTL Gradient Clipping.
            Enables model parallel gradient clipping from Megatron-LM.

        Args:
            optimizer ([type]): [description]
            clip_val ([type], optional): [description]. Defaults to None.
        """
        app_state = AppState()

        # get clip_val from trainer if None is provided
        if clip_val is None:
            clip_val = float(self._trainer.gradient_clip_val)

        if app_state.model_parallel_size is not None:
            model = self._trainer.get_model()
            parameters = model.parameters()
            if mpu.model_parallel_is_initialized():
                # megatron-aware clipping reduces norms across model-parallel ranks
                mpu.grads.clip_grad_norm(parameters=parameters, max_norm=clip_val)
            else:
                raise ValueError('Model parallel groups must be intialized to use model parallel gradient clipping.')
        else:
            return Accelerator._clip_gradients(self, optimizer, clip_val)

    def setup(self, stage: str) -> None:
        """ PTL hook that is called on all DDP processes. """

        if stage == 'fit':

            # adds self.bert_model config to .nemo file
            if hasattr(self, 'bert_model') and self.bert_model is not None:
                self.register_bert_model()

            app_state = AppState()

            if app_state.model_parallel_size is not None:

                self._trainer.checkpoint_connector = NLPCheckpointConnector(self._trainer)

                # Configure checkpointing for model parallel
                if app_state.create_checkpoint_callback:
                    # global rank 0 is configured by exp_manager
                    if not is_global_rank_zero() and app_state.data_parallel_rank == 0:
                        configure_checkpointing(
                            self._trainer,
                            app_state.log_dir,
                            app_state.checkpoint_name,
                            app_state.checkpoint_callback_params,
                        )

    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """ LightningModule hook that's used to save things in addition to model weights. """

        if hasattr(self, "bert_model") and isinstance(self.bert_model, MegatronBertEncoder):
            checkpoint['checkpoint_version'] = get_checkpoint_version()
        return None

    def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """ LightningModule hook that's used to restore things saved with on_save_checkpoint."""

        if hasattr(self, "bert_model") and isinstance(self.bert_model, MegatronBertEncoder):
            if get_checkpoint_version():
                assert (
                    checkpoint['checkpoint_version'] == get_checkpoint_version()
                ), 'checkpoint version found on_load_checkpoint different than get_checkpoint_version'
            else:
                set_checkpoint_version(checkpoint['checkpoint_version'])
                logging.info(f"Setting Megatron checkpoint version: {checkpoint['checkpoint_version']}")
        return None

    # no rank check as model parallel models need to be saved on data parallel rank 0
    def save_to(self, save_path: str):
        """
        Saves model instance (weights and configuration) into .nemo file
        You can use "restore_from" method to fully restore instance from .nemo file.

        .nemo file is an archive (tar.gz) with the following:
            model_config.yaml - model configuration in .yaml format. You can deserialize this into cfg argument for model's constructor
            model_weights.ckpt - model checkpoint

        Args:
            save_path: Path to .nemo file where model instance should be saved
        """
        app_state = AppState()
        if app_state.model_parallel_size is not None:
            self._default_save_to(save_path)
        else:
            # super.save_to only runs on global rank 0
            return super().save_to(save_path)

    def _default_save_to(self, save_path: str):
        app_state = AppState()
        if app_state.model_parallel_size is not None:
            # each model parallel rank creates a .nemo file
            # after all .nemo files are created, each rank
            # will add their checkpoint to global rank 0

            base_dir = os.path.dirname(save_path)  # use the directory to merge mp_rank .nemo files into one

            # update save_path based on model parallel_rank
            base_path = os.path.splitext(save_path)[0]  # everything except the extension

            mp_save_path = f'{base_path}_mp_rank_{app_state.model_parallel_rank:02d}.nemo'

            if app_state.data_parallel_rank == 0:
                super()._default_save_to(mp_save_path)

            # barrier so that all processes have finished writing their weights before creating .nemo file
            torch.distributed.barrier()

            if is_global_rank_zero():
                # extract all tar files
                for mp_rank in range(app_state.model_parallel_size):
                    mp_tar_path = f'{base_path}_mp_rank_{mp_rank:02d}.nemo'
                    mp_tar = tarfile.open(mp_tar_path, 'r:gz')
                    mp_tar.extractall(path=os.path.join(base_dir, f'mp_rank_{mp_rank:02d}'))
                    mp_tar.close()
                    os.remove(mp_tar_path)

                # move rank 0 .nemo extract to base_path
                shutil.move(os.path.join(base_dir, 'mp_rank_00'), base_path)

                # move mp_rank_00 checkpoint to mp_rank_00 directory inside base_path
                os.mkdir(os.path.join(base_path, 'mp_rank_00'))
                shutil.move(os.path.join(base_path, 'model_weights.ckpt'), os.path.join(base_path, 'mp_rank_00'))

                # move other mp_rank checkpoints from base_dir to base_path
                for mp_rank in range(1, app_state.model_parallel_size):
                    os.mkdir(os.path.join(base_path, f'mp_rank_{mp_rank:02d}'))
                    shutil.move(
                        os.path.join(base_dir, f'mp_rank_{mp_rank:02d}', 'model_weights.ckpt'),
                        os.path.join(base_path, f'mp_rank_{mp_rank:02d}'),
                    )
                    # clean up leftover directory
                    shutil.rmtree(os.path.join(base_dir, f'mp_rank_{mp_rank:02d}'))

                # create tar file from base_path
                self._make_nemo_file_from_folder(save_path, base_path)

                # clean up base_path
                shutil.rmtree(base_path)

        elif is_global_rank_zero():
            return super()._default_save_to(save_path)
        else:
            return

    @classmethod
    def restore_from(
        cls,
        restore_path: str,
        override_config_path: Optional[Union[OmegaConf, str]] = None,
        map_location: Optional[torch.device] = None,
        strict: bool = False,
        return_config: bool = False,
        trainer: Trainer = None,
    ):
        """
        Restores model instance (weights and configuration) from .nemo file.

        Args:
            restore_path: path to .nemo file from which model should be instantiated
            override_config_path: path to a yaml config that will override the internal
                config file or an OmegaConf / DictConfig object representing the model config.
            map_location: Optional torch.device() to map the instantiated model to a device.
                By default (None), it will select a GPU if available, falling back to CPU otherwise.
            strict: Passed to load_state_dict.
            return_config: If set to true, will return just the underlying config of the restored
                model as an OmegaConf DictConfig object without instantiating the model.
            trainer: PyTorch Lightning trainer. Must be passed in order to use model parallel .nemo

            Example:
                ```
                model = nemo.collections.nlp.models.TokenClassificationModel.restore_from('token_classification.nemo')
                assert isinstance(model, nemo.collections.nlp.models.TokenClassificationModel)
                ```

        Returns:
            An instance of type cls or its underlying config (if return_config is set).
        """
        if not os.path.exists(restore_path):
            raise FileNotFoundError(f"Can't find {restore_path}")
        app_state = AppState()
        app_state.model_restore_path = os.path.abspath(os.path.expanduser(restore_path))

        # detect if we have a model parallel .nemo file
        with tempfile.TemporaryDirectory() as tmpdir:
            cwd = os.getcwd()
            os.chdir(tmpdir)
            # detect if model parallel from tarfile
            tar = tarfile.open(restore_path, "r:gz")
            names = tar.getnames()
            mp_ranks = []
            for name in names:
                if 'mp_rank' in name:
                    mp_ranks.append(name)
            if mp_ranks:
                # directory and file are included in getnames(), hence the // 2
                app_state.model_parallel_size = len(mp_ranks) // 2
                # get checkpoint version
                tar.extract('./megatron_checkpoint_version.json', tmpdir)
                with open('megatron_checkpoint_version.json', 'r') as f:
                    checkpoint_version = json.load(f).get('checkpoint_version', None)
                logging.info(
                    (
                        f'Detected model parallel .nemo file: {restore_path}. '
                        f'Assuming megatron model parallelism with '
                        f'model_parallel_size: {app_state.model_parallel_size} '
                        f'and checkpoint version: {checkpoint_version}'
                    )
                )
            tar.close()
            os.chdir(cwd)

        if app_state.model_parallel_size is not None:
            if not isinstance(trainer, Trainer):
                raise ValueError("trainer must be a PyTorch Lightning Trainer to restore model parallel .nemo files.")

            if checkpoint_version is None:
                raise ValueError(
                    "Restoring from megatron model parallel .nemo but could not find megatron checkpoint version."
                )
            else:
                logging.info(f"Setting megatron checkpoint version: {checkpoint_version}")
                set_checkpoint_version(checkpoint_version)

            app_state.world_size = trainer.num_gpus * trainer.num_nodes

            if trainer.local_rank is not None:
                app_state.local_rank = trainer.local_rank
            else:
                raise ValueError("trainer.local_rank is None. local_rank needed to restore model parallel models.")

            model_parallel_rank = compute_model_parallel_rank(trainer.local_rank, app_state.model_parallel_size)
            app_state.model_parallel_rank = model_parallel_rank

            restored_model = cls._default_restore_from(
                restore_path, override_config_path, map_location, strict, return_config
            )
            restored_model._trainer = trainer
            return restored_model
        else:
            return super().restore_from(restore_path, override_config_path, map_location, strict, return_config)

    @rank_zero_only
    def register_megatron_checkpoint_version(self):
        """ Adds checkpoint version to .nemo archive """
        if self.bert_model is None:
            raise ValueError('Instantiate self.bert_model before registering megatron checkpoint version.')
        else:
            # get encoder config and create source for artifact
            if isinstance(self.bert_model, MegatronBertEncoder):
                checkpoint_version = get_checkpoint_version()
                if checkpoint_version is None:
                    raise ValueError('Unable to get megatron checkpoint version.')
                else:
                    checkpoint_version_dict = {'checkpoint_version': checkpoint_version}
                    checkpoint_version_path = 'megatron_checkpoint_version.json'
                    checkpoint_version_src = os.path.join(NEMO_NLP_TMP, checkpoint_version_path)
                    with open(checkpoint_version_src, 'w') as f:
                        f.write(json.dumps(checkpoint_version_dict))
                    self.register_artifact(checkpoint_version_path, checkpoint_version_src)

    @staticmethod
    def _unpack_nemo_file(path2file: str, out_folder: str) -> str:
        return super(NLPModel, NLPModel)._unpack_nemo_file(path2file, out_folder)

    @staticmethod
    def _make_nemo_file_from_folder(filename, source_dir):
        return super(NLPModel, NLPModel)._make_nemo_file_from_folder(filename, source_dir)

    @property
    def input_module(self):
        return self.bert_model

    @property
    def output_module(self):
        return self.classifier
| StarcoderdataPython |
6570606 | from io import BytesIO
from sys import version_info
from unittest import TestCase
from xml.etree import ElementTree
import datetime
import pytest
from pyclarity_lims.constants import nsmap
from pyclarity_lims.descriptors import StringDescriptor, StringAttributeDescriptor, StringListDescriptor, \
StringDictionaryDescriptor, IntegerDescriptor, BooleanDescriptor, UdfDictionary, EntityDescriptor, \
InputOutputMapList, EntityListDescriptor, PlacementDictionary, EntityList, SubTagDictionary, ExternalidList,\
XmlElementAttributeDict, XmlAttributeList, XmlReagentLabelList, XmlPooledInputDict, XmlAction, QueuedArtifactList
from pyclarity_lims.entities import Artifact, ProtocolStep, Container, Process, Step
from pyclarity_lims.lims import Lims
from tests import elements_equal
if version_info[0] == 2:
from mock import Mock
else:
from unittest.mock import Mock
def _tostring(e):
outfile = BytesIO()
ElementTree.ElementTree(e).write(outfile, encoding='utf-8', xml_declaration=True)
return outfile.getvalue().decode("utf-8")
class TestDescriptor(TestCase):
    """Shared base class providing a small factory helper for descriptor tests."""

    def _make_desc(self, klass, *args, **kwargs):
        # Instantiate the descriptor class under test with the given arguments.
        descriptor = klass(*args, **kwargs)
        return descriptor
class TestStringDescriptor(TestDescriptor):
    """Tests for StringDescriptor: get/set/create of a sub-element's text."""

    def setUp(self):
        self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="utf-8"?>
<test-entry>
<name>test name</name>
</test-entry>
""")
        self.instance = Mock(root=self.et)

    def test__get__(self):
        desc = self._make_desc(StringDescriptor, 'name')
        assert desc.__get__(self.instance, None) == "test name"

    def test__set__(self):
        desc = self._make_desc(StringDescriptor, 'name')
        desc.__set__(self.instance, "new test name")
        # The element text must reflect the newly assigned value.
        assert self.et.find('name').text == "new test name"

    def test_create(self):
        # Setting on an instance without a <name> element must create one.
        fresh_instance = Mock(root=ElementTree.Element('test-entry'))
        desc = self._make_desc(StringDescriptor, 'name')
        desc.__set__(fresh_instance, "test name")
        assert fresh_instance.root.find('name').text == 'test name'
class TestIntegerDescriptor(TestDescriptor):
    """Tests for IntegerDescriptor: integer values round-trip as element text."""

    def setUp(self):
        self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<count>32</count>
</test-entry>
""")
        self.instance = Mock(root=self.et)

    def test__get__(self):
        desc = self._make_desc(IntegerDescriptor, 'count')
        assert desc.__get__(self.instance, None) == 32

    def test__set__(self):
        desc = self._make_desc(IntegerDescriptor, 'count')
        # Both int and str inputs are stored as the element's text.
        desc.__set__(self.instance, 23)
        assert self.et.find('count').text == '23'
        desc.__set__(self.instance, '23')
        assert self.et.find('count').text == '23'

    def test_create(self):
        # Setting on an instance without a <count> element must create one.
        fresh_instance = Mock(root=ElementTree.Element('test-entry'))
        desc = self._make_desc(IntegerDescriptor, 'count')
        desc.__set__(fresh_instance, 23)
        assert fresh_instance.root.find('count').text == '23'
class TestBooleanDescriptor(TestDescriptor):
    """Tests for BooleanDescriptor: booleans round-trip as 'true'/'false' text."""

    def setUp(self):
        self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<istest>true</istest>
</test-entry>
""")
        self.instance = Mock(root=self.et)

    def test__get__(self):
        desc = self._make_desc(BooleanDescriptor, 'istest')
        assert desc.__get__(self.instance, None)

    def test__set__(self):
        desc = self._make_desc(BooleanDescriptor, 'istest')
        # A bool is serialized to lowercase text; a str value is stored verbatim.
        desc.__set__(self.instance, False)
        assert self.et.find('istest').text == 'false'
        desc.__set__(self.instance, 'true')
        assert self.et.find('istest').text == 'true'

    def test_create(self):
        # Setting on an instance without an <istest> element must create one.
        fresh_instance = Mock(root=ElementTree.Element('test-entry'))
        desc = self._make_desc(BooleanDescriptor, 'istest')
        desc.__set__(fresh_instance, True)
        assert fresh_instance.root.find('istest').text == 'true'
class TestEntityDescriptor(TestDescriptor):
    """Tests for EntityDescriptor: maps a sub-element's uri attribute to an entity."""

    def setUp(self):
        self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a1"></artifact>
</test-entry>
""")
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.a1 = Artifact(self.lims, id='a1')
        self.a2 = Artifact(self.lims, id='a2')
        self.instance = Mock(root=self.et, lims=self.lims)

    def test__get__(self):
        ed = self._make_desc(EntityDescriptor, 'artifact', Artifact)
        assert ed.__get__(self.instance, None) == self.a1

    def test__set__(self):
        ed = self._make_desc(EntityDescriptor, 'artifact', Artifact)
        ed.__set__(self.instance, self.a2)
        # Setting replaces the uri attribute of the existing sub-element.
        assert self.et.find('artifact').attrib['uri'] == 'http://testgenologics.com:4040/api/v2/artifacts/a2'

    def test_create(self):
        # Setting on a bare root must create the <artifact> sub-element.
        instance_new = Mock(root=ElementTree.Element('test-entry'))
        ed = self._make_desc(EntityDescriptor, 'artifact', Artifact)
        ed.__set__(instance_new, self.a1)
        assert instance_new.root.find('artifact').attrib['uri'] == 'http://testgenologics.com:4040/api/v2/artifacts/a1'
class TestEntityListDescriptor(TestDescriptor):
    """Tests for EntityListDescriptor, with and without a nesting path."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a1"></artifact>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a2"></artifact>
</test-entry>
""")
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.a1 = Artifact(self.lims, id='a1')
        self.a2 = Artifact(self.lims, id='a2')
        self.instance1 = Mock(root=et, lims=self.lims)
        # Same artifacts, wrapped in an intermediate <nesting> element.
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<nesting>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a1"></artifact>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a2"></artifact>
</nesting>
</test-entry>
""")
        self.instance2 = Mock(root=et, lims=self.lims)

    def test__get__(self):
        ed = self._make_desc(EntityListDescriptor, 'artifact', Artifact)
        assert ed.__get__(self.instance1, None) == [self.a1, self.a2]
        # With nesting=['nesting'] the descriptor looks below the wrapper tag.
        ed = self._make_desc(EntityListDescriptor, 'artifact', Artifact, nesting=['nesting'])
        assert ed.__get__(self.instance2, None) == [self.a1, self.a2]
class TestStringAttributeDescriptor(TestDescriptor):
    """Tests for StringAttributeDescriptor: reads/writes a root-element attribute."""

    def setUp(self):
        self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry name="test name">
</test-entry>""")
        self.instance = Mock(root=self.et)

    def test__get__(self):
        sd = self._make_desc(StringAttributeDescriptor, 'name')
        assert sd.__get__(self.instance, None) == "test name"

    def test__set__(self):
        sd = self._make_desc(StringAttributeDescriptor, 'name')
        sd.__set__(self.instance, "test name2")
        assert self.et.attrib['name'] == "test name2"

    def test_create(self):
        # Setting on a bare root must create the attribute.
        instance_new = Mock(root=ElementTree.Element('test-entry'))
        bd = self._make_desc(StringAttributeDescriptor, 'name')
        bd.__set__(instance_new, "test name2")
        assert instance_new.root.attrib['name'] == "test name2"
class TestStringListDescriptor(TestDescriptor):
    """Tests for StringListDescriptor, with and without a nesting path."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<test-subentry>A01</test-subentry>
<test-subentry>B01</test-subentry>
</test-entry>""")
        self.instance1 = Mock(root=et)
        # Same entries, wrapped in an intermediate <nesting> element.
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<nesting>
<test-subentry>A01</test-subentry>
<test-subentry>B01</test-subentry>
</nesting>
</test-entry>""")
        self.instance2 = Mock(root=et)

    def test__get__(self):
        sd = self._make_desc(StringListDescriptor, 'test-subentry')
        assert sd.__get__(self.instance1, None) == ['A01', 'B01']
        sd = self._make_desc(StringListDescriptor, 'test-subentry', nesting=['nesting'])
        assert sd.__get__(self.instance2, None) == ['A01', 'B01']

    def test__set__(self):
        sd = self._make_desc(StringListDescriptor, 'test-subentry')
        sd.__set__(self.instance1, ['A02', 'B02'])
        res = sd.__get__(self.instance1, None)
        assert isinstance(res, list)
        assert res == ['A02', 'B02']
class TestStringDictionaryDescriptor(TestDescriptor):
    """Tests for StringDictionaryDescriptor: sub-elements become a str dict."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<test-subentry>
<test-firstkey/>
<test-secondkey>second value</test-secondkey>
</test-subentry>
</test-entry>""")
        self.instance = Mock(root=et)

    def test__get__(self):
        sd = self._make_desc(StringDictionaryDescriptor, 'test-subentry')
        res = sd.__get__(self.instance, None)
        assert isinstance(res, dict)
        # An empty element yields None rather than an empty string.
        assert res['test-firstkey'] is None
        assert res['test-secondkey'] == 'second value'

    def test__set__(self):
        sd = self._make_desc(StringDictionaryDescriptor, 'test-subentry')
        sd.__set__(self.instance, {'mykey1': 'myvalue1'})
        res = sd.__get__(self.instance, None)
        assert isinstance(res, dict)
        assert res['mykey1'] == 'myvalue1'
class TestUdfDictionary(TestCase):
    """Tests for UdfDictionary: dict-like access to typed udf:field elements."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<udf:field type="String" name="test">stuff</udf:field>
<udf:field type="Numeric" name="how much">42</udf:field>
<udf:field type="Boolean" name="really?">true</udf:field>
</test-entry>""")
        self.instance1 = Mock(root=et)
        # Same fields, wrapped in an intermediate <nesting> element.
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<nesting>
<udf:field type="String" name="test">stuff</udf:field>
<udf:field type="Numeric" name="how much">42</udf:field>
<udf:field type="Boolean" name="really?">true</udf:field>
</nesting>
</test-entry>""")
        self.instance2 = Mock(root=et)
        self.dict1 = UdfDictionary(self.instance1)
        self.dict2 = UdfDictionary(self.instance2, nesting=['nesting'])
        # Deliberately wrong: fields live under <nesting>, so lookups must fail.
        self.dict_fail = UdfDictionary(self.instance2)
        self.empty_et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
</test-entry>""")

    def _get_udf_value(self, udf_dict, key):
        # Read the raw element text for *key*, bypassing UdfDictionary's typing.
        for e in udf_dict._elems:
            if e.attrib['name'] != key:
                continue
            else:
                return e.text

    def test___getitem__(self):
        assert self.dict1.__getitem__('test') == self._get_udf_value(self.dict1, 'test')
        assert self.dict2.__getitem__('test') == self._get_udf_value(self.dict2, 'test')
        self.assertRaises(KeyError, self.dict_fail.__getitem__, 'test')

    def test___setitem__(self):
        assert self._get_udf_value(self.dict1, 'test') == 'stuff'
        self.dict1.__setitem__('test', 'other')
        assert self._get_udf_value(self.dict1, 'test') == 'other'
        assert self._get_udf_value(self.dict1, 'how much') == '42'
        self.dict1.__setitem__('how much', 21)
        assert self._get_udf_value(self.dict1, 'how much') == '21'
        assert self._get_udf_value(self.dict1, 'really?') == 'true'
        self.dict1.__setitem__('really?', False)
        assert self._get_udf_value(self.dict1, 'really?') == 'false'
        # Assigning a str to a Numeric field must be rejected.
        self.assertRaises(TypeError, self.dict1.__setitem__, 'how much', '433')
        # FIXME: I'm not sure if this is the expected behaviour
        self.dict1.__setitem__('how much', None)
        assert self._get_udf_value(self.dict1, 'how much') == b'None'
        assert self._get_udf_value(self.dict2, 'test') == 'stuff'
        self.dict2.__setitem__('test', 'other')
        assert self._get_udf_value(self.dict2, 'test') == 'other'

    def test___setitem__new(self):
        # Assigning unknown keys must create new udf:field elements.
        self.dict1.__setitem__('new string', 'new stuff')
        assert self._get_udf_value(self.dict1, 'new string') == 'new stuff'
        self.dict1.__setitem__('new numeric', 21)
        assert self._get_udf_value(self.dict1, 'new numeric') == '21'
        self.dict1.__setitem__('new bool', False)
        assert self._get_udf_value(self.dict1, 'new bool') == 'false'
        self.dict2.__setitem__('new string', 'new stuff')
        assert self._get_udf_value(self.dict2, 'new string') == 'new stuff'

    def test___setitem__unicode(self):
        assert self._get_udf_value(self.dict1, 'test') == 'stuff'
        self.dict1.__setitem__('test', u'unicode')
        assert self._get_udf_value(self.dict1, 'test') == 'unicode'
        self.dict1.__setitem__(u'test', 'unicode2')
        assert self._get_udf_value(self.dict1, 'test') == 'unicode2'

    def test_create(self):
        instance = Mock(root=self.empty_et)
        dict1 = UdfDictionary(instance)
        dict1['test'] = 'value1'
        assert self._get_udf_value(dict1, 'test') == 'value1'

    def test_create_with_nesting(self):
        instance = Mock(root=self.empty_et)
        dict1 = UdfDictionary(instance, nesting=['cocoon'])
        dict1['test'] = 'value1'
        assert self._get_udf_value(dict1, 'test') == 'value1'

    def test___delitem__(self):
        assert self.dict1['test'] == self._get_udf_value(self.dict1, 'test')
        del self.dict1['test']
        with pytest.raises(KeyError):
            _ = self.dict1['test']
        assert self._get_udf_value(self.dict1, 'test') is None

    def test_items(self):
        # Placeholder: items() is not yet covered by an assertion.
        pass

    def test_clear(self):
        assert self.dict1
        self.dict1.clear()
        assert not self.dict1
        assert len(self.dict1) == 0

    def test___iter__(self):
        # Iteration order is not guaranteed, so membership is checked instead.
        expected_content = [
            ("test", "stuff"),
            ("really?", True),
            ("how much", 42)
        ]
        for k in self.dict1:
            assert (k, self.dict1[k]) in expected_content
class TestPlacementDictionary(TestDescriptor):
    """Tests for PlacementDictionary: maps well positions to placed artifacts."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<placement uri="http://testgenologics.com:4040/api/v2/artifacts/a1" limsid="a1">
<value>A:1</value>
</placement>
<other>thing</other>
</test-entry>""")
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.instance1 = Mock(root=et, lims=self.lims)
        self.dict1 = PlacementDictionary(self.instance1)
        self.art1 = Artifact(lims=self.lims, id='a1')

    def test___getitem__(self):
        assert self.dict1['A:1'] == self.art1

    def test___setitem__(self):
        assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 1
        art2 = Artifact(lims=self.lims, id='a2')
        # Re-assigning an occupied well replaces the placement in place.
        self.dict1['A:1'] = art2
        assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 1
        # Assigning a new well adds a second placement.
        self.dict1['A:2'] = art2
        assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 2
        assert self.dict1['A:2'] == art2

    def test___setitem__2(self):
        # Assignment must also work when no <placement> element exists yet.
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
</test-entry>""")
        instance = Mock(root=et, lims=self.lims)
        d = PlacementDictionary(instance)
        assert len(d.rootnode(d.instance).findall('placement')) == 0
        d['A:1'] = self.art1
        assert len(d.rootnode(d.instance).findall('placement')) == 1

    def test___delitem__(self):
        assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 1
        del self.dict1['A:1']
        assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 0

    def test_clear(self):
        # clear() must remove placements while leaving unrelated elements intact.
        sd = self._make_desc(StringDescriptor, 'other')
        assert sd.__get__(self.instance1, None) == "thing"
        assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 1
        self.dict1.clear()
        assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 0
        assert sd.__get__(self.instance1, None) == "thing"
class TestSubTagDictionary(TestCase):
    """Tests for SubTagDictionary: dict-like access to children of one tag."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<test-tag>
<key1>value1</key1>
</test-tag>
</test-entry>""")
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.instance1 = Mock(root=et, lims=self.lims)
        self.dict1 = SubTagDictionary(self.instance1, tag='test-tag')
        # A second instance with no <test-tag> container at all.
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
</test-entry>""")
        self.instance2 = Mock(root=et, lims=self.lims)
        self.dict2 = SubTagDictionary(self.instance2, tag='test-tag')

    def test___getitem__(self):
        assert self.dict1['key1'] == 'value1'

    def test___setitem__(self):
        assert len(self.dict1.rootnode(self.dict1.instance)) == 1
        assert self.dict1.rootnode(self.dict1.instance).find('key1').text == 'value1'
        # Overwriting an existing key must not create a duplicate element.
        self.dict1['key1'] = 'value11'
        assert len(self.dict1.rootnode(self.dict1.instance)) == 1
        assert self.dict1.rootnode(self.dict1.instance).find('key1').text == 'value11'
        self.dict1['key2'] = 'value2'
        assert len(self.dict1.rootnode(self.dict1.instance)) == 2
        assert self.dict1.rootnode(self.dict1.instance).find('key2').text == 'value2'
        assert self.dict1['key2'] == 'value2'

    def test___setitem__from_empty(self):
        # The <test-tag> container is created on first assignment.
        assert len(self.dict2.rootnode(self.dict2.instance)) == 0
        self.dict2['key1'] = 'value1'
        assert self.dict2.rootnode(self.dict2.instance).find('key1').text == 'value1'
        assert len(self.dict2.rootnode(self.dict2.instance)) == 1
class TestXmlElementAttributeDict(TestCase):
    """Tests for XmlElementAttributeDict: attributes of the Nth matching tag."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<test-tag attrib1="value1" attrib2="value2"/>
<test-tag attrib1="value11" attrib2="value12" attrib3="value13"/>
</test-entry>""")
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.instance1 = Mock(root=et, lims=self.lims)
        # position selects which occurrence of the tag each dict wraps.
        self.dict1 = XmlElementAttributeDict(self.instance1, tag='test-tag', position=0)
        self.dict2 = XmlElementAttributeDict(self.instance1, tag='test-tag', position=1)

    def test___getitem__(self):
        assert self.dict1['attrib1'] == 'value1'
        assert self.dict2['attrib1'] == 'value11'

    def test__len__(self):
        assert len(self.dict1) == 2
        assert len(self.dict2) == 3

    def test___setitem__(self):
        assert self.dict1['attrib1'] == 'value1'
        assert self.dict1.rootnode(self.dict1.instance).findall('test-tag')[0].attrib['attrib1'] == 'value1'
        self.dict1['attrib1'] = 'value2'
        assert self.dict1.rootnode(self.dict1.instance).findall('test-tag')[0].attrib['attrib1'] == 'value2'
class TestXmlPooledInputDict(TestCase):
    """Tests for XmlPooledInputDict: maps pool names to (output, inputs) tuples."""

    def setUp(self):
        et = ElementTree.fromstring('''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<pooled-inputs>
<pool output-uri="{uri}/out1" name="pool1">
<input uri="{uri}/in1"/>
<input uri="{uri}/in2"/>
</pool>
<pool output-uri="{uri}/out2" name="pool2">
<input uri="{uri}/in3"/>
<input uri="{uri}/in4"/>
</pool>
</pooled-inputs>
</test-entry>'''.format(uri='http://testgenologics.com:4040'))
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.instance1 = Mock(root=et, lims=self.lims)
        self.dict1 = XmlPooledInputDict(self.instance1)
        self.out1 = Artifact(self.lims, uri='http://testgenologics.com:4040/out1')
        self.in1 = Artifact(self.lims, uri='http://testgenologics.com:4040/in1')
        self.in2 = Artifact(self.lims, uri='http://testgenologics.com:4040/in2')

    def test___getitem__(self):
        assert self.dict1['pool1'] == (self.out1, (self.in1, self.in2))

    def test___setitem1__(self):
        assert len(self.dict1) == 2
        assert len(self.dict1.rootnode(self.dict1.instance)) == 2
        # This works in the test but does not work in reality because
        # the pool artifact needs to be created by the LIMS.
        self.dict1['pool3'] = (self.out1, (self.in1, self.in2))
        assert len(self.dict1) == 3
        assert len(self.dict1.rootnode(self.dict1.instance)) == 3

    def test___setitem2__(self):
        assert len(self.dict1) == 2
        assert len(self.dict1.rootnode(self.dict1.instance)) == 2
        # This is the correct way of creating a pool from scratch
        self.dict1['pool3'] = (None, (self.in1, self.in2))
        assert len(self.dict1) == 3
        assert len(self.dict1.rootnode(self.dict1.instance)) == 3
class TestEntityList(TestDescriptor):
    """Tests for EntityList: a mutable list view over entity sub-elements."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a1"></artifact>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a2"></artifact>
<other>thing</other>
</test-entry>
""")
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.a1 = Artifact(self.lims, id='a1')
        self.a2 = Artifact(self.lims, id='a2')
        self.instance1 = Mock(root=et, lims=self.lims)
        # Same artifacts, wrapped in an intermediate <nesting> element.
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<nesting>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a1"></artifact>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a2"></artifact>
</nesting>
</test-entry>
""")
        self.instance2 = Mock(root=et, lims=self.lims)

    def test__get__(self):
        el = EntityList(self.instance1, 'artifact', Artifact)
        assert el[0] == self.a1
        assert el[1] == self.a2
        el = EntityList(self.instance2, 'artifact', Artifact, nesting=['nesting'])
        assert el[0] == self.a1
        assert el[1] == self.a2

    def test_append(self):
        el = EntityList(self.instance1, 'artifact', Artifact)
        assert len(el) == 2
        assert len(el.instance.root.findall('artifact')) == 2
        a3 = Artifact(self.lims, id='a3')
        el.append(a3)
        # Both the list view and the backing XML must gain the new entry.
        assert len(el) == 3
        assert el[2] == a3
        assert len(el._elems) == 3
        assert len(el.instance.root.findall('artifact')) == 3

    def test_insert(self):
        el = EntityList(self.instance1, 'artifact', Artifact)
        assert len(el) == 2
        assert len(el.instance.root.findall('artifact')) == 2
        a3 = Artifact(self.lims, id='a3')
        el.insert(1, a3)
        # Insertion shifts the existing entries after the insert position.
        assert len(el) == 3
        assert el[1] == a3
        assert el[2] == self.a2
        assert len(el._elems) == 3
        assert len(el.instance.root.findall('artifact')) == 3

    def test_set(self):
        el = EntityList(self.instance1, 'artifact', Artifact)
        assert len(el) == 2
        assert len(el.instance.root.findall('artifact')) == 2
        a3 = Artifact(self.lims, id='a3')
        el[1] = a3
        # Item assignment rewrites the element's uri attribute in place.
        assert len(el) == 2
        assert el[1] == a3
        assert len(el._elems) == 2
        assert el.instance.root.findall('artifact')[1].attrib['uri'] == 'http://testgenologics.com:4040/api/v2/artifacts/a3'

    def test_set_list(self):
        el = EntityList(self.instance1, 'artifact', Artifact)
        assert len(el) == 2
        assert len(el.instance.root.findall('artifact')) == 2
        a3 = Artifact(self.lims, id='a3')
        a4 = Artifact(self.lims, id='a4')
        el[0:2] = [a3, a4]
        assert len(el) == 2
        assert el[0] == a3
        assert el[1] == a4

    def test_clear(self):
        # clear() must remove artifacts while leaving unrelated elements intact.
        el = EntityList(self.instance1, 'artifact', Artifact)
        sd = self._make_desc(StringDescriptor, 'other')
        assert sd.__get__(self.instance1, None) == "thing"
        assert len(el) == 2
        el.clear()
        assert len(el) == 0
        assert sd.__get__(self.instance1, None) == "thing"

    def test___add__(self):
        el1 = EntityList(self.instance1, 'artifact', Artifact)
        assert len(el1) == 2
        assert len(el1.instance.root.findall('artifact')) == 2
        el2 = [Artifact(self.lims, id='a3'), Artifact(self.lims, id='a4')]
        el3 = el1 + el2
        assert len(el3) == 4
        assert el3[:2] == el1
        assert el3[2:] == el2

    def test__iadd__(self):
        el1 = EntityList(self.instance1, 'artifact', Artifact)
        id1 = id(el1)
        assert len(el1) == 2
        assert len(el1.instance.root.findall('artifact')) == 2
        el2 = [Artifact(self.lims, id='a3'), Artifact(self.lims, id='a4')]
        el1 += el2
        id2 = id(el1)
        assert id1 == id2  # still the same object
        assert len(el1) == 4
        assert el1[2:] == el2
class TestInputOutputMapList(TestCase):
    """Tests for InputOutputMapList: parsing and creating input-output-map nodes."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<input-output-map>
<input uri="http://testgenologics.com:4040/api/v2/artifacts/1" limsid="1">
<parent-process uri="http://testgenologics.com:4040//api/v2/processes/1" limsid="1"/>
</input>
<output uri="http://testgenologics.com:4040/api/v2/artifacts/2" output-generation-type="PerAllInputs" output-type="ResultFile" limsid="2"/>
</input-output-map>
</test-entry>""")
        self.instance1 = Mock(root=et, lims=Mock(cache={}))
        self.IO_map = InputOutputMapList()

    def test___get__(self):
        expected_input_keys = ['limsid', 'parent-process', 'uri']
        expected_output_keys = ['limsid', 'output-type', 'output-generation-type', 'uri']
        mappings = self.IO_map.__get__(self.instance1, None)
        # Each entry is an (input_dict, output_dict) pair.
        assert sorted(mappings[0][0].keys()) == sorted(expected_input_keys)
        assert sorted(mappings[0][1].keys()) == sorted(expected_output_keys)

    def test_create(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
</test-entry>""")
        lims = Mock(cache={})
        instance = Mock(root=et, lims=lims)
        mappings = self.IO_map.__get__(instance, None)
        input_dict = {'uri': Artifact(lims, uri='input_uri'), 'limsid': 'a1', 'parent-process': Process(lims, uri='p_uri')}
        output_dict = {'uri': Artifact(lims, uri='output_uri'), 'limsid': 'a2', 'output-type': 'PerInput'}
        # Appending to the parsed list must materialize a new XML node.
        mappings.append((input_dict, output_dict))
        assert len(et) == 1
        node = et.find('input-output-map')
        assert len(node) == 2  # input and output
        assert node.find('input').attrib['uri'] == 'input_uri'
        assert node.find('input').attrib['limsid'] == 'a1'
        assert node.find('input').find('parent-process').attrib['uri'] == 'p_uri'
        assert node.find('output').attrib['uri'] == 'output_uri'
        assert node.find('output').attrib['limsid'] == 'a2'
        assert node.find('output').attrib['output-type'] == 'PerInput'
class TestExternalidList(TestCase):
    """Tests for ExternalidList: (id, uri) pairs from ri:externalid elements."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<ri:externalid xmlns:ri="http://genologics.com/ri" id="1" uri="http://testgenologics.com:4040/api/v2/external/1" />
<ri:externalid xmlns:ri="http://genologics.com/ri" id="2" uri="http://testgenologics.com:4040/api/v2/external/2" />
</test-entry>
""")
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.instance1 = Mock(root=et, lims=self.lims)

    def test_get(self):
        el = ExternalidList(self.instance1)
        assert len(el) == 2
        assert el[0] == ("1", "http://testgenologics.com:4040/api/v2/external/1")
        assert el[1] == ("2", "http://testgenologics.com:4040/api/v2/external/2")

    def test_append(self):
        el = ExternalidList(self.instance1)
        assert len(el) == 2
        el.append(("3", "http://testgenologics.com:4040/api/v2/external/3"))
        assert len(el) == 3
        assert el[2] == ("3", "http://testgenologics.com:4040/api/v2/external/3")
        assert len(el._elems) == 3
        # The appended pair must be written back as a namespaced ri:externalid.
        elem = el.instance.root.findall(nsmap('ri:externalid'))
        assert elem[2].attrib['id'] == '3'
        assert elem[2].attrib['uri'] == 'http://testgenologics.com:4040/api/v2/external/3'
class TestXmlAttributeList(TestCase):
    """Tests for XmlAttributeList: a list of attribute dicts under a nesting tag."""

    def setUp(self):
        et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<test-tags>
<test-tag attrib1="value1" attrib2="value2"/>
<test-tag attrib1="value11" attrib2="value12" attrib3="value13"/>
</test-tags>
</test-entry>
""")
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.instance1 = Mock(root=et, lims=self.lims)

    def test_get(self):
        al = XmlAttributeList(self.instance1, tag='test-tag', nesting=['test-tags'])
        assert al[0] == {'attrib1': 'value1', 'attrib2': 'value2'}
        assert al[1] == {'attrib1': 'value11', 'attrib2': 'value12', 'attrib3': 'value13'}

    def test_append(self):
        el = XmlAttributeList(self.instance1, tag='test-tag', nesting=['test-tags'])
        el.append({'attrib1': 'value21'})
        # NOTE(review): elements_equal's return value is not asserted here; if it
        # returns a bool rather than raising on mismatch, this check is vacuous —
        # confirm against the tests helper module.
        elements_equal(
            el.instance.root.find('test-tags').findall('test-tag')[-1],
            ElementTree.fromstring('''<test-tag attrib1="value21" />''')
        )

    def test_insert(self):
        el = XmlAttributeList(self.instance1, tag='test-tag', nesting=['test-tags'])
        el.insert(1, {'attrib1': 'value21'})
        # NOTE(review): see test_append — elements_equal results are unchecked.
        elements_equal(
            el.instance.root.find('test-tags').findall('test-tag')[1],
            ElementTree.fromstring('''<test-tag attrib1="value21" />''')
        )
        elements_equal(
            el.instance.root.find('test-tags').findall('test-tag')[2],
            ElementTree.fromstring('''<test-tag attrib1="value11" attrib2="value12" attrib3="value13" />''')
        )
class TestXmlReagentLabelList(TestCase):
    """Tests for XmlReagentLabelList: reagent-label name attributes as a list."""

    def setUp(self):
        et = ElementTree.fromstring('''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<reagent-label name="label name"/>
</test-entry>''')
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.instance1 = Mock(root=et, lims=self.lims)

    def test_get(self):
        ll = XmlReagentLabelList(self.instance1)
        assert ll == ['label name']

    def test_append(self):
        rl = XmlReagentLabelList(self.instance1)
        rl.append('another label')
        assert rl == ['label name', 'another label']
        # NOTE(review): elements_equal's return value is not asserted; if it
        # returns a bool rather than raising on mismatch, this check is vacuous.
        elements_equal(
            rl.instance.root.findall('reagent-label')[1],
            ElementTree.fromstring('''<reagent-label name="another label"/>''')
        )
class TestXmlAction(TestCase):
    """Tests for XmlAction: next-action attributes mapped to entities/strings."""

    def setUp(self):
        et = ElementTree.fromstring('''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<next-action step-uri="{url}/prt/1/stp/1" rework-step-uri="{url}/steps/1" action="nextstep" artifact-uri="{url}/arts/a1"/>
</test-entry>'''.format(url='http://testgenologics.com:4040'))
        # A second fixture with only the artifact set, for write tests.
        et1 = ElementTree.fromstring('''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<next-action artifact-uri="{url}/arts/a1"/>
</test-entry>'''.format(url='http://testgenologics.com:4040'))
        self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
        self.instance1 = Mock(root=et, lims=self.lims)
        self.instance_empty = Mock(root=et1, lims=self.lims)

    def test_parse(self):
        action = XmlAction(self.instance1, tag='next-action')
        assert action['action'] == 'nextstep'
        assert action['step'] == ProtocolStep(self.lims, uri='http://testgenologics.com:4040/prt/1/stp/1')
        assert action['artifact'] == Artifact(self.lims, uri='http://testgenologics.com:4040/arts/a1')
        assert action['rework-step'] == Step(self.lims, uri='http://testgenologics.com:4040/steps/1')

    def test_set(self):
        action = XmlAction(self.instance_empty, tag='next-action')
        action['step'] = ProtocolStep(self.lims, uri='http://testgenologics.com:4040/prt/1/stp/1')
        assert action.instance.root.find('next-action').attrib['step-uri'] == 'http://testgenologics.com:4040/prt/1/stp/1'
        action['action'] = 'nextstep'
        assert action.instance.root.find('next-action').attrib['action'] == 'nextstep'
        action['rework-step'] = Step(self.lims, uri='http://testgenologics.com:4040/steps/1')
        assert action.instance.root.find('next-action').attrib['rework-step-uri'] == 'http://testgenologics.com:4040/steps/1'
        # Unknown keys are rejected rather than silently written to the XML.
        with pytest.raises(KeyError):
            action['whatever'] = 'youwant'
class TestQueuedArtifactList(TestCase):
def setUp(self):
queue_txt = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<artifacts>
<artifact uri="{url}/artifacts/a1">
<queue-time>2011-12-25T01:10:10.050+00:00</queue-time>
<location>
<container uri="{url}/containers/c1"/>
<value>A:1</value>
</location>
</artifact>
<artifact uri="{url}/artifacts/a2">
<queue-time>2011-12-25T01:10:10.200+01:00</queue-time>
<location>
<container uri="{url}/containers/c1"/>
<value>A:2</value>
</location>
</artifact>
<artifact uri="{url}/artifacts/a3">
<queue-time>2011-12-25T01:10:10.050-01:00</queue-time>
<location>
<container uri="{url}/containers/c1"/>
<value>A:3</value>
</location>
</artifact>
</artifacts>
</test-entry>'''
self.et_page1 = ElementTree.fromstring('''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<artifacts>
<artifact uri="{url}/artifacts/a1">
<queue-time>2011-12-25T01:10:10.050+00:00</queue-time>
<location>
<container uri="{url}/containers/c1"/>
<value>A:1</value>
</location>
</artifact>
</artifacts>
<next-page uri="{url}/queues/q1?page2=500"/>
</test-entry>'''.format(url='http://testgenologics.com:4040/api/v2'))
self.et_page2 = ElementTree.fromstring('''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<artifacts>
<artifact uri="{url}/artifacts/a2">
<queue-time>2011-12-25T01:10:10.200+01:00</queue-time>
<location>
<container uri="{url}/containers/c1"/>
<value>A:2</value>
</location>
</artifact>
</artifacts>
<next-page uri="{url}/queues/q1?page3=500"/>
</test-entry>'''.format(url='http://testgenologics.com:4040/api/v2'))
self.et_page3 = ElementTree.fromstring('''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<artifacts>
<artifact uri="{url}/artifacts/a3">
<queue-time>2011-12-25T01:10:10.050-01:00</queue-time>
<location>
<container uri="{url}/containers/c1"/>
<value>A:3</value>
</location>
</artifact>
</artifacts>
</test-entry>'''.format(url='http://testgenologics.com:4040/api/v2'))
et = ElementTree.fromstring(queue_txt.format(url='http://testgenologics.com:4040/api/v2'))
self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
self.instance1 = Mock(root=et, lims=self.lims)
self.instance2 = Mock(root=self.et_page1, lims=self.lims)
def get_queue_art(self, art_id, pos, microsec, time_delta):
if version_info[0] == 2:
return (
Artifact(self.lims, id=art_id),
datetime.datetime(2011, 12, 25, 1, 10, 10, microsec),
(Container(self.lims, id='c1'), pos)
)
else:
return (
Artifact(self.lims, id=art_id),
datetime.datetime(2011, 12, 25, 1, 10, 10, microsec, tzinfo=datetime.timezone(time_delta)),
(Container(self.lims, id='c1'), pos)
)
def test_parse(self):
queued_artifacts = QueuedArtifactList(self.instance1)
qart = self.get_queue_art('a1', 'A:1', 50000, datetime.timedelta(0, 0))
assert queued_artifacts[0] == qart
qart = self.get_queue_art('a2', 'A:2', 200000, datetime.timedelta(0, 3600))
assert queued_artifacts[1] == qart
qart = self.get_queue_art('a3', 'A:3', 50000, datetime.timedelta(0, -3600))
assert queued_artifacts[2] == qart
def test_set(self):
queued_artifacts = QueuedArtifactList(self.instance1)
qart = self.get_queue_art('a1', 'A:4', 50000, datetime.timedelta(0, 0))
with pytest.raises(NotImplementedError):
queued_artifacts.append(qart)
def test_parse_multipage(self):
    """A queue split across several pages is stitched together transparently."""
    # Each follow-up page fetch returns the next pre-built XML page.
    self.lims.get = Mock(side_effect=[self.et_page2, self.et_page3])
    queued_artifacts = QueuedArtifactList(self.instance2)
    expected_specs = [
        ('a1', 'A:1', 50000, datetime.timedelta(0, 0)),
        ('a2', 'A:2', 200000, datetime.timedelta(0, 3600)),
        ('a3', 'A:3', 50000, datetime.timedelta(0, -3600)),
    ]
    for index, spec in enumerate(expected_specs):
        assert queued_artifacts[index] == self.get_queue_art(*spec)
| StarcoderdataPython |
86028 | import json
import os
from . import jsonencoder
class Objects:
    """Persist arbitrary objects as JSON documents under ``<path>/objects``.

    Serialization of non-primitive types is delegated to the project's
    ``jsonencoder.Encoder`` (custom JSONEncoder + object_hook pair).
    """

    def __init__(self, path: str):
        self.path = os.path.abspath(path)
        os.makedirs(os.path.join(self.path, "objects"), exist_ok=True)
        self.encoder = jsonencoder.Encoder()

    def _json_path(self, object_name):
        """Return the full path of the JSON file backing *object_name*."""
        return os.path.join(self.path, "objects", object_name + ".json")

    def save_object(self, object_name, object_instance):
        """Save object into json file"""
        with open(self._json_path(object_name), "w") as handle:
            json.dump(object_instance, handle, cls=self.encoder.JSONEncoder)

    def load_object(self, object_name):
        """Load object from json file; returns None when no save exists."""
        if not self.save_exists(object_name):
            return None
        with open(self._json_path(object_name), "r") as handle:
            return json.load(handle, object_hook=self.encoder.hook)

    def save_exists(self, object_name):
        """Check whether a readable+writable json file exists for the object."""
        return os.access(self._json_path(object_name), os.R_OK | os.W_OK)
| StarcoderdataPython |
1629231 | from cumulusci.core.utils import process_bool_arg, process_list_arg
from cumulusci.tasks.bulkdata.step import (
DataOperationType,
DataOperationStatus,
DataApi,
get_query_operation,
get_dml_operation,
)
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.core.exceptions import TaskOptionsError, BulkDataException
from cumulusci.tasks.bulkdata.utils import RowErrorChecker
class DeleteData(BaseSalesforceApiTask):
    """Query existing data for a specific sObject and perform a Bulk API delete of all matching records."""

    task_options = {
        "objects": {
            "description": "A list of objects to delete records from in order of deletion. If passed via command line, use a comma separated string",
            "required": True,
        },
        "where": {
            "description": "A SOQL where-clause (without the keyword WHERE). Only available when 'objects' is length 1.",
            "required": False,
        },
        "hardDelete": {
            "description": "If True, perform a hard delete, bypassing the Recycle Bin. Note that this requires the Bulk API Hard Delete permission. Default: False"
        },
        "ignore_row_errors": {
            "description": "If True, allow the operation to continue even if individual rows fail to delete."
        },
        "inject_namespaces": {
            "description": "If True, the package namespace prefix will be "
            "automatically added to (or removed from) objects "
            "and fields based on the name used in the org. Defaults to True."
        },
        "api": {
            "description": "The desired Salesforce API to use, which may be 'rest', 'bulk', or "
            "'smart' to auto-select based on record volume. The default is 'smart'."
        },
    }

    # Max number of per-row errors logged before warnings are suppressed.
    row_warning_limit = 10

    def _init_options(self, kwargs):
        """Parse, validate and normalize all task options.

        Raises:
            TaskOptionsError: for empty object lists, a where-clause with
                multiple objects, an unknown API name, or hardDelete+REST.
        """
        super(DeleteData, self)._init_options(kwargs)

        # Split and trim objects string into a list if not already a list
        self.options["objects"] = process_list_arg(self.options["objects"])
        if not len(self.options["objects"]) or not self.options["objects"][0]:
            raise TaskOptionsError("At least one object must be specified.")

        self.options["where"] = self.options.get("where", None)
        if len(self.options["objects"]) > 1 and self.options["where"]:
            raise TaskOptionsError(
                "Criteria cannot be specified if more than one object is specified."
            )
        self.options["hardDelete"] = process_bool_arg(
            self.options.get("hardDelete") or False
        )
        self.options["ignore_row_errors"] = process_bool_arg(
            self.options.get("ignore_row_errors") or False
        )
        # inject_namespaces defaults to True when not supplied at all.
        inject_namespaces = self.options.get("inject_namespaces")
        self.options["inject_namespaces"] = process_bool_arg(
            True if inject_namespaces is None else inject_namespaces
        )
        try:
            self.options["api"] = {
                "bulk": DataApi.BULK,
                "rest": DataApi.REST,
                "smart": DataApi.SMART,
            }[self.options.get("api", "smart").lower()]
        except KeyError:
            raise TaskOptionsError(
                f"{self.options['api']} is not a valid value for API (valid: bulk, rest, smart)"
            )

        if self.options["hardDelete"] and self.options["api"] is DataApi.REST:
            raise TaskOptionsError("The hardDelete option requires Bulk API.")

    @staticmethod
    def _is_injectable(element: str) -> bool:
        """True when *element* looks namespaceable (exactly one '__', e.g. Custom__c)."""
        return element.count("__") == 1

    def _validate_and_inject_namespace(self):
        """Perform namespace injection and ensure that we can successfully delete all of the selected objects."""
        global_describe = {
            entry["name"]: entry
            for entry in self.org_config.salesforce_client.describe()["sobjects"]
        }

        # Namespace injection
        if (
            self.options["inject_namespaces"]
            and self.project_config.project__package__namespace
        ):

            def inject(element: str):
                return f"{self.project_config.project__package__namespace}__{element}"

            self.sobjects = []
            for sobject in self.options["objects"]:
                if self._is_injectable(sobject):
                    injected = inject(sobject)
                    if sobject in global_describe and injected in global_describe:
                        self.logger.warning(
                            f"Both {sobject} and {injected} are present in the target org. Using {sobject}."
                        )

                    # Only substitute the injected name when the bare name is
                    # absent from the org but the namespaced one exists.
                    if sobject not in global_describe and injected in global_describe:
                        self.sobjects.append(injected)
                    else:
                        self.sobjects.append(sobject)
                else:
                    self.sobjects.append(sobject)
        else:
            self.sobjects = self.options["objects"]

        # Validate CRUD
        non_deletable_objects = [
            s
            for s in self.sobjects
            if not (s in global_describe and global_describe[s]["deletable"])
        ]
        if non_deletable_objects:
            raise BulkDataException(
                f"The objects {', '.join(non_deletable_objects)} are not present or cannot be deleted."
            )

    def _run_task(self):
        """Query each object's matching Ids and delete them, object by object."""
        self._validate_and_inject_namespace()

        for obj in self.sobjects:
            query = f"SELECT Id FROM {obj}"
            if self.options["where"]:
                query += f" WHERE {self.options['where']}"

            qs = get_query_operation(
                sobject=obj,
                fields=["Id"],
                api_options={},
                context=self,
                query=query,
                api=self.options["api"],
            )

            self.logger.info(f"Querying for {obj} objects")
            qs.query()
            if qs.job_result.status is not DataOperationStatus.SUCCESS:
                raise BulkDataException(
                    f"Unable to query records for {obj}: {','.join(qs.job_result.job_errors)}"
                )
            if not qs.job_result.records_processed:
                self.logger.info(
                    f"No records found, skipping delete operation for {obj}"
                )
                continue

            self.logger.info(f"Deleting {self._object_description(obj)} ")
            ds = get_dml_operation(
                sobject=obj,
                operation=(
                    DataOperationType.HARD_DELETE
                    if self.options["hardDelete"]
                    else DataOperationType.DELETE
                ),
                fields=["Id"],
                api_options={},
                context=self,
                api=self.options["api"],
                volume=qs.job_result.records_processed,
            )
            ds.start()
            ds.load_records(qs.get_results())
            ds.end()

            if ds.job_result.status not in [
                DataOperationStatus.SUCCESS,
                DataOperationStatus.ROW_FAILURE,
            ]:
                # FIX: report the *delete* step's errors (ds), not the already
                # successful query step's (qs), which are always empty here.
                raise BulkDataException(
                    f"Unable to delete records for {obj}: {','.join(ds.job_result.job_errors)}"
                )

            error_checker = RowErrorChecker(
                self.logger, self.options["ignore_row_errors"], self.row_warning_limit
            )
            for result in ds.get_results():
                error_checker.check_for_row_error(result, result.id)

    def _object_description(self, obj):
        """Return a readable description of the object set to delete."""
        if self.options["where"]:
            return f'{obj} objects matching "{self.options["where"]}"'
        else:
            return f"all {obj} objects"
| StarcoderdataPython |
6483269 | import sys
# Convert a public-suffix style TLD list into three JavaScript arrays:
# plain suffixes, wildcard ('*.') suffixes, and exception ('!') entries.
if len(sys.argv) != 2:
    # FIX: include the script name in the usage line and exit non-zero on
    # a usage error (bare sys.exit() exits with status 0).
    print('Usage:\npython3 {} <TLD list>'.format(sys.argv[0]))
    sys.exit(1)

tld = []
all_tld = []
not_tld = []

# FIX: use a context manager so the input file is always closed.
with open(sys.argv[1], encoding='utf-8') as tld_file:
    for line in tld_file:
        # Strip trailing '//' comments.
        i = line.find('//')
        if i >= 0:
            line = line[:i]
        line = line.strip()
        # Single-label entries are intentionally skipped (only multi-label
        # suffixes are emitted).
        if '.' not in line:
            continue
        if line.startswith('!'):
            # '!' marks an exception to a wildcard rule.
            not_tld.append(line[1:])
        elif line.startswith('*.'):
            all_tld.append(line[2:])
        else:
            tld.append(line)

print("""// Generated by convert_tld.py. Don\'t edit manually.
var EFFECTIVE_TLD = [
'{}'
];
var CHILDREN_TLD = [
'{}'
];
var NOT_TLD = [
'{}'
];
""".format('\',\n \''.join(tld),
           '\',\n \''.join(all_tld),
           '\',\n \''.join(not_tld)))
| StarcoderdataPython |
5150456 | """
Quantiphyse - Widgets for data simulation
Copyright (c) 2013-2020 University of Oxford
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from PySide2 import QtGui, QtCore, QtWidgets
from quantiphyse.gui.options import OptionBox, NumericOption, DataOption, OutputNameOption, ChoiceOption
from quantiphyse.gui.widgets import QpWidget, TitleWidget
from .processes import AddNoiseProcess, SimMotionProcess
class AddNoiseWidget(QpWidget):
    """
    Add noise to data
    """

    def __init__(self, **kwargs):
        super(AddNoiseWidget, self).__init__(name="Add noise", icon="noise",
                                             desc="Add random noise to a data set",
                                             group="Simulation", **kwargs)

    def init_ui(self):
        """Build the title, the options box and the Run button."""
        layout = QtWidgets.QVBoxLayout()
        self.setLayout(layout)

        layout.addWidget(TitleWidget(self, title="Add Noise", help="noise"))

        self.option_box = OptionBox("Options")
        data_option = self.option_box.add("Data set", DataOption(self.ivm), key="data")
        self.option_box.add("Gaussian standard deviation", NumericOption(minval=0, maxval=100, default=50), key="std")
        self.option_box.add("Output name", OutputNameOption(src_data=data_option, suffix="_noisy"), key="output-name")
        layout.addWidget(self.option_box)

        run_button = QtWidgets.QPushButton('Run', self)
        run_button.clicked.connect(self.run)
        layout.addWidget(run_button)

        layout.addStretch(1)

    def batch_options(self):
        """Return (process name, option dict) as used by batch files."""
        return "AddNoise", self.option_box.values()

    def run(self):
        """Run the AddNoise process with the currently selected options."""
        options = self.batch_options()[1]
        AddNoiseProcess(self.ivm).execute(options)
class SimMotionWidget(QpWidget):
    """
    Widget to simulate random motion on a 4D data set
    """

    def __init__(self, **kwargs):
        super(SimMotionWidget, self).__init__(name="Simulate motion", icon="reg",
                                              desc="Simulate random motion on a 4D data set",
                                              group="Simulation", **kwargs)

    def init_ui(self):
        """Build the title, the options box and the Run button."""
        vbox = QtWidgets.QVBoxLayout()
        self.setLayout(vbox)

        title = TitleWidget(self, title="Simulate Motion", help="sim_motion")
        vbox.addWidget(title)

        self.option_box = OptionBox("Options")
        data = self.option_box.add("Data set", DataOption(self.ivm, include_4d=True, include_3d=False), key="data")
        self.option_box.add("Random translation standard deviation (mm)", NumericOption(minval=0, maxval=5, default=1, decimals=2), key="std")
        self.option_box.add("Random rotation standard deviation (\N{DEGREE SIGN})", NumericOption(minval=0, maxval=10, default=1, decimals=2), key="std_rot")
        self.option_box.add("Padding (mm)", NumericOption(minval=0, maxval=10, default=5, decimals=1), key="padding", checked=True)
        self.option_box.add("Interpolation", ChoiceOption(["Nearest neighbour", "Linear", "Quadratic", "Cubic"], return_values=range(4), default=3), key="order")
        self.option_box.add("Output name", OutputNameOption(src_data=data, suffix="_moving"), key="output-name")
        vbox.addWidget(self.option_box)

        run_btn = QtWidgets.QPushButton('Run', self)
        run_btn.clicked.connect(self.run)
        vbox.addWidget(run_btn)

        vbox.addStretch(1)

    def batch_options(self):
        """Return (process name, option dict) as used by batch files."""
        return "SimMotion", self.option_box.values()

    def run(self):
        """Run the SimMotion process with the currently selected options."""
        options = self.batch_options()[1]
        process = SimMotionProcess(self.ivm)
        # FIX: use execute() — the public process entry point, as AddNoiseWidget
        # does — instead of calling the internal run() directly, so process
        # status/progress signalling is engaged.
        process.execute(options)
| StarcoderdataPython |
4870341 | import numpy as np
import pandas as pd
import torch
class SessionDataset:
    """Wraps a pandas DataFrame of user/session/item click events and
    precomputes the offset arrays needed for session-parallel iteration.

    In 'test' mode, the training history of the test users is prepended to
    the (flagged) test events; the 'in_eval' boolean column marks which rows
    count towards evaluation.
    """

    def __init__(self, sep='\t', session_key='SessionId', item_key='ItemId', time_key='TimeStamp', user_key='UserId',
                 mode='train', train_data=None, test_data=None, n_samples=-1, itemmap=None, time_sort=False, print_info=True):
        """
        Args:
            sep: separator for the csv (unused here; kept for API compatibility)
            session_key, item_key, time_key: name of the fields corresponding to the sessions, items, time
            user_key: name of the user-id field
            mode: 'train' or 'test'
            train_data, test_data: pre-loaded DataFrames of click events
            n_samples: the number of samples to use. If -1, use the whole dataset.
            itemmap: mapping between item IDs and item indices
            time_sort: whether to sort the sessions by time or not
            print_info: whether to log corpus statistics after construction
        """
        self.mode = mode
        # NOTE(review): mutates the caller's train_data in place by adding
        # the 'in_eval' flag column — confirm callers do not rely on the
        # frame being untouched.
        train_data.loc[:, 'in_eval'] = False
        if mode == 'train':
            self.df = train_data
        elif mode == 'test':
            # Keep only the training history of users present in the test
            # set, then append the flagged test events after it.
            test_users = test_data[user_key].unique()
            train_data = train_data[train_data[user_key].isin(test_users)].copy()
            test_data.loc[:, 'in_eval'] = True
            test_data = pd.concat([train_data, test_data], sort=False)
            self.df = test_data
        else:
            raise NotImplementedError('Unsupported dataset mode')
        self.session_key = session_key
        self.item_key = item_key
        self.user_key = user_key
        self.time_key = time_key
        self.time_sort = time_sort
        self.max_session_length = self.df.groupby(self.session_key).count()[self.item_key].max()
        # sampling: truncate to the first n_samples rows when requested
        if n_samples > 0: self.df = self.df[:n_samples]

        # Add item indices ('item_idx' column mapped from item IDs)
        self.add_item_indices(itemmap=itemmap)
        # Sort the df by time, and then by session ID. That is, df is sorted by
        # session ID and clicks within a session are next to each other, where
        # the clicks within a session are time-ordered.
        self.create_offsets()
        if print_info:
            self.print_info()

    def create_offsets(self):
        """Sort events and (re)build all click/session/user offset arrays."""
        self.df.sort_values([self.user_key, self.session_key, self.time_key], inplace=True)
        self.user_indptr, self.session_offsets = self.get_user_session_offsets()
        self.click_offsets = self.get_click_offsets()
        self.session_idx_arr = self.order_session_idx()
        self.user_idx_arr = self.order_user_idx()

    def select_test_users(self, user_slice, batch_size=100):
        """Restrict evaluation to *user_slice*, topping the frame up with extra
        (non-evaluated) users so the user count fills whole batches."""
        if user_slice.shape[0] % batch_size == 0 and user_slice.shape[0] > 0:
            # Slice already fills whole batches: just un-flag everyone else.
            self.df.loc[~self.df[self.user_key].isin(user_slice), 'in_eval'] = False
        else:
            self.df.loc[~self.df[self.user_key].isin(user_slice), 'in_eval'] = False
            # Borrow additional users (not evaluated) to complete the last batch.
            users_to_complete = batch_size - (user_slice.shape[0] % batch_size)
            users = self.df[~self.df[self.user_key].isin(user_slice)][self.user_key].unique()[:users_to_complete]
            self.df = pd.concat([self.df[self.df[self.user_key].isin(users)].copy(),
                                 self.df[self.df['in_eval']].copy()],
                                ignore_index=True)
        self.create_offsets()
        self.print_info()

    def select_test_sessions(self, session_slice, batch_size=100):
        """Un-flag every session outside *session_slice* from evaluation."""
        self.df.loc[~self.df[self.session_key].isin(session_slice), 'in_eval'] = False
        #user_slice = self.df[self.df['in_eval']==True][self.user_key].unique()
        #self.select_test_users(user_slice, batch_size)

    def complete_test_sessions(self):
        """Pad each test session with duplicate 'dummy' rows (its last event,
        pushed into the future and excluded from evaluation) so every test
        session reaches the maximum test-session length."""
        # TODO: remove on final commit
        in_eval_rows = self.df[self.df['in_eval'] == True].copy()
        test_session_length = in_eval_rows.groupby(self.session_key).count().reset_index()
        max_test_session_length = test_session_length[self.item_key].max()
        test_sessions_ids = in_eval_rows[self.session_key].unique()
        # One template row per session: its last event with a later timestamp.
        dummy_rows = in_eval_rows.drop_duplicates(self.session_key, keep='last').reset_index(drop=True)
        dummy_rows[self.time_key] = dummy_rows[self.time_key] + 1e4
        dummy_rows['in_eval'] = False
        new_rows = pd.DataFrame([])
        for id_count, session_id in enumerate(test_sessions_ids):
            dummy_row = dummy_rows.iloc[id_count]
            nrows = max_test_session_length - test_session_length.iloc[id_count][self.item_key]
            if nrows > 0:
                # NOTE(review): DataFrame.append is deprecated in recent
                # pandas; pd.concat is the modern equivalent.
                rows_to_add = [dummy_row] * nrows
                new_rows = new_rows.append(rows_to_add, ignore_index=True)
                #print([dummy_row[self.session_key],session_id,
                #       test_session_length.iloc[id_count][self.item_key],
                #       nrows,len(rows_to_add),new_rows.shape[0]])
        a = self.df.shape[0]
        print([a, new_rows.shape[0]])
        self.df = self.df.append(new_rows, ignore_index=True)
        self.create_offsets()
        print([self.df.shape[0], new_rows.shape[0], self.df.shape[0] - a])
        #print(self.df[self.df[self.session_key].isin(test_sessions_ids)].groupby(self.session_key).count().describe())

    def print_info(self):
        """Log basic corpus statistics (users, items, sessions, rows, test rows)."""
        test_rows = self.df[self.df['in_eval'] == True].shape[0]
        print('Dataset: Users:{} Items:{} Sessions:{} Rows:{} Test-Rows: {}'.format(self.df[self.user_key].nunique(),
                                                                                    self.df[self.item_key].nunique(),
                                                                                    self.df[self.session_key].nunique(),
                                                                                    self.df.shape[0], test_rows))

    def get_click_offsets(self):
        """
        Return the offsets of the beginning clicks of each session IDs,
        where the offset is calculated against the first click of the first session ID.
        """
        offsets = np.zeros(self.df[self.session_key].nunique() + 1, dtype=np.int32)
        # group & sort the df by session_key and get the offset values
        offsets[1:] = self.df.groupby(self.session_key).size().cumsum()
        return offsets

    def get_user_session_offsets(self):
        """
        Return the offsets of the beginning clicks of each user,
        where the offset is calculated against the first click of the first session ID of the user.
        """
        # Ordering dataset by user_key, session_key, time_key
        self.df.sort_values([self.user_key, self.session_key, self.time_key], inplace=True)
        self.df.reset_index(drop=True, inplace=True)
        # Creation arrays for indexes
        user_offsets = np.zeros(self.df[self.user_key].nunique() + 1, dtype=np.int32)
        session_offsets = np.zeros(self.df[self.session_key].nunique() + 1, dtype=np.int32)
        # group & sort the df by user_key, session_key and get the offset values
        session_offsets[1:] = self.df.groupby([self.user_key, self.session_key], sort=False).size().cumsum()
        # group & sort the df by user_key and get the offset values
        user_offsets[1:] = self.df.groupby(self.user_key, sort=False)[self.session_key].nunique().cumsum()
        return user_offsets, session_offsets

    def order_session_idx(self):
        """ Order the session indices (by session start time when time_sort). """
        if self.time_sort:
            # starting time for each sessions, sorted by session IDs
            sessions_start_time = self.df.groupby(self.session_key)[self.time_key].min().values
            # order the session indices by session starting times
            session_idx_arr = np.argsort(sessions_start_time)
        else:
            session_idx_arr = np.arange(self.df[self.session_key].nunique())
        return session_idx_arr

    def order_user_idx(self):
        """ Order the user indices (by user start time when time_sort). """
        if self.time_sort:
            users_start_time = self.df.groupby(self.user_key)[self.time_key].min().values
            # order the session indices by user starting times
            user_idx_arr = np.argsort(users_start_time)
        else:
            user_idx_arr = np.arange(self.df[self.user_key].nunique())
        return user_idx_arr

    def add_item_indices(self, itemmap=None):
        """
        Add item index column named "item_idx" to the df
        Args:
            itemmap (pd.DataFrame): mapping between the item Ids and indices
        """
        if itemmap is None:
            item_ids = self.df[self.item_key].unique()  # unique item ids
            item2idx = pd.Series(data=np.arange(len(item_ids)),
                                 index=item_ids)
            itemmap = pd.DataFrame({self.item_key: item_ids,
                                    'item_idx': item2idx[item_ids].values})
        self.itemmap = itemmap
        # inner merge: rows whose item is absent from the map are dropped
        self.df = pd.merge(self.df, self.itemmap, on=self.item_key, how='inner')

    @property
    def items(self):
        """Unique item IDs present in the item map."""
        return self.itemmap[self.item_key].unique()
class SessionDataLoader(torch.utils.data.IterableDataset):
    def __init__(self, dataset, batch_size=50, return_extras=False):
        """
        A class for creating session-parallel mini-batches.
        Args:
            dataset (SessionDataset): the session dataset to generate the batches from
            batch_size (int): size of the batch
            return_extras (bool): whether extra per-row columns are collected
        """
        self.dataset = dataset
        self.batch_size = batch_size
        self.return_extras = return_extras
        # Columns attached to each batch during evaluation / training.
        self.eval_data_cols = [dataset.session_key, dataset.user_key, dataset.item_key]
        self.train_data_cols = []
        # variables to manage iterations over users: each of the batch_size
        # "lanes" walks one user's sessions in order.
        self.user_indptr = self.dataset.user_indptr
        self.n_users = len(self.dataset.user_indptr)
        self.session_offsets = self.dataset.session_offsets
        self.offset_users = self.session_offsets[self.user_indptr]
        self.user_idx_arr = np.arange(self.n_users - 1)
        self.user_iters = np.arange(self.batch_size)
        self.user_maxiter = self.user_iters.max()
        self.user_start = self.offset_users[self.user_idx_arr[self.user_iters]]
        self.user_end = self.offset_users[self.user_idx_arr[self.user_iters] + 1]
        self.session_iters = self.user_indptr[self.user_iters]
        self.session_start = self.session_offsets[self.session_iters]
        self.session_end = self.session_offsets[self.session_iters + 1]

    def __len__(self):
        # Upper bound on the number of mini-batches, not an exact count.
        return int((self.dataset.df.shape[0] / self.batch_size) + 1)

    def preprocess_data(self, data):
        """Sort *data* by user/session/time and return fresh
        (user_indptr, offset_session) arrays for it."""
        # sort by user and time key in order
        user_key, item_key, session_key, time_key = self.dataset.user_key, \
                                                    self.dataset.item_key, \
                                                    self.dataset.session_key, \
                                                    self.dataset.time_key
        data.sort_values([user_key, session_key, time_key], inplace=True)
        data.reset_index(drop=True, inplace=True)
        offset_session = np.r_[0, data.groupby([user_key, session_key], sort=False).size().cumsum()[:-1]]
        user_indptr = np.r_[0, data.groupby(user_key, sort=False)[session_key].nunique().cumsum()[:-1]]
        return user_indptr, offset_session

    def __iter__(self):
        """ Returns the iterator for producing session-parallel training mini-batches.
        Yields:
            input (B,): torch.LongTensor. Item indices that will be encoded as one-hot vectors later.
            target (B,): a Variable that stores the target item indices
            sstart, ustart: float arrays flagging lanes whose session / user
                changed since the previous step (hidden-state reset masks)
            data: extra per-row columns (see collect_extra_columns)
        """
        mode = self.dataset.mode
        # initializations
        df = self.dataset.df
        user_indptr = self.dataset.user_indptr
        # variables to manage iterations over users
        user_indptr, session_offsets = self.preprocess_data(df)
        n_users = len(self.dataset.user_indptr)
        #session_offsets = self.dataset.session_offsets
        offset_users = session_offsets[user_indptr]
        user_idx_arr = np.arange(n_users - 1)
        user_iters = np.arange(self.batch_size)
        user_maxiter = user_iters.max()
        user_start = offset_users[user_idx_arr[user_iters]]
        user_end = offset_users[user_idx_arr[user_iters] + 1]
        session_iters = user_indptr[user_iters]
        session_start = session_offsets[session_iters]
        session_end = session_offsets[session_iters + 1]
        # Per-lane "just started a new session / user" flags.
        sstart = np.zeros((self.batch_size,), dtype=np.float32)
        ustart = np.zeros((self.batch_size,), dtype=np.float32)
        #mask_zerofill = np.array([True]*self.batch_size, dtype=np.float32)
        finished = False
        while not finished:
            # Step all lanes in lock-step for the shortest remaining session.
            session_minlen = (session_end - session_start).min()
            idx_target = df.item_idx.values[session_start]
            for i in range(session_minlen - 1):
                # Build inputs & targets (previous target becomes next input)
                idx_input = idx_target
                idx_target = df.item_idx.values[session_start + i + 1]
                input_id = torch.LongTensor(idx_input)
                target = torch.LongTensor(idx_target)
                # Retrieving extra columns for various purposes
                data = self.collect_extra_columns(session_start + i + 1, mode=mode)
                yield input_id, target, sstart, ustart, data
                # Reset the start flags after the first step of a new session/user.
                sstart = np.zeros_like(sstart, dtype=np.float32)
                ustart = np.zeros_like(ustart, dtype=np.float32)
            session_start = session_start + session_minlen - 1
            # Lanes whose session is exhausted advance to their next session.
            session_start_mask = np.arange(len(session_iters))[(session_end - session_start) <= 1]
            sstart[session_start_mask] = 1
            for idx in session_start_mask:
                session_iters[idx] += 1
                if session_iters[idx] + 1 >= len(session_offsets):
                    # retreiving previous rows keeping end the same end
                    #session_start[idx] -= session_end.max()
                    #mask_zerofill[idx] = False
                    #if session_end.max()==df.shape[0]:
                    #    break
                    finished = True
                    break
                session_start[idx] = session_offsets[session_iters[idx]]
                session_end[idx] = session_offsets[session_iters[idx] + 1]
            # reset the User hidden state at user change
            user_change_mask = np.arange(len(user_iters))[(user_end - session_start <= 0)]
            ustart[user_change_mask] = 1
            for idx in user_change_mask:
                user_maxiter += 1
                if user_maxiter + 1 >= len(offset_users):
                    #mask_zerofill[idx] = False
                    #user_end[idx] = df.shape[0]-1
                    #session_iters[idx]
                    #session_start[idx]=0
                    #session_end[idx]=df.shape[0]
                    print(['user_end', len(offset_users), user_maxiter])
                    finished = True
                    break
                user_iters[idx] = user_maxiter
                user_start[idx] = offset_users[user_maxiter]
                user_end[idx] = offset_users[user_maxiter + 1]
                session_iters[idx] = user_indptr[user_maxiter]
                session_start[idx] = session_offsets[session_iters[idx]]
                session_end[idx] = session_offsets[session_iters[idx] + 1]

    def skip_sessions(self, minibatch_ids):
        # Intentionally a no-op (placeholder kept for interface compatibility).
        return

    def generate_batches(self):
        """Materialize all mini-batches into self.batches (memory heavy)."""
        self.batches = []
        for batch in self:
            self.batches.append(batch)
        print('Batches in Dataset:{}'.format(len(self.batches)))

    def get_batches(self, batch_id_start, batch_id_end):
        """Return the slice [batch_id_start:batch_id_end] of pre-generated
        batches (clamped to the end); raises when the slice is empty."""
        batches = []
        if batch_id_end < len(self.batches) and batch_id_start < len(self.batches):
            batches = self.batches[batch_id_start:batch_id_end]
        else:
            batches = self.batches[batch_id_start:]
        if len(batches) == 0:
            raise AssertionError('batches collecting failed from indexes {} to {}'.
                                 format(batch_id_start, batch_id_end))
        return batches

    def collect_eval_data(self, row_indexes, key='in_eval'):
        """Gather the evaluation rows (flagged by *key*) among *row_indexes*,
        returning their batch positions plus the configured eval columns."""
        eval_data = {}
        eval_mask = self.dataset.df[key].values[row_indexes]
        eval_data[key] = np.arange(self.batch_size, dtype=np.int32)[eval_mask]
        data_src = self.dataset.df.iloc[row_indexes].copy()
        for col in self.eval_data_cols:
            eval_col = data_src[col].values[eval_mask]
            eval_data[col] = eval_col
        return eval_data

    def collect_extra_columns(self, row_indexes, mode='train'):
        """Return the extra per-row data for this step: configured train
        columns in 'train' mode, evaluation data in 'test' mode."""
        data = None
        if mode == 'train':
            if len(self.train_data_cols) > 0:
                rows = self.dataset.df.iloc[row_indexes].copy()
                data = {col: rows[col].values for col in self.train_data_cols}
        elif mode == 'test':
            data = self.collect_eval_data(row_indexes)
        else:
            raise NotImplementedError('Not implemented dataset mode')
        return data
| StarcoderdataPython |
127809 | import requests
import json
from constants import getConstants
# Application-wide configuration, loaded once at import time.
constants = getConstants()
def api_request(method, url, header=None, data=None, response_type='json'):
    """Perform an HTTP request and return the decoded response body.

    Args:
        method: HTTP verb, e.g. 'GET' or 'POST'.
        url: full request URL.
        header: optional dict of HTTP headers.
        data: optional request body.
        response_type: 'json' to decode the body as JSON; anything else
            returns the raw response text.

    Returns:
        The decoded JSON object / response text, or None when decoding fails.
    """
    response = requests.request(method, url, headers=header, data=data)
    if response_type == 'json':
        try:
            # FIX: response.json() raises ValueError (json.JSONDecodeError)
            # on malformed bodies; catch only that instead of a bare except,
            # which also swallowed SystemExit/KeyboardInterrupt.
            return response.json()
        except ValueError:
            return None
    try:
        return response.text
    except Exception:
        # Text decoding failed (e.g. broken encoding); mirror the JSON path.
        return None
def post_data(data):
    """POST *data* as a JSON document to the DB middleware writer service.

    The endpoint is built from the DB_MIDDLEWARE_WRITER_HOST / PORT entries
    of the application constants. Failures are reported to stdout and never
    propagated (best-effort delivery).
    """
    try:
        URL = "http://" + constants["DB_MIDDLEWARE_WRITER_HOST"] + ":" + \
              constants["DB_MIDDLEWARE_WRITER_PORT"] + "/" + "data_writer"
        METHOD = 'POST'
        PAYLOAD = json.dumps(data)
        HEADERS = {
            'Content-Type': 'application/json'
        }
        response = api_request(METHOD, URL, header=HEADERS,
                               data=PAYLOAD, response_type='json')
        print(response)
    except Exception:
        # FIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; message preserved for compatibility.
        print("error in posting data... :(")
| StarcoderdataPython |
3500144 | <gh_stars>10-100
import numpy as np
import torch
import scipy
_eps = 1.0e-5
class FIDScheduler(object):
    """Decays the trainer's LR schedulers when FID stops improving.

    Keeps a ring buffer of the FID values from the last ``oldest_fid_iter``
    iterations (sampled every ``freq_fid``); once the buffer is full, each
    new FID is compared against the oldest one and, after ``max_failures``
    consecutive non-improvements, both schedulers are stepped.
    """

    def __init__(self, args):
        self.freq_fid = 2000  # args.freq_fid
        self.oldest_fid_iter = 20000  # args.oldest_fid_iter
        self.num_old_fids = int(self.oldest_fid_iter / self.freq_fid) + 1
        # Negative cursor == ring buffer still warming up.
        self.curent_cursor = -self.num_old_fids
        self.old_fids = np.zeros([self.num_old_fids])
        self.trainer = None
        self.num_failures = 0
        self.max_failures = 3  # args.max_failures

    def init_trainer(self, trainer):
        """Attach the trainer whose scheduler_d / scheduler_g get stepped."""
        self.trainer = trainer

    def step(self, fid):
        """Record a new FID value and decay the learning rate on stagnation."""
        if self.curent_cursor < 0:
            # Warm-up: just fill the buffer front-to-back.
            print('Filling the buffer: curent_cursor' + str(self.curent_cursor))
            self.old_fids[self.num_old_fids + self.curent_cursor] = fid
            self.curent_cursor += 1
            return
        print('old_fids')
        print(self.old_fids)
        # Overwrite the slot the cursor points at, then advance it; the new
        # cursor position now holds the *oldest* retained FID.
        self.old_fids[self.curent_cursor] = fid
        self.curent_cursor = np.mod(self.curent_cursor + 1, self.num_old_fids)
        old_fid = self.old_fids[self.curent_cursor]
        print('new_fids')
        print(self.old_fids)
        if old_fid < fid:
            print(' incresing counter ')
            self.num_failures += 1
        else:
            print('resetting counter')
            self.num_failures = 0
        if self.num_failures == self.max_failures:
            print(" reducing step-size ")
            self.num_failures = 0
            self.trainer.scheduler_d.step()
            self.trainer.scheduler_g.step()
class MMDScheduler(object):
    """LR scheduler driven by a relative (3-sample) MMD test.

    Stores the polynomial-kernel sums of past fake batches in a ring buffer;
    each new batch is compared against the oldest stored one via the
    3-sample test. Repeated failures to improve (high p-value) trigger a
    step of both trainer schedulers and a buffer refill.
    """

    def __init__(self, args, device):
        self.freq_fid = args.freq_fid
        self.oldest_fid_iter = args.oldest_fid_iter
        self.num_old_fids = int(self.oldest_fid_iter / self.freq_fid) + 1
        # Negative cursor == ring buffer still warming up.
        self.curent_cursor = -self.num_old_fids
        self.bs = 2048
        self.old_scores = self.num_old_fids * [None]
        self.old_fids = np.zeros([self.num_old_fids])
        self.trainer = None
        self.device = device
        self.num_failures = 0
        self.max_failures = 3  # args.max_failures
        # Negative restart == buffer is being refilled after an LR decay.
        self.restart = 0

    def init_trainer(self, trainer):
        """Attach the trainer whose scheduler_d / scheduler_g get stepped."""
        self.trainer = trainer

    def step(self, fid, score_true, score_fake):
        """Record a new batch of activations and decay the LR on stagnation."""
        bs = min(self.bs, score_true.shape[0], score_fake.shape[0])
        act_true = score_true[:bs]
        act_fake = score_fake[:bs]
        if self.curent_cursor < 0:
            # Warm-up: just fill the buffer front-to-back.
            print('Filling the buffer: curent_cursor ' + str(self.curent_cursor))
            Y_related_sums = diff_polynomial_mmd2_and_ratio_with_saving(act_true.to(self.device), act_fake.to(self.device), None, device=self.device)
            self.old_scores[self.num_old_fids + self.curent_cursor] = Y_related_sums
            self.old_fids[self.num_old_fids + self.curent_cursor] = fid
            self.curent_cursor += 1
        else:
            if self.restart < 0:
                # Refill phase after an LR decay: overwrite without testing.
                print('Re-Filling the buffer: curent_cursor ' + str(self.curent_cursor))
                Y_related_sums = diff_polynomial_mmd2_and_ratio_with_saving(act_true, act_fake, None, device=self.device)
                self.old_scores[self.curent_cursor] = Y_related_sums
                self.old_fids[self.curent_cursor] = fid
                self.curent_cursor = np.mod(self.curent_cursor + 1, self.num_old_fids)
                self.restart += 1
            else:
                # Compare the new batch against the oldest stored sums.
                saved_Z = self.old_scores[self.curent_cursor]
                mmd2_diff, test_stat, Y_related_sums = diff_polynomial_mmd2_and_ratio_with_saving(act_true, act_fake, saved_Z, device=self.device)
                p_val = scipy.stats.norm.cdf(test_stat)
                self.old_scores[self.curent_cursor] = Y_related_sums
                self.old_fids[self.curent_cursor] = fid
                self.curent_cursor = np.mod(self.curent_cursor + 1, self.num_old_fids)
                print("3-sample test stat = %.1f" % test_stat)
                print("3-sample p-value = %.1f" % p_val)
                if p_val > .1:
                    self.num_failures += 1
                    # FIX: the original passed logger-style format args to
                    # print(), which never interpolates; use % formatting.
                    print(' increasing counter to %d ' % self.num_failures)
                    if self.num_failures >= self.max_failures:
                        self.num_failures = 0
                        self.trainer.scheduler_d.step()
                        self.trainer.scheduler_g.step()
                        self.restart = -self.max_failures
                        print("failure to improve after %d tests" % self.max_failures)
                        print(" reducing lr to: lr energy at %f and lr gen at %f " % (self.trainer.optim_d.param_groups[0]['lr'], self.trainer.optim_g.param_groups[0]['lr']))
                    else:
                        print(" No improvement in last %d, keeping lr energy at %f and lr gen at %f " % (self.num_failures, self.trainer.optim_d.param_groups[0]['lr'], self.trainer.optim_g.param_groups[0]['lr']))
                else:
                    print(" Keeping lr energy at %f and lr gen at %f " % (self.trainer.optim_d.param_groups[0]['lr'], self.trainer.optim_g.param_groups[0]['lr']))
                    self.num_failures = 0
        print("FID scores: " + str(self.old_fids))
def diff_polynomial_mmd2_and_ratio_with_saving(X, Y, saved_sums_for_Z, device='cuda'):
    """Cubic-polynomial-kernel MMD bookkeeping with reusable kernel sums.

    Computes the kernel sums between real batch X and fake batch Y with the
    kernel k(a, b) = (<a, b> / dim + 1) ** 3.

    Args:
        X, Y: (m, dim) tensors of activations (same batch size m assumed).
        saved_sums_for_Z: previously returned sums for an older fake batch Z,
            or None to only compute and return the sums for (X, Y).
        device: device the kernel matrices are computed on.

    Returns:
        When saved_sums_for_Z is None: the tuple of CPU sums for (X, Y).
        Otherwise: (mmd2_diff, ratio, new CPU sums) from the 3-sample test.
    """
    dim = float(X.shape[1])
    X = X.to(device)
    Y = Y.to(device)
    # FIX: removed a duplicate, discarded `torch.einsum('ni,mi->nm', X, Y)`
    # statement whose result was never used (pure wasted compute).
    K_XY = (torch.einsum('ni,mi->nm', X, Y) / dim + 1) ** 3
    K_YY = (torch.einsum('ni,mi->nm', Y, Y) / dim + 1) ** 3

    m = float(K_YY.shape[0])

    Y_related_sums = _get_sums(K_XY, K_YY)
    if saved_sums_for_Z is None:
        return tuple([el.cpu() for el in Y_related_sums])
    saved_sums_for_Z = tuple([el.to(device) for el in saved_sums_for_Z])
    mmd2_diff, ratio = _diff_mmd2_and_ratio_from_sums(Y_related_sums, saved_sums_for_Z, m)

    return mmd2_diff, ratio, tuple([el.cpu() for el in Y_related_sums])


def _get_sums(K_XY, K_YY, const_diagonal=False):
    """Return the kernel sums needed by the MMD estimators.

    Returns:
        (Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum)
        where the "t" quantities exclude the diagonal of K_YY.
    """
    ### Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to explicitly form them
    if const_diagonal is not False:
        const_diagonal = float(const_diagonal)
        diag_Y = const_diagonal
        # m only appears through the diagonal sum below.
        sum_diag2_Y = float(K_YY.shape[0]) * const_diagonal**2
    else:
        diag_Y = torch.diag(K_YY)
        sum_diag2_Y = torch.sum(diag_Y**2)

    Kt_YY_sums = torch.sum(K_YY, dim=1) - diag_Y
    K_XY_sums_0 = torch.sum(K_XY, dim=0)
    K_XY_sums_1 = torch.sum(K_XY, dim=1)

    # TODO: turn these into dot products?
    # should figure out if that's faster or not on GPU / with theano...
    Kt_YY_2_sum = (K_YY ** 2).sum() - sum_diag2_Y
    K_XY_2_sum = (K_XY ** 2).sum()
    return Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum
def _diff_mmd2_and_ratio_from_sums(Y_related_sums, Z_related_sums, m, const_diagonal=False):
    """Combine precomputed kernel sums into MMD^2(X, Y) - MMD^2(X, Z) and a
    variance-normalized ratio.

    Y_related_sums / Z_related_sums are the 5-tuples produced by _get_sums
    for (K_XY, K_YY) and (K_XZ, K_ZZ); m is the common sample count.
    Returns (mmd2_diff, ratio) as Python floats, with
    ratio = mmd2_diff / sqrt(max(variance_estimate, _eps)).
    `const_diagonal` is accepted but unused in this function.

    NOTE(review): the term structure looks like the relative-MMD model
    selection test -- confirm against the original derivation before
    altering any estimator term.
    """
    Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum = Y_related_sums
    Kt_ZZ_sums, Kt_ZZ_2_sum, K_XZ_sums_0, K_XZ_sums_1, K_XZ_2_sum = Z_related_sums
    Kt_YY_sum = Kt_YY_sums.sum()
    Kt_ZZ_sum = Kt_ZZ_sums.sum()
    K_XY_sum = K_XY_sums_0.sum()
    K_XZ_sum = K_XZ_sums_0.sum()
    # TODO: turn these into dot products?
    # should figure out if that's faster or not on GPU / with theano...
    ### Estimators for the various terms involved
    # Mean-kernel estimates (diagonals excluded for the within-sample terms).
    muY_muY = Kt_YY_sum / (m * (m-1))
    muZ_muZ = Kt_ZZ_sum / (m * (m-1))
    muX_muY = K_XY_sum / (m * m)
    muX_muZ = K_XZ_sum / (m * m)
    # Second-moment estimates of the kernel row means.
    E_y_muY_sq = (torch.dot(Kt_YY_sums, Kt_YY_sums) - Kt_YY_2_sum) / (m*(m-1)*(m-2))
    E_z_muZ_sq = (torch.dot(Kt_ZZ_sums, Kt_ZZ_sums) - Kt_ZZ_2_sum) / (m*(m-1)*(m-2))
    E_x_muY_sq = (torch.dot(K_XY_sums_1, K_XY_sums_1) - K_XY_2_sum) / (m*m*(m-1))
    E_x_muZ_sq = (torch.dot(K_XZ_sums_1, K_XZ_sums_1) - K_XZ_2_sum) / (m*m*(m-1))
    E_y_muX_sq = (torch.dot(K_XY_sums_0, K_XY_sums_0) - K_XY_2_sum) / (m*m*(m-1))
    E_z_muX_sq = (torch.dot(K_XZ_sums_0, K_XZ_sums_0) - K_XZ_2_sum) / (m*m*(m-1))
    # Cross terms between row-mean vectors.
    E_y_muY_y_muX = torch.dot(Kt_YY_sums, K_XY_sums_0) / (m*m*(m-1))
    E_z_muZ_z_muX = torch.dot(Kt_ZZ_sums, K_XZ_sums_0) / (m*m*(m-1))
    E_x_muY_x_muZ = torch.dot(K_XY_sums_1, K_XZ_sums_1) / (m*m*m)
    # Mean squared-kernel estimates.
    E_kyy2 = Kt_YY_2_sum / (m * (m-1))
    E_kzz2 = Kt_ZZ_2_sum / (m * (m-1))
    E_kxy2 = K_XY_2_sum / (m * m)
    E_kxz2 = K_XZ_2_sum / (m * m)
    ### Combine into overall estimators
    mmd2_diff = muY_muY - 2 * muX_muY - muZ_muZ + 2 * muX_muZ
    # Leading-order variance contribution.
    first_order = 4 * (m-2) / (m * (m-1)) * (
        E_y_muY_sq - muY_muY**2
        + E_x_muY_sq - muX_muY**2
        + E_y_muX_sq - muX_muY**2
        + E_z_muZ_sq - muZ_muZ**2
        + E_x_muZ_sq - muX_muZ**2
        + E_z_muX_sq - muX_muZ**2
        - 2 * E_y_muY_y_muX + 2 * muY_muY * muX_muY
        - 2 * E_x_muY_x_muZ + 2 * muX_muY * muX_muZ
        - 2 * E_z_muZ_z_muX + 2 * muZ_muZ * muX_muZ
    )
    # Second-order (1/m^2) variance contribution.
    second_order = 2 / (m * (m-1)) * (
        E_kyy2 - muY_muY**2
        + 2 * E_kxy2 - 2 * muX_muY**2
        + E_kzz2 - muZ_muZ**2
        + 2 * E_kxz2 - 2 * muX_muZ**2
        - 4 * E_y_muY_y_muX + 4 * muY_muY * muX_muY
        - 4 * E_x_muY_x_muZ + 4 * muX_muY * muX_muZ
        - 4 * E_z_muZ_z_muX + 4 * muZ_muZ * muX_muZ
    )
    var_est = first_order + second_order
    # _eps is a module-level constant defined elsewhere in this file; it
    # floors the variance to avoid division by (near) zero.
    ratio = mmd2_diff.item() / np.sqrt(max(var_est.item(), _eps))
    return mmd2_diff.item(), ratio
6432969 | <filename>utils/convert_fregene_vcf.py
import argparse
import random
import numpy as np
format_header = "##fileformat=VCFv4.1"
header_left = "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT"
def generate_vcf(flname, positions, diploids):
    """Yield the lines of a minimal VCFv4.1 file for the given genotypes.

    Parameters
    ----------
    flname : unused here; kept for interface compatibility with write_vcf.
    positions : iterable of int SNP positions (records are emitted in
        ascending position order).
    diploids : sequence of (karyotype, genotypes) pairs where genotypes maps
        position -> allele count in {0, 1, 2}.

    Yields the format line, the column header, then one record per site.
    """
    header = [header_left]
    for i in range(len(diploids)):
        header.append("indiv_" + str(i+1))
    yield format_header
    yield "\t".join(header)
    # Cleanup: the original also built unused n_sites/site_idx locals and
    # sorted a copy in place; sorted() expresses the same thing directly.
    for pos in sorted(positions):
        # Placeholder REF/ALT/QUAL/INFO fields: only CHROM/POS and the
        # per-individual GT columns carry real information here.
        cols = ["1", str(pos), ".", "A", "T", "0", "PASS",
                "AC=30;AF=0.357;AN=84;DP=804;PercentNBaseSolid=0.0000;set=AGC", "GT"]
        for indiv in diploids:
            gt = indiv[1].get(pos, 0)
            if gt == 0:
                cols.append("0/0")
            elif gt == 2:
                cols.append("1/1")
            else:
                cols.append("0/1")
        yield "\t".join(cols)
def write_vcf(flname, positions, diploids):
    """Write the VCF produced by generate_vcf to the file flname."""
    with open(flname, "w") as out:
        for line in generate_vcf(flname, positions, diploids):
            out.write(line + "\n")
def read_snps(flname):
    """Parse an (invert)FREGENE haplotype output file.

    Returns (all_snp_positions, chromosomes) where chromosomes is a list of
    (index, snp_position_set) pairs -- one per haploid chromosome -- and
    all_snp_positions is the union of every chromosome's positions.
    """
    chromosomes = []
    recording = False  # True while inside a <DATA> ... </DATA> section
    all_snp_positions = set()
    with open(flname) as fl:
        for ln in fl:
            if not recording and ln.startswith("<DATA>"):
                recording = True
                start = len("<DATA>")
                snp_positions = set(map(int, ln[start:].strip().split()))
                # invertFREGENE uses 0 to indicate the end of a chromosome
                # also seems to use an EOL
                # NOTE(review): remove() raises KeyError if 0 is absent --
                # presumably every chromosome line is 0-terminated; confirm.
                snp_positions.remove(0)
                chromosomes.append((len(chromosomes), snp_positions))
                all_snp_positions.update(snp_positions)
            elif recording and ln.startswith("</DATA>"):
                recording = False
            elif recording:
                # NOTE(review): ln[5:] assumes a fixed 5-character prefix on
                # continuation lines -- TODO confirm against FREGENE output.
                snp_positions = set(map(int, ln[5:].strip().split()))
                snp_positions.remove(0)
                chromosomes.append((len(chromosomes), snp_positions))
                all_snp_positions.update(snp_positions)
    return all_snp_positions, chromosomes
def read_karyotypes(flname):
    """Read a whitespace-delimited file of "chromosome_index karyotype" pairs
    and return them as an int -> int dict."""
    with open(flname) as handle:
        rows = (line.strip().split() for line in handle)
        return {int(cols[0]): int(cols[1]) for cols in rows}
def form_diploids(chromosomes, karyotypes):
    """Randomly pair haploid chromosomes into diploid individuals.

    Parameters
    ----------
    chromosomes : list of (index, snp_position_set) pairs; shuffled IN PLACE.
    karyotypes : dict mapping chromosome index -> inversion state (int).

    Returns a list of (karyotype_sum, genotypes) pairs where genotypes maps
    SNP position -> allele count (1 = heterozygous, 2 = homozygous).
    An odd trailing chromosome would be silently dropped by the pairing loop.
    """
    # Random pairing: shuffle, then take consecutive pairs.
    # Cleanup: removed the unused `diploid_karyotypes` local from the original.
    random.shuffle(chromosomes)
    diploids = []
    for i in range(0, len(chromosomes), 2):
        idx1, chrom1 = chromosomes[i]
        idx2, chrom2 = chromosomes[i+1]
        k = karyotypes[idx1] + karyotypes[idx2]
        genotypes = dict()
        for pos in chrom1:
            genotypes[pos] = 1
        for pos in chrom2:
            genotypes[pos] = genotypes.get(pos, 0) + 1
        diploids.append((k, genotypes))
    return diploids
def write_pops(basename, diploids):
    """Write population membership files for the three karyotype groups.

    Produces basename.pops (one comma-joined line per group) plus one
    basename_<group>.ids file listing that group's individual identifiers.
    Group 0 = homozygous standard, 1 = heterozygous, 2 = homozygous inverted.
    """
    groups = {0: [], 1: [], 2: []}
    for idx, (karyotype, _) in enumerate(diploids):
        groups[karyotype].append("indiv_" + str(idx + 1))
    print(groups)
    display_names = {0: "Homo. Std.", 1: "Hetero.", 2: "Homo. Inv."}
    with open(basename + ".pops", "w") as fl:
        for group_id, pop in groups.items():
            fl.write(display_names[group_id])
            for ident in pop:
                fl.write(",")
                fl.write(ident)
            fl.write("\n")
    file_tags = {0: "homo_std", 1: "hetero", 2: "homo_inv"}
    for group_id, pop in groups.items():
        with open(basename + "_" + file_tags[group_id] + ".ids", "w") as fl:
            for ident in pop:
                fl.write(ident)
                fl.write("\n")
def parse_args():
    """Parse the three required command-line options for this converter."""
    arg_parser = argparse.ArgumentParser()
    for option in ("--sim-input", "--karyotype-input", "--output-base"):
        arg_parser.add_argument(option,
                                type=str,
                                required=True)
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    # Parse the FREGENE haplotypes and the per-chromosome inversion states.
    all_snp_positions, chromosomes = read_snps(args.sim_input)
    karyotypes = read_karyotypes(args.karyotype_input)
    print(len(all_snp_positions), len(chromosomes), len(karyotypes))
    # Pair haploid chromosomes at random into diploid individuals.
    diploids = form_diploids(chromosomes, karyotypes)
    print(len(diploids))
    # Debug output: karyotypes of the first few individuals.
    for i in range(5):
        print(diploids[i][0])
    print(all_snp_positions)
    print()
    print(diploids[0])
    # Emit the VCF plus the population-membership files.
    write_vcf(args.output_base + ".vcf",
              all_snp_positions,
              diploids)
    write_pops(args.output_base,
               diploids)
| StarcoderdataPython |
6492676 | <filename>test_linear.py
from ANN import ANN
from PSO import PSO
import time
# Choose exactly one training data file; the others stay commented out.
#data = 'Data/1in_linear.txt'
#data = 'Data/1in_cubic.txt'
data = 'Data/1in_sine.txt'
#data = 'Data/1in_tanh.txt'
#data = 'Data/2in_xor.txt'
# Build a network with one input feature and a 12-neuron hidden layer
# feeding a single output.
input_size = 1
ann = ANN(input_size, [12,1])
# NOTE(review): positional PSO hyperparameters (swarm size, iterations,
# inertia/acceleration coefficients, ...) -- confirm their meaning against
# PSO.__init__ before changing any of them.
pso = PSO(50, 10, 0.9, 0.4, 2.5, 0, 1.5, 1, ann, 1000, data, input_size)
# Time the whole optimization run and report it in minutes.
start = time.time()
pso.execute()
print('Execution time: {} minutes'.format((time.time() - start)/60))
11292035 | import os, sys, glob
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
import argparse
import pandas as pd
import numpy as np
from Utils.Funcs import printProgressBar, loadFromJson
from DataClasses import Annotation, classToDic
import json
import array
import math
import random
import wave
def main(jsonPath, outJson, noiseFilesPaths, addWhite, SNRs, ignoreExisting):
    """
    Add noise to every wav referenced by a dataset json and write a new json
    that references both the clean and the generated noisy files.

    Parameters
    ----------
    jsonPath : path to the dataset json (one entry per clean wav).
    outJson : path for the output json (clean entries plus noisy ones).
    noiseFilesPaths : three folders of noise wavs, one each for the
        train/dev/test partitions.
    addWhite : if truthy, also generate a white-noise variant per SNR.
    SNRs : signal-to-noise ratios (dB) to generate for each noise file.
    ignoreExisting : if truthy, skip mixing when the noisy wav already exists
        on disk (the json entry is still added).

    Example
    ----------
    python addNoise.py -j "/mnt/HD-Storage/Datasets/Recola_46/data.json" -o "/mnt/HD-Storage/Datasets/Recola_46/data_noisy.json" -aw True -n "/mnt/HD-Storage/Datasets/NoiseFiles/train" "/mnt/HD-Storage/Datasets/NoiseFiles/dev" "/mnt/HD-Storage/Datasets/NoiseFiles/test"
    (analogous invocations work for the SEMAINE, IEMOCAP and MaSS_Fr sets)
    """
    datasetPath = os.path.split(jsonPath)[0]
    noisyFolder = "Wavs_Noisy"
    noisyWavsPath = os.path.join(datasetPath, noisyFolder)
    if not os.path.exists(noisyWavsPath): os.makedirs(noisyWavsPath)
    # Collect the candidate noise files for each partition (recursive glob).
    trainPath = os.path.join(noiseFilesPaths[0], "**", "*.wav")
    trainNoises = glob.glob(trainPath, recursive=True)
    devPath = os.path.join(noiseFilesPaths[1], "**", "*.wav")
    devNoises = glob.glob(devPath, recursive=True)
    testPath = os.path.join(noiseFilesPaths[2], "**", "*.wav")
    testNoises = glob.glob(testPath, recursive=True)
    samples = loadFromJson(jsonPath)
    # Start from the clean entries; noisy entries are added alongside them.
    newSamples = samples.copy()
    for i, ID in enumerate(samples.keys()):
        sample = samples[ID].copy()
        wavePath = sample["path"]
        wavFullPath = os.path.join(datasetPath, wavePath)
        sample["features"] = {} # to avoid reading the wrong feature extracted from clean speech
        wavsFolder = wavePath.split(os.sep)[0]  # NOTE(review): unused local
        splits = wavePath.split(os.sep)
        fileName = splits[-1].replace(".wav", "")
        ## MAKE NOISY FILES AND ADD TO SAMPLES, GIVE A NEW ID (which would be name of file)
        # Pick the noise pool matching this sample's partition.
        noiseFiles = trainNoises
        if sample["partition"] == "dev" : noiseFiles = devNoises
        if sample["partition"] == "test": noiseFiles = testNoises
        for snr in SNRs:
            for noiseFile in noiseFiles:
                # Mirror the original folder structure under Wavs_Noisy.
                outWavPath = noisyFolder
                for split in splits[1:-1]:
                    outWavPath = os.path.join(outWavPath, split)
                outWavName = fileName +'_snr' + str(snr) + '_' + noiseFile.split(os.sep)[-1]
                outWavPath = os.path.join(outWavPath, outWavName)
                outWavFullPath = os.path.join(datasetPath, outWavPath)
                if not (ignoreExisting and os.path.exists(outWavFullPath)):
                    addNoiseFile(wavFullPath, noiseFile, outWavFullPath, snr=snr)
                # NOTE: rebinds the loop variable ID with the noisy file's ID;
                # safe because the outer loop reassigns it each iteration.
                ID = outWavName.replace(".wav", "")
                newSample = sample.copy()
                newSample["path"] = outWavPath
                newSample["ID"] = ID
                newSamples[ID] = newSample
            if addWhite:
                # One extra variant per SNR mixed with synthetic white noise.
                outWavPath = noisyFolder
                for split in splits[1:-1]:
                    outWavPath = os.path.join(outWavPath, split)
                outWavName = fileName +'_snr' + str(snr) + '_whiteNoise.wav'
                outWavPath = os.path.join(outWavPath, outWavName)
                outWavFullPath = os.path.join(datasetPath, outWavPath)
                if not (ignoreExisting and os.path.exists(outWavFullPath)):
                    addWhiteNoise(wavFullPath, outWavFullPath, snr=snr)
                ID = outWavName.replace(".wav", "")
                newSample = sample.copy()
                newSample["path"] = outWavPath
                newSample["ID"] = ID
                newSamples[ID] = newSample
        printProgressBar(i + 1, len(samples), prefix = 'Making wav files noisy:', suffix = 'Complete', length = "fit")
    with open(outJson, 'w') as jsonFile:
        json.dump(newSamples, jsonFile, indent=4, ensure_ascii=False)
def whiteNoise(size):
    """Return `size` samples of zero-mean, unit-variance Gaussian noise."""
    return np.random.normal(loc=0, scale=1, size=size)
def noiseGain(snr, sGain=1):
    """Return the noise amplitude that yields `snr` dB below the signal gain
    sGain (i.e. sGain scaled down by snr decibels)."""
    exponent = math.log10(sGain) - snr/20
    return 10 ** exponent
def addWhiteNoise(filePath, outPath, snr=5):
    """Mix Gaussian white noise into the wav at filePath at the given SNR (dB)
    and write the result to outPath."""
    source = wave.open(filePath, "r")
    raw = source.readframes(source.getnframes())
    samples = np.frombuffer(raw, dtype="int16").astype(np.float64)
    noise = whiteNoise(len(samples))
    addNoise(source, samples, noise, outPath, snr=snr)
def addNoiseFile(filePath, noisePath, outPath, snr=5):
    """Mix a random window of the noise wav into the speech wav at the given
    SNR (dB) and write the result to outPath."""
    speech_wav = wave.open(filePath, "r")
    noise_wav = wave.open(noisePath, "r")
    speech = np.frombuffer(speech_wav.readframes(speech_wav.getnframes()), dtype="int16").astype(np.float64)
    noise = np.frombuffer(noise_wav.readframes(noise_wav.getnframes()), dtype="int16").astype(np.float64)
    # Pick a random noise window exactly as long as the speech signal.
    offset = random.randint(0, len(noise) - len(speech))
    addNoise(speech_wav, speech, noise[offset: offset + len(speech)], outPath, snr=snr)
def addNoise(fileWav, fileVec, noiseVec, outPath, snr=5):
    """Mix noiseVec into fileVec at the requested SNR and write a 16-bit wav.

    Parameters
    ----------
    fileWav : open wave.Wave_read whose parameters (channels, sample width,
        frame rate) are copied onto the output file.
    fileVec, noiseVec : float sample vectors of equal length.
    outPath : destination path for the mixed wav file.
    snr : target signal-to-noise ratio in dB.
    """
    fileGain = np.sqrt(np.mean(np.square(fileVec), axis=-1))
    # Normalize the noise to unit RMS, then scale it to the gain implied by
    # the requested SNR relative to the signal's RMS.
    noiseNormed = noiseVec / np.sqrt(np.mean(np.square(noiseVec), axis=-1))
    newNoise = noiseGain(snr, sGain=fileGain) * noiseNormed
    mixed = fileVec + newNoise
    # NOTE(review): the int16 cast can wrap on loud mixtures; consider
    # clipping to [-32768, 32767] before conversion.
    outFile = wave.Wave_write(outPath)
    try:
        outFile.setparams(fileWav.getparams())
        outFile.writeframes(array.array('h', mixed.astype(np.int16)).tobytes())
    finally:
        # BUG FIX: close the writer even if setparams/writeframes raises,
        # so the output file handle is never leaked.
        outFile.close()
outFile.close()
if __name__== "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--json', '-j', help="input json file path")
    parser.add_argument('--outJson', '-o', help="output json file path with references to noisy json files")
    parser.add_argument('--noiseFilesPaths', '-n', help="paths to folders containing noise wav files, put 3 folders to have training/dev/test, 1 folder path if you don't have partitioning!", nargs='+', default=[])
    parser.add_argument('--addWhite', '-aw', help="whether to add white noise or not", default=False)
    parser.add_argument('--SNRs', '-snr', help="Signal to Noise ratios for mixing the noise with speech files", nargs='+', default=[5, 15])
    parser.add_argument('--ignoreExisting', '-ie', help="ignore already existing noisy files paths", default=True)
    args = parser.parse_args()
    # Both --json and --outJson are required; otherwise print usage and exit.
    Flag = False
    if args.json is None: Flag=True
    if args.outJson is None: Flag=True
    # Flags arrive as strings from the command line (e.g. "-aw False"), so
    # the literal "False" must be converted to the boolean False by hand.
    if args.addWhite == "False": args.addWhite = False
    if args.ignoreExisting == "False": args.ignoreExisting = False
    if Flag:
        print(main.__doc__)
        parser.print_help()
    else:
        main(args.json, args.outJson, args.noiseFilesPaths, args.addWhite, args.SNRs, args.ignoreExisting)
| StarcoderdataPython |
1609934 | <reponame>vmware/distributed-apps-platform
# Copyright (c) 2020-2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2 License
# The full license information can be found in LICENSE.txt
# in the root directory of this project.
__version__='1.0.1'
| StarcoderdataPython |
11267575 | # -*- coding: utf-8 -*-
# flake8: noqa
"""Installation script."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os
import os.path as op
from pathlib import Path
import re
from setuptools import setup
#------------------------------------------------------------------------------
# Setup
#------------------------------------------------------------------------------
def _package_tree(pkgroot):
    """Return the dotted names of all packages under ``pkgroot``.

    Walks the tree next to this setup script and keeps every directory that
    contains an ``__init__.py``.
    """
    root = op.dirname(__file__)
    packages = []
    for dirpath, _dirnames, filenames in os.walk(op.join(root, pkgroot)):
        if '__init__.py' in filenames:
            packages.append(op.relpath(dirpath, root).replace(op.sep, '.'))
    return packages
# Long description for PyPI, taken verbatim from the README.
readme = (Path(__file__).parent / 'README.md').read_text()
# Find version number from `__init__.py` without executing it.
with (Path(__file__).parent / 'phylib/__init__.py').open('r') as f:
    version = re.search(r"__version__ = '([^']+)'", f.read()).group(1)
# Standard setuptools metadata; packages are discovered by walking the
# source tree for __init__.py files (see _package_tree above), and shader /
# data assets are shipped via package_data.
setup(
    name='phylib',
    version=version,
    license="BSD",
    description='Ephys data analysis for thousands of channels',
    long_description=readme,
    long_description_content_type='text/markdown',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/cortex-lab/phylib',
    packages=_package_tree('phylib'),
    package_dir={'phylib': 'phylib'},
    package_data={
        'phylib': [
            '*.vert', '*.frag', '*.glsl', '*.npy', '*.gz', '*.txt',
            '*.html', '*.css', '*.js', '*.prb'],
    },
    include_package_data=True,
    keywords='phy,data analysis,electrophysiology,neuroscience',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Framework :: IPython",
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
    ],
)
| StarcoderdataPython |
4957995 | <gh_stars>1-10
import mysql.connector as sql
from SearchUtility_Backend.SearchUtilityLogger import SearchUtilityLogger
class MySQLCommands:
    """Namespace of static factories returning the SQL statements used by
    SqlDBManager (schema creation, inserts, and lookups).

    Note: "occurances" is a historical misspelling baked into the schema;
    renaming it would require a migration, so the table name is kept as-is.
    """
    @staticmethod
    def CreateDataBase():
        """DDL: create the SearchUtility database if missing."""
        return "CREATE DATABASE IF NOT EXISTS SearchUtility"
    @staticmethod
    def UseDataBase():
        """Select SearchUtility as the active database."""
        return "USE SearchUtility"
    @staticmethod
    def CreateServerStateTable():
        """DDL: marker table whose mere existence signals 'update in progress'."""
        return "CREATE TABLE IF NOT EXISTS state(dummy INT)"
    @staticmethod
    def DropServerStateTable():
        """DDL: remove the update-in-progress marker table."""
        return "DROP TABLE state"
    @staticmethod
    def CreateOccurancesTable():
        """DDL: word/document occurrence table (FKs into documents and vocabulary)."""
        return "CREATE TABLE IF NOT EXISTS occurances(ID INT PRIMARY KEY AUTO_INCREMENT,wordID INT,docID INT,pageNum INT," \
               "positionInPage INT,FOREIGN KEY(docID) REFERENCES documents(dID),FOREIGN KEY(wordID) REFERENCES " \
               "vocabulary(wID)) "
    @staticmethod
    def CreateVocabularyTable():
        """DDL: one row per distinct word."""
        return "CREATE TABLE IF NOT EXISTS vocabulary(wID INT PRIMARY KEY AUTO_INCREMENT,word VARCHAR(255))"
    @staticmethod
    def CreateDocTable():
        """DDL: one row per indexed document path."""
        return "CREATE TABLE IF NOT EXISTS documents(dID INT PRIMARY KEY AUTO_INCREMENT,file VARCHAR(500))"
    @staticmethod
    def InsertDocument():
        """Parameterized insert of a document path."""
        return "INSERT INTO documents(file) VALUES (%s)"
    @staticmethod
    def InsertWord():
        """Parameterized insert of a vocabulary word."""
        return "INSERT INTO vocabulary(word) VALUES (%s)"
    @staticmethod
    def InsertOccurance():
        """Parameterized insert of one word occurrence."""
        return "INSERT INTO occurances(wordID, docID, pageNum, positionInPage) VALUES (%s, %s, %s, %s)"
    @staticmethod
    def GetDocumentID():
        """Lookup of a document's dID by file path."""
        return "SELECT dID FROM documents WHERE file = %s"
    @staticmethod
    def GetWordID():
        """Lookup of a word's wID."""
        return "SELECT wID FROM vocabulary WHERE word = %s"
class SqlDBManager:
    """Manage the local MySQL "SearchUtility" database.

    Connects on construction, creates the database and tables if needed, and
    exposes helpers to register documents and record word occurrences.
    """
    __db = None
    __dbCursor = None
    __logger = SearchUtilityLogger.GetLoggerObj()

    def __init__(self):
        """Connect to the local MySQL server and initialize the schema."""
        # SECURITY NOTE(review): root credentials are hard-coded; they should
        # be loaded from configuration or the environment instead.
        self.__db = sql.connect(
            host="localhost",
            user="root",
            password="<PASSWORD>#"
        )
        if self.__db is not None:
            self.__logger.info("DB Connection Status: Pass")
            # Buffered cursor: results are fetched eagerly, so multiple
            # statements can run back to back without pending-result errors.
            self.__dbCursor = self.__db.cursor(buffered=True)
            self.__initializeDataSource()
        else:
            # Defensive only: mysql.connector normally raises on connection
            # failure rather than returning None.
            self.__logger.info("DB Connection Status: Fail")

    def __initializeDataSource(self):
        """Create the SearchUtility database and its tables if missing."""
        self.__ExecuteQuery(MySQLCommands.CreateDataBase())
        self.__ExecuteQuery(MySQLCommands.UseDataBase())
        self.__ExecuteQuery(MySQLCommands.CreateDocTable())
        self.__ExecuteQuery(MySQLCommands.CreateVocabularyTable())
        self.__ExecuteQuery(MySQLCommands.CreateOccurancesTable())
        self.__logger.info("SearchUtility db initialized")

    def __insertToDocTable(self, fullFilePath):
        """Return the docID for fullFilePath, inserting a new row if needed."""
        docExists, docID = self.__isDocExists(fullFilePath)
        if not docExists:
            self.__ExecuteQuery(MySQLCommands.InsertDocument(), (fullFilePath,))
            docIDs = self.__ExecuteQuery(MySQLCommands.GetDocumentID(), (fullFilePath,))
            docID = docIDs[0][0]
        return docID

    def __isDocExists(self, fullFilePath):
        """Return [exists, docID] for the given document path."""
        return self.__isExists(MySQLCommands.GetDocumentID(), fullFilePath)

    def __insertToWordTable(self, word):
        """Return the wID for word, inserting it into the vocabulary if new."""
        wordExists, wordID = self.__isWordExists(word)
        if not wordExists:
            self.__ExecuteQuery(MySQLCommands.InsertWord(), (word,))
            wordIDs = self.__ExecuteQuery(MySQLCommands.GetWordID(), (word,))
            wordID = wordIDs[0][0]
        return wordID

    def __isWordExists(self, word):
        """Return [exists, wID] for the given word."""
        return self.__isExists(MySQLCommands.GetWordID(), word)

    def __isExists(self, cmd, ID):
        """Run a single-parameter lookup query.

        Returns [found, rowID]; [False, -1] when no row matches.
        """
        IDs = self.__ExecuteQuery(cmd, (ID,))
        # BUG FIX: the original wrote `len(IDs) is 0 or IDs is None`, which
        # (a) compares an int with `is` (fragile identity check) and
        # (b) would raise TypeError on a None result before reaching the
        # None test. `not IDs` covers both None and an empty result set.
        if not IDs:
            return [False, -1]
        return [True, IDs[0][0]]

    def __insertToOccuranceTable(self, wordID, docID, pageNum, positionInPage):
        """Record one occurrence of a word at a document page/position."""
        self.__ExecuteQuery(MySQLCommands.InsertOccurance(), (wordID, docID, pageNum, positionInPage))

    # The method will update document table and
    # return new docID if new file name is passed
    # or return the existing docID
    def __fetchDocID(self, pdfFullFilePath):
        return self.__insertToDocTable(pdfFullFilePath)

    def __fetchWordID(self, word):
        """Return the wID for word, registering it if necessary."""
        return self.__insertToWordTable(word)

    def __ExecuteQuery(self, opr, params=None, debugMode=False):
        """Execute one (possibly multi-statement) query and commit.

        Returns the fetched rows of the last statement that produced a result
        set, or None when the final statement produced none.
        """
        response = None
        for res in self.__dbCursor.execute(opr, params, multi=True):
            if res.with_rows:
                response = res.fetchall()
                if debugMode:
                    self.__logger.debug("Rows produced by statement '{}':".format(res.statement))
                    self.__logger.debug(response)
            else:
                response = None
                if debugMode:
                    self.__logger.debug("Number of rows affected by statement '{}': {}".format(res.statement, res.rowcount))
        self.__db.commit()
        return response

    def UpdateListing(self, word, docID, pageNumber, positionInPage):
        """Record that `word` occurs in document `docID` at the given page/position."""
        # Check if this word is in the vocabulary table and get its wordID
        wordID = self.__fetchWordID(word)
        return self.__insertToOccuranceTable(wordID, docID, pageNumber, positionInPage)

    def GetDocumentID(self, pdfFullFilePath):
        """Return (registering it if necessary) the docID for a document path."""
        return self.__fetchDocID(pdfFullFilePath)

    def SetServerUpdateState(self, isUpdating):
        """Mark whether an indexing update is in progress via a marker table."""
        if isUpdating:
            self.__ExecuteQuery(MySQLCommands.CreateServerStateTable())
        else:
            self.__ExecuteQuery(MySQLCommands.DropServerStateTable())
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.