id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3502376 | #!/usr/bin/env python
import sys
import os
import shutil
import subprocess
import argparse
import logging
from pprint import pprint
import fileinput
from typing import Tuple
import dryable
from distutils.dir_util import copy_tree
# the make pdf command to fire.
CMD_MAKE_PDF = 'make pdf'
def get_target_dirs(read_from='list', list=None, path='.'):
    """
    Get target directories from multiple sources like a manual
    list, an input file or from the filesystem directories.

    Parameters
    ----------
    read_from : str, 'file' or 'ls' or 'list' (default='list')
        Reads target directories from a source.
        Options:
        - 'list', reads from a list of names given with `list`
          attribute (default=[])
        - 'file', reads from a file name given with `path`
          attribute (required).
        - 'ls', reads from system directories given in `path`
          attribute (default='.').
    list : list of str, optional
        The directory names (read_from='list').
    path : str
        Input file name (read_from='file') or base directory to
        scan (read_from='ls').

    Returns
    -------
    list of str, or None for an unknown `read_from` value.
    """
    # NOTE: the parameter is still called `list` (shadowing the builtin)
    # to stay backward compatible with keyword callers; the mutable
    # default [] was replaced by the None sentinel.
    if read_from == 'list':
        return list if list is not None else []
    if read_from == 'file':
        with open(path, 'r') as f:
            return [line.strip() for line in f]
    if read_from == 'ls':
        # join with `path` so the isdir() test works for any base
        # directory, not only the current working directory
        return [f for f in os.listdir(path)
                if os.path.isdir(os.path.join(path, f)) and not f.startswith('.')]
def ensure_path(path) -> str:
    """Create directory *path* (including parents) if missing; return *path*."""
    if os.path.exists(path):
        return path
    os.makedirs(path)
    return path
@dryable.Dryable([True, "dry-run"])
def trigger_build(dir) -> Tuple[bool, str]:
    """Run `make pdf` inside *dir* and return (success, output_or_error).

    With --dry-run active the dryable decorator short-circuits this
    function and returns (True, "dry-run") without executing anything.
    """
    logging.info(f'Building "{dir}".')
    # shell=True is needed because the command chains `cd` with make
    cmd = f'cd {dir}; {CMD_MAKE_PDF}'
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True)
    # hard limit of 20 minutes per build
    o, e = proc.communicate(timeout=20 * 60)
    e = e.decode('utf-8')
    o = o.decode('utf-8')
    retcode = proc.returncode
    # NOTE(review): success is determined by an empty stderr rather than
    # by `retcode`; a build that only prints warnings to stderr would be
    # reported as failed -- confirm this is intended.
    if e == '':
        logging.info(f'Build of "{dir}" has done with return code {retcode}.')
        logging.debug(f'Output: {o}')
        return (True, o)
    else:
        logging.error(
            f'Build of "{dir}" has failed with return code {retcode}.')
        logging.debug(f'Output: {o}')
        logging.debug(f'Error: {e}')
        return (False, e)
@dryable.Dryable([True, "dry-run"])
def copy_dist(dir) -> Tuple[bool, str]:
    """Copy build artifacts from *dir* into the local `.dist` folder.

    Returns (True, "") -- failures would raise instead of being reported
    through the return value.
    """
    dist_folder = '.dist'
    ensure_path(dist_folder)
    logging.info(f'Copying dist from "{dir}" to {dist_folder}.')
    conventional_dirs = ["exercise", "solution"]
    # Note: when *Weak Conventions* are not followed (see README.md) the scripts copies only
    # pdf and cpp files. Instead, when `exercise` and/or `solution` folders are present
    # the script copies only the two directories and their content.
    # TODO: move weak conventions to strict conventions will simplify the code.
    copy_list = []
    copy_conventional_list = []
    # one walk collects both the conventional directories and the
    # individual pdf/cpp files
    for root, dirs, files in os.walk(dir):
        for name in dirs:
            if name in conventional_dirs:
                copy_conventional_list.append(os.path.join(root, name))
        for name in files:
            if name.endswith(".pdf") or name.endswith(".cpp"):
                copy_list.append(os.path.join(root, name))
    logging.debug(copy_list)
    logging.debug(copy_conventional_list)
    if len(copy_conventional_list) == 0:
        # weak conventions: mirror each pdf/cpp file under .dist
        for name in copy_list:
            newdir = os.path.join(dist_folder, os.path.dirname(name))
            newfile = os.path.join(dist_folder, name)
            ensure_path(newdir)
            shutil.copy(name, newfile)
    else:
        # conventional layout: copy the whole exercise/solution trees
        for name in copy_conventional_list:
            copy_tree(name, os.path.join(dist_folder, name))
            # TODO: pdf all in one directory with name of parent
    # NOTE(review): the reported count is len(copy_list) even when the
    # conventional-directory branch was taken -- confirm this is intended.
    logging.info(f'Copied {len(copy_list)} files.')
    return (True, "")
def main(args, loglevel):
    """Build every target directory and copy the build artifacts.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command line arguments (read_from, path, list, ...).
    loglevel : int
        Logging level, e.g. logging.INFO.

    Returns
    -------
    int
        0 when every build succeeded, 1 otherwise.
    """
    logging.basicConfig(
        format="%(levelname)s: %(message)s", level=loglevel)
    # resolve the list of directories to build
    if args.read_from is None or args.read_from == 'ls':
        target_dirs = get_target_dirs('ls')
    elif args.read_from == 'file':
        target_dirs = get_target_dirs('file', path=args.path)
    elif args.read_from == 'list':
        target_dirs = get_target_dirs('list', list=args.list.split())
    else:
        target_dirs = []
    print('Building directories.')
    failed_no = 0
    build_no = 0
    for d in target_dirs:
        build_no += 1
        (build_ok, out) = trigger_build(d)
        # artifacts are copied even when the build failed; the copy
        # result is currently ignored
        (copy_ok, _) = copy_dist(d)
        if not build_ok:
            failed_no += 1
            # print(out)
            # TODO handle build failures
    print(
        f'Build completed with {build_no-failed_no} success and {failed_no} failed, total {build_no}.')
    return 0 if (failed_no == 0) else 1
# entry point
if __name__ == '__main__':
    # Build the command line interface and dispatch to main().
    parser = argparse.ArgumentParser(
        description="""
        Welcome to the building script for the high school assignments.
        You can build assignments source with a single command.
        (C) <NAME> <<EMAIL>>
        """,
        epilog="""
        As an alternative to the commandline, params can be placed
        in a file, one per line, and specified on the commandline
        like '%(prog)s @params.conf'.
        """,
        fromfile_prefix_chars='@')
    # parser.add_argument(
    #     "argument",
    #     help="pass ARG to the program",
    #     metavar="ARG")
    parser.add_argument(
        "--read-from",
        help="""
        Source from which reading the targets.
        Options are `ls`, `list` and `file`.
        """,
        metavar="source")
    parser.add_argument(
        "--path",
        help="The sources path. (Used with `--read-from` argument with `ls`, `file`).",
        metavar="path")
    parser.add_argument(
        "--list",
        help="The sources list. (Used with `--read-from list`).",
        metavar="list")
    parser.add_argument(
        "--dry-run",
        help="Do a trial run with actions performed.",
        action="store_true")
    parser.add_argument(
        "-v",
        "--verbose",
        help="Increase output verbosity.",
        action="store_true")
    args = parser.parse_args()
    # enable/disable the @dryable.Dryable decorated functions globally
    dryable.set(args.dry_run)
    # Setup logging
    if args.verbose:
        loglevel = logging.DEBUG
    else:
        loglevel = logging.INFO
    main(args, loglevel)
| StarcoderdataPython |
1920131 | <gh_stars>0
"""DDD17+ create configs.
Author: <NAME>
Email : <EMAIL>
"""
from __future__ import print_function
import os
from os.path import join
import json
import spiker
# configure path
# Output directory for the generated experiment config JSON files.
config_path = os.path.join(
    spiker.HOME, "workspace", "ddd17-cvpr", "exps", "configs",
    "ral-exps")
# NOTE(review): "trails" is presumably a typo for "trials" (repetitions
# per experiment); kept for compatibility.
num_trails = 5
# Input conditions: both channels, APS frames only, DVS events only.
conditions = ["full", "aps", "dvs"]
# Maps each condition to the channel id used by the training script.
channel_options = {
    "full": 2,
    "aps": 1,
    "dvs": 0}
# DDD17 recordings: first 15 entries are night drives, the remaining
# 15 are day drives (see the idx-1 / idx+14 indexing below).
data_list = [
    "rec1499656391_export.hdf5",
    "rec1499657850_export.hdf5",
    "rec1501649676_export.hdf5",
    "rec1501650719_export.hdf5",
    "rec1501994881_export.hdf5",
    "rec1502336427_export.hdf5",
    "rec1502337436_export.hdf5",
    "rec1498946027_export.hdf5",
    "rec1501651162_export.hdf5",
    "rec1499025222_export.hdf5",
    "rec1502338023_export.hdf5",
    "rec1502338983_export.hdf5",
    "rec1502339743_export.hdf5",
    "rec1498949617_export.hdf5",
    "rec1502599151_export.hdf5",
    "rec1500220388_export.hdf5",
    "rec1500383971_export.hdf5",
    "rec1500402142_export.hdf5",
    "rec1501288723_export.hdf5",
    "rec1501349894_export.hdf5",
    "rec1501614399_export.hdf5",
    "rec1502241196_export.hdf5",
    "rec1502825681_export.hdf5",
    "rec1499023756_export.hdf5",
    "rec1499275182_export.hdf5",
    "rec1499533882_export.hdf5",
    "rec1500215505_export.hdf5",
    "rec1500314184_export.hdf5",
    "rec1500329649_export.hdf5",
    "rec1501953155_export.hdf5"]
# Per-recording [head, tail] frame counts to cut, aligned 1:1 with
# data_list above.
frame_cuts = [
    [2000, 4000],
    [2600, 1200],
    [500, 500],
    [500, 500],
    [200, 800],
    [100, 400],
    [100, 400],
    [3000, 1000],
    [850, 4500],
    [200, 1500],
    [200, 1500],
    [200, 2500],
    [200, 1500],
    [1000, 2200],
    [1500, 3000],
    [500, 200],
    [500, 1000],
    [200, 2000],
    [200, 1000],
    [200, 1500],
    [200, 1500],
    [500, 1000],
    [500, 1700],
    [800, 2000],
    [200, 1000],
    [500, 800],
    [200, 2200],
    [500, 500],
    [200, 600],
    [500, 1500]]
# Generate one JSON config per (trial, recording, condition) and print
# Makefile-style targets for the generated experiments to stdout.
experiment_id = 1
for trail_idx in range(1, num_trails+1):
    # for night
    for idx in range(1, 16):
        # for each condition
        for cond in conditions:
            # construct file name
            model_base = \
                "steering-night-%d-%s-%d" % (idx, cond, trail_idx)
            steering_dict = {}
            steering_dict["model_name"] = model_base
            # night recordings occupy data_list[0..14]
            steering_dict["data_name"] = data_list[idx-1]
            steering_dict["channel_id"] = channel_options[cond]
            steering_dict["stages"] = 3
            steering_dict["blocks"] = 5
            steering_dict["filter_list"] = \
                [[16, 16, 16], [32, 32, 32], [64, 64, 64]]
            steering_dict["nb_epoch"] = 200
            steering_dict["batch_size"] = 64
            steering_dict["frame_cut"] = frame_cuts[idx-1]
            with open(join(config_path, model_base+".json"), "w") as f:
                json.dump(steering_dict, f)
                # NOTE(review): redundant -- the `with` block already closes f
                f.close()
            print ("ral-experiment-%d:" % (experiment_id))
            print ("    KERAS_BACKEND=tensorflow PYTHONPATH=$(PYTHONPATH) "
                   "python ./exps/resnet_steering.py with "
                   "./exps/configs/ral-exps/"+model_base+".json\n")
            experiment_id += 1
    # for day
    for idx in range(1, 16):
        # for each condition
        for cond in conditions:
            # construct file name
            model_base = \
                "steering-day-%d-%s-%d" % (idx, cond, trail_idx)
            steering_dict = {}
            steering_dict["model_name"] = model_base
            # day recordings occupy data_list[15..29]
            steering_dict["data_name"] = data_list[idx+14]
            steering_dict["channel_id"] = channel_options[cond]
            steering_dict["stages"] = 3
            steering_dict["blocks"] = 5
            steering_dict["filter_list"] = \
                [[16, 16, 16], [32, 32, 32], [64, 64, 64]]
            steering_dict["nb_epoch"] = 200
            steering_dict["batch_size"] = 64
            steering_dict["frame_cut"] = frame_cuts[idx+14]
            with open(join(config_path, model_base+".json"), "w") as f:
                json.dump(steering_dict, f)
                # NOTE(review): redundant -- the `with` block already closes f
                f.close()
            print ("ral-experiment-%d:" % (experiment_id))
            print ("    KERAS_BACKEND=tensorflow PYTHONPATH=$(PYTHONPATH) "
                   "python ./exps/resnet_steering.py with "
                   "./exps/configs/ral-exps/"+model_base+".json\n")
            experiment_id += 1
| StarcoderdataPython |
3238088 | import unittest
from urllib2 import HTTPError
from flotilla.scheduler.coreos import CoreOsAmiIndex
from mock import MagicMock, patch
CHANNEL = 'stable'
VERSION = '835.9.0'
REGION = 'us-east-1'
@patch('urllib2.urlopen')
class TestCoreOsAmiIndex(unittest.TestCase):
    """Tests for CoreOsAmiIndex AMI lookups; all HTTP access is mocked."""
    def setUp(self):
        self.coreos = CoreOsAmiIndex()

    def test_get_ami(self, mock_open):
        # A known channel/version/region resolves to the hvm AMI id.
        self._mock_amis(mock_open)
        ami = self.coreos.get_ami(CHANNEL, VERSION, REGION)
        self.assertEqual('ami-123456', ami)

    def test_get_ami_not_found(self, mock_open):
        # A 404 from the release index yields None instead of raising.
        mock_open.side_effect = HTTPError('url', 404, 'Not Found', {}, None)
        ami = self.coreos.get_ami(CHANNEL, VERSION, REGION)
        self.assertIsNone(ami)

    def test_get_ami_cache(self, mock_open):
        # A repeated lookup is served from cache: only one HTTP request.
        self._mock_amis(mock_open)
        ami = self.coreos.get_ami(CHANNEL, VERSION, REGION)
        self.assertEqual('ami-123456', ami)
        ami = self.coreos.get_ami(CHANNEL, VERSION, REGION)
        self.assertEqual('ami-123456', ami)
        # assertEqual replaces the deprecated assertEquals alias
        self.assertEqual(1, mock_open.call_count)

    @staticmethod
    def _mock_amis(mock_open):
        # Minimal release-index JSON payload returned by the mocked urlopen.
        ami_list = '{"amis":[{"name":"us-east-1","hvm":"ami-123456"}]}'
        mock_response = MagicMock()
        mock_response.read.return_value = ami_list
        mock_open.return_value = mock_response
| StarcoderdataPython |
1770819 | # -*- coding: utf-8 -*-
# pylint: disable=unused-argument
"""Functional tests."""
from fm_database.settings import DevConfig, ProdConfig, TestConfig, get_config
def test_retrieving_settings():
    """Each override key maps to its config class; unknown keys fall back to dev."""
    expected = {
        "test": TestConfig,
        "dev": DevConfig,
        "prod": ProdConfig,
        "wrong": DevConfig,
    }
    for key, config_class in expected.items():
        assert get_config(override_default=key) == config_class
| StarcoderdataPython |
6549777 | <reponame>kaushikcfd/numloopy<filename>test/test_essentials.py
import sys
import numpy
import pyopencl as cl
import numloopy as nplp
from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2 # noqa: F401
try:
import faulthandler
except ImportError:
pass
else:
faulthandler.enable()
from pyopencl.tools import pytest_generate_tests_for_pyopencl \
as pytest_generate_tests
__all__ = [
"pytest_generate_tests",
"cl" # 'cl.create_some_context'
]
def test_reshape(ctx_factory):
    """Compare numloopy's reshape against numpy's reference result."""
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    def func_nplp():
        np = nplp.begin_computation_stack()
        a = np.arange(8)
        b = a.reshape((2, 4), order='F')
        knl = np.end_computation_stack([a, b])
        evt, (out_a, out_b) = knl(queue)
        return out_a.get(), out_b.get()

    def func_np():
        import numpy as np
        a = np.arange(8)
        b = a.reshape((2, 4), order='F')
        return a, b

    # BUGFIX: the numloopy variant was never executed -- both sides used
    # to call func_np(), making the comparison vacuous.
    nplp_a, nplp_b = func_nplp()
    np_a, np_b = func_np()
    assert numpy.allclose(np_a, nplp_a)
    assert numpy.allclose(np_b, nplp_b)
def test_broadcast(ctx_factory):
    """Compare numloopy's broadcast multiply + sum against numpy."""
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    def func_nplp():
        np = nplp.begin_computation_stack()
        A = np.arange(9).reshape((3, 3))  # noqa: N806
        x = np.arange(3)
        y = np.sum(A * x, axis=1)
        knl = np.end_computation_stack([A, x, y])
        evt, (out_A, out_x, out_y) = knl(queue)
        return out_A.get(), out_x.get(), out_y.get()

    def func_np():
        import numpy as np
        A = np.arange(9).reshape((3, 3))  # noqa: N806
        x = np.arange(3)
        y = np.sum(A * x, axis=1)
        return A, x, y

    # BUGFIX: the numloopy variant was never executed -- both sides used
    # to call func_np(), making the comparison vacuous.
    nplp_A, nplp_x, nplp_y = func_nplp()
    np_A, np_x, np_y = func_np()
    assert numpy.allclose(np_A, nplp_A)
    assert numpy.allclose(np_x, nplp_x)
    assert numpy.allclose(np_y, nplp_y)
if __name__ == "__main__":
    if len(sys.argv) > 1:
        # run a single test expression given on the command line
        # (trusted developer input only -- exec on untrusted input is unsafe)
        exec(sys.argv[1])
    else:
        from pytest import main
        main([__file__])
| StarcoderdataPython |
1938600 | from hashtable.hashtable import Hashtable
def repeat_word(long_string):
    """Return the first word that occurs twice in *long_string*.

    Matching is case-insensitive and words are split on single spaces.
    Returns the string 'No matching Words' when no word repeats.
    """
    words_ht = Hashtable()
    for word in long_string.lower().split(' '):
        if words_ht.contains(word):
            # seen before -> this is the first repeated word
            return word
        # remember the word; the hashtable stores word -> its hash
        word_hash = words_ht.hash(word)
        words_ht.add(word, word_hash)
    return 'No matching Words'
| StarcoderdataPython |
1835225 | <filename>divia_req.py
from divia_api import DiviaAPI
from datetime import date, datetime
from browser import window
api = DiviaAPI()
def bridge(ligne, sens, arret):
    """Return minutes-until-departure (as strings) for a line/direction/stop."""
    departures = api.find_stop(ligne, arret, sens).totem()
    now = datetime.now()
    return [str(round((departure - now).total_seconds() / 60))
            for departure in departures]
window.diviaApiBridge = bridge
| StarcoderdataPython |
4853346 | import logging
import subprocess
import tempfile
import typing
import const
import models
import schemas
import os
import re
import util
_LOGGER = logging.getLogger(__name__)
class WGAlreadyStartedError(Exception):
    # Raised when `wg-quick up` reports the interface already exists.
    pass
class WGAlreadyStoppedError(Exception):
    # Raised when `wg-quick down` reports the interface is not up.
    pass
class WGPermissionsError(Exception):
    # Raised when the user may not access the wireguard interface.
    pass
class TempServerFile():
    """Context manager that writes *server*'s configuration to a temporary
    `<interface>.conf` file, yields the file path and removes the
    temporary directory on exit."""
    def __init__(self, server: schemas.WGServer):
        self.server = server
        self.td = tempfile.TemporaryDirectory(prefix="wg_man_")
        self.server_file = os.path.join(self.td.name, f"{server.interface}.conf")
    def __enter__(self):
        # materialize the configuration so wg-quick can read it from disk
        with open(self.server_file, "w+") as f:
            f.write(self.server.configuration)
        return self.server_file
    def __exit__(self, type, value, traceback):
        self.td.cleanup()
def _run_wg(server: schemas.WGServer, command):
    """Run `wg <command>` and return the raw output bytes.

    Raises WGPermissionsError on permission failures.
    NOTE(review): any other failure is swallowed and None is returned
    (the `except` falls through without re-raising) -- callers must
    handle a None result; confirm this is intended.
    """
    try:
        output = subprocess.check_output(const.CMD_WG_COMMAND + command, stderr=subprocess.STDOUT)
        return output
    except Exception as e:
        # assumes e is a CalledProcessError carrying `output` -- TODO confirm
        if b'Operation not permitted' in e.output:
            raise WGPermissionsError("The user has insufficientt permissions for interface %s" % server.interface)
def is_installed():
    """Return True when the `wg` binary is present and answers as expected."""
    result = subprocess.check_output(const.CMD_WG_COMMAND)
    return result == b'' or b'interface' in result
def generate_keys() -> typing.Dict[str, str]:
    """Create a wireguard keypair via `wg genkey` / `wg pubkey`.

    Returns a dict with 'private_key' and 'public_key' (stripped strings).
    """
    raw_private = subprocess.check_output(const.CMD_WG_COMMAND + ["genkey"])
    # derive the public key by piping the private key into `wg pubkey`
    raw_public = subprocess.check_output(
        const.CMD_WG_COMMAND + ["pubkey"],
        input=raw_private
    )
    return {
        "private_key": raw_private.decode("utf-8").strip(),
        "public_key": raw_public.decode("utf-8").strip(),
    }
def generate_psk():
    """Create a wireguard pre-shared key via `wg genpsk`."""
    raw = subprocess.check_output(const.CMD_WG_COMMAND + ["genpsk"])
    return raw.decode("utf-8").strip()
def start_interface(server: schemas.WGServer):
    """Bring up *server*'s interface with `wg-quick up`.

    Returns the command output on success.
    Raises WGAlreadyStartedError when the interface is already up; any
    other failure is re-raised instead of being silently swallowed.
    """
    with TempServerFile(server) as server_file:
        try:
            output = subprocess.check_output(const.CMD_WG_QUICK + ["up", server_file], stderr=subprocess.STDOUT)
            return output
        except subprocess.CalledProcessError as e:
            if b'already exists' in e.output:
                raise WGAlreadyStartedError("The wireguard device %s is already started." % server.interface)
            # previously any other failure was swallowed and None was
            # returned; propagate it so callers see the real error
            raise
def stop_interface(server: schemas.WGServer):
    """Bring down *server*'s interface with `wg-quick down`.

    Returns the command output on success.
    Raises WGAlreadyStoppedError when the interface is already down; any
    other failure is re-raised instead of being silently swallowed.
    """
    with TempServerFile(server) as server_file:
        try:
            output = subprocess.check_output(const.CMD_WG_QUICK + ["down", server_file], stderr=subprocess.STDOUT)
            return output
        except subprocess.CalledProcessError as e:
            if b'is not a WireGuard interface' in e.output:
                raise WGAlreadyStoppedError("The wireguard device %s is already stopped." % server.interface)
            # previously any other failure was swallowed and None was
            # returned; propagate it so callers see the real error
            raise
def restart_interface(server: schemas.WGServer):
    """Restart *server*'s interface; an already-stopped interface is fine."""
    try:
        stop_interface(server)
    except WGAlreadyStoppedError:
        pass
    start_interface(server)
def is_running(server: schemas.WGServer):
    """Return True when the wireguard interface of *server* appears to be up."""
    try:
        output = _run_wg(server, ["show", server.interface])
        # _run_wg returns None when it swallowed an error
        if output is None or b'Unable to access interface: No such device' in output:
            return False
    except Exception as e:
        # NOTE(review): assumes the exception carries an `output`
        # attribute (CalledProcessError); exceptions whose output does
        # not contain 'No such device' fall through to `return True`
        # without re-raising -- confirm this is intended.
        if b'No such device' in e.output:
            return False
    return True
def add_peer(server: schemas.WGServer, peer: schemas.WGPeer):
    """Register *peer* on *server*; True on success, False on any error."""
    command = ["set", server.interface,
               "peer", peer.public_key,
               "allowed-ips", peer.address]
    try:
        return _run_wg(server, command) == b''
    except Exception as e:
        _LOGGER.exception(e)
        return False
def remove_peer(server: schemas.WGServer, peer: schemas.WGPeer):
    """Remove *peer* from *server*; True on success, False on any error."""
    command = ["set", server.interface, "peer", peer.public_key, "remove"]
    try:
        return _run_wg(server, command) == b''
    except Exception as e:
        _LOGGER.exception(e)
        return False
def get_stats(server: schemas.WGServer):
    """Parse `wg show <interface>` output into a list of per-peer dicts.

    Each dict has keys: public_key, client_endpoint, allowed_ips,
    handshake, rx, tx (the last four may be None for peers that never
    connected). Returns [] on error or when the interface has no output.
    """
    try:
        output = _run_wg(server, ["show", server.interface])
        if not output:
            return []
        # each peer paragraph starts with "peer:" and ends at a blank line;
        # the trailing "\n" guarantees the last paragraph also terminates
        regex = r"peer:.*?^\n"
        test_str = output.decode("utf-8") + "\n"
        peers = []
        peers_raw = re.findall(regex, test_str, re.MULTILINE | re.DOTALL)
        for peer in peers_raw:
            peer = peer.strip()
            # keep only the value part of each "key: value" line
            lines = [x.split(": ")[1] for x in peer.split("\n")]
            if len(lines) == 2:
                # peer that has never connected: only key + allowed ips
                public_key, allowed_ips = lines
                peers.append(dict(
                    public_key=public_key,
                    client_endpoint=None,
                    allowed_ips=allowed_ips,
                    handshake=None,
                    rx=None,
                    tx=None
                ))
            elif len(lines) == 5 or len(lines) == 6:
                # connected peer; with 6 lines the extra entry is the psk
                public_key = lines[0]
                client_endpoint, allowed_ips, handshake, rx_tx = lines[-4:]  # [1] is sometimes psk
                # "X received, Y sent" -> split into rx and tx
                rx = re.match(r"^(.*) received", rx_tx).group(1)
                tx = re.match(r"^.*, (.*)sent", rx_tx).group(1)
                peers.append(dict(
                    public_key=public_key,
                    client_endpoint=client_endpoint,
                    allowed_ips=allowed_ips,
                    handshake=handshake,
                    rx=rx,
                    tx=tx
                ))
            else:
                # NOTE(review): this ValueError is constructed but never
                # raised -- unexpected formats are silently skipped.
                ValueError("We have not handled peers with line number of %s" % str(len(lines)))
        return peers
    except Exception as e:
        _LOGGER.exception(e)
        return []
def move_server_dir(interface, interface1):
    """Rename the config directory and file of *interface* to *interface1*."""
    src_dir = const.SERVER_DIR(interface)
    src_file = const.SERVER_FILE(interface)
    dst_dir = const.SERVER_DIR(interface1)
    dst_file = src_file.replace(f"{interface}.conf", f"{interface1}.conf")
    os.rename(src_file, dst_file)
    os.rename(src_dir, dst_dir)
def generate_config(obj: typing.Union[typing.Dict[str, typing.Any], schemas.WGServer]):
    """Render a wireguard configuration from a Jinja template.

    *obj* is either a dict with "server" and "peer" keys (renders the
    peer template) or a WGServer instance (renders the server template).
    """
    if isinstance(obj, dict) and "server" in obj and "peer" in obj:
        template = "peer.j2"
    elif isinstance(obj, schemas.WGServer) or isinstance(obj, models.WGServer):
        template = "server.j2"
    else:
        raise ValueError("Incorrect input type. Should be WGPeer or WGServer")
    result = util.jinja_env.get_template(template).render(
        data=obj
    )
    return result
| StarcoderdataPython |
112696 | <reponame>simonwoerpel/runpandarun
import os
from ..util import get_files
from .backend import Backend
def ensure_directory(fp):
    """Make sure *fp* exists as a directory; return its absolute path."""
    absolute = os.path.abspath(fp)
    if not os.path.isdir(absolute):
        os.makedirs(absolute)
    return absolute
class FilesystemBackend(Backend):
    """Storage backend that persists content on the local filesystem.

    Paths are resolved relative to `config.data_root` through the base
    class's `get_path` helper.
    """
    def get_base_path(self):
        # root directory for all stored data; created on demand
        return ensure_directory(self.config.data_root)
    def exists(self, path):
        # True when the target exists as either a file or a directory
        p = self.get_path(path)
        return any((os.path.isfile(p), os.path.isdir(p)))
    def store(self, path, content, publish=False):
        """Write text *content* to *path* (creating parents); return the full path.

        NOTE(review): `publish` is accepted for interface compatibility
        but ignored by this backend -- confirm against other backends.
        """
        path = self.get_path(path)
        ensure_directory(os.path.split(path)[0])
        with open(path, 'w') as f:
            f.write(content)
        return path
    def _fetch(self, path):
        # read and return the stripped text content of *path*
        path = self.get_path(path)
        with open(path) as f:
            content = f.read().strip()
        return content
    def get_children(self, path, condition=lambda x: True):
        # list files below *path* that satisfy *condition*
        path = self.get_path(path)
        return get_files(path, condition)
| StarcoderdataPython |
4926947 |
import asyncio
import itertools
from aiostream import stream
async def number_generator(name, sleepytime):
    """Asynchronously yield (name, i) for i = 1..10, sleeping *sleepytime*
    seconds before each value."""
    for count in range(1, 11):
        await asyncio.sleep(sleepytime)
        yield (name, count)
async def main():
    """Interleave two generators of different speeds and print each item
    as soon as it is produced."""
    generator1 = number_generator("foo", 1)
    generator2 = number_generator("bar", 2)
    # stream.merge yields from whichever generator has a value ready
    async for data in stream.merge(generator1, generator2):
        name, num = data
        print(f"{name} number {num}")
if __name__ == '__main__':
    # script entry point: run the demo event loop
    asyncio.run(main())
5062041 | <filename>src/tom/_mailbox/imapclient.py
import sys
import socket
import select
import imapclient as _imapclient
from imapclient import *
class IMAPClient(_imapclient.IMAPClient):
    """
    This class provides some extensions to `imapclient.IMAPClient`
    """
    @imapclient.require_capability('IDLE')
    def idle_check(self, timeout: float = None, selfpipe: int = None):
        """
        This extends imapclient.IMAPClient.idle_check with elegant exiting mechanism via selfpipe
        This will block until one of the following become true
        1. an IDLE response is received
        2. `timeout` seconds elapsed
        3. the file descriptor `selfpipe` becomes ready to read
        :param timeout: operation timeout
        :param selfpipe: a file descriptor
        :return: None if `selfpipe` is ready, otherwise same as `imapclient.IMAPClient.idle_check`
        """
        sock = self._sock
        # make the socket non-blocking so the timeout can be
        # implemented for this call
        sock.settimeout(None)
        sock.setblocking(0)
        try:
            resps = []
            # wait on the IMAP socket and, when given, the selfpipe fd
            rlist = [sock]
            if selfpipe is not None:
                rlist.append(selfpipe)
            rs, _, _ = select.select(rlist, [], [], timeout)
            if rs:
                # selfpipe readiness means "stop idling": return None
                if selfpipe in rs:
                    return
                # drain every complete line already buffered on the socket
                while True:
                    try:
                        line = self._imap._get_line()
                    except (socket.timeout, socket.error):
                        # no more complete lines available right now
                        break
                    except IMAPClient.AbortError:
                        # An imaplib.IMAP4.abort with "EOF" is raised
                        # under Python 3
                        err = sys.exc_info()[1]
                        if 'EOF' in err.args[0]:
                            break
                        else:
                            raise
                    else:
                        resps.append(imapclient._parse_untagged_response(line))
            return resps
        finally:
            # restore blocking mode and the configured read timeout
            sock.setblocking(1)
            self._set_read_timeout()
| StarcoderdataPython |
6643666 | """Recommend API.
"""
from copy import deepcopy
import numpy as np
from catalog import SimpleCatalog, Track
from cf.cf_hidden_feature import get_hidden_feature_matrix_GD
import itertools
from API.user import train_user_taste_model
# TODO: implement classes: UserTasteModel.
class RecommendationEngine(object):
    """Collaborative-filtering recommendation engine.

    Wraps a track catalog, learned per-track hidden feature vectors and
    per-user taste models, and produces track recommendations.
    NOTE(review): this module uses dict.iteritems()/iterkeys(), i.e. it
    targets Python 2.
    """
    def __init__(self, catalog=None):
        # fall back to the bundled example catalog when none is given
        if catalog is None:
            import data
            catalog = SimpleCatalog(data.get_example_tracks())
        self.__catalog = catalog
        # track_id -> hidden feature vector; filled by train() or
        # update_hidden_feature()
        self._hidden_feature = {}
    @property
    def catalog(self):
        return self.__catalog
    @property
    def hidden_feature(self):
        return self._hidden_feature
    def train(self, ratings):
        """Learn user weights and track hidden features from *ratings*
        via gradient descent; also caches the ratings for later use."""
        # fixed hyper-parameters for the factorization
        k = 5
        learn_rate = 0.001
        lambda_rate = 0.04
        max_iter = 10000
        GD_method = 1
        user_weight, hidden_feature, res_norm, user_index, song_index = (
            get_hidden_feature_matrix_GD(
                ratings, k, learn_rate, lambda_rate, max_iter, GD_method)
        )
        # re-key the learned matrices from row indices to user/track ids
        self._user_weight = dict([(user_id, user_weight[idx])
                                  for user_id, idx in user_index.iteritems()])
        computed_hidden_feature = dict([(track_id, hidden_feature[idx])
                                        for track_id, idx in song_index.iteritems()])
        self._hidden_feature.update(computed_hidden_feature)
        self._res_norm = res_norm
        self._ratings = ratings
        self._user_models = {}
        # Make sure catalog includes tracks in `computed_hidden_feature`.
        for track_id in computed_hidden_feature:
            if not self.__catalog.get_track_by_id(track_id):
                self.__catalog[track_id] = Track(track_data={"id": track_id})
    def train_partial(self, ratings):
        """The incremental training of models.
        """
        raise NotImplementedError
    def get_user_model(self, user_id):
        # returns None when no model has been trained for this user yet
        return self._user_models.get(user_id)
    def train_user_taste_model(self, ratings):
        """Fit a taste model from (track_id -> rating) pairs; tracks
        without a hidden feature vector are skipped."""
        X = []
        y = []
        for track_id, rating in ratings.iteritems():
            track_hidden_features = self.get_track_hidden_features(track_id)
            if track_hidden_features is None:
                continue
            X.append(track_hidden_features)
            y.append(int(rating))
        user_model = train_user_taste_model(np.array(X), np.array(y))
        return user_model
    def update_user_model(self, user_id):
        """(Re)train and cache the taste model for *user_id*."""
        ratings = self._ratings.get(user_id)
        assert ratings is not None, 'No ratings found for user %s' % user_id
        user_model = self.train_user_taste_model(ratings)
        self._user_models[user_id] = user_model
        return user_model
    def recommend_by_user_model(self, user_model, num=10):
        """Recommend n tracks from catalog based on user's taste model.
        :param catalog: a Catalog object.
        :param user_id: a string.
        :param num: the number of tracks to recommend.
        :return recommended_tracks: a list of Track objects.
        """
        pred_ratings = self.predict_all_ratings(user_model)
        # sampling (rather than top-n) adds variety to recommendations
        sampled_track_ids = _sample_tracks_from_ratings(pred_ratings, num, None)
        return [self.__catalog[i] for i in sampled_track_ids]
    def recommend_by_tracks(self, seed_track_ids, num):
        raise NotImplementedError
    def recommend(self, user_id=None, seed_track_ids=None, num=10):
        """Dispatch to user-based or track-seeded recommendation."""
        if user_id is None and seed_track_ids is None:
            raise ValueError('Please specify either user_id or seed_track_ids!')
        if user_id is not None:
            # lazily train the user's taste model on first use
            user_model = self.get_user_model(user_id)
            if user_model is None:
                user_model = self.update_user_model(user_id)
            return self.recommend_by_user_model(user_model, num)
        else:
            return self.recommend_by_tracks(seed_track_ids, num)
    def get_track_hidden_features(self, track_id):
        assert hasattr(self, '_hidden_feature'), 'No hidden features found for track %s.' % track_id
        return self._hidden_feature.get(track_id)
    def get_user_ids(self, offset=0, limit=10):
        # paginated view over the known user ids
        assert hasattr(self, '_ratings'), 'No ratings data provided.'
        return list(
            itertools.islice(self._ratings.iterkeys(), offset, offset+limit)
        )
    def get_track(self, track_id):
        # deep copy so callers cannot mutate the catalog entry
        return deepcopy(self.__catalog[track_id])
    def get_tracks(self, offset=0, limit=10):
        # paginated view over the catalog tracks
        assert self.__catalog is not None
        track_ids = self.__catalog.get_track_ids(offset=offset, limit=limit)
        return [self.__catalog[i] for i in track_ids]
    def get_ratings_by_user(self, user_id):
        assert hasattr(self, '_ratings'), 'No ratings data provided.'
        return self._ratings.get(user_id)
    def predict_rating(self, user_taste_model, track):
        """
        :return rating: an integer from 1 to 5.
        """
        fea = self.get_track_hidden_features(track.id)
        # the model expects a 2-D (1, n_features) input
        return user_taste_model.predict(fea.reshape(1, -1))
    def predict_all_ratings(self, user_taste_model):
        """
        :return ratings: a dict mapping track_id to predicted rating.
        """
        ratings = dict(
            (track.id, self.predict_rating(user_taste_model, track))
            for track in self.__catalog.tracks()
        )
        return ratings
    def update_hidden_feature(self, input_hidden_feature):
        # merge externally computed hidden features into the engine
        self._hidden_feature.update(input_hidden_feature)
def __rating_to_prob(rating):
    """Map a 1-5 rating to a non-negative sampling weight.

    Exponential scale: each additional rating step doubles the weight,
    so a track rated one star higher is twice as likely to be sampled.
    """
    weight = 2.0 ** rating
    return weight
def _sample_tracks_from_ratings(ratings, n, options):
    """Sample up to *n* track ids without replacement, weighted by rating.

    :param ratings: dict mapping track_id -> rating (1-5).
    :param options: optional dict; only 'random_seed' is honored.
    :return track_ids: a list of string ids for tracks.
    """
    # TODO: allow filtering out certain track ids specified in `options`.
    track_ids_and_ratings = ratings.items()
    raw_rating_numbers = [x[1] for x in track_ids_and_ratings]
    # NOTE(review): relies on Python 2 `map` returning a list; under
    # Python 3 np.array(map(...)) would build a 0-d object array.
    probs = np.array(map(__rating_to_prob, raw_rating_numbers))
    probs = probs / max(probs.sum(), 1.)
    options = options or {}
    random_seed = options.get('random_seed')
    if random_seed is not None:
        np.random.seed(seed=random_seed)
    return np.random.choice([x[0] for x in track_ids_and_ratings],
                            size=min(n, len(probs)), p=probs, replace=False)
def recommend_by_user_model(user_model, hidden_feature, num_tracks):
    """Build a throwaway engine over *hidden_feature* and recommend tracks."""
    track_catalog = SimpleCatalog([{'id': track_id} for track_id in hidden_feature])
    engine = RecommendationEngine(catalog=track_catalog)
    engine.update_hidden_feature(hidden_feature)
    recommended = engine.recommend_by_user_model(user_model, num=num_tracks)
    return recommended if recommended else []
def train_user_model(ratings_one_user, hidden_feature):
    """Train a taste model for one user's ratings over *hidden_feature* tracks."""
    track_catalog = SimpleCatalog([{'id': track_id} for track_id in hidden_feature])
    engine = RecommendationEngine(catalog=track_catalog)
    engine.update_hidden_feature(hidden_feature)
    return engine.train_user_taste_model(ratings_one_user)
def train_cf(all_ratings):
    """Run collaborative filtering on *all_ratings*; return track hidden features."""
    engine = RecommendationEngine(catalog=None)
    engine.train(all_ratings)
    return engine.hidden_feature
| StarcoderdataPython |
3476127 | <reponame>EHakansson/nvdb2osm<filename>waydb.py
import logging
from functools import cmp_to_key
from twodimsearch import TwoDimSearch
from geometry_search import GeometrySearch
from geometry_basics import *
from merge_tags import merge_tags
from proj_xy import latlon_str
from shapely_utils import *
from nseg_tools import *
from osmxml import *
GEO_FILL_LENGTH = 50
_log = logging.getLogger("waydb")
# join_ways()
#
# Join ways that have matching endpoints. We also update the STARTAVST/SLUTAVST and
# SHAPE_LEN NVDB tags for debugging and gap resolving purposes
#
def join_ways(ways):
    """Join ways with matching endpoints into longer ways.

    All ways are expected to share one RLID. STARTAVST/SLUTAVST and
    SHAPE_LEN tags are updated on join so they stay usable for
    debugging and gap resolution. Modifies *ways* (and the kept way
    objects) in place; returns the list of joined ways.
    """
    if len(ways) == 1:
        return ways
    # Due to snapping some very short segments can be zero length, sort to make sure
    # we go through in order, otherwise we can miss to include the zero length segments
    def startavst(w):
        return w.tags["STARTAVST"]
    ways.sort(key=startavst)
    w1 = ways.pop(0)
    ways_out = [ w1 ]
    if w1.way[0] == w1.way[-1]:
        # only okay if all are the same
        for p in w1.way[1:]:
            if p != w1.way[0]:
                _log.error(f"w1 {w1}")
                _log.error(f"w1.way {w1.way}")
                raise RuntimeError("Closed way not expected here")
    while len(ways) > 0:
        match = False
        for idx, w2 in enumerate(ways):
            # w2 continues after w1: append it
            if w1.way[-1] == w2.way[0]:
                w1.way += w2.way[1:]
                w1.tags["SLUTAVST"] = w2.tags["SLUTAVST"]
                w1.tags["SHAPE_LEN"] += w2.tags["SHAPE_LEN"]
                match = True
                del ways[idx]
                break
            # w2 precedes w1: prepend it
            if w1.way[0] == w2.way[-1]:
                w1.way = w2.way[:-1] + w1.way
                w1.tags["STARTAVST"] = w2.tags["STARTAVST"]
                w1.tags["SHAPE_LEN"] += w2.tags["SHAPE_LEN"]
                match = True
                del ways[idx]
                break
        if not match:
            # In some cases RLID is split in disconnected ways
            w1 = ways.pop(0)
            ways_out.append(w1)
    return ways_out
# remove_short_segments_and_redundant_points()
#
# Remove segments shorter than 'min_seg_len' unless the point is endpoint or in the
# 'point_keepers' set. If the whole way is shorter than minimum segment it's reduced
# to one point
#
def remove_short_segments_and_redundant_points(way1, min_seg_len, point_keepers=None):
    """Drop points creating segments shorter than *min_seg_len*.

    Endpoints and points in *point_keepers* are preserved. If the whole
    way is shorter than the minimum segment it is reduced to one point.
    Returns the (possibly new) point list; the original list is returned
    unchanged when nothing was removed.
    """
    way = way1.way
    # compare squared distances to avoid sqrt in the loop
    dsq = min_seg_len * min_seg_len
    new_way = []
    if point_keepers is None:
        point_keepers = set()
    for idx, p in enumerate(way):
        if idx == 0:
            new_way.append(p)
            may_remove = False
        elif p == new_way[-1]:
            pass # duplicate
        elif p in point_keepers or idx == len(way) - 1:
            # a point we must keep; if it lands too close to the last
            # kept point, try to drop that one instead
            if dist2dsq(new_way[-1], p) < dsq:
                if not may_remove:
                    if new_way[-1] == way[0] and p == way[-1]:
                        # special case with two closely spaced points
                        if not way[-1] in point_keepers:
                            return [ way[0] ]
                        if not way[0] in point_keepers:
                            return [ way[-1] ]
                    _log.error(f"RLID {way1.rlid} {way1.tags}")
                    _log.error(f"way {way}")
                    _log.error(f"new_way {new_way}")
                    raise RuntimeError("Keeper points closer than minimum segment length %s < %s" % (dist2d(new_way[-1], p), min_seg_len))
                del new_way[-1]
            new_way.append(p)
            # the keeper point itself must not be removed later
            may_remove = False
        elif dist2dsq(new_way[-1], p) >= dsq:
            new_way.append(p)
            may_remove = True
    if len(new_way) == len(way):
        return way
    return new_way
def copy_way(way):
    """Return a deep copy of *way* (fresh Point instances)."""
    return [Point(p.x, p.y) for p in way]
def join_ways_using_nvdb_tags(ways, snap_dist):
    """Merge overlapping/adjacent ways of one RLID using STARTAVST/SLUTAVST.

    The ways are ordered by their NVDB distance tags and then merged:
    gaps keep both ways, exact joins concatenate, full overlaps discard
    the contained way, and partial overlaps are trimmed geometrically
    using *snap_dist*. Modifies the way objects in place.
    """
    if len(ways) == 1:
        return ways
    # order by STARTAVST, tie-break on SLUTAVST
    def compare_avst(w1, w2):
        res = w1.tags["STARTAVST"] - w2.tags["STARTAVST"]
        if res == 0:
            res = w1.tags["SLUTAVST"] - w2.tags["SLUTAVST"]
        if res < 0:
            return -1
        if res > 0:
            return 1
        return 0
    ways.sort(key=cmp_to_key(compare_avst))
    new_ways = []
    it = iter(ways)
    pw = next(it)
    for w in it:
        if pw.tags["SLUTAVST"] < w.tags["STARTAVST"]:
            # there's a gap between 'pw' and 'w', keep both
            new_ways.append(pw)
        elif pw.tags["SLUTAVST"] == w.tags["STARTAVST"]:
            # perfect join, extend 'pw' with 'w'
            w.tags["STARTAVST"] = pw.tags["STARTAVST"]
            w.tags["SHAPE_LEN"] += pw.tags["SHAPE_LEN"]
            w.way = pw.way + w.way # we probably add a double point here, removed elsewhere
        elif pw.tags["SLUTAVST"] >= w.tags["SLUTAVST"]:
            # 'pw' spans the whole of 'w', discard 'w'
            w = pw
        else:
            # partial overlap
            # figure out how much of 'pw' that is before 'w' starts, and store to 'nw'
            gs = GeometrySearch(10)
            gs.insert(w)
            nw = []
            for p in pw.way:
                # keep pw's points until they snap onto w's geometry
                _, p1, _ = gs.snap_point_into_geometry(p, snap_dist, snap_dist)
                if p1 is None:
                    nw.append(p)
                else:
                    break
            w.tags["STARTAVST"] = pw.tags["STARTAVST"]
            w.way = nw + w.way
            length, _ = calc_way_length(w.way)
            w.tags["SHAPE_LEN"] = length
        pw = w
    new_ways += [ pw ]
    return new_ways
# join_DKReflinjetillkomst_gaps()
#
# With the data source NVDB_DKReflinjetillkomst some gaps in RLID segment that should be a
# single segment has been observed (rare). This function tries to identify that and join the ways
# if needed.
#
# Note that even if segments are joined incorrectly, if the actual data layers doesn't contain any
# data in the joined segment it will not turn up in the map, so better join one too many than one
# too few.
#
def join_DKReflinjetillkomst_gaps(ways):
    """Join ways with the same RLID whose endpoints have a suspiciously small gap.

    Consecutive segments (sorted on STARTAVST) closer than 10 meters are
    concatenated; a warning is logged for each join. Returns the (possibly
    shortened) list of ways.
    """
    if len(ways) == 1:
        return ways
    def startavst(w):
        return w.tags["STARTAVST"]
    ways.sort(key=startavst)
    i = 0
    while i + 1 < len(ways):
        dist = dist2d(ways[i].way[-1], ways[i+1].way[0])
        # previous max gap was 1.0, but 2.4 meter gap was observed in Falun dataset,
        # and then 3.4 meters in Kil dataset, and 8.6 meters in Piteå.
        #
        # Any false joins are not too bad as it won't join the data layers on top.
        if dist < 10:
            shapelen_sum = ways[i].tags["SHAPE_LEN"] + ways[i+1].tags["SHAPE_LEN"]
            avst_diff = ways[i+1].tags["STARTAVST"] - ways[i].tags["SLUTAVST"]
            shape_dist = shapelen_sum * avst_diff
            # message fixed: "where joined" -> "were joined"
            _log.warning(f"a suspiciously small gap was found in RLID {ways[i].rlid} ({dist}m), segments were joined (total SHAPE_LEN {shapelen_sum}m, AVST diff {avst_diff}m => TAG gap {shape_dist}m)")
            # NOTE(review): the surviving segment keeps its own STARTAVST/SLUTAVST tags;
            # presumably fine since only the point list is used downstream — verify.
            ways[i+1].way = ways[i].way + ways[i+1].way
            del ways[i]
        else:
            i += 1
    return ways
# remove_DKReflinjetillkomst_overlaps
#
# On rare occasions, overlaps have been observed in the NVDB ref layer. This function cleans them up.
#
def remove_DKReflinjetillkomst_overlaps(ways, snap_dist):
    """Clean up (rare) overlapping segments observed in the NVDB reference layer.

    First drops segments that have been snapped to zero length (all points
    equal); if the remaining segments overlap according to their
    STARTAVST/SLUTAVST tags they are re-joined using those tags.
    Returns the cleaned list of ways.
    """
    if len(ways) == 1:
        return ways
    # remove segments that have been snapped to zero length
    filtered_ways = []
    for w in ways:
        if w.way[0] != w.way[-1]:
            filtered_ways.append(w)
        else:
            all_same = True
            for p in w.way[1:]:
                if p != w.way[0]:
                    all_same = False
                    break
            if not all_same:
                filtered_ways.append(w)
    ways = filtered_ways
    # '<=' rather than '==': every segment may have been zero length, and an
    # empty list would otherwise crash on ways[0] below
    if len(ways) <= 1:
        return ways
    def startavst(w):
        return w.tags["STARTAVST"]
    ways.sort(key=startavst)
    has_overlap = False
    prev_way = ways[0]
    for way in ways[1:]:
        if prev_way.tags["SLUTAVST"] > way.tags["STARTAVST"]:
            _log.warning(f"overlapping segment for RLID {way.rlid} ({prev_way.tags['SLUTAVST']} > {way.tags['STARTAVST']}).")
            has_overlap = True
            break
        prev_way = way
    if has_overlap:
        return join_ways_using_nvdb_tags(ways, snap_dist)
    return ways
# print_progress()
#
# print progress 10%...20%... etc
#
def print_progress(last_print, idx, data_len, progress_text="work"):
    """Log progress in 10% steps (10%, 20%, ...) and return the last logged value.

    last_print is the previously logged percentage; it is returned unchanged
    unless a new decile was reached.
    """
    if data_len <= 1:
        return last_print
    progress = int(100 * idx / (data_len - 1))
    hit_new_decile = progress % 10 == 0 and progress != last_print
    if not hit_new_decile:
        return last_print
    _log.info(f"{progress_text}: {progress}%")
    return progress
def test_self_connections(way):
# this will not trigger a warning for connected endpoints (closed loop), as that is normal
points = set()
for i, p in enumerate(way.way):
if p in points and (i != len(way.way) - 1 or p != way.way[0]):
_log.warning(f"RLID {way.rlid} connects to itself at {latlon_str(p)}")
points.add(p)
def extend_and_snap(way, is_start, snappoints, snappoints_extra, snap_dist, ext_dist):
    """Try to snap an unconnected way endpoint to a nearby point by linearly
    extending the way's last line segment in its own direction.

    way              -- list of points
    is_start         -- True to work on way[0], False to work on way[-1]
    snappoints       -- primary 2D search structure of candidates (endpoints)
    snappoints_extra -- secondary candidates (midpoints)
    snap_dist        -- max perpendicular deviation allowed for a snap
    ext_dist         -- max extension length and candidate search radius

    Returns (extension distance, deviation distance, point); the point is
    None when no candidate qualified, and (None, None, None) when no
    direction or no candidates could be found at all.
    """
    # get last line segment base->tip and ignore duplicated points
    base = None
    if is_start:
        tip = way[0]
        for p in way[1:]:
            if p != tip:
                base = p
                break
    else:
        tip = way[-1]
        for p in reversed(way[:-1]):
            if p != tip:
                base = p
                break
    if base is None:
        # all points identical; no direction to extend in
        return None, None, None
    # get list of all possible snapping points
    sp_list = snappoints.find_all_within_list(tip, ext_dist)
    sp_list += snappoints_extra.find_all_within_list(tip, ext_dist)
    if len(sp_list) == 0:
        return None, None, None
    assert base != tip
    dist = dist2d(base, tip)
    # unit vector pointing from base through tip (the extension direction)
    x_delta = (tip.x - base.x) / dist
    y_delta = (tip.y - base.y) / dist
    min_dist = snap_dist
    min_ext_dist = ext_dist
    result = None
    # go through all possible snapping points, keeping the best (closest) one
    for sp in sp_list:
        p = sp[0]
        if p == tip:
            # self, skip
            continue
        tip_to_p = dist2d(tip, p)
        if tip_to_p >= ext_dist:
            # distance to candidate longer than max snap distance, skip
            continue
        ext_point = Point(tip.x + x_delta * tip_to_p, tip.y + y_delta * tip_to_p)
        ext_point_to_p = dist2d(ext_point, p)
        if ext_point_to_p > tip_to_p:
            # if this happens the candidate is behind the tip, skip
            continue
        dist = dist2d(p, ext_point)
        if dist >= min_dist:
            continue
        # calculate deviation angle from linear extension
        s1 = dist/2
        s2 = math.sqrt(dist2dsq(base, p) - s1 * s1)
        dev_angle = math.atan(s1/s2) * 180 / math.pi
        if dev_angle >= 1.0:
            # snapping to candidate causes too large change in angle
            continue
        min_dist = dist
        min_ext_dist = tip_to_p
        result = p
    return min_ext_dist, min_dist, result
# WayDatabase
#
# Memory database used to store and merge NVDB geometry.
# The code is somewhat generic, but does expect that the input data has some specific
# properties which NVDB data has.
#
class WayDatabase:
    """In-memory database used to store and merge NVDB geometry.

    Somewhat generic, but expects properties specific to NVDB data (see
    __init__ for the expected properties of the reference geometry).
    """
    # snap distances in meters
    POINT_SNAP_DISTANCE = 0.1      # points closer than this are considered the same
    MAX_SNAP_DISTANCE = 2          # normal max distance when snapping to geometry
    EMERGENCY_SNAP_DISTANCE = 7    # last-resort snap distance when the normal one fails
def __init__(self, reference_geometry, perform_self_testing=True):
    """Build the way database from the cleaned NVDB reference geometry.

    reference_geometry   -- iterable of ways (each with .way point list and .rlid)
    perform_self_testing -- run internal consistency checks while building
    """
    _log.info("Setting up way database and cleaning reference geometry...")
    self.way_db = {}    # rlid -> list of merged data way segments
    self.point_db = {}  # point -> list of nodes at that position
    self.gs = None      # geometry search over way_db, built by setup_geometry_search()
    self._ref_gs = GeometrySearch(GEO_FILL_LENGTH, use_dist=True, perform_self_testing=perform_self_testing)
    self._ref_way_db = {}  # rlid -> list of reference ways
    self._way_db_iter = None
    self._way_db_sub_iter = None
    self._perform_self_testing = perform_self_testing
    # Expected properties of 'reference_geometry':
    #
    # - All points in ways that should be connected are close (within
    #   POINT_SNAP_DISTANCE) to a point in the connecting way, which is an endpoint
    # - No overlaps or duplicates
    # - There may be very short segments (cleaned up)
    # - Segments with the same RLID may sometimes be disconnected
    # - There may be closed ways (start and end point the same), but no self-crossing ways
    #
    # After processing:
    # - Any points closer than POINT_SNAP_DISTANCE have been merged to a single point
    # - Segments with same RLID have been connected to as long segments as possible
    # - Stored in a GeometrySearch object with distance on each point
    #
    # group segments per RLID, and register all endpoints and midpoints (=non-endpoints) for searching
    rlid_ways = {}
    endpoints = TwoDimSearch()
    point_count = 0
    last_print = 0
    _log.info("Setting up endpoint 2D search data structures...")
    for idx, way in enumerate(reference_geometry):
        last_print = print_progress(last_print, idx, len(reference_geometry),
                                    progress_text="endpoint 2D search data structures")
        for ep in (way.way[0], way.way[-1]):
            endpoints.insert(ep, way)
        point_count += len(way.way)
        if way.rlid in rlid_ways:
            rlid_ways[way.rlid].append(way)
        else:
            rlid_ways[way.rlid] = [way]
    _log.info(f"({len(endpoints)} endpoints of {point_count} total points)")
    _log.info("Snap points to nearby endpoints.")
    # Due to snapping we may introduce duplicate points in the ways, which is ok as we
    # remove them later.
    ep_count, mp_count = self._snap_points_to_nearby_endpoints(rlid_ways, endpoints)
    _log.info(f"done (snapped {ep_count} endpoints and {mp_count} midpoints)")
    # In rare cases DKReflinjetillkomst has lines cut short, we try to connect those
    _log.info("Snap still unconnected endpoints to nearby points by extension...")
    ep_count = 0
    uc_count = 0
    second_pass = []
    midpoints = TwoDimSearch()
    for ways in rlid_ways.values():
        for way in ways:
            for mp in way.way[1:-1]:
                midpoints.insert(mp, way)
    for ways in rlid_ways.values():
        for way in ways:
            if way.way[0] == way.way[-1]:
                continue # makes no sense to extend-snap closed loops
            for way_idx in [ 0, -1 ]:
                if len(endpoints[way.way[way_idx]]) > 1:
                    # already connected
                    continue
                uc_count += 1
                min_ext_dist, min_dev_dist, p = extend_and_snap(way.way, way_idx == 0, endpoints, midpoints, self.POINT_SNAP_DISTANCE, self.MAX_SNAP_DISTANCE)
                if p is None:
                    continue
                if min_dev_dist > 1e-5:
                    # keep very tight limit on first pass so we extend in the right order
                    second_pass.append(way)
                    continue
                _log.info(f"extend snap ext:{min_ext_dist:g} dev:{min_dev_dist:g} for RLID {way.rlid}"
                          f" at {latlon_str(p)}")
                endpoints.remove(way.way[way_idx], way) # must be removed before midpoint test below so we don't snap to ourself
                if p in midpoints:
                    # for midpoints it may be better to snap to an endpoint instead
                    p = self._snap_to_nearby_point(p, endpoints, self.MAX_SNAP_DISTANCE)
                endpoints.insert(p, way)
                way.way[way_idx] = Point(p.x, p.y)
                ep_count += 1
    # second pass: ways whose best snap had a non-trivial deviation
    for way in second_pass:
        for way_idx in [ 0, -1 ]:
            if len(endpoints[way.way[way_idx]]) > 1:
                continue
            min_ext_dist, min_dev_dist, p = extend_and_snap(way.way, way_idx == 0, endpoints, midpoints, self.POINT_SNAP_DISTANCE, self.MAX_SNAP_DISTANCE)
            if p is None:
                continue
            _log.info(f"extend snap ext:{min_ext_dist:g} dev:{min_dev_dist:g} for RLID {way.rlid} at {latlon_str(p)}")
            endpoints.remove(way.way[way_idx], way)
            if p in midpoints:
                p = self._snap_to_nearby_point(p, endpoints, self.MAX_SNAP_DISTANCE)
            endpoints.insert(p, way)
            way.way[way_idx] = Point(p.x, p.y)
            ep_count += 1
    _log.info(f"done (snapped {ep_count} endpoints, {uc_count - ep_count} still unconnected)")
    if ep_count > 0:
        _log.warning("extend snaps typically means that there are gaps in the data source's geometry")
    if self._perform_self_testing:
        # no two endpoints may remain within snap distance of each other
        for ways in rlid_ways.values():
            for way in ways:
                for ep in [ way.way[0], way.way[-1] ]:
                    dist, new_point, _ = endpoints.find_nearest_within(ep, self.POINT_SNAP_DISTANCE, exclude_self=True)
                    if new_point is not None:
                        _log.error(f"endpoints placed too closely together: {dist}, {ep}, {new_point}")
                        raise RuntimeError("endpoints placed too closely together")
    _log.info("Join segments with same RLID and insert to search data structure...")
    self._insert_into_reference_geometry(rlid_ways, endpoints)
    _log.info("done")
def __iter__(self):
self._way_db_iter = iter(self.way_db.values())
self._way_db_sub_iter = None
return self
def __next__(self):
if self._way_db_sub_iter is None:
ways = next(self._way_db_iter) # when StopIteration is raised iteration is complete
self._way_db_sub_iter = iter(ways)
try:
way = next(self._way_db_sub_iter)
return way
except StopIteration:
self._way_db_sub_iter = None
return self.__next__()
@staticmethod
def _snap_to_nearby_point(p, snappoints, snap_distance):
_, snap, _ = snappoints.find_nearest_within(p, snap_distance, exclude_self=True)
if snap is None:
return p
return snap
def _snap_points_to_nearby_endpoints(self, rlid_ways, endpoints):
    """Merge points that lie within snap distance of registered endpoints.

    Runs repeated passes until a fixpoint is reached: pass 0 uses a very
    short snap distance (0.001) so the closest clusters merge first, later
    passes use POINT_SNAP_DISTANCE. Mutates the ways and the 'endpoints'
    search structure in place. Returns (endpoint snap count, midpoint snap
    count).
    """
    ep_count = 0
    mp_count = 0
    midpoints = []
    prev_count = -1
    pass_count = 0
    while pass_count < 2 or prev_count != ep_count + mp_count:
        snapped_points = set()
        prev_count = ep_count + mp_count
        if pass_count == 0:
            # snap really close one first to make sure we don't make a unnecessarily long snap
            snap_distance = 0.001
        else:
            snap_distance = self.POINT_SNAP_DISTANCE
        for ways in rlid_ways.values():
            for way in ways:
                for way_idx in range(0, len(way.way)):
                    is_midpoint = way_idx not in (0, len(way.way) - 1)
                    ep_list = endpoints.find_all_within_list(way.way[way_idx], snap_distance)
                    if len(ep_list) == 0:
                        # the own endpoint is expected to exist in endpoints set
                        assert is_midpoint
                        continue
                    if len(ep_list) == 1 and not is_midpoint:
                        # only ourselves nearby, nothing to snap
                        assert ep_list[0][0] == way.way[way_idx]
                        continue
                    new_point = ep_list[0][0]
                    if pass_count == 0:
                        # first pass we can pick any point due to short snap distance
                        snapped_points.add(new_point)
                    else:
                        # prefer to snap to a point already snapped
                        for ep in ep_list:
                            if ep[0] in snapped_points:
                                new_point = ep[0]
                                break
                    # move all the nearby endpoints to the point we have chosen for snapping (new_point)
                    for ep in ep_list:
                        old_point = ep[0]
                        if old_point == new_point:
                            continue
                        ep_set = ep[1]
                        endpoints.remove_set(old_point)
                        for w in ep_set:
                            if w.way[0] == old_point:
                                w.way[0] = Point(new_point.x, new_point.y)
                            if w.way[-1] == old_point:
                                w.way[-1] = Point(new_point.x, new_point.y)
                            assert new_point in (w.way[0], w.way[-1])
                            ep_count += 1
                            endpoints.insert(new_point, w)
                    midpoints.append((way_idx, way))
                    if way.way[way_idx] != new_point:
                        # endpoints were already moved above, so a leftover must be a midpoint
                        assert is_midpoint
                        way.way[way_idx] = Point(new_point.x, new_point.y)
                        mp_count += 1
        _log.debug(f"snap_counts {mp_count} {ep_count}")
        pass_count += 1
    # also add connected midpoints, we need to do it here afterwards to not disturb the multiple pass endpoint snapping
    # FIXME: modifying contents and meaning of endpoints is a hard-to-follow side effect
    for mp in midpoints:
        idx = mp[0]
        way = mp[1]
        endpoints.insert(way.way[idx], way)
    return ep_count, mp_count
def _insert_into_reference_geometry(self, rlid_ways, endpoints):
    """Join per-RLID segments into maximal ways and insert them into the
    reference geometry search structure and _ref_way_db."""
    last_print = 0
    for idx, ways in enumerate(rlid_ways.values()):
        if len(rlid_ways) > 50:
            last_print = print_progress(last_print, idx, len(rlid_ways), progress_text="Join segments")
        # make longest possible ways of RLID segments
        joined_ways = join_ways(ways)
        joined_ways = join_DKReflinjetillkomst_gaps(joined_ways)
        joined_ways = remove_DKReflinjetillkomst_overlaps(joined_ways, self.POINT_SNAP_DISTANCE)
        for way in joined_ways:
            # very short segments lead to problems with snapping (can cause gaps where there should not be any)
            new_way = remove_short_segments_and_redundant_points(way, self.POINT_SNAP_DISTANCE, endpoints)
            if len(new_way) < 2:
                _log.debug(f"Skipping zero length segment for {way.rlid}")
                continue
            # endpoints must be untouched by the cleanup
            assert new_way[0] == way.way[0] and new_way[-1] == way.way[-1]
            way.way = new_way
            test_self_connections(way)
            if self._perform_self_testing:
                self._test_way_dist(way, allow_unset=True)
            self._ref_gs.insert(way)
            # verify the dist values assigned by the insert
            self._test_way_dist(self._ref_gs.find_reference_way(way.way[0], way.rlid))
            if way.rlid in self._ref_way_db:
                self._ref_way_db[way.rlid].append(way)
            else:
                self._ref_way_db[way.rlid] = [ way ]
def get_reference_geometry(self):
ref_ways = []
for ways in self._ref_way_db.values():
for way in ways:
ref_ways.append(way)
return ref_ways
def insert_missing_reference_geometry_if_any(self, geometry_ways):
    """Insert ways whose RLID is missing from the reference geometry.

    Returns True if at least one missing RLID actually got inserted.
    """
    # collect copies of all ways whose RLID is not in the reference database
    missing_ways = {}
    for way in geometry_ways:
        if not way.rlid in self._ref_way_db:
            wc = way.make_copy_new_way(copy_way(way.way))
            if way.rlid in missing_ways:
                missing_ways[way.rlid].append(wc)
            else:
                missing_ways[way.rlid] = [ wc ]
    if len(missing_ways) == 0:
        return False
    # snap endpoints to self
    endpoints = TwoDimSearch()
    rlids = []
    for ways in list(missing_ways.values()):
        # this type of geometry may have overlaps, so we pre-join using NVDB tags
        ways = join_ways_using_nvdb_tags(ways, self.POINT_SNAP_DISTANCE)
        missing_ways[ways[0].rlid] = ways
        rlids.append(ways[0].rlid)
        for way in ways:
            for ep in [ way.way[0], way.way[-1] ]:
                endpoints.insert(ep, way)
    self._snap_points_to_nearby_endpoints(missing_ways, endpoints)
    self._insert_into_reference_geometry(missing_ways, endpoints)
    # Missing segments may be missing because they have been snapped to zero and thus excluded.
    # If that is the case they won't be reinserted either, so we only log after the insertion
    # when we know if any actually got in.
    did_insert = False
    for rlid in rlids:
        if rlid in self._ref_way_db:
            _log.warning(f"RLID {rlid} was not in reference geometry, inserted it.")
            did_insert = True
    return did_insert
def insert_rlid_node(self, node, data_src_name, do_snap=True):
    """Insert a point object into the node database, optionally snapping it
    to the reference geometry first.

    node          -- object with .way (a point), .rlid and .tags
    data_src_name -- name of the data source layer, used for tag merging/logs
    do_snap       -- snap the node position onto the reference geometry

    Returns True if the node was snapped onto existing geometry.
    """
    did_snap = False
    if do_snap:
        dist, p, snap_way = self._ref_gs.snap_point_into_geometry(node.way, self.POINT_SNAP_DISTANCE, self.MAX_SNAP_DISTANCE)
        if p is None:
            _log.warning(f"node with RLID {node.rlid} {latlon_str(node.way)} in {data_src_name} has no"
                         f" existing geometry within {self.MAX_SNAP_DISTANCE} meters")
        else:
            did_snap = True
            if dist > self.POINT_SNAP_DISTANCE:
                _log.info("Node %s snap distance %s", node.rlid, dist)
            # move the node onto the snapped position
            node.way.x = p[0]
            node.way.y = p[1]
    if node.way in self.point_db:
        # merge with the node already stored at this position
        current = self.point_db[node.way][0]
        if current.rlid != node.rlid:
            # This can happen for some data in crossings for example
            #raise RuntimeError("Node with RLID %s and position %s already exists in the database (%s, %s)" % (node.rlid, latlon_str(node.way), current, node))
            _log.warning(f"Node with RLID {node.rlid} and position {latlon_str(node.way)} already"
                         f" exists in the database ({current}, {node})")
        merge_tags(current, node.tags, data_src_name)
    else:
        self.point_db[node.way] = [ node ]
    if do_snap and snap_way is not None:
        # make sure the snapped position exists as a point in the way it snapped to
        self._add_node_into_way(snap_way.rlid, p)
    return did_snap
def _add_node_into_way(self, rlid, point):
    """Insert 'point' into the way segment of 'rlid' that it lies on.

    The point gets a dist value interpolated from the preceding way point.
    Logs a warning if the point is not on any segment of the RLID.
    """
    segs = self.way_db.get(rlid, [])
    for seg in segs:
        for idx, p in enumerate(seg.way):
            if idx == 0:
                # need a preceding point to form a line segment
                continue
            is_between, _ = point_between_points(point, seg.way[idx-1], p, 1e-6)
            if is_between:
                point.dist = seg.way[idx-1].dist + dist2d(seg.way[idx-1], point)
                seg.way.insert(idx, point)
                return
    _log.warning(f"node {latlon_str(point)} not found in any way segment for RLID {rlid}")
def _split_and_merge(self, way, data_src_name):
    """Merge 'way' into the segment list of its RLID in self.way_db.

    Segments are kept sorted on their dist values. Where 'way' overlaps an
    existing segment the segment is split so that the overlapping part
    matches exactly, and the tags are merged; non-overlapping parts are
    inserted as separate segments.
    """
    if len(way.way) == 1:
        # skipping (extremely short) ways that were reduced to one point
        return
    if not way.rlid in self.way_db:
        # first segment for rlid
        self.way_db[way.rlid] = [ way ]
        return
    segs = self.way_db[way.rlid]
    if way.way[-1].dist <= segs[0].way[0].dist:
        # way is before existing segments
        segs.insert(0, way)
        return
    if way.way[0].dist >= segs[-1].way[-1].dist:
        # way is after existing segments
        segs.append(way)
        return
    segs_idx = 0
    while segs_idx < len(segs): # we modify segs inside, so can't use for loop
        seg = segs[segs_idx]
        if seg.way[-1].dist <= way.way[0].dist:
            # seg is before way
            segs_idx += 1
            continue
        if seg.way[0].dist >= way.way[-1].dist:
            # seg is after way, no overlap
            segs.insert(segs_idx, way)
            #print("insert no overlap")
            break
        # way starts somewhere inside seg, scan to start of way
        #print("way ", way)
        #print("matching seg", seg)
        seg_idx = 0
        while seg_idx < len(seg.way):
            if seg.way[seg_idx].dist >= way.way[0].dist:
                if seg.way[seg_idx].dist > way.way[0].dist:
                    # start of way is a new point, insert
                    seg.way.insert(seg_idx, way.way[0])
                break
            seg_idx += 1
        if seg_idx > 0:
            # split out segment which is before way
            seg_copy = seg.make_copy_new_way(seg.way[:seg_idx+1])
            segs.insert(segs_idx, seg_copy)
            segs_idx += 1
            seg.way = seg.way[seg_idx:]
            #print("split before")
        # now seg starts at same point as way
        assert seg.way[0] == way.way[0]
        # way may have new points, insert those into seg, if any
        seg_idx = 0
        way_idx = 0
        while seg_idx < len(seg.way) and way_idx < len(way.way):
            if seg.way[seg_idx].dist > way.way[way_idx].dist:
                seg.way.insert(seg_idx, way.way[way_idx])
            seg_idx += 1
            way_idx += 1
        if seg_idx < len(seg.way):
            # split out segment which is after way
            seg_copy = seg.make_copy_new_way(seg.way[seg_idx-1:])
            segs.insert(segs_idx + 1, seg_copy)
            seg.way = seg.way[:seg_idx]
        assert seg.way[0] == way.way[0]
        next_way = None
        if len(way.way) > len(seg.way):
            # split way; the rest is merged in the next loop iteration(s)
            next_way = way.make_copy_new_way(way.way[len(seg.way)-1:])
            way.way = way.way[:len(seg.way)]
        # merge tags
        assert seg.way[-1] == way.way[-1]
        assert seg.way[0] == way.way[0] and seg.way[-1] == way.way[-1]
        merge_tags(seg, way.tags, data_src_name)
        #print("insert with split")
        if next_way is None:
            break
        way = next_way
        if way.way[0].dist >= segs[-1].way[-1].dist:
            # special case when next_way is last
            segs.append(way)
            break
def insert_rlid_way(self, way, data_src_name, debug_ways=None):
    """Adapt a data way to the reference geometry and merge it into the database.

    When debug_ways is a list, a copy of each adapted way is appended to it.
    """
    if way.rlid not in self._ref_way_db:
        _log.debug(f"Skipping RLID {way.rlid} (not in reference geometry)")
        return
    _, adapted_ways = self._adapt_way_into_reference_geometry(way, data_src_name)
    for adapted in adapted_ways:
        if debug_ways is not None:
            debug_ways.append(adapted.make_copy_new_way(copy_way(adapted.way)))
        self._split_and_merge(adapted, data_src_name)
    if self._perform_self_testing and way.rlid in self.way_db:
        self._test_segment(self.way_db[way.rlid])
def _test_segment(self, segs):
    """Consistency-check a sorted list of segments for one RLID.

    Verifies order, minimum spacing and that every point exists (with the
    same dist) in the matching reference way. Raises RuntimeError on
    violation; intended for self-testing only.
    """
    # segments must be ordered by dist and non-overlapping
    it = iter(segs)
    prev = next(it)
    for seg in it:
        if seg.way[0].dist < prev.way[-1].dist:
            raise RuntimeError("Bad order")
        prev = seg
    for seg in segs:
        assert len(seg.way) >= 2
        ref_way = self._ref_gs.find_reference_way(seg.way[0], seg.rlid)
        # locate seg's first point in the reference way
        ref_idx = 0
        while ref_idx < len(ref_way.way) and ref_way.way[ref_idx] != seg.way[0]:
            ref_idx += 1
        assert ref_idx < len(ref_way.way)
        prev = None
        for p in seg.way:
            if prev is not None:
                dist = dist2d(prev, p)
                if dist < self.POINT_SNAP_DISTANCE:
                    _log.error(f"ref.way: {ref_way.way}")
                    _log.error(f"seg.way: {seg.way}")
                    _log.error(f"ref_way: {ref_way}")
                    _log.error(f"seg    : {seg}")
                    raise RuntimeError("Point closer placed than snap distance %s" % dist)
            if ref_idx == len(ref_way.way):
                _log.error(f"ref.way: {ref_way.way}")
                _log.error(f"seg.way: {seg.way}")
                _log.error(f"ref_way: {ref_way}")
                _log.error(f"seg    : {seg}")
                raise RuntimeError("More points in segment than in reference way")
            if p.dist != ref_way.way[ref_idx].dist:
                _log.error(f"ref.way: {ref_way.way}")
                _log.error(f"seg.way: {seg.way}")
                _log.error(f"ref_way: {ref_way}")
                _log.error(f"seg    : {seg}")
                raise RuntimeError("Dist mismatch got %s expected %s (ref_idx %s)" % (p.dist, ref_way.way[ref_idx].dist, ref_idx))
            ref_idx += 1
            prev = p
def _test_way_dist(self, way, allow_unset=False):
    """Consistency-check the dist values along a way.

    Verifies there are no duplicate points, no points closer than the snap
    distance, and that each point's dist matches the accumulated 2D length.
    With allow_unset=True a dist of -1 (not yet assigned) is accepted.
    Raises RuntimeError on violation.
    """
    it = iter(way.way)
    prev = next(it)
    ref_dist = way.way[0].dist
    if allow_unset and ref_dist == -1:
        ref_dist = 0
    else:
        assert ref_dist >= 0
    for p in it:
        if prev == p:
            _log.info(f"{way.way}")
            _log.info(f"{way}")
            raise RuntimeError("Duplicate point %s" % p)
        dist = dist2d(prev, p)
        if dist < self.POINT_SNAP_DISTANCE:
            _log.info(f"{way.way}")
            _log.info(f"{way}")
            raise RuntimeError("Point closer placed than snap distance at %s in ref_way %s" % (p, dist))
        ref_dist += dist
        if (not allow_unset or p.dist != -1) and abs(p.dist - ref_dist) > 1e-6:
            _log.info(f"{way.way}")
            _log.info(f"{way}")
            raise RuntimeError("Bad dist in ref_way %s (expected %s got %s)" % (p, ref_dist, p.dist))
        if p.dist != -1:
            # resynchronize on the stored value to avoid accumulating float error
            ref_dist = p.dist
        prev = p
def _retry_adapt_way_extending_reference_geometry(self, way):
    """Fallback when a way cannot be fully snapped: extend the reference
    geometry with the way's unsnappable leading/trailing parts.

    Returns True when the reference geometry was successfully extended so
    the caller can retry the snap, False otherwise.
    """
    # will not work for closed loops, or self-crossing stuff
    new_way = []
    snapped = []
    ref_way = None
    snap_count = 0
    for way_idx, point in enumerate(way.way):
        p, rway = self._ref_gs.snap_waypoint_into_geometry(way, way_idx, self.POINT_SNAP_DISTANCE, self.MAX_SNAP_DISTANCE)
        if p is not None:
            new_way.append(p)
            snapped.append(True)
            ref_way = rway
            snap_count += 1
        else:
            # snap to other way if possible, using short snap distance
            _, p, rway = self._ref_gs.snap_point_into_geometry(point, self.POINT_SNAP_DISTANCE, self.POINT_SNAP_DISTANCE)
            if p is not None:
                new_way.append(p)
            else:
                new_way.append(point)
            snapped.append(False) # only count points snap to self RLID
    if ref_way is None:
        _log.warning(f"Way with RLID {way.rlid} could not be snapped to reference geometry")
        return False
    assert ref_way.rlid == way.rlid
    _log.warning(f"must extend reference geometry for RLID {way.rlid} (only {snap_count} "
                 f"of {len(way.way)} points could be snapped)")
    # index of the first and last snapped points
    first_snap = 0
    for idx, is_snap in enumerate(snapped):
        if is_snap:
            first_snap = idx
            break
    last_snap = len(new_way)
    for idx in range(0, len(snapped)):
        if snapped[len(snapped) - 1 - idx]:
            last_snap = len(snapped) - 1 - idx
            break
    _log.debug(f"snapped {snapped}")
    _log.debug(f"snappoints {first_snap} {last_snap}")
    for way_idx, point in enumerate(way.way):
        if way_idx <= first_snap or way_idx >= last_snap:
            continue
        if not snapped[way_idx]:
            # this means snap failure in the middle too not just an extension problem
            _log.info(f"Way with RLID {way.rlid} could not be snapped to reference geometry")
            return False
    # collect the unsnapped points before/after the snapped middle part
    extend_way_start = []
    extend_way_end = []
    for idx, point in enumerate(new_way):
        if idx < first_snap:
            extend_way_start.append(point)
        if idx > last_snap:
            extend_way_end.append(point)
    current_segs = self.way_db.get(way.rlid, [])
    if len(extend_way_start) > 0:
        # include the first snapped point so the extension connects
        extend_way_start.append(new_way[first_snap])
        if not self._ref_gs.extend_geometry(ref_way, extend_way_start, current_segs):
            return False
    if len(extend_way_end) > 0:
        # include the last snapped point so the extension connects
        extend_way_end.insert(0, new_way[last_snap])
        if not self._ref_gs.extend_geometry(ref_way, extend_way_end, current_segs):
            return False
    return True
def _adapt_way_into_reference_geometry(self, way, data_src_name, is_retry=False):
    """Snap every point of 'way' onto the reference geometry of its RLID.

    Handles emergency snap distances, extending the reference geometry when
    necessary, closed-loop special cases (including splitting the way at the
    loop's start/end point) and inserting reference points the way lacks.

    Returns (reference way, list of adapted ways); the list has two entries
    when a way had to be split around a closed loop's split point.
    Raises RuntimeError when the way cannot be matched at all.
    """
    # first snap each point of the way into the existing geometry
    way.way = remove_short_segments_and_redundant_points(way, self.POINT_SNAP_DISTANCE)
    if len(way.way) == 1:
        _log.debug(f"RLID {way.rlid} reduced to one point")
        return None, [ way ]
    new_way = []
    prev = None
    max_snap_distance = self.MAX_SNAP_DISTANCE
    for way_idx, point in enumerate(way.way):
        p, ref_way = self._ref_gs.snap_waypoint_into_geometry(way, way_idx, self.POINT_SNAP_DISTANCE, max_snap_distance, new_way)
        if p is None:
            if is_retry:
                raise RuntimeError("Way with RLID %s %s has no existing geometry within %s meters" % (way.rlid, latlon_str(point), max_snap_distance))
            # try to extend the reference geometry, then redo the whole adaptation
            success = self._retry_adapt_way_extending_reference_geometry(way)
            if success:
                return self._adapt_way_into_reference_geometry(way, data_src_name, is_retry=True)
            if max_snap_distance < self.EMERGENCY_SNAP_DISTANCE:
                # last resort: retry this point with a larger snap distance
                max_snap_distance = self.EMERGENCY_SNAP_DISTANCE
                p, ref_way = self._ref_gs.snap_waypoint_into_geometry(way, way_idx, self.POINT_SNAP_DISTANCE, max_snap_distance, new_way)
            if p is None:
                raise RuntimeError("Way with RLID %s %s has no existing geometry within %s meters" % (way.rlid, latlon_str(point), max_snap_distance))
        if self._perform_self_testing:
            self._test_way_dist(ref_way)
        if p != prev:
            # sometimes close points are merged to the same position
            assert p.dist >= 0
            new_way.append(p)
        prev = p
    if max_snap_distance == self.EMERGENCY_SNAP_DISTANCE:
        _log.warning(f"had to use emergency snap distance ({max_snap_distance}m) for {way.rlid} to get it "
                     f"to match reference geometry")
    way.way = new_way
    if len(way.way) == 1:
        #_log.info("RLID %s reduced to a point" % way.rlid)
        return ref_way, [ way ]
    if ref_way.way[0] == ref_way.way[-1]:
        # closed loop special cases
        assert len(ref_way.way) > 2
        closed_loop = False
        if way.way[0] == way.way[-1]:
            assert len(way.way) > 2
            closed_loop = True
            way.way = way.way[:-1]
        elif way.way[-1] == ref_way.way[0]:
            # make sure we end at max dist rather than min dist
            way.way[-1] = ref_way.way[-1]
        elif way.way[0].dist > way.way[-1].dist:
            # the way goes past split point of ref_way, split up way
            for idx, p in enumerate(way.way):
                if p == ref_way.way[0]:
                    way_part1 = way.way[:idx] + [ ref_way.way[-1] ]
                    way_part2 = way.way[idx:]
                    break
                if idx > 0 and way.way[idx-1].dist > p.dist:
                    # ref_way.way[0] point doesn't exist, insert it
                    way_part1 = way.way[:idx] + [ ref_way.way[-1] ]
                    way_part2 = [ ref_way.way[0] ] + way.way[idx:]
                    break
            way2 = way.make_copy_new_way(way_part2)
            way.way = way_part1
            # adapt both halves separately
            r1, w1 = self._adapt_way_into_reference_geometry(way, data_src_name)
            r2, w2 = self._adapt_way_into_reference_geometry(way2, data_src_name)
            assert r1 == ref_way and r2 == ref_way and len(w1) == 1 and len(w2) == 1
            return ref_way, [ w1[0], w2[0] ]
        # way could have different starting point in the closed loop, make sure they are the same
        min_idx = 0
        for idx, p in enumerate(way.way):
            if p.dist < way.way[min_idx].dist:
                min_idx = idx
        if min_idx != 0:
            way.way = way.way[min_idx:] + way.way[:min_idx]
        if closed_loop:
            if way.way[0] != ref_way.way[0]:
                assert len(way.way) < len(ref_way.way)
                way.way.insert(0, ref_way.way[0])
            way.way.append(ref_way.way[-1])
            assert way.way[0] == way.way[-1]
        elif len(way.way) > 2 and way.way[-1] == ref_way.way[-1] and way.way[-1].dist < way.way[-2].dist:
            # P-shaped loop, ie endpoint attaches to midpoint on the own way.
            # Very rare (seen in Stockholm dataset), and sort of illegal
            _log.warning(f"endpoint attaches to own midpoint for RLID {way.rlid}")
            way.way[-1].dist = ref_way.way[-1].dist
        if self._perform_self_testing:
            self._test_way_dist(way)
    # if this way has fewer points than the reference geometry it snaps to (happens in
    # some cases), we need to insert missing points we can assume that:
    # - matching ways are oriented in the same direction
    # - matching ways have the same RLID
    # - reference geometry has each way for RLID in its full length, ie it should cover
    #   the full length of the inserted way
    ref_it = iter(ref_way.way)
    ref_p = next(ref_it)
    while ref_p != way.way[0]:
        try:
            ref_p = next(ref_it)
        except StopIteration as stop_iteration:
            raise RuntimeError("Could not find start %s of way %s in reference geometry (does it extend reference geometry?)" % (latlon_str(way.way[0]), way.rlid)) from stop_iteration
    assert ref_p == way.way[0]
    new_way = []
    #_log.info(way.rlid)
    #_log.info("ref_way", ref_way.way)
    #_log.info("way.way", way.way)
    for p in way.way:
        # copy any reference points that are missing from the way
        while ref_p != p:
            assert p.dist >= 0
            new_way.append(ref_p)
            ref_p = next(ref_it)
        new_way.append(p)
        try:
            ref_p = next(ref_it)
        except StopIteration:
            # reference exhausted; must be exactly at the way's last point
            assert ref_p == p
    if len(new_way) > len(way.way):
        #_log.info("Added points to way of RLID %s (%s => %s)" % (way.rlid, len(way.way), len(new_way)))
        way.way = new_way
    return ref_way, [ way ]
def remove_short_sub_segments(self):
    """Merge short sub-segments into their neighbor segments.

    "Remove" in this context means merging with a neighbor segment; truly
    unconnected short segments are kept. Finishes by re-joining segments
    with identical tags.
    """
    _log.info("Removing short sub-segments...")
    remove_count = 0
    for segs in list(self.way_db.values()):
        new_segs = []
        for idx, seg in enumerate(segs):
            length, _ = calc_way_length(seg.way)
            if length > 8.0:
                # long enough, always kept
                new_segs.append(seg)
                continue
            # lengths of the connected previous/next segments (0 = not connected)
            prev_length = 0
            next_length = 0
            if len(new_segs) > 0 and new_segs[-1].way[-1] == seg.way[0]:
                prev_length, _ = calc_way_length(new_segs[-1].way)
            next_idx = (idx+1) % len(segs)
            if segs[next_idx].way[0] == seg.way[-1]:
                next_length, _ = calc_way_length(segs[next_idx].way)
            if prev_length == 0 and next_length == 0:
                # unconnected short segment (hopefully rare)
                if len(segs) == 1:
                    _log.debug(f"RLID {seg.rlid} is an alone short segment ({length:g}), must be kept")
                else:
                    _log.debug(f"RLID {seg.rlid} has a short unconnected segment ({length:g}), must be kept")
                new_segs.append(seg)
                continue
            if length > 2.0:
                # for longer stubs, we only remove them if they are on the start/end and
                # only if only two points. This metric is based on what is seen in NVDB
                # data.
                if (prev_length != 0 and next_length != 0) or len(seg.way) > 2 or keep_end_stub(seg):
                    new_segs.append(seg)
                    continue
            # we can mess up dist value of points here for closed loops, but since
            # this is run at the end we don't care
            if prev_length > next_length:
                new_segs[-1].way += seg.way[1:]
            else:
                segs[next_idx].way = seg.way[:-1] + segs[next_idx].way
            remove_count += 1
        if len(new_segs) < len(segs):
            if len(new_segs) == 0:
                del self.way_db[segs[0].rlid]
            else:
                self.way_db[segs[0].rlid] = new_segs
    _log.info(f"done ({remove_count} short sub-segments were removed)")
    self.join_segments_with_same_tags()
def _get_way(self, rlid, point):
segs = self.way_db.get(rlid, [])
for seg in segs:
for p in seg.way:
if p == point:
return seg
return None
def test_segments(self):
for segs in self.way_db.values():
self._test_segment(segs)
def setup_geometry_search(self):
    """Build the geometry search structure (self.gs) over all ways in way_db."""
    _log.info("Setting up search data structure for all geometry...")
    self.gs = GeometrySearch(GEO_FILL_LENGTH, use_dist=False, perform_self_testing=self._perform_self_testing)
    self.gs.insert_waydb(self.way_db)
    _log.info("done")
def get_endpoint_map(self):
endpoints = {}
for segs in self.way_db.values():
for seg in segs:
for p in [ seg.way[0], seg.way[-1] ]:
if p in endpoints:
endpoints[p].append(seg)
else:
endpoints[p] = [ seg ]
return endpoints
@staticmethod
def _join_rlid_pick_best_matching_ways(ep, endpoints):
    """Among the ways meeting at endpoint 'ep', pick the pair best suited
    for joining.

    Only pairs with identical tags are considered. The pair whose incoming
    directions form the largest angle (straightest continuation) wins, and
    the angle must exceed 30 degrees. Returns (way1, way2) or (None, None).
    """
    ways = endpoints[ep]
    if len(ways) == 1:
        # no connections
        return None, None
    max_angle = -1
    best_way = None
    for w1 in ways:
        w1_closed = w1.way[0] == w1.way[-1]
        # p1 = the point next to 'ep' along w1
        if w1.way[0] == ep:
            p1 = w1.way[1]
            w1_start = True
        else:
            p1 = w1.way[-2]
            w1_start = False
        for w2 in ways:
            if w1 == w2 or w1.tags != w2.tags:
                continue
            w2_closed = w2.way[0] == w2.way[-1]
            if w2.way[0] == ep:
                p3 = w2.way[1]
                w2_start = True
            else:
                p3 = w2.way[-2]
                w2_start = False
            if w1_start == w2_start and not way_may_be_reversed(w1) and not way_may_be_reversed(w2):
                # one way must be reversed, but none can be reversed
                # if one way is closed, we can swap start/end to recover, otherwise skip this pair
                if w1_closed:
                    if w1_start:
                        p1 = w1.way[-2]
                    else:
                        p1 = w1.way[1]
                elif w2_closed:
                    if w2_start:
                        p3 = w2.way[-2]
                    else:
                        p3 = w2.way[1]
                else:
                    continue
            # calculate angle between p1 and p3 as seen from ep
            xa = p1.x - ep.x
            ya = p1.y - ep.y
            xb = p3.x - ep.x
            yb = p3.y - ep.y
            denom = math.sqrt(xa*xa + ya*ya) * math.sqrt(xb*xb + yb*yb)
            if denom != 0:
                q = (xa * xb + ya * yb) / denom
                if q < -1:
                    # this can happen due to precision limitation, -1.0000000000000002 seen in tests
                    angle = 180
                elif q > 1:
                    angle = 0
                else:
                    angle = math.acos((xa * xb + ya * yb) / denom) * 180 / math.pi
            else:
                angle = 0
            if angle > max_angle:
                max_angle = angle
                if max_angle > 30:
                    best_way = (w1, w2)
                else:
                    # too sharp a turn to be a plausible continuation
                    _log.debug(f"Skipping extreme angle {angle} between {w1.rlid} {w2.rlid}")
    if best_way is None:
        return None, None
    _log.debug(f"Max angle {max_angle} for {best_way[0].rlid} to {best_way[1].rlid}")
    return best_way[0], best_way[1]
def _remove_seg_before_join(self, seg, endpoints):
segs = self.way_db[seg.rlid]
segs.remove(seg)
if len(segs) == 0:
_log.debug(f"RLID {seg.rlid} completely removed when joining with other segment")
del self.way_db[seg.rlid]
endpoints.remove(seg.way[0], seg)
if seg.way[-1] != seg.way[0]: # closed loop special case
endpoints.remove(seg.way[-1], seg)
def _join_rlid(self, seg, endpoints, directional_nodes):
    """Repeatedly merge ways of other RLIDs into *seg*.

    Alternates between seg's two endpoints, merging the best matching
    same-tagged way (per _join_rlid_pick_best_matching_ways) into seg
    and giving the result a combined, sorted "rlid1;rlid2" identifier.
    Returns the number of segments merged into *seg*.
    """
    rlid_join_count = 0
    ep_idx = -1
    consecutive_fails = 0
    # stop only after both endpoints failed in a row
    while consecutive_fails < 2:
        # alternate which endpoint of seg we try to extend
        if ep_idx == -1:
            ep_idx = 0
            connecting_ep_idx = -1
        else:
            ep_idx = -1
            connecting_ep_idx = 0
        ep = seg.way[ep_idx]
        w1, w2 = self._join_rlid_pick_best_matching_ways(ep, endpoints)
        if seg not in (w1, w2):
            consecutive_fails += 1
            continue
        consecutive_fails = 0
        # join_way is the partner segment that gets absorbed into seg
        if w1 == seg:
            join_way = w2
        else:
            join_way = w1
        self._remove_seg_before_join(join_way, endpoints)
        self._remove_seg_before_join(seg, endpoints)
        if join_way.way[connecting_ep_idx] != ep and seg.way[0] != seg.way[-1]:
            # reversing required
            if way_may_be_reversed(seg) and way_may_be_reversed(join_way):
                # both can be reversed: reverse the shorter one
                l1, _ = calc_way_length(seg.way)
                l2, _ = calc_way_length(join_way.way)
                reverse_join_way = l1 >= l2
            elif way_may_be_reversed(join_way):
                reverse_join_way = True
            else:
                reverse_join_way = False
                assert way_may_be_reversed(seg)
            if reverse_join_way:
                _log.debug(f"Reversing joining RLID {join_way.rlid}")
                reverse_way(join_way, directional_nodes)
            else:
                _log.debug(f"Reversing base RLID {seg.rlid}")
                reverse_way(seg, directional_nodes)
                # seg was flipped, so the endpoint we worked on changed side
                if ep_idx == 0:
                    ep_idx = -1
                else:
                    ep_idx = 0
        # create new RLID by joining the current, sort them to get repeatable result
        new_rlid = seg.rlid.split(';')
        new_rlid.append(join_way.rlid)
        new_rlid.sort()
        new_rlid = ";".join(new_rlid)
        rlid_join_count += 1
        if seg.way[0] == join_way.way[-1]:
            seg.way.pop(0)
            seg.way = join_way.way + seg.way
        elif seg.way[-1] == join_way.way[0]:
            join_way.way.pop(0)
            seg.way += join_way.way
        else:
            _log.error(f"{seg.rlid}, {seg.way}")
            _log.error(f"{join_way.rlid}, {join_way.way}")
            raise RuntimeError("Disconnected segments cannot be joined")
        # mark the absorbed segment as consumed for callers iterating all_segs
        join_way.way = None
        seg.rlid = new_rlid
        if new_rlid in self.way_db:
            self.way_db[new_rlid].append(seg)
        else:
            _log.debug(f"Inserted joined RLID {new_rlid}")
            self.way_db[new_rlid] = [ seg ]
        endpoints.insert(seg.way[0], seg)
        endpoints.insert(seg.way[-1], seg)
    return rlid_join_count
def join_segments_with_same_tags(self, join_rlid=False):
    """Merge consecutive segments that share identical tags.

    First joins touching segments within the same RLID.  If *join_rlid*
    is True, additionally joins segments across different RLIDs,
    producing combined "rlid1;rlid2" identifiers.

    Fixes vs. previous version: removed the dead local ``prev`` (it was
    assigned but never read), and the cross-RLID loop now uses its own
    counter so the within-RLID total in ``join_count`` is no longer
    clobbered before the final summary log.
    """
    if join_rlid:
        _log.info("Joining segments with same tags even if different RLID...")
    else:
        _log.info("Joining RLID segments with same tags...")
    join_count = 0
    for segs in self.way_db.values():
        it = iter(segs)
        nsegs = [next(it)]
        for seg in it:
            lastseg = nsegs[-1]
            if len(lastseg.way) < 2:
                raise RuntimeError("Short way")
            if len(seg.way) < 2:
                raise RuntimeError("Short way %s %s" % (seg, segs))
            if lastseg.way[-1] == seg.way[0] and lastseg.tags == seg.tags:
                # seg continues lastseg: absorb its points (dropping the
                # duplicated shared endpoint)
                join_count += 1
                seg.way.pop(0)
                lastseg.way += seg.way
            else:
                nsegs.append(seg)
        if len(nsegs) < len(segs):
            self.way_db[segs[0].rlid] = nsegs
    if join_rlid:
        # Joining ways can be optimized in several ways, as there are crossings where more
        # than two segments join at the same point and direction of segments can be swapped
        # to either make joining possible or impossible with neighboring segment, there are
        # many different solutions to the problem.
        #
        # Here we have adopted a strategy which is more advanced than the most trivial method,
        # but still it does not attempt to reach "optimal" result (whatever that would be)
        #
        directional_nodes = get_directional_nodes(self.point_db)
        rlid_join_count = 0
        endpoints = TwoDimSearch()
        all_segs = set()
        for segs in self.way_db.values():
            for seg in segs:
                all_segs.add(seg)
                for ep in (seg.way[0], seg.way[-1]):
                    endpoints.insert(ep, seg)
        while True:
            _log.debug("RLID join iteration")
            prev_rlid_join_count = rlid_join_count
            for seg in all_segs:
                if seg.way is None:  # already processed
                    continue
                # separate counter: must not overwrite the within-RLID total
                seg_join_count = self._join_rlid(seg, endpoints, directional_nodes)
                if seg_join_count > 0:
                    _log.debug(f"Added {seg_join_count} segments to {seg.rlid}")
                    rlid_join_count += seg_join_count
            if prev_rlid_join_count == rlid_join_count:
                # no progress in this round: we're done
                break
            all_segs = set()
            for segs in self.way_db.values():
                for seg in segs:
                    assert seg.way is not None
                    all_segs.add(seg)
    if join_rlid:
        _log.info(f"done ({join_count} joined within same RLID, and {rlid_join_count} with different RLID joined)")
    else:
        _log.info(f"done ({join_count} joined)")
def make_way_directions_tree_like(self):
    """Orient reversible ways so the road graph grows tree-like.

    Ways whose end point is a leaf (only one connection) are already in
    the desired direction; ways whose start point is a leaf get
    reversed.  The remaining ways are oriented iteratively based on the
    longest already-oriented way at their endpoints.

    Fix vs. previous version: corrected the typo "midpoins" ->
    "midpoints" in the final debug message.
    """
    _log.info("Arrange way directions so the graph grows tree-like...")
    reverse_count = 0
    decided_count = 0
    not_oriented = set()
    oriented_endpoints = TwoDimSearch()
    directional_nodes = get_directional_nodes(self.point_db)
    for segs in self.way_db.values():
        for seg in segs:
            decide_later = False
            if way_may_be_reversed(seg):
                start_connect_count = len(self._ref_gs.find_all_connecting_ways(seg.way[0]))
                end_connect_count = len(self._ref_gs.find_all_connecting_ways(seg.way[-1]))
                if end_connect_count == 1:
                    # already desired direction
                    pass
                elif start_connect_count == 1:
                    reverse_way(seg, directional_nodes)
                    reverse_count += 1
                else:
                    decide_later = True
                    not_oriented.add(seg)
            if not decide_later:
                oriented_endpoints.insert(seg.way[0], seg)
                oriented_endpoints.insert(seg.way[-1], seg)
                decided_count += 1
    _log.debug(f"master orientation iteration ({decided_count} decided, {reverse_count} reversed)")
    last_not_oriented_len = -1
    # keep iterating until a round makes no further progress
    while len(not_oriented) != last_not_oriented_len:
        _log.debug(f"orientation iteration ({len(not_oriented)} left)")
        last_not_oriented_len = len(not_oriented)
        oriented = set()
        for seg in not_oriented:
            max_rev_len = -1
            max_len = -1
            for ep_idx in (0, -1):
                ep = seg.way[ep_idx]
                if ep not in oriented_endpoints:
                    continue
                ways = oriented_endpoints[ep]
                for w in ways:
                    length, _ = calc_way_length(w.way)
                    # same-side match means joining would require reversal
                    if w.way[ep_idx] == ep:
                        if length > max_rev_len:
                            max_rev_len = length
                    elif length > max_len:
                        max_len = length
            if max_rev_len == -1 and max_len == -1:
                # could not decide direction in this round
                continue
            if max_rev_len > max_len:
                reverse_way(seg, directional_nodes)
                reverse_count += 1
            oriented_endpoints.insert(seg.way[0], seg)
            oriented_endpoints.insert(seg.way[-1], seg)
            oriented.add(seg)
        not_oriented -= oriented
    _log.debug(f"orientation iterations complete ({len(not_oriented)} not considered, connected to midpoints etc)")
    _log.info(f"done ({reverse_count} ways reversed)")
def simplify_geometry(self):
    """Reduce the point count of all ways via Douglas-Peucker style simplification.

    Points that carry data (present in point_db) or where ways connect
    (including self-connections of P-shaped loops) are treated as fixed
    anchors; only the stretches between anchors are simplified.
    """
    _log.info("Simplifying geometry...")
    old_point_count = 0
    new_point_count = 0
    connected_midpoints = set()
    for segs in self.way_db.values():
        for seg in segs:
            for p in seg.way[1:-1]:
                # We also check if connected to self (P-shaped loops)
                if p == seg.way[0] or p == seg.way[-1] or len(self._ref_gs.find_all_connecting_ways(p)) > 1:
                    connected_midpoints.add(p)
    for segs in self.way_db.values():
        for seg in segs:
            start = 0
            nway = []
            # This value of 0.2 is consistent with what is used in the Norwegian imports.
            # It's a quite high detail level and will keep most points available in NVDB
            # geometry.
            epsilon = 0.2
            for midx, p in enumerate(seg.way[1:-1]):
                idx = midx + 1
                if p in self.point_db or p in connected_midpoints:
                    sub_segment = seg.way[start:idx+1] # idx+1 to include current p as last point
                    simplified = simplify_way(sub_segment, epsilon)
                    nway += simplified[:-1] # skip last p to avoid overlaps with next sub-segment
                    start = idx
            nway += simplify_way(seg.way[start:], epsilon)
            old_point_count += len(seg.way)
            new_point_count += len(nway)
            seg.way = nway
    _log.info(f"done ({old_point_count} => {new_point_count} points)")
| StarcoderdataPython |
4994088 | <gh_stars>100-1000
def custom_simclr_contrastive_loss(proj_feat1, proj_feat2, temperature=0.5):
    """Compute the SimCLR (NT-Xent) contrastive loss.

    Positive pairs are matched along the batch dimension: row i of
    *proj_feat1* and row i of *proj_feat2* are two augmentations of the
    same image.

    Required args:
    - proj_feat1 (2D torch Tensor): projected features for first image
        augmentations (batch_size x feat_size)
    - proj_feat2 (2D torch Tensor): projected features for second image
        augmentations (batch_size x feat_size)

    Optional args:
    - temperature (float): relaxation temperature. (default: 0.5)

    Returns:
    - loss (float): mean contrastive loss
    """
    device = proj_feat1.device

    if len(proj_feat1) != len(proj_feat2):
        raise ValueError(f"Batch dimension of proj_feat1 ({len(proj_feat1)}) "
                         f"and proj_feat2 ({len(proj_feat2)}) should be same")

    batch_size = len(proj_feat1)  # N

    # L2-normalize both views and stack them: (2N, feat_size)
    stacked = torch.cat(
        [torch.nn.functional.normalize(proj_feat1, dim=1),
         torch.nn.functional.normalize(proj_feat2, dim=1)],
        dim=0,
    )

    # Pairwise cosine similarities, shape (2N, 2N)
    similarity_matrix = torch.nn.functional.cosine_similarity(
        stacked.unsqueeze(1), stacked.unsqueeze(0), dim=2
    )

    # Masks over the (2N, 2N) similarity matrix:
    # positives sit batch_size positions away on the "ring" of stacked rows;
    # negatives are every off-diagonal pair.
    pos_mask = torch.roll(torch.eye(2 * batch_size), batch_size, 1).to(device)
    neg_mask = (torch.ones(2 * batch_size) - torch.eye(2 * batch_size)).to(device)

    scaled_exp = torch.exp(similarity_matrix / temperature)
    numerator = scaled_exp[pos_mask.bool()]
    denominator = torch.sum(scaled_exp * neg_mask, dim=1)
    if (denominator < 1e-8).any():  # clamp to avoid division by 0
        denominator = torch.clamp(denominator, 1e-8)

    return torch.mean(-torch.log(numerator / denominator))
# add event to airtable
# NOTE(review): `atform` and `test_custom_contrastive_loss_fct` are supplied
# by the hosting notebook environment -- they are not defined in this file.
atform.add_event('Coding Exercise 6.1.1: Complete a SimCLR loss function')
## Uncomment below to test your function
test_custom_contrastive_loss_fct(custom_simclr_contrastive_loss)
6500105 | __all__ = ['coroutine',
'iscoroutinefunction', 'iscoroutine']
import functools
import inspect
import opcode
import os
import sys
import traceback
import types
from . import compat
from . import events
from . import futures
from .log import logger
# Opcode of "yield from" instruction
_YIELD_FROM = opcode.opmap['YIELD_FROM']
# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below). That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield from" with a
# coroutine call. Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines. A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
# Read the debug flag once at import time (see the explanation in the
# comment block above this assignment).
_DEBUG = (not sys.flags.ignore_environment and
          bool(os.environ.get('PYTHONASYNCIODEBUG')))

# types.coroutine is only available on Python 3.5+.
try:
    _types_coroutine = types.coroutine
except AttributeError:
    _types_coroutine = None

# inspect.iscoroutinefunction is only available on Python 3.5+.
try:
    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
    _inspect_iscoroutinefunction = lambda func: False

# collections.abc.Coroutine / Awaitable exist only on Python 3.5+.
try:
    from collections.abc import Coroutine as _CoroutineABC, \
        Awaitable as _AwaitableABC
except ImportError:
    _CoroutineABC = _AwaitableABC = None
# Check for CPython issue #21209
def has_yield_from_bug():
    """Detect CPython issue #21209.

    On affected interpreters, sending a tuple into a custom generator that
    is being driven through "yield from" unpacks the tuple instead of
    delivering it unchanged.
    """
    class MyGen:
        def __init__(self):
            self.send_args = None
        def __iter__(self):
            return self
        def __next__(self):
            return 42
        def send(self, *what):
            self.send_args = what
            return None
    def yield_from_gen(gen):
        yield from gen
    value = (1, 2, 3)
    gen = MyGen()
    coro = yield_from_gen(gen)
    next(coro)
    coro.send(value)
    # on buggy interpreters the tuple arrives unpacked, so send_args differs
    return gen.send_args != (value,)

_YIELD_FROM_BUG = has_yield_from_bug()
# only needed once, at import time
del has_yield_from_bug
def debug_wrapper(gen):
    """Wrap an 'async def' coroutine object in CoroWrapper (debug mode only)."""
    # This function is called from 'sys.set_coroutine_wrapper'.
    # We only wrap here coroutines defined via 'async def' syntax.
    # Generator-based coroutines are wrapped in @coroutine
    # decorator.
    return CoroWrapper(gen, None)
class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.
    # Proxies the generator/coroutine protocol to the wrapped object and
    # logs an error from __del__ if the coroutine was never iterated.

    def __init__(self, gen, func=None):
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        # remember where the coroutine was created for the __del__ report
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)

    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
        return '<%s %s>' % (self.__class__.__name__, coro_repr)

    def __iter__(self):
        return self

    def __next__(self):
        return self.gen.send(None)

    if _YIELD_FROM_BUG:
        # For CPython issue #21209: using "yield from" and a custom
        # generator, generator.send(tuple) unpacks the tuple instead of passing
        # the tuple unchanged. Check if the caller is a generator using "yield
        # from" to decide if the parameter should be unpacked or not.
        def send(self, *value):
            frame = sys._getframe()
            caller = frame.f_back
            assert caller.f_lasti >= 0
            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
                value = value[0]
            return self.gen.send(value)
    else:
        def send(self, value):
            return self.gen.send(value)

    def throw(self, exc):
        return self.gen.throw(exc)

    def close(self):
        return self.gen.close()

    @property
    def gi_frame(self):
        return self.gen.gi_frame

    @property
    def gi_running(self):
        return self.gen.gi_running

    @property
    def gi_code(self):
        return self.gen.gi_code

    if compat.PY35:
        # mirror the native-coroutine attributes introduced in Python 3.5

        def __await__(self):
            cr_await = getattr(self.gen, 'cr_await', None)
            if cr_await is not None:
                raise RuntimeError(
                    "Cannot await on coroutine {!r} while it's "
                    "awaiting for {!r}".format(self.gen, cr_await))
            return self

        @property
        def gi_yieldfrom(self):
            return self.gen.gi_yieldfrom

        @property
        def cr_await(self):
            return self.gen.cr_await

        @property
        def cr_running(self):
            return self.gen.cr_running

        @property
        def cr_code(self):
            return self.gen.cr_code

        @property
        def cr_frame(self):
            return self.gen.cr_frame

    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        if frame is None:
            frame = getattr(gen, 'cr_frame', None)
        # f_lasti == -1 means the coroutine body never started executing
        if frame is not None and frame.f_lasti == -1:
            msg = '%r was never yielded from' % self
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += ('\nCoroutine object created at '
                        '(most recent call last):\n')
                msg += tb.rstrip()
            logger.error(msg)
def coroutine(func):
    """Decorator to mark coroutines.

    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    if _inspect_iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        # Wrapping in CoroWrapper will happen via
        # 'sys.set_coroutine_wrapper' function.
        return func

    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        # plain function: wrap it in a generator that also drives any
        # future/generator/awaitable the function returns
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (futures.isfuture(res) or inspect.isgenerator(res) or
                    isinstance(res, CoroWrapper)):
                res = yield from res
            elif _AwaitableABC is not None:
                # If 'func' returns an Awaitable (new in 3.5) we
                # want to run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, _AwaitableABC):
                        res = yield from await_meth()
            return res

    if not _DEBUG:
        if _types_coroutine is None:
            wrapper = coro
        else:
            wrapper = _types_coroutine(coro)
    else:
        # debug mode: wrap every created coroutine in CoroWrapper
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial may lack __qualname__).
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w

    wrapper._is_coroutine = True  # For iscoroutinefunction().
    return wrapper
def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    # marker set by the @coroutine decorator; fall back to inspect on 3.5+
    marked = getattr(func, '_is_coroutine', False)
    return marked or _inspect_iscoroutinefunction(func)
# Types accepted by iscoroutine(); native coroutines are added only when
# collections.abc.Coroutine is available (Python 3.5+).
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
if _CoroutineABC is not None:
    _COROUTINE_TYPES += (_CoroutineABC,)
def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    # covers plain generators, CoroWrapper instances and (3.5+) native coroutines
    return isinstance(obj, _COROUTINE_TYPES)
def _format_coroutine(coro):
    """Return a human-readable description of *coro* for repr()/logging.

    The result has the form "name() running at file:line" or
    "name() done, defined at file:line".
    """
    assert iscoroutine(coro)
    coro_name = None
    if isinstance(coro, CoroWrapper):
        func = coro.func
        coro_name = coro.__qualname__
        if coro_name is not None:
            coro_name = '{}()'.format(coro_name)
    else:
        func = coro
    if coro_name is None:
        coro_name = events._format_callback(func, (), {})

    # generator-based coroutines expose gi_*, native coroutines cr_*
    try:
        coro_code = coro.gi_code
    except AttributeError:
        coro_code = coro.cr_code

    try:
        coro_frame = coro.gi_frame
    except AttributeError:
        coro_frame = coro.cr_frame

    filename = coro_code.co_filename
    lineno = 0
    if (isinstance(coro, CoroWrapper) and
            not inspect.isgeneratorfunction(coro.func) and
            coro.func is not None):
        # wrapped plain function: report where the function was defined
        source = events._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        if coro_frame is None:
            coro_repr = ('%s done, defined at %s:%s'
                         % (coro_name, filename, lineno))
        else:
            coro_repr = ('%s running, defined at %s:%s'
                         % (coro_name, filename, lineno))
    elif coro_frame is not None:
        # live frame: report the line currently executing
        lineno = coro_frame.f_lineno
        coro_repr = ('%s running at %s:%s'
                     % (coro_name, filename, lineno))
    else:
        lineno = coro_code.co_firstlineno
        coro_repr = ('%s done, defined at %s:%s'
                     % (coro_name, filename, lineno))

    return coro_repr
| StarcoderdataPython |
3406945 | import MySQLdb
import datetime
from copy import deepcopy
#current db on VM: playgroundapr8; on jas' computer: playgroundapr8; on github: playgroundapr11
def launchdb(host="localhost", user="root", passwd="", db="playgroundapr8"):
    """Open and return a MySQL connection.

    Connection settings can now be overridden per call; the defaults keep
    the original hard-coded behaviour, so existing no-argument callers are
    unaffected.

    NOTE(review): credentials should live in configuration or environment
    variables rather than in source code.
    """
    return MySQLdb.connect(host=host, user=user, passwd=passwd, db=db)
def launchcursor(db):
    """Create and return a new cursor from an open database connection."""
    return db.cursor()
def commitclose(cursor, db):
    """Commit pending work on *db*, then close the cursor and connection.

    The closes run in a ``finally`` block so that a failing commit no
    longer leaks the cursor and connection (previously they stayed open
    if ``commit()`` raised).
    """
    try:
        db.commit()
    finally:
        cursor.close()
        db.close()
| StarcoderdataPython |
9727344 | <reponame>anamaria-uofg/mass-spec-utils<gh_stars>1-10
# gnps.py
from .spectrum import SpectralRecord
from loguru import logger
def load_gnps_files(file_list):
    """Load one or more GNPS .mgf files into a dict of spectra.

    Args:
        file_list: a single .mgf path or a list of paths.

    Returns:
        dict mapping SPECTRUMID -> SpectralRecord, accumulated over all files.
    """
    # accept a single path as a convenience; isinstance is the idiomatic
    # type check (the old `type(x) == str` rejects str subclasses)
    if isinstance(file_list, str):
        file_list = [file_list]
    spectra = {}
    for file_name in file_list:
        spectra = load_mgf(file_name, id_field='SPECTRUMID', spectra=spectra)
    return spectra
def load_mgf(mgf_name, id_field='SCANS', spectra=None):
    """Parse an .mgf file into SpectralRecord objects.

    Args:
        mgf_name: path of the .mgf file to read.
        id_field: metadata field used as the record key; 'SCANS' values are
            converted to int, any other field is kept as a string.
        spectra: optional dict to accumulate records into (a new dict is
            created when None).

    Returns:
        dict mapping id value -> SpectralRecord.

    Fixes vs. previous version: removed the unused local ``got_record``
    and narrowed the bare ``except:`` to ``except KeyError:`` (the only
    exception the guarded dict lookup can raise).
    """
    if spectra is None:
        spectra = {}
    with open(mgf_name, 'r') as f:
        current_metadata = {'filename': mgf_name}
        current_peaks = []
        for line in f:
            line = line.rstrip()
            if len(line) == 0:
                continue
            if line.startswith('BEGIN IONS'):
                # flush the previous record (if any) before starting a new one
                if len(current_metadata) > 1:
                    if len(current_peaks) > 0:
                        try:
                            current_metadata['names'] = [current_metadata['COMPOUNDNAME']]
                        except KeyError:
                            # record has no COMPOUNDNAME; leave 'names' unset
                            pass
                        if id_field == 'SCANS':
                            id_val = int(current_metadata[id_field])
                        else:
                            id_val = current_metadata[id_field]
                        spectrum = SpectralRecord(float(current_metadata['PEPMASS']), current_peaks, current_metadata, mgf_name, id_val)
                        spectra[id_val] = spectrum
                        if len(spectra) % 100 == 0:
                            logger.debug("Loaded {} spectra".format(len(spectra)))
                current_metadata = {'filename': mgf_name}
                current_peaks = []
            elif len(line.split('=')) > 1:
                # it is a metadata line
                tokens = line.split('=')
                current_metadata[tokens[0]] = "=".join(tokens[1:])
            elif not line.startswith('END IONS'):
                # it's a peak
                tokens = line.split()
                mz = float(tokens[0])
                intensity = float(tokens[1])
                current_peaks.append((mz, intensity))
    # save the last one
    if len(current_peaks) > 0:
        try:
            current_metadata['names'] = [current_metadata['COMPOUNDNAME']]
        except KeyError:
            pass
        if id_field == 'SCANS':
            id_val = int(current_metadata[id_field])
        else:
            id_val = current_metadata[id_field]
        spectrum = SpectralRecord(float(current_metadata['PEPMASS']), current_peaks, current_metadata, mgf_name, id_val)
        spectra[id_val] = spectrum
    return spectra
| StarcoderdataPython |
6414794 | <filename>backend/api/urls.py
from .views import RegisterAPI, LoginAPI, ReactAPI, CommentAPI
from django.urls import path
from knox import views as knox_views
# API routes: knox-backed auth endpoints plus reaction/comment endpoints.
urlpatterns = [
    path('register/', RegisterAPI.as_view(), name='register'),
    path('login/', LoginAPI.as_view(), name='login'),
    path('logout/', knox_views.LogoutView.as_view(), name='logout'),
    path('logout_all/', knox_views.LogoutAllView.as_view(), name='logout_all'),
    path('react/', ReactAPI.as_view(), name='react'),
    path('comment/', CommentAPI.as_view(), name='comment')
]
| StarcoderdataPython |
56270 | <filename>test-app/behave/steps/autocomplete.py
####
#### Steps centered around dealing with the various
#### autocomplete/search boxes.
####
from behave import *
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@given('I type "{text}" into the general search')
def step_impl(context, text):
    """Enter *text* into the general search input box."""
    search_box = context.browser.find_element_by_id('gsf-query')
    search_box.send_keys(text)
@given('I submit the general search')
def step_impl(context):
    """Submit the general search form."""
    form = context.browser.find_element_by_id('query-form')
    form.submit()
# @given('I wait "{seconds}" seconds')
# def step_impl(context, seconds):
# context.browser.implicitly_wait(int(seconds))
## TODO/BUG: Make use of the explicit waits instead of the (rather
## lame) implicit waits:
## http://selenium-python.readthedocs.org/en/latest/waits.html
@given('I wait until "{item}" appears in the autocomplete')
def step_impl(context, item):
    """Poll (implicitly, up to 10s) until a link with text *item* exists."""
    ## Implicitly poll for items to appear for 10 seconds.
    context.browser.implicitly_wait(10)
    webelt = context.browser.find_element_by_link_text(item)
    ## TODO:
    # wait = WebDriverWait(context.browser, 10)
    # element = wait.until(
    #     EC.element_to_be_clickable((By.ID,'someid'))
    # )
    # visibility_of_element_located(
    # element = WebDriverWait(context.browser, 1000).until(
    #     EC.text_to_be_present_in_element(
    #         context.browser.find_element_by_class_name('ui-autocomplete'), item
    #     )
    # )
@given('I click the general search item "{item}"')
def step_impl(context, item):
    """Click the autocomplete result whose link text equals *item*."""
    result_link = context.browser.find_element_by_link_text(item)
    result_link.click()
| StarcoderdataPython |
9749337 | #!/usr/bin/env python
"""
synopsis:
Custom routing Router to Mama (ROUTER to REQ)
Author: <NAME> (brainsik) <spork(dash)zmq(at)theory(dot)org>
Modified for tornado/ioloop: <NAME> <dkuhlman(at)davekuhlman(dot)org>
usage:
python rtreq.py
"""
import sys
import random
import zmq
from functools import partial
from zmq.eventloop.future import Context
from zmq.eventloop.ioloop import IOLoop
from tornado import gen
import zhelpers
NBR_WORKERS = 10
@gen.coroutine
def worker_task(id, context=None):
    """REQ worker: repeatedly announce readiness and process workloads.

    Terminates when the router replies b"END"; returns a
    ('worker <id>', total) tuple via gen.Return.
    """
    context = context or Context.instance()
    worker = context.socket(zmq.REQ)

    # We use a string identity for ease here
    zhelpers.set_id(worker)
    worker.connect("tcp://localhost:5671")

    total = 0
    while True:
        # Tell the router we're ready for work
        yield worker.send(b"ready")

        # Get workload from router, until finished
        workload = yield worker.recv()
        #print('(worker {}) received: {}'.format(id, workload))
        finished = workload == b"END"
        if finished:
            print("worker %d processed: %d tasks" % (id, total))
            break
        total += 1

        # Do some random work
        yield gen.sleep(0.1 * random.random())
    raise gen.Return(('worker {}'.format(id), total))
@gen.coroutine
def requestor(client):
    """ROUTER side: hand out 10 workloads per worker, then stop each worker."""
    for _ in range(NBR_WORKERS * 10):
        # LRU worker is next waiting in the queue
        address, empty, ready = yield client.recv_multipart()
        yield client.send_multipart([
            address,
            b'',
            b'This is the workload',
        ])
    # Now ask mama to shut down and report their results
    for _ in range(NBR_WORKERS):
        address, empty, ready = yield client.recv_multipart()
        yield client.send_multipart([
            address,
            b'',
            b'END',
        ])
    raise gen.Return(('requestor', 'finished'))
@gen.coroutine
def run(loop):
    """Bind the ROUTER socket and run all workers plus the requestor concurrently."""
    context = Context.instance()
    client = context.socket(zmq.ROUTER)
    client.bind("tcp://*:5671")
    # yielding a list of futures waits for all of them (tornado gen)
    responses = yield [
        worker_task(idx) for idx in range(NBR_WORKERS)
    ] + [requestor(client)]
    print('responses: {}'.format(responses))
def main():
    """Entry point: validate CLI arguments and drive the IOLoop until done."""
    if len(sys.argv) > 1:
        # any extra argument is an error; show the usage docstring
        sys.exit(__doc__)
    try:
        event_loop = IOLoop.current()
        event_loop.run_sync(partial(run, event_loop))
        print('(main) exiting')
    except KeyboardInterrupt:
        print('\nFinished (interrupted)')
        sys.exit(0)
if __name__ == '__main__':
    main()
    # reached only on the normal (non-interrupted) path
    print('(program) finished')
| StarcoderdataPython |
6514149 | # -*- coding: utf-8 -*-
from juriscraper.OpinionSite import OpinionSite
from juriscraper.WebDriven import WebDriven
class OpinionSiteWebDriven(OpinionSite, WebDriven):
    """OpinionSite variant whose pages are fetched through a Selenium webdriver."""

    def __init__(self, *args, **kwargs):
        super(OpinionSiteWebDriven, self).__init__(*args, **kwargs)
        # NOTE(review): args/kwargs are passed as two positional values here,
        # not unpacked with *args/**kwargs -- looks intentional given
        # WebDriven's signature, but confirm against juriscraper.WebDriven.
        WebDriven.__init__(self, args, kwargs)

    def __del__(self):
        # tear down both the HTTP session and the webdriver session
        self.close_session()
        self.close_webdriver_session()
| StarcoderdataPython |
4929884 | """Hidden Markov Model example.
https://github.com/pyro-ppl/numpyro/blob/master/examples/hmm.py
"""
import argparse
from typing import Callable, Dict, Tuple
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
import numpyro
import numpyro.distributions as dist
from jax import lax, random
from jax.scipy.special import logsumexp
from numpyro.infer import MCMC, NUTS
from scipy.stats import gaussian_kde
def simulate_data(
    rng_key: np.ndarray,
    num_categories: int,
    num_words: int,
    num_supervised: int,
    num_unsupservised: int,  # NOTE(review): parameter name is a typo for "num_unsupervised" but is part of the public interface
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:
    """Simulate an HMM word sequence with a supervised and unsupervised part.

    Samples transition/emission matrices from Dirichlet priors, then rolls
    out the chain; the category is re-drawn from the uniform start
    distribution at t == 0 and again at t == num_supervised.

    Returns:
        (transition_prob, emission_prob, supervised_categories,
         supervised_words, unsupervised_words)
    """
    rng_key, rng_key_transition, rng_key_emission = random.split(rng_key, 3)

    transition_prior = jnp.ones(num_categories)
    emission_prior = jnp.repeat(0.1, num_words)

    transition_prob = dist.Dirichlet(transition_prior).sample(
        rng_key_transition, sample_shape=(num_categories,)
    )
    emission_prob = dist.Dirichlet(emission_prior).sample(
        rng_key_emission, sample_shape=(num_categories,)
    )

    start_prob = jnp.repeat(1.0 / num_categories, num_categories)
    category = 0
    categories = []
    words = []
    for t in range(num_supervised + num_unsupservised):
        rng_key, rng_key_transition, rng_key_emission = random.split(rng_key, 3)
        if t == 0 or t == num_supervised:
            # restart from the uniform start distribution at both boundaries
            category = dist.Categorical(start_prob).sample(rng_key_transition)
        else:
            category = dist.Categorical(transition_prob[category]).sample(rng_key_transition)
        word = dist.Categorical(emission_prob[category]).sample(rng_key_emission)
        categories.append(category)
        words.append(word)

    # Split data into supervised and unsupervised
    categories = jnp.stack(categories)
    words = jnp.stack(words)
    supervised_categories = categories[:num_supervised]
    supervised_words = words[:num_supervised]
    unsupervised_words = words[num_supervised:]

    return (
        transition_prob,
        emission_prob,
        supervised_categories,
        supervised_words,
        unsupervised_words,
    )
def forward_one_step(
    prev_log_prob: jnp.ndarray,
    curr_word: int,
    transition_log_prob: jnp.ndarray,
    emission_log_prob: jnp.ndarray,
) -> jnp.ndarray:
    """Advance the HMM forward recursion by one observed word.

    Returns new_log_prob[j] = logsumexp_i(prev[i] + trans[i, j]) + emis[j, word].
    """
    # (num_cat, 1) + (num_cat, num_cat) + (num_cat,) broadcasts so that cell
    # (i, j) holds prev[i] + trans[i, j] + emis[j, curr_word]
    joint = (
        prev_log_prob[:, None]
        + transition_log_prob
        + emission_log_prob[:, curr_word]
    )
    return logsumexp(joint, axis=0)
def forward_log_prob(
    init_log_prob: jnp.ndarray,
    words: jnp.ndarray,
    transition_log_prob: jnp.ndarray,
    emission_log_prob: jnp.ndarray,
) -> jnp.ndarray:
    """Run the full HMM forward recursion over *words* using lax.scan."""

    def step(carry: jnp.ndarray, word: int) -> Tuple[jnp.ndarray, jnp.ndarray]:
        # fold one observation into the running per-category log-probabilities;
        # the empty array is a placeholder since we don't collect outputs
        next_carry = forward_one_step(
            carry, word, transition_log_prob, emission_log_prob
        )
        return next_carry, jnp.zeros((0,))

    final_log_prob, _ = lax.scan(step, init_log_prob, words)
    return final_log_prob
def semi_supervised_hmm(
    num_categories: int,
    num_words: int,
    supervised_categories: jnp.ndarray,
    supervised_words: jnp.ndarray,
    unsupervised_words: jnp.ndarray,
) -> None:
    """NumPyro model for a semi-supervised hidden Markov model.

    Transition and emission matrices get Dirichlet priors; the supervised
    part conditions directly on observed categories/words, while the
    unsupervised words contribute their marginal likelihood via the
    forward algorithm as a numpyro.factor term.
    """
    transition_prior = jnp.ones(num_categories)
    emission_prior = jnp.repeat(0.1, num_words)

    transition_prob = numpyro.sample(
        "transition_prob",
        dist.Dirichlet(jnp.broadcast_to(transition_prior, (num_categories, num_categories))),
    )
    emission_prob = numpyro.sample(
        "emission_prob",
        dist.Dirichlet(jnp.broadcast_to(emission_prior, (num_categories, num_words))),
    )

    # supervised part: observed category transitions and emitted words
    numpyro.sample(
        "supervised_categories",
        dist.Categorical(transition_prob[supervised_categories[:-1]]),
        obs=supervised_categories[1:],
    )
    numpyro.sample(
        "supervised_words",
        dist.Categorical(emission_prob[supervised_categories]),
        obs=supervised_words,
    )

    # unsupervised part: marginalize hidden categories with the forward algorithm
    transition_log_prob = jnp.log(transition_prob)
    emission_log_prob = jnp.log(emission_prob)
    init_log_prob = emission_log_prob[:, unsupervised_words[0]]
    log_prob = forward_log_prob(
        init_log_prob, unsupervised_words[1:], transition_log_prob, emission_log_prob
    )
    log_prob = logsumexp(log_prob, axis=0, keepdims=True)
    numpyro.factor("forward_log_prob", log_prob)
def inference(
    model: Callable,
    num_categories: int,
    num_words: int,
    supervised_categories: jnp.ndarray,
    supervised_words: jnp.ndarray,
    unsupervised_words: jnp.ndarray,
    rng_key: np.ndarray,
    *,
    num_warmup: int = 500,
    num_samples: int = 1000,
    num_chains: int = 1,
    verbose: bool = True,
) -> Dict[str, jnp.ndarray]:
    """Run NUTS over *model* and return the posterior sample dict."""
    sampler = MCMC(
        NUTS(model),
        num_warmup=num_warmup,
        num_samples=num_samples,
        num_chains=num_chains,
    )
    sampler.run(
        rng_key,
        num_categories,
        num_words,
        supervised_categories,
        supervised_words,
        unsupervised_words,
    )
    if verbose:
        sampler.print_summary()
    return sampler.get_samples()
def print_results(
    posterior: Dict[str, jnp.ndarray],
    transition_prob: jnp.ndarray,
    emission_prob: jnp.ndarray,
) -> None:
    """Print true transition/emission probabilities next to posterior quartiles.

    Args:
        posterior: sample dict with "transition_prob" (samples, k, k) and
            "emission_prob" (samples, k, w) arrays.
        transition_prob: true transition matrix used to simulate the data.
        emission_prob: true emission matrix used to simulate the data.
    """
    header = "semi_supervised_hmm - TRAIN"
    # BUG FIX: the last column header was missing its closing parenthesis.
    columns = ["", "ActualProb", "Pred(p25)", "Pred(p50)", "Pred(p75)"]
    header_format = "{:>20} {:>10} {:>10} {:>10} {:>10}"
    row_format = "{:>20} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}"
    print("\n", "=" * 20 + header + "=" * 20, "\n")
    print(header_format.format(*columns))
    # quantiles has shape (3, k, k): the 25/50/75 percentiles per cell
    quantiles = np.quantile(posterior["transition_prob"], [0.25, 0.5, 0.75], axis=0)
    for i in range(transition_prob.shape[0]):
        for j in range(transition_prob.shape[1]):
            idx = f"transition[{i},{j}]"
            print(row_format.format(idx, transition_prob[i, j], *quantiles[:, i, j]), "\n")
    quantiles = np.quantile(posterior["emission_prob"], [0.25, 0.5, 0.75], axis=0)
    for i in range(emission_prob.shape[0]):
        for j in range(emission_prob.shape[1]):
            idx = f"emission[{i},{j}]"
            print(row_format.format(idx, emission_prob[i, j], *quantiles[:, i, j]), "\n")
def plot_results(
    posterior: Dict[str, jnp.ndarray],
    transition_prob: jnp.ndarray,
) -> None:
    """Plot KDEs of posterior transition probabilities against true values.

    Opens a blocking matplotlib window with one subplot per matrix cell;
    the dashed vertical line marks the true simulated probability.
    """
    prop_cycle = plt.rcParams["axes.prop_cycle"]
    colors = prop_cycle.by_key()["color"]
    plt.figure(figsize=(14, 6))
    x = np.linspace(0, 1, 101)
    index = 0
    for i in range(transition_prob.shape[0]):
        for j in range(transition_prob.shape[1]):
            # smooth the posterior samples for this cell with a Gaussian KDE
            y = gaussian_kde(posterior["transition_prob"][:, i, j])(x)
            # NOTE(review): "trnas_prob" in the title is a typo for "trans_prob"
            title = f"Posterior: trnas_prob[{i},{j}], true value={transition_prob[i, j]:.2f}"
            plt.subplot(transition_prob.shape[0], transition_prob.shape[1], index + 1)
            plt.plot(x, y, color=colors[index])
            plt.axvline(transition_prob[i, j], linestyle="--", color=colors[index], alpha=0.6)
            plt.xlabel("Probability")
            plt.ylabel("Frequency")
            plt.title(title)
            index += 1
    plt.tight_layout()
    plt.show()
def main(args: argparse.Namespace) -> None:
    """Simulate HMM data, run MCMC inference, then report and plot results."""
    numpyro.set_platform(args.device)
    numpyro.set_host_device_count(args.num_chains)
    (
        transition_prob,
        emission_prob,
        supervised_categories,
        supervised_words,
        unsupervised_words,
    ) = simulate_data(
        random.PRNGKey(1),
        num_categories=args.num_categories,
        num_words=args.num_words,
        num_supervised=args.num_supervised,
        num_unsupservised=args.num_unsupervised,
    )
    rng_key = random.PRNGKey(2)
    posterior = inference(
        semi_supervised_hmm,
        args.num_categories,
        args.num_words,
        supervised_categories,
        supervised_words,
        unsupervised_words,
        rng_key,
        num_warmup=args.num_warmup,
        num_samples=args.num_samples,
        num_chains=args.num_chains,
    )
    print_results(posterior, transition_prob, emission_prob)
    plot_results(posterior, transition_prob)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--num-categories", type=int, default=3)
parser.add_argument("--num-words", type=int, default=10)
parser.add_argument("--num-supervised", type=int, default=100)
parser.add_argument("--num-unsupervised", type=int, default=500)
parser.add_argument("--num-samples", type=int, default=3000)
parser.add_argument("--num-warmup", type=int, default=1500)
parser.add_argument("--num-chains", type=int, default=4)
parser.add_argument("--device", type=str, default="cpu")
args = parser.parse_args()
main(args)
| StarcoderdataPython |
6583481 | """A ``tesliper``'s main data storage."""
import logging as lgg
from collections import Counter, OrderedDict
from collections.abc import ItemsView, KeysView, ValuesView
from contextlib import contextmanager
from inspect import Parameter
from itertools import chain
from reprlib import recursive_repr
from typing import Callable, Iterable, Optional, Sequence, Union
import numpy as np
from tesliper.exceptions import InconsistentDataError, TesliperError
from .. import datawork as dw
from . import arrays as ar
from .array_base import _ARRAY_CONSTRUCTORS
# LOGGER
# Module-level logger; DEBUG level is set here so that handlers attached by
# the embedding application decide what actually gets emitted.
logger = lgg.getLogger(__name__)
logger.setLevel(lgg.DEBUG)
# TYPE HINTS
# Union of every DataArray subclass that `Conformers.arrayed` may return;
# used purely for static typing of that method's return value.
AnyArray = Union[
    ar.DataArray,
    ar.Energies,
    ar.FloatArray,
    ar.FilenamesArray,
    ar.InfoArray,
    ar.BooleanArray,
    ar.IntegerArray,
    ar.Bands,
    ar.VibrationalData,
    ar.ScatteringData,
    ar.ElectronicData,
    ar.VibrationalActivities,
    ar.ScatteringActivities,
    ar.ElectronicActivities,
    ar.Transitions,
    ar.Geometry,
]
# CLASSES
class _KeptItemsView(ItemsView):
def __init__(self, mapping, indices=False):
super().__init__(mapping)
self.indices = indices
def __contains__(self, item):
key, value = item
try:
kept = self._mapping.kept[self._mapping.index_of(key)]
except KeyError:
return False
else:
if not kept:
return False
else:
v = self._mapping[key]
return v is value or v == value
def __iter__(self):
indices = self.indices
for idx, (key, kept) in enumerate(zip(self._mapping, self._mapping.kept)):
if kept:
value = self._mapping[key]
yield key, value if not indices else (idx, key, value)
def __reversed__(self):
yield from iter(reversed(list(self)))
class _KeptValuesView(ValuesView):
def __init__(self, mapping, indices=False):
super().__init__(mapping)
self.indices = indices
def __contains__(self, value):
for key, kept in zip(self._mapping, self._mapping.kept):
v = self._mapping[key]
if (v is value or v == value) and kept:
return True
return False
def __iter__(self):
indices = self.indices
for idx, (key, kept) in enumerate(zip(self._mapping, self._mapping.kept)):
if kept:
value = self._mapping[key]
yield value if not indices else (idx, value)
def __reversed__(self):
yield from iter(reversed(list(self)))
class _KeptKeysView(KeysView):
def __init__(self, mapping, indices=False):
super().__init__(mapping)
self.indices = indices
def __contains__(self, key):
try:
return self._mapping.kept[self._mapping.index_of(key)]
except KeyError:
return False
def __iter__(self):
indices = self.indices
for idx, (key, kept) in enumerate(zip(self._mapping, self._mapping.kept)):
if kept:
yield key if not indices else (idx, key)
def __reversed__(self):
yield from iter(reversed(list(self)))
class Conformers(OrderedDict):
"""Container for data extracted from quantum chemical software output files.
Data for each file is stored in the underlying OrderedDict, under the key of
said file's name. Its values are dictionaries with genres name (as key)
and appropriate data pairs. Beside this, its essential functionality is
transformation of stored data to corresponding DataArray objects with
use of :meth:`.arrayed` method. It provides some control over this transformation,
especially in terms of including/excluding particular conformers' data
on creation of new DataArray instance. This type of control is here called
trimming. Trimming can be achieved by use of various *trim* methods defined
in this class or by direct changes to :attr:`.kept` attribute. See its
documentation for more information.
Attributes
----------
primary_genres
Class attribute. Data genres considered most important, used as default when
checking for conformers completeness (see :meth:`.trim_incomplete` method).
Notes
-----
Inherits from collections.OrderedDict.
"""
primary_genres = tuple(
"dip rot vosc vrot losc lrot raman1 roa1 scf zpe ent ten gib".split()
)
def __init__(
self,
*args,
allow_data_inconsistency: bool = False,
temperature_of_the_system: float = 298.15,
**kwargs,
):
"""
Parameters
----------
*args
list of arguments for creation of underlying dictionary
allow_data_inconsistency : bool, optional
specifies if data inconsistency should be allowed in created DataArray
object instances, defaults to False
temperature_of_the_system : float, optional
Temperature of the system in Kelvin units, must be zero or higher.
Defaults to room temperature = 298.15 K.
**kwargs
list of arbitrary keyword arguments for creation of underlying
dictionary
"""
self.allow_data_inconsistency = allow_data_inconsistency
self.temperature = temperature_of_the_system
self.kept = []
self.filenames = []
self._indices = {}
super().__init__(*args, **kwargs)
@property
def temperature(self) -> float:
"""Temperature of the system expressed in Kelvin units.
Value of this parameter is passed to :term:`data array`\\s created with the
:meth:`.arrayed` method, provided that the target data array class supports a
parameter named *t* in it's constructor.
.. versionadded:: 0.9.1
Raises
------
ValueError
if set to a value lower than zero.
"""
return vars(self)["temperature"]
@temperature.setter
def temperature(self, value):
if value <= 0:
raise ValueError(
"Temperature of the system must be higher than absolute zero."
)
vars(self)["temperature"] = value
def clear(self):
"""Remove all items from the Conformers instance."""
self._kept = []
self.filenames = []
self._indices = {}
self.temperature = 298.15
super().clear()
@recursive_repr()
def __repr__(self) -> str:
if not self:
return (
f"{self.__class__.__name__}("
f"allow_data_inconsistency={self.allow_data_inconsistency})"
)
return (
f"{self.__class__.__name__}({list(self.items())}, "
f"allow_data_inconsistency={self.allow_data_inconsistency})"
)
def __setitem__(self, key, value):
try:
value = dict(value)
except TypeError as error:
raise TypeError("Can't convert given value to dictionary.") from error
except ValueError as error:
raise ValueError("Can't convert given value to dictionary.") from error
if key in self:
index = self._indices[key]
else:
index = len(self.filenames)
self.filenames.append(key)
self.kept.append(True)
super().__setitem__(key, value)
self._indices[key] = index
def __delitem__(self, key):
index = self._indices[key]
super().__delitem__(key)
del self.filenames[index]
del self.kept[index]
del self._indices[key]
for index, key in enumerate(self.keys()):
self._indices[key] = index
def popitem(self, last=True):
"""Remove and return a (key, value) pair from the dictionary.
Pairs are returned in LIFO order if last is true or FIFO order if false.
"""
idx = -1 if last else 0
try:
key = self.filenames[idx]
return key, self.pop(key)
except IndexError:
raise KeyError("Conforemres is empty.")
def move_to_end(self, key, last=True):
"""Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
"""
idx = self.index_of(key)
new_idx = 0 if not last else len(self.filenames) - 1
super().move_to_end(key, last)
self.filenames.insert(new_idx, self.filenames.pop(idx))
self._kept.insert(new_idx, self._kept.pop(idx))
self._indices = {k: i for i, k in enumerate(self.filenames)}
def copy(self):
"conformers.copy() -> a shallow copy of conformers"
cp = self.__class__(
allow_data_inconsistency=self.allow_data_inconsistency,
temperature_of_the_system=self.temperature,
**self,
)
cp.kept = self.kept
return cp
@property
def kept(self):
"""List of booleans, one for each conformer stored, defining if
particular conformers data should be included in corresponding DataArray
instance, created by :meth:`.arrayed` method. It may be changed by use of trim
methods, by setting its value directly, or by modification of the
underlying list. For the first option refer to those methods
documentation, for rest see the Examples section.
Returns
-------
list of bool
List of booleans, one for each conformer stored, defining if
particular conformers data should be included in corresponding
DataArray instance.
Raises
------
TypeError
If assigned values is not a sequence.
If elements of given sequence are not one of types: bool, int, str.
ValuesError
If number of given boolean values doesn't match number of contained
conformers.
KeyError
If any of given string values is not in underlying dictionary keys.
IndexError
If any of given integer values is not in range
0 <= i < number of conformers.
Examples
--------
New list of values can be set in a few ways. Firstly, it is the
most straightforward to just assign a new list of boolean values to
the :attr:`.kept` attribute. This list should have the same number of elements
as the number of conformers contained. A ValueError is raised if it
doesn't.
>>> c = Conformers(one={}, two={}, tree={})
>>> c.kept
[True, True, True]
>>> c.kept = [False, True, False]
>>> c.kept
[False, True, False]
>>> c.kept = [False, True, False, True]
Traceback (most recent call last):
...
ValueError: Must provide boolean value for each known conformer.
4 values provided, 3 excepted.
Secondly, list of filenames of conformers intended to be kept may be
given. Only these conformers will be kept. If given filename is not in
the underlying Conformers' dictionary, KeyError is raised.
>>> c.kept = ['one']
>>> c.kept
[True, False, False]
>>> c.kept = ['two', 'other']
Traceback (most recent call last):
...
KeyError: Unknown conformers: other.
Thirdly, list of integers representing conformers indices may be given.
Only conformers with specified indices will be kept. If one of given integers
can't be translated to conformer's index, IndexError is raised. Indexing with
negative values is not supported currently.
>>> c.kept = [1, 2]
>>> c.kept
[False, True, True]
>>> c.kept = [2, 3]
Traceback (most recent call last):
...
IndexError: Indexes out of bounds: 3.
Fourthly, assigning ``True`` or ``False`` to this attribute will mark all
conformers as kept or not kept respectively.
>>> c.kept = False
>>> c.kept
[False, False, False]
>>> c.kept = True
>>> c.kept
[True, True, True]
Lastly, list of kept values may be modified by setting its elements
to True or False. It is advised against, however, as mistake such as
``c.kept[:2] = [True, False, False]`` will break some functionality by
forcibly changing size of :attr:`.kept` list.
Notes
-----
Type of the first element of given sequence is used for dynamic
dispatch.
"""
# TODO: Consider making return value immutable.
return self._kept
@kept.setter
def kept(self, blade: Union[Sequence[bool], Sequence[str], Sequence[int], bool]):
if blade is True or blade is False:
self._kept = [blade for _ in self.keys()]
return
try:
first = blade[0]
except (TypeError, KeyError):
raise TypeError(f"Excepted sequence or boolean, got: {type(blade)}.")
except IndexError:
self._kept = [False for _ in self.keys()]
return # empty sequence is understood as "keep nothing"
if isinstance(first, (str, np.str_)):
blade = set(blade)
if not blade.issubset(self.keys()):
raise KeyError(f"Unknown conformers: {', '.join(blade-self.keys())}")
else:
self._kept = [fnm in blade for fnm in self.keys()]
elif isinstance(first, (bool, np.bool_)):
if not len(blade) == len(self):
raise ValueError(
f"Must provide boolean value for each known conformer. "
f"{len(blade)} values provided, {len(self)} excepted."
)
else:
self._kept = [bool(b) for b in blade] # convert from np.bool_
elif isinstance(first, (int, np.integer)):
length = len(self)
out_of_bounds = [b for b in blade if not 0 <= b < length]
if out_of_bounds:
raise IndexError(
f"Indexes out of bounds: "
f"{', '.join(str(n) for n in out_of_bounds)}."
)
else:
blade = set(blade)
self._kept = [num in blade for num in range(len(self))]
else:
raise TypeError(
f"Expected sequence of strings, integers or booleans, got: "
f"{type(first)} as first sequence's element."
)
def update(self, other=None, **kwargs):
"""Works like ``dict.update``, but if key is already present, it updates
dictionary associated with given key rather than assigning new value.
Keys of dictionary passed as positional parameter (or additional keyword
arguments given) should be conformers' identifiers and its values should be
dictionaries of {"genre": values} for those conformers.
Please note, that values of status genres like 'optimization_completed'
and 'normal_termination' will be updated as well for such key,
if are present in given new values.
"""
if other is not None:
other = dict(other)
else:
other = dict()
items = chain(other.items(), kwargs.items())
for key, value in items:
if key in self:
self[key].update(value)
else:
self[key] = value
def arrayed(
self, genre: str, full: bool = False, strict: bool = True, **kwargs
) -> AnyArray:
"""Lists requested data and returns as appropriate :class:`.DataArray` instance.
.. versionadded:: 0.9.1
The *strict* parameter.
Parameters
----------
genre
String representing data genre. Must be one of known genres.
full
Boolean indicating if full set of data should be taken, ignoring
any trimming conducted earlier. Defaults to ``False``.
strict
Boolean indicating if additional kwargs that doesn't match signature of data
array's constructor should cause an exception as normally (``strict =
True``) or be silently ignored (``strict = False``). Defaults to ``True``.
kwargs
Additional keyword parameters passed to data array constructor.
Any explicitly given parameters will take precedence over automatically
retrieved and default values.
Returns
-------
DataArray
Arrayed data of desired genre as appropriate :class:`.DataArray` object.
Notes
-----
For now, the special "filenames" genre always ignores *kwargs*.
"""
try:
cls = _ARRAY_CONSTRUCTORS[genre] # ArrayBase subclasses
except KeyError:
raise ValueError(f"Unknown genre '{genre}'.")
if genre == "filenames":
# return early if filenames requested
return cls(
genre=genre,
filenames=list(self.kept_keys() if not full else self.keys()),
allow_data_inconsistency=self.allow_data_inconsistency,
)
view = self.kept_items() if not full else self.items()
array = ((fname, conf, conf[genre]) for fname, conf in view if genre in conf)
try:
filenames, confs, values = zip(*array)
except ValueError: # if no elements in `array`
logger.debug(
f"Array of gerne {genre} requested, but no such data available "
f"or conformers providing this data were trimmed off. "
f"Returning an empty array."
)
filenames, confs, values = [], [], []
default_params = cls.get_init_params()
default_params["genre"] = genre
default_params["filenames"] = filenames
default_params["values"] = values
default_params["allow_data_inconsistency"] = self.allow_data_inconsistency
init_params = {}
for key, value in default_params.items():
if key in kwargs:
# explicitly given keyword parameters take precedence
init_params[key] = kwargs.pop(key)
continue
if key == "t":
# if not given explicitly, temperature is taken form self
init_params[key] = self.temperature
continue
if not isinstance(default_params[key], Parameter):
# if value for parameter is already established, just take it
init_params[key] = value
continue
param_genre = ( # maybe ``key`` is not a param's genre name
value.genre_getter(genre) if hasattr(value, "genre_getter") else key
)
try:
init_params[key] = [conf[param_genre] for conf in confs]
except KeyError:
# can't retrieve ``param_genre`` data from each included conformer
# set param to its default value or raise an error if it don't have one
if value.default is not value.empty:
init_params[key] = value.default
else:
raise TesliperError(
f"One or more conformers does not provide value for "
f"'{param_genre}' genre, needed to instantiate {cls.__name__} "
"object. You may provide missing values as a keyword parameters"
" to the `Conformers.arrayed()` method call."
)
if not init_params[key] and value.default is not value.empty:
# genre produces an empty array, but parameter has default value
init_params[key] = value.default
if kwargs and strict:
# any kwargs not popped till now are not expected by the ``cls.__init__()``
# if ``strict`` handling requested, add them anyway to cause an exception
init_params.update(**kwargs)
return cls(**init_params)
def by_index(self, index: int) -> dict:
"""Returns data for conformer on desired index."""
return self[self.filenames[index]]
def key_of(self, index: int) -> str:
"""Returns name of conformer associated with given index."""
return self.filenames[index]
def index_of(self, key: str) -> int:
"""Return index of given key."""
try:
return self._indices[key]
except KeyError as error:
raise KeyError(f"No such conformer: {key}.") from error
def has_genre(self, genre: str, ignore_trimming: bool = False) -> bool:
"""Checks if any of stored conformers contains data of given genre.
Parameters
----------
genre : str
Name of genre to test.
ignore_trimming : bool
If all known conformers should be considered (``ignore_trimming = True``)
or only kept ones (``ignore_trimming = False``, default).
Returns
-------
bool
Boolean value indicating if any of stored conformers contains data
of genre in question."""
conformers = self.values() if ignore_trimming else self.kept_values()
for conformer in conformers:
if genre in conformer:
return True
return False
def has_any_genre(
self, genres: Iterable[str], ignore_trimming: bool = False
) -> bool:
"""Checks if any of stored conformers contains data of any of given
genres.
Parameters
----------
genres : iterable of str
List of names of genres to test.
ignore_trimming : bool
If all known conformers should be considered (``ignore_trimming = True``)
or only kept ones (``ignore_trimming = False``, default).
Returns
-------
bool
Boolean value indicating if any of stored conformers contains data
of any of genres in question."""
conformers = self.values() if ignore_trimming else self.kept_values()
for conformer in conformers:
for genre in genres:
if genre in conformer:
return True
return False
def all_have_genres(
self, genres: Iterable[str], ignore_trimming: bool = False
) -> bool:
"""Checks if all stored conformers contains data of given genres.
Parameters
----------
genres : iterable of str
List of names of genres to test.
ignore_trimming : bool
If all known conformers should be considered (``ignore_trimming = True``)
or only kept ones (``ignore_trimming = False``, default).
Returns
-------
bool
Boolean value indicating if each stored conformers contains data
of all genres in question."""
genres = set(genres)
conformers = self.values() if ignore_trimming else self.kept_values()
for conformer in conformers:
if genres - conformer.keys():
return False
return True
def trim_incomplete(
self, wanted: Optional[Iterable[str]] = None, strict: bool = False
) -> None:
"""Mark incomplete conformers as "not kept".
Conformers that does not contain one or more data genres specified as *wanted*
will be marked as "not kept". If *wanted* parameter is not given, it evaluates
to :attr:`.primary_genres`. If no conformer contains all *wanted* genres,
conformers that match the specification most closely are kept. The "closeness"
is defined by number of conformer's genres matching *wanted* genres in the first
place (the more, the better) and the position of particular genre in *wanted*
list in the second place (the closer to the beginning, the better). This
"match closest" behaviour may be turned off by setting parameter
*strict* to ``True``. In such case, only conformers containing all *wanted*
genres will be kept.
Parameters
----------
wanted
List of data genres used as completeness reference.
If not given, evaluates to :attr:`.primary_genres`.
strict
Indicates if all *wanted* genres must be present in the kept conformers
(``strict=True``) or if "match closest" mechanism should be used
as a fallback (``strict=False``, this is the default).
Notes
-----
Conformers previously marked as "not kept" will not be affected.
"""
wanted = wanted if wanted is not None else self.primary_genres
if not strict:
count = [tuple(g in conf for g in wanted) for conf in self.values()]
if not count:
return
best_match = max(count)
complete = (match == best_match for match in count)
else:
complete = (all(g in conf for g in wanted) for conf in self.values())
blade = [kept and cmpl for kept, cmpl in zip(self.kept, complete)]
self._kept = blade
def trim_imaginary_frequencies(self) -> None:
"""Mark all conformers with imaginary frequencies as "not kept".
Notes
-----
Conformers previously marked as "not kept" will not be affected.
Conformers that doesn't contain "freq" genre will be treated as not having
imaginary frequencies.
"""
dummy = [1]
for index, conf in enumerate(self.values()):
freq = np.array(conf.get("freq", dummy))
if (freq < 0).any():
self._kept[index] = False
def trim_non_matching_stoichiometry(self, wanted: Optional[str] = None) -> None:
"""Mark all conformers with stoichiometry other than *wanted* as "not kept".
If not given, *wanted* evaluates to the most common stoichiometry.
Parameters
----------
wanted
Only conformers with same stoichiometry will be kept. Evaluates to the most
common stoichiometry if not given.
Notes
-----
Conformers previously marked as "not kept" will not be affected.
Conformers that doesn't contain stoichiometry data are always treated
as non-matching.
"""
if not wanted:
counter = Counter(
conf["stoichiometry"]
for conf in self.values()
if "stoichiometry" in conf
)
counts = counter.most_common()
wanted = counts[0][0] if counts else "" # no conformer has "stoichiometry"
for index, conf in enumerate(self.values()):
if "stoichiometry" not in conf or not conf["stoichiometry"] == wanted:
self._kept[index] = False
def trim_not_optimized(self) -> None:
"""Mark all conformers that failed structure optimization as "not kept".
Notes
-----
Conformers previously marked as "not kept" will not be affected.
Conformers that doesn't contain optimization data are always treated as
optimized.
"""
for index, conf in enumerate(self.values()):
if not conf.get("optimization_completed", True):
self._kept[index] = False
def trim_non_normal_termination(self) -> None:
"""Mark all conformers, which calculation job did not terminate normally,
as "not kept".
Notes
-----
Conformers previously marked as "not kept" will not be affected.
Conformers that doesn't contain data regarding their calculation job's
termination are always treated as terminated abnormally.
"""
for index, conf in enumerate(self.values()):
if not conf.get("normal_termination", False):
self._kept[index] = False
def trim_inconsistent_sizes(self) -> None:
"""Mark as "not kept" all conformers that contain any iterable data genre,
that is of different length, than in case of majority of conformers.
Examples
--------
>>> c = Conformers(
... one={'a': [1, 2, 3]},
... two={'a': [1, 2, 3]},
... three={'a': [1, 2, 3, 4]}
... )
>>> c.kept
[True, True, True]
>>> c.trim_inconsistent_sizes()
>>> c.kept
[True, True, False]
Notes
-----
Conformers previously marked as "not kept" will not be affected.
"""
sizes = {}
for fname, conf in self.items():
for genre, value in conf.items():
if isinstance(value, (np.ndarray, list, tuple)):
sizes.setdefault(genre, {})[fname] = len(value)
maxes = {
genre: Counter(v for v in values.values()).most_common()[0][0]
for genre, values in sizes.items()
}
for index, fname in enumerate(self.keys()):
for genre, most_common in maxes.items():
confs = sizes[genre]
if fname in confs and not confs[fname] == most_common:
self._kept[index] = False
def trim_to_range(
self,
genre: str,
minimum: Union[int, float] = float("-inf"),
maximum: Union[int, float] = float("inf"),
attribute: str = "values",
) -> None:
"""Marks as "not kept" all conformers, which numeric value of data
of specified genre is outside of the range specified by *minimum*
and *maximum* values.
Parameters
----------
genre
Name of genre that should be compared to specified
minimum and maximum values.
minimum
Minimal accepted value - every conformer, which genre value evaluates
to less than *minimum* will be marked as "not kept".
Defaults to ``float(-inf)``.
maximum
Maximal accepted value - every conformer, which genre value evaluates
to more than *maximum* will be marked as "not kept".
Defaults to ``float(inf)``.
attribute
Attribute of DataArray of specified *genre* that contains one-dimensional
array of numeric values. defaults to `"values"`.
Raises
------
AttributeError
If DataArray associated with *genre* genre has no attribute *attribute*.
ValueError
If data retrieved from specified genre's attribute is not in the form of
one-dimensional array.
TypeError
If comparision cannot be made between elements of specified genre's
attribute and *minimum* or *maximum* values.
Notes
-----
Conformers previously marked as "not kept" will not be affected.
"""
try:
arr = self.arrayed(genre)
atr = getattr(arr, attribute)
except AttributeError as error:
raise AttributeError(
f"Invalid genre/attribute combination: {genre}/{attribute}. "
f"Resulting DataArray object has no attribute {attribute}."
) from error
values = np.asarray(atr)
if values.ndim != 1:
raise ValueError(
f"Invalid genre/attribute combination: {genre}/{attribute}. "
f"DataArray's attribute must contain one-dimensional array of values."
)
try:
in_range = (minimum <= values) & (values <= maximum)
except TypeError as error:
raise TypeError(
f"Cannot compare {type(minimum)} with {type(values[0])}."
) from error
self.kept = arr.filenames[in_range]
def trim_rmsd(
self,
threshold: Union[int, float],
window_size: Optional[Union[int, float]],
geometry_genre: str = "last_read_geom",
energy_genre: str = "scf",
ignore_hydrogen: bool = True,
moving_window_strategy: Callable = dw.stretching_windows,
) -> None:
"""Marks as "not kept" all conformers that are identical with some other
conformer, judging by a provided RMSD threshold.
To minimize computation cost, conformers are compared inside windows, that is a
subsets of the original list of conformers. Those windows are generated by the
*moving_window_strategy* function. The recommended strategy, and a default
value, is :func:`.streaching_windows`, but other are also available:
:func:`.fixed_windows` and :func:`.pyramid_windows`. This function will be
called with list of energies for conformers compared and (if it is not ``None``)
*window_size* parameter.
With default *moving_window_strategy* conformers, which energy difference (dE)
is higher than given *window_size* are always treated as different, while those
with dE smaller than *window_size* and RMSD value smaller than given *threshold*
are considered identical. From two identical conformers, the one with lower
energy is "kept", and the other is discarded (marked as "not kept").
Notes
-----
RMSD threshold and size of the energy window should be chosen depending on the
parameters of conformers' set: number of conformers, size of the conformer,
its lability, etc. However, *threshold* of 0.5 angstrom and *window_size*
of 5 to 10 kcal/mol is a good place to start if in doubt.
Parameters
----------
threshold : int or float
Maximum RMSD value to consider conformers identical.
window_size : int or float
Size of the energy window, in kcal/mol, inside which RMSD matrix is
calculated. Essentially, a difference in conformers' energy, after which
conformers are always considered different.
geometry_genre : str
Genre of geometry used to calculate RMSD matrix. "last_read_geom" is
default.
energy_genre : str
Genre of energy used to sort and group conformers into windows of given
energy size. "scf" is used by default.
ignore_hydrogen : bool
If hydrogen atom should be discarded before RMSD calculation.
Defaults to ``True``.
moving_window_strategy : callable
Function that generates windows, inside which RMSD comparisions is
performed.
Raises
------
InconsistentDataError
If requested genres does not provide the same set of conformers.
ValueError
When called with ``ignore_hydrogen=True`` but requested
:attr:`.Geometry.atoms` cannot be collapsed to 1-D array.
"""
energy = self.arrayed(energy_genre)
geometry = self.arrayed(geometry_genre)
if not energy.filenames.size == geometry.filenames.size:
raise InconsistentDataError(
"Unequal number of conformers in requested geometry and energy genres. "
"Trim incomplete entries before trimming with :meth:`.trim_rmds`."
)
elif not np.array_equal(energy.filenames, geometry.filenames):
raise InconsistentDataError(
"Different conformers in requested geometry and energy genres. "
"Trim incomplete entries before trimming with :meth:`.trim_rmds`."
)
if not geometry:
return # next steps assume there are some conformers
if ignore_hydrogen and geometry.atoms.shape[0] > 1:
# TODO: remove when dw.geometry.select_atoms supplemented
raise ValueError(
"Cannot ignore hydrogen atoms if requested conformers do not have "
"the same order of atoms. This functionality is not supported yet."
)
geom = (
dw.drop_atoms(geometry.values, geometry.atoms[0], dw.atoms.Atom.H)
if ignore_hydrogen
else geometry.values
)
if window_size is None:
windows = moving_window_strategy(energy.as_kcal_per_mol)
else:
windows = moving_window_strategy(energy.as_kcal_per_mol, window_size)
wanted = dw.rmsd_sieve(geom, windows, threshold)
self.kept = geometry.filenames[wanted]
def select_all(self) -> None:
"""Marks all conformers as 'kept'. Equivalent to ``conformers.kept = True``."""
self._kept = [True for _ in self._kept]
def reject_all(self) -> None:
"""Marks all conformers as 'not kept'. Equivalent to
``conformers.kept = False``.
"""
self._kept = [False for _ in self._kept]
def kept_keys(self, indices: bool = False) -> _KeptKeysView:
"""Equivalent of ``dict.keys()`` but gives view only on conformers marked
as "kept". Returned view may also provide information on conformers index
in its Conformers instance if requested with ``indices=True``.
>>> c = Conformers(c1={"g": 0.1}, c2={"g": 0.2}, c3={"g": 0.3}}
>>> c.kept = [True, False, True]
>>> list(c.kept_keys())
["c1", "c3"]
>>> list(c.kept_keys(indices=True))
[(0, "c1"}), (2, "c3")]
Parameters
----------
indices : bool
If resulting Conformers view should also provide index of each conformer.
Defaults to False.
Returns
-------
_KeptKeysView
View of kept conformers.
"""
return _KeptKeysView(self, indices=indices)
def kept_values(self, indices: bool = False) -> _KeptValuesView:
"""Equivalent of ``dict.values()`` but gives view only on conformers marked
as "kept". Returned view may also provide information on conformers index
in its Conformers instance if requested with ``indices=True``.
>>> c = Conformers(c1={"g": 0.1}, c2={"g": 0.2}, c3={"g": 0.3}}
>>> c.kept = [True, False, True]
>>> list(c.kept_values())
[{"g": 0.1}, {"g": 0.3}]
>>> list(c.kept_values(indices=True))
[(0, {"g": 0.1}), (2, {"g": 0.3})]
Parameters
----------
indices : bool
If resulting Conformers view should also provide index of each conformer.
Defaults to False.
Returns
-------
_KeptValuesView
View of kept conformers.
"""
return _KeptValuesView(self, indices=indices)
def kept_items(self, indices: bool = False) -> _KeptItemsView:
"""Equivalent of ``dict.items()`` but gives view only on conformers marked
as "kept". Returned view may also provide information on conformers index
in its Conformers instance if requested with ``indices=True``.
>>> c = Conformers(c1={"g": 0.1}, c2={"g": 0.2}, c3={"g": 0.3}}
>>> c.kept = [True, False, True]
>>> list(c.kept_items())
[("c1", {"g": 0.1}), ("c3", {"g": 0.3})]
>>> list(c.kept_items(indices=True))
[(0, "c1", {"g": 0.1}), (2, "c3", {"g": 0.3})]
Parameters
----------
indices : bool
If resulting Conformers view should also provide index of each conformer.
Defaults to False.
Returns
-------
_KeptItemsView
View of kept conformers.
"""
return _KeptItemsView(self, indices=indices)
@property
@contextmanager
def untrimmed(self) -> "Conformers":
"""Temporally remove trimming. Implemented as context manager to use with
python's 'with' keyword.
Examples
--------
>>> c = Conformers(one={}, two={}, tree={})
>>> c.kept = [False, True, False]
>>> with c.untrimmed:
>>> c.kept
[True, True, True]
>>> c.kept
[False, True, False]
"""
blade = self._kept
self.kept = True
yield self
self._kept = blade
@contextmanager
def trimmed_to(
    self, blade: Union[Sequence[bool], Sequence[str], Sequence[int], bool]
) -> "Conformers":
    """Temporally set trimming blade to given one. Implemented as context manager
    to use with python's 'with' keyword.

    The previous trimming blade is restored when the block exits, even if the
    body raises an exception.

    Parameters
    ----------
    blade : bool or sequence of bool, str, or int
        Temporary trimming blade. To better understand how blade setting works,
        see Conformers.kept documentation.

    Examples
    --------
    >>> c = Conformers(one={}, two={}, tree={})
    >>> c.kept = [True, True, False]
    >>> with c.trimmed_to([1, 2]):
    ...     c.kept
    [False, True, True]
    >>> c.kept
    [True, True, False]
    """
    old_blade = self._kept
    self.kept = blade
    try:
        yield self
    finally:
        # Without try/finally, an exception inside the 'with' body would
        # permanently keep the temporary blade.
        self._kept = old_blade
@property
@contextmanager
def inconsistency_allowed(self) -> "Conformers":
    """Temporally sets Conformers' 'allow_data_inconsistency' attribute
    to true. Implemented as context manager to use with python's 'with' keyword.

    The previous value of the flag is restored when the block exits, even if
    the body raises an exception.

    Examples
    --------
    >>> c = Conformers(...)
    >>> with c.inconsistency_allowed:
    ...     # do stuff here while c.allow_data_inconsistency is True
    ...     c.allow_data_inconsistency
    True
    >>> c.allow_data_inconsistency
    False
    """
    inconsistency = self.allow_data_inconsistency
    self.allow_data_inconsistency = True
    try:
        yield self
    finally:
        # Without try/finally, an exception inside the 'with' body would
        # permanently leave inconsistency allowed.
        self.allow_data_inconsistency = inconsistency
| StarcoderdataPython |
11356560 | """Test the refinement routines in RBFOpt.
This module contains unit tests for the module rbfopt_refinement.
Licensed under Revised BSD license, see LICENSE.
(C) Copyright International Business Machines Corporation 2017.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import rbfopt
import rbfopt.rbfopt_utils as ru
import rbfopt.rbfopt_refinement as ref
from rbfopt.rbfopt_settings import RbfoptSettings
import numpy as np
def dist(a, b):
    """Euclidean distance between points *a* and *b*, reimplemented here
    for convenience."""
    delta = a - b
    return np.sqrt((delta * delta).sum())
class TestRefinement(unittest.TestCase):
    """Test the rbfopt_refinement module."""

    def setUp(self):
        """Create data for subsequent tests."""
        # Fixed seed so every test method sees the same node layout and
        # the same sequence of random draws.
        np.random.seed(71294123)
        self.n = 3
        self.k = 10
        self.var_lower = np.array([i for i in range(self.n)])
        self.var_upper = np.array([i + 10 for i in range(self.n)])
        self.node_pos = np.array([self.var_lower, self.var_upper,
                                  [1, 2, 3], [9, 5, 8.8], [5.5, 7, 12],
                                  [3.2, 10.2, 4], [2.1, 1.1, 7.4],
                                  [6.6, 9.1, 2.0], [10, 8.8, 11.1],
                                  [7, 7, 7]])
        self.node_val = np.array([2*i for i in range(self.k)])
        self.integer_vars = np.array([0, 2])
        # Compute maximum distance between nodes
        max_dist = 0
        for node1 in self.node_pos:
            for node2 in self.node_pos:
                max_dist = max(max_dist, dist(node1, node2))
        self.max_dist = max_dist
    # -- end function

    def test_init_refinement(self):
        """Test the init_refinement function.
        """
        settings = RbfoptSettings()
        # Try initialization with every prefix of the node set.
        for k in range(2, self.k):
            model_set, radius = ref.init_refinement(settings, self.n, k,
                                                    self.node_pos[:k],
                                                    self.node_pos[k-1])
            self.assertEqual(len(model_set), min(k, self.n + 1),
                             msg='Wrong size of model set')
            self.assertLessEqual(radius, self.max_dist)
    # -- end function

    def test_get_linear_model(self):
        """Test the get_linear_model function.
        """
        settings = RbfoptSettings()
        model_set = np.arange(self.k)
        # Values generated from an exact linear model must be recovered.
        for i in range(5):
            h = np.random.rand(self.n)
            b = np.random.rand()
            node_val = (np.dot(h, self.node_pos.T)).T + b
            hm, bm, rank_def = ref.get_linear_model(
                settings, self.n, self.k, self.node_pos, node_val, model_set)
            self.assertAlmostEqual(dist(h, hm), 0,
                                   msg='Wrong linear part of linear model')
            self.assertAlmostEqual(b - bm, 0,
                                   msg='Wrong constant part of linear model')
    # -- end function

    def test_get_candidate_point(self):
        """Test the get_candidate_point function.
        """
        settings = RbfoptSettings()
        for i in range(self.k):
            h = np.random.rand(self.n)
            b = np.random.rand()
            ref_radius = np.random.uniform(self.max_dist/2)
            point, diff, grad_norm = ref.get_candidate_point(
                settings, self.n, self.k, self.var_lower, self.var_upper,
                h, self.node_pos[i], ref_radius)
            # Moving along -h must not increase the model value, and the
            # step must respect the trust-region radius and the bounds.
            self.assertGreaterEqual(np.dot(h, self.node_pos[i]),
                                    np.dot(h, point),
                                    msg='Function value did not decrease')
            self.assertLessEqual(dist(self.node_pos[i], point),
                                 ref_radius + 1.0e-6,
                                 msg='Point moved too far')
            self.assertAlmostEqual(diff, np.dot(h, self.node_pos[i] - point),
                                   msg='Wrong model difference estimate')
            self.assertAlmostEqual(grad_norm, dist(h, np.zeros(self.n)),
                                   msg='Wrong gradient norm')
            for j in range(self.n):
                self.assertLessEqual(self.var_lower[j], point[j],
                                     msg='Point outside bounds')
                self.assertGreaterEqual(self.var_upper[j], point[j],
                                        msg='Point outside bounds')
    # -- end function

    def test_get_integer_candidate(self):
        """Test the get_integer_candidate function.
        """
        settings = RbfoptSettings()
        model_set = np.arange(self.k)
        # First part: no categorical variables.
        for i in range(self.k):
            h = np.random.rand(self.n)
            b = np.random.rand()
            ref_radius = np.random.uniform(self.max_dist/2)
            candidate, diff, grad_norm = ref.get_candidate_point(
                settings, self.n, self.k, self.var_lower, self.var_upper,
                h, self.node_pos[i], ref_radius)
            point, diff = ref.get_integer_candidate(
                settings, self.n, self.k, h, self.node_pos[i],
                ref_radius, candidate, self.integer_vars, None)
            self.assertAlmostEqual(diff, np.dot(h, candidate - point),
                                   msg='Wrong model difference estimate')
            for j in range(self.n):
                self.assertLessEqual(self.var_lower[j], point[j],
                                     msg='Point outside bounds')
                self.assertGreaterEqual(self.var_upper[j], point[j],
                                        msg='Point outside bounds')
            for j in self.integer_vars:
                self.assertEqual(np.floor(point[j] + 0.5), int(point[j]),
                                 msg='Point is not integer')
        # Second part: randomly generated problems with categorical info.
        for i in range(5):
            n = np.random.randint(3, 11)
            h = np.random.rand(n)
            b = np.random.rand()
            ref_radius = np.random.uniform(2, 5)
            k = np.random.randint(10, 20)
            node_pos = np.random.randint(0, 2, size=(k, n))
            var_lower = np.array([0] * n)
            var_upper = np.array([1] * n)
            categorical_info = (np.array([0]), np.array([1, 2]),
                                [(0, 0, np.array([i for i in range(2, n)]))])
            integer_vars = np.array([i for i in range(2, n)])
            candidate, diff, grad_norm = ref.get_candidate_point(
                settings, n, k, var_lower, var_upper,
                h, node_pos[0], ref_radius)
            point, diff = ref.get_integer_candidate(
                settings, n, k, h, node_pos[0],
                ref_radius, candidate, integer_vars, categorical_info)
            self.assertAlmostEqual(diff, np.dot(h, candidate - point),
                                   msg='Wrong model difference estimate')
            for j in range(n):
                self.assertLessEqual(var_lower[j], point[j],
                                     msg='Point outside bounds')
                self.assertGreaterEqual(var_upper[j], point[j],
                                        msg='Point outside bounds')
            for j in integer_vars:
                self.assertAlmostEqual(point[j]-int(round(point[j])), 0,
                                       msg='Point is not integer')
    # -- end function

    def test_get_model_improving_point(self):
        """Test the get_model_improving_point function.
        """
        settings = RbfoptSettings()
        n = 6
        model_set = np.arange(n+1)
        ref_radius = 1
        integer_vars = np.arange(n)
        var_lower = np.zeros(n)
        var_upper = 10*np.ones(n)
        for i in range(n):
            # node_pos deliberately duplicates row i so the model is
            # rank-deficient and the last point must be replaced.
            node_pos = np.vstack((np.eye(n), np.eye(n)[i, :]))
            point, success, to_replace = ref.get_model_improving_point(
                settings, n, n+1, var_lower, var_upper,
                node_pos, model_set, i, ref_radius, integer_vars, None)
            self.assertTrue(success,
                            msg='Model improvement was not successful')
            self.assertTrue(to_replace == n - 1,
                            msg='Wrong point to be replaced')
            for j in range(n):
                self.assertLessEqual(var_lower[j], point[j],
                                     msg='Point outside bounds')
                self.assertGreaterEqual(var_upper[j], point[j],
                                        msg='Point outside bounds')
    # -- end function

    def test_update_refinement_radius(self):
        """Test the update_refinement_radius function.
        """
        settings = RbfoptSettings()
        model_diff = 10.0
        ref_radius = 1.0
        # Realized decrease just below the shrink threshold -> radius shrinks.
        new_ref_radius, move = ref.update_refinement_radius(
            settings, ref_radius, model_diff,
            model_diff * settings.ref_acceptable_decrease_shrink - 1.0e-3)
        self.assertLess(new_ref_radius, ref_radius,
                        msg='Trust region radius did not decrease')
        # Realized decrease just above the enlarge threshold -> radius grows.
        new_ref_radius, move = ref.update_refinement_radius(
            settings, ref_radius, model_diff,
            model_diff * settings.ref_acceptable_decrease_enlarge + 1.0e-3)
        self.assertGreater(new_ref_radius, ref_radius,
                           msg='Trust region radius did not increase')
        # Decrease above the move threshold -> candidate is accepted.
        new_ref_radius, move = ref.update_refinement_radius(
            settings, ref_radius, model_diff,
            model_diff * settings.ref_acceptable_decrease_move + 1.0e-3)
        self.assertTrue(move, msg='Candidate point did not move')
    # -- end function
# -- end class
if (__name__ == '__main__'):
unittest.main()
| StarcoderdataPython |
11393364 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, <NAME>
# Distributed under the (new) BSD License.
#
# Contributors: <NAME> (<EMAIL>)
# -----------------------------------------------------------------------------
# References:
#
# * Interaction between cognitive and motor cortico-basal ganglia loops during
# decision making: a computational study. <NAME>, <NAME>, <NAME>,
# and <NAME>. Journal of Neurophysiology, 109:3025–3040, 2013.
# -----------------------------------------------------------------------------
import numpy as np
from model import *
from display import *
def debug(time, cues, choice, reward):
    # Report one trial's outcome and running statistics (Python 2 print syntax).
    # Appends to the module-level lists R (rewards) and P (1 when the model
    # picked the lowest-numbered cue, which is the best one after sorting).
    n = len(cues)
    cues = np.sort(cues)
    R.append(reward)
    if choice == cues[0]:
        P.append(1)
    else:
        P.append(0)
    # Print the cue list with the chosen cue in brackets, e.g. "[3] / 7".
    print "Choice: ",
    for i in range(n):
        if choice == cues[i]:
            print "[%d]" % cues[i],
        else:
            print "%d" % cues[i],
        if i < (n-1):
            print "/",
    if choice == cues[0]:
        print " (good)"
    else:
        print " (bad)"
    print "Reward (%3d%%) : %d" % (int(100*CUE["reward"][choice]),reward)
    print "Mean performance: %.3f" % np.array(P).mean()
    print "Mean reward: %.3f" % np.array(R).mean()
    print "Response time: %d ms" % (time)
    print "CTX.cog->CTX.ass:", connections["CTX.cog -> CTX.ass"].weights
    print
# --- Experiment parameters ---------------------------------------------------
n_experiments = 250
n_trials = 120
P = np.zeros((n_experiments,n_trials))
filename = "%d-experiments-%d-trials-performances.npy" % (n_experiments, n_trials)

# Put 1 if you want to run a new set of experiments
if 0:
    for k in range(n_experiments):
        reset()
        p = []
        for j in range(n_trials):
            reset_activities()
            # Settling phase (500ms)
            for i in xrange(0,500):
                iterate(dt)
            # Trial setup
            set_trial(n=2)
            # Learning phase (2500ms)
            for i in xrange(500,3000):
                iterate(dt)
                # Test if a decision has been made
                if CTX.mot.delta > decision_threshold:
                    cues, choice, reward = process(n=2, learning=True)
                    cues = np.sort(cues)
                    if choice == cues[0]:
                        p.append(1)
                    else:
                        p.append(0)
                    P[k,j] = np.mean(p)
                    break
        print "Experiment %d: %g" % (k,np.mean(p))
        # NOTE(review): indentation was lost in extraction; np.save is placed
        # inside the experiment loop here (checkpoint after each experiment) --
        # confirm placement against the original repository.
        np.save(filename, P)

P = np.load(filename)

# --- Plot mean performance across experiments --------------------------------
from matplotlib import rcParams
rcParams['xtick.direction'] = 'out'
rcParams['ytick.direction'] = 'out'

plt.figure(figsize=(12,6), dpi=72, facecolor="white")
axes = plt.subplot(111)
# Hide the top/right spines so only the data axes remain.
axes.spines['right'].set_color('none')
axes.spines['top'].set_color('none')
axes.xaxis.set_ticks_position('bottom')
axes.spines['bottom'].set_position(('data',0))
axes.yaxis.set_ticks_position('left')

# Mark the final mean performance and the 0.5 chance level.
p = np.round(np.mean(P,axis=0)[-1],2)
axes.axhline(p, color = '.25', linewidth=.5, linestyle="-")
axes.axhline(.5, color = '.5', linewidth=1, linestyle="--")
axes.text(120,.51,"Chance level", ha="right", va="bottom", color='.5', fontsize=16)
axes.plot(1+np.arange(n_trials), np.mean(P,axis=0), lw=2, c='k')
yticks = np.sort(np.append(np.linspace(0,1,6,endpoint=True), p))
plt.yticks(yticks)
plt.xlabel("Trial number")
plt.xlim(0,120)
plt.ylabel("Proportion of optimum trials")
plt.ylim(0,1)
plt.show()
| StarcoderdataPython |
191104 | from pathlib import Path
import pytest
@pytest.fixture(autouse=True)
def tests_directory() -> Path:
    """Directory containing this test module."""
    return Path(__file__).parent
@pytest.fixture(autouse=True)
def project_root_directory() -> Path:
    """Parent of the tests directory, i.e. the project checkout root."""
    return Path(__file__).parent.parent
@pytest.fixture(autouse=True)
def test_root_directory(request) -> Path:
    """Root directory as determined by pytest's configuration (rootdir)."""
    return Path(request.config.rootdir)
| StarcoderdataPython |
6491049 | <filename>specs/secure/scanning/scanning_vulnerability_spec.py
import os
from expects import equal, expect, have_keys
from mamba import before, context, description, it
from sdcclient import SdScanningClient
from specs import be_successful_api_call
# Mamba BDD spec: exercises SdScanningClient.get_vulnerability_details
# against a live Sysdig Secure backend (hence the "integration" tag).
with description("Scanning vulnerability details", "integration") as self:
    with before.each:
        # Client configured from the environment; URL defaults to the SaaS endpoint.
        self.client = SdScanningClient(sdc_url=os.getenv("SDC_SECURE_URL", "https://secure.sysdig.com"),
                                       token=os.getenv("SDC_SECURE_TOKEN"))

    with context("when retrieving a simple vulnerability"):
        with it("retrieves the vulnerability details correctly if exists"):
            vuln_id = "VULNDB-140292"
            ok, res = self.client.get_vulnerability_details(id=vuln_id)
            expect((ok, res)).to(be_successful_api_call)
            # The payload must carry the standard vulnerability fields.
            expect(res).to(
                have_keys("description", "severity", "vendor_data", "nvd_data", "references",
                          "affected_packages", id=equal(vuln_id))
            )

        with it("fails if it does not exist"):
            non_existing_vuln_id = "VULNDB-NOEXISTS"
            ok, res = self.client.get_vulnerability_details(id=non_existing_vuln_id)
            expect((ok, res)).to_not(be_successful_api_call)
            expect(res).to(equal(f"Vulnerability {non_existing_vuln_id} was not found"))

        with it("fails if no id was provided"):
            non_existing_vuln_id = None
            ok, res = self.client.get_vulnerability_details(id=non_existing_vuln_id)
            expect((ok, res)).to_not(be_successful_api_call)
            expect(res).to(equal("No vulnerability ID provided"))
| StarcoderdataPython |
3226002 | <reponame>Bhaskers-Blu-Org1/SPAHM
import numpy as np
import sys
from sklearn.metrics.pairwise import euclidean_distances
from scipy.optimize import linear_sum_assignment
def gen_gaus_mixture(centers, mixing_prop=None, noise_sd=0.1, M=5000):
    """Sample M points from an isotropic Gaussian mixture.

    Parameters
    ----------
    centers : ndarray of shape (K, D)
        Mixture component means.
    mixing_prop : ndarray of shape (K,), optional
        Mixture weights; a random Dirichlet(1,...,1) draw when None.
    noise_sd : float
        Standard deviation of each (isotropic) component.
    M : int
        Number of samples to draw.

    Returns
    -------
    ndarray of shape (M, D)
        The sampled points.
    """
    K, D = centers.shape
    if mixing_prop is None:
        mixing_prop = np.random.dirichlet(np.ones(K))
    # Draw a component index for every sample, then fill the rows assigned
    # to each component with Gaussian noise around that component's center.
    assignments = np.random.choice(K, size=M, p=mixing_prop)
    data_j = np.zeros((M, D))
    for k in range(K):
        mask = assignments == k  # computed once; the original evaluated it twice
        data_j[mask] = np.random.normal(loc=centers[k], scale=noise_sd,
                                        size=(mask.sum(), D))
    return data_j
def gen_partition(L, J, D, M=5000, base_measure='gaus', ll='gaus', local_model='mixture',
                  local_sd=0.1, global_sd=None, mu0=0, a=1, b=1):
    """Generate J grouped datasets from a beta-Bernoulli selection of L global atoms.

    Each group j independently selects a subset of the L global atoms with
    per-atom Bernoulli(global_p[l]) draws, perturbs them with Gaussian noise
    (scale local_sd) to obtain its local atoms, and samples M points from a
    Gaussian mixture over those local atoms.

    Parameters
    ----------
    L, J, D : int
        Number of global atoms, number of groups, and dimensionality.
    M : int
        Samples per group.
    base_measure, ll, local_model : str
        Only 'gaus'/'gaus'/'mixture' are supported; anything else aborts.
    local_sd, global_sd : float
        Noise scales for local atoms and the global base measure
        (global_sd defaults to sqrt(L)).
    mu0 : float
        Mean of the global base measure.
    a, b : float
        Beta prior parameters for the atom-selection probabilities.

    Returns
    -------
    (global_atoms, global_p, data, atoms)
        Global atoms and selection probabilities (restricted to atoms used by
        at least one group), plus per-group data arrays and local atoms.
    """
    if global_sd is None:
        global_sd = np.sqrt(L)
    global_p = np.random.beta(a=a, b=b, size=L)
    if base_measure == 'gaus':
        global_atoms = np.random.normal(mu0, global_sd, size=(L, D))
    else:
        sys.exit('unsupported base measure')
    data = []
    used_components = set()
    atoms = []
    for j in range(J):
        # Bernoulli selection of the global atoms active in group j.
        atoms_j_idx = [l for l in range(L) if np.random.binomial(1, global_p[l])]
        used_components.update(atoms_j_idx)
        if ll == 'gaus':
            atoms_j = np.random.normal(global_atoms[atoms_j_idx], scale=local_sd)
        else:
            sys.exit('unsupported likelihood kernel')
        if local_model == 'mixture':
            data_j = gen_gaus_mixture(atoms_j, M=M)
        else:
            sys.exit('unsupported local model')
        data.append(data_j)
        atoms.append(atoms_j)
    # Drop global atoms never selected by any group.
    if len(used_components) < L:
        L = len(used_components)
        L_idx = list(used_components)
        global_p = global_p[L_idx]
        global_atoms = global_atoms[L_idx]
        print('Removing unused components; new L is %d' % L)
    return global_atoms, global_p, data, atoms
def hungarian_match(est_atoms, true_atoms):
    """Optimally pair true atoms with estimated atoms (Hungarian algorithm).

    Returns the list of Euclidean distances of the matched pairs.
    """
    cost = euclidean_distances(true_atoms, est_atoms)
    rows, cols = linear_sum_assignment(cost)
    return [cost[r, c] for r, c in zip(rows, cols)]
def min_match(est_atoms, true_atoms):
    """Symmetric max-of-min distance between the two atom sets.

    e_to_t[i, j] holds the Euclidean distance from estimated atom i to true
    atom j; the result is the larger of the two directed worst-case minimum
    distances.
    """
    e_to_t = np.apply_along_axis(
        lambda x: np.sqrt(((true_atoms - x) ** 2).sum(axis=1)), 1, est_atoms)
    worst_over_true = np.min(e_to_t, axis=0).max()
    worst_over_est = np.min(e_to_t, axis=1).max()
    return max(worst_over_true, worst_over_est)
11309212 | import pulp
import LPHelpers as lph
import Utils
##### Goal Interface #####
class Goal(object):
    """Base class for group-assignment goals.

    A goal records whether it is mandatory plus the rewards granted for full
    (netReward) and partial (partialReward) satisfaction, and knows how to
    translate itself into LP constraints and reward terms.
    """
    def __init__(self, required, netReward, partialReward):
        self.required = required
        self.netReward = netReward
        self.partialReward = partialReward
    def genConstraintsAndRewards(self, dataBox):
        """Return a (constraints, rewards) pair; the base goal contributes none."""
        constraints = []
        rewards = []
        return (constraints, rewards)
##### Goal Classes #####
# For a given studentFilter and groupFilter, goal is to put all students into one of the groups (students and groups defined by filter)
class GroupFilterGoal(Goal):
"""Goal is to put all relevant students into preferable groups.
:param studentFilter: a filter that gives the list of relevant students
:param groupFilter: a filter that gives the list of preferable groups
:param required: if true, this goal must be fully completed
:param netReward: amount to be awarded if all students are placed into preferable groups
:param partialReward: amount to be awarded for each student that is placed into a preferable group
"""
def __init__(self, studentFilter=None, groupFilter=None, required=True, netReward=0, partialReward=0):
Goal.__init__(self, required, netReward, partialReward)
self.studentFilter = studentFilter
self.groupFilter = groupFilter
def genConstraintsAndRewards(self, dataBox):
constraints = []
rewards = []
students = dataBox.filterStudents(self.studentFilter)
groups = dataBox.filterGroups(self.groupFilter)
if len(students) > 0:
if len(groups) == 0:
print "Could not find groups that match filter: " + str(groupFilter) + ". Impossible to create groups."
return None
allSatVars = []
for student in students:
variables = []
for group in groups:
variables.append(student.getVar(group.id))
# Create variable v which is 1 only if student is in one of the groups
(c,v) = lph._boolFromLowerBound(sum(variables), 1)
constraints += c
# Save this variable
allSatVars.append(v)
# If this is required, add constraint
if self.required:
constraints.append(lph._requireTrue(v))
# Add partial reward
(c,rvar) = lph._createRewardVar(v, self.partialReward)
constraints += c
rewards.append(rvar)
# Create variable satistifed which is 1 only if all required students are in appropriate groups
(c,satisfied) = lph._boolFromLowerBound(sum(allSatVars), len(students))
constraints += c
# Add a reward
(c,v) = lph._createRewardVar(satisfied, self.netReward)
constraints += c
rewards.append(v)
return (constraints, rewards)
# For a groupFilter, propertyName, and minSimilar, goal is for all groups to have at least minSimilar students that share the same value for propertyName. All groups means those groups matching the groupFilter
class MinSimilarGoal(Goal):
    """Goal is to get at least minSimilar similar people into each relevant group (exception: empty groups also satisfy this goal)
    :param groupFilter: filter that gives the list of relevant groups
    :param propertyName: the property name that will be used for testing comparison (for objectA and objectB, they are similar if objectA[propertyName] == objectB[propertyName])
    :param minSimilar: the minimum number of people that need to be similar in each group
    :param required: if true, this goal must be fully completed
    :param netReward: amount to be awarded if all relevant groups satisfy this goal
    :param partialReward: amount to be awarded for each relevant group that satisfies this goal
    """
    def __init__(self, groupFilter=None, propertyName=None, minSimilar=-1, required=True, netReward=0, partialReward=0):
        Goal.__init__(self, required, netReward, partialReward)
        if propertyName == None:
            raise TypeError('propertyName is required')
        self.groupFilter = groupFilter
        self.propertyName = propertyName
        self.minSimilar = minSimilar
    def genConstraintsAndRewards(self, dataBox):
        """Build LP constraints/rewards requiring each relevant group to hold
        at least the cutoff number of mutually-similar students (or be empty)."""
        constraints = []
        rewards = []
        PLACEHOLDER = -1
        # Function to get self.minSimilar for specific group size.
        # minSimilar may be a single int or a dict keyed by group size;
        # PLACEHOLDER (-1) means "everyone in the group must be similar".
        def _getCutoff(groupSize):
            ret = PLACEHOLDER
            if self.minSimilar != PLACEHOLDER:
                # Look for value for this group size
                # If self.minSimilar is dictionary, look up group size
                if type(self.minSimilar) is dict:
                    if groupSize in self.minSimilar:
                        ret = self.minSimilar[groupSize]
                    else:
                        # group size not in dictionary
                        return None # caller should interpret this as "no restriction"
                # If self.minSimilar is same for all groups (not dict), just return that
                else:
                    ret = self.minSimilar
            if ret == PLACEHOLDER:
                return groupSize
            else:
                return ret
        # Grab clicks (groups of students that are similar based on propertyname)
        clicks = dataBox.getStudentsWhoShareProperty(self.propertyName)
        # Filter groups
        groupsOfInterest = dataBox.filterGroups(self.groupFilter)
        satVariables = []
        for group in groupsOfInterest:
            groupMin = _getCutoff(group.size)
            if groupMin == None:
                # No restriction, don't continue, don't add constraints
                continue
            if groupMin == 0:
                print "Cannot apply min similarity constraint when minimum similar is " + str(groupMin) + ". Constraint on property " + self.propertyName
                continue
            groupVariables = []
            for i in range(len(clicks)):
                click = clicks[i]
                # Create click count var
                variables = []
                for student in click:
                    var = student.getVar(group.id)
                    variables.append(var)
                # Create variable v thats 1 only if click satisfies goal
                (c,v) = lph._boolFromLowerBound(sum(variables), groupMin)
                constraints += c
                groupVariables.append(v)
            # Get sat variable: 1 if some click placed enough similar students here
            (c,v) = lph._boolFromLowerBound(sum(groupVariables), 1)
            constraints += c
            # Also satisfied if nobody is in the group
            (c,vsat) = lph._boolOr(v, group.getNotInUseVar())
            constraints += c
            satVariables.append(vsat)
            # Add partial reward
            (c,rvar) = lph._createRewardVar(vsat, self.partialReward)
            constraints += c
            rewards.append(rvar)
        # Done going through groups. Now, add sat and reward
        # Create variable satisfied thats 1 only if every relevant group's sat var is true
        (c,satisfied) = lph._boolFromLowerBound(sum(satVariables), len(groupsOfInterest))
        constraints += c
        # Add constraint if required
        if self.required:
            constraints.append(lph._requireTrue(satisfied))
        # Add reward
        (c,v) = lph._createRewardVar(satisfied, self.netReward)
        constraints += c
        rewards.append(v)
        return (constraints, rewards)
# For a groupFilter, propertyName, and maxSimilar, goal is for all groups to have at most maxSimilar students that share the same value for propertyName. All groups means those groups matching the groupFilter
class MaxSimilarGoal(Goal):
    # Partial reward:
    # awarded for each group that satisfies this goal
    # Net reward:
    # awarded if all groups satisfy this goal
    """Goal is for all relevant groups to have at most maxSimilar similar people
    :param groupFilter: a filter that gives the list of relevant groups
    :param propertyName: the property name that will be used for testing comparison (for objectA and objectB, they are similar if objectA[propertyName] == objectB[propertyName])
    :param maxSimilar: the maximum number of similar people allowed in each relevant group
    :param required: if true, this goal must be fully completed
    :param netReward: amount to be awarded if all relevant groups satisfy this goal
    :param partialReward: amount to be awarded for each relevant group that satisfies this goal
    """
    def __init__(self, groupFilter=None, propertyName=None, maxSimilar=-1, required=True, netReward=0, partialReward=0):
        Goal.__init__(self, required, netReward, partialReward)
        if propertyName == None:
            raise TypeError('propertyName is required')
        if maxSimilar < 0:
            raise TypeError('maxSimilar is required and must be positive')
        self.groupFilter = groupFilter
        self.propertyName = propertyName
        self.maxSimilar = maxSimilar
    def genConstraintsAndRewards(self, dataBox):
        """Build LP constraints/rewards forbidding more than the cutoff number
        of mutually-similar students in any relevant group."""
        constraints = []
        rewards = []
        PLACEHOLDER = -1
        # Function to get self.maxSimilar for sepecific group size.
        # maxSimilar may be a single int or a dict keyed by group size;
        # PLACEHOLDER (-1) means "at most one of each kind" (nobody similar).
        def _getCutoff(groupSize):
            ret = PLACEHOLDER
            if self.maxSimilar != PLACEHOLDER:
                # Look for value for this group size
                # If self.maxSimilar is dictionary, look up group size
                if type(self.maxSimilar) is dict:
                    if groupSize in self.maxSimilar:
                        ret = self.maxSimilar[groupSize]
                    else:
                        # group size not in dictionary
                        return None # caller should interpret this as "no restriction"
                # If self.maxSimilar is same for all groups (not dict), just return that
                else:
                    ret = self.maxSimilar
            if ret == PLACEHOLDER:
                # Replace placeholder with 1 (nobody should be similar)
                return 1
            else:
                return ret
        # Grab clicks (groups of students that are similar based on propertyname)
        clicks = dataBox.getStudentsWhoShareProperty(self.propertyName)
        # Filter groups
        groupsOfInterest = dataBox.filterGroups(self.groupFilter)
        satVariables = []
        violateVariables = []
        for group in groupsOfInterest:
            groupMax = _getCutoff(group.size)
            if groupMax == None:
                # No restriction, don't continue, don't add constraints
                continue
            if group.size != None and (groupMax > group.size or groupMax == 0):
                print "Cannot apply max similarity constraint when maximum similar is " + str(groupMax) + ". Constraint on property " + self.propertyName
                continue
            groupViolateVars = []
            for i in range(len(clicks)):
                click = clicks[i]
                # Create click count variable
                variables = []
                for student in click:
                    var = student.getVar(group.id)
                    variables.append(var)
                # Create variable v thats 1 only if click violates goal
                (c,v) = lph._boolFromLowerBound(sum(variables), groupMax + 1)
                constraints += c
                violateVariables.append(v)
                groupViolateVars.append(v)
            # Add partial reward only if none of this group violate
            (c,v) = lph._boolFromUpperBound(sum(groupViolateVars), 0)
            constraints += c
            (c,rvar) = lph._createRewardVar(v, self.partialReward)
            constraints += c
            rewards.append(rvar)
        # Done going through groups. Now, add sat and reward
        # Create variable satisfied thats 1 only if none of the violateVariables is true
        (c,satisfied) = lph._boolFromUpperBound(sum(violateVariables), 0)
        constraints += c
        # Add constraint if required
        if self.required:
            constraints.append(lph._requireTrue(satisfied))
        # Add reward
        (c,v) = lph._createRewardVar(satisfied, self.netReward)
        constraints += c
        rewards.append(v)
        return (constraints, rewards)
# For a groupFilter, groupProperty, studentFilter, and studentProperty, goal is for all students to be assigned to groups where student[studentProperty] = group[groupProperty]
class MustMatchGoal(Goal):
    """Goal where all relevant students must be placed in a relevant group where student[studentProperty] == group[groupProperty]
    :param groupFilter: a filter that gives the list of relevant groups
    :param studentFilter: a filter that gives the list of relevant students
    :param groupProperty: a property name to be used in testing match (group and student match if student[studentProperty] == group[groupProperty])
    :param studentProperty: a property name to be used in testing match (group and student match if student[studentProperty] == group[groupProperty])
    :param required: if true, this goal must be fully completed
    :param netReward: amount to be awarded if all relevant students satisfy this goal
    :param partialReward: amount to be awarded for each relevant student that satisfies this goal
    """
    def __init__(self, groupFilter=None, groupProperty=None, studentFilter=None, studentProperty=None, required=True, netReward=0, partialReward=0):
        Goal.__init__(self, required, netReward, partialReward)
        if groupProperty == None:
            raise TypeError('groupProperty must be defined')
        if studentProperty == None:
            raise TypeError('studentProperty must be defined')
        self.groupFilter = groupFilter
        self.groupProperty = groupProperty
        self.studentFilter = studentFilter
        self.studentProperty = studentProperty
    def genConstraintsAndRewards(self, dataBox):
        """Build LP constraints/rewards placing each relevant student only into
        groups whose groupProperty value matches (wildcards always match)."""
        constraints = []
        rewards = []
        allSatVars = []
        students = dataBox.filterStudents(self.studentFilter)
        for student in students:
            if not self.studentProperty in student.info:
                continue
            groupVariables = []
            groups = dataBox.filterGroups(self.groupFilter)
            for group in groups:
                if not self.groupProperty in group.info:
                    continue
                # Skip if they don't match
                paramsMatch = (group.info[self.groupProperty] == student.info[self.studentProperty])
                wildcardIncluded = (group.info[self.groupProperty] == Utils.WILDCARD or student.info[self.studentProperty] == Utils.WILDCARD)
                if not paramsMatch and not wildcardIncluded:
                    continue
                # Yes, this is a match
                variable = student.getVar(group.id)
                groupVariables.append(variable)
            if len(groupVariables) == 0:
                continue
            # Create variable v thats true if this student satisfied match goal
            (c,v) = lph._boolFromLowerBound(sum(groupVariables), 1)
            constraints += c
            # Keep track of sat vars
            allSatVars.append(v)
            # Add constraint if required
            if self.required:
                constraints.append(lph._requireTrue(v))
            # Add partial reward
            (c,rvar) = lph._createRewardVar(v, self.partialReward)
            constraints += c
            rewards.append(rvar)
        # Create variable satisfied which is 1 only if all students satisfy the goal
        # NOTE(review): students skipped above (missing property / no matching
        # group) still count in len(students), so `satisfied` can never be 1
        # when any student was skipped -- confirm this is intended.
        (c,satisfied) = lph._boolFromLowerBound(sum(allSatVars), len(students))
        constraints += c
        # Add a reward
        (c,v) = lph._createRewardVar(satisfied, self.netReward)
        constraints += c
        rewards.append(v)
        return (constraints, rewards)
class PodGoal(Goal):
    """Goal where each "pod" of students must be in a group together
    :param studentFilter: a filter that gives a list of students to be together in a group
    :param studentFilters: a list of filters, each of which gives a list of students to be placed together in a group
    :param required: if true, this goal must be fully completed
    :param netReward: amount to be awarded if all relevant "pods" are satisfied
    :param partialReward: amount to be awarded for each "pod" that satisfies this goal
    """
    def __init__(self, studentFilter=None, studentFilters=None, required=True, netReward=0, partialReward=0):
        Goal.__init__(self, required, netReward, partialReward)
        # Exactly one of studentFilter / studentFilters must be supplied;
        # a single filter is normalized into a one-element list.
        if studentFilter != None and studentFilters != None:
            # Both are defined
            raise TypeError('studentFilter and studentFilters can\'t both be defined')
        if studentFilter == None and studentFilters == None:
            raise TypeError('either studentFilter or studentFilters must be defined')
        if studentFilters != None:
            self.studentFilters = studentFilters
        else:
            self.studentFilters = [studentFilter]
    def genConstraintsAndRewards(self, dataBox):
        """Build LP constraints/rewards placing every pod (filter result)
        entirely within a single group."""
        constraints = []
        rewards = []
        groups = dataBox.getGroups()
        allSatVars = []
        for studentFilter in self.studentFilters:
            students = dataBox.filterStudents(studentFilter)
            groupSatVariables = []
            for group in groups:
                groupVariables = []
                for student in students:
                    groupVariables.append(student.getVar(group.id))
                # Create variable v that is 1 only if the group of students are in this group
                (c,v) = lph._boolFromLowerBound(sum(groupVariables), len(students))
                constraints += c
                groupSatVariables.append(v)
            # groupSatVariables is a list of booleans: if one is true, the students are in the same group
            (c,v) = lph._boolFromLowerBound(sum(groupSatVariables), 1)
            constraints += c
            allSatVars.append(v)
            # Partial reward
            (c,rvar) = lph._createRewardVar(v, self.partialReward)
            constraints += c
            rewards.append(rvar)
        # Create variable that's 1 only if all selections of students are grouped together
        (c,satisfied) = lph._boolFromLowerBound(sum(allSatVars), len(self.studentFilters))
        constraints += c
        # Add constraint if required
        if self.required:
            constraints.append(lph._requireTrue(satisfied))
        # Add net reward
        (c,vreward) = lph._createRewardVar(satisfied, self.netReward)
        constraints += c
        rewards.append(vreward)
        return (constraints, rewards)
61086 | # import RPi.GPIO as GPIO
import datetime
class Servo:
    """Daily on-time schedule for a servo.

    Stores the configured switch-on time plus a running flag; the
    should_run/should_stop predicates compare against a caller-supplied
    datetime.time, so the caller drives the polling loop.
    """

    def __init__(self, on_time=datetime.time(0, 0, 0)):
        """Remember the switch-on time, split into hour/minute for quick checks."""
        self.on_time = on_time
        self.on_hour = on_time.hour
        self.on_minute = on_time.minute
        self.is_running = False

    def view_config(self):
        """Print the current configuration (debug helper)."""
        print(f'on_time: {self.on_time}')
        print(f'on_hour: {self.on_hour}')
        print(f'on_minute: {self.on_minute}')
        print(f'is_running: {self.is_running}')

    def set_is_running(self, status):
        """Record whether the servo is currently running."""
        self.is_running = status

    def should_run_servo(self, current_time):
        """True when current_time matches the on hour+minute and the servo is idle."""
        return (current_time.hour == self.on_hour
                and current_time.minute == self.on_minute
                and not self.is_running)

    def should_stop_servo(self, current_time):
        """True when the running servo has passed its on-minute.

        NOTE(review): only the minute is compared (the original computed the
        hour but never used it), so a run crossing an hour boundary never
        reports "stop" -- confirm intended behavior with the caller.
        """
        return current_time.minute > self.on_minute and self.is_running
11200762 | from .base import *
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()

# Allow all host headers
# NOTE(review): '*' disables Django's Host-header validation; acceptable only
# when a proxy in front validates the Host header -- confirm the deployment.
ALLOWED_HOSTS = ['*']

# Production settings module: debug pages disabled.
DEBUG = False
| StarcoderdataPython |
3394165 | <gh_stars>0
# Generated by Django 2.1.1 on 2018-10-05 11:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add a `verified` boolean flag to the `otp` model."""

    # Must run after migration 0007 of the same app.
    dependencies = [
        ('app', '0007_auto_20181004_1735'),
    ]

    operations = [
        migrations.AddField(
            model_name='otp',
            name='verified',
            # Existing OTP rows start out unverified.
            field=models.BooleanField(default=False),
        ),
    ]
| StarcoderdataPython |
4847819 | # -*- coding: utf8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2010-2012 <NAME>, <NAME>
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import time
from unittest import TestCase
import zmqpy
from zmqpy.eventloop import ioloop, zmqstream
class TestZMQStream(TestCase):
    """Exercise ZMQStream callback registration on a REP socket."""

    def setUp(self):
        # Fresh context, socket, loop and stream for every test.
        self.context = zmqpy.Context()
        self.socket = self.context.socket(zmqpy.REP)
        self.loop = ioloop.IOLoop.instance()
        self.stream = zmqstream.ZMQStream(self.socket)

    def tearDown(self):
        # The socket must be closed before its context is terminated.
        self.socket.close()
        self.context.term()

    def test_callable_check(self):
        """Ensure callable check works (py3k)."""
        # Any callable must be accepted as a handler.
        noop = lambda *args: None
        self.stream.on_send(noop)
        self.stream.on_recv(noop)
        # Non-callables must be rejected with AssertionError.
        self.assertRaises(AssertionError, self.stream.on_recv, 1)
        self.assertRaises(AssertionError, self.stream.on_send, 1)
        self.assertRaises(AssertionError, self.stream.on_recv, zmqpy)
| StarcoderdataPython |
8072285 | from django import forms
from django_countries.tests import models
class PersonForm(forms.ModelForm):
    """ModelForm exposing the Person model's two country fields."""

    class Meta:
        model = models.Person
        fields = ["country", "favourite_country"]
class AllowNullForm(forms.ModelForm):
    """ModelForm for the AllowNull model's single (nullable) country field."""

    class Meta:
        model = models.AllowNull
        fields = ["country"]
class MultiCountryForm(forms.ModelForm):
    """ModelForm for the MultiCountry model's multi-valued countries field."""

    class Meta:
        model = models.MultiCountry
        fields = ["countries"]
| StarcoderdataPython |
1839698 | <reponame>DeWill404/Data-Structure-and-Algorithm<filename>Python/Data Structure/Stack/Stack Using Array.py
class Stack:
    """Fixed-capacity LIFO stack backed by a Python list.

    Capacity is MAX (5); TOP holds the index of the top element (-1 when
    empty).  push/pop report success via their return values instead of
    raising, matching the interactive driver below.
    """

    # Stack variable initialization
    def __init__(self):
        self.stack = []   # underlying storage
        self.MAX = 5      # capacity limit
        self.TOP = -1     # index of the top element; -1 means empty

    # UNDERFLOW check function: True when the stack is empty.
    # (The original labels for UNDERFLOW/OVERFLOW were swapped.)
    def UNDERFLOW(self):
        return self.TOP == -1

    # OVERFLOW check function: True when the stack is full.
    def OVERFLOW(self):
        return self.TOP == self.MAX - 1

    # PUSH into stack; returns True on success, False on overflow.
    def push(self, item):
        if self.OVERFLOW():
            return False
        self.stack.append(item)
        self.TOP += 1
        return True

    # POP from stack; returns the popped item, or False on underflow.
    # NOTE: a popped falsy value (e.g. 0) is indistinguishable from
    # underflow by truthiness — callers should check UNDERFLOW() first.
    def pop(self):
        if self.UNDERFLOW():
            return False
        self.TOP -= 1
        return self.stack.pop()

    # DISPLAY items from top to bottom, space-separated.
    def display(self):
        if not self.UNDERFLOW():
            print(" ".join(str(i) for i in self.stack[::-1]))
        else:
            print("-> UNDERFLOW <-")
# Prompt text shown before every read of the interactive driver.
menu = '''
Enter
1. PUSH
2. POP
3. DISPLAY
4. EXIT
-> '''

if __name__ == "__main__":
    obj = Stack()
    done = False  # renamed from `exit`, which shadowed the builtin
    while not done:
        choice = int(input(menu))
        # PUSH
        if choice == 1:
            value = int(input("Enter a value to push : "))
            if obj.push(value):
                print(f"{value} is pushed into stack.")
            else:
                print("-> OVERFLOW <-")
        # POP
        elif choice == 2:
            # BUG FIX: the original tested `if temp:` on the popped value, so
            # popping a falsy item (e.g. 0) was misreported as underflow.
            # Check underflow explicitly before popping instead.
            if not obj.UNDERFLOW():
                value = obj.pop()
                print(f"The popped value is {value}.")
            else:
                print("-> UNDERFLOW <-")
        # DISPLAY
        elif choice == 3:
            obj.display()
        # EXIT
        elif choice == 4:
            done = True
        # INVALID
        else:
            print("Enter a valid option.")
| StarcoderdataPython |
3327570 | import pytest
from asn1PERser.codec.per.encoder import encode as per_encoder
from asn1PERser.classes.data.builtin.BooleanType import BooleanType
@pytest.mark.parametrize("boolean, encoded", [
    (BooleanType(value=True), '80'),
    (BooleanType(value=False), '00')
])
def test_boolean_type_can_be_encoded(boolean, encoded):
    """PER-encode a BooleanType and compare against the expected hex bytes."""
    expected = bytearray.fromhex(encoded)
    assert per_encoder(boolean) == expected
| StarcoderdataPython |
280574 | import tempfile
import pytest
import numpy as np
from pandas import DataFrame
import joblib
from sklearn_pandas import DataFrameMapper
from sklearn_pandas import NumericalTransformer
@pytest.fixture
def simple_dataset():
    """Small three-column integer frame shared by the transformer tests."""
    columns = {
        'feat1': [1, 2, 1, 3, 1],
        'feat2': [1, 2, 2, 2, 3],
        'feat3': [1, 2, 3, 4, 5],
    }
    return DataFrame(columns)
def test_common_numerical_transformer(simple_dataset):
    """
    Test log transformation
    """
    # Map only feat1 through a log transform, keeping DataFrame output.
    mapper = DataFrameMapper([
        ('feat1', NumericalTransformer('log'))
    ], df_out=True)
    frame = simple_dataset
    transformed = mapper.fit_transform(frame)
    # Only the mapped column survives, with values equal to np.log(feat1).
    assert list(transformed.columns) == ['feat1']
    expected = frame['feat1'].apply(np.log).values
    assert np.array_equal(expected, transformed.feat1.values)
def test_numerical_transformer_serialization(simple_dataset):
    """
    Test if you can serialize transformer
    """
    transformer = DataFrameMapper([
        ('feat1', NumericalTransformer('log'))
    ])
    df = simple_dataset
    transformer.fit(df)
    f = tempfile.NamedTemporaryFile(delete=True)
    try:
        joblib.dump(transformer, f.name)
        transformer2 = joblib.load(f.name)
        # BUG FIX: the original computed np.array_equal(...) but never
        # asserted the result, so a broken round-trip could pass silently.
        assert np.array_equal(transformer.transform(df),
                              transformer2.transform(df))
    finally:
        # Ensure the temp file is removed even if an assertion fails.
        f.close()
| StarcoderdataPython |
3212103 | import requests
import re
import random
import configparser
from bs4 import BeautifulSoup
from flask import Flask, request, abort
from imgurpython import ImgurClient
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
app = Flask(__name__)

# Read channel credentials and API settings from the local config file.
config = configparser.ConfigParser()
config.read("config.ini")

# LINE Messaging API client and webhook signature handler.
line_bot_api = LineBotApi(config['line_bot']['Channel_Access_Token'])
handler = WebhookHandler(config['line_bot']['Channel_Secret'])

# Imgur credentials for the "Logo" command's random album image.
client_id = config['imgur_api']['Client_ID']
client_secret = config['imgur_api']['Client_Secret']
album_id = config['imgur_api']['Album_ID']
# External image API endpoint; read from config but not referenced in this file.
API_Get_Image = config['other_api']['API_Get_Image']
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the request signature, then dispatch."""
    # Signature LINE computed over the raw body with the channel secret.
    signature = request.headers['X-Line-Signature']
    # Raw request body as text — needed verbatim for signature verification.
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)
    # Reject requests whose signature does not match our channel secret.
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)
    return 'ok'
def venue():
    """Return the summit venue name and address in English and Chinese."""
    lines = [
        "National Taiwan University of Science and Technology",
        "國立台灣科技大學",
        "No.43, Keelung Rd., Sec.4, Da'an Dist., Taipei, Taiwan",
        "台灣台北市大安區基隆路四段43號",
    ]
    return "\n".join(lines)
def susenews():
    """Scrape www.suse.com/c/news/ and format entries as bulleted text."""
    target_url = 'https://www.suse.com/c/news/'
    print('Start parsing news ...')
    session = requests.session()
    # NOTE(review): verify=False disables TLS certificate checking.
    response = session.get(target_url, verify=False)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    # Dates and headlines come from parallel column lists on the page.
    date_lst = [node.getText() for node in soup.select('div .col-sm-3 p.date')]
    subject_lst = [node.getText() for node in soup.select('div .col-sm-8 .content')]
    pieces = []
    last = len(date_lst) - 1
    for k in range(len(date_lst)):
        clean_date = date_lst[k].replace('\t', '').replace('\n', '')
        pieces.append(u'\u2022' + " " + clean_date + '\n')
        clean_subject = subject_lst[k].replace('\n', '')
        # Blank line between entries, but not after the final one.
        if k != last:
            pieces.append(clean_subject + '\n\n')
        else:
            pieces.append(clean_subject)
    return "".join(pieces)
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Dispatch incoming text messages to a reply per recognized keyword.

    Keywords: "Logo", "Venue", "YouTube", "News", "About"; anything else
    gets a generic greeting.  Each branch replies via line_bot_api and
    returns 0.
    """
    print("event.reply_token:", event.reply_token)
    print("event.message.text:", event.message.text)
    # "Logo": reply with a random image from the configured Imgur album.
    if event.message.text == "Logo":
        client = ImgurClient(client_id, client_secret)
        images = client.get_album_images(album_id)
        index = random.randint(0, len(images) - 1)
        url = images[index].link
        #line_bot_api.reply_message(
        #    event.reply_token,
        #    TextSendMessage(text=url))
        image_message = ImageSendMessage(
            original_content_url=url,
            preview_image_url=url
        )
        line_bot_api.reply_message(
            event.reply_token, image_message)
        return 0
    # "Venue": reply with a location pin of the summit venue.
    if event.message.text == "Venue":
        # NOTE(review): `content` and `image_message` are built but never
        # sent — only the LocationSendMessage below is actually replied.
        content = venue()
        #line_bot_api.reply_message(
        #    event.reply_token,
        #    TextSendMessage(text=content))
        image_message = ImageSendMessage(
            original_content_url='https://charleswang.us/opensuse-line-bot/taiwan-tech5.jpg',
            preview_image_url='https://charleswang.us/opensuse-line-bot/taiwan-tech3.jpg'
        )
        #line_bot_api.reply_message(
        #    event.reply_token, image_message)
        message = LocationSendMessage(
            title='台灣科技大學國際大樓',
            address='10607 臺北市大安區基隆路 4 段 43 號',
            latitude=25.013162196759016,
            longitude=121.54029257962338
        )
        line_bot_api.reply_message(event.reply_token, message)
        #line_bot_api.push_message(
        #    event.push_token,
        #    TextSendMessage(text=content))
        #line_bot_api.replySticker(event.reply_token, { packageId: '1', stickerId: '1' })
        return 0
    # "YouTube": scrape the openSUSE TV channel and reply with two random links.
    if event.message.text == "YouTube":
        target_url = 'https://www.youtube.com/user/opensusetv/videos'
        rs = requests.session()
        res = rs.get(target_url, verify=False)
        soup = BeautifulSoup(res.text, 'html.parser')
        seqs = ['https://www.youtube.com{}'.format(data.find('a')['href']) for data in soup.select('.yt-lockup-title')]
        line_bot_api.reply_message(
            event.reply_token, [
                TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)]),
                TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)])
            ])
        return 0
    # "News": reply with the scraped SUSE news digest.
    if event.message.text == "News":
        content = susenews()
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text=content))
        return 0
    # "About": reply with a summit description and registration link.
    if event.message.text == "About":
        content = "openSUSE 亞洲高峰會是 openSUSE 社群 ( 即:貢獻者跟使用者 ) 很重要的活動之一,那些平常都在線上交流的人,現在可以一起面對面,與來自世界各地的高手進行交流,社群成員將會分享他們最新的知識、經驗,並學習關於 openSUSE FLOSS 的技術。這次在台北的活動是 openSUSE 亞洲高峰會的第五次,繼 2014 年首次的亞洲高峰會是在北京之後,過去的亞洲高峰有來自中國、台灣、印度、印度尼西亞、日本、南韓等國的參加。"
        content += "\n\nRegistration: https://coscup2018.kktix.cc/events/coscup2018regist"
        content += "\n\nLINE Bot Created by:\n<NAME> (<EMAIL>)"
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text=content))
        return 0
    # Fallback: generic greeting for unrecognized text.
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text="Hello from openSUSE.Asia Summit 2018!"))
@handler.add(MessageEvent, message=StickerMessage)
def handle_sticker_message(event):
    """Reply to any incoming sticker with a random sticker from package 1."""
    print("package_id:", event.message.package_id)
    print("sticker_id:", event.message.sticker_id)
    # ref. https://developers.line.me/media/messaging-api/sticker_list.pdf
    sticker_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 21, 100, 101, 102, 103, 104, 105, 106,
                   107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
                   126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 401, 402]
    # Pick a random index (randint kept so RNG consumption is unchanged).
    chosen = random.randint(0, len(sticker_ids) - 1)
    print(chosen)
    reply = StickerSendMessage(
        package_id='1',
        sticker_id=str(sticker_ids[chosen])
    )
    line_bot_api.reply_message(
        event.reply_token,
        reply)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| StarcoderdataPython |
11210168 | <gh_stars>1-10
# this file is for testing purposes only
# TODO remove this file later
from bython.parser import parse_file_recursively
parse_file_recursively("test-code.by") | StarcoderdataPython |
9727410 | import json
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _, ugettext
from django.views.decorators.csrf import csrf_exempt
from datawinners import settings
from datawinners.accountmanagement.decorators import session_not_expired, is_not_expired
from datawinners.accountmanagement.models import NGOUserProfile
from datawinners.activitylog.models import UserActivityLog
from datawinners.common.constant import CREATED_QUESTIONNAIRE
from datawinners.main.database import get_database_manager
from datawinners.project import helper
from datawinners.project.helper import associate_account_users_to_project
from datawinners.project.wizard_view import get_preview_and_instruction_links, create_questionnaire, \
update_questionnaire
from datawinners.utils import get_organization
from mangrove.datastore.entity_type import get_unique_id_types
from mangrove.errors.MangroveException import QuestionCodeAlreadyExistsException, QuestionAlreadyExistsException, EntityQuestionAlreadyExistsException
from mangrove.transport.xforms.xform import xform_for
@login_required
@session_not_expired
@csrf_exempt
@is_not_expired
def create_project(request):
    """Questionnaire-creation view.

    GET renders the creation wizard (with a fresh questionnaire code and the
    account's unique-id types); POST creates the questionnaire and returns a
    JSON status payload.
    """
    manager = get_database_manager(request.user)
    if request.method == 'GET':
        # Return to the dashboard when the wizard was entered from there.
        cancel_link = reverse('dashboard') if request.GET.get('prev', None) == 'dash' else reverse('alldata_index')
        return render_to_response('project/create_project.html',
                                  {'preview_links': get_preview_and_instruction_links(),
                                   'questionnaire_code': helper.generate_questionnaire_code(manager),
                                   'is_edit': 'false',
                                   'active_language': request.LANGUAGE_CODE,
                                   'post_url': reverse(create_project),
                                   'unique_id_types': json.dumps([unique_id_type.capitalize() for unique_id_type in
                                                                  get_unique_id_types(manager)]),
                                   'cancel_link': cancel_link}, context_instance=RequestContext(request))

    if request.method == 'POST':
        # Delegate creation and validation; reply with the JSON result.
        response_dict = _create_project_post_response(request, manager)
        return HttpResponse(json.dumps(response_dict))
def _validate_questionnaire_name_and_code(questionnaire):
    """Check the questionnaire's form code and project name for uniqueness.

    Returns (code_has_errors, error_message, name_has_errors) where
    error_message maps "code"/"name" to a translated message for each
    failed check.
    """
    error_message = {}
    code_has_errors = not questionnaire.is_form_code_unique()
    if code_has_errors:
        error_message["code"] = _("Form with same code already exists.")
    name_has_errors = not questionnaire.is_project_name_unique()
    if name_has_errors:
        error_message["name"] = _("Form with same name already exists.")
    return code_has_errors, error_message, name_has_errors
def _is_open_survey_allowed(request, is_open_survey):
    # Open surveys are honoured only for Pro-SMS organizations.
    organization = get_organization(request)
    return organization.is_pro_sms and is_open_survey
def _create_project_post_response(request, manager):
    """Create a questionnaire from the POSTed wizard data.

    Returns a JSON-serializable dict: {'success': True, 'project_id': ...}
    on success, or {'success': False, ...} with error details when creation
    or the name/code uniqueness checks fail.
    """
    project_info = json.loads(request.POST['profile_form'])
    try:
        ngo_admin = NGOUserProfile.objects.get(user=request.user)
        is_open_survey_allowed = _is_open_survey_allowed(request, request.POST.get('is_open_survey'))
        questionnaire = create_questionnaire(post=request.POST, manager=manager, name=project_info.get('name'),
                                             language=project_info.get('language', request.LANGUAGE_CODE),
                                             reporter_id=ngo_admin.reporter_id,
                                             is_open_survey=is_open_survey_allowed)
    except (QuestionCodeAlreadyExistsException, QuestionAlreadyExistsException,
            EntityQuestionAlreadyExistsException) as ex:
        # Duplicate-question style failures are reported back to the wizard.
        return {'success': False, 'error_message': _(ex.message), 'error_in_project_section': False}

    code_has_errors, error_message, name_has_errors = _validate_questionnaire_name_and_code(questionnaire)
    if not code_has_errors and not name_has_errors:
        associate_account_users_to_project(manager, questionnaire)
        questionnaire.update_doc_and_save()
        # Optionally generate the XLSForm representation (brand feature flag),
        # which requires a second save.
        if settings.BRAND_FEATURES.get('DW_BUILDER_PROJECT_TO_XLSFORMS', False):
            questionnaire.xform = xform_for(get_database_manager(request.user), questionnaire.id, request.user.get_profile().reporter_id)
            questionnaire.update_doc_and_save()
        UserActivityLog().log(request, action=CREATED_QUESTIONNAIRE, project=questionnaire.name,
                              detail=questionnaire.name)
        return {'success': True, 'project_id': questionnaire.id}

    # Uniqueness failures: echo per-field flags so the UI can highlight them.
    return {'success': False,
            'error_message': error_message,
            'error_in_project_section': False,
            'code_has_errors': code_has_errors,
            'name_has_errors': name_has_errors}
5012981 | <filename>recipes/recipe_modules/windows_adk/examples/ensure.py
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.post_process import StepCommandRE, DropExpectation
DEPS = [
'windows_adk',
'recipe_engine/properties',
'recipe_engine/file',
'recipe_engine/json',
'recipe_engine/path',
]
def RunSteps(api):
    """Install the Windows ADK/WinPE packages, then clean them up again."""
    api.windows_adk.ensure()
    api.windows_adk.cleanup()
def GenTests(api):
    """Yield recipe test cases with canned PowerShell step output.

    Each STEP_* constant fakes a successful install/uninstall step whose
    JSON stdout reports Success plus an exit-code log line.
    """
    STEP_INSTALL_ADK_PASS = api.step_data(
        'ensure windows adk present.PowerShell> Install ADK',
        stdout=api.json.output({
            'results': {
                'Success': True
            },
            '[CLEANUP]/logs/adk/adk.log': 'i007: Exit code: 0x0',
        }))
    STEP_INSTALL_WINPE_PASS = api.step_data(
        'ensure win-pe add-on present.PowerShell> Install WinPE',
        stdout=api.json.output({
            'results': {
                'Success': True
            },
            '[CLEANUP]/logs/winpe/winpe.log': 'i007: Exit code: 0x0',
        }))
    STEP_UNINSTALL_ADK_PASS = api.step_data(
        'PowerShell> Uninstall ADK',
        stdout=api.json.output({
            'results': {
                'Success': True
            },
            '[CLEANUP]/logs/adk-uninstall/adk.log': 'i007: Exit code: 0x0',
        }))
    STEP_UNINSTALL_WINPE_PASS = api.step_data(
        'PowerShell> Uninstall WinPE',
        stdout=api.json.output({
            'results': {
                'Success': True
            },
            '[CLEANUP]/logs/winpe-uninstall/winpe.log': 'i007: Exit code: 0x0',
        }))
    # Single happy-path case: both installs and both uninstalls succeed.
    yield (api.test('basic') +
           api.properties(win_adk_refs='canary', win_adk_winpe_refs='canary') +
           api.post_process(StepCommandRE, 'ensure windows adk present', []) +
           api.post_process(StepCommandRE, 'ensure win-pe add-on present', []) +
           STEP_INSTALL_ADK_PASS + STEP_INSTALL_WINPE_PASS +
           STEP_UNINSTALL_ADK_PASS + STEP_UNINSTALL_WINPE_PASS +
           api.post_process(DropExpectation))
| StarcoderdataPython |
12831633 | from subscriber import podaac_data_subscriber as pds
import pytest
def test_temporal_range():
    """get_temporal_range fills a missing bound with the epoch start or `now`,
    and raises ValueError when neither bound can be derived."""
    assert pds.get_temporal_range(None, '2021-01-01T00:00:00Z', "2021-08-20T13:30:38Z") == "1900-01-01T00:00:00Z,2021-01-01T00:00:00Z"
    assert pds.get_temporal_range('2021-01-01T00:00:00Z', '2022-01-01T00:00:00Z', "2021-08-20T13:30:38Z") == "2021-01-01T00:00:00Z,2022-01-01T00:00:00Z"
    assert pds.get_temporal_range('2021-01-01T00:00:00Z', None, "2021-08-20T13:30:38Z") == "2021-01-01T00:00:00Z,2021-08-20T13:30:38Z"
    # BUG FIX: the original wrote `... == "..."` here; the comparison result
    # was discarded, so only the raising call is meaningful inside the
    # pytest.raises block.
    with pytest.raises(ValueError):
        pds.get_temporal_range(None, None, None)
def test_validate():
    """CLI argument parsing/validation: accepted flag combinations populate
    the expected namespace fields; malformed values raise ValueError."""
    # work
    a = validate(["-c", "viirs", "-d", "/data"])
    assert a.collection == "viirs"
    assert a.outputDirectory == "/data"
    # Bounding boxes of various extents are accepted verbatim.
    a = validate(["-c", "viirs", "-d", "/data", "-b=-180,-90,180,90"])
    assert a.bbox == "-180,-90,180,90"
    a = validate(["-c", "viirs", "-d", "/data", "-b=-170,-80,170,20"])
    assert a.bbox == "-170,-80,170,20"
    a = validate(["-c", "viirs", "-d", "/data", "-b=-180,-90,180,90", "-m", "100"])
    assert a.minutes == 100, "should equal 100"
    # Multiple extensions may be listed after -e.
    a = validate(["-c", "viirs", "-d", "/data", "-b=-180,-90,180,90", "-e", ".txt", ".nc"])
    assert ".txt" in a.extensions
    assert ".nc" in a.extensions
    # Output-directory layout flags (-dydoy / -dymd / -dy) are mutually
    # independent boolean switches.
    a = validate(["-c", "viirs", "-d", "/data", "-dydoy", "-e", ".nc", "-m", "60", "-b=-180,-90,180,90"])
    assert a.outputDirectory == "/data"
    assert a.dydoy is True
    a = validate(["-c", "viirs", "-d", "/data", "-dymd", "-e", ".nc", "-m", "60", "-b=-180,-90,180,90"])
    assert a.outputDirectory == "/data"
    assert a.dymd is True
    a = validate(["-c", "viirs", "-d", "/data", "-dy", "-e", ".nc", "-m", "60", "-b=-180,-90,180,90"])
    assert a.outputDirectory == "/data"
    assert a.dy is True
    a = validate(["-c", "JASON_CS_S6A_L2_ALT_LR_STD_OST_NRT_F", "-d", "/data", "-dc", "-e", ".nc", "-m", "60", "-b=-180,-90,180,90"])
    assert a.collection == "JASON_CS_S6A_L2_ALT_LR_STD_OST_NRT_F"
    assert a.outputDirectory == "/data"
    assert a.cycle is True
    # Full ISO-8601 timestamps are required; the provider defaults to POCLOUD.
    a = validate(["-c", "dataset", "-d", "/data", "-sd", "2020-01-01T00:00:00Z", "-ed", "2021-01-01T00:00:00Z"])
    assert a.startDate == '2020-01-01T00:00:00Z'
    assert a.endDate == '2021-01-01T00:00:00Z'
    assert a.provider == "POCLOUD"
    a = validate(["-c", "dataset", "-d", "/data", "-p", "ANEWCLOUD"])
    assert a.provider == 'ANEWCLOUD'
    # Date-only strings and malformed bounding boxes must be rejected.
    with pytest.raises(ValueError):
        a = validate(["-c", "dataset", "-d", "/data", "-sd", "2020-01-01", "-ed", "2021-01-01T00:00:00Z"])
    with pytest.raises(ValueError):
        a = validate(["-c", "dataset", "-d", "/data", "-sd", "2020-01-01T00:00:00Z", "-ed", "2021-01-01"])
    with pytest.raises(ValueError):
        a = validate(["-c", "viirs", "-d", "/data", "-b=-180,-90,180,90anc", "-m", "100"])
    with pytest.raises(ValueError):
        a = validate(["-c", "viirs", "-d", "/data", "-b=-180,-90,180,90,100", "-m", "100"])
    with pytest.raises(ValueError):
        a = validate(["-c", "viirs", "-d", "/data", "-b=-180,-90,180", "-m", "100"])
    # #don't work
    # with pytest.raises(SystemExit):
    #     a = validate([])
    #
    # #don't work
    # with pytest.raises(SystemExit):
    #     a = validate(["-c", "viirs"])
    #
    # with pytest.raises(SystemExit):
    #     a = validate(["-d", "/data"])
    #
    # with pytest.raises(ValueError):
    #     a = validate(["-c", "viirs", "-d", "/data", "-ds", "2021-01-01T00:00:Z"])
    #
    # with pytest.raises(ValueError):
    #     a = validate(["-c", "viirs", "-d", "/data", "-b=-170abc,-80,170,20"])
    #
    # with pytest.raises(SystemExit):
    #     a = validate(["-c", "viirs", "-d", "/data", "-m","60b"])
def validate(args):
    """Parse `args` with the subscriber CLI parser, validate them, and
    return the parsed namespace."""
    parser = pds.create_parser()
    parsed = parser.parse_args(args)
    pds.validate(parsed)
    return parsed
| StarcoderdataPython |
1924829 | <reponame>fictional-tribble/integrations-extras
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from nose.plugins.attrib import attr
from collections import namedtuple
import os
# 3p
from mock import patch
# project
from tests.checks.common import AgentCheckTest, Fixtures
# Lightweight stand-in for os.stat_result carrying only the fields the
# filebeat check reads (size, inode, device).
mocked_file_stats = namedtuple('mocked_file_stats', ['st_size', 'st_ino', 'st_dev'])
# allows mocking `os.stat` only for certain paths; for all others it will call
# the actual function - needed as a number of test helpers do make calls to it
def with_mocked_os_stat(mocked_paths_and_stats):
    """Decorator factory: while the wrapped test runs, os.stat returns the
    canned stat for any path in `mocked_paths_and_stats`, and falls through
    to the real os.stat for every other path."""
    real_os_stat = os.stat

    def _stat_dispatch(path):
        # EAFP: serve the canned stat if we have one, else the real result.
        try:
            return mocked_paths_and_stats[path]
        except KeyError:
            return real_os_stat(path)

    def external_wrapper(function):
        # silly, but this _must_ start with `test_` for nose to pick it up as a
        # test when used below
        def test_wrapper(*args, **kwargs):
            with patch.object(os, 'stat') as patched_os_stat:
                patched_os_stat.side_effect = _stat_dispatch
                return function(*args, **kwargs)
        return test_wrapper
    return external_wrapper
@attr(requires='filebeat')
class TestFilebeat(AgentCheckTest):
    """Basic Test for filebeat integration."""

    CHECK_NAME = 'filebeat'
    # Registry fixtures live under ./ci next to this test module.
    FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'ci')

    def _build_config(self, name):
        """Build a single-instance check config pointing at the fixture
        registry file `<name>_registry.json`."""
        return {
            'init_config': None,
            'instances': [
                {
                    'registry_file_path': Fixtures.file(name + '_registry.json', sdk_dir=self.FIXTURE_DIR)
                }
            ]
        }

    # Stats match the registry entries, so unprocessed bytes are reported
    # per source (file size minus registry offset).
    @with_mocked_os_stat({'/test_dd_agent/var/log/nginx/access.log': mocked_file_stats(394154, 277025, 51713),
                          '/test_dd_agent/var/log/syslog': mocked_file_stats(1024917, 152172, 51713)})
    def test_happy_path(self):
        self.run_check(self._build_config('happy_path'))
        self.assertMetric('filebeat.registry.unprocessed_bytes', value=2407, tags=['source:/test_dd_agent/var/log/nginx/access.log'])
        self.assertMetric('filebeat.registry.unprocessed_bytes', value=0, tags=['source:/test_dd_agent/var/log/syslog'])

    def test_bad_config(self):
        # An instance without registry_file_path must make the check raise.
        bad_config = {
            'init_config': None,
            'instances': [{}]
        }
        self.assertRaises(
            Exception,
            lambda: self.run_check(bad_config)
        )

    def test_missing_registry_file(self):
        # tests that it simply silently ignores it
        self.run_check(self._build_config('i_dont_exist'))
        self.assertMetric('filebeat.registry.unprocessed_bytes', count=0)

    def test_missing_source_file(self):
        # Registry entries whose source file is gone produce no metric.
        self.run_check(self._build_config('missing_source_file'))
        self.assertMetric('filebeat.registry.unprocessed_bytes', count=0)

    # Inode differs from the registry entry -> file was rotated/replaced,
    # so no metric is emitted for it.
    @with_mocked_os_stat({'/test_dd_agent/var/log/syslog': mocked_file_stats(1024917, 152171, 51713)})
    def test_source_file_inode_has_changed(self):
        self.run_check(self._build_config('single_source'))
        self.assertMetric('filebeat.registry.unprocessed_bytes', count=0)

    # Same for a changed device id.
    @with_mocked_os_stat({'/test_dd_agent/var/log/syslog': mocked_file_stats(1024917, 152172, 51714)})
    def test_source_file_device_has_changed(self):
        self.run_check(self._build_config('single_source'))
        self.assertMetric('filebeat.registry.unprocessed_bytes', count=0)
| StarcoderdataPython |
305383 | import json
import os
from os import path
from .provider import Provider
SOURCE_DIR = "./source/data/"
class GCloud(Provider):
    """Provider backed by the bundled gcloud.json data file.

    The gce_* properties expose raw values from the data file; the
    render_* methods return their rendered counterparts (rendering
    semantics live in the Provider base class).
    """

    def __init__(self, options, *args, **kwargs):
        # Resolve gcloud.json relative to this module so cwd does not matter.
        data_file = path.join(os.path.dirname(os.path.abspath(__file__)), SOURCE_DIR, 'gcloud.json')
        super(GCloud, self).__init__(data_file, options, *args, **kwargs)

    @property
    def gce_zone(self):
        """Raw GCE zone value from the data file."""
        return self.data['gce']['zone']

    @property
    def gce_machine_type(self):
        """Raw GCE machine-type value from the data file."""
        return self.data['gce']['machine-type']

    @property
    def gce_disk_type(self):
        """Raw GCE disk-type value from the data file."""
        return self.data['gce']['disk-type']

    @property
    def gce_image(self):
        """Raw GCE image value from the data file."""
        return self.data['gce']['image']

    def render_gce_zone(self):
        """Rendered GCE zone."""
        return self.render(self.gce_zone)

    def render_gce_machine_type(self):
        """Rendered GCE machine type."""
        return self.render(self.gce_machine_type)

    def render_gce_disk_type(self):
        """Rendered GCE disk type."""
        return self.render(self.gce_disk_type)

    def render_gce_image(self):
        """Rendered GCE image."""
        return self.render(self.gce_image)
| StarcoderdataPython |
1819055 | <reponame>chirain1206/Improvement-on-OTT-QA
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""A script to build the tf-idf document matrices for retrieval."""
import numpy as np
import scipy.sparse as sp
import argparse
import os
import math
import logging
import glob
from multiprocessing import Pool as ProcessPool
from multiprocessing.util import Finalize
from functools import partial
from collections import Counter
import sys
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(current_path))
import drqa.retriever
import drqa.drqa_tokenizers
import sqlite3
import json
import importlib.util
from tqdm import tqdm
# Configure the root logger for timestamped console output.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
# ------------------------------------------------------------------------------
# Building corpus
# ------------------------------------------------------------------------------
def build_corpus(build_option, tmp_file):
    """Write a retrieval corpus to `tmp_file` as JSON lines ({'id', 'text'}).

    `build_option` selects the text that represents each table/passage:
      - 'title'                  : table title only
      - 'title_sectitle'         : title | section title
      - 'title_sectitle_sectext' : title | section title | section text
      - 'title_sectitle_schema'  : title | section title | column headers
      - 'title_sectitle_content' : title | section title | headers + all cells
      - 'text'                   : passage body keyed by passage id
      - 'text_title'             : passage title derived from its /wiki/ id

    Raises NotImplementedError for any other option.
    """
    # `with` replaces the original's scattered fw.close() calls (the 'text'
    # branch closed the file twice, and an invalid option leaked the handle).
    with open(tmp_file, 'w') as fw:

        def _emit(uid, text):
            # One JSON document per line, matching the downstream reader.
            fw.write(json.dumps({'id': uid, 'text': text}) + '\n')

        with open('../data/all_plain_tables.json', 'r') as f:
            tables = json.load(f)
        with open('../data/all_passages.json', 'r') as f:
            passages = json.load(f)

        if build_option == 'title':
            for _, table in tables.items():
                _emit(table['uid'], "{}".format(table['title']))
        elif build_option == 'title_sectitle':
            for _, table in tables.items():
                content = "{} | {}".format(table['title'], table['section_title'])
                _emit(table['uid'], content)
        elif build_option == 'title_sectitle_sectext':
            for _, table in tables.items():
                title = table['title']
                section_title = table['section_title']
                section_text = table['section_text']
                # Omit the trailing separator when there is no section text.
                if section_text == '':
                    content = "{} | {}".format(title, section_title)
                else:
                    content = "{} | {} | {}".format(title, section_title, section_text)
                _emit(table['uid'], content)
        elif build_option == 'title_sectitle_schema':
            for _, table in tables.items():
                headers = ' '.join(table['header'])
                content = "{} | {} | {}".format(table['title'], table['section_title'], headers)
                _emit(table['uid'], content)
        elif build_option == 'title_sectitle_content':
            for _, table in tables.items():
                # Headers first, then every cell, flattened row by row.
                contents = list(table['header'])
                for row in table['data']:
                    contents.extend(row)
                content = "{} | {} | {}".format(table['title'], table['section_title'], ' '.join(contents))
                _emit(table['uid'], content)
        elif build_option == 'text':
            for k, v in passages.items():
                _emit(k, v)
        elif build_option == 'text_title':
            for k in passages:
                # Derive a human-readable title from the /wiki/ path id.
                title = k.replace('/wiki/', '').replace('_', ' ')
                if k and title:
                    _emit(k, title)
        else:
            raise NotImplementedError
# ------------------------------------------------------------------------------
# Import helper
# ------------------------------------------------------------------------------
PREPROCESS_FN = None
def init_preprocess(filename):
    """Load a custom `preprocess` function into the module-global slot.

    No-op when `filename` is falsy; PREPROCESS_FN is left untouched.
    """
    global PREPROCESS_FN
    if not filename:
        return
    PREPROCESS_FN = import_module(filename).preprocess
def import_module(filename):
    """Import a module given a full path to the file."""
    # The module is always registered under the fixed name 'doc_filter'.
    spec = importlib.util.spec_from_file_location('doc_filter', filename)
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    return loaded
# ------------------------------------------------------------------------------
# Store corpus.
# ------------------------------------------------------------------------------
def iter_files(path):
    """Walk through all files located under a root path."""
    if os.path.isfile(path):
        # A single file yields just itself.
        yield path
        return
    if os.path.isdir(path):
        # Recursively yield every file beneath the directory.
        for dirpath, _, filenames in os.walk(path):
            for name in filenames:
                yield os.path.join(dirpath, name)
        return
    # Raised lazily on iteration, as with any generator.
    raise RuntimeError('Path %s is invalid' % path)
def get_contents(filename):
    """Parse the contents of a file. Each line is a JSON encoded document.

    Returns a list of (id, text) pairs, skipping documents the optional
    PREPROCESS_FN hook rejects (or that are empty).
    """
    global PREPROCESS_FN
    documents = []
    with open(filename) as f:
        for line in f:
            record = json.loads(line)
            # Optionally run the user-supplied preprocessing hook.
            if PREPROCESS_FN:
                record = PREPROCESS_FN(record)
            # Skip rejected/empty documents.
            if not record:
                continue
            documents.append((record['id'], record['text']))
    return documents
def store_contents(data_path, save_path, preprocess, num_workers=None):
    """Preprocess and store a corpus of documents in sqlite.

    Args:
        data_path: Root path to directory (or directory of directories) of files
          containing json encoded documents (must have `id` and `text` fields).
        save_path: Path to output sqlite db.
        preprocess: Path to file defining a custom `preprocess` function. Takes
          in and outputs a structured doc.
        num_workers: Number of parallel processes to use when reading docs.
    """
    # An existing DB is silently replaced (the original raise is kept below
    # as a record of the previous overwrite-protection behaviour).
    if os.path.isfile(save_path):
        os.remove(save_path)
        #raise RuntimeError('%s already exists! Not overwriting.' % save_path)

    logger.info('Reading into database...')
    conn = sqlite3.connect(save_path)
    c = conn.cursor()
    c.execute("CREATE TABLE documents (id PRIMARY KEY, text);")

    # Each worker loads the preprocess hook once via init_preprocess.
    workers = ProcessPool(num_workers, initializer=init_preprocess, initargs=(preprocess,))
    files = [f for f in iter_files(data_path)]
    count = 0
    # Files are parsed in parallel; inserts happen in the parent process.
    with tqdm(total=len(files)) as pbar:
        for pairs in tqdm(workers.imap_unordered(get_contents, files)):
            count += len(pairs)
            c.executemany("INSERT INTO documents VALUES (?,?)", pairs)
            pbar.update()
    logger.info('Read %d docs.' % count)
    logger.info('Committing...')
    conn.commit()
    conn.close()
# ------------------------------------------------------------------------------
# Multiprocessing functions
# ------------------------------------------------------------------------------
DOC2IDX = None
PROCESS_TOK = None
PROCESS_DB = None
def init(tokenizer_class, db_class, db_opts):
    """Worker-process initializer: build per-process tokenizer and DB handles."""
    global PROCESS_TOK, PROCESS_DB
    PROCESS_TOK = tokenizer_class()
    # Ensure the tokenizer/DB are cleaned up when the worker process exits.
    Finalize(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)
    PROCESS_DB = db_class(**db_opts)
    Finalize(PROCESS_DB, PROCESS_DB.close, exitpriority=100)
def fetch_text(doc_id):
    """Return the raw text of `doc_id` via the per-process DB handle."""
    global PROCESS_DB
    return PROCESS_DB.get_doc_text(doc_id)
def tokenize(text):
    """Tokenize `text` with the per-process tokenizer."""
    global PROCESS_TOK
    return PROCESS_TOK.tokenize(text)
# ------------------------------------------------------------------------------
# Build article --> word count sparse matrix.
# ------------------------------------------------------------------------------
def count(ngram, hash_size, doc_id):
    """Fetch the text of a document and compute hashed ngrams counts.

    Returns COO triplets (row, col, data): hashed-ngram rows, the document's
    column index repeated, and the per-ngram occurrence counts.
    """
    global DOC2IDX
    # Tokenize the normalized document text.
    tokens = tokenize(drqa.retriever.utils.normalize(fetch_text(doc_id)))
    # Get ngrams from tokens, with stopword/punctuation filtering.
    grams = tokens.ngrams(
        n=ngram, uncased=True, filter_fn=drqa.retriever.utils.filter_ngram
    )
    # Hash ngrams into the fixed-size space and count occurrences.
    gram_counts = Counter(
        drqa.retriever.utils.hash(gram, hash_size) for gram in grams
    )
    row = list(gram_counts.keys())
    col = [DOC2IDX[doc_id]] * len(gram_counts)
    data = list(gram_counts.values())
    return row, col, data
def get_count_matrix(args, db, db_opts):
    """Form a sparse word to document count matrix (inverted index).
    M[i, j] = # times word i appears in document j.
    """
    # Map doc_ids to indexes
    global DOC2IDX
    db_class = drqa.retriever.get_class(db)
    with db_class(**db_opts) as doc_db:
        doc_ids = doc_db.get_doc_ids()
        DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)}
    # Setup worker pool; init() gives each worker its own tokenizer/DB.
    tok_class = drqa.drqa_tokenizers.get_class(args.tokenizer)
    workers = ProcessPool(
        args.num_workers,
        initializer=init,
        initargs=(tok_class, db_class, db_opts)
    )
    # Compute the count matrix in steps (to keep in memory)
    logger.info('Mapping...')
    row, col, data = [], [], []
    # Process documents in ~10 batches so triplet lists grow gradually.
    step = max(int(len(doc_ids) / 10), 1)
    batches = [doc_ids[i:i + step] for i in range(0, len(doc_ids), step)]
    _count = partial(count, args.ngram, args.hash_size)
    for i, batch in enumerate(batches):
        logger.info('-' * 25 + 'Batch %d/%d' % (i + 1, len(batches)) + '-' * 25)
        for b_row, b_col, b_data in workers.imap_unordered(_count, batch):
            row.extend(b_row)
            col.extend(b_col)
            data.extend(b_data)
    workers.close()
    workers.join()
    logger.info('Creating sparse matrix...')
    # Rows = hashed terms, columns = documents.
    count_matrix = sp.csr_matrix(
        (data, (row, col)), shape=(args.hash_size, len(doc_ids))
    )
    count_matrix.sum_duplicates()
    return count_matrix, (DOC2IDX, doc_ids)
# ------------------------------------------------------------------------------
# Transform count matrix to different forms.
# ------------------------------------------------------------------------------
def get_tfidf_matrix(cnts, idf_cnts, option='tfidf'):
    """Convert the word count matrix into a tf-idf (or BM25) weighted one.

    tfidf = log(tf + 1) * log((N - Nt + 0.5) / (Nt + 0.5))
    * tf = term frequency in document
    * N = number of documents
    * Nt = number of occurences of term in all documents

    Parameters
    ----------
    cnts : scipy.sparse matrix, shape (num_terms, num_docs)
        Counts used for the TF part.
    idf_cnts : scipy.sparse matrix
        Counts used for the IDF part (may differ from ``cnts``).
    option : str
        'tfidf' or 'bm25'.

    Raises
    ------
    NotImplementedError
        If ``option`` is neither 'tfidf' nor 'bm25'.
    """
    # IDF part; negative idfs (terms in more than half the docs) clamp to 0.
    Ns = get_doc_freqs(idf_cnts)
    idfs = np.log((idf_cnts.shape[1] - Ns + 0.5) / (Ns + 0.5))
    idfs[idfs < 0] = 0
    idfs = sp.diags(idfs, 0)
    # BUG FIX: the default used to be 'tf-idf', which matched neither branch
    # below, so calling this function without an explicit option always
    # raised NotImplementedError.
    if option == 'tfidf':
        tfs = cnts.log1p()
    elif option == 'bm25':
        k1 = 1.5
        b = 0.75
        # BM25 document-length saturation.
        doc_length = np.array(cnts.sum(0)).squeeze()
        doc_length_ratio = k1 * (1 - b + b * doc_length / doc_length.mean())
        doc_length_ratio = sp.diags(doc_length_ratio, 0)
        binary = (cnts > 0).astype(int)
        masked_length_ratio = binary.dot(doc_length_ratio)
        # NOTE(review): the element-wise '+' on .data assumes cnts and
        # masked_length_ratio share an identical sparsity layout — confirm.
        denom = cnts.copy()
        denom.data = denom.data + masked_length_ratio.data
        tfs = cnts * (1 + k1)
        tfs.data = tfs.data / denom.data
    else:
        raise NotImplementedError
    tfidfs = idfs.dot(tfs)
    return tfidfs


def get_doc_freqs(cnts):
    """Return word --> # of docs it appears in."""
    binary = (cnts > 0).astype(int)
    freqs = np.array(binary.sum(1)).squeeze()
    return freqs
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # Pipeline: build corpus -> sqlite doc DB -> count matrix -> tf-idf index.
    parser = argparse.ArgumentParser()
    parser.add_argument('--build_option', type=str, default=None,
                        help='Build option for corpus')
    parser.add_argument('--out_dir', type=str, default=None,
                        help='Directory for saving output files')
    parser.add_argument('--ngram', type=int, default=2,
                        help=('Use up to N-size n-grams '
                              '(e.g. 2 = unigrams + bigrams)'))
    parser.add_argument('--hash-size', type=int, default=int(math.pow(2, 24)),
                        help='Number of buckets to use for hashing ngrams')
    parser.add_argument('--tokenizer', type=str, default='simple',
                        help=("String option specifying tokenizer type to use "
                              "(e.g. 'corenlp')"))
    parser.add_argument('--num-workers', type=int, default=None,
                        help='Number of CPU processes (for tokenizing, etc)')
    parser.add_argument('--option', type=str, default='tfidf',
                        help='TF-IDF or BM25')
    parser.add_argument('--tmp_file', type=str, default='/tmp/tf-idf-input.json',
                        help='Tmp file to put build corpus')
    parser.add_argument('--tmp_db_file', type=str, default='/tmp/db.json',
                        help='Tmp DB file to put build corpus')
    parser.add_argument('--preprocess', type=str, default=None,
                        help=('File path to a python module that defines '
                              'a `preprocess` function'))
    args = parser.parse_args()
    # Normalize the weighting-scheme name before validating it.
    args.option = args.option.lower()
    assert args.option in ['tfidf', 'bm25'], "only support TF-iDF and BM25"
    # NOTE(review): os.mkdir fails if parent directories are missing —
    # confirm out_dir is always one level deep.
    if not os.path.exists(args.out_dir):
        os.mkdir(args.out_dir)
    logging.info('Building corpus...')
    build_corpus(args.build_option, args.tmp_file)
    logging.info('Building DB file...')
    store_contents(
        args.tmp_file, args.tmp_db_file, args.preprocess, args.num_workers)
    logging.info('Counting words...')
    count_matrix, doc_dict = get_count_matrix(
        args, 'sqlite', {'db_path': args.tmp_db_file}
    )
    logger.info('Making tfidf vectors...')
    # Same counts are used for both the TF and the IDF parts here.
    tfidf = get_tfidf_matrix(count_matrix, count_matrix, option=args.option)
    logger.info('Getting word-doc frequencies...')
    freqs = get_doc_freqs(count_matrix)
    # Encode the index parameters into the output file name.
    basename = 'index'
    basename += ('-%s-ngram=%d-hash=%d-tokenizer=%s' %
                 (args.option, args.ngram, args.hash_size, args.tokenizer))
    filename = os.path.join(args.out_dir, basename)
    logger.info('Saving to %s.npz' % filename)
    metadata = {
        'doc_freqs': freqs,
        'tokenizer': args.tokenizer,
        'hash_size': args.hash_size,
        'ngram': args.ngram,
        'doc_dict': doc_dict
    }
    drqa.retriever.utils.save_sparse_csr(filename, tfidf, metadata)
| StarcoderdataPython |
6579567 | <filename>src/moehni/progmem_creator.py<gh_stars>0
# Writes the intensities to one header file.
# Each table maps a 0..255 index to an exponential 12-bit intensity curve;
# the additive offset doubles for every successive table.
import math

SIZE = 256          # entries per table
MAX_VALUE = 4095    # 12-bit DAC ceiling
BASE_OFFSET = 125   # offset for table 0; doubles per table

# Context manager guarantees the header file is closed even on error
# (the original leaked the handle if an exception was raised mid-loop).
with open("intensities.h", "w") as f:
    for i in range(5):
        f.write("const PROGMEM uint16_t intensities_" + str(i) + "[] = {")
        offset = BASE_OFFSET * 2 ** i
        # Exponent chosen so the last entry lands exactly on MAX_VALUE.
        expo = math.log(MAX_VALUE - offset) / (SIZE - 1)
        for index in range(SIZE):
            value = round(math.exp(index * expo)) + offset
            print(index, value)
            f.write(str(value))
            f.write(", ")
        f.write("};\n")
| StarcoderdataPython |
289599 | <gh_stars>1-10
#!/usr/bin/env python
"""
admindb helper library
Called by flask app admin
to modify budgets, etc.
Copyright 2015 zulily, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from app import app
import app.utils as utils
#globals
SETTINGS_FILE = "/var/dubweb/.admin_settings"  # JSON file with DB host/credentials
class AdmIDs(object):
    """
    admindb ids helper class

    Bundles the provider/team/project/budget/division ids used to filter
    the admin queries; any id may be None (meaning "no filter").
    """
    def __init__(self, prv_id, team_id, project_id, budget_id, div_id):
        """
        Initial ids for chart
        """
        self.prv, self.team, self.project, self.budget, self.div = (
            prv_id, team_id, project_id, budget_id, div_id)
def format_budget(bgt_id, bgt, month, comment, team_id, prv_id, response):
    """
    Helper for budget row formatting

    Returns one budget row as a dict keyed by column label.
    """
    return {
        "ID": bgt_id,
        "Budget": bgt,
        "Month": month,
        "Comment": comment,
        "TeamID": team_id,
        "ProviderID": prv_id,
        "Response": response,
    }
def format_provider(prv_id, name, lastetl, taxrate):
    """
    Helper for provider row formatting

    Returns one provider row as a dict keyed by column label.
    """
    return {
        "ID": prv_id,
        "Name": name,
        "LastETL": lastetl,
        "TaxRate": taxrate,
    }
def format_team(team_id, name, div_id):
    """
    Helper for team row formatting

    Returns one team row as a dict keyed by column label.
    """
    return {
        "ID": team_id,
        "Name": name,
        "DivisionID": div_id,
    }
def format_division(div_id, name):
    """
    Helper for division row formatting

    Returns one division row as a dict keyed by column label.
    """
    return {
        "ID": div_id,
        "Name": name,
    }
def format_project(prj_id, name, extid, team_id, prv_id):
    """
    Helper for project row formatting

    Returns one project row as a dict keyed by column label.
    """
    return {
        "ID": prj_id,
        "ExtName": name,
        "ExtID": extid,
        "TeamID": team_id,
        "ProviderID": prv_id,
    }
def get_budget_items(ids, m_filter, bgt_filter):
    """
    Given a time, and optional filters for provider, team, month,
    budget...
    Return list of budget entries.
    """
    datalist = []
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        params = []
        # WHERE clause is grown incrementally; all values go through
        # driver placeholders (no string interpolation of user input).
        query = """
                SELECT distinct budgetid, budget, month,
                IFNULL(comment,""), teamid, prvid, response
                FROM budgetdata WHERE 1 """
        if ids.team is not None:
            query += " AND teamid = %s "
            params.append(str(ids.team))
        if ids.budget is not None:
            query += " AND budgetid = %s "
            params.append(str(ids.budget))
        if ids.prv is not None:
            query += " AND prvid = %s "
            params.append(str(ids.prv))
        if m_filter is not None:
            # Prefix match, e.g. '2015' matches every month of that year.
            query += " AND month LIKE %s "
            params.append(m_filter + "%")
        if bgt_filter is not None:
            query += " AND budget LIKE %s "
            params.append(str(bgt_filter) + "%")
        app.logger.debug("get budget query: %s", query)
        dubmetrics = utils.get_from_db(query, tuple(params), dubconn)
        for dubmetric in dubmetrics:
            # Skip empty rows / rows with a NULL budget.
            if len(dubmetric) > 0 and dubmetric[1] is not None:
                budget_row = format_budget(bgt_id=dubmetric[0],
                                           bgt=dubmetric[1],
                                           month=dubmetric[2],
                                           comment=dubmetric[3],
                                           team_id=dubmetric[4],
                                           prv_id=dubmetric[5],
                                           response=dubmetric[6])
                datalist.append(budget_row)
        dubconn.close()
    return datalist
def edit_budget_item(ids, my_month, my_budget, my_comment, my_response):
    """
    Given budget id, and modified: providers, team, project,
    budget, or month
    Return modified budget entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        # Echo back the row as it should look after the UPDATE.
        budget = format_budget(bgt_id=int(ids.budget),
                               bgt=int(my_budget),
                               month=my_month,
                               comment=my_comment,
                               team_id=int(ids.team),
                               prv_id=int(ids.prv),
                               response=my_response)
        query = """
                UPDATE budgetdata
                SET budget=%s, month=%s, teamid=%s, prvid=%s, comment=%s,
                response=%s WHERE budgetid=%s
                """
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (budget['Budget'], budget['Month'],
                                   budget['TeamID'], budget['ProviderID'],
                                   budget['Comment'], budget['Response'],
                                   budget['ID']))
        # Legacy Python 2 except syntax (module-wide).
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    # NOTE(review): if the DB connection fails, `budget` is unbound here.
    return budget
def insert_budget_item(ids, my_month, my_budget, my_comment, my_response):
    """
    Given providers, team, project, budget, month, and comments,
    Return inserted budget entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        # bgt_id is None: the DB assigns the auto-increment key.
        budget = format_budget(bgt_id=None,
                               bgt=int(my_budget),
                               month=my_month,
                               comment=my_comment,
                               team_id=int(ids.team),
                               prv_id=int(ids.prv),
                               response=my_response)
        query = """
                INSERT INTO budgetdata
                (budget, month, teamid, prvid, comment, response)
                VALUES (%s, %s, %s, %s, %s, %s)
                """
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (budget['Budget'], budget['Month'],
                                   budget['TeamID'], budget['ProviderID'],
                                   budget['Comment'], budget['Response']))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    # Re-query so the caller sees the stored row, including its new id.
    return get_budget_items(ids, my_month, my_budget)[0]
def clone_budget_month(ids, src_month, dst_month):
    """
    Clone budget from source month into (empty) destination months.
    Return destination month budgets.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        my_comment = "Cloned from " + src_month
        # INSERT IGNORE skips rows that already exist in the destination.
        query = """
                INSERT IGNORE INTO budgetdata
                (budget, month, teamid, prvid, comment, response)
                VALUES (%s, %s, %s, %s, %s, %s)
                """
        cursor = dubconn.cursor()
        src_budgets = get_budget_items(ids, m_filter=src_month,
                                       bgt_filter=None)
        for bdgt in src_budgets:
            # Copy each source row, retargeted at dst_month.
            budget = format_budget(bgt_id=None,
                                   bgt=int(bdgt['Budget']),
                                   month=dst_month,
                                   comment=my_comment,
                                   team_id=int(bdgt['TeamID']),
                                   prv_id=int(bdgt['ProviderID']),
                                   response=None)
            try:
                cursor.execute(query, (budget['Budget'], budget['Month'],
                                       budget['TeamID'], budget['ProviderID'],
                                       budget['Comment'], budget['Response']))
            except Exception, err:
                app.logger.error("mysql exception: [%d]: %s", err.args[0],
                                 err.args[1])
                app.logger.error("generated by: %s", query)
                success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    return get_budget_items(ids, m_filter=dst_month, bgt_filter=None)
def delete_budget_item(ids, my_month, my_budget, my_comment):
    """
    Given budget id, delete from db
    Return deleted budget entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        # Fetch the row first so it can be echoed back after deletion.
        budget = get_budget_items(ids, my_month, my_budget)
        query = """
                DELETE FROM budgetdata
                WHERE budgetid=%s
                """
        app.logger.debug("Got a delete query of: %s ", query)
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (ids.budget,))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    # NOTE(review): unbound / IndexError if the connection failed or the
    # row did not exist.
    return budget[0]
def get_providers_admin(ids):
    """
    Given an optional filter for provider id (of a datacenter instance),
    Return list of provider details.
    """
    datalist = []
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        params = []
        query = """
                SELECT distinct prvid, prvname, lastetl, taxrate
                FROM providers WHERE 1 """
        if ids.prv is not None:
            query += " AND prvid = %s "
            params.append(str(ids.prv))
        app.logger.debug("get provider query: %s", query)
        dubmetrics = utils.get_from_db(query, tuple(params), dubconn)
        for dubmetric in dubmetrics:
            if len(dubmetric) > 0 and dubmetric[1] is not None:
                # lastetl is stringified for JSON-friendly output.
                provider_row = format_provider(prv_id=dubmetric[0],
                                               name=dubmetric[1],
                                               lastetl=str(dubmetric[2]),
                                               taxrate=dubmetric[3])
                datalist.append(provider_row)
        dubconn.close()
    return datalist
def get_team_items(ids, name_filter):
    """
    Given optional filters for team id and name
    Return list of team entries.
    """
    datalist = []
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        params = []
        # WHERE clause built incrementally from the optional filters.
        query = """
                SELECT distinct teamid, teamname, divid
                FROM teams WHERE 1 """
        if ids.team is not None:
            query += " AND teamid = %s "
            params.append(str(ids.team))
        if ids.div is not None:
            query += " AND divid = %s "
            params.append(str(ids.div))
        if name_filter is not None:
            query += " AND teamname LIKE %s "
            params.append(name_filter + "%")
        app.logger.debug("get team query: %s", query)
        dubmetrics = utils.get_from_db(query, tuple(params), dubconn)
        for dubmetric in dubmetrics:
            if len(dubmetric) > 0 and dubmetric[1] is not None:
                team_row = format_team(team_id=dubmetric[0],
                                       name=dubmetric[1],
                                       div_id=dubmetric[2])
                datalist.append(team_row)
        dubconn.close()
    return datalist
def edit_team_item(ids, my_teamname):
    """
    Given team id, modified teamname, and division id
    Return modified team entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        # Row as it should look after the UPDATE.
        team = format_team(team_id=int(ids.team), name=my_teamname,
                           div_id=int(ids.div))
        query = """
                UPDATE teams
                SET teamname=%s, divid=%s WHERE teamid=%s
                """
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (team['Name'], team['DivisionID'],
                                   team['ID']))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    # NOTE(review): `team` is unbound if the connection failed.
    return team
def insert_team_item(ids, my_teamname):
    """
    Given team, name and division id
    Return inserted team entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        query = """
                INSERT INTO teams
                (teamname, divid)
                VALUES (%s, %s)
                """
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (my_teamname, ids.div))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    # Re-query so the caller sees the stored row with its new id.
    return get_team_items(ids, my_teamname)[0]
def delete_team_item(ids, my_teamname):
    """
    Given team id, delete from db
    Return deleted team entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        # Build the echo row before removing it from the DB.
        team = format_team(team_id=int(ids.team), name=my_teamname,
                           div_id=int(ids.div))
        query = """
                DELETE FROM teams
                WHERE teamid=%s
                """
        app.logger.debug("Got a delete query of: %s ", query)
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (team['ID'],))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    return team
def get_division_items(ids, name_filter):
    """
    Given optional filters for division id and name
    Return list of division entries.
    """
    datalist = []
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        params = []
        query = """
                SELECT distinct divid, divname
                FROM divisions WHERE 1 """
        if ids.div is not None:
            query += " AND divid = %s "
            params.append(str(ids.div))
        if name_filter is not None:
            # Prefix match on division name.
            query += " AND divname LIKE %s "
            params.append(name_filter + "%")
        app.logger.debug("get division query: %s", query)
        dubmetrics = utils.get_from_db(query, tuple(params), dubconn)
        for dubmetric in dubmetrics:
            if len(dubmetric) > 0 and dubmetric[1] is not None:
                division_row = format_division(div_id=dubmetric[0],
                                               name=dubmetric[1])
                datalist.append(division_row)
        dubconn.close()
    return datalist
def edit_division_item(ids, my_divname):
    """
    Given division id and modified divisionname,
    Return modified division entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        division = format_division(div_id=int(ids.div), name=my_divname)
        query = """
                UPDATE divisions
                SET divname=%s WHERE divid=%s
                """
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (division['Name'], division['ID']))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    # NOTE(review): `division` is unbound if the connection failed.
    return division
def insert_division_item(ids, my_divname):
    """
    Given division name,
    Return inserted division entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        query = """
                INSERT INTO divisions
                (divname)
                VALUES (%s)
                """
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (my_divname,))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    # Re-query so the caller sees the stored row with its new id.
    return get_division_items(ids, my_divname)[0]
def delete_division_item(ids, my_divname):
    """
    Given division id, delete from db
    Return deleted division entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        # Build the echo row before removing it from the DB.
        division = format_division(div_id=int(ids.div), name=my_divname)
        query = """
                DELETE FROM divisions
                WHERE divid=%s
                """
        app.logger.debug("Got a delete query of: %s ", query)
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (division['ID'],))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    return division
def get_project_items(ids, name_filter, extid_filter):
    """
    Given optional filters for provider id, team id, project id,
    project name, project external id,...
    Return list of project entries.
    """
    datalist = []
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        params = []
        query = """
                SELECT distinct prjid, extname, extid,
                prvid, teamid
                FROM projects WHERE 1 """
        if ids.project is not None:
            query += " AND prjid = %s "
            params.append(str(ids.project))
        if ids.team is not None:
            query += " AND teamid = %s "
            params.append(str(ids.team))
        if ids.prv is not None:
            query += " AND prvid = %s "
            params.append(str(ids.prv))
        if name_filter is not None:
            query += " AND extname LIKE %s "
            params.append(name_filter + "%")
        if extid_filter is not None:
            query += " AND extid LIKE %s "
            params.append(str(extid_filter) + "%")
        app.logger.debug("get project query: %s", query)
        dubmetrics = utils.get_from_db(query, tuple(params), dubconn)
        for dubmetric in dubmetrics:
            if len(dubmetric) > 0 and dubmetric[1] is not None:
                # Column order is prjid, extname, extid, prvid, teamid.
                project_row = format_project(prj_id=dubmetric[0],
                                             name=dubmetric[1],
                                             extid=dubmetric[2],
                                             prv_id=dubmetric[3],
                                             team_id=dubmetric[4])
                datalist.append(project_row)
        dubconn.close()
    return datalist
def edit_project_item(ids, my_extname, my_extid):
    """
    Given project id, and modified: providers, team, external name,
    or external id,
    Return modified project entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        # Row as it should look after the UPDATE.
        project = format_project(prj_id=int(ids.project),
                                 name=my_extname,
                                 extid=my_extid,
                                 team_id=int(ids.team),
                                 prv_id=int(ids.prv))
        query = """
                UPDATE projects
                SET extname=%s, extid=%s, prvid=%s, teamid=%s
                WHERE prjid=%s
                """
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (project['ExtName'], project['ExtID'],
                                   project['ProviderID'], project['TeamID'],
                                   project['ID']))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    # NOTE(review): `project` is unbound if the connection failed.
    return project
def insert_project_item(ids, my_extname, my_extid):
    """
    Given providers, team, project, extname, and extid
    Return inserted project entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        # prj_id is None: the DB assigns the auto-increment key.
        project = format_project(prj_id=None,
                                 name=my_extname,
                                 extid=my_extid,
                                 team_id=int(ids.team),
                                 prv_id=int(ids.prv))
        query = """
                INSERT INTO projects
                (extname, extid, prvid, teamid)
                VALUES (%s, %s, %s, %s)
                """
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (project['ExtName'], project['ExtID'],
                                   project['ProviderID'], project['TeamID']))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    # Re-query so the caller sees the stored row with its new id.
    return get_project_items(ids, my_extname, my_extid)[0]
def delete_project_item(ids, my_extname, my_extid):
    """
    Given project id, delete from db
    Return deleted project entry.
    """
    settings = utils.load_json_definition_file(SETTINGS_FILE)
    success, dubconn = utils.open_monitoring_db(settings['dbhost'],
                                                settings['dbuser'],
                                                settings['dbpass'],
                                                settings['db_db'])
    if success:
        # Build the echo row before removing it from the DB.
        project = format_project(prj_id=int(ids.project),
                                 name=my_extname,
                                 extid=my_extid,
                                 team_id=int(ids.team),
                                 prv_id=int(ids.prv))
        query = """
                DELETE FROM projects
                WHERE prjid=%s
                """
        app.logger.debug("Got a delete query of: %s ", query)
        cursor = dubconn.cursor()
        try:
            cursor.execute(query, (project['ID'],))
        except Exception, err:
            app.logger.error("mysql exception: [%d]: %s", err.args[0],
                             err.args[1])
            app.logger.error("generated by: %s", query)
            success = 0
        dubconn.commit()
        cursor.close()
        dubconn.close()
    return project
| StarcoderdataPython |
# Redo challenge 035, adding a report of which kind of triangle is formed:
# - Equilateral: all sides equal;
# - Isosceles: two sides equal;
# - Scalene: all sides different.
n1 = float(input('\033[34mMedida 1:\033[m '))
n2 = float(input('\033[31mMedida 2:\033[m '))
n3 = float(input('\033[36mMedida 3:\033[m '))
# Triangle inequality: each side must be shorter than the sum of the others.
if n1 < n2 + n3 and n2 < n1 + n3 and n3 < n1 + n2:
    print('\033[30mCom essas medidas É POSSÍVEL montarmos um triângulo!\033[m')
    if n1 == n2 == n3:
        print('Esse triângulo será EQUILÁTERO.')
    # BUG FIX: the original test `n1 == n2 or n1 == n3` missed the
    # n2 == n3 case (e.g. 2, 3, 3 printed no classification at all).
    elif n1 == n2 or n1 == n3 or n2 == n3:
        print('Esse triângulo será ISÓSCELES.')
    else:
        print('Esse triângulo será ESCALENO.')
else:
    print('\033[30mCom essas medidas NÃO É POSSÍVEL montarmos um triângulo!\033[m')
| StarcoderdataPython |
3364989 | #!/usr/bin/env python
"""
CX input generation script.
"""
import logging
import sys
import h5py
import numpy as np
import matplotlib as mpl
import matplotlib.animation as anim
import matplotlib.image
import matplotlib.cm as cm
import matplotlib.pyplot as plt
try:
from shutil import which
except:
from shutilwhich import which
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout,
format='%(asctime)s %(name)s %(levelname)s %(message)s')
logger = logging.getLogger('cx')
class PlaySavedSignal(anim.TimedAnimation):
    """
    Animate a sequence of frames saved in an HDF5 file.

    The stored signal must be stored in an order-3 tensor
    with dimensions `(number_of_frames, frame_rows, frame_cols)`.
    """
    def __init__(self, data, interval=100):
        fig = plt.figure()
        self.data = data
        self.ax = plt.imshow(self.data[0, :, :], cmap=cm.get_cmap('gray'))
        # BUG FIX: super(self.__class__, ...) recurses infinitely when this
        # class is subclassed; name the class explicitly.
        super(PlaySavedSignal, self).__init__(fig, interval=interval,
                                              blit=True, repeat=False)

    def _draw_frame(self, framedata):
        # Called by TimedAnimation with each index from new_frame_seq().
        frame = self.data[framedata, :, :]
        self.ax.set_data(frame)

    def new_frame_seq(self):
        # Frame 0 is already displayed by __init__, so start at 1.
        return iter(range(1, self.data.shape[0]))

    @classmethod
    def from_file(cls, file_name, dataset='array', interval=100):
        """Load frames from `file_name[dataset]` and build the animation."""
        # Close the HDF5 handle once the data has been copied out
        # (the original leaked the open file).
        with h5py.File(file_name, 'r') as f:
            data = f[dataset][:]
        return cls(data, interval)
class PlayCXInputSignals(anim.TimedAnimation):
    """
    Animate CX input signals.

    Shows the raw input alongside the BU (left/right) and PB
    receptive-field responses, one subplot each.
    """
    def __init__(self, input, bu_input_l, bu_input_r, pb_input, interval=100, step=1):
        # Frame data step:
        self.step = step
        # All signals must contain the same number of frames; the two BU
        # responses must also have matching row counts.
        assert bu_input_l.shape[0] == pb_input.shape[0]
        assert bu_input_r.shape[0] == pb_input.shape[0]
        assert bu_input_l.shape[1] == bu_input_r.shape[1]
        self.input = input
        self.bu_input_l = bu_input_l
        self.bu_input_r = bu_input_r
        self.pb_input = pb_input
        fig = plt.figure()
        self.ax = plt.subplot2grid((3, 2), (0, 0), colspan=2)
        self.ax_im = self.ax.imshow(input[0],
                                    vmin=np.min(input), vmax=np.max(input),
                                    cmap=cm.get_cmap('gray'))
        self.ax.axis('tight')
        plt.title('Input')
        self.ax_bu_l = plt.subplot2grid((3, 2), (1, 0))
        self.ax_bu_l_im = self.ax_bu_l.imshow(bu_input_l[0],
                                              vmin=np.min(bu_input_l), vmax=np.max(bu_input_l),
                                              cmap=cm.get_cmap('gray'))
        self.ax_bu_l.axis('tight')
        plt.title('BU RF Response')
        self.ax_bu_r = plt.subplot2grid((3, 2), (1, 1))
        # NOTE(review): the lowercase 'bu' in this title looks like a typo
        # for 'BU' — kept as-is to preserve output.
        self.ax_bu_r_im = self.ax_bu_r.imshow(bu_input_r[0],
                                              vmin=np.min(bu_input_r), vmax=np.max(bu_input_r),
                                              cmap=cm.get_cmap('gray'))
        self.ax_bu_r.axis('tight')
        plt.title('bu RF Response')
        self.ax_pb = plt.subplot2grid((3, 2), (2, 0), colspan=2)
        self.ax_pb_im = self.ax_pb.imshow(pb_input[0][np.newaxis],
                                          vmin=np.min(pb_input), vmax=np.max(pb_input),
                                          cmap=cm.get_cmap('gray'))
        self.ax_pb.axis('tight')
        plt.title('PB RF Response')
        plt.tight_layout()
        # BUG FIX: super(self.__class__, ...) recurses infinitely when this
        # class is subclassed; name the class explicitly.
        super(PlayCXInputSignals, self).__init__(fig, interval=interval,
                                                 blit=True, repeat=False)

    def _draw_frame(self, framedata):
        # Update all four panels for the given frame index.
        self.ax_im.set_data(self.input[framedata])
        self.ax_bu_l_im.set_data(self.bu_input_l[framedata])
        self.ax_bu_r_im.set_data(self.bu_input_r[framedata])
        self.ax_pb_im.set_data(self.pb_input[framedata][np.newaxis])

    def new_frame_seq(self):
        # Frame 0 is shown by __init__; advance by self.step thereafter.
        return iter(range(1, self.pb_input.shape[0], self.step))

    @classmethod
    def from_file(cls, input_file, bu_input_l_file, bu_input_r_file, pb_input_file,
                  dataset='array', interval=100, step=1):
        """Load the four signals from HDF5 files and build the animation."""
        # Close each HDF5 handle after copying its data out
        # (the original leaked all four open files).
        with h5py.File(input_file, 'r') as f:
            input = f[dataset][:]
        with h5py.File(bu_input_l_file, 'r') as f:
            bu_input_l = f[dataset][:]
        with h5py.File(bu_input_r_file, 'r') as f:
            bu_input_r = f[dataset][:]
        with h5py.File(pb_input_file, 'r') as f:
            pb_input = f[dataset][:]
        return cls(input, bu_input_l, bu_input_r, pb_input,
                   interval, step)

    def save(self, filename):
        """Render the animation to `filename` using ffmpeg or avconv."""
        if which('ffmpeg'):
            w = anim.FFMpegFileWriter()
        elif which('avconv'):
            # NOTE(review): AVConvFileWriter was removed in recent
            # matplotlib releases — confirm the pinned version provides it.
            w = anim.AVConvFileWriter()
        else:
            raise RuntimeError('avconv or ffmpeg required')
        # BUG FIX: name the class explicitly in super() (see __init__).
        super(PlayCXInputSignals, self).save(filename, writer=w)
class CreateSignal(object):
    """
    Create a test video signal.

    Parameters
    ----------
    shape : tuple
        Frame dimensions (rows, cols).
    dt : float
        Time step between frames.
    dur : float
        Total duration; the number of frames is int(dur/dt).
    """
    def __init__(self, shape, dt, dur):
        self.shape = shape
        self.dt = dt
        self.dur = dur
        self.N_t = int(self.dur/self.dt)

    def moving_bar_l2r(self, width):
        """Frames of a `width`-pixel vertical bar sweeping left to right."""
        data = np.empty((self.N_t, self.shape[0], self.shape[1]), np.float64)
        for i in range(self.N_t):
            # Bar start column advances linearly with the frame index.
            start = int(np.ceil(i*(self.shape[1]-width)/float(self.N_t)))
            frame = np.zeros(self.shape, dtype=np.float64)
            frame[:, start:start+width] = 1.0
            data[i, :, :] = frame
        return data

    def moving_bar_r2l(self, width):
        """Frames of a `width`-pixel vertical bar sweeping right to left."""
        # This is exactly the left-to-right sweep played backwards; reuse it
        # instead of duplicating the generation loop (the original repeated
        # the loop verbatim and then reversed the result).
        return self.moving_bar_l2r(width)[::-1]

    @classmethod
    def write(cls, data, file_name, dataset='array'):
        """Write `data` to HDF5 `file_name` under key `dataset`."""
        # Context manager guarantees the file is closed even on error.
        with h5py.File(file_name, 'w') as f:
            f.create_dataset(dataset, data.shape, data.dtype,
                             maxshape=(None,)+data.shape[1:])
            f[dataset][:] = data
class CircularGaussianFilterBank(object):
    """
    Create a bank of circular 2D Gaussian filters.

    Parameters
    ----------
    shape : tuple
        Image dimensions.
    sigma : float
        Parameter of Gaussian.
    n : int
        How many blocks should occupy the x-axis.
    """
    def __init__(self, shape, sigma, n):
        self.shape = shape
        self.sigma = sigma
        self.n = n
        # Compute maximal and minimal response of a centered filter to use for
        # normalization:
        self.norm_min = np.inner(np.zeros(np.prod(shape)),
                                 self.gaussian_mat(shape, sigma, 0, 0, n).reshape(-1))
        self.norm_max = np.inner(np.ones(np.prod(shape)),
                                 self.gaussian_mat(shape, sigma, 0, 0, n).reshape(-1))
        self.filters = self.create_filters(shape, sigma, n)

    def normalize_output(self, output):
        """
        Normalize filter output against range of responses to a centered RF.
        """
        return output/(self.norm_max-self.norm_min)

    @classmethod
    def func_gaussian(cls, x, y, sigma):
        """
        2D Gaussian function.
        """
        # NOTE(review): the normalized Gaussian usually has 2*pi*sigma**2 in
        # the denominator; the '1*' here may be deliberate scaling — confirm.
        return (1.0/(1*np.pi*(sigma**2)))*np.exp(-(1.0/(2*(sigma**2)))*(x**2+y**2))

    @classmethod
    def gaussian_mat(cls, shape, sigma, n_x_offset, n_y_offset, n):
        """
        Compute offset circular 2D Gaussian.
        """
        # Image dimensions in pixels:
        N_y, N_x = shape
        # Normalized image width and height:
        x_max = 1.0
        y_max = N_y/float(N_x)
        # Offsets are expressed in block units (1/n of the image width).
        X, Y = np.meshgrid(np.linspace(-x_max/2, x_max/2, N_x)-(n_x_offset/float(n)),
                           np.linspace(-y_max/2, y_max/2, N_y)-(n_y_offset/float(n)))
        return cls.func_gaussian(X, Y, sigma)

    @classmethod
    def create_filters(cls, shape, sigma, n):
        """
        Create filter bank as order-4 tensor with dimensions
        (blocks_y, blocks_x, rows, cols).
        """
        N_y, N_x = shape
        # Compute how many blocks to use along the y-axis.
        # BUG FIX: use floor division so `m` is an int under Python 3;
        # np.empty/np.linspace reject a float dimension (on Python 2 int
        # inputs the result is unchanged).
        m = n*N_y//N_x
        # Construct filters offset by the blocks:
        n_x_offsets = np.linspace(np.ceil(-n/2.0), np.floor(n/2.0), n)
        n_y_offsets = np.linspace(np.ceil(-m/2.0), np.floor(m/2.0), m)
        filters = np.empty((m, n, N_y, N_x), np.float64)
        for j, n_x_offset in enumerate(n_x_offsets):
            for i, n_y_offset in enumerate(n_y_offsets):
                filters[i, j] = cls.gaussian_mat(shape, sigma,
                                                 n_x_offset, n_y_offset, n)
        return filters

    def apply_filters(self, frame, normalize=True):
        """
        Compute inner products of computed filters and a video frame.
        """
        result = np.tensordot(self.filters, frame)
        if normalize:
            return self.normalize_output(result)
        else:
            return result
class RectangularFilterBank(object):
    """
    Bank of 2D rectangular (box) filters that tile the x-axis.

    Parameters
    ----------
    shape : tuple
        (height, width) of the frames the filters are applied to.
    n : int
        Number of blocks tiling the x-axis.
    """
    def __init__(self, shape, n):
        self.shape = shape
        self.n = n
        # Responses of a centered filter to all-zero and all-one frames,
        # used to map filter outputs into a fixed range.
        centered = self.rect_mat(shape, 0, n).reshape(-1)
        self.norm_min = np.inner(np.zeros(np.prod(shape)), centered)
        self.norm_max = np.inner(np.ones(np.prod(shape)), centered)
        self.filters = self.create_filters(shape, n)

    def normalize_output(self, output):
        """Normalize output against the response range of a centered RF."""
        return output/(self.norm_max-self.norm_min)

    @classmethod
    def func_rect(cls, x, y, width):
        """Indicator of x lying in a centered window of the given width."""
        inside = np.logical_and(x > -width/2.0, x <= width/2.0)
        return inside.astype(np.float64)

    @classmethod
    def rect_mat(cls, shape, n_x_offset, n):
        """Rectangular filter offset by n_x_offset blocks along x."""
        N_y, N_x = shape
        x_max = 1.0
        y_max = N_y/float(N_x)
        xs = np.linspace(-x_max/2, x_max/2, N_x) - (n_x_offset/float(n))
        ys = np.linspace(-y_max/2, y_max/2, N_y)
        X, Y = np.meshgrid(xs, ys)
        return cls.func_rect(X, Y, 1.0/n)

    @classmethod
    def create_filters(cls, shape, n):
        """Create the filter bank as an order-3 tensor (n, N_y, N_x)."""
        offsets = np.linspace(np.ceil(-n/2.0), np.floor(n/2.0), n)
        return np.stack([cls.rect_mat(shape, off, n) for off in offsets])

    def apply_filters(self, frame, normalize=True):
        """
        Compute inner products of computed filters and a video frame.
        """
        response = np.tensordot(self.filters, frame)
        return self.normalize_output(response) if normalize else response
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', default='l2r', type=str,
                        help='Direction [l2r, r2l]')
    args = parser.parse_args()
    # NOTE(review): `logger` is assumed to be configured earlier in this
    # module -- confirm before running standalone.
    logger.info('generating input video signal (%s)' % args.d)
    shape = (200, 500)
    dt = 1e-4
    dur = 0.2
    c = CreateSignal(shape, dt, dur)
    if args.d == 'l2r':
        data = c.moving_bar_l2r(50)
    elif args.d == 'r2l':
        data = c.moving_bar_r2l(50)
    else:
        raise RuntimeError('unsupported signal type')
    c.write(data, 'moving_bar.h5')
    logger.info('generating Gaussian RFs for BU')
    # Integer division: shape[1]/2 is a float under Python 3 and cannot
    # be used as an array dimension or slice index.
    half = shape[1] // 2
    fc = CircularGaussianFilterBank((shape[0], half), 0.05, 10)
    logger.info('generating rectangular RFs for PB')
    fr = RectangularFilterBank(shape, 18)
    logger.info('filtering with Gaussian RFs')
    # Left half of each frame feeds BU, right half feeds bu.
    BU_input_pre = np.empty((len(data),)+fc.filters.shape[0:2])
    bu_input_pre = np.empty((len(data),)+fc.filters.shape[0:2])
    for i, frame in enumerate(data):
        BU_input_pre[i, :, :] = 3*fc.apply_filters(frame[:, :half])
        bu_input_pre[i, :, :] = 3*fc.apply_filters(frame[:, half:])
    logger.info('filtering with rectangular RFs')
    PB_input_pre = np.empty((len(data), fr.filters.shape[0]))
    for i, frame in enumerate(data):
        PB_input_pre[i, :] = 3*fr.apply_filters(frame)
    logger.info('saving RF responses')
    with h5py.File('BU_input_pre.h5', 'w') as f:
        f.create_dataset('/array', data=BU_input_pre)
    with h5py.File('bu_input_pre.h5', 'w') as f:
        f.create_dataset('/array', data=bu_input_pre)
    with h5py.File('PB_input_pre.h5', 'w') as f:
        f.create_dataset('/array', data=PB_input_pre)
| StarcoderdataPython |
3444992 | <reponame>jhonifreitas/auto-service<gh_stars>0
from model_mommy import mommy
from rest_framework.serializers import ValidationError
from django.test import TestCase
from django.contrib.auth.models import User
from gojob.api.v1.auth.serializers import LoginSerializer
class LoginSerializerValidTest(TestCase):
    """LoginSerializer accepts a real user's credentials and issues a token."""
    def setUp(self):
        # Back the serializer with a real user plus its profile record.
        self.user = User.objects.create_user(username='test', password='<PASSWORD>')
        self.profile = mommy.make('custom_profile.Profile', user=self.user)
        self.serializer = LoginSerializer(data={'username': self.user.username, 'password': '<PASSWORD>'})
    def test_serializer_is_valid(self):
        self.assertTrue(self.serializer.is_valid())
    def test_serializer_get_token(self):
        # is_valid() must run first so validated data is populated.
        self.serializer.is_valid()
        self.assertTrue(self.serializer.get_token())
class LoginSerializerInvalidTest(TestCase):
    """LoginSerializer rejects empty payloads and bad credentials."""
    def setUp(self):
        self.user = User.objects.create_user(username='test', password='<PASSWORD>')
        # No input data at all: the serializer must not validate.
        self.serializer = LoginSerializer(data={})
    def test_serializer_not_is_valid(self):
        self.assertFalse(self.serializer.is_valid())
    def test_validate_password_invalid(self):
        # Wrong password for an existing username raises ValidationError.
        with self.assertRaises(ValidationError):
            self.serializer.validate({'username': self.user.username, 'password': '<PASSWORD>'})
    def test_validate_username_invalid(self):
        # Unknown username raises ValidationError.
        with self.assertRaises(ValidationError):
            self.serializer.validate({'username': 'invalid-username'})
| StarcoderdataPython |
262997 | import math
import collections
import nltk
import time
from glob import glob
import os
from nltk.tokenize import RegexpTokenizer
from nltk.stem.snowball import SnowballStemmer
from nltk.util import ngrams
from nltk.corpus import stopwords
tokenizer = RegexpTokenizer(r'\w+')
stemmer = SnowballStemmer("english")
stopset = set(stopwords.words('english'))
import math
import pulp
import numpy as np
# Constants to be used by you when you fill the functions
START_SYMBOL = '*'
STOP_SYMBOL = 'STOP'
MINUS_INFINITY_SENTENCE_LOG_PROB = -1000
root_dir = "out_files"
DATA_PATH = 'data/'
OUTPUT_PATH = 'gen_summary/'
WORD_LIMIT = 100
def calc_probabilities(training_corpus):
    """
    Compute unigram, bigram and trigram log2-probabilities for a corpus.

    Parameters
    ----------
    training_corpus : sequence of str
        Sentences, one per element, whitespace-tokenized.  Must support
        len() (a list, not a generator).

    Returns
    -------
    (unigram_p, bigram_p, trigram_p) : tuple of dict
        Unigram keys are plain token strings; bigram/trigram keys are
        tuples.  Values are log2 probabilities.
    """
    unigram_c = collections.defaultdict(int)
    bigram_c = collections.defaultdict(int)
    trigram_c = collections.defaultdict(int)
    for sentence in training_corpus:
        tokens0 = sentence.strip().split()
        tokens1 = tokens0 + [STOP_SYMBOL]
        tokens2 = [START_SYMBOL] + tokens0 + [STOP_SYMBOL]
        tokens3 = [START_SYMBOL] + [START_SYMBOL] + tokens0 + [STOP_SYMBOL]
        # unigrams
        for unigram in tokens1:
            unigram_c[unigram] += 1
        # bigrams
        for bigram in nltk.bigrams(tokens2):
            bigram_c[bigram] += 1
        # trigrams
        for trigram in nltk.trigrams(tokens3):
            trigram_c[trigram] += 1
    # dict.values()/items() work on both Python 2 and 3; the original
    # itervalues()/iteritems() are Python-2 only.
    unigrams_len = sum(unigram_c.values())
    unigram_p = {k: math.log(float(v) / unigrams_len, 2)
                 for k, v in unigram_c.items()}
    # Each sentence contributes one leading START symbol as bigram context.
    unigram_c[START_SYMBOL] = len(training_corpus)
    bigram_p = {k: math.log(float(v) / unigram_c[k[0]], 2)
                for k, v in bigram_c.items()}
    # Each sentence contributes one (START, START) trigram context.
    bigram_c[(START_SYMBOL, START_SYMBOL)] = len(training_corpus)
    trigram_p = {k: math.log(float(v) / bigram_c[k[:2]], 2)
                 for k, v in trigram_c.items()}
    return unigram_p, bigram_p, trigram_p
def q1_output(unigrams, bigrams, trigrams, filename):
    """
    Write n-gram log-probabilities to *filename*, one per line, sorted.

    `unigrams` is keyed by token string, `bigrams`/`trigrams` by tuples,
    matching the output of calc_probabilities().
    """
    # sorted() replaces the Python-2-only `d.keys(); keys.sort()` pattern
    # (dict views have no .sort() in Python 3); `with` closes the file
    # even on error.
    with open(filename, 'w') as outfile:
        for unigram in sorted(unigrams):
            # Unigram keys are plain strings, so write the whole token;
            # the original `unigram[0]` emitted only its first character.
            outfile.write('UNIGRAM ' + unigram + ' ' + str(unigrams[unigram]) + '\n')
        for bigram in sorted(bigrams):
            outfile.write('BIGRAM ' + bigram[0] + ' ' + bigram[1] + ' ' + str(bigrams[bigram]) + '\n')
        for trigram in sorted(trigrams):
            outfile.write('TRIGRAM ' + trigram[0] + ' ' + trigram[1] + ' ' + trigram[2] + ' ' + str(trigrams[trigram]) + '\n')
def score(ngram_p, n, corpus):
    """
    Score each sentence in *corpus* under an n-gram log-probability model.

    Unknown n-grams contribute MINUS_INFINITY_SENTENCE_LOG_PROB.  Returns
    one total log2-probability per sentence.
    """
    scores = []
    for sentence in corpus:
        words = sentence.strip().split()
        # Pad with START/STOP symbols as appropriate for the model order.
        if n == 1:
            grams = words + [STOP_SYMBOL]
        elif n == 2:
            grams = nltk.bigrams([START_SYMBOL] + words + [STOP_SYMBOL])
        elif n == 3:
            grams = nltk.trigrams([START_SYMBOL, START_SYMBOL] + words + [STOP_SYMBOL])
        else:
            raise ValueError('Parameter "n" has an invalid value %s' % n)
        total = 0
        for gram in grams:
            try:
                total += ngram_p[gram]
            except KeyError:
                total += MINUS_INFINITY_SENTENCE_LOG_PROB
        scores.append(total)
    return scores
def score_output(scores, filename, end):
    """Write one score per entry to *filename*, each followed by *end*."""
    with open(filename, 'w') as outfile:
        outfile.write(''.join(str(s) + end for s in scores))
def linearscore(unigrams, bigrams, trigrams, corpus):
    """
    Score sentences with equal-weight linear interpolation of the
    unigram, bigram and trigram models.  Inputs are log2-probability
    dicts from calc_probabilities(); returns one log2 score per sentence.
    """
    scores = []
    # Equal interpolation weight for each of the three models.
    lambda_ = 1.0 / 3
    for sentence in corpus:
        interpolated_score = 0
        tokens0 = sentence.strip().split()
        for trigram in nltk.trigrams([START_SYMBOL] + [START_SYMBOL] + tokens0 + [STOP_SYMBOL]):
            # Missing n-grams fall back to the floor log-probability.
            try:
                p3 = trigrams[trigram]
            except KeyError:
                p3 = MINUS_INFINITY_SENTENCE_LOG_PROB
            try:
                p2 = bigrams[trigram[1:3]]
            except KeyError:
                p2 = MINUS_INFINITY_SENTENCE_LOG_PROB
            try:
                p1 = unigrams[trigram[2]]
            except KeyError:
                p1 = MINUS_INFINITY_SENTENCE_LOG_PROB
            # Interpolate in probability space (2**p), then return to log2.
            interpolated_score += math.log(lambda_ * (2 ** p3) + lambda_ * (2 ** p2) + lambda_ * (2 ** p1), 2)
        scores.append(interpolated_score)
    return scores
def get_ngrams(sentence, N):
    # Lowercase, tokenize with the module-level RegexpTokenizer, stem each
    # token, and return the list of N-grams as tuples.
    tokens = tokenizer.tokenize(sentence.lower())
    clean = [stemmer.stem(token) for token in tokens]
    return [gram for gram in ngrams(clean, N)]
def get_len(element):
    # Sentence length in tokens, using the module-level tokenizer.
    return len(tokenizer.tokenize(element))
def get_overlap(sentence_a, sentence_b, N):
    """
    Approximate the ROUGE-style n-gram overlap of two sentences.

    Returns the list of n-grams of *sentence_a* (with multiplicity) that
    also occur in *sentence_b*.  For unigrams, stopwords are removed.
    """
    tokens_a = tokenizer.tokenize(sentence_a.lower())
    tokens_b = tokenizer.tokenize(sentence_b.lower())
    ngrams_a = [gram for gram in ngrams(tokens_a, N)]
    ngrams_b = [gram for gram in ngrams(tokens_b, N)]
    if N == 1:
        # n-grams are 1-tuples while `stopset` holds bare strings, so the
        # original `gram in stopset` never matched anything; compare the
        # contained token instead.
        ngrams_a = [gram for gram in ngrams_a if gram[0] not in stopset]
        ngrams_b = [gram for gram in ngrams_b if gram[0] not in stopset]
    # Set membership turns the quadratic scan into a linear pass.
    b_set = set(ngrams_b)
    return [gram for gram in ngrams_a if gram in b_set]
def build_binary_overlap_matrix(scored_sentences, overlap_discount, N):
    """
    Pairwise discounted n-gram overlap for (sentence, score) pairs.

    Returns a symmetric size x size matrix whose [i][j] entry is the
    number of shared n-grams between sentences i and j, scaled by
    *overlap_discount*.
    """
    sentences = [tup[0] for tup in scored_sentences]
    size = len(sentences)
    # range() replaces the Python-2-only xrange().
    overlap_matrix = [[-1 for _ in range(size)] for _ in range(size)]
    for i, elem_i in enumerate(sentences):
        for j in range(i, len(sentences)):
            elem_j = sentences[j]
            ##
            ## Get an approximation for the pairwise intersection term from ROUGE.
            overlap = get_overlap(elem_i, elem_j, N)
            score = len(overlap) * overlap_discount
            overlap_matrix[i][j] = score
            overlap_matrix[j][i] = score
    return overlap_matrix
def solve(sentences, length_constraint, damping, overlap_discount, N):
    """
    Select a summary via the ILP-R integer program (pulp).

    Maximizes total sentence score minus damped pairwise overlap, subject
    to a total token-length constraint.  Returns (selected (sentence,
    score) pairs, total token length).
    """
    sentences_scores = [tup[1] for tup in sentences]
    sentences_lengths = [get_len(tup[0]) for tup in sentences]
    overlap_matrix = build_binary_overlap_matrix(sentences, overlap_discount, N)
    sentences_idx = [tup[0] for tup in enumerate(sentences)]
    # All ordered index pairs i < j for the pairwise interaction terms.
    pairwise_idx = []
    for i in sentences_idx:
        for j in sentences_idx[i+1:]:
            pairwise_idx.append((i, j))
    # x[i] = 1 iff sentence i is selected; alpha[i][j] = x[i] AND x[j].
    x = pulp.LpVariable.dicts('sentences', sentences_idx, lowBound=0, upBound=1, cat=pulp.LpInteger)
    alpha = pulp.LpVariable.dicts('pairwise_interactions', (sentences_idx, sentences_idx), lowBound=0, upBound=1, cat=pulp.LpInteger)
    prob = pulp.LpProblem("ILP-R", pulp.LpMaximize)
    # Objective: relevance minus damped redundancy.
    prob += sum(x[i] * sentences_scores[i] for i in sentences_idx) - damping * sum(alpha[i][j] * overlap_matrix[i][j] for i,j in pairwise_idx)
    # Budget: total selected length must fit the word limit.
    prob += sum(x[i] * sentences_lengths[i] for i in sentences_idx) <= length_constraint
    # Linearization of alpha[i][j] == x[i] * x[j].
    for i in sentences_idx:
        for j in sentences_idx:
            prob += alpha[i][j] - x[i] <= 0
            prob += alpha[i][j] - x[j] <= 0
            prob += x[i] + x[j] - alpha[i][j] <= 1
    prob.solve()
    # Collect the chosen sentences and their combined token length.
    summary = []
    total_len = 0
    for idx in sentences_idx:
        if x[idx].value() == 1.0:
            total_len += sentences_lengths[idx]
            summary.append(sentences[idx])
    return summary, total_len
def ILP_R_Optimizer(sentences, length_constraint, overlap_discount=(1./150.), damping=0.9, max_depth=50, N=2):
    """
    Summarize (sentence, score) pairs with the ILP, then greedily pad.

    Only the top *max_depth* sentences by score are given to the ILP;
    afterwards up to three more sentences from the full ranked list are
    appended if they still fit the length budget.
    """
    sorted_sentences = sorted(sentences, key=lambda tup:tup[1], reverse=True)
    tmp = sorted_sentences
    if len(sorted_sentences) > max_depth:
        sorted_sentences = sorted_sentences[:max_depth]
    summary, total_len = solve(sentences=sorted_sentences,
                               length_constraint=length_constraint,
                               damping=damping,
                               overlap_discount=overlap_discount,
                               N=N)
    # Greedy fill: at most 3 passes, each adding the first unselected
    # sentence that fits the remaining budget.
    # NOTE(review): `tmp` still references the full ranked list; each pass
    # rescans it from the top -- confirm this repetition is intended.
    for i in range(3):
        for e in tmp:
            if e in summary:
                continue
            l = get_len(e[0])
            if l <= length_constraint - total_len:
                summary.append(e)
                total_len += l
                break
    return summary
def main():
    # Summarize every document folder under root_dir and write one
    # space-separated summary file per folder into OUTPUT_PATH.
    # NOTE(review): Python 2 code (print statements; time.clock() was
    # removed in Python 3.8) -- runs only under Python 2 as written.
    # start timer
    time.clock()
    dirs = [os.path.join(root_dir,x) for x in os.listdir(root_dir)]
    for d in dirs:
        print "Processing :::: " + d
        # print root
        corpus = []
        # Gather every line of every file in the folder as one "sentence".
        docs_folder = [os.path.join(d,f) for f in os.listdir(d)]
        for docs in docs_folder:
            #print docs
            with open(docs) as m:
                # print file_len(docs)
                for line in m:
                    corpus.append(line)
        unigrams, bigrams, trigrams = calc_probabilities(corpus)
        # Rank sentences by squared bigram log-probability.
        biscores = score(bigrams, 2, corpus)
        sen = []
        for i in range(0,len(corpus)):
            sen.append((corpus[i], biscores[i]*biscores[i]))
        selected_sentences = ILP_R_Optimizer(sen, WORD_LIMIT)
        # Strip trailing newlines before writing the summary.
        result_summary = []
        for i in selected_sentences:
            temp = i[0].split("\n")
            result_summary.append(temp[0])
        score_output(result_summary, OUTPUT_PATH+d.split('/')[1] + '.txt', ' ')
if __name__ == "__main__": main()
| StarcoderdataPython |
class Solution:
    def computeArea(self, A: int, B: int, C: int, D: int, E: int, F: int, G: int, H: int) -> int:
        """
        Total area covered by two axis-aligned rectangles.

        The first rectangle spans (A, B)-(C, D) and the second
        (E, F)-(G, H); the doubly-counted intersection is subtracted.
        """
        area_first = (C - A) * (D - B)
        area_second = (G - E) * (H - F)
        # Overlap extents are negative when the rectangles are disjoint.
        overlap_w = min(C, G) - max(A, E)
        overlap_h = min(D, H) - max(B, F)
        overlap = overlap_w * overlap_h if overlap_w >= 0 and overlap_h >= 0 else 0
        return area_first + area_second - overlap
5092895 | <filename>pipeline/tests/core/flow/io/test_object_item_schema.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.test import TestCase
from pipeline.core.flow.io import ArrayItemSchema, IntItemSchema, ObjectItemSchema, StringItemSchema
class ObjectItemSchemaTestCase(TestCase):
    """as_dict() of a nested ObjectItemSchema mirrors the schema tree."""
    def setUp(self):
        self.maxDiff = None
        # Leaf schemas plus an array and an inner object used to build the
        # nested schema under test.
        self.description = "a simple item"
        self.int_description = "a integer"
        self.int_schema = IntItemSchema(description=self.int_description)
        self.string_description = "a string"
        self.string_schema = StringItemSchema(description=self.string_description)
        self.array_description = "a array"
        self.array_item_description = "item in array"
        self.array_schema = ArrayItemSchema(
            description=self.array_description, item_schema=StringItemSchema(description=self.array_item_description)
        )
        self.inner_object_description = "inner object"
        self.inner_object_schema = ObjectItemSchema(
            description=self.inner_object_description,
            property_schemas={"int_key": self.int_schema, "str_key": self.string_schema},
        )
    def test_as_dict(self):
        # Outer object holds the array and the inner object as properties.
        object_schema = ObjectItemSchema(
            description=self.description,
            property_schemas={"array_key": self.array_schema, "object_key": self.inner_object_schema},
        )
        schema_dict = object_schema.as_dict()
        # Expected dict mirrors the schema structure recursively.
        self.assertEqual(
            schema_dict,
            {
                "type": "object",
                "description": self.description,
                "enum": [],
                "properties": {
                    "array_key": {
                        "type": "array",
                        "description": self.array_description,
                        "enum": [],
                        "items": {"type": "string", "description": self.array_item_description, "enum": []},
                    },
                    "object_key": {
                        "type": "object",
                        "description": self.inner_object_description,
                        "enum": [],
                        "properties": {
                            "int_key": {"type": "int", "description": self.int_description, "enum": []},
                            "str_key": {"type": "string", "description": self.string_description, "enum": []},
                        },
                    },
                },
            },
        )
| StarcoderdataPython |
4939478 | import pytest
import packerlicious.provisioner as provisioner
class TestPowerShellProvisioner(object):
    """PowerShell provisioner validation errors."""
    def test_required_fields_missing(self):
        # PowerShell requires one of inline/script/scripts, so serializing
        # an empty provisioner must raise with a descriptive message.
        b = provisioner.PowerShell()
        with pytest.raises(ValueError) as excinfo:
            b.to_dict()
        assert 'PowerShell: one of the following must be specified: inline, script, scripts' == str(excinfo.value)
| StarcoderdataPython |
5068612 | <reponame>VerdigrisTech/data_science_tools
""" Data Science Tools
"""
# flake8: noqa: F403,F401
# pylint: disable=import-outside-toplevel,global-statement
from typing import List, Optional
def get_version() -> str:
    """Return the version read from the sibling __version__ file, or "dev"."""
    import os  # pylint: disable=redefined-outer-name

    version_file = os.path.join(os.path.dirname(__file__), "__version__")
    # No version file means a development checkout.
    if not os.path.exists(version_file):
        return "dev"
    with open(version_file, encoding="utf-8") as buffer:
        return buffer.readline().strip()
__version__ = get_version()
__all__ = ["__version__"]
def import_tools(names: Optional[List[str]] = None) -> None:
    """Load tools into the main namespace

    Purpose
    -------
    This is to make it easier to use the various tools within this package.
    Instead of tools.plotly_tools.subplots.make_subplots it imports to tools.make_subplots.
    Also allows to opt-in for importing modules which may not be necessary. By default
    none are loaded so if someone wanted something specific they can import just that
    and nothing else. e.g. `from data_science_tools import graph`.

    Parameters
    ----------
    names: Optional[List[str]] - Names of modules/tool sets to load. By default loads all.
    """
    global __all__
    import importlib

    names = names or [
        "dataframe",
        "graph",
        "python_interactive",
        "quantize",
        "statistics",
        "utils",
        "weighted",
        "matplotlib_tools",
        "plotly_tools",
    ]
    for module_name in names:
        module = importlib.import_module(f"{__package__}.{module_name}")
        # Use a distinct name for the module's exports: the original
        # rebound `names` here, shadowing the list being iterated.
        exported = module.__all__  # type: ignore
        namespace = {name: getattr(module, name) for name in exported}  # type: ignore
        namespace[module_name] = module  # type: ignore
        # Publish both the module and its exports at package level.
        __all__ += list(namespace.keys())  # type: ignore
        globals().update(namespace)  # type: ignore
| StarcoderdataPython |
11267452 | <gh_stars>1-10
import sys; import os
from pathlib import Path
import configparser
import subprocess
def install(package):
    """Install *package* with pip in the current interpreter environment.

    *package* may be anything pip accepts, e.g. a local directory path.
    """
    # Report the package actually being installed; the original printed the
    # module-level loop variable `fp` instead of the argument.
    print('Installing:', package)
    print('Install Exit Code:',
          subprocess.check_call(
              [sys.executable, "-m", "pip",
               "install", package])
          )
lib_fp = Path('libraries')

# Install every bundled library directory under ./libraries.
for fp in lib_fp.glob("*"):
    if not fp.is_dir():
        print(fp, 'not a directory, skipped.')
        continue
    install(package=fp)

# Must run after the installs above so the dependencies exist.
from mod.core import *  # noqa: E402,F403
12813856 | <reponame>jnoddell/LC
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def isSymmetric(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        # Iterative mirror check: compare symmetric node pairs with an
        # explicit stack instead of recursion.
        stack = [(root.left, root.right)]
        while stack:
            left, right = stack.pop()
            if left is None and right is None:
                continue
            if left is None or right is None:
                return False
            if left.val != right.val:
                return False
            # Outer children pair together, and inner children pair together.
            stack.append((left.left, right.right))
            stack.append((left.right, right.left))
        return True
| StarcoderdataPython |
4954831 | <reponame>SounakMandal/AlgoBook
class edge:
    """A weighted incoming edge; `From` is the source vertex index."""
    def __init__(self, u, w):
        self.From, self.weight = u, w
def addedge(u, v, w):
    # Record an edge u -> v of weight w by appending it to v's
    # incoming-edge list (`edges` is the module-level adjacency structure,
    # indexed by target vertex).
    edges[v].append(edge(u, w))
def shortestpath(dp):
    """
    Karp's minimum mean-weight cycle over the module-level graph.

    dp must be a (V+1) x V table; dp[k][v] ends up holding the minimum
    weight of a walk of exactly k edges from vertex 0 to v, or -1 if no
    such walk exists.  Uses the globals `V` (vertex count) and `edges`
    (incoming-edge lists).  Returns the minimum mean cycle weight.
    """
    # Initialize every entry as unreachable.
    for i in range(V + 1):
        for j in range(V):
            dp[i][j] = -1
    # Seed: only the source vertex 0 is reachable with a 0-edge walk.
    # (Without this the original left every entry at -1, so the relaxation
    # below never fired and the function always returned -1.)
    dp[0][0] = 0
    # Relax: extend (i-1)-edge walks by one incoming edge of each vertex.
    for i in range(1, V + 1):
        for j in range(V):
            for k in range(len(edges[j])):
                if (dp[i - 1][edges[j][k].From] != -1):
                    curr_wt = (dp[i - 1][edges[j][k].From] +
                               edges[j][k].weight)
                    if (dp[i][j] == -1):
                        dp[i][j] = curr_wt
                    else:
                        dp[i][j] = min(dp[i][j], curr_wt)
    # avg[i] = max over k of (dp[V][i] - dp[k][i]) / (V - k); the original
    # source line here was garbled ("vg values").
    avg = [-1] * V
    for i in range(V):
        if (dp[V][i] != -1):
            for j in range(V):
                if (dp[j][i] != -1):
                    avg[i] = max(avg[i], (dp[V][i] -
                                          dp[j][i]) / (V - j))
    # The minimum of these maxima is the minimum mean cycle weight.
    result = avg[0]
    for i in range(V):
        if (avg[i] != -1 and avg[i] < result):
            result = avg[i]
    return result
# Driver Code
V = 4

# Incoming-edge lists, indexed by target vertex.
edges = [[] for i in range(V)]

addedge(0, 1, 1)
addedge(0, 2, 10)
addedge(1, 2, 3)
addedge(2, 3, 2)
addedge(3, 1, 0)
addedge(3, 0, 8)

# The original called an undefined minAvgWeight(); build the DP table
# shortestpath() expects and call it instead.
dp = [[-1 for _ in range(V)] for _ in range(V + 1)]
print(shortestpath(dp))
3542522 | from tornado import web, gen, httpclient
import socket
import json
class Repos(web.RequestHandler):
    """List services listening on localhost ports 8010-8039.

    Probes each port with a blocking TCP connect; for open ports, fetches
    the service's /servicename endpoint and collects name/port entries.
    """
    @gen.coroutine
    def get(self):
        return_data = []
        for port in range(8010, 8040):
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                # connect_ex returns 0 when something accepts on the port.
                if s.connect_ex(('localhost', port)) == 0:
                    client = httpclient.AsyncHTTPClient()
                    response = yield client.fetch(
                        "http://localhost:"+str(port)+"/servicename",
                        method='GET',
                        headers={"Content-Type": "application/json"},
                    )
                    json_data = json.loads(response.body)
                    return_data.append({"short_id": str(port), "ports": str(port), "name": json_data["data"]["servicename"]})
        # NOTE(review): 'error' is True even on success -- confirm the
        # intended response contract with the frontend.
        return self.finish({'error': True, 'data': return_data})
class Configs(web.RequestHandler):
    ''' /localservices/service_name/service_port/handler '''
    @gen.coroutine
    def get(self, *args, **kwargs):
        # Proxy GET: forward /<handler> to the local service on <port>
        # extracted from the request path (see class docstring layout).
        url_parts = [x for x in self.request.path.split("/") if x]
        handler = url_parts[-1]
        name = url_parts[1]
        port = url_parts[2]
        client = httpclient.AsyncHTTPClient()
        response = yield client.fetch(
            "http://localhost:"+port+"/"+handler,
            method='GET',
            headers={"Content-Type": "application/json"},
        )
        json_data = json.loads(response.body)
        print(json_data)
        self.finish({'error': True, 'data': json_data})
    @gen.coroutine
    def put(self, *args, **kwargs):
        # Proxy PUT: forward the raw request body unchanged to the service.
        # if data = json.loads(self.request.body.decode('utf-8')), make sure to json.dumps() when sending in body of new async http request
        url_parts = [x for x in self.request.path.split("/") if x]
        handler = url_parts[-1]
        port = url_parts[2]
        client = httpclient.AsyncHTTPClient()
        response = yield client.fetch(
            "http://localhost:"+port+"/"+handler,
            method='PUT',
            headers={"Content-Type": "application/json"},
            body=self.request.body
        )
        json_data = json.loads(response.body)
        print(json_data)
        self.finish({'error': True, 'data': json_data})
| StarcoderdataPython |
6473550 | <filename>scholar_crawler.py
# -*- coding: utf-8 -*-
from requests import get
from bs4 import BeautifulSoup
import numpy as np
from time import sleep
import pandas as pd
url = 'https://scholar.google.com/citations?view_op=search_authors&hl=en&mauthors=label:reinforcement_learning'
base_url = 'https://scholar.google.com'
base_page_url = url + '&after_author='
def page_parse(html_soup):
    """
    Parse one Google Scholar author-search results page.

    Returns (author_list, citation_list, institution_list, next_page)
    where next_page is the pagination token for the following page.
    NOTE(review): the CSS class names and the onclick parsing below are
    tied to a specific Scholar page layout -- fragile by nature.
    """
    # FIND AUTHOR LIST AND CITATION NUMBERS #
    authors = html_soup.find_all('div', class_ = 'gsc_1usr gs_scl')
    ind = 0
    author_list = []
    link_list = []
    citation_list = []
    while ind < len(authors):
        link_list.append(authors[ind].a['href'])
        author_list.append(authors[ind].a.text.lower())
        # "Cited by N" text; the count is the third whitespace token.
        cited_string = authors[ind].find('div', class_='gsc_oai_cby').text
        cited_num = int(cited_string.split()[2])
        citation_list.append(cited_num)
        ind += 1
    # FIND INSTITUTION LIST #
    # Fetch each author's profile page; '0' marks a missing institution.
    institution_list = []
    for link in link_list:
        link_response = get(base_url + link)
        link_soup = BeautifulSoup(link_response.text, 'html.parser')
        institution_html = link_soup.find('div', class_ = 'gsc_prf_il')
        if institution_html.a is None:
            institution_list.append('0')
        else:
            institution_list.append(institution_html.a.text.lower())
    # FIND THE NEXT PAGE URL #
    # The token sits between the last two escaped backslashes of the
    # "next" button's onclick attribute; scan the string in reverse and
    # collect characters after the second backslash.
    next_page_list = []
    button = html_soup.find('button', class_ = 'gs_btnPR gs_in_ib gs_btn_half gs_btn_lsb gs_btn_srt gsc_pgn_pnx')
    button_text = button['onclick']
    count = 0
    for let in button_text[::-1]:
        if let == "\\":
            count += 1
        if count == 2 and let != "\\":
            next_page_list.append(let)
    # Un-reverse and drop a 3-character escape prefix.
    next_page = ''.join(next_page_list)[::-1][3:]
    return author_list, citation_list, institution_list, next_page
if __name__ == "__main__":
df = pd.DataFrame(columns=['Authors', 'Citations', 'Institutions'])
page = 0
npages = 100
turl = url
while page < npages:
html_soup = BeautifulSoup(get(turl).text, 'html.parser')
author_list, citation_list, institution_list, next_page = page_parse(html_soup)
df_temp = pd.DataFrame({
'Authors': author_list,
'Citations': citation_list,
'Institutions': institution_list,
})
df = df.append(df_temp, ignore_index=True)
turl = base_page_url + next_page
page += 1
if page%10 == 0:
df.to_csv('schools.csv', encoding='utf-8')
print("On page ", page)
# Hard coded for top authors that do not list their institution on google scholar.
df.loc[df['Authors'] == '<NAME>', 'Institutions'] = 'university of alberta'
df.loc[df['Authors'] == '<NAME>', 'Institutions'] = 'university of florida'
df.loc[df['Authors'] == '<NAME>', 'Institutions'] = 'vrije universiteit brussel'
df.loc[df['Authors'] == '<NAME>', 'Institutions'] = 'university of alberta'
df.loc[df['Authors'] == '<NAME>', 'Institutions'] = 'university of hamburg'
df.loc[df['Authors'] == '<NAME>', 'Institutions'] = 'duke university'
data_clean = df.loc[(df['Institutions'] != '0')]
data_clean = data_clean.replace('brown', 'brown university')
data_clean = data_clean.replace('university of california at berkeley', 'uc berkeley')
data_clean = data_clean.replace('university of california, berkeley', 'uc berkeley')
data_clean = data_clean.replace('<NAME>', 'carnegie mellon university')
data_clean.to_csv('schools_clean.csv', encoding='utf-8')
| StarcoderdataPython |
1912396 | # Generated by Django 2.2.4 on 2019-08-13 13:43
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the OrderInfo and OrderGoods tables."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('user', '0001_initial'),
        ('goods', '0001_initial'),
    ]

    operations = [
        # NOTE(review): on_delete is given as False/True below; Django
        # expects a callable such as models.CASCADE -- confirm this
        # migration actually loads on the target Django version.
        migrations.CreateModel(
            name='OrderInfo',
            fields=[
                ('order_id', models.CharField(max_length=128, primary_key=True, serialize=False)),
                ('pay_method', models.SmallIntegerField(choices=[(0, 'Pay on delivery'), (1, 'WeChat Pay'), (2, 'AliPay'), (3, 'UniPay')], default=2)),
                ('total_count', models.IntegerField(default=1)),
                ('total_price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('postage_fee', models.DecimalField(decimal_places=2, max_digits=10)),
                ('order_status', models.SmallIntegerField(choices=[(0, 'To be paid'), (1, 'To be delivered'), (2, 'In transit'), (3, 'To be reviewed'), (4, 'Complete')], default=1)),
                ('trade_no', models.CharField(max_length=128)),
                ('addr', models.ForeignKey(on_delete=False, to='user.Address')),
                ('user', models.ForeignKey(on_delete=False, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'fs_order',
            },
        ),
        migrations.CreateModel(
            name='OrderGoods',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('is_delete', models.BooleanField(default=False)),
                ('count', models.IntegerField(default=1)),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('comment', models.CharField(max_length=256)),
                ('order', models.ForeignKey(on_delete=True, to='order.OrderInfo')),
                ('sku', models.ForeignKey(on_delete=False, to='goods.GoodsSKU')),
            ],
            options={
                'db_table': 'fs_order_goods',
            },
        ),
    ]
| StarcoderdataPython |
1707787 | import os
from index import SysConst
if __name__ == '__main__':
    # Create an empty .txt placeholder next to every video file found
    # under the configured download folder.
    folder = SysConst.getDownloadPath()
    movieTypes = set(["avi", "mp4", "mkv", "rmvb", "wmv", "iso"])
    for fpath, dirs, fs in os.walk(folder):
        for filename in fs:
            # Use the real extension: the original took filename[-3:],
            # which can never match the 4-character "rmvb" extension.
            base, ext = os.path.splitext(filename)
            suffix = ext[1:].lower()
            if filename[0:1] != "." and len(filename) > 4 and suffix in movieTypes:
                newFileName = base + ".txt"
                newFullPath = fpath + "/" + newFileName
                print("createFile: " + newFullPath)
                # `with` guarantees the handle is closed after creation.
                with open(newFullPath, "wt"):
                    pass
| StarcoderdataPython |
5123755 | # -*- coding: utf-8 -*-
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignRelatedObjectsDescriptor
class DataProvider(object):
    """
    Flatten a Django model/queryset into per-row dicts of field values.

    Collects plain model fields, properties and (via `related_fields`)
    reverse foreign-key relations, then iterates rows as {field: value}
    dicts with None values omitted.  `field_mappings` renames output keys
    ({'fromField': ..., 'toField': ...}).

    NOTE(review): written against a legacy Django API (`get_query_set`,
    `ForeignRelatedObjectsDescriptor`) -- confirm the target Django
    version before reuse.
    """

    def __init__(self, model=None, queryset=None, filters=None, fields=None,
                 related_fields=None, field_mappings=None, ordering=None):
        if (model is None and queryset is None) or (model and queryset):
            raise AttributeError(
                "DataProvider class must be called with either Model class or QuerySet instance.")
        self.model, self.queryset = model, queryset
        # Normalize optional containers (avoids shared mutable defaults).
        self.filters = filters or {}
        self.related_fields = related_fields or {}
        self.ordering = ordering or []
        self.field_mappings = field_mappings or []
        self.fields = []
        self._values = {}
        # The original read `queryset.model`, which crashes when the
        # provider is constructed from a Model class; derive the model
        # from the resolved queryset instead.
        queryset_obj = self.get_queryset()
        model = queryset_obj.model
        rows = list(queryset_obj)  # evaluating queryset once
        for f in (fields or []):
            try:
                # Plain model field: collect its value for every row.
                if model._meta.get_field(f):
                    self._values[f] = [getattr(v, f) for v in rows]
                    self.fields.append(f)
            except FieldDoesNotExist:
                # Not a column: try a Python property on the model class.
                try:
                    if isinstance(getattr(model, f), property):
                        self._values[f] = [getattr(v, f) for v in rows]
                        self.fields.append(f)
                except AttributeError:
                    pass
                # Reverse FK: pivot related rows into category -> values.
                if isinstance(getattr(model, f), ForeignRelatedObjectsDescriptor):
                    for o in rows:
                        related_spec = self.related_fields.get(f, {})
                        if 'categoryField' in related_spec and 'valueField' in related_spec:
                            for item in DataProvider(queryset=getattr(o, f).get_query_set(),
                                                     fields=list(related_spec.values())):
                                cat = item[related_spec['categoryField']]
                                val = item[related_spec['valueField']]
                                if cat in self._values:
                                    self._values[cat].append(val)
                                else:
                                    # NOTE(review): list(val) iterates the
                                    # value (kept from the original) --
                                    # confirm `[val]` was not intended.
                                    self._values[cat] = list(val)
                                if cat not in self.fields:
                                    self.fields.append(cat)
        # Build one (name, value) column per output field, applying any
        # field mappings.  range()/list(zip(...)) replace the Python-2-only
        # xrange() and the single-use zip iterators, which would make
        # __iter__ yield rows only once under Python 3.
        self._results = []
        for f in self.fields:
            from_fields = [fm['fromField'] for fm in self.field_mappings]
            if f in from_fields:
                matches = [fm for fm in self.field_mappings if fm['fromField'] == f]
                for fm in matches:
                    field_name = fm['toField']
                    self._results.append(list(zip(
                        [field_name for i in range(0, self.__len__())],
                        self._values[f])))
            else:
                self._results.append(list(zip(
                    [f for i in range(0, self.__len__())], self._values[f])))

    def __len__(self):
        # Cache the (potentially expensive) COUNT query.
        if not hasattr(self, '_count'):
            self._count = self.get_queryset().count()
        return self._count

    def __iter__(self):
        # Rows are the transpose of the per-field (name, value) columns;
        # None values are dropped from each row dict.
        for r in zip(*self._results):
            yield dict((k, v) for k, v in r if v is not None)

    def get_queryset(self):
        """Resolve, filter, order and cache the underlying queryset."""
        if hasattr(self, '_queryset'):
            return self._queryset
        if self.queryset is not None:
            queryset = self.queryset
            if hasattr(queryset, '_clone'):
                queryset = queryset._clone()
        else:
            queryset = self.model._default_manager.get_query_set()
        if self.filters:
            queryset = queryset.filter(**self.filters)
        self._queryset = queryset.order_by(*self.ordering)
        return self._queryset
| StarcoderdataPython |
1715587 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchaudio
from librosa.filters import mel as librosa_mel_fn
from asr.data import Audio2Mel
path = "/home/ubuntu/data/datasets/data_processed_voc/wav/voc_26_part_65.wav"
vocoder = torch.hub.load('descriptinc/melgan-neurips', 'load_melgan')
attr = ['n_fft',
'hop_length',
'win_length',
'sampling_rate',
'n_mel_channels']
for att in attr:
print(att, getattr(vocoder.fft, att))
audio_2_mel = Audio2Mel()
torch.manual_seed(1)
for i in range(10, 15):
path = f"/home/ubuntu/data/datasets/data_processed_voc/wav/voc_0_part_{i}.wav"
wav, _ = torchaudio.load(path)
spec1 = audio_2_mel.forward(wav)
spec2 = vocoder(wav)
diff = torch.mean(spec1.cpu() - spec2.cpu())
print(diff)
| StarcoderdataPython |
8065366 | import pygame
from pygame.locals import *
import gamelib
from elements import Arrow
from elements import Text
class MainGame(gamelib.SimpleGame):
    """VimHero: press the vim motion key (h/j/k/l) matching the on-screen
    arrow before the per-press time limit -- which shrinks with every
    correct press -- runs out.
    """
    BLACK = pygame.Color('black')
    WHITE = pygame.Color('white')
    SECOND = 1000  # pygame.time works in milliseconds

    def __init__(self):
        super(MainGame, self).__init__('VimHero', MainGame.BLACK)
        self.instruction = []
        self.arrow = Arrow(pos=(self.window_size[0] / 2, self.window_size[1] / 2))
        x = self.window_size[0] / 20
        y = self.window_size[1]
        self.create_instruction("h : left", (x, y - 120), MainGame.WHITE, self.font)
        self.init_game()

    def create_instruction(self, text, pos, color, font):
        """Register a Text overlay (render() currently draws its own texts)."""
        self.instruction.append(Text(text, pos, color, font))

    def init(self):
        super(MainGame, self).init()

    def init_game(self):
        """Reset score, arrow, timing and state flags for a fresh round."""
        self.score = 0
        self.arrow.change()
        self.time_limit = 2 * MainGame.SECOND  # ms allowed per key press
        self.time_decrease = 40  # limit shrinks by this much per correct press
        self.is_started = False
        self.is_game_over = False

    def update(self):
        """Per-frame tick: enforce the time limit, then process input."""
        if self.is_started and not self.is_game_over:
            if self.is_over_time():
                self.game_over()
        self.check_key()

    def is_over_time(self):
        """True when the current press has exceeded the allowed window."""
        return self.get_time() - self.time > self.time_limit

    def get_time(self):
        """Milliseconds since pygame was initialized."""
        return pygame.time.get_ticks()

    def check_key(self):
        """Drain the event queue, dispatching KEYUP events by game state."""
        for event in pygame.event.get():
            if event.type == KEYUP:
                self.check_key_exit(event)
                if not self.is_game_over:
                    self.check_key_direction(event)
                else:
                    self.check_key_reset(event)

    def check_key_exit(self, event):
        if event.key == pygame.K_ESCAPE:
            self.terminate()

    def check_key_direction(self, event):
        """Map vim motion keys (h/j/k/l) to arrow directions."""
        if event.key == pygame.K_h:
            self.check_direction('left')
        elif event.key == pygame.K_j:
            self.check_direction('up')
        elif event.key == pygame.K_k:
            self.check_direction('down')
        elif event.key == pygame.K_l:
            self.check_direction('right')

    def check_direction(self, direction):
        """Score the press; any wrong direction ends the game.

        Bug fix: compare strings with ``==`` instead of ``is`` -- the
        identity comparison only worked by accident of CPython interning.
        """
        if not self.is_started:
            self.is_started = True
        if self.arrow.get_direction() == direction:
            self.correct_key()
        else:
            self.game_over()

    def check_key_reset(self, event):
        if event.key == pygame.K_SPACE:
            self.reset_game()

    def correct_key(self):
        """Tighten the limit, restart the clock, show a new arrow, add a point."""
        self.time_limit -= self.time_decrease
        self.set_time()
        self.arrow.change()
        self.score += 1

    def set_time(self):
        """Mark 'now' as the start of the current press window."""
        self.time = pygame.time.get_ticks()

    def game_over(self):
        self.is_game_over = True

    def reset_game(self):
        self.init_game()

    def render_instruction(self):
        """Render the help-text surfaces (re-done every frame; cheap enough)."""
        self.instruction1 = self.font.render("h : left", 0, MainGame.WHITE)
        self.instruction2 = self.font.render("j : up", 0, MainGame.WHITE)
        self.instruction3 = self.font.render("k : down", 0, MainGame.WHITE)
        self.instruction4 = self.font.render("l : right", 0, MainGame.WHITE)
        self.instruction5 = self.font.render("Spacebar : Restart ", 0, MainGame.WHITE)
        self.instruction6 = self.font.render("ESC : Exit", 0, MainGame.WHITE)

    def render_score(self):
        self.score_image = self.font.render("Score = %d" % self.score, 0, MainGame.WHITE)
        self.set_score_position()

    def set_score_position(self):
        """Center the score horizontally; drop it to mid-screen on game over."""
        self.score_pos_x = (self.window_size[0] / 2) - (self.score_image.get_width() / 2)
        if not self.is_game_over:
            self.score_pos_y = (self.window_size[1] / 10) - (self.score_image.get_height() / 2)
        else:
            self.score_pos_y = (self.window_size[1] / 2) - (self.score_image.get_height() / 2)

    def render(self, surface):
        """Draw score, help texts and (while playing) the current arrow."""
        self.render_score()
        self.render_instruction()
        surface.blit(self.score_image, (self.score_pos_x, self.score_pos_y))
        surface.blit(self.instruction1, (20, self.window_size[1] - 120))
        surface.blit(self.instruction2, (20, self.window_size[1] - 100))
        surface.blit(self.instruction3, (20, self.window_size[1] - 80))
        surface.blit(self.instruction4, (20, self.window_size[1] - 60))
        surface.blit(self.instruction5, (20, self.window_size[1] - 40))
        surface.blit(self.instruction6, (20, self.window_size[1] - 20))
        if not self.is_game_over:
            self.arrow.render(surface)
def main():
    """Entry point: build the game object and enter its main loop."""
    MainGame().run()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
8000536 |
# Copyright 2018-2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from pysmapi.smapi import *
class Virtual_Network_LAN_Delete(Request):
    """SMAPI Virtual_Network_LAN_Delete request: removes a guest LAN."""

    def __init__(self,
                 lan_name = "",
                 lan_owner = "",
                 **kwargs):
        super(Virtual_Network_LAN_Delete, self).__init__(**kwargs)
        # Request parameters.
        self._lan_name = lan_name
        self._lan_owner = lan_owner

    @property
    def lan_name(self):
        """Name of the LAN to delete (1-8 chars, char36 plus $#@)."""
        return self._lan_name

    @lan_name.setter
    def lan_name(self, value):
        self._lan_name = value

    @property
    def lan_owner(self):
        """Owner of the LAN (1-8 chars, char42) or the literal 'SYSTEM'."""
        return self._lan_owner

    @lan_owner.setter
    def lan_owner(self, value):
        self._lan_owner = value

    def pack(self):
        """Serialize the request as two big-endian length-prefixed strings:
        lan_name_length (int4), lan_name, lan_owner_length (int4), lan_owner.
        """
        name_len = len(self._lan_name)
        owner_len = len(self._lan_owner)
        layout = "!I{}sI{}s".format(name_len, owner_len)
        return struct.pack(layout,
                           name_len,
                           s2b(self._lan_name),
                           owner_len,
                           s2b(self._lan_owner))
| StarcoderdataPython |
11223124 | import os
from setuptools import setup
from pypandoc import convert
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Package metadata for django-rundbg.  The long description is produced by
# converting README.md to reStructuredText via pypandoc (pandoc must be
# installed on the machine building the sdist).
setup(
    name='django-rundbg',
    version='0.1.3',
    packages=['django_rundbg'],
    include_package_data=True,
    license='Apache License',
    description='Provides a lightweight development runserver on Werkzeug with debugging',
    long_description=convert('README.md', 'rst'),
    keywords=['django', 'debug', 'django-rest-framework', 'api'],
    url='https://github.com/octobot-dev/django-rundbg',
    download_url='https://github.com/octobot-dev/django-rundbg/archive/0.1.3.tar.gz',
    author='<NAME>',
    author_email='<EMAIL>',
    zip_safe=True,
    install_requires=[
        'Django>=1.7',
        'Werkzeug>=0.11',
    ],
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Framework :: Django',
        'Framework :: Django :: 1.11',
        'Framework :: Django :: 1.10',
        'Framework :: Django :: 1.9',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.7',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Information Technology',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application'
    ],
)
| StarcoderdataPython |
4949738 | import numpy as np
class SimplePreprocessor(object):
    """Thin wrapper around a scikit-learn ColumnTransformer that
    median-imputes + standard-scales numeric columns and
    constant-imputes + ordinal-encodes categorical columns.

    NOTE(review): Pipeline, SimpleImputer, StandardScaler, OrdinalEncoder and
    ColumnTransformer are referenced here but never imported in this file --
    the sklearn imports appear to have been lost; restore them before use.
    """
    def __init__(self, is_categorical):
        # is_categorical: boolean mask over columns (array-like); True marks a
        # categorical column -- assumed, confirm against callers.
        numeric_transformer = Pipeline(steps=[
            ('imputer', SimpleImputer(strategy='median')),
            ('scaler', StandardScaler())])
        categorical_transformer = Pipeline(steps=[
            ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
            ('ordinal', OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1))])
        # Route categorical and numeric column indices to their pipelines.
        self.preprocessor = ColumnTransformer(
            transformers=[
                ('cat', categorical_transformer, np.where(is_categorical)[0]),
                ('num', numeric_transformer, np.where(~is_categorical)[0])])
    def fit(self, x):
        """Fit the underlying ColumnTransformer on x."""
        return self.preprocessor.fit(x)
    def fit_transform(self, x):
        """Fit on x and return the transformed array."""
        return self.preprocessor.fit_transform(x)
    def transform(self, x):
        """Transform x with the already-fitted ColumnTransformer."""
        return self.preprocessor.transform(x)
class MetaDataset(object):
    """Collects per-dataset records as parallel, index-aligned lists.

    Element i of every field describes the same generated dataset, so any
    reordering must be applied to ALL fields with the same permutation.
    """

    def __init__(self):
        self.datasets = []
        self.datasets_orig = []
        self.primary_y = []
        self.drift_types = []
        self.drops = []
        self.metrics_id = []
        self.meta_features = []

    def append(self, dataset, dataset_orig, primary_y, drift_type, drop, metrics_id, meta_features):
        """Record one dataset together with its metadata (keeps fields aligned)."""
        self.datasets.append(dataset)
        self.datasets_orig.append(dataset_orig)
        self.primary_y.append(primary_y)
        self.drift_types.append(drift_type)
        self.drops.append(drop)
        self.metrics_id.append(metrics_id)
        self.meta_features.append(meta_features)

    def arrayfy(self):
        """Convert every field from list to numpy array (enables fancy indexing)."""
        self.datasets = np.array(self.datasets)
        self.datasets_orig = np.array(self.datasets_orig)
        self.primary_y = np.array(self.primary_y)
        self.drift_types = np.array(self.drift_types)
        self.drops = np.array(self.drops)
        self.metrics_id = np.array(self.metrics_id)
        self.meta_features = np.array(self.meta_features)

    def shuffle(self):
        """Apply one random permutation to every field; call arrayfy() first.

        Bug fix: the original omitted meta_features from the shuffle, silently
        misaligning it with every other field afterwards.
        """
        shuffled_indices = np.random.permutation(len(self.drift_types))
        self.datasets = self.datasets[shuffled_indices]
        self.datasets_orig = self.datasets_orig[shuffled_indices]
        self.primary_y = self.primary_y[shuffled_indices]
        self.drops = self.drops[shuffled_indices]
        self.drift_types = self.drift_types[shuffled_indices]
        self.metrics_id = self.metrics_id[shuffled_indices]
        self.meta_features = self.meta_features[shuffled_indices]
class ReferenceTask(object):
    """Reference task (drop == 0): a trained model plus its validation data."""

    def __init__(self, model, X_src_orig, y_src, preprocess, is_categorical):
        self.preprocess = preprocess
        self.is_categorical = is_categorical
        self.model = model
        # Validation split of the source data.
        self.X_orig = X_src_orig
        self.y = y_src
| StarcoderdataPython |
97724 | <filename>google/cloud/recommendationengine_v1beta1/types/__init__.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .catalog import (
CatalogItem,
Image,
ProductCatalogItem,
)
from .catalog_service import (
CreateCatalogItemRequest,
DeleteCatalogItemRequest,
GetCatalogItemRequest,
ListCatalogItemsRequest,
ListCatalogItemsResponse,
UpdateCatalogItemRequest,
)
from .common import FeatureMap
from .import_ import (
CatalogInlineSource,
GcsSource,
ImportCatalogItemsRequest,
ImportCatalogItemsResponse,
ImportErrorsConfig,
ImportMetadata,
ImportUserEventsRequest,
ImportUserEventsResponse,
InputConfig,
UserEventImportSummary,
UserEventInlineSource,
)
from .prediction_apikey_registry_service import (
CreatePredictionApiKeyRegistrationRequest,
DeletePredictionApiKeyRegistrationRequest,
ListPredictionApiKeyRegistrationsRequest,
ListPredictionApiKeyRegistrationsResponse,
PredictionApiKeyRegistration,
)
from .prediction_service import (
PredictRequest,
PredictResponse,
)
from .user_event import (
EventDetail,
ProductDetail,
ProductEventDetail,
PurchaseTransaction,
UserEvent,
UserInfo,
)
from .user_event_service import (
CollectUserEventRequest,
ListUserEventsRequest,
ListUserEventsResponse,
PurgeUserEventsMetadata,
PurgeUserEventsRequest,
PurgeUserEventsResponse,
WriteUserEventRequest,
)
# Explicit public API of this types package: every symbol re-exported above.
__all__ = (
    "CatalogItem",
    "Image",
    "ProductCatalogItem",
    "CreateCatalogItemRequest",
    "DeleteCatalogItemRequest",
    "GetCatalogItemRequest",
    "ListCatalogItemsRequest",
    "ListCatalogItemsResponse",
    "UpdateCatalogItemRequest",
    "FeatureMap",
    "CatalogInlineSource",
    "GcsSource",
    "ImportCatalogItemsRequest",
    "ImportCatalogItemsResponse",
    "ImportErrorsConfig",
    "ImportMetadata",
    "ImportUserEventsRequest",
    "ImportUserEventsResponse",
    "InputConfig",
    "UserEventImportSummary",
    "UserEventInlineSource",
    "CreatePredictionApiKeyRegistrationRequest",
    "DeletePredictionApiKeyRegistrationRequest",
    "ListPredictionApiKeyRegistrationsRequest",
    "ListPredictionApiKeyRegistrationsResponse",
    "PredictionApiKeyRegistration",
    "PredictRequest",
    "PredictResponse",
    "EventDetail",
    "ProductDetail",
    "ProductEventDetail",
    "PurchaseTransaction",
    "UserEvent",
    "UserInfo",
    "CollectUserEventRequest",
    "ListUserEventsRequest",
    "ListUserEventsResponse",
    "PurgeUserEventsMetadata",
    "PurgeUserEventsRequest",
    "PurgeUserEventsResponse",
    "WriteUserEventRequest",
)
| StarcoderdataPython |
11364850 | <gh_stars>1-10
def encode(plain_text):
    """Atbash-encode *plain_text*, grouping the cipher into 5-char words.

    Non-alphanumerics are dropped, letters are lowercased and mirrored
    (a<->z, b<->y, ...; 219 == ord('a') + ord('z')), digits pass through.
    """
    ciphered = ''.join(
        chr(219 - ord(ch)) if ch.islower() else ch
        for ch in plain_text.lower()
        if ch.isalnum()
    )
    return ' '.join(ciphered[i:i + 5] for i in range(0, len(ciphered), 5))
def decode(ciphered_text):
    """Apply the atbash substitution to alphanumerics, dropping all else.

    Atbash is its own inverse, so this both decodes and encodes; only
    lowercase letters are mirrored, digits pass through unchanged.
    """
    pieces = []
    for symbol in ciphered_text:
        if not symbol.isalnum():
            continue
        pieces.append(chr(219 - ord(symbol)) if symbol.islower() else symbol)
    return ''.join(pieces)
| StarcoderdataPython |
# Name: SpatialJoin_AddField
# Description: Spatially join two feature classes, renaming the joined
#              "OUTPUT" field to "d_05" via a field map.
# Requirements: os module
# Import system modules
import arcpy
import os
# Set local variables
workspace = "F:\\data_test\\fahui_wang\\"
outWorkspace = "F:\\data_test\\fahui_wang\\"
targetFeatures = os.path.join(workspace, "d_04")
joinFeatures = os.path.join(workspace, "d_05")
outfc = os.path.join(outWorkspace, "dd05")
# Create a new fieldmappings and add the two input feature classes.
fieldmappings = arcpy.FieldMappings()
fieldmappings.addTable(targetFeatures)
fieldmappings.addTable(joinFeatures)
# Locate the field map for "OUTPUT" (a field of the join features) so its
# output name can be changed before running the join.
# NOTE(review): the original comment here described an unrelated
# POP1990/cities example and did not match this code.
sumFieldIndex = fieldmappings.findFieldMapIndex("OUTPUT")
fieldmap = fieldmappings.getFieldMap(sumFieldIndex)
# Get the output field's properties as a field object
field = fieldmap.outputField
# Rename the field and pass the updated field object back into the field map
field.name = "d_05"
field.aliasName = "d_05"
fieldmap.outputField = field
# Run the Spatial Join tool, using the defaults for the join operation and join type
arcpy.SpatialJoin_analysis(targetFeatures, joinFeatures, outfc, "#", "#", fieldmappings)
5047628 | from typing import Optional
def say_hi(name: Optional[str] = None):
    """Print a personalized greeting, or "Hello World" when no name is given."""
    message = "Hello World" if name is None else f"Hey {name}!"
    print(message)
| StarcoderdataPython |
11258321 | import os
import shutil
import re
import argparse
import json
import socket
import time
CONFIG_TEMPLATES_ROOT_PATH = '/cfg-templates/'
CONFIG_OUTPUT_ROOT_PATH = '/config/'
def slugify(value):
    """Strip every character that is not alphanumeric, '-', '_', or '.'.

    Note: despite the name, this does NOT lowercase the input or turn
    spaces into hyphens -- disallowed characters (including spaces) are
    simply removed.
    """
    return re.sub(r'[^-a-zA-Z0-9_.]+', '', value)
def str2bool(v: str):
    """Interpret common truthy strings ('yes', 'true', 't', '1'), case-insensitively."""
    return v.lower() in {'yes', 'true', 't', '1'}
def save_json(path: str, data):
    """Serialize *data* as JSON (indent=1) into the file at *path*."""
    with open(path, 'w') as handle:
        handle.write(json.dumps(data, indent=1))
def load_json(path: str):
    """Read the file at *path* and return its parsed JSON document."""
    with open(path, 'r') as handle:
        return json.loads(handle.read())
def init_args():
    """Parse CLI flags (each with an environment-variable fallback) and attach
    the derived per-network/per-node config file paths to the args namespace.
    """
    # Parse arguments
    parser = argparse.ArgumentParser(description='Cardano Configurator')
    parser.add_argument('--node-port', dest='node_port', help='Port of node. Defaults to 3000.', type=int, default=os.environ.get('NODE_PORT', 3000))
    parser.add_argument('--node-name', dest='name', help='Name of node. Defaults to node1.', type=slugify, default=os.environ.get('NODE_NAME', 'node1'))
    parser.add_argument('--node-topology', dest='topology', help='Topology of the node. Should be comma separated for each individual node to add, on the form: <ip>:<port>/<valency>. So for example: 127.0.0.1:3001/1,127.0.0.1:3002/1.', type=str, default=os.environ.get('NODE_TOPOLOGY', ''))
    parser.add_argument('--node-relay', dest='relay', help='Set to 1 if default IOHK relay should be added to the network topology.', type=str2bool, default=os.environ.get('NODE_RELAY', False))
    parser.add_argument('--cardano-network', dest='network', help='Carano network to use (main, test, pioneer). Defaults to main.', type=str, default=os.environ.get('CARDANO_NETWORK', 'main'))
    parser.add_argument('--ekg-port', dest='ekg_port', help='Port of EKG monitoring. Defaults to 12788.', type=int, default=os.environ.get('EKG_PORT', 12788))
    parser.add_argument('--prometheus-host', dest='prometheus_host', help='Host of Prometheus monitoring. Defaults to 127.0.0.1.', type=str, default=os.environ.get('PROMETHEUS_HOST', '127.0.0.1'))
    parser.add_argument('--prometheus-port', dest='prometheus_port', help='Port of Prometheus monitoring. Defaults to 12798.', type=int, default=os.environ.get('PROMETHEUS_PORT', 12798))
    parser.add_argument('--resolve-hostnames', dest='resolve_hostnames', help='Resolve hostnames in topology to IP-addresses.', type=str2bool, default=os.environ.get('RESOLVE_HOSTNAMES', False))
    parser.add_argument('--replace-existing', dest='replace_existing', help='Replace existing configs.', type=str2bool, default=os.environ.get('REPLACE_EXISTING_CONFIG', False))
    args = parser.parse_args()
    # Init network specific paths
    # Templates live under <templates-root>/<network>; generated files go to
    # <output-root>/<network>-<node-name>/.
    args.CONFIG_TEMPLATES_PATH = os.path.join(CONFIG_TEMPLATES_ROOT_PATH, args.network)
    CONFIG_NAME = args.network+'-'+args.name
    args.CONFIG_OUTPUT_PATH = os.path.join(CONFIG_OUTPUT_ROOT_PATH, CONFIG_NAME)
    args.BYRON_GENESIS_PATH = os.path.join(args.CONFIG_OUTPUT_PATH, 'byron-genesis.json')
    args.SHELLEY_GENESIS_PATH = os.path.join(args.CONFIG_OUTPUT_PATH, 'shelley-genesis.json')
    args.ALONZO_GENESIS_PATH = os.path.join(args.CONFIG_OUTPUT_PATH, 'alonzo-genesis.json')
    args.TOPOLOGY_PATH = os.path.join(args.CONFIG_OUTPUT_PATH, 'topology.json')
    args.CONFIG_PATH = os.path.join(args.CONFIG_OUTPUT_PATH, 'config.json')
    args.VARS_PATH = os.path.join(args.CONFIG_OUTPUT_PATH, 'VARS')
    return args
def init_folder(args):
    """Ensure the per-node config output directory exists."""
    target = args.CONFIG_OUTPUT_PATH
    if not os.path.exists(target):
        os.makedirs(target)
def init_genesis(args):
    """Copy the alonzo/shelley/byron genesis templates into the node's config
    directory, skipping files that already exist unless replace_existing is set.
    """
    eras = (
        ('alonzo', args.ALONZO_GENESIS_PATH),
        ('shelley', args.SHELLEY_GENESIS_PATH),
        ('byron', args.BYRON_GENESIS_PATH),
    )
    for era, dest in eras:
        src = os.path.join(args.CONFIG_TEMPLATES_PATH, '%s-genesis.json' % era)
        if not os.path.exists(dest) or args.replace_existing:
            print('Generating new %s genesis file %s from template %s' % (era, dest, src))
            shutil.copy(src, dest)
def resolve_hostname(hostname, tries=0):
    """Resolve *hostname* to an IPv4 address, retrying up to 10 times.

    Sleeps one second between attempts on DNS failure; after the retry
    budget is exhausted the hostname is returned unchanged (best-effort
    fallback, matching the original behavior).

    Fixes: the original used a bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) and unbounded-looking recursion; this
    catches only OSError (socket.gaierror's base) and iterates instead.
    """
    while True:
        try:
            return socket.gethostbyname(hostname)
        except OSError:
            if tries >= 10:
                return hostname
            tries += 1
            time.sleep(1)
def parse_topology_str(s) -> list:
    """Parse a '<ip>:<port>/<valency>,...' string into producer dicts.

    Returns [] for an empty/falsy input string.
    """
    def _producer(spec):
        ip_port, valency = spec.split('/')
        ip, port = ip_port.split(':')
        return {
            'addr': str(ip),
            'port': int(port),
            'valency': int(valency),
        }

    return [_producer(spec) for spec in s.split(',')] if s else []
def init_topology(args):
    """Write topology.json for the node, merging the configured producers
    into the chosen template (the relay template already contains the
    default IOHK relay).
    """
    template = 'topology-relay.json' if args.relay else 'topology.json'
    INPUT_PATH = os.path.join(args.CONFIG_TEMPLATES_PATH, template)
    if not os.path.exists(args.TOPOLOGY_PATH) or args.replace_existing:
        print('Generating new topology %s from template %s' % (args.TOPOLOGY_PATH, INPUT_PATH))
        print('Topology: ', args.topology)
        data = load_json(INPUT_PATH)
        extra_producers = parse_topology_str(args.topology)
        data['Producers'] = data['Producers'] + extra_producers
        save_json(args.TOPOLOGY_PATH, data)
def init_config(args):
    """Write config.json from the network template, pointing the monitoring
    endpoints and genesis file paths at this node's settings.
    """
    INPUT_PATH = os.path.join(args.CONFIG_TEMPLATES_PATH, 'config.json')
    if os.path.exists(args.CONFIG_PATH) and not args.replace_existing:
        return
    print('Generating new config file %s from template %s' % (args.CONFIG_PATH, INPUT_PATH))
    config = load_json(INPUT_PATH)
    config['hasEKG'] = args.ekg_port
    config['hasPrometheus'] = [args.prometheus_host, args.prometheus_port]
    config['ShelleyGenesisFile'] = args.SHELLEY_GENESIS_PATH
    config['ByronGenesisFile'] = args.BYRON_GENESIS_PATH
    config['AlonzoGenesisFile'] = args.ALONZO_GENESIS_PATH
    save_json(args.CONFIG_PATH, config)
def init_vars(args):
    """Copy the VARS template verbatim into the node's config directory,
    unless it already exists and replace_existing is off.
    """
    src = os.path.join(args.CONFIG_TEMPLATES_PATH, 'VARS')
    if os.path.exists(args.VARS_PATH) and not args.replace_existing:
        return
    print('Generating new VARS %s from template %s' % (args.VARS_PATH, src))
    shutil.copy(src, args.VARS_PATH)
if __name__ == '__main__':
    # Full configuration pipeline: parse CLI/env args, create the output
    # folder, then materialize genesis, topology and node config files.
    # (VARS generation is currently disabled.)
    args = init_args()
    init_folder(args)
    init_genesis(args)
    init_topology(args)
    init_config(args)
    #init_vars(args)
| StarcoderdataPython |
3358216 | from pathmagic import Dir
import os
from miscutils import Profiler
# Micro-benchmark: create/delete `run` small text files via the pathmagic
# API vs. plain os/open calls, timing each phase with miscutils.Profiler.
testdir = Dir.from_home().new_dir("test")
testdir.settings.lazy = True
run = 1000
print("pathmagic")
with Profiler() as magic_create_profiler:
    for num in range(run):
        testdir.new_file(f"test{num + 1}", "txt").write(f"Hi, I'm file number {num + 1}.")
print(magic_create_profiler)
with Profiler() as magic_delete_profiler:
    for file in testdir.files:
        file.delete()
print(magic_delete_profiler)
print("standard")
# Same workload with the standard library for comparison.
with Profiler() as standard_create_profiler:
    for num in range(run):
        with open(fR"{testdir}\test{num + 1}.txt", "w") as stream:
            stream.write(f"Hi, I'm file number {num + 1}.")
print(standard_create_profiler)
with Profiler() as standard_delete_profiler:
    for file in os.listdir(str(testdir)):
        os.remove(f"{testdir}/{file}")
print(standard_delete_profiler)
| StarcoderdataPython |
def reverse(string):
    """Return *string* with its characters in reverse order."""
    return ''.join(reversed(string))
# Simple interactive demo: read a word from stdin and echo it reversed.
print('Gimmie some word')
s = input()
print(reverse(s))
| StarcoderdataPython |
6434033 | """API Handler for hacs_repositories"""
from homeassistant.components import websocket_api
import voluptuous as vol
from custom_components.hacs.share import get_hacs
@websocket_api.async_response
@websocket_api.websocket_command({vol.Required("type"): "hacs/repositories"})
async def hacs_repositories(_hass, connection, msg):
    """Send the serialized list of HACS repositories to the websocket caller.

    (The original docstring -- "Handle get media player cover command." --
    was a copy-paste error and did not describe this handler.)
    """
    hacs = get_hacs()
    repositories = hacs.repositories
    content = []
    for repo in repositories:
        # Only expose repositories in an enabled category that are not
        # filtered out by the country configuration.
        if (
            repo.data.category in hacs.common.categories
            and not repo.ignored_by_country_configuration
        ):
            # Flat, frontend-friendly view of the repository state.
            data = {
                "additional_info": repo.information.additional_info,
                "authors": repo.data.authors,
                "available_version": repo.display_available_version,
                "beta": repo.data.show_beta,
                "can_install": repo.can_install,
                "category": repo.data.category,
                "country": repo.data.country,
                "config_flow": repo.data.config_flow,
                "custom": repo.custom,
                "default_branch": repo.data.default_branch,
                "description": repo.data.description,
                "domain": repo.data.domain,
                "downloads": repo.data.downloads,
                "file_name": repo.data.file_name,
                "first_install": repo.status.first_install,
                "full_name": repo.data.full_name,
                "hide": repo.data.hide,
                "hide_default_branch": repo.data.hide_default_branch,
                "homeassistant": repo.data.homeassistant,
                "id": repo.data.id,
                "info": repo.information.info,
                "installed_version": repo.display_installed_version,
                "installed": repo.data.installed,
                "issues": repo.data.open_issues,
                "javascript_type": repo.information.javascript_type,
                "last_updated": repo.data.last_updated,
                "local_path": repo.content.path.local,
                "main_action": repo.main_action,
                "name": repo.display_name,
                "new": repo.data.new,
                "pending_upgrade": repo.pending_upgrade,
                "releases": repo.data.published_tags,
                "selected_tag": repo.data.selected_tag,
                "stars": repo.data.stargazers_count,
                "state": repo.state,
                "status_description": repo.display_status_description,
                "status": repo.display_status,
                "topics": repo.data.topics,
                "updated_info": repo.status.updated_info,
                "version_or_commit": repo.display_version_or_commit,
            }
            content.append(data)
    connection.send_message(websocket_api.result_message(msg["id"], content))
| StarcoderdataPython |
import os
from datetime import datetime

import crawler
import psycopg2
def delete():
    """Wipe all rows from crawl_lists, posts and Bulletins (children first,
    so foreign-key constraints are not violated).
    """
    conn = get_conn()
    for table in ('crawl_lists', 'posts', 'Bulletins'):
        cur = conn.cursor()
        cur.execute('DELETE FROM ' + table)
        conn.commit()
def get_conn():
    """Open and return a new PostgreSQL connection.

    The original source had its credentials stripped, leaving bare
    `host=` / `dbname=` lines that do not even parse (SyntaxError);
    read them from the environment instead so no secrets live in code.
    Note: the connect string format (which ignores any port) is kept
    from the original.
    """
    host = os.environ.get('DB_HOST', 'localhost')
    dbname = os.environ.get('DB_NAME', '')
    user = os.environ.get('DB_USER', '')
    password = os.environ.get('DB_PASSWORD', '')
    conn = psycopg2.connect("dbname={} user={} host={} password={}".format(dbname, user, host, password))
    return conn
def insert_into_bulletin(domain_title):
    """Return the Bulletins.id for *domain_title*, inserting a row if absent.

    Security fix: the original interpolated the (crawled, untrusted) title
    straight into SQL; use parameterized queries instead.  Also replaces the
    fetch-all-rows Python-side scan with a WHERE lookup.
    """
    print("insert_into_bulletin")
    conn = get_conn()
    cur = conn.cursor()
    cur.execute("SELECT id FROM Bulletins WHERE title = %s", (domain_title,))
    row = cur.fetchone()
    if row is not None:
        return row[0]
    now = datetime.now()
    cur = conn.cursor()
    cur.execute(
        "INSERT INTO Bulletins(title, created_at, updated_at) VALUES (%s, %s, %s)",
        (domain_title, str(now), str(now)))
    conn.commit()
    cur = conn.cursor()
    cur.execute("SELECT id FROM Bulletins WHERE title = %s", (domain_title,))
    row = cur.fetchone()
    return row[0] if row else None
def insert_into_post(bulletin_id, site_title, url, select_rule):
    """Return the Posts.id for *site_title*, inserting a new row if absent.

    Security fix: parameterized queries instead of string-concatenated SQL
    built from crawled (untrusted) values.  The original's trailing
    unreachable `return 3` branch has been removed.
    """
    print("insert_into_post")
    conn = get_conn()
    cur = conn.cursor()
    cur.execute("SELECT id FROM posts WHERE title = %s", (site_title,))
    row = cur.fetchone()
    if row is not None:
        return row[0]
    now = datetime.now()
    cur = conn.cursor()
    cur.execute(
        "INSERT INTO Posts(bulletin_id, title, url, select_role, created_at, updated_at) "
        "VALUES (%s, %s, %s, %s, %s, %s)",
        (bulletin_id, site_title, url, select_rule, str(now), str(now)))
    conn.commit()
    cur = conn.cursor()
    cur.execute("SELECT id FROM Posts WHERE title = %s", (site_title,))
    row = cur.fetchone()
    return row[0] if row else None
def insert_into_crawl_list(post_id, title, url):
    """Insert a crawled item unless a row with the same title already exists.

    Security fix: parameterized queries instead of string-concatenated SQL
    built from crawled (untrusted) titles/urls.
    """
    print("insert start")
    conn = get_conn()
    cur = conn.cursor()
    cur.execute("SELECT 1 FROM crawl_lists WHERE title = %s", (title,))
    if cur.fetchone() is not None:
        return
    now = datetime.now()
    cur = conn.cursor()
    cur.execute(
        "INSERT INTO crawl_lists(post_id, title, created_at, updated_at, url) "
        "VALUES (%s, %s, %s, %s, %s)",
        (post_id, title, str(now), str(now), url))
    conn.commit()
def insert_into_tables(rule, site_title, domain_title, url):
    """Ensure the bulletin and post rows exist; return the post id."""
    print("ssssinsert start")
    bulletin = insert_into_bulletin(domain_title)
    return insert_into_post(bulletin, site_title, url, rule)
if __name__ == '__main__':
    # Demo run against a university notice board: detect the board's
    # selection rule, crawl it, and persist the results.
    url = 'https://cs.kookmin.ac.kr/news/notice/'
    str1 = '네이버 커넥트재단 부스트캠프'
    str2 = '2019학년도 1학기 학업성적 확인 및 정정요청 기간 …'
    crawler.start()
    rule, site_title, domain_title = crawler.find_board_info(url, str1, str2)
    print("rule: ", rule, "\nsite_title", site_title, "\ndomain_title: ", domain_title)
    boards, urls = crawler.crawl(url, rule)
    for i in crawler.crawl(url, rule):
        print(i)
    post_id = insert_into_tables(rule, site_title, domain_title, url)
    board_urls = list(zip(boards, urls))
    for elem in board_urls:
        # BUG FIX: the original did `print(elem)+"\n\n"`, which raises
        # TypeError (None + str); print the pair followed by a blank line.
        print(elem, end="\n\n")
        # insert_into_crawl_list(post_id, elem[0], elem[1])
    insert_into_tables(rule, site_title, domain_title, url)
| StarcoderdataPython |
3219718 | import datetime
import time
class EarthquakeUSGS:
    """
    @brief Class that holds earthquake data records.

    Holds one USGS earthquake record (magnitude, position, location, title,
    url and occurrence time).  BRIDGES scripts continually monitor the USGS
    feed and retrieve the latest quake data for student projects.

    This object is generally not created by the user; see
    bridges::data_src_dependent::data_source::get_earthquake_usgs_data()

    @sa For an example, check out https://bridgesuncc.github.io/tutorials/Data_EQ_USGS.html
    @author <NAME>, <NAME>,
    @date 2/18/18, 12/29/20, 1/6/21
    """

    def __set_time_from_unix_timestamp(self, tm):
        """Convert a millisecond epoch timestamp (str or int) into a
        'YYYY-MM-DD HH:MM:SS' UTC string stored on self._time."""
        epoch_time = int(tm)
        eq_time = epoch_time / 1000  # USGS timestamps are in milliseconds
        eqt = time.gmtime(eq_time)
        self._time = time.strftime("%Y-%m-%d %H:%M:%S", eqt)

    def __init__(self, magnitude=None, longit=None, latit=None, location=None,
                 title=None, url=None, time=None):
        """
        @brief constructor
        Args:
            magnitude: magnitude of quake (Richter scale)
            longit: longitude position
            latit: latitude position
            location: location of quake
            title: title (has some of the eq info in a string)
            url: url for more information
            time: occurrence time of quake (millisecond epoch)
        """
        self._time = 0  # replaced by a formatted string once `time` is set
        self._magnitude = magnitude if magnitude is not None else 0.0
        self._longit = longit if longit is not None else 0.0
        self._latit = latit if latit is not None else 0.0
        self._location = location if location is not None else ""
        self._title = title if title is not None else ""
        self._url = url if url is not None else ""
        if time is not None:
            # Route through the setter so the epoch value gets formatted.
            self.time = time

    @property
    def time(self):
        """Occurrence time of the quake (formatted UTC string once set)."""
        return self._time

    @time.setter
    def time(self, tm) -> None:
        """Set occurrence time from a millisecond epoch timestamp."""
        self.__set_time_from_unix_timestamp(tm)

    @property
    def latit(self) -> float:
        """Latitude of the quake."""
        return self._latit

    @latit.setter
    def latit(self, latit: float) -> None:
        self._latit = latit

    @property
    def longit(self) -> float:
        """Longitude of the quake."""
        return self._longit

    @longit.setter
    def longit(self, longit: float) -> None:
        self._longit = longit

    @property
    def location(self) -> str:
        """Location of the quake (typically a city or similar)."""
        return self._location

    @location.setter
    def location(self, location: str):
        self._location = location

    @property
    def title(self) -> str:
        """Title of the quake record."""
        return self._title

    @title.setter
    def title(self, title: str):
        self._title = title

    @property
    def url(self) -> str:
        """URL with more information about the quake."""
        return self._url

    @url.setter
    def url(self, url: str):
        self._url = url

    @property
    def magnitude(self) -> float:
        """Magnitude of the quake (Richter scale)."""
        return self._magnitude

    @magnitude.setter
    def magnitude(self, magn: float):
        """Set the magnitude of the quake.

        Bug fix: the original assigned the undefined name `magnitude`
        instead of the parameter `magn`, raising NameError on every use.
        """
        self._magnitude = magn
| StarcoderdataPython |
11361101 | # Python Standard Library Imports
import json
# Django Imports
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.http import Http404
from django.urls import reverse
from django.utils.http import (
base36_to_int,
int_to_base36,
)
# HTK Imports
from htk.utils import (
htk_setting,
utcnow,
)
from htk.utils.cache_descriptors import CachedAttribute
from htk.utils.general import resolve_model_dynamically
# isort: off
"""
While we *could* import models here for convenience,
we must also remember to be careful to not assume that the external dependencies will be met for every platform, so it's better to import only what's needed explicitly
For example, the following module requires AWS Credentials.
from htk.lib.aws.s3.models import S3MediaAsset
Others, like imaging libraries, require PIL, etc
"""
class HtkBaseModel(models.Model):
    """An abstract class extending Django models.Model for performing common operations
    """
    class Meta:
        abstract = True
    def json_encode(self):
        """Returns a dictionary that can be `json.dumps()`-ed as a JSON representation of this object
        """
        value = {
            'id' : self.id,
        }
        return value
    def json_decode(self, payload):
        """Iterates over a flat dictionary `payload` and updates the attributes on `self`

        Only keys that already exist as attributes are applied; returns True
        (and saves) when at least one attribute was set.
        """
        was_updated = False
        for key, value in payload.items():
            if hasattr(self, key):
                was_updated = True
                setattr(self, key, value)
        if was_updated:
            self.save()
        return was_updated
    ##
    # Crypto
    @classmethod
    def _luhn_xor_key(cls):
        # Per-model XOR key from settings; 0 (a no-op mask) when unconfigured.
        xor_key = htk_setting('HTK_LUHN_XOR_KEYS').get(cls.__name__, 0)
        return xor_key
    @CachedAttribute
    def id_with_luhn_base36(self):
        # Obfuscated public id: XOR the primary key with the model's key,
        # append a Luhn check digit, then base36-encode.
        from htk.utils.luhn import calculate_luhn
        xor_key = self.__class__._luhn_xor_key()
        xored = self.id ^ xor_key
        check_digit = calculate_luhn(xored)
        id_with_luhn = xored * 10 + check_digit
        encoded_id = int_to_base36(id_with_luhn)
        return encoded_id
    @classmethod
    def from_encoded_id_luhn_base36(cls, encoded_id):
        # Inverse of id_with_luhn_base36: decode, verify the check digit,
        # strip it, and un-XOR to recover the primary key.
        from htk.utils.luhn import is_luhn_valid
        id_with_luhn = base36_to_int(encoded_id)
        if is_luhn_valid(id_with_luhn):
            xored = id_with_luhn // 10
            xor_key = cls._luhn_xor_key()
            obj_id = xored ^ xor_key
            obj = cls.objects.get(id=obj_id)
        else:
            obj = None
        return obj
    @classmethod
    def from_encoded_id_luhn_base36_or_404(cls, encoded_id):
        # View-friendly variant: invalid or missing ids raise Http404.
        try:
            obj = cls.from_encoded_id_luhn_base36(encoded_id)
        except cls.DoesNotExist:
            obj = None
        if obj is None:
            raise Http404('No %s matches the given query.' % cls.__name__)
        return obj
    ##
    # URLs
    def get_admin_url(self):
        # Reverse the Django admin change page for this instance.
        content_type = ContentType.objects.get_for_model(self.__class__)
        url = reverse("admin:%s_%s_change" % (content_type.app_label, content_type.model), args=(self.id,))
        return url
    def get_absolute_url(self):
        # Subclasses must override; base models have no canonical URL.
        raise Exception('Not implemented')
class AbstractAttribute(models.Model):
    """An abstract class for storing an arbitrary attribute on a Django model
    The concrete implementing class should have a ForeignKey to the holder object
    and a related_name of "attributes", e.g.
    holder = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='attributes')
    """
    key = models.CharField(max_length=128, blank=True)
    value = models.TextField(max_length=4096, blank=True)
    # meta
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
    class Meta:
        abstract = True
    def set_value(self, value):
        """Store `value` and persist the row immediately."""
        self.value = value
        self.save()
    def value_as_json(self):
        """Return the stored value parsed as JSON, or None if it is not valid JSON."""
        try:
            value = json.loads(self.value)
        except ValueError:
            value = None
        return value
class AbstractAttributeHolderClassFactory(object):
    """Creates an attribute holder class for multi-inheritance

    The generated mixin closes over this factory instance to resolve:
    - `attribute_class`: the concrete AbstractAttribute model to store rows in
    - `holder_resolver`: maps the mixin instance to the FK "holder" object
      (identity by default)
    - `defaults`: fallback values returned by `get_attribute` for unset keys
    """
    def __init__(self, attribute_class, holder_resolver=None, defaults=None):
        self.attribute_class = attribute_class
        if holder_resolver is None:
            self.holder_resolver = lambda self: self
        else:
            self.holder_resolver = holder_resolver
        self.defaults = defaults or {}
    def get_class(self):
        """Build and return the mixin class bound to this factory."""
        factory = self
        class AbstractAttributeHolderClass(object):
            def set_attribute(self, key, value, as_bool=False):
                """Create or update the attribute row for `key`.

                With as_bool=True the value is normalized to int(bool(value))
                before storage, matching how get_attribute(as_bool=True) reads it.
                """
                if as_bool:
                    value = int(bool(value))
                attribute = self._get_attribute_object(key)
                if attribute is None:
                    holder = factory.holder_resolver(self)
                    attribute = factory.attribute_class.objects.create(
                        holder=holder,
                        key=key,
                        value=value
                    )
                else:
                    attribute.set_value(value)
                return attribute
            def _get_attribute_object(self, key):
                # Returns the row for `key`, or None when it does not exist.
                try:
                    holder = factory.holder_resolver(self)
                    attribute = holder.attributes.get(
                        holder=holder,
                        key=key
                    )
                except factory.attribute_class.DoesNotExist:
                    attribute = None
                return attribute
            def get_attribute(self, key, as_bool=False):
                """Read the attribute for `key`, falling back to factory defaults.

                With as_bool=True, any value that is not an int-like string
                (including None) is coerced to False.
                """
                attribute = self._get_attribute_object(key)
                value = attribute.value if attribute else factory.defaults.get(key, None)
                if as_bool:
                    try:
                        value = bool(int(value))
                    except TypeError:
                        value = False
                    except ValueError:
                        value = False
                return value
            def delete_attribute(self, key):
                """Delete the attribute row for `key` if it exists."""
                attribute = self._get_attribute_object(key)
                if attribute:
                    attribute.delete()
            @CachedAttribute
            def attribute_fields(self):
                """Returns a list of attribute keys
                """
                return ()
            @CachedAttribute
            def boolean_attributes_lookup(self):
                """Returns a dictionary of attribute keys that are boolean values
                """
                return {}
        return AbstractAttributeHolderClass
| StarcoderdataPython |
8188680 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import sys
import time
from unittest import mock
from ironic_lib import metrics_utils
import oslo_policy
from oslo_utils import timeutils
from ironic.api.controllers.v1 import node as node_api
from ironic.api.controllers.v1 import utils as api_utils
from ironic.common import context
from ironic.common import service
from ironic.conf import CONF # noqa To Load Configuration
from ironic.db import api as db_api
from ironic.objects import conductor
from ironic.objects import node
def _calculate_delta(start, finish):
    """Return the elapsed time between two time.time()-style timestamps."""
    return finish - start
def _add_a_line():
    """Print a horizontal separator line to visually delimit report phases."""
    print('------------------------------------------------------------')
def _assess_db_performance():
    """Time raw DB access: client acquisition and a full node-list query.

    Prints both timings and returns the number of nodes found.
    """
    start = time.time()
    dbapi = db_api.get_instance()
    print('Phase - Assess DB performance')
    _add_a_line()
    got_connection = time.time()
    nodes = dbapi.get_node_list()
    node_count = len(nodes)
    query_complete = time.time()
    delta = _calculate_delta(start, got_connection)
    print('Obtained DB client in %s seconds.' % delta)
    delta = _calculate_delta(got_connection, query_complete)
    print('Returned %s nodes in python %s seconds from the DB.\n' %
          (node_count, delta))
    # return node count for future use.
    return node_count
def _assess_db_and_object_performance():
    """Time the DB query *plus* conversion of rows into Node objects.

    Prints the list-retrieval time, the iteration/conversion time, and a
    rough JSON size of the nodes table.
    """
    print('Phase - Assess DB & Object conversion Performance')
    _add_a_line()
    start = time.time()
    node_list = node.Node().list(context.get_admin_context())
    got_list = time.time()
    delta = _calculate_delta(start, got_list)
    print('Obtained list of node objects in %s seconds.' % delta)
    count = 0
    tbl_size = 0
    # In a sense, this helps provide a relative understanding if the
    # database is the bottleneck, or the objects post conversion.
    # converting completely to json and then measuring the size helps
    # ensure that everything is "assessed" while not revealing too
    # much detail.
    for node_obj in node_list:
        # Just looping through the entire set to count should be
        # enough to ensure that the entry is loaded from the db
        # and then converted to an object.
        tbl_size = tbl_size + sys.getsizeof(node_obj.as_dict(secure=True))
        count = count + 1
    delta = _calculate_delta(got_list, time.time())
    print('Took %s seconds to iterate through %s node objects.' %
          (delta, count))
    print('Nodes table is roughly %s bytes of JSON.\n' % tbl_size)
    # NOTE(review): observed_vendors is built but never used or returned;
    # presumably this loop only exercises driver_internal_info access --
    # confirm whether the collected vendors were meant to be reported.
    observed_vendors = []
    for node_obj in node_list:
        vendor = node_obj.driver_internal_info.get('vendor')
        if vendor:
            observed_vendors.append(vendor)
@mock.patch('ironic.api.request')  # noqa patch needed for the object model
@mock.patch.object(metrics_utils, 'get_metrics_logger', lambda *_: mock.Mock)
@mock.patch.object(api_utils, 'check_list_policy', lambda *_: None)
@mock.patch.object(api_utils, 'check_allow_specify_fields', lambda *_: None)
@mock.patch.object(api_utils, 'check_allowed_fields', lambda *_: None)
@mock.patch.object(oslo_policy.policy, 'LOG', autospec=True)
def _assess_db_object_and_api_performance(mock_log, mock_request):
    """Time retrieval of all nodes through the REST API controller layer.

    Policy checks and metrics are mocked out so only the controller /
    object / DB path is measured. Pages are fetched with marker-based
    pagination until exhausted, then the total time is printed.
    """
    print('Phase - Assess DB & Object conversion Performance')
    _add_a_line()
    # Just mock it to silence it since getting the logger to update
    # config seems like not a thing once started. :\
    mock_log.debug = mock.Mock()
    # Internal logic requires major/minor versions and a context to
    # proceed. This is just to make the NodesController respond properly.
    mock_request.context = context.get_admin_context()
    mock_request.version.major = 1
    mock_request.version.minor = 71
    start = time.time()
    node_api_controller = node_api.NodesController()
    node_api_controller.context = context.get_admin_context()
    fields = ("uuid,power_state,target_power_state,provision_state,"
              "target_provision_state,last_error,maintenance,properties,"
              "instance_uuid,traits,resource_class")
    total_nodes = 0
    res = node_api_controller._get_nodes_collection(
        chassis_uuid=None,
        instance_uuid=None,
        associated=None,
        maintenance=None,
        retired=None,
        provision_state=None,
        marker=None,
        limit=None,
        sort_key="id",
        sort_dir="asc",
        fields=fields.split(','))
    total_nodes = len(res['nodes'])
    # NOTE(review): the loop stops when a page contains exactly one node
    # (or zero, via the break below) -- verify this terminates for every
    # page-size configuration.
    while len(res['nodes']) != 1:
        print(" ** Getting nodes ** %s Elapsed: %s seconds." %
              (total_nodes, _calculate_delta(start, time.time())))
        res = node_api_controller._get_nodes_collection(
            chassis_uuid=None,
            instance_uuid=None,
            associated=None,
            maintenance=None,
            retired=None,
            provision_state=None,
            marker=res['nodes'][-1]['uuid'],
            limit=None,
            sort_key="id",
            sort_dir="asc",
            fields=fields.split(','))
        new_nodes = len(res['nodes'])
        if new_nodes == 0:
            break
        total_nodes = total_nodes + new_nodes
    delta = _calculate_delta(start, time.time())
    print('Took %s seconds to return all %s nodes via '
          'nodes API call pattern.\n' % (delta, total_nodes))
def _report_conductors():
    """Summarize the conductor fleet: counts, liveness, groups and drivers.

    A conductor is considered online if its heartbeat (updated_at) is
    within the last 90 seconds.
    """
    print('Phase - identifying conductors/drivers')
    _add_a_line()
    conductors = conductor.Conductor().list(
        context.get_admin_context(),
    )
    drivers = []
    groups = []
    online_count = 0
    # Liveness cutoff: heartbeats older than 90s count as offline.
    online_by = timeutils.utcnow(with_timezone=True) - \
        datetime.timedelta(seconds=90)
    for conductor_obj in conductors:
        if conductor_obj.conductor_group:
            groups.append(conductor_obj.conductor_group)
        if conductor_obj.updated_at > online_by:
            online_count = online_count + 1
        for driver in conductor_obj.drivers:
            drivers.append(driver)
    conductor_count = len(conductors)
    print('Conductor count: %s' % conductor_count)
    print('Online conductor count: %s' % online_count)
    running_with_groups = len(groups)
    print('Conductors with conductor_groups: %s' % running_with_groups)
    # set() removes duplicate group names / driver names before counting.
    group_count = len(set(groups))
    print('Conductor group count: %s' % group_count)
    driver_list = list(set(drivers))
    print('Presently supported drivers: %s' % driver_list)
def main():
    """Run every assessment phase in sequence."""
    service.prepare_service()
    # Force debug logging off for the benchmark run.
    CONF.set_override('debug', False)
    _assess_db_performance()
    _assess_db_and_object_performance()
    _assess_db_object_and_api_performance()
    _report_conductors()
if __name__ == '__main__':
    # main() returns None, so this exits with status 0 on success.
    sys.exit(main())
| StarcoderdataPython |
11324993 | #!/usr/bin/env python3
import json
import os

# Convert split chunk files (named x*) containing comma-terminated JSON
# records into pretty-printed JSON array files under json-out/.
if not os.path.exists('json-out'):
    os.makedirs('json-out')

for name in os.listdir("."):
    if not name.startswith("x"):
        continue
    with open(name) as src:
        contents = src.read()
    # Drop the trailing ",\n" so the records can be wrapped into an array.
    contents = contents[0:(len(contents) - 2)]
    try:
        contents_obj = json.loads('[' + contents + ']')
    except ValueError as e:
        print("Error in file " + name)
        raise e
    print("load " + str(len(contents_obj)) + " objects")
    with open("json-out/" + name + '.json', 'w') as dst:
        json.dump(contents_obj, dst, indent=2)
print("Done")
| StarcoderdataPython |
6511543 | # Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import deque

# Replay up to 100 deque commands read from STDIN, then print the
# resulting deque contents space-separated.
items = deque()
count = int(input())  # number of commands to process
if 0 < count <= 100:
    for _ in range(count):
        parts = input().split()
        operation = parts[0]
        if operation == "append":
            items.append(int(parts[1]))
        elif operation == "appendleft":
            items.appendleft(int(parts[1]))
        elif operation == "pop":
            items.pop()
        elif operation == "popleft":
            items.popleft()
print(*items)
| StarcoderdataPython |
8006064 | from haystack.fields import NgramField
try:
    # Prefer the Elasticsearch-specific SuggestField when available.
    from .elasticsearch import SuggestField
except ImportError:
    # Fallback so `SuggestField` stays importable without the
    # elasticsearch backend; behaves as a plain NgramField.
    class SuggestField(NgramField):
        pass
class SearchQuerySetWrapper(object):
    """
    Decorates a SearchQuerySet object using a generator for efficient iteration
    """
    def __init__(self, sqs, model):
        """Wrap *sqs* (a haystack SearchQuerySet) for the given *model* class."""
        self.sqs = sqs
        self.model = model
    def count(self):
        """Return the number of results in the underlying SearchQuerySet."""
        return self.sqs.count()
    def __iter__(self):
        """Lazily yield the database object backing each search result."""
        for result in self.sqs:
            yield result.object
    def __getitem__(self, key):
        """Index or slice the wrapper.

        An in-range non-negative integer index returns the single backing
        object; anything else (slices, out-of-range or negative keys) is
        passed through to the delegate SearchQuerySet and re-wrapped.
        """
        # Bug fix: the bounds check previously used `or`
        # (`key >= 0 or key < self.count()`), which is true for every
        # int, so out-of-range keys were never delegated as intended.
        if isinstance(key, int) and (0 <= key < self.count()):
            # return the object at the specified position
            return self.sqs[key].object
        # Pass the slice/range on to the delegate
        return SearchQuerySetWrapper(self.sqs[key], self.model)
| StarcoderdataPython |
8087101 | <reponame>shridarpatil/Flask-RestApi
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exceptions to be raised by every other sub-package"""
__all__ = ['RestApixBaseException', 'ResourceNotFound']
class RestApixBaseException(Exception):
    """Root of the exception hierarchy for this package."""


class ResourceNotFound(RestApixBaseException):
    """Raised when a requested resource does not exist."""


class DuplicateEntry(RestApixBaseException):
    """Raised on a MySQL duplicate-entry error."""
class InvalidUsage(Exception):
    """API error carrying an HTTP status code and an optional payload."""

    status_code = 400  # class-level default; overridden per instance if given

    def __init__(self, message, status_code=None, payload=None):
        """Remember the message, optional status override and extra payload."""
        super(InvalidUsage, self).__init__()
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Build the JSON-serialisable error body for the response."""
        body = dict(self.payload or ())
        body.update(message=self.message, success=False, type='Error')
        return body
| StarcoderdataPython |
9745189 | <gh_stars>1-10
from datetime import date, datetime
from decimal import Decimal
import itertools
import time
def encode(value):
    """Serialize *value* into a type-tagged string via the module encoders."""
    encoder = encodings.get(type(value))
    if encoder is not None:
        return encoder(value)
    raise ValueError('Can\'t encode type: %s' % type(value))
def decode(value):
    """Deserialize a type-tagged string produced by encode()."""
    decoder = decodings.get(value[:1])
    if decoder is not None:
        return decoder(value[2:])
    raise ValueError('Can\'t decode value of unknown type: %s' % value)
def encode_dict(value):
    """Return a copy of dict *value* with every value passed through encode().

    Keys are left untouched. Uses ``items()`` rather than the
    Python-2-only ``iteritems()`` so the helper also runs on Python 3
    (``items()`` exists on both versions).
    """
    return dict((k, encode(v)) for k, v in value.items())
def decode_dict(value):
    """Return a copy of dict *value* with every value passed through decode().

    Keys are left untouched. Uses ``items()`` rather than the
    Python-2-only ``iteritems()`` so the helper also runs on Python 3.
    """
    return dict((k, decode(v)) for k, v in value.items())
# Type -> encoder table. Each encoder prefixes the payload with a
# one-character type tag plus '_', which decode() uses for dispatch.
# NOTE: the `long` and `unicode` keys only exist on Python 2; this table
# (and the base-64 string codecs below) is Python-2 only as written.
encodings = {
    type(None): lambda value: 'None',
    int: lambda value: 'i_' + str(value),
    long: lambda value: 'i_' + str(value),
    float: lambda value: 'f_' + str(value),
    bool: lambda value: 'b_' + str(int(value)),
    Decimal: lambda value: 'd_' + str(value),
    str: lambda value: 'u_' + _escape(value.encode('base-64')),
    unicode: lambda value: 'u_' + _escape(value.encode('utf-8').encode('base-64')),
    datetime: lambda value: 't_%i.%06i'%(time.mktime(value.timetuple()), value.microsecond),
    date: lambda value: 'a_%i'%(time.mktime(value.timetuple())),
    # Sequences/dicts escape each encoded element so '_' and ':' can be
    # used as separators safely.
    tuple: lambda value: 'l_' + '_'.join(map(lambda a: _escape(encode(a)), value)),
    list: lambda value: 'l_' + '_'.join(map(lambda a: _escape(encode(a)), value)),
    dict: lambda value: 'h_' + '_'.join(map(lambda a: '%s:%s' % (_escape(encode(a[0])), _escape(encode(a[1]))), value.items())),
}
# Type-tag -> decoder table; the inverse of `encodings` above.
# Note: tuples encode with tag 'l', so they decode back as lists.
# Python-2 only as written (str.decode('base-64'), map() returning lists).
decodings = {
    'N': lambda value: None,
    'i': int,
    'f': float,
    'b': lambda value: bool(int(value)),
    'd': Decimal,
    'u': lambda value: _unescape(value).decode('base-64').decode('utf-8'),
    't': lambda value: datetime.fromtimestamp(float(value)),
    'a': lambda value: date.fromtimestamp(float(value)),
    'l': lambda value: map(decode, map(_unescape, value.split('_'))),
    'h': lambda value: dict(map(lambda a: map(decode, map(_unescape, a.split(':'))), value.split('_'))),
}
def _escape(value):
return value.replace('|', '||').replace('\n', '|n').replace('_', '|u')
def _unescape(value):
return value.replace('||', '|').replace('|n', '\n').replace('|u', '_')
| StarcoderdataPython |
1685315 | <gh_stars>10-100
'''
@package: pyAudioLex
@author: <NAME>
@module: polarity
Take in a text sample and output the average, standard deviation, and variance
polarity. (+) indicates happy and (-) indicates sad, 0 is neutral.
'''
import nltk
from nltk import word_tokenize
from textblob import TextBlob
import numpy as np
def polarity(importtext):
    """Sentiment-polarity statistics over 3-token windows of a text.

    Tokenizes *importtext*, computes the TextBlob polarity of every
    consecutive non-overlapping group of 3 tokens, and returns the
    mean, standard deviation and variance of those window polarities.

    Note: this previously also ran nltk.pos_tag() and computed a
    whole-text polarity/subjectivity whose results were never used or
    returned; that dead (and expensive) work has been removed. The
    returned values are unchanged.

    Args:
        importtext: raw text sample to analyse.

    Returns:
        [avgpolarity, stdpolarity, varpolarity] as Python floats.
        For texts shorter than 3 tokens the window list is empty and
        numpy yields nan for the statistics (pre-existing behaviour).
    """
    text = word_tokenize(importtext)
    polaritylist = []
    # Polarity of every non-overlapping 3-token window.
    for i in range(0, len(text), 3):
        if i <= len(text) - 3:
            words = text[i] + ' ' + text[i + 1] + ' ' + text[i + 2]
            polaritylist.append(TextBlob(words).sentiment[0])
    avgpolarity = np.mean(polaritylist)
    stdpolarity = np.std(polaritylist)
    varpolarity = np.var(polaritylist)
    return [float(avgpolarity), float(stdpolarity), float(varpolarity)]
| StarcoderdataPython |
3589455 | # Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
import glob
import requests
import json
from deepdiff import DeepDiff
from .utils import config
def test_check_objects_integrity():
    """Check every JSON asset file matches the server state via the REST API.

    Each assets/<endpoint>.json file is compared (order-insensitively)
    against GET /<endpoint>?page_size=all; only the volatile last_login
    field is excluded from the diff.
    """
    with requests.Session() as session:
        session.auth = ('admin1', config.USER_PASS)
        for filename in glob.glob(os.path.join(config.ASSETS_DIR, '*.json')):
            with open(filename) as f:
                endpoint = os.path.basename(filename).rsplit('.')[0]
                response = session.get(config.get_api_url(endpoint, page_size='all'))
                json_objs = json.load(f)
                resp_objs = response.json()
                # Raw string: the pattern contains regex escapes (\[ and \d)
                # which previously produced invalid-escape-sequence warnings
                # in a plain string literal.
                assert DeepDiff(json_objs, resp_objs, ignore_order=True,
                    exclude_regex_paths=r"root\['results'\]\[\d+\]\['last_login'\]") == {}
| StarcoderdataPython |
5113575 | <reponame>manoadamro/jason<filename>tests/unit/props/types/test_bool.py
import pytest
from jason import props
# Unit tests for props.Bool: loading booleans, string coercion,
# nullability, type errors and defaults.
def test_validates():
    assert props.Bool().load(True)
def test_true_from_string():
    assert props.Bool().load("true") is True
def test_false_from_string():
    assert props.Bool().load("false") is False
def test_allow_strings_is_false():
    # With string coercion disabled, "false" must be rejected.
    with pytest.raises(props.PropertyValidationError):
        assert props.Bool(allow_strings=False).load("false") is False
def test_nullable():
    # Should not raise for None when nullable=True.
    props.Bool(nullable=True).load(None)
def test_not_nullable():
    with pytest.raises(props.PropertyValidationError):
        props.Bool().load(None)
def test_wrong_type():
    with pytest.raises(props.PropertyValidationError):
        props.Bool().load("12345")
def test_default():
    # None falls back to the configured default.
    assert props.Bool(default=True).load(None) is True
| StarcoderdataPython |
12825614 | #!/usr/bin/env python
from __future__ import print_function
import os
import subprocess
import signal
import time
import glob
import unittest
# File the emulated ZX printer writes to (created by Fuse in the CWD).
ZX_SPEC_OUTPUT_FILE = "printout.txt"
# Sentinel the ZX Spec framework prints when a test run has finished.
ZX_SPEC_TEST_END_MARKER = '-- ZX SPEC TEST END --'
class TestPasses(unittest.TestCase):
    """Run the all-passing tape once and assert on the printer output."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): 'self' here is actually the class (classmethod);
        # the conventional name is 'cls'.
        clean()
        self.output = run_zx_spec("bin/test-passes.tap")
        self.num_tests = 64
    def test_zx_spec_header_displayed(self):
        self.assertRegexpMatches(self.output, 'ZX Spec v')
    def test_marks_show_tests_passed(self):
        # One '.' is printed per passing test.
        self.assertRegexpMatches(
            self.output.replace('\n', ''),
            '\.' * self.num_tests)
    def test_all_tests_pass(self):
        self.assertRegexpMatches(
            self.output,
            'Pass: {0}, Fail: 0, Total: {0}'.format(
                self.num_tests))
    def test_framework_exited_correctly(self):
        self.assertRegexpMatches(self.output, ZX_SPEC_TEST_END_MARKER)
    @classmethod
    def tearDownClass(self):
        clean()
class TestFailures(unittest.TestCase):
    """Run the all-failing tape and assert every assertion's failure message
    (expected/actual values) appears in the printer output."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): 'self' here is actually the class (classmethod).
        clean()
        self.num_tests = 48
        self.output = run_zx_spec("bin/test-failures.tap")
    def test_zx_spec_header_displayed(self):
        self.assertRegexpMatches(self.output, 'ZX Spec v')
    def test_shows_failed_tests(self):
        self.assertRegexpMatches(self.output, 'assert_fail')
        self.assertRegexpMatches(self.output, 'assert_a_equal\n fails for different value\n\nExpected: 250, Actual: 5')
        self.assertRegexpMatches(self.output, 'assert_b_equal\n fails for different value\n\nExpected: 250, Actual: 5')
        self.assertRegexpMatches(self.output, 'assert_c_equal\n fails for different value\n\nExpected: 250, Actual: 5')
        self.assertRegexpMatches(self.output, 'assert_d_equal\n fails for different value\n\nExpected: 250, Actual: 5')
        self.assertRegexpMatches(self.output, 'assert_e_equal\n fails for different value\n\nExpected: 250, Actual: 5')
        self.assertRegexpMatches(self.output, 'assert_h_equal\n fails for different value\n\nExpected: 250, Actual: 5')
        self.assertRegexpMatches(self.output, 'assert_l_equal\n fails for different value\n\nExpected: 250, Actual: 5')
        self.assertRegexpMatches(self.output, 'assert_hl_equal\n fails for different value\n\nExpected: 502, Actual: 500')
        self.assertRegexpMatches(self.output, 'assert_bc_equal\n fails for different value\n\nExpected: 502, Actual: 500')
        self.assertRegexpMatches(self.output, 'assert_de_equal\n fails for different value\n\nExpected: 502, Actual: 500')
        self.assertRegexpMatches(self.output, 'assert_ix_equal\n fails for different value')
        self.assertRegexpMatches(self.output, 'assert_byte_equal\n fails for different value\n\nExpected: 255, Actual: 204')
        self.assertRegexpMatches(self.output, 'assert_word_equal\n fails for different value\n\nExpected: 258, Actual: 65501')
        self.assertRegexpMatches(self.output, 'assert_str_equal\n fails for different value\n\nExpected: "diff test string", Ac\ntual: "test string\?\?\?\?\?"')
        self.assertRegexpMatches(self.output, 'assert_bytes_equal\n fails for different value\n\nExpected: 12,34,55,66,AA,BB,DE,F\n0, Actual: 12,34,56,78,9A,BC,DE,\nF0')
        self.assertRegexpMatches(self.output, 'assert_bytes_not_equal\n fails for same value')
        self.assertRegexpMatches(self.output, 'assert_z_set\n fails when zero flag reset')
        self.assertRegexpMatches(self.output, 'assert_z_reset\n fails when zero flag set')
        self.assertRegexpMatches(self.output, 'assert_carry_set\n fails when carry flag reset')
        self.assertRegexpMatches(self.output, 'assert_carry_reset\n fails when carry flag set')
        self.assertRegexpMatches(self.output, 'assert_s_set\n fails when signed flag reset')
        self.assertRegexpMatches(self.output, 'assert_s_reset\n fails when signed flag set')
        self.assertRegexpMatches(self.output, 'assert_p_v_set\n fails when overflow flag reset')
        self.assertRegexpMatches(self.output, 'assert_p_v_reset\n fails when overflow flag set')
        self.assertRegexpMatches(self.output, 'x\n fails for different value\n\nExpected: 503, Actual: 500')
        self.assertRegexpMatches(self.output, 'x\n fails for different value\n\nExpected: 2, Actual: 1')
        self.assertRegexpMatches(self.output, 'x\n fails for different value\n\nExpected: 3, Actual: 1')
        self.assertRegexpMatches(self.output, 'assert_a_not_equal')
        self.assertRegexpMatches(self.output, 'assert_b_not_equal')
        self.assertRegexpMatches(self.output, 'assert_c_not_equal')
        self.assertRegexpMatches(self.output, 'assert_d_not_equal')
        self.assertRegexpMatches(self.output, 'assert_e_not_equal')
        self.assertRegexpMatches(self.output, 'assert_h_not_equal')
        self.assertRegexpMatches(self.output, 'assert_l_not_equal')
        self.assertRegexpMatches(self.output, 'assert_hl_not_equal')
        self.assertRegexpMatches(self.output, 'assert_bc_not_equal')
        self.assertRegexpMatches(self.output, 'assert_de_not_equal')
        self.assertRegexpMatches(self.output, 'assert_ix_not_equal')
        self.assertRegexpMatches(self.output, 'assert_a_is_zero')
        self.assertRegexpMatches(self.output, 'assert_a_is_not_zero')
        self.assertRegexpMatches(self.output, 'assert_byte_not_equal')
        self.assertRegexpMatches(self.output, 'assert_word_not_equal')
        self.assertRegexpMatches(self.output, 'assert_str_not_equal')
    def test_16_bit_numbers_displayed_correctly(self):
        self.assertRegexpMatches(self.output, 'assert_word_equal\n fails for different value\n\nExpected: 258, Actual: 65501')
    def test_all_tests_failed(self):
        self.assertRegexpMatches(self.output, 'Pass: 0, Fail: {0}, Total: {0}'.format(
            self.num_tests
        ))
    def test_framework_exited_correctly(self):
        self.assertRegexpMatches(self.output, ZX_SPEC_TEST_END_MARKER)
    @classmethod
    def tearDownClass(self):
        clean()
class TestHexDisplay(unittest.TestCase):
    """Run the hex-display tape and check values are printed as hex."""
    @classmethod
    def setUpClass(self):
        clean()
        self.output = run_zx_spec("bin/test-hex.tap")
    def test_hex_values_are_displayed_correctly(self):
        self.assertRegexpMatches(self.output, 'assert_word_equal\n fails for different value\n\nExpected: 0000, Actual: FFFF')
        self.assertRegexpMatches(self.output, 'fails for different value\n\nExpected: ACDC, Actual: FFFF')
        self.assertRegexpMatches(self.output, 'assert_byte_equal\n fails for different value\n\nExpected: 3B, Actual: 2A')
    def test_framework_exited_correctly(self):
        self.assertRegexpMatches(self.output, ZX_SPEC_TEST_END_MARKER)
    @classmethod
    def tearDownClass(self):
        clean()
class TestVerbose(unittest.TestCase):
    """Run the verbose-mode tape and check test descriptions are printed."""
    @classmethod
    def setUpClass(self):
        clean()
        self.output = run_zx_spec("bin/test-verbose-mix.tap")
    def test_descriptions_displayed_correctly(self):
        self.assertRegexpMatches(self.output, 'assert_pass\n passes test')
        self.assertRegexpMatches(self.output, 'assert_a_equal\n passes for same value')
    def test_framework_exited_correctly(self):
        self.assertRegexpMatches(self.output, ZX_SPEC_TEST_END_MARKER)
    @classmethod
    def tearDownClass(self):
        clean()
def clean():
    """Remove leftover printer output files (printout.*) from the CWD."""
    for f in glob.glob("printout.*"):
        os.remove(f)
def printout_txt(filename):
    """Return the entire text content of *filename*."""
    with open(filename, 'r') as handle:
        return handle.read()
def wait_for_printout(filename):
    """Block until *filename* exists.

    Polls five times a second for up to 600 attempts (~2 minutes),
    then raises IOError if the file never appeared.
    """
    for _ in range(120 * 5):
        if os.path.exists(filename):
            return
        time.sleep(0.2)
    raise IOError('Output file not produced in time')
def wait_for_framework_completion(filename):
    """Tail *filename* until the ZX Spec end marker appears.

    Echoes any newly-appended output to stdout while waiting; raises
    after ~2 minutes (600 polls at 0.2s) without the marker.
    """
    wait_count = 0
    cursor = 0  # index of the last character already echoed
    while 1:
        contents = printout_txt(filename)
        if ZX_SPEC_TEST_END_MARKER in contents:
            break
        # Print only the portion added since the previous poll.
        print(contents[cursor:], end='')
        cursor = len(contents)
        time.sleep(0.2)
        wait_count += 1
        if wait_count == 120 * 5:
            raise Exception('Framework did not indicate clean exit in time')
def run_zx_spec(tape):
    """Run *tape* in the Fuse emulator and return the printer output.

    Launches Fuse (binary taken from $FUSE, default 'fuse') in its own
    process group, waits for the printer file and the end marker, then
    kills the whole group and returns the captured printout text.
    """
    cmd_line = "{0} --no-sound --zxprinter --printer --tape {1} --auto-load --no-autosave-settings".format(
        os.getenv("FUSE", "fuse"),
        tape)
    proc = subprocess.Popen(
        cmd_line, shell=True, preexec_fn=os.setsid)
    wait_for_printout(ZX_SPEC_OUTPUT_FILE)
    wait_for_framework_completion(ZX_SPEC_OUTPUT_FILE)
    # Kill the entire process group so the emulator's children die too.
    os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
    return printout_txt(ZX_SPEC_OUTPUT_FILE)
if __name__ == '__main__':
    # Verbosity 2 prints each test name as it runs.
    unittest.main(verbosity=2)
| StarcoderdataPython |
4936422 | # see https://www.codewars.com/kata/55968ab32cf633c3f8000008/solutions/python
def initials(name):
    """Abbreviate every name but the last to 'X.'; capitalize the last name.

    e.g. 'code wars' -> 'C.Wars', 'Barack hussein obama' -> 'B.H.Obama'.
    """
    parts = name.split(" ")
    abbreviated = "".join(part[0].upper() + "." for part in parts[:-1])
    return abbreviated + parts[-1].capitalize()
# Ad-hoc self-checks: each line should print True.
# (A stray dataset artifact fused onto the last line previously made it
# a syntax error; removed.)
print(initials('code wars') == 'C.Wars')
print(initials('Barack hussein obama') == 'B.H.Obama')
print(initials('barack hussein Obama') == 'B.H.Obama')
1692437 |
def get_lti_value(key, tool_provider):
    """ Searches for the given key in the tool_provider and its custom and external params.
    If not found returns None """
    lti_value = None
    # NOTE(review): these are substring checks, so a key containing both
    # "custom" and "ext" would match both branches and the second lookup
    # would overwrite the first -- confirm that is intended.
    if "custom" in key:
        lti_value = tool_provider.custom_params[key]
    if "ext" in key:
        lti_value = tool_provider.ext_params[key]
    if not lti_value:
        # Fall back to a plain attribute on the tool_provider object.
        try:
            lti_value = getattr(tool_provider,key)
        except AttributeError:
            # Python 2 print statement: this module targets Python 2.
            print "Attribute: %s not found in LTI tool_provider" % key
    return lti_value
| StarcoderdataPython |
274672 | import pytest
from django.contrib.auth import get_user_model
from pontoon.base.models import Project, Resource
from pontoon.base.utils import (
aware_datetime,
extension_in,
get_m2m_changes,
get_object_or_none,
glob_to_regex,
latest_datetime)
def test_util_glob_to_regex():
    # glob_to_regex anchors the pattern and maps '*' to '.*'.
    assert glob_to_regex('*') == '^.*$'
    assert glob_to_regex('/foo*') == '^\\/foo.*$'
    assert glob_to_regex('*foo') == '^.*foo$'
    assert glob_to_regex('*foo*') == '^.*foo.*$'
@pytest.mark.django_db
def test_util_glob_to_regex_db(resource0, resource1):
    # glob_to_regex output must behave like the equivalent ORM filters
    # when used with __regex lookups.
    assert resource0 in Resource.objects.filter(path__regex=glob_to_regex('*'))
    assert resource1 in Resource.objects.filter(path__regex=glob_to_regex('*'))
    assert (
        list(Resource.objects.filter(path__regex=glob_to_regex('*')))
        == list(Resource.objects.all()))
    assert (
        resource0
        in Resource.objects.filter(
            path__regex=glob_to_regex('*0*')))
    assert (
        resource1
        not in Resource.objects.filter(
            path__regex=glob_to_regex('*0*')))
    assert (
        list(Resource.objects.filter(path__regex=glob_to_regex('*0*')))
        == list(Resource.objects.filter(path__contains='0')))
# get_m2m_changes(old_qs, new_qs) returns ([added], [removed]).
@pytest.mark.django_db
def test_get_m2m_changes_no_change(user0):
    assert get_m2m_changes(
        get_user_model().objects.none(),
        get_user_model().objects.none()
    ) == ([], [])
    assert get_m2m_changes(
        get_user_model().objects.filter(pk=user0.pk),
        get_user_model().objects.filter(pk=user0.pk),
    ) == ([], [])
@pytest.mark.django_db
def test_get_m2m_added(user0, user1):
    assert get_m2m_changes(
        get_user_model().objects.none(),
        get_user_model().objects.filter(pk=user1.pk)
    ) == ([user1], [])
    assert get_m2m_changes(
        get_user_model().objects.filter(pk=user0.pk),
        get_user_model().objects.filter(pk__in=[user0.pk, user1.pk])
    ) == ([user1], [])
@pytest.mark.django_db
def test_get_m2m_removed(user0, user1):
    assert get_m2m_changes(
        get_user_model().objects.filter(pk=user1.pk),
        get_user_model().objects.none(),
    ) == ([], [user1])
    assert get_m2m_changes(
        get_user_model().objects.filter(pk__in=[user0.pk, user1.pk]),
        get_user_model().objects.filter(pk=user0.pk),
    ) == ([], [user1])
@pytest.mark.django_db
def test_get_m2m_mixed(user0, user1, userX):
    # Simultaneous additions and removals are reported together.
    assert get_m2m_changes(
        get_user_model().objects.filter(pk__in=[user1.pk, userX.pk]),
        get_user_model().objects.filter(pk__in=[user0.pk, user1.pk]),
    ) == ([user0], [userX])
    assert get_m2m_changes(
        get_user_model().objects.filter(pk__in=[user0.pk, user1.pk]),
        get_user_model().objects.filter(pk__in=[userX.pk]),
    ) == ([userX], [user0, user1])
    assert get_m2m_changes(
        get_user_model().objects.filter(pk__in=[user1.pk]),
        get_user_model().objects.filter(pk__in=[userX.pk, user0.pk]),
    ) == ([user0, userX], [user1])
def test_util_base_extension_in():
    assert extension_in('filename.txt', ['bat', 'txt'])
    assert extension_in('filename.biff', ['biff'])
    assert extension_in('filename.tar.gz', ['gz'])
    assert not extension_in('filename.txt', ['png', 'jpg'])
    # Dotfiles have no extension per splitext.
    assert not extension_in('.dotfile', ['bat', 'txt'])
    # Unintuitive, but that's how splitext works.
    assert not extension_in('filename.tar.gz', ['tar.gz'])
@pytest.mark.django_db
def test_util_base_get_object_or_none(project0):
    # Returns None instead of raising DoesNotExist on a miss.
    assert get_object_or_none(Project, slug='does-not-exist') is None
    assert get_object_or_none(Project, slug='project0') == project0
def test_util_base_latest_datetime():
    # latest_datetime ignores None entries and returns the max, or None
    # when every entry is None.
    larger = aware_datetime(2015, 1, 1)
    smaller = aware_datetime(2014, 1, 1)
    assert latest_datetime([None, None, None]) is None
    assert latest_datetime([None, larger]) == larger
    assert latest_datetime([None, smaller, larger]) == larger
| StarcoderdataPython |
11385480 | from __future__ import print_function
from helper import Helper
import pandas as pd
from mldata.lib.category_converter import CategoryConverter
from mldata.lib.normalizer import Normalizer
class Processor(object):
    """Normalizes a CSV data set: categorical columns are converted to
    numbers (via CategoryConverter, supervised by the target column) and
    all non-excluded columns are normalized (via Normalizer)."""
    def __init__(self, csv_file_path, target_column, exclude_column_list=None, category_list=None, positive_tag=1,
                 csv_header=0, invalid_values=None):
        if exclude_column_list is None:
            exclude_column_list = []
        self._exclude_column_list = exclude_column_list
        if category_list is None:
            category_list = []
        if invalid_values is None:
            invalid_values = []
        self._csv_file_path = csv_file_path
        self._target_column = target_column
        self._category_column_list = category_list
        self._positive_tag = positive_tag
        self._csv_header = csv_header
        # NOTE(review): the file is read here, but the existence check in
        # _check_parameters() only runs afterwards -- a missing file will
        # surface as a pandas error, not the intended ValueError.
        self._raw_data = pd.read_csv(self._csv_file_path, header=self._csv_header)
        self._header = self._raw_data.columns.values.tolist()
        self._invalid_values = invalid_values
        print("headers: %s" % str(self._header))
        # check parameters, if failed will throw illegal parameter exception
        self._check_parameters()
    def _check_parameters(self):
        # Validate the CSV path and that the target column exists.
        if not Helper.is_file_exist(self._csv_file_path):
            raise ValueError("CSV file %s not exist!" % self._csv_file_path)
        if self._target_column not in self._header:
            raise ValueError("target column %s not exist in excel header" % self._target_column)
    def _get_numerical_column_list(self):
        # All columns except the target, excluded and categorical ones.
        # (Despite the name, this returns a set.)
        return set(self._header) - {self._target_column} - set(self._exclude_column_list) \
            - set(self._category_column_list)
    def normalize(self):
        """Normalize categorical then numerical columns in place."""
        for category_column in self._category_column_list:
            print("starting normalize %s" % category_column)
            self._raw_data[category_column] = self._normalize_category_column(category_column)
        for numerical_column in self._get_numerical_column_list():
            print("starting normalize %s" % numerical_column)
            self._raw_data[numerical_column] = self._normalize_numerical_column(numerical_column)
    def _normalize_numerical_column(self, numerical_column):
        # Normalize raw numeric values, skipping configured invalid values.
        column_value_list = self._raw_data[numerical_column].values.tolist()
        return Normalizer(column_value_list, self._invalid_values).normalize_numerical_value()
    def _normalize_category_column(self, category_column):
        # Convert categories to numbers using the target column, then normalize.
        column_value_list = self._raw_data[category_column].values.tolist()
        target_list = self._raw_data[self._target_column].values.tolist()
        numerical_list = CategoryConverter(column_value_list, target_list, self._positive_tag).convert()
        return Normalizer(numerical_list, self._invalid_values).normalize_numerical_value()
    def save_to_file(self, new_file_name):
        """Write the (possibly normalized) data to a CSV without the index."""
        self._raw_data.to_csv(new_file_name, index=False)
| StarcoderdataPython |
11230712 | import random
import requests
from celery import shared_task
from celery.utils.log import get_task_logger
from celery.signals import task_postrun
from django.core.management import call_command
from polls.consumers import notify_channel_layer
logger = get_task_logger(__name__)
@shared_task()
def sample_task(email):
    """Run the (slow) external API call for *email* as a background task."""
    # Imported lazily to avoid a circular import between polls.views and
    # this tasks module at Django startup.
    from polls.views import api_call
    api_call(email)
@shared_task(
    bind=True,
    autoretry_for=(Exception,),
    retry_backoff=5,
    retry_jitter=True,
    retry_kwargs={'max_retries': 5},
)
def task_process_notification(self):
    """Demo task exercising Celery's automatic retry machinery.

    Fails on purpose roughly half of the time; ``autoretry_for`` then
    re-schedules it with exponential backoff (up to 5 retries).
    """
    # Deliberate ~50% failure rate to trigger the retry path.
    if not random.choice([0, 1]):
        raise Exception()
    # Bug fix: always pass a timeout — requests.post without one can block
    # the Celery worker forever if the endpoint hangs.
    requests.post('https://httpbin.org/delay/5', timeout=30)
@task_postrun.connect
def task_postrun_handler(task_id, **kwargs):
    """
    When celery task finish, send notification to Django channel_layer,
    so Django channel would receive the event and then send it to web client.
    """
    # task_id identifies the finished task; the web client listening on the
    # channel layer uses it to match the notification to its request.
    notify_channel_layer(task_id)
@shared_task(name='task_clear_session')
def task_clear_session():
    """Purge expired sessions via Django's ``clearsessions`` management command."""
    call_command('clearsessions')
@shared_task(name='default:dynamic_example_one')
def dynamic_example_one():
    """Example task; the ``default:`` name prefix selects its queue."""
    logger.info('Example One')
@shared_task(name='low_priority:dynamic_example_two')
def dynamic_example_two():
    """Example task; the ``low_priority:`` name prefix selects its queue."""
    logger.info('Example Two')
@shared_task(name='high_priority:dynamic_example_three')
def dynamic_example_three():
    """Example task; the ``high_priority:`` name prefix selects its queue."""
    logger.info('Example Three')
| StarcoderdataPython |
6572309 | <reponame>DuJiajun1994/BinaryNetwork
import tensorflow as tf
from tensorflow.python.framework import ops
def binary_identity(inputs, name=None):
    """Binarize *inputs* with tf.sign while keeping an identity gradient.

    The gradient of the Sign op is remapped to Identity — a
    straight-through estimator — so the binarization step remains usable
    during backpropagation (Sign's true gradient is zero almost
    everywhere and would block learning).

    :param inputs: tensor to binarize
    :param name: optional name for the op scope
    :return: tf.sign(inputs), with gradients flowing through unchanged
    """
    graph = tf.get_default_graph()
    with ops.name_scope(name, 'BinaryIdentity', [inputs]):
        # Within this map, any Sign op created uses Identity's gradient.
        with graph.gradient_override_map({"Sign": "Identity"}):
            outputs = tf.sign(inputs)
        return outputs
| StarcoderdataPython |
1640429 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
from gensim.summarization.bm25 import get_bm25_weights
from gensim.test.utils import common_texts
class TestBM25(unittest.TestCase):
    """Sanity checks for gensim's BM25 weighting implementation."""

    def test_max_match_with_itself(self):
        """A document must achieve its maximum BM25 score against itself."""
        weights = get_bm25_weights(common_texts)
        for doc_index, doc_weights in enumerate(weights):
            self.assertAlmostEqual(max(doc_weights), doc_weights[doc_index])

    def test_with_generator(self):
        """The self-match property must also hold for generator input."""
        weights = get_bm25_weights(doc for doc in common_texts)
        for doc_index, doc_weights in enumerate(weights):
            self.assertAlmostEqual(max(doc_weights), doc_weights[doc_index])

    def test_nonnegative_weights(self):
        """Every weight for every document must be non-negative."""
        for doc_weights in get_bm25_weights(common_texts):
            for weight in doc_weights:
                self.assertTrue(weight >= 0.)

    def test_same_match_with_same_document(self):
        """Identical documents must be weighted identically."""
        corpus = [['cat', 'dog', 'mouse'], ['cat', 'lion'], ['cat', 'lion']]
        weights = get_bm25_weights(corpus)
        self.assertAlmostEqual(weights[0][1], weights[0][2])

    def test_disjoint_docs_if_weight_zero(self):
        """Documents sharing no terms must not match at all."""
        corpus = [['cat', 'dog', 'lion'], ['goat', 'fish', 'tiger']]
        weights = get_bm25_weights(corpus)
        self.assertAlmostEqual(weights[0][1], 0)
        self.assertAlmostEqual(weights[1][0], 0)

    def test_multiprocessing(self):
        """The worker count must not change the result."""
        baseline = get_bm25_weights(common_texts)
        two_jobs = get_bm25_weights(common_texts, n_jobs=2)
        all_cores = get_bm25_weights(common_texts, n_jobs=-1)
        self.assertAlmostEqual(baseline, two_jobs)
        self.assertAlmostEqual(baseline, all_cores)
        self.assertAlmostEqual(two_jobs, all_cores)
# Allow running this test module directly with verbose logging.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| StarcoderdataPython |
6510944 | from fpdf import FPDF #PDF_In_Columns inherites from this class
"""
This class is used to create PDF files formated into two columns
In the context of Exam PDF Generator, it's only used in for the Exam sheet
"""
class PDF_In_Columns(FPDF):
    """PDF document laid out in two columns.

    In the context of the Exam PDF Generator it is only used for the exam
    sheet.
    """

    def __init__(self):
        super().__init__()
        self.col = 0    # current column being filled (0-indexed)
        self.y0 = None  # y coordinate within the page where a column starts
        # Robustness fix: accept_page_break() reads this flag, but it was
        # never initialized here, so an AttributeError was possible if no
        # caller set it first. True means a large image spanning both
        # columns is being placed.
        self.img_trg = False

    def set_col(self, col):
        """Point the document cursor at column *col* and update margins."""
        self.col = col
        # Each column is 100 dots wide; 10 dots of margin between columns.
        x = 10 + (col * 100)
        # The left margin is the reference position for the given column.
        self.set_left_margin(x)
        # Move the cursor to where the current column begins.
        self.set_x(x)

    def footer(self):
        """Render the page-number footer on every page."""
        # Reset the margin so the footer does not depend on the current
        # column's margin.
        self.set_left_margin(10)
        # Place the footer 10 mm above the bottom of the page.
        self.set_y(-10)
        self.set_font('Arial', 'I', 8)
        self.cell(0, 10, 'Pág. ' + str(self.page), 0, 0, 'C')

    def accept_page_break(self):
        """Decide whether a page break is needed.

        cell() and multi_cell() call this every time they run. A break is
        only accepted once both columns are full; otherwise the cursor
        simply moves to the next column of the same page.

        Returns: bool, indicating whether a page break will be performed.
        """
        if self.img_trg:
            # A full-width image is being placed: never break automatically.
            # (The original code fell through and returned None here, which
            # is equally falsy.)
            return False
        # Column start depends on the header height of the page.
        if self.page == 1:
            # NOTE: must track the exam-sheet header size (what get_y()
            # returns right after the header is drawn).
            self.y0 = 50
        else:
            # NOTE: this value depends on the top/bottom margin.
            self.y0 = 10
        if self.col < 1:
            # There is room for another column: move to it, no page break.
            self.set_col(self.col + 1)
            self.set_y(self.y0)
            return False
        # Both columns are full: reset the column tracker and break.
        self.set_col(0)
        return True

    def image_mod(self, name, x=None, y=None, w=0, h=0, type='', link=''):
        """Insert an image in the current column.

        This wrapper exists because calling the inherited image() directly
        would dispatch to FPDF.accept_page_break instead of the override in
        this class, and it distinguishes a big two-column image from a
        small per-question figure.

        :param name: path to the image file
        :param x: x coordinate where the image is inserted
        :param y: y coordinate where the image is inserted
        :param w: width of the inserted image
        :param h: height of the inserted image
        :param type: image format; derived from the file extension if empty
        :param link: target address if the image is used as a hyperlink
        """
        # Bug fix: forward the caller's arguments. The original hard-coded
        # x=None, y=None, type='' and link='' in the super() call, silently
        # discarding whatever the caller passed.
        super().image(name, x=x, y=y, w=w, h=h, type=type, link=link)
#!/usr/bin/env python2
# Read and display the current clock of a Nonin 3150 device attached to
# the serial port given as the first command-line argument.
import sys
import time
from Nonin import *
# Poll until the device answers: it may not be ready immediately after
# being plugged in, so retry once per second until a read succeeds.
finished = False
while not finished:
    try:
        # device = Nonin3150.get_device()
        device = sys.argv[1]
        nonin = Nonin3150(device)
        current_time = nonin.get_current_time()
        finished = True
    except Exception:
        # Best-effort retry; the device was busy or not yet attached.
        # NOTE(review): this loops forever if argv[1] is missing or wrong.
        time.sleep(1)
print 'Time Information'
print current_time.strftime(' Time on Nonin, GMT: %Y-%m-%d %H:%M:%S UDT')
print current_time.astimezone(tz.tzlocal()).strftime(' Time on Nonin, translated to local timezone: %Y-%m-%d %H:%M:%S%z')
| StarcoderdataPython |
8062672 | <gh_stars>10-100
from rlscore.learner import KronRLS
from rlscore.measure import cindex
import davis_data
def main():
    """Train Kronecker RLS on the Davis setting-C split and print the test
    concordance index for a grid of regularization parameters."""
    X1_train, X2_train, Y_train, X1_test, X2_test, Y_test = davis_data.settingC_split()
    model = KronRLS(X1=X1_train, X2=X2_train, Y=Y_train)
    # Sweep the regularization parameter over powers of two.
    for exponent in range(15, 35):
        model.solve(2.**exponent)
        predictions = model.predict(X1_test, X2_test)
        score = cindex(Y_test, predictions)
        print("regparam 2**%d, cindex %f" % (exponent, score))
# Run the demo only when executed as a script.
if __name__=="__main__":
    main()
| StarcoderdataPython |
3344988 | from nipype.interfaces.base import (TraitedSpec, File, traits, CommandLine, InputMultiPath,
CommandLineInputSpec)
from nipype.utils.filemanip import fname_presuffix
import os
class ImageReconInputSpec(CommandLineInputSpec):
    """Inputs for ``dsi_studio --action=rec`` (image reconstruction).

    NOTE(review): positions -4..-2 leave slot -1 free; presumably another
    trailing argument is appended by the base command — confirm against
    the caller.
    """
    # Source diffusion data in DSI Studio's .src format.
    in_file = File(
        exists=True,
        argstr='--source=%s',
        mandatory=True,
        position=-4,
        desc='input .src file')
    # Index of the reconstruction method understood by dsi_studio.
    method = traits.Int(
        exists=True,
        argstr='--method=%d',
        mandatory=True,
        position=-3,
        desc='Method index')
    # Method parameter; also embedded in the predicted output file name.
    param0 = traits.Float(
        exists=True,
        argstr='--param0=%f',
        mandatory=True,
        position=-2,
        desc='Parameters')
class ImageReconOutputSpec(TraitedSpec):
    """Outputs of the reconstruction: the generated .fib file."""
    out_file = File(exists=True, desc='output data in .fib format')
class ImageRecon(CommandLine):
    """Nipype interface wrapping ``dsi_studio --action=rec``."""
    _cmd = 'dsi_studio --action=rec'
    input_spec = ImageReconInputSpec
    output_spec = ImageReconOutputSpec

    def _list_outputs(self):
        """Predict the .fib.gz path dsi_studio writes next to the source."""
        outputs = self.output_spec().get()
        source = self.inputs.in_file
        param_text = str(self.inputs.param0)
        # dsi_studio appends this fixed suffix (with param0 embedded) to
        # the source file name.
        fib_suffix = '.odf8.f5.rdi.gqi.' + param_text + '.fib.gz'
        outputs['out_file'] = os.path.abspath(
            fname_presuffix("", prefix=source, suffix=fib_suffix))
        return outputs
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.