id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3480816 | #!/usr/bin/env python
'''
Description: this function translates, speaks, and records a user-specified number of secrets from a file
Author: <NAME>
'''
import os, sys
import json
import collections
import random
import utils
from pprint import pprint
from playsound import playsound
import datetime
from six import string_types
from boto3 import Session
from botocore.exceptions import BotoCoreError, ClientError
from contextlib import closing
from pollySpeak import newSession, pollySpeech, speakSecrets
from concatMp3 import concatMp3
USAGE_TEXT = """
Usage: secretReader.py <number of secrets to read> <voice>
"""
# Defaults (integrate this into main, and with keyword args)
from params import *
def usage():
    """Print the command-line usage banner and abort with exit status -1."""
    print(USAGE_TEXT)
    sys.exit(-1)
def main(argv):
    """Translate, speak, and record a user-specified number of secrets.

    argv -- CLI arguments (without the program name):
            argv[0] is the number of secrets to read,
            argv[1] is the Polly voice id, or 'random' to pick voices
            randomly per secret.
    """
    # BUG FIX: argv[1] is read below, so both arguments are required;
    # the original only checked for at least one and crashed with
    # IndexError when the voice was omitted.
    if len(argv) < 2:
        usage()
    # Validate the count instead of crashing with an unhandled ValueError.
    try:
        num_secrets = int(argv[0])
    except ValueError:
        print('Error: <number of secrets to read> must be an integer.')
        usage()
    print(language)
    voice = argv[1]
    randVoice = (voice == 'random')
    # Unknown ids (including 'random') fall back to the first configured
    # voice; the actual random selection is driven by randVoice.
    if voice not in voiceIds:
        voice = voiceIds[0]
    # NOTE(review): 'voice' is normalized but never passed on below;
    # speakSecrets receives the full voiceIds list — confirm intended.
    # Load in secrets
    with open(datapath) as data_file:
        secrets = json.load(data_file)
    if len(secrets) == 0:
        print('Error: no secrets loaded. Please check the file referenced in params.py.')
        sys.exit(-1)
    # Generalized from == 0: a negative count is equally meaningless.
    if num_secrets <= 0:
        print('Error: no secrets requested.')
        usage()
    if num_secrets > len(secrets):
        print('Warning: number of secrets requested > number of secrets available ({} requested, {} available)'.format(num_secrets, len(secrets)))
    # create a new folder with this timestamp to save the secret files in
    mp3path = utils.createTimestampedDir(mp3path_base)
    speakSecrets(secrets[0:num_secrets], voiceIds, mp3path,
                 shuffleSecrets=shuffleSecrets,
                 randVoice=randVoice,
                 translate_lang=translate_lang,
                 ssml=ssml,
                 whisperFreq=whisperFreq,
                 language=language,
                 target_lang=target_lang,
                 concatSecretMp3s=concatSecretMp3s,
                 mp3_padding=mp3_padding)
if __name__ == "__main__":
main(sys.argv[1:])
| StarcoderdataPython |
8148683 | <filename>Project/Test.py
import unittest
import numpy as np
from unittest.mock import patch
import Transformation as my
atolerance, rtolerance = 0.01, 0.01
class TestPolygon(unittest.TestCase):
    """Exercise Transformation.Polygon's rotate/translate/scale methods.

    Each method is expected to return the transformed (x, y) coordinate
    arrays, compared with loose absolute/relative tolerances.

    NOTE(review): the expected values indicate that each transform mutates
    the shared class-level polygon and that tests run in name order
    (e.g. test_2's expectation equals test_1's result shifted by (2, 2)) —
    confirm this ordering assumption is intended.
    """
    # P1 is the square and P2 a pentagon
    # Vertices are rows of homogeneous coordinates [x, y, 1].
    inp = np.array([[1.0, 1.0, 1.0], [1.0, 5.0, 1.0],
                    [5.0, 5.0, 1.0], [5.0, 1.0, 1.0]])
    P1 = my.Polygon(inp)
    inp2 = np.array([[2.0, 1.0, 1.0], [4.0, 1.0, 1.0], [
        5.0, 2.0, 1.0], [3.0, 3.0, 1.0], [1.0, 2.0, 1.0]])
    P2 = my.Polygon(inp2)
    # P3 is a square with one corner at the origin (used for rotation
    # about an arbitrary point).
    inp3 = np.array([[0.0, 0.0, 1.0], [4.0, 0.0, 1.0],
                     [4.0, 4.0, 1.0], [0.0, 4.0, 1.0]])
    P3 = my.Polygon(inp3)

    def test_1(self):
        # Rotate the square by 90 degrees.
        print('test_1 starts')
        user_output = self.P1.rotate(90)
        exp_output = (np.array([1.0, 5.0, 5.0, 1.0]),
                      np.array([-1.0, -1.0, -5.0, -5.0]))
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_1 ends')

    def test_2(self):
        # Translate by (2, 2) — expectations build on test_1's rotation.
        print('test_2 starts')
        user_output = self.P1.translate(2, 2)
        exp_output = (np.array([3.0, 7.0, 7.0, 3.0]),
                      np.array([1.0, 1.0, -3.0, -3.0]))
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_2 ends')

    def test_3(self):
        # Scale by (3, 2).
        print('test_3 starts')
        user_output = self.P1.scale(3, 2)
        exp_output = (np.array([-1.0, 11.0, 11.0, -1.0]),
                      np.array([3.0, 3.0, -5.0, -5.0]))
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_3 ends')

    def test_4(self):
        # Negative scale factors on the pentagon.
        print('test_4 starts')
        user_output = self.P2.scale(-2, -2)
        exp_output = (np.array([5.0, 1.0, -1.0, 3.0, 7.0]),
                      np.array([3.4, 3.4, 1.4, -0.6, 1.4]))
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_4 ends')

    def test_5(self):
        # Negative-angle rotation on the pentagon.
        print('test_5 starts')
        user_output = self.P2.rotate(-45)
        exp_output = (np.array([1.13137085, -1.69705627, -1.69705627, 2.54558441, 3.95979797]),
                      np.array([5.93969696, 3.11126984, 0.28284271, 1.69705627, 5.93969696]))
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_5 ends')

    def test_6(self):
        # Fractional scale factors on the pentagon.
        print('test_6 starts')
        user_output = self.P2.scale(0.5, 0.3)
        exp_output = (np.array([0.98994949, -0.42426407, -0.42426407, 1.69705627, 2.40416306]),
                      np.array([4.15778787, 3.30925974, 2.4607316, 2.88499567, 4.15778787]))
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_6 ends')

    def test_7(self):
        # Rotate P3 by 45 degrees about the point (2, 2) rather than origin.
        print('test_7 starts')
        user_output = self.P3.rotate(45, 2, 2)
        exp_output = (np.array([-0.82842712, 2.0, 4.82842712, 2.0]),
                      np.array([2.0, -0.82842712, 2.0, 4.82842712]))
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_7 ends')
class TestCircle(unittest.TestCase):
    """Exercise Transformation.Circle's rotate/scale/translate methods.

    Results are (center_x, center_y, radius) tuples.

    NOTE(review): expectations chain across tests (test_2/test_3 start from
    where earlier tests left C1), so the shared class-level circle is
    mutated and tests are assumed to run in name order — confirm.
    """
    C1 = my.Circle(2.0, 2.0, 3.0)  # 2,2 is center and 3 is radius
    C2 = my.Circle(2.0, 2.0, 3.0)  # 2,2 is center and 3 is radius

    def test_1(self):
        # Rotate C1 by 45 degrees; the radius must be unchanged.
        print('test_1 starts')
        user_output = self.C1.rotate(45)
        exp_output = (2.8284271247461903, 0.0, 3.0)
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_1 ends')

    def test_2(self):
        # Halve the radius; the center stays where test_1 left it.
        print('test_2 starts')
        user_output = self.C1.scale(0.5)
        exp_output = (2.8284271247461903, 0.0, 1.5)
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_2 ends')

    def test_3(self):
        # Translate the center by (-3, 3); radius unchanged.
        print('test_3 starts')
        user_output = self.C1.translate(-3, 3)
        exp_output = (-0.1715728752538097, 3.0, 1.5)
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_3 ends')

    def test_4(self):
        # Rotate the untouched C2 by 45 degrees about the point (4, 4).
        print('test_4 starts')
        user_output = self.C2.rotate(45, 4, 4)
        exp_output = (1.1715728752538097, 4.0, 3.0)
        print(f'returned value {user_output}')
        print(f'expected value {exp_output}')
        np.testing.assert_allclose(exp_output, user_output, rtol=rtolerance, atol=atolerance)
        print('test_4 ends')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
9646962 | <gh_stars>10-100
from digideep.environment.common.vec_env import VecEnvWrapper
class VecRandomState(VecEnvWrapper):
    """Vectorized-env wrapper exposing the wrapped env's RNG state.

    step_wait/reset are pure pass-throughs; state_dict/load_state_dict
    snapshot and restore the underlying environment's random state so a
    training run can be checkpointed reproducibly.
    """

    def __init__(self, venv):
        super().__init__(venv)

    def step_wait(self):
        # Delegate directly to the wrapped vectorized environment.
        return self.venv.step_wait()

    def reset(self):
        return self.venv.reset()

    def state_dict(self):
        """Capture the wrapped environment's RNG state."""
        return self.venv.unwrapped.get_rng_state()

    def load_state_dict(self, state_dict):
        """Restore a previously captured RNG state."""
        self.venv.unwrapped.set_rng_state(state_dict)
| StarcoderdataPython |
11207850 | <filename>python/ray/tests/test_projects.py
import jsonschema
import os
import pytest
import subprocess
import yaml
from click.testing import CliRunner
import sys
from unittest.mock import patch, DEFAULT
from contextlib import contextmanager
from ray.projects.scripts import (session_start, session_commands,
session_execute)
import ray
TEST_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "project_files")
def load_project_description(project_file):
    """Parse and return the YAML project description under TEST_DIR."""
    full_path = os.path.join(TEST_DIR, project_file)
    with open(full_path) as handle:
        return yaml.safe_load(handle)
def test_validation():
    # Projects with valid schemas of each supported flavor load cleanly.
    project_dirs = ["docker_project", "requirements_project", "shell_project"]
    for project_dir in project_dirs:
        project_dir = os.path.join(TEST_DIR, project_dir)
        ray.projects.ProjectDefinition(project_dir)
    # Schema violations surface as jsonschema ValidationErrors.
    bad_schema_dirs = ["no_project1"]
    for project_dir in bad_schema_dirs:
        project_dir = os.path.join(TEST_DIR, project_dir)
        with pytest.raises(jsonschema.exceptions.ValidationError):
            ray.projects.ProjectDefinition(project_dir)
    # Missing or malformed project files surface as ValueErrors instead.
    bad_project_dirs = ["no_project2", "noproject3"]
    for project_dir in bad_project_dirs:
        project_dir = os.path.join(TEST_DIR, project_dir)
        with pytest.raises(ValueError):
            ray.projects.ProjectDefinition(project_dir)
def test_project_root():
    # The project root is resolved from the project directory itself...
    path = os.path.join(TEST_DIR, "project1")
    project_definition = ray.projects.ProjectDefinition(path)
    assert os.path.normpath(project_definition.root) == os.path.normpath(path)
    # ...and from any subdirectory within the project.
    path2 = os.path.join(TEST_DIR, "project1", "subdir")
    project_definition = ray.projects.ProjectDefinition(path2)
    assert os.path.normpath(project_definition.root) == os.path.normpath(path)
    # A directory with no project file anywhere above it must raise.
    path3 = ray.utils.get_user_temp_dir() + os.sep
    with pytest.raises(ValueError):
        project_definition = ray.projects.ProjectDefinition(path3)
def test_project_validation():
    """`ray project validate` exits cleanly inside a well-formed project."""
    project_path = os.path.join(TEST_DIR, "project1")
    subprocess.check_call(["ray", "project", "validate"], cwd=project_path)
def test_project_no_validation():
    # TEST_DIR itself holds no project file, so validation must fail.
    with pytest.raises(subprocess.CalledProcessError):
        subprocess.check_call(["ray", "project", "validate"], cwd=TEST_DIR)
@contextmanager
def _chdir_and_back(d):
old_dir = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(old_dir)
def run_test_project(project_dir, command, args):
    """Invoke a ray-projects CLI *command* from inside *project_dir*.

    Returns (click invocation result, dict of patched mocks, absolute
    test directory).
    """
    # Run the CLI commands with patching
    test_dir = os.path.join(TEST_DIR, project_dir)
    with _chdir_and_back(test_dir):
        runner = CliRunner()
        # Stub out every cluster-touching operation so no real cluster,
        # rsync, or remote execution is needed.
        with patch.multiple(
                "ray.projects.scripts",
                create_or_update_cluster=DEFAULT,
                rsync=DEFAULT,
                exec_cluster=DEFAULT,
        ) as mock_calls:
            result = runner.invoke(command, args)
            return result, mock_calls, test_dir
def test_session_start_default_project():
    """session_start on the default session launches, rsyncs, and execs."""
    result, mock_calls, test_dir = run_test_project(
        os.path.join("session-tests", "project-pass"), session_start,
        ["default"])
    loaded_project = ray.projects.ProjectDefinition(test_dir)
    assert result.exit_code == 0
    # Part 1/3: Cluster Launching Call
    create_or_update_cluster_call = mock_calls["create_or_update_cluster"]
    assert create_or_update_cluster_call.call_count == 1
    _, kwargs = create_or_update_cluster_call.call_args
    assert kwargs["config_file"] == loaded_project.cluster_yaml()
    # Part 2/3: Rsync Calls
    rsync_call = mock_calls["rsync"]
    # 1 for rsyncing the project directory, 1 for rsyncing the
    # requirements.txt.
    assert rsync_call.call_count == 2
    _, kwargs = rsync_call.call_args
    assert kwargs["source"] == loaded_project.config["environment"][
        "requirements"]
    # Part 3/3: Exec Calls
    exec_cluster_call = mock_calls["exec_cluster"]
    commands_executed = []
    for _, kwargs in exec_cluster_call.call_args_list:
        # Strip the "cd <workdir>; " prefix prepended to every remote cmd.
        commands_executed.append(kwargs["cmd"].replace(
            "cd {}; ".format(loaded_project.working_directory()), ""))
    expected_commands = loaded_project.config["environment"]["shell"]
    expected_commands += [
        command["command"] for command in loaded_project.config["commands"]
    ]
    if "requirements" in loaded_project.config["environment"]:
        # BUG FIX: the original wrote any("pip install -r" for cmd in ...),
        # which iterates over a constant truthy string and always passes.
        # Check membership in each executed command instead.
        assert any("pip install -r" in cmd for cmd in commands_executed)
        # pop the `pip install` off commands executed
        commands_executed = [
            cmd for cmd in commands_executed if "pip install -r" not in cmd
        ]
    assert expected_commands == commands_executed
def test_session_execute_default_project():
    # session_execute runs the configured commands without re-deploying.
    result, mock_calls, test_dir = run_test_project(
        os.path.join("session-tests", "project-pass"), session_execute,
        ["default"])
    loaded_project = ray.projects.ProjectDefinition(test_dir)
    assert result.exit_code == 0
    # No file sync and no cluster (re)launch should happen on execute.
    assert mock_calls["rsync"].call_count == 0
    assert mock_calls["create_or_update_cluster"].call_count == 0
    exec_cluster_call = mock_calls["exec_cluster"]
    commands_executed = []
    for _, kwargs in exec_cluster_call.call_args_list:
        # Strip the "cd <workdir>; " prefix prepended to every remote cmd.
        commands_executed.append(kwargs["cmd"].replace(
            "cd {}; ".format(loaded_project.working_directory()), ""))
    expected_commands = [
        command["command"] for command in loaded_project.config["commands"]
    ]
    assert expected_commands == commands_executed
    # --shell executes an ad-hoc command instead of a configured one.
    result, mock_calls, test_dir = run_test_project(
        os.path.join("session-tests", "project-pass"), session_execute,
        ["--shell", "uptime"])
    assert result.exit_code == 0
def test_session_start_docker_fail():
    """Docker-based projects are rejected with a clear error message."""
    result, _, _ = run_test_project(
        os.path.join("session-tests", "with-docker-fail"), session_start, [])
    assert result.exit_code == 1
    expected_message = ("Docker support in session is currently "
                        "not implemented")
    assert expected_message in result.output
def test_session_invalid_config_errored():
    # An invalid project config should abort session_start...
    result, _, _ = run_test_project(
        os.path.join("session-tests", "invalid-config-fail"), session_start,
        [])
    assert result.exit_code == 1
    assert "validation failed" in result.output
    # ...and check that we are displaying an actionable error message.
    assert "ray project validate" in result.output
def test_session_create_command():
    # Start the "first" session command with both template arguments bound.
    result, mock_calls, test_dir = run_test_project(
        os.path.join("session-tests", "commands-test"), session_start,
        ["first", "--a", "1", "--b", "2"])
    # Verify the project can be loaded.
    ray.projects.ProjectDefinition(test_dir)
    assert result.exit_code == 0
    exec_cluster_call = mock_calls["exec_cluster"]
    found_command = False
    # The bound argument values must be substituted into the executed cmd.
    for _, kwargs in exec_cluster_call.call_args_list:
        if "Starting ray job with 1 and 2" in kwargs["cmd"]:
            found_command = True
    assert found_command
def test_session_create_multiple():
    # A "*" wildcard for one parameter expands into one session per value.
    for args in [{"a": "*", "b": "2"}, {"a": "1", "b": "*"}]:
        result, mock_calls, test_dir = run_test_project(
            os.path.join("session-tests", "commands-test"), session_start,
            ["first", "--a", args["a"], "--b", args["b"]])
        loaded_project = ray.projects.ProjectDefinition(test_dir)
        assert result.exit_code == 0
        exec_cluster_call = mock_calls["exec_cluster"]
        commands_executed = []
        for _, kwargs in exec_cluster_call.call_args_list:
            commands_executed.append(kwargs["cmd"].replace(
                "cd {}; ".format(loaded_project.working_directory()), ""))
        # Setup runs once per expanded session (two sessions per wildcard).
        assert commands_executed.count("echo \"Setting up\"") == 2
        if args["a"] == "*":
            assert commands_executed.count(
                "echo \"Starting ray job with 1 and 2\"") == 1
            assert commands_executed.count(
                "echo \"Starting ray job with 2 and 2\"") == 1
        if args["b"] == "*":
            assert commands_executed.count(
                "echo \"Starting ray job with 1 and 1\"") == 1
            assert commands_executed.count(
                "echo \"Starting ray job with 1 and 2\"") == 1
    # Using multiple wildcards shouldn't work
    result, mock_calls, test_dir = run_test_project(
        os.path.join("session-tests", "commands-test"), session_start,
        ["first", "--a", "*", "--b", "*"])
    assert result.exit_code == 1
def test_session_commands():
    """session_commands lists each command and its documented parameters."""
    outcome, _, _ = run_test_project(
        os.path.join("session-tests", "commands-test"), session_commands, [])
    expected_snippets = (
        "This is the first parameter",
        "This is the second parameter",
        'Command "first"',
        'Command "second"',
    )
    for snippet in expected_snippets:
        assert snippet in outcome.output
if __name__ == "__main__":
    # Make subprocess happy in bazel: force a UTF-8 locale for child
    # processes spawned by the tests.
    os.environ["LC_ALL"] = "en_US.UTF-8"
    os.environ["LANG"] = "en_US.UTF-8"
    sys.exit(pytest.main(["-v", __file__]))
| StarcoderdataPython |
3349224 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest.mock import MagicMock, patch
from boto.compat import json # type: ignore
from airflow.contrib.operators.dynamodb_to_s3 import DynamoDBToS3Operator
class DynamodbToS3Test(unittest.TestCase):
    """Verify DynamoDBToS3Operator streams scanned items into S3."""

    def setUp(self):
        # Collects every JSON line "uploaded" by the mocked S3 client.
        self.output_queue = []

    def mock_upload_file(self, Filename, Bucket, Key):  # pylint: disable=unused-argument,invalid-name
        # Stand-in for S3Hook's upload_file: record the file's JSON lines
        # locally instead of talking to S3.
        with open(Filename) as f:
            lines = f.readlines()
            for line in lines:
                self.output_queue.append(json.loads(line))

    @patch('airflow.contrib.operators.dynamodb_to_s3.S3Hook')
    @patch('airflow.contrib.operators.dynamodb_to_s3.AwsDynamoDBHook')
    def test_dynamodb_to_s3_success(self, mock_aws_dynamodb_hook, mock_s3_hook):
        # Two scan pages: the first carries a LastEvaluatedKey, so the
        # operator must paginate; the second page is final.
        responses = [
            {
                'Items': [{'a': 1}, {'b': 2}],
                'LastEvaluatedKey': '123',
            },
            {
                'Items': [{'c': 3}],
            },
        ]
        table = MagicMock()
        table.return_value.scan.side_effect = responses
        mock_aws_dynamodb_hook.return_value.get_conn.return_value.Table = table
        s3_client = MagicMock()
        s3_client.return_value.upload_file = self.mock_upload_file
        mock_s3_hook.return_value.get_conn = s3_client
        dynamodb_to_s3_operator = DynamoDBToS3Operator(
            task_id='dynamodb_to_s3',
            dynamodb_table_name='airflow_rocks',
            s3_bucket_name='airflow-bucket',
            file_size=4000,
        )
        dynamodb_to_s3_operator.execute(context={})
        # All items from both pages must arrive, in scan order.
        self.assertEqual([{'a': 1}, {'b': 2}, {'c': 3}], self.output_queue)
| StarcoderdataPython |
8178994 | <filename>src/example/graph_test.py
import os,sys
sys.path.extend("../")
from lib_piglet.domains import graph
from lib_piglet.expanders.graph_expander import graph_expander
from lib_piglet.search.tree_search import tree_search
from lib_piglet.search.graph_search import graph_search
from lib_piglet.utils.data_structure import bin_heap,stack,queue
from lib_piglet.search.search_node import compare_node_g, compare_node_f
from lib_piglet.cli.cli_tool import print_header, statistic_template
from lib_piglet.heuristics import graph_h
from lib_piglet.search.iterative_deepening import iterative_deepening, ID_threshold
# Locate the sample graph shipped next to this example script.
file_folder = os.path.dirname(os.path.abspath(__file__))
inputfile = os.path.join(file_folder, "graphmap/sample.graph")
gm = graph.graph(inputfile)
expander = graph_expander(gm)
print_header()
# Tree search with a LIFO frontier (depth-first behaviour).
search = tree_search(stack(), expander, time_limit=10)
path = search.get_path(gm.get_vertex(1), gm.get_vertex(5))
print(statistic_template.format("", "", *[str(x) for x in search.get_statistic()], str(search.solution_)))
# Tree search with a FIFO frontier (breadth-first behaviour).
search = tree_search(queue(), expander, time_limit=10)
path = search.get_path(gm.get_vertex(1), gm.get_vertex(5))
print(statistic_template.format("", "", *[str(x) for x in search.get_statistic()], str(search.solution_)))
# Graph search (with duplicate detection), FIFO then LIFO frontiers.
search = graph_search(queue(), expander, time_limit=10)
path = search.get_path(gm.get_vertex(1), gm.get_vertex(5))
print(statistic_template.format("", "", *[str(x) for x in search.get_statistic()], str(search.solution_)))
search = graph_search(stack(), expander, time_limit=10)
path = search.get_path(gm.get_vertex(1), gm.get_vertex(5))
print(statistic_template.format("", "", *[str(x) for x in search.get_statistic()], str(search.solution_)))
# Graph search with a binary heap ordered by g (uniform-cost style).
search = graph_search(bin_heap(compare_node_g), expander, time_limit=10)
path = search.get_path(gm.get_vertex(1), gm.get_vertex(5))
print(statistic_template.format("", "", *[str(x) for x in search.get_statistic()], str(search.solution_)))
# Heap ordered by f with a straight-line heuristic (A*-style).
search = graph_search(bin_heap(compare_node_f), expander, heuristic_function=graph_h.straight_heuristic, time_limit=10)
path = search.get_path(gm.get_vertex(1), gm.get_vertex(5))
print(statistic_template.format("", "", *[str(x) for x in search.get_statistic()], str(search.solution_)))
# search = iterative_deepening(stack(), expander,heuristic_function=graph_h.straight_heuristic,time_limit=10)
# path = search.get_path(gm.get_vertex(1),gm.get_vertex(5),threshold_type=ID_threshold.cost)
# print(statistic_template.format("","",*[str(x) for x in search.get_statistic()], str(search.solution_)))
# Iterative deepening bounded by depth, no heuristic.
search = iterative_deepening(stack(), expander, heuristic_function=None, time_limit=10)
path = search.get_path(gm.get_vertex(1), gm.get_vertex(5), threshold_type=ID_threshold.depth)
print(statistic_template.format("", "", *[str(x) for x in search.get_statistic()], str(search.solution_)))
| StarcoderdataPython |
# Convert the text file into an array and render it in 3D.
import LibraryTT.txt2array as conversion

a = conversion.txt2array()
conversion.imprimir3D(a)
1991576 | <reponame>texttheater/tbsp<gh_stars>1-10
import clf
import pathlib
import quantities
import sys
import unittest
class QuantityTestCase(unittest.TestCase):
    """Check quantities.guess_quantity against PMB gold annotations."""

    def setUp(self):
        # Paths into the PMB 2.2.0 release, relative to this test file.
        gold_path = (pathlib.Path(__file__).parent.parent / 'data' /
                     'pmb-2.2.0' / 'gold' / 'train.txt')
        silver_path = (pathlib.Path(__file__).parent.parent / 'data' /
                       'pmb-2.2.0' / 'silver' / 'train.txt')
        drss = []
        # NOTE(review): silver_path is computed but only gold data is read
        # here — confirm whether silver was meant to be included.
        for path in (gold_path,):
            with open(path) as f:
                drss.extend(clf.read(f))
        self.cases = []
        # Collect (lowercased word, literal) pairs for Quantity/EQU clauses
        # whose third argument is a plain literal (neither a constant nor a
        # discourse referent).
        for drs in drss:
            for word, fragment in zip(*drs):
                for clause in fragment:
                    if (clause[1] in ('Quantity', 'EQU')
                            and not clf.is_constant(clause[3])
                            and not clf.is_ref(clause[3])):
                        self.cases.append((word.lower(), clause[3]))

    def test_quantity(self):
        for word, gold in self.cases:
            print(word, gold)
            pred = quantities.quote(quantities.guess_quantity(word))
            if gold == '"?"':
                # Unknown gold quantities accept either "+" or "?".
                self.assertIn(pred, ('"+"', '"?"'))
            elif (word, gold) == ('dozen', '"6"'):
                # The corpus annotates 'dozen' as 6; expect 12 instead.
                self.assertEqual(pred, '"12"')
            else:
                self.assertEqual(pred, gold)
| StarcoderdataPython |
161840 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
import time
import numpy as np
import os
root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
import sys
sys.path.append(root_dir)
from adversarial_robustness.cnns import *
from adversarial_robustness.datasets.svhn import SVHN
from adversarial_robustness.datasets.notmnist import notMNIST
from adversarial_robustness.datasets.mnist import MNIST
import pickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--savedir", type=str,
help="Place to save model")
parser.add_argument(
"--name", type=str, default="",
help="Model name")
parser.add_argument(
"--dataset", type=str, default="",
help="Dataset")
parser.add_argument(
"--l2cs", type=float, default=0.0,
help="L2 certainty sensitivity penalty")
parser.add_argument(
"--l2dbl", type=float, default=0.0,
help="L2 double backprop penalty")
parser.add_argument(
"--lr", type=float, default=0.0002,
help="learning rate")
parser.add_argument(
"--adameps", type=float, default=1e-04,
help="adam epsilon")
parser.add_argument(
"--advtraineps", type=float, default=0.0,
help="adversarial training epsilon")
parser.add_argument(
"--distilltemp", type=float, default=1.0,
help="temperature for distillation")
parser.add_argument(
"--batchsize", type=int, default=256,
help="batch size")
parser.add_argument(
"--nbatches", type=int, default=15000,
help="number of batches")
FLAGS = parser.parse_args()
name = FLAGS.name
model_dir = FLAGS.savedir
adv_X_dir = root_dir + '/cached/fgsm'
if FLAGS.dataset == 'mnist':
dataset = MNIST()
CNN = MNIST_CNN
fgsm_file = adv_X_dir + '/mnist-normal-fgsm-perturbation.npy'
elif FLAGS.dataset == 'notmnist':
dataset = notMNIST()
CNN = MNIST_CNN
fgsm_file = adv_X_dir + '/notmnist-normal-fgsm-perturbation.npy'
elif FLAGS.dataset == 'svhn':
dataset = SVHN()
CNN = SVHN_CNN
fgsm_file = adv_X_dir + '/svhn-normal-fgsm-perturbation.npy'
X = dataset.X
y = dataset.onehot_y
Xt = dataset.Xt[:1024]
yt = dataset.onehot_yt[:1024]
clip_min = dataset.X.min()
clip_max = dataset.X.max()
dX = np.sign(np.load(fgsm_file))[:1024]
def _fgsm(eps):
    """Return the test inputs shifted by *eps* along the precomputed FGSM
    sign direction, clipped to the dataset's value range."""
    shifted = Xt[:len(dX)] + eps * dX
    return np.clip(shifted, clip_min, clip_max)
fgsm = { 0.1: _fgsm(0.1), 0.2: _fgsm(0.2), 0.3: _fgsm(0.3) }
epses = [0.1, 0.2, 0.3]
scores = {}
train_curves = {}
train_curves['batch_number'] = []
train_curves['batch_accuracy'] = []
train_curves['cross_entropy'] = []
train_curves['l2_grad_logp_true'] = []
train_curves['l2_grad_logp_rest'] = []
train_curves['l2_grad_logp_all'] = []
train_curves['l2_param_grads'] = []
train_curves['adv_accuracy'] = []
train_curves['test_accuracy'] = []
batch_size = FLAGS.batchsize
num_batches = FLAGS.nbatches
num_epochs = int(np.ceil(num_batches / (len(X) / batch_size)))
print(num_epochs)
if FLAGS.distilltemp > 1.01:
print('distillation')
num_batches2 = min(FLAGS.nbatches, 10000)
num_epochs2 = int(np.ceil(num_batches2 / (len(X) / batch_size)))
cnn2 = CNN()
cnn2.fit(X, y, softmax_temperature=FLAGS.distilltemp, learning_rate=FLAGS.lr, epsilon=FLAGS.adameps, num_epochs=num_epochs2, batch_size=batch_size)
yhat = tf.nn.softmax(cnn2.logits/FLAGS.distilltemp)
with tf.Session() as sess:
cnn2.init(sess)
ysmooth = yhat.eval(feed_dict={ cnn2.X: X[:1000] })
for i in range(1000, len(X), 1000):
ysmooth = np.vstack((ysmooth, yhat.eval(feed_dict={ cnn2.X: X[i:i+1000] })))
y = ysmooth
tf.reset_default_graph()
cnn = CNN()
cnn.l2_grad_logp_all = tf.nn.l2_loss(tf.gradients(cnn.logps, cnn.X)[0])
cnn.l2_grad_logp_true = tf.nn.l2_loss(tf.gradients(cnn.logps * cnn.y, cnn.X)[0])
cnn.l2_grad_logp_rest = tf.nn.l2_loss(tf.gradients(cnn.logps * (1-cnn.y), cnn.X)[0])
optimizer = tf.train.AdamOptimizer(
learning_rate=FLAGS.lr,
epsilon=FLAGS.adameps)
loss_fn = cnn.loss_function(
softmax_temperature=FLAGS.distilltemp,
l2_certainty_sensitivity=FLAGS.l2cs,
l2_double_backprop=FLAGS.l2dbl)
if FLAGS.advtraineps > 1e-06:
print('adversarial training')
adv_loss = cnn.adversarial_training_loss(FLAGS.advtraineps, clip_min, clip_max)
loss_fn = (loss_fn + adv_loss) / 2.0
gradients, variables = zip(*optimizer.compute_gradients(loss_fn))
cnn.l2_param_grads = tf.add_n([tf.nn.l2_loss(g) for g in gradients])
cnn.train_op = optimizer.apply_gradients(zip(gradients, variables))
batches = cnn.minibatches({ 'X': X, 'y': y }, batch_size=batch_size, num_epochs=num_epochs)
t = time.time()
i = 0
checkpoint_interval = 2500
print_interval = 500
curve_interval = 100
filenames = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch in batches:
batch[cnn.is_train] = True
_, loss = sess.run([cnn.train_op, loss_fn], feed_dict=batch)
if i % checkpoint_interval == 0:
cnn.vals = [v.eval() for v in cnn.vars]
filename = model_dir+'/{}-batch{}-cnn.pkl'.format(name, i)
cnn.save(filename)
filenames.append(filename)
with open(model_dir+'/{}-batch{}-train-curves.pkl'.format(name,i), 'wb') as f:
pickle.dump(train_curves, f)
if i % print_interval == 0:
print('Batch {}, loss {}, {}s'.format(i, loss, time.time() - t))
if i % curve_interval == 0:
values = sess.run([
cnn.accuracy,
cnn.l2_grad_logp_true,
cnn.l2_grad_logp_rest,
cnn.l2_grad_logp_all,
cnn.l2_param_grads,
cnn.cross_entropy,
], feed_dict=batch)
train_curves['batch_number'].append(i)
train_curves['batch_accuracy'].append(values[0])
train_curves['l2_grad_logp_true'].append(values[1])
train_curves['l2_grad_logp_rest'].append(values[2])
train_curves['l2_grad_logp_all'].append(values[3])
train_curves['l2_param_grads'].append(values[4])
train_curves['cross_entropy'].append(values[5])
train_curves['adv_accuracy'].append(sess.run(cnn.accuracy, feed_dict={ cnn.X: fgsm[epses[1]][:512], cnn.y: yt[:512] }))
train_curves['test_accuracy'].append(sess.run(cnn.accuracy, feed_dict={ cnn.X: Xt[:512], cnn.y: yt[:512] }))
i += 1
cnn.vals = [v.eval() for v in cnn.vars]
filename = model_dir+'/{}-cnn.pkl'.format(name)
cnn.save(filename)
filenames.append(filename)
for filename in filenames:
cnn2 = CNN()
cnn2.load(filename)
cnn2.save(filename)
with open(model_dir+'/{}-train-curves.pkl'.format(name), 'wb') as f:
pickle.dump(train_curves, f)
for key, values in train_curves.items():
if key == 'batch_number':
continue
fig = plt.figure()
plt.plot(train_curves['batch_number'], values, marker='o', lw=2)
plt.title(key)
plt.xlabel('Minibatch')
plt.ylabel(key)
if 'grad' in key:
plt.yscale('log')
plt.savefig(model_dir+'/{}-traincurves-{}.png'.format(name,key))
plt.close(fig)
scores[(name, 'norm')] = cnn.score(Xt, yt).accuracy
for eps in epses:
scores[(name, eps)] = cnn.score(fgsm[eps], yt[:len(fgsm[eps])]).accuracy
print(scores)
with open(model_dir+'/{}-scores.pkl'.format(name), 'wb') as f:
pickle.dump(scores, f)
with open(model_dir+'/{}-flags.pkl'.format(name), 'wb') as f:
pickle.dump(vars(FLAGS), f)
| StarcoderdataPython |
3518579 | from evaluation import Metric, Accuracy, Precision, Recall, F1, FalseAlarm, \
AOD, EOD, SPD, DI, FR, D2H, MAR, SA, SD, SDAR, EFFECTSIZE, MMRE, MdMRE, PRED25, PRED40
from map import DatabaseNoClass
from evaluation.formulas import mar, mdar, sa, effect_size
# Registry mapping a metric's lookup name to its implementing class,
# grouped by the kind of task the metric evaluates.
metric_db = DatabaseNoClass(
    {
        # Generic
        "d2h" : {
            "class" : D2H,
        },
        # Effort estimation
        "mar" : { "class" : MAR },
        "sa" : { "class" : SA },
        "sd" : { "class" : SD },
        "sdar" : { "class" : SDAR },
        "effect size" : { "class" : EFFECTSIZE },
        "mmre" : { "class" : MMRE },
        "mdmre" : { "class" : MdMRE },
        "pred25" : { "class" : PRED25 },
        "pred40" : { "class" : PRED40 },
        # Classification
        "accuracy" : { "class" : Accuracy },
        "precision" : { "class" : Precision },
        "recall" : { "class" : Recall },
        "f1" : { "class" : F1 },
        "falsealarm" : { "class" : FalseAlarm },
        # Fairness
        "aod" : { "class" : AOD },
        "eod" : { "class" : EOD },
        "spd" : { "class" : SPD },
        "di" : { "class" : DI },
        "fr" : { "class" : FR },
    }
)
| StarcoderdataPython |
9768701 | import re
from random import randint, uniform
from collections import defaultdict
r_alphabet = re.compile(u'[а-яА-Я0-9-]+|[.,:;?!]+')
def gen_lines(path):
    """Yield every line of the UTF-8 text file at *path*, lowercased.

    The file is opened when iteration begins and closed deterministically
    when the generator is exhausted or closed (the original opened the
    file without ever closing it).
    """
    with open(path, encoding='utf-8') as data:
        for line in data:
            yield line.lower()
def gen_tokens(lines):
    """Yield each token (word or punctuation run) matched in *lines*."""
    for line in lines:
        yield from r_alphabet.findall(line)
def gen_trigrams(tokens):
    """Yield (t0, t1, t2) sliding trigrams over *tokens*, padded with '$'.

    Sentence-ending punctuation (. ! ?) flushes the window with '$'
    padding so every sentence starts from a fresh ('$', '$') context.
    """
    prev2, prev1 = '$', '$'
    for current in tokens:
        yield prev2, prev1, current
        if current in '.!?':
            # Close out the sentence and reset the context window.
            yield prev1, current, '$'
            yield current, '$', '$'
            prev2, prev1 = '$', '$'
        else:
            prev2, prev1 = prev1, current
def train(path):
    """Build a trigram model from the text file at *path*.

    Returns a dict mapping a (t0, t1) context to a list of
    (t2, P(t2 | t0, t1)) continuations, estimated from counts.
    """
    lines = gen_lines(path)
    tokens = gen_tokens(lines)
    trigrams = gen_trigrams(tokens)
    # bi counts (t0, t1) contexts; tri counts (t0, t1, t2) trigrams.
    bi, tri = defaultdict(lambda: 0.0), defaultdict(lambda: 0.0)
    for t0, t1, t2 in trigrams:
        bi[t0, t1] += 1
        tri[t0, t1, t2] += 1
    model = {}
    # Conditional probability = trigram count / context count.
    for (t0, t1, t2), freq in tri.items():
        if (t0, t1) in model:
            model[t0, t1].append((t2, freq/bi[t0, t1]))
        else:
            model[t0, t1] = [(t2, freq/bi[t0, t1])]
    return model
def generate_sentence(model):
    """Random-walk *model* from the '$ $' start state until sentence end.

    Returns the generated sentence with its first letter capitalized.
    """
    phrase = ''
    t0, t1 = '$', '$'
    while 1:
        # Sample the next token conditioned on the current bigram context.
        t0, t1 = t1, unirand(model[t0, t1])
        if t1 == '$': break
        # Single-character punctuation attaches without a leading space,
        # as does the very first word of the sentence.
        if t1 in ('.!?,;:') or t0 == '$':
            phrase += t1
        else:
            phrase += ' ' + t1
    return phrase.capitalize()
def unirand(seq):
    """Draw one token from *seq*, a sequence of (token, weight) pairs.

    Tokens are sampled with probability proportional to their weight.
    """
    total = 0
    for _, freq in seq:
        total += freq
    rnd = uniform(0, total)
    running = 0
    last_token = None
    for token, freq in seq:
        running += freq
        last_token = token
        if rnd < running:
            return token
    # BUG FIX: uniform(0, total) may return exactly total (endpoint
    # inclusion is platform-dependent per the random docs), in which case
    # the original fell through and returned None; fall back to the last
    # token instead.
    return last_token
if __name__ == '__main__':
path = 'data\data.txt'
| StarcoderdataPython |
8112579 | <filename>Intermediate/filter_function.py
# filter function nr4
def add7(x):
    """Return *x* increased by seven."""
    return 7 + x
def isOdd(x):
    """Return True when *x* is not evenly divisible by two."""
    return bool(x % 2)
# Keep only the odd values of the input list, then add 7 to each.
a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# b = list(filter(isOdd, a))
c = list(map(add7, filter(isOdd, a)))
print(c)
| StarcoderdataPython |
8098795 | <gh_stars>1-10
import matplotlib.pyplot as plt
# Read the stats as string from all three input files
f = open("stats_serv_time_optimal.csv","r") # optimal scheduler
f1 = open("stats2.csv","r") # deadline scheduler
f2 = open("stats_serv_time_cfq.csv","r") #CFQ scheduler
s = f.read()
s1 = f1.read()
s2 = f2.read()
# list of individual service times
# optimal
l = []
t = s.split(";;") # to chuck off the averages
l = t[0][0:-1].split(",")
# for deadline
l1 = []
l1 = s1[0:-1].split(",")
# for cfq
l2 = []
t2 = s2.split(";;")
l2 = t2[0][0:-1].split(",")
l = l[500:] # ignore the first 500 requests
l1 = l1[500:] # ignore the first 500 requests
l2 = l2[500:] # ignore the first 500 requests
print "total records from optimal is "+str(len(l))
print str(t[1])
print "total records from deadline is "+str(len(l1))
print "total records from cfq is "+str(len(l2))
print str(t2[1])
no_of_reqs = len(l)
# plot the results for Optimal Scheduler
intlist = []
sortlist = []
for item in l:
intlist.append(int(item))
sortlist.append(int(item))
sortlist.sort()
max = max(sortlist)
result = [0]*max
for timeres in intlist:
for i in range(timeres,max):
result[i] += 1
x = []
for num in range(len(result)):
x.append(num)
test = []
for r in result:
test.append(r-300)
plt.plot(x,result,'r',label = 'Optimal scheduler')
# plot the results for the deadline scheduler
intlist1 = []
for item in l1:
intlist1.append(int(item))
result1 = [0]*max
for timeres in intlist1:
for i in range(timeres,max):
result1[i] += 1
plt.plot(x,result1,'g',label = 'deadline scheduler')
# plot the results for the cfq scheduler
intlist2 = []
for item in l2:
intlist2.append(int(item))
result2 = [0]*max
for timeres in intlist2:
for i in range(timeres,max):
result2[i] += 1
plt.plot(x,result2,'b',label = 'CFQ scheduler')
plt.axis([0,max,0,no_of_reqs+500])
plt.title("Service Distribution of Disk scheduling alorithms")
plt.xlabel("Service time(t) in milliseconds")
plt.ylabel("no.of requests served under time (t)")
plt.legend(loc='lower right')
plt.savefig("diskschedplot.png",bbox_inches=0)
plt.show()
| StarcoderdataPython |
6579318 | <filename>kornia/geometry/calibration/__init__.py<gh_stars>1000+
from kornia.geometry.calibration.distort import *
from kornia.geometry.calibration.pnp import *
from kornia.geometry.calibration.undistort import *
| StarcoderdataPython |
5191019 | <reponame>mattkirby/zfs-py
#!/usr/bin/env python
"""
Synchronize file systems with zfs send and receive
Gather information about zfs file systems locally and remotely in
order to facilitate synchronizing data across multiple systems.
"""
import os
import subprocess
import sys
import atexit
class ZfsSend:
    """
    Interact with zfs send and receive

    Gathers snapshot information for local and remote (ssh) pools and
    drives full or incremental ``zfs send | zfs receive`` replication.
    External tools used: ``zfs``, ``ssh``, ``sudo`` and ``pv``.
    """
    def __init__(self):
        # Kept for callers; not used by any method in this class.
        self.hosts = []
    def volume_exists(self, volume, host=None, sudo=True):
        """
        Find if remote volume exists

        Runs ``zfs list <volume>`` (over ssh when *host* is given, with
        sudo unless disabled) and returns True iff the command succeeds.
        """
        try:
            fnull = open(os.devnull, 'w')
            command = ['zfs', 'list', volume]
            if host:
                # Remote checks always go through sudo over ssh.
                command = ['ssh', host, 'sudo'] + command
            elif sudo:
                command = ['sudo'] + command
            # check_call raises on a non-zero exit status.
            out = subprocess.check_call(
                command, stdout=fnull, stderr=subprocess.STDOUT)
            fnull.close()
            return True
        except Exception:
            # Any failure (missing volume, ssh error, ...) reads as False.
            fnull.close()
            return False
    def get_snapshots(self, volume, host=None, sudo=True):
        """
        Return all snapshots for a volume

        Returns just the snapshot names (the part after '@') in the order
        ``zfs list`` prints them, i.e. oldest first.
        """
        try:
            command = ['zfs', 'list', '-r', '-t',
                       'snapshot', '-o', 'name', volume]
            if host:
                command = ['ssh', host, 'sudo'] + command
            elif sudo:
                command = ['sudo'] + command
            out = subprocess.check_output(command)
            snapshot = []
            # [1:-1] drops the 'NAME' header line and the trailing empty
            # string produced by the final newline.
            snapshots = out.split('\n')[1:-1]
            for i in snapshots:
                parts = i.split('@')
                snapshot.append(parts[1])
            return snapshot
        except Exception:
            raise
    def has_vol_snapshots(self, volume, host=None):
        """
        Check if a volume exists and has snapshots

        Returns the snapshot list when the volume exists (possibly an
        empty list), or False when the volume is absent.
        """
        if self.volume_exists(volume, host):
            snapshots = self.get_snapshots(volume, host)
            return snapshots
        else:
            return False
    def snapshot_diff(self, volume, host):
        """
        Check if remote and local snapshots differ

        Returns the snapshot names present locally but missing remotely,
        or False when the remote volume is absent or nothing differs.
        """
        remote = self.has_vol_snapshots(volume, host)
        if remote:
            local = self.has_vol_snapshots(volume)
            diff = list(set(local) - set(remote))
            if diff:
                return diff
            else:
                return False
        else:
            return False
    def replication_type(self, volume, host):
        """
        Determine the type of replication to perform
        Detects whether target system has zfs volume
        If detected, determines whether incremental sync is possible

        Returns False when the remote already has the newest snapshot,
        otherwise a (send_options, recv_options, snaps) tuple that
        replicate() turns into command lines.
        """
        snaps, force = None, None
        local_snapshots = self.get_snapshots(volume)
        remote_snapshots = self.has_vol_snapshots(volume, host)
        # Default: full replicated stream up to the newest local snapshot.
        snaps = ['{}@{}'.format(volume, local_snapshots[-1])]
        send_options = ['-R']
        recv_options = ['-F', '-d']
        if remote_snapshots:
            if remote_snapshots[-1] == local_snapshots[-1]:
                return False
            # Check if common snapshot exists
            elif remote_snapshots[-1] in local_snapshots:
                print 'Found incremental snapshot point for {} at {}'.format(volume, remote_snapshots[-1])
                send_options.append('-I')
                snaps.insert(0, '@{}'.format(remote_snapshots[-1]))
            elif remote_snapshots[-1] not in local_snapshots:
                # No shared base: wipe remote snapshots so a full stream
                # can be forced on the next pass.
                print 'No common incremental snapshot found. Removing snapshots and forcing re-sync.'
                remove = subprocess.check_output(
                    ['ssh', host, 'sudo', '/usr/local/bin/zfs_snapshot',
                     '-k', '0', '-V', volume])
            else:
                print "I don't know how I got here"
                return False
        else:
            print '{} does not exist on the target. Starting replication.'.format(volume)
        return send_options, recv_options, snaps
    def replicate(self, volume, host, sudo=True, target_volume='backup-tank'):
        """
        Replicate zfs volumes

        Pipes ``zfs send`` through ``pv`` into a remote ``zfs receive``.
        A lock file guards against concurrent runs and is removed at
        interpreter exit via atexit.
        """
        atexit.register(self.remove_lock, volume)
        repl_type = self.replication_type(volume, host)
        if repl_type:
            send_options, recv_options, snaps = repl_type
            send_command = ['zfs', 'send'] + send_options + snaps
            recv_command = ['ssh', host, 'sudo', 'zfs', 'receive'] + recv_options + [target_volume]
            if sudo:
                send_command = ['sudo'] + send_command
            self.lock_file(volume)
            # Pipeline: send | pv | ssh ... zfs receive
            send = subprocess.Popen(send_command, stdout=subprocess.PIPE)
            pv = subprocess.Popen(['pv'], stdin=send.stdout, stdout=subprocess.PIPE)
            receive = subprocess.Popen(recv_command, stdin=pv.stdout, stdout=subprocess.PIPE)
            # Close our copy so 'send' receives SIGPIPE if 'pv' dies.
            send.stdout.close()
            output = receive.communicate()
            if output[0]:
                return output[0]
            # NOTE(review): this branch looks unreachable -- it is only
            # evaluated when output[0] is falsy, so the substring test
            # cannot match; verify the intended error handling.
            elif 'cannot receive' in output[0]:
                return 'Replication failed with with message: {}'.format(output[0])
            else:
                return 'Replication of {} completed successfully with snapshot from {}'.format(volume, snaps[-1].split('@')[-1])
        else:
            return 'Volume {} is up to date'.format(volume)
    def take_snapshot(self, volume):
        """
        Take a snapshot of a volume with timestamp

        NOTE(review): stub -- the docstring is the entire body, so this
        method currently does nothing and returns None.
        """
    @classmethod
    def lock_file(cls, volume):
        """
        Create a lock file for tracking state of replication

        Exits the process with status 3 when a lock already exists and
        status 4 when the lock cannot be created.
        """
        try:
            lockfile = '/{}/.replication_lock'.format(volume)
            if os.path.isfile(lockfile):
                print 'A lockfile exists'
                # SystemExit is not an Exception subclass, so it escapes
                # the handler below.
                sys.exit(3)
            lock = open(lockfile, 'w')
            lock.close()
        except Exception:
            print 'Cannot create a lockfile at {}'.format(volume)
            sys.exit(4)
    @classmethod
    def remove_lock(cls, volume):
        """
        Remove the lockfile

        Registered with atexit from replicate(); best-effort removal.
        """
        try:
            lockfile = '/{}/.replication_lock'.format(volume)
            if os.path.isfile(lockfile):
                os.remove(lockfile)
        except Exception:
            print 'Cannot remove lockfile {}'.format(lockfile)
| StarcoderdataPython |
4890403 | <reponame>akifoezkan/Halide-HLS<gh_stars>10-100
#!/usr/bin/python3
# to be called via nose, for example
# nosetests-3.4 -v path_to/tests/test_basics.py
from halide import *
def test_types():
    """Sanity-check construction and comparison of basic halide Int types."""
    thirty_two = Int(32)
    sixteen = Int(16)
    assert thirty_two != sixteen
    assert thirty_two.is_float() == False
    assert sixteen.is_float() == False
    print("Int(32) type:", Int(32))
    print("Int(16) type:", Int(16))
def test_basics():
    """Smoke-test Expr construction, Func definition, scheduling and JIT."""
    input = ImageParam(UInt(16), 2, 'input')
    x, y = Var('x'), Var('y')
    blur_x = Func('blur_x')
    blur_xx = Func('blur_xx')
    blur_y = Func('blur_y')
    yy = cast(Int(32), 1)
    assert yy.type() == Int(32)
    print("yy type:", yy.type())
    z = x + 1
    # Bare indexing expressions: built and discarded, only checking that
    # construction does not raise.
    input[x,y]
    input[0,0]
    input[z,y]
    input[x+1,y]
    print("ping 0.2")
    input[x,y]+input[x+1,y]
    if False:
        # Disabled: indexing an undefined Func on the right-hand side.
        aa = blur_x[x,y]
        bb = blur_x[x,y+1]
        aa + bb
        blur_x[x,y]+blur_x[x,y+1]
    print("ping 0.3")
    (input[x,y]+input[x+1,y]) / 2
    print("ping 0.4")
    blur_x[x,y]
    print("ping 0.4.1")
    blur_xx[x,y] = input[x,y]
    print("ping 0.5")
    # Classic separable 3-tap blur definition.
    blur_x[x,y] = (input[x,y]+input[x+1,y]+input[x+2,y])/3
    print("ping 1")
    blur_y[x,y] = (blur_x[x,y]+blur_x[x,y+1]+blur_x[x,y+2])/3
    xi, yi = Var('xi'), Var('yi')
    print("ping 2")
    # Schedule: tile the output, parallelize rows, vectorize inner x.
    blur_y.tile(x, y, xi, yi, 8, 4).parallel(y).vectorize(xi, 8)
    blur_x.compute_at(blur_y, x).vectorize(x, 8)
    blur_y.compile_jit()
    print("Compiled to jit")
    return
def test_basics2():
    """Check int/float type propagation through Var arithmetic (bilateral-grid setup)."""
    input = ImageParam(Float(32), 3, 'input')
    r_sigma = Param(Float(32), 'r_sigma', 0.1) # Value needed if not generating an executable
    s_sigma = 8 # This is passed during code generation in the C++ version
    x = Var('x')
    y = Var('y')
    z = Var('z')
    c = Var('c')
    # Add a boundary condition
    clamped = Func('clamped')
    clamped[x, y] = input[clamp(x, 0, input.width()-1),
                          clamp(y, 0, input.height()-1),0]
    if True:
        # Diagnostic dump of how Python's / vs // interacts with Expr types.
        print("s_sigma", s_sigma)
        print("s_sigma/2", s_sigma/2)
        print("s_sigma//2", s_sigma//2)
        print()
        print("x * s_sigma", x * s_sigma)
        print("x * 8", x * 8)
        print("x * 8 + 4", x * 8 + 4)
        print("x * 8 * 4", x * 8 * 4)
        print()
        print("x", x)
        print("(x * s_sigma).type()", )
        print("(x * 8).type()", (x * 8).type())
        print("(x * 8 + 4).type()", (x * 8 + 4).type())
        print("(x * 8 * 4).type()", (x * 8 * 4).type())
        print("(x * 8 / 4).type()", (x * 8 / 4).type())
        print("((x * 8) * 4).type()", ((x * 8) * 4).type())
        print("(x * (8 * 4)).type()", (x * (8 * 4)).type())
        assert (x * 8).type() == Int(32)
        assert (x * 8 * 4).type() == Int(32) # yes this did fail at some point
        assert ((x * 8) / 4).type() == Int(32)
        assert (x * (8 / 4)).type() == Float(32) # under python3 division rules
        assert (x * (8 // 4)).type() == Int(32)
        #assert (x * 8 // 4).type() == Int(32) # not yet implemented
    # Construct the bilateral grid
    r = RDom(0, s_sigma, 0, s_sigma, 'r')
    val0 = clamped[x * s_sigma, y * s_sigma]
    val00 = clamped[x * s_sigma * cast(Int(32), 1), y * s_sigma * cast(Int(32), 1)]
    #val1 = clamped[x * s_sigma - s_sigma/2, y * s_sigma - s_sigma/2] # should fail
    val22 = clamped[x * s_sigma - cast(Int(32), s_sigma//2),
                    y * s_sigma - cast(Int(32), s_sigma//2)]
    val2 = clamped[x * s_sigma - s_sigma//2, y * s_sigma - s_sigma//2]
    val3 = clamped[x * s_sigma + r.x - s_sigma//2, y * s_sigma + r.y - s_sigma//2]
    return
def test_basics3():
    """Exercise Func indexing, select(), and in-place += on Func references."""
    input = ImageParam(Float(32), 3, 'input')
    r_sigma = Param(Float(32), 'r_sigma', 0.1) # Value needed if not generating an executable
    s_sigma = 8 # This is passed during code generation in the C++ version
    x = Var('x')
    y = Var('y')
    z = Var('z')
    c = Var('c')
    # Add a boundary condition
    clamped = Func('clamped')
    clamped[x, y] = input[clamp(x, 0, input.width()-1),
                          clamp(y, 0, input.height()-1),0]
    # Construct the bilateral grid
    r = RDom(0, s_sigma, 0, s_sigma, 'r')
    val = clamped[x * s_sigma + r.x - s_sigma//2, y * s_sigma + r.y - s_sigma//2]
    val = clamp(val, 0.0, 1.0)
    #zi = cast(Int(32), val * (1.0/r_sigma) + 0.5)
    # Quantize the clamped value into a histogram bin index.
    zi = cast(Int(32), (val / r_sigma) + 0.5)
    histogram = Func('histogram')
    histogram[x, y, z, c] = 0.0
    ss = select(c == 0, val, 1.0)
    print("select(c == 0, val, 1.0)", ss)
    left = histogram[x, y, zi, c]
    print("histogram[x, y, zi, c]", histogram[x, y, zi, c])
    print("histogram[x, y, zi, c]", left)
    # += on a Func reference builds an update definition.
    left += 5
    print("histogram[x, y, zi, c] after += 5", left)
    left += ss
    return
def test_float_or_int():
    """Pin down the Int(32)/Float(32) result types of mixed Var/int/float arithmetic."""
    x = Var('x')
    i, f = Int(32), Float(32)
    assert ((x//2) - 1 + 2*(x%2)).type() == i
    assert ((x/2) - 1 + 2*(x%2)).type() == i
    assert ((x/2)).type() == i
    # Any float operand promotes the whole expression to Float(32).
    assert ((x/2.0)).type() == f
    assert ((x//2)).type() == i
    assert ((x//2) - 1).type() == i
    assert ((x%2)).type() == i
    assert (2*(x%2)).type() == i
    assert ((x//2) - 1 + 2*(x%2)).type() == i
    assert type(x) == Var
    assert (x.expr()).type() == i
    assert (Expr(2.0)).type() == f
    assert (Expr(2)).type() == i
    assert (x + 2).type() == i
    assert (2 + x).type() == i
    assert (Expr(2) + Expr(3)).type() == i
    assert (Expr(2.0) + Expr(3)).type() == f
    assert (Expr(2) + 3.0).type() == f
    assert (Expr(2) + 3).type() == i
    assert (x.expr() + 2).type() == i # yes this failed at some point
    assert (2 + x.expr()).type() == i
    assert (2 * (x + 2)).type() == i # yes this failed at some point
    assert (x + 0).type() == i
    assert (x % 2).type() == i
    assert (2 * x).type() == i
    assert (x * 2).type() == i
    assert (x * 2).type() == i
    assert ((x % 2)).type() == i
    assert ((x % 2) * 2).type() == i
    #assert (2 * (x % 2)).type() == i # yes this failed at some point
    assert ((x + 2) * 2).type() == i
    return
def test_operator_order():
    """Check that int-on-the-left and int-on-the-right arithmetic both build."""
    v = Var('x')
    fn = Func('f')
    v + 1
    1 + v
    print("x", v, ", x + 1", v + 1, ", 1 + x", 1 + v)
    fn[v] = v ** 2
    fn[v] + 1
    Expr(1) + fn[v]
    1 + fn[v]
def test_ndarray_to_image():
    """Convert numpy arrays to halide images (skipped when the helper is absent)."""
    if "ndarray_to_image" not in globals():
        print("Skipping test_ndarray_to_image")
        return
    import numpy
    floats = numpy.ones((200, 300), dtype=numpy.float32)
    float_image = ndarray_to_image(floats, "float32_test_image")
    print("i0", float_image)
    bytes_ = numpy.ones((640, 480), dtype=numpy.uint8)
    byte_image = ndarray_to_image(bytes_, "uint8_test_image")
    print("i1", byte_image)
def test_image_to_ndarray():
    """Round-trip halide Images into numpy arrays and verify dtype and a pixel."""
    if "image_to_ndarray" not in globals():
        # Helper not provided by this halide build; nothing to test.
        print("Skipping test_image_to_ndarray")
        return
    import numpy
    i0 = Image(Float(32), 50, 50)
    assert i0.type() == Float(32)
    a0 = image_to_ndarray(i0)
    print("a0.shape", a0.shape)
    print("a0.dtype", a0.dtype)
    assert a0.dtype == numpy.float32
    i1 = Image(Int(16), 50, 50)
    assert i1.type() == Int(16)
    # Write one pixel and make sure it survives the conversion.
    i1[24, 24] = 42
    assert i1(24, 24) == 42
    a1 = image_to_ndarray(i1)
    print("a1.shape", a1.shape)
    print("a1.dtype", a1.dtype)
    assert a1.dtype == numpy.int16
    assert a1[24, 24] == 42
    return
def test_param_bug():
    """Regression: see https://github.com/rodrigob/Halide/issues/1"""
    named_with_default = Param(UInt(8), "p1", 0)
    named_only = Param(UInt(8), "p2")
    default_only = Param(UInt(8), 42)
def test_imageparam_bug():
    """Regression: see https://github.com/rodrigob/Halide/issues/2"""
    x, y = Var("x"), Var("y")
    fx = Func("fx")
    source = ImageParam(UInt(8), 1, "input")
    fx[x, y] = source[y]
# Run every test in sequence when executed as a script (also runnable
# via nose, per the module header).
if __name__ == "__main__":
    test_imageparam_bug()
    test_param_bug()
    test_float_or_int()
    test_ndarray_to_image()
    test_image_to_ndarray()
    test_types()
    test_operator_order()
    test_basics()
    test_basics2()
    test_basics3()
| StarcoderdataPython |
1783326 | from telegram import Update, ReplyKeyboardRemove
from telegram.ext import ConversationHandler, CallbackContext
from main import db as db, logger, facts_to_str, bot, ADMIN
from keyboards import *
from steps import CHOOSING_ACTION, CHOOSING_KIND, TYPING_REPLY_FREE_SUGGEST, CHOOSING_GAME_GENRE, CHOOSING_GENRE, \
CHOOSING_PLATFORM, TYPING_VOTE_CHOICE, ADMIN_ACTION
def user_suggested(update: Update, context: CallbackContext):
    """Store a user's free-text suggestion in the DB and notify the admin.

    Routes by the previously-selected kind: games get add_game() with the
    game genre/platform, everything else goes through add_item(). Returns
    the next conversation state (CHOOSING_GAME_GENRE or CHOOSING_GENRE).
    """
    user = update.message.from_user
    user_data = context.user_data
    text = update.message.text
    user_data['Suggested'] = text
    logger.info("Location of %s: %s", user.first_name, update.message.text)
    if user_data['kind'] == "Games":
        db.connect()
        suggested = db.add_game(user_data['Suggested'], user_data['Game_Genre'], user_data['platform'])
        # Forward the DB's confirmation text to the admin chat.
        bot.sendMessage(chat_id=ADMIN, text=suggested)
        db.conn.close()
        update.message.reply_text("thanks", reply_markup=markup_genres_games, )
        return CHOOSING_GAME_GENRE
    if user_data['kind'] == "Book":
        db.connect()
        suggested = db.add_item(user_data['Suggested'], user_data['Genre'], user_data['kind'], user_data['action'])
        bot.sendMessage(chat_id=ADMIN, text=suggested)
        db.conn.close()
        update.message.reply_text("thanks", reply_markup=markup_add_book_genres, )
        return CHOOSING_GENRE
    else:
        # Any other kind (e.g. movies/series) shares the generic flow but
        # returns the generic genre keyboard.
        db.connect()
        suggested = db.add_item(user_data['Suggested'], user_data['Genre'], user_data['kind'], user_data['action'])
        bot.sendMessage(chat_id=ADMIN, text=suggested)
        db.conn.close()
        update.message.reply_text("thanks", reply_markup=markup_genres_add, )
        return CHOOSING_GENRE
def done(update: Update, context: CallbackContext) -> int:
    """End the conversation: log collected data, clear it, remove the keyboard.

    NOTE(review): an identical ``done`` is defined again at the bottom of
    this module and rebinds the name; consider deleting one copy.
    """
    user_data = context.user_data
    user = update.message.from_user
    logger.info("Location of %s: %s", user.first_name, update.message.text)
    logger.info("Location of %s: %s", user.first_name, facts_to_str(user_data))
    update.message.reply_text(
        "until next time!",
        reply_markup=ReplyKeyboardRemove(),
    )
    user_data.clear()
    return ConversationHandler.END
def choosing_genre(update: Update, context: CallbackContext) -> int:
    """Handle the genre-selection step: list items or prompt for a suggestion.

    Branches on the chosen genre text: "Return" goes back to kind
    selection, "all" lists every item of the current kind, otherwise the
    earlier 'action' decides between free-text suggestion input and
    listing other users' suggestions for the genre.
    """
    user = update.message.from_user
    user_data = context.user_data
    text = update.message.text
    user_data['Genre'] = text
    logger.info("Location of %s: %s", user.first_name, update.message.text)
    if user_data['Genre'] == "Return":
        # NOTE(review): both branches below are identical; the Book check
        # is redundant.
        if user_data['kind'] == "Book":
            update.message.reply_text("ok", reply_markup=markup_kind, )
            return CHOOSING_KIND
        update.message.reply_text("ok", reply_markup=markup_kind, )
        return CHOOSING_KIND
    elif user_data['Genre'] == "all":
        db.connect()
        items = db.get_all_items(user_data['kind'])
        db.conn.close()
        counter = 0
        temp = "Type: " + user_data['kind'] + "\n\n\n"
        for x in items:
            # Build an HTML line per item: id, name, genre, vote count.
            temp += "  <b>" + str((x[5])) + "</b>" + " --- " + x[0] + "  " + "Genre: " + x[
                1] + "  Votes: " + str(x[4]) + "\n\n"
            counter = counter + 1
            # Flush every 25 items to stay under Telegram's message limit.
            if counter == 25:
                counter = 0
                update.message.reply_text(temp, reply_markup=ReplyKeyboardRemove(), parse_mode='HTML')
                temp = ""
        if temp == "\n" or temp == "":
            temp = "Thanks"
        update.message.reply_text(temp, reply_markup=markup_show, parse_mode='HTML')
        return CHOOSING_ACTION
    elif user_data['action'] == "suggest something":
        update.message.reply_text("ok please type", reply_markup=ReplyKeyboardRemove())
        return TYPING_REPLY_FREE_SUGGEST
    elif user_data['action'] == "peoples suggestions":
        db.connect()
        counter = 0
        items = db.get_items(user_data['Genre'], user_data['kind'])
        db.conn.close()
        temp = "Type: " + user_data['kind'] + "  Genre: " + user_data['Genre'] + "\n\n\n "
        for x in items:
            temp += "  <b>" + str((x[5])) + "</b>" + " --- " + "  " + "Genre: " + x[
                1] + "  Votes: " + str(x[4]) + "\n\n"
            counter = counter + 1
            if counter == 25:
                counter = 0
                update.message.reply_text(temp, reply_markup=ReplyKeyboardRemove(), parse_mode='HTML')
                temp = ""
        if temp == "\n" or temp == "":
            temp = "Thanks"
        update.message.reply_text(temp, reply_markup=markup_show, parse_mode='HTML')
        return CHOOSING_ACTION
def start(update: Update, _: CallbackContext) -> int:
    """Entry point of the conversation.

    Sends the admin menu when the sender's username equals ADMIN,
    otherwise the regular kind/action menu.

    Returns:
        The next conversation state: ADMIN_ACTION or CHOOSING_ACTION.
    """
    if update.message.from_user.username == ADMIN:
        update.message.reply_text(
            "Hi!.Welcome ",
            reply_markup=markup_admin
        )
        return ADMIN_ACTION
    else:
        update.message.reply_text(
            "Hi!. Welcome ",
            reply_markup=markup_kind_action
        )
        # Removed a leftover debug print(CHOOSING_ACTION) that wrote the
        # state constant to stdout on every /start.
        return CHOOSING_ACTION
def regular_choice(update: Update, context: CallbackContext) -> int:
    """Dispatch on the top-level action the user picked from the main menu.

    Stores the choice in user_data['action'] and returns the matching
    next state (vote id input, platform choice, or kind choice).
    """
    user_data = context.user_data
    user = update.message.from_user
    text = update.message.text
    logger.info("Location of %s: %s", user.first_name, update.message.text)
    user_data['action'] = text
    if user_data['action'] == "Vote":
        update.message.reply_text(f'ok please type the id !', reply_markup=ReplyKeyboardRemove())
        return TYPING_VOTE_CHOICE
    elif user_data['action'] == "Return":
        update.message.reply_text(f'ok', reply_markup=markup_kind_action)
        return CHOOSING_ACTION
    elif user_data['action'] == "Games":
        # NOTE(review): this sends markup_kind while asking for a
        # platform; choosing_kind uses markup_platform for the same
        # step -- confirm whether markup_platform was intended here.
        update.message.reply_text(f'ok choose the platform !', reply_markup=markup_kind)
        return CHOOSING_PLATFORM
    else:
        update.message.reply_text(f'ok choose the kind !', reply_markup=markup_kind)
        return CHOOSING_KIND
def choosing_kind(update: Update, context: CallbackContext) -> int:
    """Handle the media-kind selection step of the conversation.

    Stores the chosen kind in user_data['kind'] and replies with the
    keyboard for the next step, picked by kind and the earlier 'action'.

    Returns:
        The next conversation state, or ConversationHandler.END on error.
    """
    user_data = context.user_data
    text = update.message.text
    user_data['kind'] = text
    user = update.message.from_user
    logger.info("Location of %s: %s", user.first_name, update.message.text)
    try:
        if user_data['kind'] == "Return":
            update.message.reply_text("ok", reply_markup=markup_kind_action)
            return CHOOSING_ACTION
        if user_data['kind'] == "Games":
            update.message.reply_text("ok please choose the platform", reply_markup=markup_platform)
            return CHOOSING_PLATFORM
        if user_data['action'] == "suggest something":
            if user_data['kind'] == "Book":
                update.message.reply_text("ok please choose genre", reply_markup=markup_add_book_genres, )
                return CHOOSING_GENRE
            else:
                update.message.reply_text("ok please choose genre", reply_markup=markup_genres_add)
                # BUG FIX: this branch previously fell through without a
                # return, so the handler yielded None and the conversation
                # stayed stuck in CHOOSING_KIND.
                return CHOOSING_GENRE
        else:
            if user_data['kind'] == "Book":
                update.message.reply_text("ok please choose genre", reply_markup=markup_book_genres, )
                return CHOOSING_GENRE
            update.message.reply_text("ok please choose genre", reply_markup=markup_genres)
            return CHOOSING_GENRE
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        update.message.reply_text("Error! Please try again later", reply_markup=ReplyKeyboardRemove(), )
        return ConversationHandler.END
# NOTE(review): exact duplicate of the ``done`` defined earlier in this
# module; this second definition rebinds the name. One copy should go.
def done(update: Update, context: CallbackContext) -> int:
    """End the conversation: log collected data, clear it, remove the keyboard."""
    user_data = context.user_data
    user = update.message.from_user
    logger.info("Location of %s: %s", user.first_name, update.message.text)
    logger.info("Location of %s: %s", user.first_name, facts_to_str(user_data))
    update.message.reply_text(
        "until next time!",
        reply_markup=ReplyKeyboardRemove(),
    )
    user_data.clear()
    return ConversationHandler.END
| StarcoderdataPython |
9736882 | from pytest import raises
from metadata_driver_interface.driver_interface import DriverInterface
from metadata_driver_interface.utils import parse_config
def test_driver_expects_plugin():
    """Instantiating the abstract plugin base directly must raise TypeError."""
    from metadata_driver_interface.data_plugin import AbstractPlugin
    with raises(TypeError):
        _ = AbstractPlugin()
def test_driver_expcects_subclassed_plugin():
    """A plugin object that is not an AbstractPlugin subclass is rejected."""
    from metadata_driver_interface.data_plugin import AbstractPlugin

    class NonSubclassPlugin:
        pass

    with raises(TypeError):
        AbstractPlugin(NonSubclassPlugin())
def test_parse_config():
    """The sample config file must carry the expected Azure location."""
    cfg = parse_config('./tests/config.ini')
    assert cfg['azure.location'] == 'westus'
def test_driver_instances():
    """A freshly constructed DriverInterface exposes the on-premise plugin."""
    driver = DriverInterface('http://www.example.com')
    assert driver.data_plugin.type() == 'On premise'
| StarcoderdataPython |
1794310 | """Convert a yaml file to bib."""
# Package version (PEP 440 dev release).
__version__ = "0.1.4.dev0"
from yaml2bib._yaml2bib import yaml2bib
# Public API: re-export the single conversion entry point.
__all__ = ["yaml2bib"]
| StarcoderdataPython |
1847411 | # Model for Sign Language Recognition
# Importing all the libraries
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(16, kernel_size=3, activation='relu', input_shape=(28, 28, 3)))
# Step 2 - Max Pooling
classifier.add(MaxPooling2D(pool_size = (2,2)))
# Adding extra convolution layers
classifier.add(Conv2D(16, kernel_size=3, activation='relu'))
classifier.add(MaxPooling2D(pool_size = (2,2)))
#classifier.add(Conv2D(256, kernel_size=2, activation='relu'))
#classifier.add(MaxPooling2D(pool_size = (2,2)))
# Step 3 - Flatten
classifier.add(Flatten())
# Step 4 - Fully Connected Layer
classifier.add(Dense(128, activation='relu'))
classifier.add(Dropout(0.3))
# 36 output classes -- presumably digits 0-9 plus letters a-z, given the
# chr() mapping in the webcam loop below; confirm against the dataset.
classifier.add(Dense(36, activation='softmax'))
# Compile the Model
classifier.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Keras Image Preprocessing
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)
#test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
        'ISL Gestures Dataset',
        target_size=(28, 28),
        batch_size=16,
        class_mode='binary')
#validation_generator = test_datagen.flow_from_directory(
#        'data/validation',
#        target_size=(150, 150),
#        batch_size=32,
#        class_mode='binary')
classifier.fit_generator(
        train_generator,
        steps_per_epoch=7920,
        epochs=2)
import cv2
import numpy as np
import requests
# Webcam loop: crop a 200x200 ROI, build an HSV skin-colour mask and
# classify it with the CNN trained above. ESC (27) quits.
cap = cv2.VideoCapture(0)
#fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    #fgmask = fgbg.apply(frame)
    roi = frame[100:300, 100:300]
    cv2.rectangle(frame, (100,100), (300,300), (0,255,0), 0)
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # Fixed skin-tone HSV range; sensitive to lighting.
    lower_skin = np.array([0,20,70], dtype=np.uint8)
    upper_skin = np.array([20,255,255], dtype=np.uint8)
    mask = cv2.inRange(hsv, lower_skin, upper_skin)
    # NOTE(review): np.resize repeats/truncates the single-channel 200x200
    # mask into (1,200,200,3), but the model was built for 28x28x3 input
    # -- verify this matches the trained input shape.
    maskpredict = np.resize(mask,[1,200,200,3])
    classes = classifier.predict_classes(maskpredict)
    if(classes>9):
        # Classes 10..35 are rendered as letters 'a'..'z'.
        cv2.putText(frame,chr(classes-10+ord('a')), (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
    else:
        # NOTE(review): 'classes' is an ndarray here but putText expects a
        # string -- presumably str(classes[0]) was intended; confirm.
        cv2.putText(frame,classes, (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
    cv2.imshow('Original', frame)
    cv2.imshow('Mask', mask)
    k = cv2.waitKey(30) & 0xff
    if(k==27):
        break
cap.release()
cv2.destroyAllWindows()
# Disabled alternative: pull frames from an Android IP-webcam endpoint.
#url = 'http://192.168.0.102:8080/shot.jpg'
#
#while True:
#    img_resp = requests.get(url)
#    img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)
#    frame = cv2.imdecode(img_arr, -1)
#
#    roi = frame[100:300, 100:300]
#    cv2.rectangle(frame, (100,100), (300,300), (0,255,0), 0)
#    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
#    lower_skin = np.array([0,20,70], dtype=np.uint8)
#    upper_skin = np.array([20,255,255], dtype=np.uint8)
#
#    mask = cv2.inRange(hsv, lower_skin, upper_skin)
#    maskpredict = np.resize(mask,[1,200,200,3])
#    cv2.putText(frame, "Testing", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
#    #fgmask = fgbg.apply(img)
#    cv2.imshow("Android", frame)
#    cv2.imshow("fg", mask)
#    if(cv2.waitKey(1)==27):
#        break
#cv2.destroyAllWindows() | StarcoderdataPython |
4814150 | <reponame>datadavev/noaa
from noaa_sdk.noaa import NOAA
from noaa_sdk.util import UTIL
from noaa_sdk.accept import ACCEPT
| StarcoderdataPython |
6635097 | <reponame>NehaKeshan/nanomine-graph
import rdflib
from . import ingest_tester, template
# Dataset sample exercised by this test module.
file_under_test = "L256_S3_Potschke_2004"
class IngestTestRunner(template.IngestTestSetup):
    """Ingest-pipeline checks for the L256_S3_Potschke_2004 sample."""
    # NOTE(review): bool() is just False; this flag is never read here --
    # presumably consumed by the template base class, verify.
    first_run = bool()
    @classmethod
    def setUpClass(cls):
        # Point the shared setup at this module's sample before ingesting.
        cls.file_under_test = file_under_test
        super().setUpClass()
    def test_triples(self):
        # Dumps the ingested triples for debugging; asserts nothing itself.
        ingest_tester.print_triples(self)
    def test_melt_viscosity(self):
        # Expect exactly this melt-viscosity literal in the graph.
        ingest_tester.test_melt_viscosity(self, [rdflib.Literal(1793550.45609)])
| StarcoderdataPython |
9620154 | <reponame>svfat/ls-geofiller
#!/usr/bin/env python
# coding: utf-8
import MySQLdb
import requests
from time import sleep
import json
from difflib import SequenceMatcher
import pickle
import os
# MySQL connection settings; credentials are masked placeholders and must
# be filled in before running.
HOST = "localhost"
USER = "********"
PASSWORD= "*********"
DB = "***********"
# Table names of the target forum/CMS schema.
USER_TABLE = "prefix_user"
GEOTARGET_TABLE = "prefix_geo_target"
GEOCOUNTRY_TABLE = "prefix_geo_country"
GEOREGION_TABLE = "prefix_geo_region"
GEOCITY_TABLE = "prefix_geo_city"
def search_db(targettable, name_en, targetid=None):
    """Fetch one row from a geo table by its English name.

    targettable -- one of the module-level GEO*_TABLE constants; it is
                   interpolated into the SQL (identifiers cannot be bound
                   as parameters), so only those constants may be passed.
    name_en     -- English name to match; bound as a query parameter.
    targetid    -- optional parent id (country id for regions, region id
                   for cities); bound as a parameter when truthy.

    Returns the first matching row tuple, or None when nothing matches.
    """
    # Map each child table to the column linking it to its parent.
    parent_field = {
        GEOREGION_TABLE: 'country_id',
        GEOCITY_TABLE: 'region_id',
    }.get(targettable)
    if targetid and parent_field is None:
        # Previously this path hit an undefined local name; fail with an
        # explicit message instead.
        raise ValueError('no parent id column known for table %s' % targettable)
    if targetid:
        # Values are handed to the driver (no string interpolation) to
        # avoid SQL injection through name_en.
        sql = u'SELECT * FROM %s WHERE %s=%%s AND name_en=%%s' % (targettable, parent_field)
        cursor.execute(sql, (targetid, name_en))
    else:
        sql = u'SELECT * FROM %s WHERE name_en=%%s' % targettable
        cursor.execute(sql, (name_en,))
    return cursor.fetchone()
def searchsynonym(targettable, targetid, name_en):
    """Resolve an unmatched geo name through a pickled synonym dictionary.

    Known synonyms are looked up directly; otherwise the user is shown
    candidate names from the DB ranked by difflib similarity and asked to
    pick one interactively. The choice is persisted back to ls-geo.dat.
    Returns the matching DB row, or None when the user declines.
    """
    if os.path.isfile("ls-geo.dat"):
        f = open("ls-geo.dat", "rb")
        synonyms = pickle.load(f)
        f.close()
    else:
        # Seed the synonym file with two known Moscow aliases.
        synonyms = {u"""Moskovskaya Oblast'""": u"""Moscow & Moscow Region""", u"""Moskva""":u"""Moscow & Moscow Region"""}
        f = open("ls-geo.dat", "wb")
        pickle.dump(synonyms, f)
        f.close()
    if name_en in synonyms.keys():
        sname_en = synonyms[name_en]
        return search_db(targettable, targetid=targetid, name_en=sname_en)
    else:
        print "! Cannot find target for %s in DB." % name_en
        if targettable==GEOREGION_TABLE:
            field = 'country_id'
        if targettable==GEOCITY_TABLE:
            field = 'region_id'
        sql = u"""SELECT name_en FROM %s \
        WHERE %s=%s""" \
        % (targettable, field, targetid )
        cursor.execute(sql)
        row = cursor.fetchone()
        ratiolist = []
        # Score every candidate name against name_en with difflib.
        while row is not None:
            s = SequenceMatcher(None, name_en, row[0])
            ratiolist.append({'name':row[0],'ratio':s.ratio()})
            row = cursor.fetchone()
        sratiolist = sorted(ratiolist, key=lambda k: k['ratio'])
        for w in sratiolist:
            print "%d - %s" % (sratiolist.index(w), w['name'])
        try:
            # Non-numeric input means "no match"; skip this name.
            x = int(raw_input('Choose synonym for %s (Any symbol for None) ' % name_en))
        except ValueError:
            return None
        sname_en = sratiolist[x]['name']
        # Remember the choice for future runs.
        synonyms[name_en] = sname_en
        f = open("ls-geo.dat", "wb")
        pickle.dump(synonyms, f)
        f.close()
        return search_db(targettable, targetid=targetid, name_en=sname_en)
# --- Main script: geolocate every user lacking a geo_target row by their
# --- registration IP (Sypex Geo API) and insert the result into the DB.
db = MySQLdb.connect(HOST,USER,PASSWORD,DB, charset="utf8")
cursor = db.cursor()
cursor.execute("""SELECT user_id, user_ip_register FROM %s""" % (USER_TABLE))
users = cursor.fetchall()
print "There are %d users in DB" % len(users)
cursor.execute("""SELECT target_id FROM %s WHERE target_type='user'""" % (GEOTARGET_TABLE))
data = cursor.fetchall()
# Flatten the 1-column result set into a plain list of user ids.
users_in_geo = [item for sublist in data for item in sublist]
users_not_in_geo = []
for user in users:
    if not(user[0] in users_in_geo):
        users_not_in_geo.append({"user_id":user[0],"ip":user[1]})
print "%d of them haven\'t location info" % len(users_not_in_geo)
for user in users_not_in_geo:
    if user["ip"]:
        print "Trying to find location for %s" % user["ip"]
        #sleep(1)
        text = "http://api.sypexgeo.net/json/%s" % user["ip"]
        r = requests.get(text)
        if r.status_code == 200:
            j = json.loads(r.text)
            # The API returns null sections for unknown IPs, which makes
            # the ['name_en'] subscript raise TypeError.
            try:
                country_en = j['country']['name_en']
            except TypeError:
                print "! Exception trying fetch data for %s" % user["ip"]
                continue
            if not country_en:
                continue
            country = search_db(GEOCOUNTRY_TABLE, name_en=country_en)
            if country:
                country_id = country[0]
                country_name = country[2]
                print "-- %s" % country_name
            else:
                print "! Cannot find country %s in DB" % country_en
                continue
            try:
                region_en = j['region']['name_en']
            except TypeError:
                print "! Exception while searching for data in %s" % user["ip"]
                continue
            region = search_db(GEOREGION_TABLE, targetid=country_id, name_en=region_en)
            if not region:
                # Fall back to the interactive synonym resolver.
                region = searchsynonym(targettable=GEOREGION_TABLE,
                                       targetid=country_id,
                                       name_en =region_en)
            if not region:
                continue
            region_id = region[0]
            print "-- %s" % region[3]
            try:
                city_en = j['city']['name_en']
            except TypeError:
                # NOTE(review): unlike the branches above there is no
                # 'continue' here, so city_en may be undefined or stale
                # from a previous iteration -- verify intent.
                print "! Exception while searching for city in %s geodata" % user["ip"]
            city = search_db(GEOCITY_TABLE, targetid=region_id, name_en=city_en)
            if not city:
                city = searchsynonym(targettable=GEOCITY_TABLE, targetid=region_id, name_en=city_en)
            if not city:
                continue
            city_id = city[0]
            print "-- %s" % city[4]
            ### exporting data in table
            geo_type = 'city'
            geo_id = city_id
            target_type = 'user'
            target_id = user['user_id']
            #country_id
            #region_id
            #city_id
            sql = """INSERT INTO %s \
            VALUES ("%s",%s,"%s",%s,%s,%s,%s)""" \
            % (GEOTARGET_TABLE,
               geo_type,
               geo_id,
               target_type,
               target_id,
               country_id,
               region_id,
               city_id)
            cursor.execute(sql)
            db.commit()
        else:
            print "Cannot fetch info. Error code %d" % r.status_code
db.close()
| StarcoderdataPython |
12802813 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A basic test of compiling assembler files.
"""
import sys
import TestGyp
# Build the assembly sample out-of-tree and verify the program's output.
if sys.platform != 'win32':
  # TODO(bradnelson): get this working for windows.
  test = TestGyp.TestGyp(formats=['!msvs'])
  test.run_gyp('assembly.gyp', chdir='src')
  test.relocate('src', 'relocate/src')
  test.build('assembly.gyp', test.ALL, chdir='relocate/src')
  # Exact stdout expected from the built binary.
  expect = """\
Hello from program.c
Got 42.
"""
  test.run_built_executable('program', chdir='relocate/src', stdout=expect)
  test.pass_test()
| StarcoderdataPython |
6429970 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sq
import fp
import os
# Export sentiment words from the DB into per-category text files under
# /home/zhao/sw/, one "word polarity comment" line per entry.
if(os.path.isfile('/home/zhao/sw/v.txt')):
    os.remove('/home/zhao/sw/v.txt')
if(os.path.isfile('/home/zhao/sw/adj.txt')):
    os.remove('/home/zhao/sw/adj.txt')
if(os.path.isfile('/home/zhao/sw/adv.txt')):
    os.remove('/home/zhao/sw/adv.txt')
cn = sq.dbstart('sentiment')
cr = sq.cursor(cn)
#rl = sq.get_sentiment_word_attr(cn,cr,'sent_word','v')
rl = sq.get_sent_word(cn,cr,'sent_word')
lr = ''
for line in rl:
    # line[1] is the word's category and selects the output file.
    wordfile = '/home/zhao/sw/'+ line[1].encode('utf8') + '.txt'
    comment = line[3].encode('utf8')
    if(comment == ''):
        comment = 'None'
    lr = line[0].encode('utf8') +' '+ str(line[2]) +' '+ comment +'\n'
    fp.appendfile(wordfile,lr)
    #print lr
    #fp.writefile('v.txt',lr)
sq.dbstop(cn,cr)
sq.dbstop(cn,cr) | StarcoderdataPython |
9611594 | '''
This is an empty file :)
'''
# Demonstrates that a list copies the int values it is built from:
# reassigning alist[0] leaves the TEST_CONST_* names untouched.
print "The comment is a lie!!"
TEST_CONST_100 = 100
TEST_CONST_101 = 101
TEST_CONST_102 = 102
alist = [TEST_CONST_100, TEST_CONST_101, TEST_CONST_102]
alist[0] = 103
# Prints [103, 101, 102] followed by the unchanged constants.
print alist
print TEST_CONST_100
print TEST_CONST_101
print TEST_CONST_102
print TEST_CONST_102 | StarcoderdataPython |
9607159 | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QRect
from PyQt5.QtWidgets import QMessageBox, QDesktopWidget, QCheckBox
from SQL_functions import *
from search_genius import *
class Ui_Filters(QtWidgets.QWidget):
switch_window = QtCore.pyqtSignal(list)
switch_window2 = QtCore.pyqtSignal()
switch_window3 = QtCore.pyqtSignal(str)
login = ''
def setupUi(self, Filters, login):
self.login = login
Filters.setObjectName("Filters")
Filters.resize(476, 600)
Filters.setMinimumSize(QtCore.QSize(476, 600))
Filters.setMaximumSize(QtCore.QSize(476, 600))
self.Logout = QtWidgets.QPushButton(Filters)
self.Logout.setGeometry(QtCore.QRect(5, 5, 171, 40))
font = QtGui.QFont()
font.setPointSize(18)
self.Logout.setFont(font)
self.Logout.setObjectName("Logout")
self.Profile = QtWidgets.QPushButton(Filters)
self.Profile.setGeometry(QtCore.QRect(300, 5, 171, 40))
font = QtGui.QFont()
font.setPointSize(18)
self.Profile.setFont(font)
self.Profile.setObjectName("Profile")
self.label_2 = QtWidgets.QLabel(Filters)
self.label_2.setGeometry(QtCore.QRect(100, 160, 71, 30))
font = QtGui.QFont()
font.setPointSize(16)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.Artist = QtWidgets.QLineEdit(Filters)
self.Artist.setGeometry(QtCore.QRect(180, 110, 251, 32))
self.Artist.setObjectName("Artist")
self.Album = QtWidgets.QLineEdit(Filters)
self.Album.setGeometry(QtCore.QRect(180, 160, 251, 32))
self.Album.setObjectName("Album")
self.Song = QtWidgets.QLineEdit(Filters)
self.Song.setGeometry(QtCore.QRect(180, 210, 251, 32))
self.Song.setObjectName("Song")
self.Release_date_checkBox = QCheckBox(Filters)
self.Release_date_checkBox.setObjectName(u"Release_date_checkBox")
self.Release_date_checkBox.setGeometry(QRect(10, 260, 21, 22))
self.Release_date = QtWidgets.QDateTimeEdit(Filters)
self.Release_date.setGeometry(QtCore.QRect(180, 260, 251, 32))
self.Release_date.setObjectName("Release_date")
self.Release_date.setCalendarPopup(True)
self.Genre_checkBox = QCheckBox(Filters)
self.Genre_checkBox.setObjectName(u"Genre_checkBox")
self.Genre_checkBox.setGeometry(QRect(10, 310, 21, 22))
self.Genre = QtWidgets.QComboBox(Filters)
self.Genre.setGeometry(QtCore.QRect(180, 310, 251, 32))
self.Genre.setObjectName("Genre")
self.label = QtWidgets.QLabel(Filters)
self.label.setGeometry(QtCore.QRect(110, 110, 53, 30))
font = QtGui.QFont()
font.setPointSize(16)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_3 = QtWidgets.QLabel(Filters)
self.label_3.setGeometry(QtCore.QRect(110, 210, 53, 30))
font = QtGui.QFont()
font.setPointSize(16)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(Filters)
self.label_4.setGeometry(QtCore.QRect(40, 260, 131, 30))
font = QtGui.QFont()
font.setPointSize(16)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_6 = QtWidgets.QLabel(Filters)
self.label_6.setGeometry(QtCore.QRect(0, 60, 476, 32))
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
font = QtGui.QFont()
font.setPointSize(18)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.Search = QtWidgets.QPushButton(Filters)
self.Search.setGeometry(QtCore.QRect(20, 450, 171, 61))
font = QtGui.QFont()
font.setPointSize(18)
self.Search.setFont(font)
self.Search.setObjectName("Search_Button")
self.Add = QtWidgets.QPushButton(Filters)
self.Add.setGeometry(QtCore.QRect(260, 450, 171, 61))
font = QtGui.QFont()
font.setPointSize(18)
self.Add.setFont(font)
self.Add.setObjectName("Add_Button")
self.label_5 = QtWidgets.QLabel(Filters)
self.label_5.setGeometry(QtCore.QRect(102, 310, 61, 30))
font = QtGui.QFont()
font.setPointSize(16)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.Release_date.setEnabled(0)
self.Genre.setEnabled(0)
self.Release_date_checkBox.stateChanged.connect(self.Release_date.setEnabled)
self.Genre_checkBox.stateChanged.connect(self.Genre.setEnabled)
self.location_on_the_screen()
self.retranslateUi(Filters)
QtCore.QMetaObject.connectSlotsByName(Filters)
self.Album.returnPressed.connect(self.search)
self.Artist.returnPressed.connect(self.search)
self.Song.returnPressed.connect(self.search)
self.Search.clicked.connect(self.search)
self.Add.clicked.connect(self.request)
self.Logout.clicked.connect(self.logout)
self.Profile.clicked.connect(self.profile)
    def renew_nickname(self, Filters, nickname):
        """Store a new login/nickname and refresh all widget texts.

        Args:
            Filters: the top-level QWidget this UI was built on.
            nickname: the new login to show in the welcome label.
        """
        # self.login must be set first: retranslateUi() re-renders the
        # "Welcome back, <login>" label from it.
        self.login = nickname
        self.retranslateUi(Filters)
    def profile(self):
        """Emit the profile-switch signal with the current login.

        Presumably the window controller connected to switch_window3 opens
        the profile view for this user — confirm against the caller.
        """
        self.switch_window3.emit(self.login)
def location_on_the_screen(self):
ag = QDesktopWidget().availableGeometry()
widget = self.geometry()
x = round(ag.width()/2 - widget.width()/2)
y = round(ag.height()/2 - widget.height()/2)
self.move(x, y)
    def logout(self):
        """Emit the logout signal; presumably the controller connected to
        switch_window2 returns to the login screen — confirm against caller."""
        self.switch_window2.emit()
def request(self):
get_lyrics(self.Artist.text(), self.Song.text(), 0, 0, 0, 1, 1)
buttonReply2 = QMessageBox.about(self, "Successfull", "All search results from Genius added to database")
def retranslateUi(self, Filters):
_translate = QtCore.QCoreApplication.translate
Filters.setWindowTitle(_translate("Filters", "Music Search System | Search Filters"))
self.Logout.setText(_translate("Filters", "Logout"))
self.Profile.setText(_translate("Filters", "Profile"))
self.label_2.setText(_translate("Filters", "Album"))
self.label.setText(_translate("Filters", "Artist"))
self.label_3.setText(_translate("Filters", "Song"))
self.label_4.setText(_translate("Filters", "Release date"))
self.label_6.setText(_translate("Filters", "Welcome back, " + self.login))
self.Search.setText(_translate("Filters", "Search"))
self.Add.setText(_translate("Filters", "Request from\nexternal"))
self.label_5.setText(_translate("Filters", "Genre"))
self.Release_date.setDisplayFormat(_translate("Filters", "yyyy-mm-dd"))
self.load_Genres()
def load_Genres(self):
print("Loading genres...")
connection = pymysql.connect(host='localhost',
user='root',
password='<PASSWORD>',
database='Music_Search_System',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
with connection:
data = select(connection, '*', 'Genres')
for x in data:
self.Genre.addItem(x['Genre_name'])
connection.commit()
def search(self):
song = None
artist = None
album = None
release_date = None
genre = None
song_name = ''
if(not self.Song.text() == ''):
song = self.Song.text()
song = song.replace('(', '\(')
song = song.replace (')', '\)')
if(not self.Artist.text() == ''):
artist = self.Artist.text()
if(not self.Album.text() == ''):
album = self.Album.text()
if(self.Release_date.isEnabled() == True):
release_date = self.Release_date.text()
if(self.Genre.isEnabled() == True):
genre = self.Genre.currentText()
if(song == artist == album == genre == release_date == 0):
buttonReply = QMessageBox.about(self, "Error", "No search parameters supplied.")
print("Searching...")
connection = pymysql.connect(host='localhost',
user='root',
password='<PASSWORD>',
database='Music_Search_System',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
data = ''
with connection:
#Honestly, don't try to look following coding hell
#I know, that this code isn't the best, but I'll fix it someday.
#For now, I need simply working hell :)
#I already see similar parts, but I need more time to make it better
"""
if ( song == 1 and artist == 1 and album == 1 and release_date == 1 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums, Genres',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album and Songs.ID_Genre = Genres.ID'+
'and regexp_like(Songs.Name, %s) and regexp_like(Authors.Name, %s) and regexp_like(Albums.Name, %s) and Release_date like %s and Genres.Genre_name like %s',
(".*("+song_name+").*",
".*("+self.Artist.text()+").*",
".*("+self.Album.text()+").*",
self.Release_date.text(),
self.Genre.currentText()))
elif( song == 1 and artist == 1 and album == 1 and release_date == 1 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'and regexp_like(Songs.Name, %s) and regexp_like(Authors.Name, %s) and regexp_like(Albums.Name, %s) and Release_date like %s',
(".*("+song_name+").*",
".*("+self.Artist.text()+").*",
".*("+self.Album.text()+").*",
self.Release_date.text()))
elif( song == 1 and artist == 1 and album == 1 and release_date == 0 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums, Genres',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album and Genres.ID = Songs.ID_Genre '+
'and regexp_like(Songs.Name, %s) and regexp_like(Authors.Name, %s) and regexp_like(Albums.Name, %s) and Genres.Genre_name like %s',
(".*("+song_name+").*",
".*("+self.Artist.text()+").*",
".*("+self.Album.text()+").*",
self.Genre.currentText()))
elif( song == 1 and artist == 1 and album == 1 and release_date == 0 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'and regexp_like(Songs.Name, %s) and regexp_like(Authors.Name, %s) and regexp_like(Albums.Name, %s)',
(".*("+song_name+").*",
".*("+self.Artist.text()+").*",
".*("+self.Album.text()+").*"))
elif( song == 1 and artist == 1 and album == 0 and release_date == 1 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Genres', 'Authors.ID = Songs.ID_Author and Genres.ID = Songs.ID_Genre '+
'and regexp_like(Songs.Name, %s) and regexp_like(Authors.Name, %s) and Release_date like %s and Genres.Genre_name like %s',
(".*("+song_name+").*",
".*("+self.Artist.text()+").*",
self.Release_date.text(),
self.Genre.currentText()))
elif( song == 1 and artist == 1 and album == 0 and release_date == 1 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs join Authors on Authors.ID = Songs.ID_Author',
'regexp_like(Songs.Name, %s) and regexp_like(Authors.Name, %s) and Release_date like %s',
(".*("+song_name+").*",
".*("+self.Artist.text()+").*",
self.Release_date.text()))
elif( song == 1 and artist == 1 and album == 0 and release_date == 0 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Genres', 'Authors.ID = Songs.ID_Author and Genres.ID = Songs.ID_Genre '+
'and regexp_like(Songs.Name, %s) and regexp_like(Authors.Name, %s) and Genres.Genre_name like %s', (".*("+song_name+").*", ".*("+self.Artist.text()+").*", self.Genre.currentText()))
elif( song == 1 and artist == 1 and album == 0 and release_date == 0 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs join Authors on Authors.ID = Songs.ID_Author',
'regexp_like(Songs.Name, %s) and regexp_like(Authors.Name, %s)', (".*("+song_name+").*", ".*("+self.Artist.text()+").*"))
elif( song == 1 and artist == 0 and album == 1 and release_date == 1 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Genres, Songs_in_albums, Albums',
'Genres.ID = Songs.ID_Genre and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'and regexp_like(Songs.Name, %s) and regexp_like(Albums.Name, %s) and Release_date like %s and Genres.Genre_name like %s',
(".*("+song_name+").*",
".*("+self.Album.text()+").*",
self.Release_date.text(),
self.Genre.currentText()))
elif( song == 1 and artist == 0 and album == 1 and release_date == 1 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'and regexp_like(Songs.Name, %s) and regexp_like(Albums.Name, %s) and Release_date like %s',
(".*("+song_name+").*",
".*("+self.Album.text()+").*",
self.Release_date.text()))
elif( song == 1 and artist == 0 and album == 1 and release_date == 0 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Genres, Songs_in_albums, Albums',
'Genres.ID = Songs.ID_Genre and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'and regexp_like(Songs.Name, %s) and regexp_like(Albums.Name, %s) and Genres.Genre_name like %s',
(".*("+song_name+").*",
".*("+self.Album.text()+").*",
self.Genre.currentText()))
elif( song == 1 and artist == 0 and album == 1 and release_date == 0 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'and regexp_like(Songs.Name, %s) and regexp_like(Albums.Name, %s)',
(".*("+song_name+").*",
".*("+self.Album.text()+").*"))
elif( song == 1 and artist == 0 and album == 0 and release_date == 1 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Genres',
'Genres.ID = Songs.ID_Genre and Release_date like %s and regexp_like(Name, %s) and Genres.Genre_name like %s',
(self.Release_date.text(),
'.*('+song_name+')*',
self.Genre.currentText()))
elif( song == 1 and artist == 0 and album == 0 and release_date == 1 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs',
'Release_date like %s and regexp_like(Name, %s)',
(self.Release_date.text(),
'.*('+song_name+')*'))
elif( song == 1 and artist == 0 and album == 0 and release_date == 0 and genre == 1):
data = select_where(connection, '*', 'Songs, Genres', 'Genres.ID = Songs.ID_Genre and regexp_like(Name, %s) and Genres.Genre_name like %s', (".*("+song_name+").*", self.Genre.currentText()))
elif( song == 1 and artist == 0 and album == 0 and release_date == 0 and genre == 0):
data = select_where(connection, '*', 'Songs', 'regexp_like(Name, %s)', (".*("+song_name+").*")) # %s resolves as '*string*' (with quotes)
elif( song == 0 and artist == 1 and album == 1 and release_date == 1 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums, Genres',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album and Genres.ID = Songs.ID_Genre '+
'and regexp_like(Authors.Name, %s) and regexp_like(Albums.Name, %s) and Release_date like %s and Genres.Genre_name like %s',
(".*("+self.Artist.text()+").*",
".*("+self.Album.text()+").*",
self.Release_date.text(),
self.Genre.currentText()))
elif( song == 0 and artist == 1 and album == 1 and release_date == 1 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'and regexp_like(Authors.Name, %s) and regexp_like(Albums.Name, %s) and Release_date like %s',
(".*("+self.Artist.text()+").*",
".*("+self.Album.text()+").*",
self.Release_date.text()))
elif( song == 0 and artist == 1 and album == 1 and release_date == 0 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums, Genres',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album and Genres.ID = Songs.ID_Genre '+
'and regexp_like(Authors.Name, %s) and regexp_like(Albums.Name, %s) and Genres.Genre_name like %s',
(".*("+self.Artist.text()+").*",
".*("+self.Album.text()+").*",
self.Genre.currentText()))
elif( song == 0 and artist == 1 and album == 1 and release_date == 0 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'and regexp_like(Authors.Name, %s) and regexp_like(Albums.Name, %s)',
(".*("+self.Artist.text()+").*",
".*("+self.Album.text()+").*"))
elif( song == 0 and artist == 1 and album == 0 and release_date == 1 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Genres', 'Authors.ID = Songs.ID_Author and Genres.ID = Songs.ID_Genre '+
'and regexp_like(Authors.Name, %s) and Release_date like %s and Genres.Genre_name like %s',
(".*("+self.Artist.text()+").*",
self.Release_date.text(),
self.Genre.currentText()))
elif( song == 0 and artist == 1 and album == 0 and release_date == 1 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs join Authors on Authors.ID = Songs.ID_Author',
'regexp_like(Authors.Name, %s) and Release_date like %s',
(".*("+self.Artist.text()+").*",
self.Release_date.text()))
elif( song == 0 and artist == 1 and album == 0 and release_date == 0 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Genres', 'Authors.ID = Songs.ID_Author and Genres.ID = Songs.ID_Genre '+
'and regexp_like(Authors.Name, %s) and Genres.Genre_name like %s', (".*("+self.Artist.text()+").*", self.Genre.currentText()))
elif( song == 0 and artist == 1 and album == 0 and release_date == 0 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs join Authors on Authors.ID = Songs.ID_Author',
'regexp_like(Authors.Name, %s)', (".*("+self.Artist.text()+").*"))
elif( song == 0 and artist == 0 and album == 1 and release_date == 1 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Genres, Songs_in_albums, Albums',
'Genres.ID = Songs.ID_Genre and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'regexp_like(Albums.Name, %s) and Release_date like %s and Genres.Genre_name like %s',
(".*("+self.Album.text()+").*",
self.Release_date.text(),
self.Genre.currentText()))
elif( song == 0 and artist == 0 and album == 1 and release_date == 1 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'regexp_like(Albums.Name, %s) and Release_date like %s',
(".*("+self.Album.text()+").*",
self.Release_date.text()))
elif( song == 0 and artist == 0 and album == 1 and release_date == 0 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Songs_in_albums, Albums, Genres',
'Songs.ID_Genre = Genres.ID and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'and regexp_like(Albums.Name, %s) and Genres.Genre_name like %s',
(".*("+self.Album.text()+").*",
self.Genre.currentText()))
elif( song == 0 and artist == 0 and album == 1 and release_date == 0 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs, Authors, Songs_in_albums, Albums',
'Authors.ID = Songs.ID_Author and Songs_in_albums.ID_Song = Songs.ID and Albums.ID = Songs_in_albums.ID_Album '+
'and regexp_like(Albums.Name, %s)',
(".*("+self.Album.text()+").*"))
elif( song == 0 and artist == 0 and album == 0 and release_date == 1 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Genres',
'Songs.ID_Genre = Genres.ID and Genres.Genre_name like %s and Release_date like %s',
(self.Genre.currentText(), self.Release_date.text()))
elif( song == 0 and artist == 0 and album == 0 and release_date == 1 and genre == 0):
data = select_where(connection, 'Songs.*', 'Songs',
'Release_date like %s',
(self.Release_date.text()))
elif( song == 0 and artist == 0 and album == 0 and release_date == 0 and genre == 1):
data = select_where(connection, 'Songs.*', 'Songs, Genres',
'Songs.ID_Genre = Genres.ID and Genres.Genre_name like %s',
(self.Genre.currentText()))
"""
# I'll left it there for now
# I don't need it anymore, because I have procedure inside database (see database_functions.sql)
with connection.cursor() as cursor:
#Syntax: search_song(author, album, song, release_date, genre)
sql = ("call search(%s, %s, %s, %s, %s)")
cursor.execute(sql,(artist,album,song,release_date,genre))
data = cursor.fetchall()
connection.commit()
if(type(data) == tuple): ### QuickFix: for no results
#I'm trying to connect to my script to add song, if it isn't in my database
#Adding request to search in external database and add to my own
buttonReply = QMessageBox.question(self, "Error" , "This song isn't found in local database.\nDo you want to search in external database?")
if buttonReply == QMessageBox.Yes:
self.request()
else:
self.switch_window.emit(data)
# Manual test entry point: allows running this module directly to build the
# Filters window stand-alone and start the Qt event loop.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Filters = QtWidgets.QWidget()
    ui = Ui_Filters()
    ui.setupUi(Filters)
    Filters.show()
    # exec_() blocks until the last window closes; its return value becomes
    # the process exit status.
    sys.exit(app.exec_())
| StarcoderdataPython |
3420935 | # Django settings for alertspusher project.
import os

DEBUG = True
TEMPLATE_DEBUG = DEBUG

SITE_ID = 1

ADMINS = (
    # ('<NAME>', '<EMAIL>'),
)

MANAGERS = ADMINS

# Legacy (pre-Django 1.2) single-database settings; the rest of the file
# (e.g. TEMPLATE_LOADERS with load_template_source) targets that era, so
# the old style is kept.
DATABASE_ENGINE = 'mysql'
DATABASE_NAME = 'alertspusher'
DATABASE_USER = 'root'
DATABASE_PASSWORD = ''
DATABASE_HOST = ''  # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''  # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Toronto'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

STATIC_DOC_ROOT = os.path.join(os.path.dirname(__file__), 'assets')

ROOT_URLCONF = 'alertspusher.urls'

# Bug fix: the trailing comma is required -- without it TEMPLATE_DIRS was a
# plain string, which Django would iterate character by character.
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), 'templates'),
)

# C2DM related settings
C2DM_AUTH_TOKEN = 'FILL_AUTH_TOKEN_HERE'

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'alertspusher.c2dm',
)
| StarcoderdataPython |
4893729 | import argparse
import pathlib
import os
import sys
import gym
import time
import platform
import random
import pickle
import torch
import torch.nn.functional as F
import torch.utils.data as data_utils
from torch import nn
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from my_utils.replay_memory import *
from my_utils.torch import *
from my_utils.math import *
from my_utils.t_format import *
| StarcoderdataPython |
11201697 | <reponame>VladimirZubavlenko/ikaf42-app
import jwt
from datetime import datetime
from datetime import timedelta
from django.conf import settings
from django.db import models
from django.core import validators
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from secrets import token_urlsafe
# Create your models here
class StudyGroup(models.Model):
    """A study group that a student user can belong to."""
    id = models.AutoField(primary_key=True)
    name = models.TextField(max_length=30, verbose_name='Название группы')

    class Meta:
        # Bug fix: the singular/plural admin display names were swapped
        # (verbose_name must be the singular form).
        verbose_name = 'Учебная группа'
        verbose_name_plural = 'Учебные группы'

    def __str__(self):
        return self.name
class Role(models.Model):
    """A user role (e.g. student, staff) assignable to a User."""
    id = models.AutoField(primary_key=True)
    name = models.TextField(max_length=30, verbose_name='Роль')

    class Meta:
        # Bug fix: the singular/plural admin display names were swapped
        # (verbose_name must be the singular form).
        verbose_name = 'Роль'
        verbose_name_plural = 'Роли'

    def __str__(self):
        return self.name
class UserManager(BaseUserManager):
    """
    Manager for the custom ``User`` model.

    Django requires custom user models to define their own manager class.
    Inheriting from BaseUserManager supplies most of the machinery Django
    uses to create users; we only override the ``create_user`` /
    ``create_superuser`` entry points.
    """

    def _create_user(self, username, email, password=None, **extra_fields):
        # Bug fix: the default value had been mangled into the literal
        # placeholder ``<PASSWORD>``, which is a syntax error; restore None.
        if not username:
            raise ValueError('Указанное имя пользователя должно быть установлено')
        if not email:
            raise ValueError('Данный адрес электронной почты должен быть установлен')
        email = self.normalize_email(email)
        user = self.model(username=username, email=email, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, username, email, password=None, **extra_fields):
        """Create and return a regular ``User`` with the given e-mail,
        username and password."""
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(username, email, password, **extra_fields)

    def create_superuser(self, username, email, password, **extra_fields):
        """Create and return a ``User`` with superuser (admin) rights."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        extra_fields.setdefault('is_active', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Суперпользователь должен иметь is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Суперпользователь должен иметь is_superuser=True.')
        return self._create_user(username, email, password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
    """
    Custom ``User`` model.

    Requires a username, an e-mail address and a password.
    """
    id = models.AutoField(primary_key=True, null=False)
    username = models.TextField(db_index=True, max_length=255, unique=True, blank=False, verbose_name='Имя пользователя')
    email = models.EmailField(
        validators=[validators.validate_email],
        unique=True,
        verbose_name='Почта'
    )
    is_staff = models.BooleanField(default=False)
    is_active = models.BooleanField(default=False)
    lastName = models.TextField(max_length=30, verbose_name='Фамилия')
    firstName = models.TextField(max_length=30, verbose_name='Имя')
    middleName = models.TextField(max_length=30, verbose_name='Отчество', null=True, blank=True)
    role = models.ForeignKey(Role, related_name='+', on_delete=models.CASCADE, verbose_name='Роль', null=True, blank=True)
    studyGroup = models.ForeignKey(StudyGroup, related_name='+', on_delete=models.CASCADE, verbose_name='Группа', null=True, blank=True)
    phone = models.TextField(max_length=30, verbose_name='Телефон')
    workPlace = models.TextField(max_length=200, verbose_name='Место работы', null=True, blank=True)
    position = models.TextField(max_length=100, verbose_name='Должность', null=True, blank=True)
    academicRank = models.TextField(max_length=30, verbose_name='Ученое звание', null=True, blank=True)
    academicDegree = models.TextField(max_length=50, verbose_name='Ученая степень', null=True, blank=True)
    emailConfirmed = models.BooleanField(default=False, verbose_name='Почта подтверждена')
    passwordResetToken = models.TextField(max_length=40, verbose_name='Токен сброса пароля', null=True, blank=True)
    emailConfirmToken = models.TextField(max_length=30, verbose_name='Токен подтверждения почты', null=True, blank=True)
    last_login = models.DateTimeField(verbose_name='Последний вход', blank=True, null=True)

    # The field Django uses for authentication / login.
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ('email',)

    # Tell Django that the UserManager class defined above should manage
    # objects of this type.
    objects = UserManager()

    class Meta:
        # Bug fix: the singular/plural admin display names were swapped
        # (verbose_name must be the singular form).
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'

    def __str__(self):
        """Return the representation shown in consoles/admin:
        "<last name> <first name>"."""
        return self.lastName + " " + self.firstName

    @property
    def token(self):
        """JWT for this user, so callers can write ``user.token`` instead of
        ``user._generate_jwt_token()``."""
        return self._generate_jwt_token()

    def get_full_name(self):
        """Required by Django (e.g. for e-mail handling).  We do not store a
        separate display name, so the username is returned."""
        return self.username

    def get_short_name(self):
        """Required by Django; same as get_full_name() here."""
        return self.username

    def _generate_jwt_token(self):
        """
        Create a JSON Web Token carrying this user's id, valid for
        180 minutes.  (The old comment claimed 60 days; the code has always
        used a 3-hour lifetime.)
        """
        dt = datetime.now() + timedelta(minutes=180)
        # Bug fix: strftime('%s') is a non-portable glibc extension (it
        # fails on Windows); use the epoch timestamp directly.
        token = jwt.encode({
            'id': self.pk,
            'exp': int(dt.timestamp())
        }, settings.SECRET_KEY, algorithm='HS256')
        # PyJWT < 2 returns bytes, PyJWT >= 2 returns str; normalize so the
        # old unconditional .decode() no longer crashes on newer PyJWT.
        return token.decode('utf-8') if isinstance(token, bytes) else token
| StarcoderdataPython |
1973096 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ActivityLimitationDTO import ActivityLimitationDTO
from alipay.aop.api.domain.FullDiscountDTO import FullDiscountDTO
from alipay.aop.api.domain.FullGiftDTO import FullGiftDTO
from alipay.aop.api.domain.FullReductionDTO import FullReductionDTO
from alipay.aop.api.domain.RechargeDTO import RechargeDTO
from alipay.aop.api.domain.SpecialPriceDTO import SpecialPriceDTO
class KmsBakingPromotionDTO(object):
    """Generated Alipay transfer object for a baking-shop promotion.

    Machine-generated boilerplate following the Alipay SDK pattern: every
    wire field is a private attribute exposed through a property.  Setters
    for nested-DTO fields (activity_limitation, full_discount, full_gift,
    full_reduction, recharge, special_price) accept either an instance of
    the matching DTO class or a plain dict, which is converted via that
    class's from_alipay_dict().  All other fields hold plain values.
    Serialization to/from the Alipay wire dict is done by to_alipay_dict()
    and from_alipay_dict().  Avoid hand-editing beyond comments: the layout
    mirrors the SDK generator's conventions.
    """

    def __init__(self):
        # All fields default to None; fields that are still falsy at
        # serialization time are omitted from the wire dict.
        self._activity_limitation = None
        self._available_date = None
        self._available_end_time = None
        self._available_start_time = None
        self._data_id = None
        self._description = None
        self._end_time = None
        self._exclusive = None
        self._full_discount = None
        self._full_gift = None
        self._full_reduction = None
        self._member_promotion = None
        self._operation_name = None
        self._operation_time = None
        self._promotion_channel = None
        self._promotion_id = None
        self._promotion_name = None
        self._promotion_scope = None
        self._promotion_type = None
        self._recharge = None
        self._special_price = None
        self._start_time = None

    # Nested-DTO field: setter coerces dicts via ActivityLimitationDTO.
    @property
    def activity_limitation(self):
        return self._activity_limitation

    @activity_limitation.setter
    def activity_limitation(self, value):
        if isinstance(value, ActivityLimitationDTO):
            self._activity_limitation = value
        else:
            self._activity_limitation = ActivityLimitationDTO.from_alipay_dict(value)

    @property
    def available_date(self):
        return self._available_date

    @available_date.setter
    def available_date(self, value):
        self._available_date = value

    @property
    def available_end_time(self):
        return self._available_end_time

    @available_end_time.setter
    def available_end_time(self, value):
        self._available_end_time = value

    @property
    def available_start_time(self):
        return self._available_start_time

    @available_start_time.setter
    def available_start_time(self, value):
        self._available_start_time = value

    @property
    def data_id(self):
        return self._data_id

    @data_id.setter
    def data_id(self, value):
        self._data_id = value

    @property
    def description(self):
        return self._description

    @description.setter
    def description(self, value):
        self._description = value

    @property
    def end_time(self):
        return self._end_time

    @end_time.setter
    def end_time(self, value):
        self._end_time = value

    @property
    def exclusive(self):
        return self._exclusive

    @exclusive.setter
    def exclusive(self, value):
        self._exclusive = value

    # Nested-DTO field: setter coerces dicts via FullDiscountDTO.
    @property
    def full_discount(self):
        return self._full_discount

    @full_discount.setter
    def full_discount(self, value):
        if isinstance(value, FullDiscountDTO):
            self._full_discount = value
        else:
            self._full_discount = FullDiscountDTO.from_alipay_dict(value)

    # Nested-DTO field: setter coerces dicts via FullGiftDTO.
    @property
    def full_gift(self):
        return self._full_gift

    @full_gift.setter
    def full_gift(self, value):
        if isinstance(value, FullGiftDTO):
            self._full_gift = value
        else:
            self._full_gift = FullGiftDTO.from_alipay_dict(value)

    # Nested-DTO field: setter coerces dicts via FullReductionDTO.
    @property
    def full_reduction(self):
        return self._full_reduction

    @full_reduction.setter
    def full_reduction(self, value):
        if isinstance(value, FullReductionDTO):
            self._full_reduction = value
        else:
            self._full_reduction = FullReductionDTO.from_alipay_dict(value)

    @property
    def member_promotion(self):
        return self._member_promotion

    @member_promotion.setter
    def member_promotion(self, value):
        self._member_promotion = value

    @property
    def operation_name(self):
        return self._operation_name

    @operation_name.setter
    def operation_name(self, value):
        self._operation_name = value

    @property
    def operation_time(self):
        return self._operation_time

    @operation_time.setter
    def operation_time(self, value):
        self._operation_time = value

    @property
    def promotion_channel(self):
        return self._promotion_channel

    @promotion_channel.setter
    def promotion_channel(self, value):
        self._promotion_channel = value

    @property
    def promotion_id(self):
        return self._promotion_id

    @promotion_id.setter
    def promotion_id(self, value):
        self._promotion_id = value

    @property
    def promotion_name(self):
        return self._promotion_name

    @promotion_name.setter
    def promotion_name(self, value):
        self._promotion_name = value

    @property
    def promotion_scope(self):
        return self._promotion_scope

    @promotion_scope.setter
    def promotion_scope(self, value):
        self._promotion_scope = value

    @property
    def promotion_type(self):
        return self._promotion_type

    @promotion_type.setter
    def promotion_type(self, value):
        self._promotion_type = value

    # Nested-DTO field: setter coerces dicts via RechargeDTO.
    @property
    def recharge(self):
        return self._recharge

    @recharge.setter
    def recharge(self, value):
        if isinstance(value, RechargeDTO):
            self._recharge = value
        else:
            self._recharge = RechargeDTO.from_alipay_dict(value)

    # Nested-DTO field: setter coerces dicts via SpecialPriceDTO.
    @property
    def special_price(self):
        return self._special_price

    @special_price.setter
    def special_price(self, value):
        if isinstance(value, SpecialPriceDTO):
            self._special_price = value
        else:
            self._special_price = SpecialPriceDTO.from_alipay_dict(value)

    @property
    def start_time(self):
        return self._start_time

    @start_time.setter
    def start_time(self, value):
        self._start_time = value

    def to_alipay_dict(self):
        """Serialize to a plain dict for the Alipay wire format.

        Falsy fields are omitted; values with a to_alipay_dict() method
        (nested DTOs) are serialized recursively, everything else is
        passed through unchanged.
        """
        params = dict()
        if self.activity_limitation:
            if hasattr(self.activity_limitation, 'to_alipay_dict'):
                params['activity_limitation'] = self.activity_limitation.to_alipay_dict()
            else:
                params['activity_limitation'] = self.activity_limitation
        if self.available_date:
            if hasattr(self.available_date, 'to_alipay_dict'):
                params['available_date'] = self.available_date.to_alipay_dict()
            else:
                params['available_date'] = self.available_date
        if self.available_end_time:
            if hasattr(self.available_end_time, 'to_alipay_dict'):
                params['available_end_time'] = self.available_end_time.to_alipay_dict()
            else:
                params['available_end_time'] = self.available_end_time
        if self.available_start_time:
            if hasattr(self.available_start_time, 'to_alipay_dict'):
                params['available_start_time'] = self.available_start_time.to_alipay_dict()
            else:
                params['available_start_time'] = self.available_start_time
        if self.data_id:
            if hasattr(self.data_id, 'to_alipay_dict'):
                params['data_id'] = self.data_id.to_alipay_dict()
            else:
                params['data_id'] = self.data_id
        if self.description:
            if hasattr(self.description, 'to_alipay_dict'):
                params['description'] = self.description.to_alipay_dict()
            else:
                params['description'] = self.description
        if self.end_time:
            if hasattr(self.end_time, 'to_alipay_dict'):
                params['end_time'] = self.end_time.to_alipay_dict()
            else:
                params['end_time'] = self.end_time
        if self.exclusive:
            if hasattr(self.exclusive, 'to_alipay_dict'):
                params['exclusive'] = self.exclusive.to_alipay_dict()
            else:
                params['exclusive'] = self.exclusive
        if self.full_discount:
            if hasattr(self.full_discount, 'to_alipay_dict'):
                params['full_discount'] = self.full_discount.to_alipay_dict()
            else:
                params['full_discount'] = self.full_discount
        if self.full_gift:
            if hasattr(self.full_gift, 'to_alipay_dict'):
                params['full_gift'] = self.full_gift.to_alipay_dict()
            else:
                params['full_gift'] = self.full_gift
        if self.full_reduction:
            if hasattr(self.full_reduction, 'to_alipay_dict'):
                params['full_reduction'] = self.full_reduction.to_alipay_dict()
            else:
                params['full_reduction'] = self.full_reduction
        if self.member_promotion:
            if hasattr(self.member_promotion, 'to_alipay_dict'):
                params['member_promotion'] = self.member_promotion.to_alipay_dict()
            else:
                params['member_promotion'] = self.member_promotion
        if self.operation_name:
            if hasattr(self.operation_name, 'to_alipay_dict'):
                params['operation_name'] = self.operation_name.to_alipay_dict()
            else:
                params['operation_name'] = self.operation_name
        if self.operation_time:
            if hasattr(self.operation_time, 'to_alipay_dict'):
                params['operation_time'] = self.operation_time.to_alipay_dict()
            else:
                params['operation_time'] = self.operation_time
        if self.promotion_channel:
            if hasattr(self.promotion_channel, 'to_alipay_dict'):
                params['promotion_channel'] = self.promotion_channel.to_alipay_dict()
            else:
                params['promotion_channel'] = self.promotion_channel
        if self.promotion_id:
            if hasattr(self.promotion_id, 'to_alipay_dict'):
                params['promotion_id'] = self.promotion_id.to_alipay_dict()
            else:
                params['promotion_id'] = self.promotion_id
        if self.promotion_name:
            if hasattr(self.promotion_name, 'to_alipay_dict'):
                params['promotion_name'] = self.promotion_name.to_alipay_dict()
            else:
                params['promotion_name'] = self.promotion_name
        if self.promotion_scope:
            if hasattr(self.promotion_scope, 'to_alipay_dict'):
                params['promotion_scope'] = self.promotion_scope.to_alipay_dict()
            else:
                params['promotion_scope'] = self.promotion_scope
        if self.promotion_type:
            if hasattr(self.promotion_type, 'to_alipay_dict'):
                params['promotion_type'] = self.promotion_type.to_alipay_dict()
            else:
                params['promotion_type'] = self.promotion_type
        if self.recharge:
            if hasattr(self.recharge, 'to_alipay_dict'):
                params['recharge'] = self.recharge.to_alipay_dict()
            else:
                params['recharge'] = self.recharge
        if self.special_price:
            if hasattr(self.special_price, 'to_alipay_dict'):
                params['special_price'] = self.special_price.to_alipay_dict()
            else:
                params['special_price'] = self.special_price
        if self.start_time:
            if hasattr(self.start_time, 'to_alipay_dict'):
                params['start_time'] = self.start_time.to_alipay_dict()
            else:
                params['start_time'] = self.start_time
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a KmsBakingPromotionDTO from a wire dict.

        Returns None for a falsy input.  Assignment goes through the
        property setters, so nested-DTO fields are converted from dicts
        automatically.
        """
        if not d:
            return None
        o = KmsBakingPromotionDTO()
        if 'activity_limitation' in d:
            o.activity_limitation = d['activity_limitation']
        if 'available_date' in d:
            o.available_date = d['available_date']
        if 'available_end_time' in d:
            o.available_end_time = d['available_end_time']
        if 'available_start_time' in d:
            o.available_start_time = d['available_start_time']
        if 'data_id' in d:
            o.data_id = d['data_id']
        if 'description' in d:
            o.description = d['description']
        if 'end_time' in d:
            o.end_time = d['end_time']
        if 'exclusive' in d:
            o.exclusive = d['exclusive']
        if 'full_discount' in d:
            o.full_discount = d['full_discount']
        if 'full_gift' in d:
            o.full_gift = d['full_gift']
        if 'full_reduction' in d:
            o.full_reduction = d['full_reduction']
        if 'member_promotion' in d:
            o.member_promotion = d['member_promotion']
        if 'operation_name' in d:
            o.operation_name = d['operation_name']
        if 'operation_time' in d:
            o.operation_time = d['operation_time']
        if 'promotion_channel' in d:
            o.promotion_channel = d['promotion_channel']
        if 'promotion_id' in d:
            o.promotion_id = d['promotion_id']
        if 'promotion_name' in d:
            o.promotion_name = d['promotion_name']
        if 'promotion_scope' in d:
            o.promotion_scope = d['promotion_scope']
        if 'promotion_type' in d:
            o.promotion_type = d['promotion_type']
        if 'recharge' in d:
            o.recharge = d['recharge']
        if 'special_price' in d:
            o.special_price = d['special_price']
        if 'start_time' in d:
            o.start_time = d['start_time']
        return o
| StarcoderdataPython |
9721426 | #!/usr/bin/env python3
# copy_pins_for_duino.py 26.4.2021/pekka
# Copies pins library files needed for PlatformIO Arduino build
# into /coderoot/lib/arduino-platformio/pins directory.
# To make this look like Arduino library all .c and .cpp
# files are copied to target root folder, and all header
# files into subfolders.
from os import listdir, makedirs
from os.path import isfile, isdir, join, splitext, exists
from shutil import copyfile
import sys
def mymakedir(targetdir):
    """Create *targetdir* (including parents) unless it already exists."""
    if exists(targetdir):
        return
    makedirs(targetdir)
def copy_level_4(sourcedir, roottargetdir, targetdir):
    """Copy one platform folder: headers into *targetdir*, sources into *roottargetdir*.

    Headers keep the subfolder layout (the target directory is created lazily,
    only when a header is actually found); .c/.cpp files are flattened into
    the library root so the Arduino build picks them up.
    """
    for name in listdir(sourcedir):
        path = join(sourcedir, name)
        if not isfile(path):
            continue
        ext = splitext(path)[1]
        if ext == '.h':
            mymakedir(targetdir)
            copyfile(path, join(targetdir, name))
        elif ext in ('.c', '.cpp'):
            copyfile(path, join(roottargetdir, name))
def copy_level_3(sourcedir, roottargetdir, targetdir, platforms):
    """Descend into per-platform subfolders, copying only the selected platforms."""
    for name in listdir(sourcedir):
        if isdir(join(sourcedir, name)) and name in platforms:
            copy_level_4(sourcedir + '/' + name, roottargetdir, targetdir + '/' + name)
def copy_level_2(sourcedir, roottargetdir, targetdir, platforms):
    """Recurse one level down into every extension subfolder."""
    for name in listdir(sourcedir):
        if isdir(join(sourcedir, name)):
            copy_level_3(sourcedir + '/' + name, roottargetdir, targetdir + '/' + name, platforms)
def copy_info(f, sourcedir, targetdir):
    """Copy an optional Arduino metadata file (e.g. library.json) if it exists.

    The file is looked up under ``<sourcedir>/osbuild/duino-library``; a
    missing file is silently skipped.
    """
    src = join(sourcedir + '/osbuild/duino-library', f)
    if exists(src):
        copyfile(src, join(targetdir, f))
def copy_level_1(sourcedir, targetdir, platforms):
    """Assemble the Arduino-style pins library tree under *targetdir*.

    Top-level headers are copied as-is; the ``code`` and ``extensions``
    trees are filtered by *platforms*; Arduino metadata files come last.
    """
    mymakedir(targetdir)
    # Top-level header files go straight into the library root.
    for name in listdir(sourcedir):
        path = join(sourcedir, name)
        if isfile(path) and splitext(path)[1] == '.h':
            copyfile(path, join(targetdir, name))
    # Platform-filtered code and extensions folders.
    copy_level_3(sourcedir + '/code', targetdir, targetdir + '/code', platforms)
    copy_level_2(sourcedir + '/extensions', targetdir, targetdir + '/extensions', platforms)
    # Informative Arduino library descriptors (optional).
    copy_info('library.json', sourcedir, targetdir)
    copy_info('library.properties', sourcedir, targetdir)
def mymain():
    """Parse the command line and copy the pins library for the chosen platforms.

    Positional arguments are platform names (in addition to "common");
    ``-o <dir>`` overrides the default output directory.
    """
    platforms = ["common"]
    outdir = "/coderoot/lib/arduino-platformio/pins"
    expect_platform = True
    for arg in sys.argv[1:]:
        if arg[0] == "-":
            # The next non-option argument is the output directory.
            if arg[1] == "o":
                expect_platform = False
        elif expect_platform:
            platforms.append(arg)
        else:
            outdir = arg
            expect_platform = True
    copy_level_1("/coderoot/pins", outdir, platforms)
# Usage copy_pins_for_duino.py esp32 -o /coderoot/lib/esp32/pins
# Guard the entry point so importing this module does not trigger the copy
# (previously mymain() ran unconditionally on import).
if __name__ == "__main__":
    mymain()
| StarcoderdataPython |
9781629 | <filename>starthinker/task/traffic/landing_page.py
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
"""Handles creation and updates of landing pages."""
from starthinker.task.traffic.dao import BaseDAO
from starthinker.task.traffic.feed import FieldMap
class LandingPageDAO(BaseDAO):
    """Data access object for CM360 advertiser landing pages.

    Specializes BaseDAO with the field mappings and create/update logic
    needed to sync landing pages from a Bulkdozer feed.
    """

    def __init__(self, config, auth, profile_id, is_admin):
        """Initializes the DAO with credentials and landing-page field wiring."""
        super(LandingPageDAO, self).__init__(config, auth, profile_id, is_admin)
        self._entity = 'LANDING_PAGE'
        self._list_name = 'landingPages'
        self._id_field = FieldMap.CAMPAIGN_LANDING_PAGE_ID
        self._search_field = FieldMap.CAMPAIGN_LANDING_PAGE_NAME
        self._parent_filter_name = 'advertiserIds'
        self._parent_filter_field_name = FieldMap.ADVERTISER_ID
        self._parent_dao = None

    def _api(self, iterate=False):
        """Returns the advertiserLandingPages service of the DCM API."""
        return super(LandingPageDAO, self)._api(iterate).advertiserLandingPages()

    def _process_update(self, item, feed_item):
        """Copies name and URL from the feed row onto *item* (updated in place).

        Args:
            item: Landing-page object to update.
            feed_item: Bulkdozer feed row with the new values.
        """
        item['name'] = feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_NAME, None)
        item['url'] = feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_URL, None)

    def _process_new(self, feed_item):
        """Builds a brand-new landing-page payload from a feed row.

        Args:
            feed_item: Bulkdozer feed row describing the landing page.

        Returns:
            A dict ready to be inserted in DCM through the API.
        """
        payload = {}
        payload['name'] = feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_NAME, None)
        payload['url'] = feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_URL, None)
        payload['advertiserId'] = feed_item.get(FieldMap.ADVERTISER_ID, None)
        return payload
| StarcoderdataPython |
5095912 | import torch
from torch_geometric.nn import knn
from torch_scatter import scatter_add
def knn_interpolate(x, pos_x, pos_y, batch_x=None, batch_y=None, k=3):
    r"""Interpolate features onto new positions via inverse-distance k-NN.

    Implements the k-NN interpolation scheme of the `"PointNet++: Deep
    Hierarchical Feature Learning on Point Sets in a Metric Space"
    <https://arxiv.org/abs/1706.02413>`_ paper: each target point :math:`y`
    receives the weighted average of the features of its :math:`k` nearest
    source points, with weights
    :math:`w(x_i) = 1 / d(\mathbf{p}(y), \mathbf{p}(x_i))^2`.

    Args:
        x (Tensor): Source node feature matrix
            :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
        pos_x (Tensor): Source node positions :math:`\in \mathbb{R}^{N \times d}`.
        pos_y (Tensor): Target (upsampled) node positions
            :math:`\in \mathbb{R}^{M \times d}`.
        batch_x (LongTensor, optional): Batch assignment of source nodes.
            (default: :obj:`None`)
        batch_y (LongTensor, optional): Batch assignment of target nodes.
            (default: :obj:`None`)
        k (int, optional): Number of neighbors. (default: :obj:`3`)
    """
    # Neighbor search and weight computation carry no gradients.
    with torch.no_grad():
        assign_index = knn(pos_x, pos_y, k, batch_x=batch_x, batch_y=batch_y)
        y_idx, x_idx = assign_index
        delta = pos_x[x_idx] - pos_y[y_idx]
        dist_sq = (delta * delta).sum(dim=-1, keepdim=True)
        # Clamping avoids division by zero for coincident points.
        inv_dist = 1.0 / torch.clamp(dist_sq, min=1e-16)

    num_targets = pos_y.size(0)
    numerator = scatter_add(x[x_idx] * inv_dist, y_idx, dim=0, dim_size=num_targets)
    denominator = scatter_add(inv_dist, y_idx, dim=0, dim_size=num_targets)
    return numerator / denominator
| StarcoderdataPython |
class Solution:
    """
    @param a: a string
    @param b: a string
    @return: a string representing their multiplication
    """
    def complexNumberMultiply(self, a, b):
        """Multiply two complex numbers given as '<real>+<imag>i' strings."""
        def parse(s):
            # Drop the trailing 'i' and split into real/imaginary parts.
            real, imag = s[:-1].split('+')
            return int(real), int(imag)

        ra, ia = parse(a)
        rb, ib = parse(b)
        real_part = ra * rb - ia * ib
        imag_part = ra * ib + rb * ia
        return '{}+{}i'.format(real_part, imag_part)
| StarcoderdataPython |
1716284 | <filename>NeuroMechFly/sdf/units.py
"""
-----------------------------------------------------------------------
Copyright 2018-2020 <NAME>, <NAME>
Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-----------------------------------------------------------------------
Module to apply units
"""
from ..utils.options import Options
class SimulationUnitScaling(Options):
    """Conversion factors between real-world units and simulation units.

    1 [m] in reality  = ``meters``    [m]  in simulation
    1 [s] in reality  = ``seconds``   [s]  in simulation
    1 [kg] in reality = ``kilograms`` [kg] in simulation

    The derived properties below combine these three base factors into the
    scalings for compound physical quantities.
    """

    def __init__(self, meters=1, seconds=1, kilograms=1):
        super(SimulationUnitScaling, self).__init__()
        self.meters = meters
        self.seconds = seconds
        self.kilograms = kilograms

    @property
    def hertz(self):
        """Frequency scaling: 1 / seconds."""
        return 1./self.seconds

    @property
    def velocity(self):
        """Velocity scaling: meters / seconds."""
        return self.meters/self.seconds

    @property
    def acceleration(self):
        """Acceleration scaling: velocity / seconds."""
        return self.velocity/self.seconds

    @property
    def gravity(self):
        """Gravity scaling (alias of acceleration)."""
        return self.acceleration

    @property
    def newtons(self):
        """Force scaling: kilograms * acceleration."""
        return self.kilograms*self.acceleration

    @property
    def torques(self):
        """Torque scaling: newtons * meters."""
        return self.newtons*self.meters

    @property
    def volume(self):
        """Volume scaling: meters ** 3."""
        return self.meters**3

    @property
    def density(self):
        """Density scaling: kilograms / volume."""
        return self.kilograms/self.volume
| StarcoderdataPython |
3478066 | <gh_stars>1-10
import os
import sys
import urllib
from urlparse import parse_qs
import xbmc
import xbmcaddon
import StorageServer
class SettingsValue(object):
    """
    Contains a single value obtained from the add-on settings, with lossy
    conversions to str/int/float/bool (invalid numbers become 0).
    """
    def __init__(self, name, value):
        """
        :param str name: Value name. It is not changeable later on.
        :param str value: Settings value (surrounding whitespace is stripped).
        """
        self._name = name
        self._value = value.strip()

    def __str__(self):
        """
        Return value as string.
        :return: str
        """
        return self._value

    def __int__(self):
        """
        Return value as int or return 0 for non-integer values.
        :return: int
        """
        try:
            return int(self._value)
        except ValueError:
            return 0

    def __float__(self):
        """
        Return value as float or return 0 for non-float values.
        :return: float
        """
        try:
            return float(self._value)
        except ValueError:
            return 0.0

    def __bool__(self):
        """
        Return value as boolean (True only for the string "true", any case).
        :return: bool
        """
        return self._value.lower() == "true"

    # BUG FIX: this plugin runs under Python 2 (see the `urlparse` import),
    # where truth testing calls __nonzero__, not __bool__ -- without this
    # alias every SettingsValue was truthy regardless of its content.
    __nonzero__ = __bool__

    def setValue(self, value):
        """
        Set new value. You still have to save it by setting it back to the
        `Settings` class!
        :param value: New value.
        """
        self._value = value
class Settings(object):
    """
    Attribute-style access to the add-on configuration: reading an attribute
    fetches the setting, assigning one persists it.
    """
    def __getattr__(self, name):
        """Read setting *name* and wrap it in a SettingsValue."""
        raw = xbmcaddon.Addon().getSetting(name)
        if len(raw) == 0:
            raw = ""
        return SettingsValue(name, raw)

    def __setattr__(self, key, value):
        """
        Persist *value* under setting *key*.
        :param key: Setting name.
        :param value: New value; may be a string or a SettingsValue.
        """
        xbmcaddon.Addon().setSetting(key, str(value))

    def showDialog(self):
        """
        Open the settings dialog so the user can change the configuration.
        """
        xbmcaddon.Addon().openSettings()
def getCacheServer(timeout=24):
    """
    Return the persistent plugin cache, cleared after *timeout* hours.
    :param int timeout: Number of hours for cache persistence
    :return: StorageServer
    """
    cache = StorageServer.StorageServer("plugin_video_sledujufilmy", timeout)
    return cache
def buildUrl(base_path, data):
    """
    Compose a URL that links to another part of this plugin.
    :param base_path: Path to plugin root.
    :param data: Mapping encoded into the query string.
    :return: str
    """
    query = urllib.urlencode(data)
    return "{0}?{1}".format(base_path, query)
def getMediaResource(resourceName):
    """
    Return the full path of a file in the add-on ``resources/media`` folder,
    or an empty string if the file does not exist.
    :param str resourceName: Resource file name
    :return: str
    """
    addon_path = xbmcaddon.Addon().getAddonInfo("path")
    candidate = os.path.join(addon_path, "resources", "media", resourceName)
    if not os.path.exists(candidate):
        return ""
    return candidate
def parseQueryString():
    """
    Return the plugin query string (third command line argument) as a dict,
    keeping only the first value of each parameter.
    :return: dict
    """
    raw = parse_qs(sys.argv[2].lstrip("?"))
    return {
        key: (value[0] if isinstance(value, list) else value)
        for key, value in raw.items()
    }
def showNotification(header, message):
    """
    Display an informative toast notification in Kodi.
    :param str header: Notification title
    :param str message: Notification text
    """
    command = "Notification(%s, %s)" % (header, message)
    xbmc.executebuiltin(command)
| StarcoderdataPython |
6546024 | <filename>audit/middleware.py
from django.db.models.signals import post_init
from django.utils.functional import curry
from audit.signals import pass_audit_user
class AuditMiddleware(object):
    """Registers a post_init handler for all models so the actor of a model
    change is logged properly.
    """

    def process_request(self, request):
        """Bind the authenticated request user (or None) into the audit signal."""
        authenticated = hasattr(request, 'user') and request.user.is_authenticated()
        user = request.user if authenticated else None
        user_passer = curry(pass_audit_user, user)
        # dispatch_uid keyed on the request keeps one handler per request.
        post_init.connect(
            user_passer,
            weak=False,
            dispatch_uid=('audit_post_init_middleware', request),
        )
9752098 | <filename>teledump/settings/ChatDumpMetaFile.py
""" This Module contains classes related to Metadata Files"""
import codecs
import errno
import json
import logging
import os.path
from . import ChatDumpSettings
from ..exceptions import MetaFileError
class ChatDumpMetaFile:
    """CRUD wrapper for the JSON metadata file stored next to a chat dump."""

    # JSON keys of the serialized metadata document (format version 1).
    # (Annotations corrected: all keys are strings, including key_version.)
    key_version: str = 'version'
    key_chatName: str = "chat-name"
    key_LastMessageId: str = "latest-message-id"
    key_exporter: str = "exporter-name"
    key_exporterConfig: str = "exporter-config"
    key_filter: str = "filter-name"

    def __init__(self, path: str):
        """:param path: path of the dump file; metadata lives at ``path + '.meta'``."""
        self._logger: logging.Logger = logging.getLogger(__name__)
        self._path: str = path + '.meta'
        # Cached metadata document; empty until _load() or save() fills it.
        self._data: dict = {}

    def merge(self, settings: ChatDumpSettings) -> None:
        """Load the metadata file (at most once) and copy its values into *settings*.

        :raises MetaFileError: if the file is missing or contains invalid JSON.
        NOTE(review): an absent key raises a plain KeyError here rather than a
        MetaFileError -- confirm whether that is intended.
        """
        if not self._data:
            self._load()
        settings.chatName = self._data[ChatDumpMetaFile.key_chatName]
        settings.idLastMessage = self._data[ChatDumpMetaFile.key_LastMessageId]
        settings.exporter = self._data[ChatDumpMetaFile.key_exporter]
        settings.exporterConfig = self._data[ChatDumpMetaFile.key_exporterConfig]
        settings.filter = self._data[ChatDumpMetaFile.key_filter]

    def _load(self) -> None:
        """Load metadata from the ``.meta`` file into ``self._data``.

        :raises MetaFileError: on I/O failure or malformed JSON.
        """
        try:
            self._logger.debug('Load metafile %s.', self._path)
            with codecs.open(self._path, 'r', 'utf-8') as ff:
                self._data = json.load(ff)
            # TODO Validate Meta Dict
        except OSError as ex:
            msg = 'Unable to open the metadata file "{}". {}'.format(self._path, ex.strerror)
            raise MetaFileError(msg) from ex
        except ValueError as ex:
            # NOTE(review): the message says "AttributeError" but this branch
            # catches ValueError (invalid JSON) -- looks like a copy/paste slip.
            msg = 'Unable to load the metadata file "{}". AttributeError: {}'.format(self._path, ex)
            raise MetaFileError(msg) from ex

    def delete(self) -> None:
        """Delete the metadata file; a missing file (ENOENT) is not an error.

        :raises MetaFileError: on any other OS error.
        """
        try:
            self._logger.debug('Delete old metadata file %s.', self._path)
            os.remove(self._path)
        except OSError as ex:
            # ENOENT simply means there was nothing to delete.
            if ex.errno != errno.ENOENT:
                msg = 'Failed to delete old metadata file. {}'.format(ex.strerror)
                raise MetaFileError(msg)

    def save(self, data: dict) -> None:
        """Write the known keys of *data*, plus the format version, to the file.

        :raises MetaFileError: on I/O failure.
        """
        try:
            self._logger.debug('Save new metadata file %s.', self._path)
            self._add_version()
            self._add_key(data, ChatDumpMetaFile.key_chatName)
            self._add_key(data, ChatDumpMetaFile.key_LastMessageId)
            self._add_key(data, ChatDumpMetaFile.key_exporter)
            self._add_key(data, ChatDumpMetaFile.key_exporterConfig)
            self._add_key(data, ChatDumpMetaFile.key_filter)
            with open(self._path, 'w') as mf:
                json.dump(self._data, mf, indent=4, sort_keys=False)
        except OSError as ex:
            msg = 'Failed to write the metadata file. {}'.format(ex.strerror);
            raise MetaFileError(msg)

    def _add_key(self, data: dict, key: str) -> None:
        # Copy a single key from *data* into the cached document, if present.
        if key in data:
            self._data[key] = data[key]

    def _add_version(self) -> None:
        # Stamp the document with format version 1 (initializing the cache).
        if not self._data:
            self._data = {}
        self._data[ChatDumpMetaFile.key_version] = 1
| StarcoderdataPython |
3273380 | """Nonholonomic vehicle system."""
import gym
from gym_socks.envs.dynamical_system import DynamicalSystem
import numpy as np
from scipy.integrate import solve_ivp
class NonholonomicVehicleEnv(DynamicalSystem):
    """Nonholonomic vehicle system.
    Bases: :py:class:`gym_socks.envs.dynamical_system.DynamicalSystem`
    A nonholonomic vehicle (car-like) is typically modeled using what are known as
    "unicycle" dynamics. It is useful for modeling vehicles which can move forward and
    backward, and incorporates a steering angle or heading. The inputs are the velocity
    and change in steering angle.
    """

    def __init__(self, seed=None, *args, **kwargs):
        """Initialize the 3-D state/observation spaces, 2-D action space, and RNG."""
        super().__init__(*args, **kwargs)
        self.observation_space = gym.spaces.Box(
            low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32
        )
        self.state_space = gym.spaces.Box(
            low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32
        )
        self.action_space = gym.spaces.Box(
            low=-np.inf, high=np.inf, shape=(2,), dtype=np.float32
        )
        # State is unset until reset; the third component is treated as an
        # angle below (wrapped mod 2*pi), presumably the heading.
        self.state = None
        self.seed(seed=seed)

    def step(self, action, time=0):
        """Advance the system one sampling interval under *action*.

        Integrates the noisy dynamics either with a single explicit Euler step
        (when ``self._euler`` is truthy) or with ``scipy.integrate.solve_ivp``.
        Returns ``(observation, cost, done, info)``; ``done`` is always False.
        """
        action = np.asarray(action, dtype=np.float32)
        err_msg = "%r (%s) invalid" % (action, type(action))
        assert self.action_space.contains(action), err_msg
        # One disturbance sample is drawn per step and held fixed during
        # integration.
        disturbance = self.generate_disturbance(time, self.state, action)
        # NOTE(review): self._euler is not set in __init__; assumed to be
        # provided by the DynamicalSystem base class -- confirm.
        if self._euler is True:
            # Single explicit Euler step.
            next_state = self.state + self.sampling_time * self.dynamics(
                time, self.state, action, disturbance
            )
            self.state = next_state
        else:
            # solve the initial value problem
            sol = solve_ivp(
                self.dynamics,
                [time, time + self.sampling_time],
                self.state,
                args=(
                    action,
                    disturbance,
                ),
            )
            *_, self.state = sol.y.T
        # correct the angle: wrap the heading back into [0, 2*pi)
        if np.abs(self.state[2]) >= 2 * np.pi:
            self.state[2] %= 2 * np.pi
        observation = self.generate_observation(time, self.state, action)
        cost = self.cost(time, self.state, action)
        done = False
        info = {}
        return observation, cost, done, info

    def generate_disturbance(self, time, state, action):
        """Sample additive process noise: zero-mean Gaussian with std 1e-2 per dim."""
        w = self.np_random.standard_normal(size=self.state_space.shape)
        return 1e-2 * np.array(w)

    def dynamics(self, time, state, action, disturbance):
        """Unicycle dynamics: return the state derivative for the integrator.

        With speed u1 and steering rate u2:
            dx1 = u1*sin(x3) + w1, dx2 = u1*cos(x3) + w2, dx3 = u2 + w3.
        NOTE(review): sin on the first coordinate / cos on the second is the
        transpose of the common convention -- confirm the intended frame.
        """
        x1, x2, x3 = state
        u1, u2 = action
        w1, w2, w3 = disturbance
        dx1 = u1 * np.sin(x3) + w1
        dx2 = u1 * np.cos(x3) + w2
        dx3 = u2 + w3
        return np.array([dx1, dx2, dx3], dtype=np.float32)
| StarcoderdataPython |
3498466 | <reponame>pkyIntelligence/FasterRCNN
from .evaluator import DatasetEvaluator, inference_on_dataset
from .testing import print_csv_format, verify_results
# Re-export every public (non-underscore) name imported above.
__all__ = [name for name in globals() if not name.startswith("_")]
| StarcoderdataPython |
9692910 | <filename>examples/AutomotiveCybersecurity/notebooks/_build/jupyter_execute/Summary & Further Resources.py
# Summary & Further Resources
That was a long tutorial! But hey, when was the last time you've seen a non-trivial one?
To recap, the H1ST.AI principles & ideas we've learned:
* Leverage use-case analysis to decompose problems and adopt different models at the right level of abstractions
* Encoding human experience as a model
* Combine human experience and data-driven insights to work harmoniously in a H1st Graph
Most importantly, we have used H1ST.AI to tackle a real-world challenging automotive cybersecurity problem, for which attack event labels are not available to start with, hence solving the Cold Start problem.
It is important to stress that this is still a toy example IDS and much more is needed to handle attacks (e.g. replacement attacks where a whole ECU can be compromised & normal messages silenced and there won’t be a zig-zag pattern) and of course on-device vs cloud deployment, OTA updates, etc. But it is clear adopting H1ST.AI makes the problem much more tractable and explainable.
H1ST.AI framework further provides productivity tools for a team of Data Scientists and domain experts to collaborate on such complex software projects. Especially, we’ve seen our own productivity improve vastly when moving from a spaghetti code jungle of ML to a more principled H1ST project structure, making use of the H1ST Model API & repository as well as Graph.
Excited? [Star/fork our Github repo](https://github.com/h1st-ai/h1st), we're open-source! Especially check out the "Quick Start" section. | StarcoderdataPython |
# Simple interactive command console for a toy "car" (Romanian prompts).
comanda = ""
while comanda != "quit":
    comanda = input("> ").lower()
    if comanda == "start":
        print("Masina a pornit")
    elif comanda == "stop":
        print("Masina sa oprit")
    elif comanda == "help":
        print('''
Start - Masina porneste
Stop - Masina se opreste
quit - A iesi
''')
    elif comanda == "quit":
        # Bug fix: previously "quit" fell through to the error branch and
        # printed 'Nu intaleg asta' before the loop exited.
        pass
    else:
        print('Nu intaleg asta')
| StarcoderdataPython |
9723012 | # Modified by Augmented Startups & <NAME>
# October 2020
# Facial Recognition Attendence GUI
# Full Course - https://augmentedstartups.info/yolov4release
# *-
import sys
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication, QDialog
import resource
# from model import Model
from out_window import Ui_OutputDialog
class Ui_Dialog(QDialog):
    """Main window of the attendance GUI; launches the video output dialog."""

    def __init__(self):
        super(Ui_Dialog, self).__init__()
        loadUi("mainwindow.ui", self)
        self.runButton.clicked.connect(self.runSlot)
        self._new_window = None      # Ui_OutputDialog, created on demand in outputWindow_()
        self.Videocapture_ = None    # capture source string, set by refreshAll()

    def refreshAll(self):
        """
        Refresh run parameters: select webcam index "0" as the capture source.
        """
        self.Videocapture_ = "0"

    @pyqtSlot()
    def runSlot(self):
        """
        Called when the user presses the Run button
        """
        print("Clicked Run")
        self.refreshAll()
        print(self.Videocapture_)
        # NOTE(review): relies on the module-global `ui` created in __main__;
        # confirm this class is only ever instantiated there.
        ui.hide()  # hide the main window
        self.outputWindow_()  # Create and open new output window

    def outputWindow_(self):
        """
        Create a new window for visual output of the video in the GUI
        """
        self._new_window = Ui_OutputDialog()
        self._new_window.show()
        self._new_window.startVideo(self.Videocapture_)
        print("Video Played")
if __name__ == "__main__":
    # Start the Qt application and show the main attendance window.
    app = QApplication(sys.argv)
    ui = Ui_Dialog()
    ui.show()
    sys.exit(app.exec_())  # run the event loop until the GUI exits
| StarcoderdataPython |
1796964 | <filename>setup.py
"""
Copyright 2016 Load Impact
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import setup
import os
import sys
# Make the package directory importable so the version can be read without
# installing the package first.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'loadimpactcli'))

from version import __version__

setup(
    name='loadimpact-cli',
    version=__version__,
    author='<NAME>',
    author_email='<EMAIL>',
    url='http://developers.loadimpact.com/',
    packages=['loadimpactcli'],
    py_modules=['loadimpactcli'],
    license='LICENSE.txt',
    description="The Load Impact CLI interfaces with Load Impact's cloud-based performance testing platform",
    include_package_data=True,
    data_files=[('', ['README.md'])],
    install_requires=[
        'setuptools>=18',
        'click',
        'loadimpact-v3',
        'tzlocal',
        'six',
        'mock',
        'enum34'
    ],
    # BUG FIX: the setuptools keyword is `tests_require`; the previous
    # `test_requires` spelling is not a recognized argument, so the coverage
    # test dependency was silently ignored.
    tests_require=['coverage'],
    entry_points={
        'console_scripts': [
            'loadimpact=loadimpactcli.loadimpact_cli:run_cli',
        ],
    },
    test_suite='tests',
)
| StarcoderdataPython |
258517 | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'range_pop.ui'
##
## Created by: Qt User Interface Compiler version 6.1.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import * # type: ignore
from PySide6.QtGui import * # type: ignore
from PySide6.QtWidgets import * # type: ignore
from . resources_rc import *
class Ui_range_pop(object):
def setupUi(self, range_pop):
if not range_pop.objectName():
range_pop.setObjectName(u"range_pop")
range_pop.setWindowModality(Qt.NonModal)
range_pop.resize(450, 130)
range_pop.setMinimumSize(QSize(450, 130))
range_pop.setSizeGripEnabled(False)
range_pop.setModal(False)
self.gridLayout = QGridLayout(range_pop)
self.gridLayout.setObjectName(u"gridLayout")
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.outerframe = QFrame(range_pop)
self.outerframe.setObjectName(u"outerframe")
self.outerframe.setMinimumSize(QSize(0, 0))
self.outerframe.setStyleSheet(u"#confirm_button, #discard_button, #reset_button{\n"
" font: 14pt \"Calibri\";\n"
" color: rgb(221, 221, 221);\n"
" border: none;\n"
" border-radius: 12px; \n"
" background-color: rgb(122, 115, 227);\n"
"}\n"
"#confirm_button:hover, #discard_button:hover, #reset_button:hover {\n"
" border: 2px solid rgb(193, 193, 255);\n"
"}\n"
"#confirm_button:pressed, #discard_button:pressed, #reset_button:pressed { \n"
" background-color: rgb(116, 174, 212);\n"
" border: 2px solid rgb(43, 50, 61);\n"
"}\n"
" #set_unit, #to_label, #plus_label, #set_value, #min_value, #max_value, #incre_value{\n"
" font: 16pt \"Calibri\";\n"
" color: rgb(221, 221, 221);\n"
" background-color: rgb(40, 44, 52);\n"
" border: none\n"
"}\n"
"#set_value:hover ,#min_value:hover , #max_value:hover , #incre_value:hover {\n"
" color: rgb(245, 245, 245);\n"
"}\n"
"#outerframe{\n"
" background-color: rgb(40, 44, 52);\n"
" border: 5px solid rgb(122, 115, 227);\n"
" border-radius: 30px;\n"
"}\n"
"#combobox{\n"
" font: 14pt \"Calibri\";\n"
" color: rgb(221, "
"221, 221);\n"
" background-color: rgb(27, 29, 35);\n"
" border-radius: 5px;\n"
" border: 3px solid rgb(27, 29, 35);\n"
" padding: 5px;\n"
"}\n"
"#combobox:hover{\n"
" border: 3px solid rgb(64, 71, 88);\n"
"}\n"
"#combobox::down-arrow {\n"
" image: url(:/icons/images/icons/cil-arrow-bottom.png);\n"
"}\n"
"#combobox::down-arrow:on {\n"
" image: url(:/icons/images/icons/cil-arrow-top.png);\n"
"}\n"
"#combobox::drop-down {\n"
" subcontrol-origin: padding;\n"
" subcontrol-position: top right;\n"
" width: 25px;\n"
" border-left: 3px solid rgba(40, 44, 52, 150);\n"
" border-top-right-radius: 3px;\n"
" border-bottom-right-radius: 3px;\n"
"}\n"
"#combobox QAbstractItemView {\n"
" outline: none;\n"
" color: rgb(234, 194, 237);\n"
" background-color: rgb(27, 29, 35);\n"
" padding: 10px;\n"
" selection-color: rgb(234, 194, 237);\n"
" selection-background-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:0.8, y2:0.5, stop:0 rgba(122, 115, 227,160), stop:1 rgba(122, 115, 227, 20));\n"
"}")
self.outerframe.setFrameShape(QFrame.Box)
self.outerframe.setFrameShadow(QFrame.Raised)
self.horizontalLayoutWidget = QWidget(self.outerframe)
self.horizontalLayoutWidget.setObjectName(u"horizontalLayoutWidget")
self.horizontalLayoutWidget.setGeometry(QRect(0, 80, 451, 37))
self.horizontalLayout = QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.reset_button = QPushButton(self.horizontalLayoutWidget)
self.reset_button.setObjectName(u"reset_button")
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.reset_button.sizePolicy().hasHeightForWidth())
self.reset_button.setSizePolicy(sizePolicy)
self.reset_button.setMinimumSize(QSize(85, 32))
self.reset_button.setFocusPolicy(Qt.ClickFocus)
self.reset_button.setAutoDefault(False)
self.horizontalLayout.addWidget(self.reset_button)
self.discard_button = QPushButton(self.horizontalLayoutWidget)
self.discard_button.setObjectName(u"discard_button")
sizePolicy.setHeightForWidth(self.discard_button.sizePolicy().hasHeightForWidth())
self.discard_button.setSizePolicy(sizePolicy)
self.discard_button.setMinimumSize(QSize(85, 32))
self.discard_button.setFocusPolicy(Qt.ClickFocus)
self.discard_button.setAutoDefault(False)
self.horizontalLayout.addWidget(self.discard_button)
self.confirm_button = QPushButton(self.horizontalLayoutWidget)
self.confirm_button.setObjectName(u"confirm_button")
sizePolicy.setHeightForWidth(self.confirm_button.sizePolicy().hasHeightForWidth())
self.confirm_button.setSizePolicy(sizePolicy)
self.confirm_button.setMinimumSize(QSize(85, 32))
self.confirm_button.setFocusPolicy(Qt.ClickFocus)
self.confirm_button.setAutoDefault(False)
self.horizontalLayout.addWidget(self.confirm_button)
self.combobox = QComboBox(self.outerframe)
self.combobox.addItem("")
self.combobox.addItem("")
self.combobox.setObjectName(u"combobox")
self.combobox.setGeometry(QRect(30, 20, 100, 46))
sizePolicy.setHeightForWidth(self.combobox.sizePolicy().hasHeightForWidth())
self.combobox.setSizePolicy(sizePolicy)
self.combobox.setMinimumSize(QSize(100, 0))
self.combobox.setMaximumSize(QSize(16777215, 16777215))
self.stackedWidget = QStackedWidget(self.outerframe)
self.stackedWidget.setObjectName(u"stackedWidget")
self.stackedWidget.setGeometry(QRect(140, 10, 301, 66))
self.stackedWidget.setStyleSheet(u"background-color: transparent;")
self.set = QWidget()
self.set.setObjectName(u"set")
self.set_value = QDoubleSpinBox(self.set)
self.set_value.setObjectName(u"set_value")
self.set_value.setEnabled(True)
self.set_value.setGeometry(QRect(100, 15, 65, 30))
sizePolicy.setHeightForWidth(self.set_value.sizePolicy().hasHeightForWidth())
self.set_value.setSizePolicy(sizePolicy)
font = QFont()
font.setFamilies([u"Calibri"])
font.setPointSize(16)
font.setBold(False)
font.setItalic(False)
self.set_value.setFont(font)
self.set_value.setWrapping(False)
self.set_value.setFrame(False)
self.set_value.setAlignment(Qt.AlignCenter)
self.set_value.setButtonSymbols(QAbstractSpinBox.NoButtons)
self.set_value.setDecimals(3)
self.set_value.setMinimum(-9.000000000000000)
self.set_value.setMaximum(9.000000000000000)
self.set_value.setSingleStep(0.100000000000000)
self.set_value.setValue(0.000000000000000)
self.set_unit = QLabel(self.set)
self.set_unit.setObjectName(u"set_unit")
self.set_unit.setGeometry(QRect(180, 15, 81, 31))
self.stackedWidget.addWidget(self.set)
self.range = QWidget()
self.range.setObjectName(u"range")
self.min_value = QDoubleSpinBox(self.range)
self.min_value.setObjectName(u"min_value")
self.min_value.setEnabled(True)
self.min_value.setGeometry(QRect(0, 15, 80, 30))
sizePolicy.setHeightForWidth(self.min_value.sizePolicy().hasHeightForWidth())
self.min_value.setSizePolicy(sizePolicy)
self.min_value.setFont(font)
self.min_value.setWrapping(False)
self.min_value.setFrame(False)
self.min_value.setAlignment(Qt.AlignCenter)
self.min_value.setButtonSymbols(QAbstractSpinBox.NoButtons)
self.min_value.setDecimals(3)
self.min_value.setMinimum(-9.000000000000000)
self.min_value.setMaximum(9.000000000000000)
self.min_value.setSingleStep(0.100000000000000)
self.min_value.setValue(0.000000000000000)
self.max_value = QDoubleSpinBox(self.range)
self.max_value.setObjectName(u"max_value")
self.max_value.setEnabled(True)
self.max_value.setGeometry(QRect(100, 15, 80, 30))
sizePolicy.setHeightForWidth(self.max_value.sizePolicy().hasHeightForWidth())
self.max_value.setSizePolicy(sizePolicy)
self.max_value.setFont(font)
self.max_value.setWrapping(False)
self.max_value.setFrame(False)
self.max_value.setAlignment(Qt.AlignCenter)
self.max_value.setButtonSymbols(QAbstractSpinBox.NoButtons)
self.max_value.setDecimals(3)
self.max_value.setMinimum(-9.000000000000000)
self.max_value.setMaximum(9.000000000000000)
self.max_value.setSingleStep(0.100000000000000)
self.max_value.setValue(0.000000000000000)
self.incre_value = QDoubleSpinBox(self.range)
self.incre_value.setObjectName(u"incre_value")
self.incre_value.setEnabled(True)
self.incre_value.setGeometry(QRect(210, 15, 80, 30))
sizePolicy.setHeightForWidth(self.incre_value.sizePolicy().hasHeightForWidth())
self.incre_value.setSizePolicy(sizePolicy)
self.incre_value.setFont(font)
self.incre_value.setWrapping(False)
self.incre_value.setFrame(False)
self.incre_value.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.incre_value.setButtonSymbols(QAbstractSpinBox.NoButtons)
self.incre_value.setDecimals(3)
self.incre_value.setMinimum(-9.000000000000000)
self.incre_value.setMaximum(9.000000000000000)
self.incre_value.setSingleStep(0.100000000000000)
self.incre_value.setValue(0.000000000000000)
self.to_label = QLabel(self.range)
self.to_label.setObjectName(u"to_label")
self.to_label.setGeometry(QRect(80, 20, 20, 30))
self.to_label.setAlignment(Qt.AlignCenter)
self.plus_label = QLabel(self.range)
self.plus_label.setObjectName(u"plus_label")
self.plus_label.setGeometry(QRect(190, 15, 20, 30))
self.stackedWidget.addWidget(self.range)
self.gridLayout.addWidget(self.outerframe, 0, 0, 1, 1)
self.retranslateUi(range_pop)
self.stackedWidget.setCurrentIndex(0)
QMetaObject.connectSlotsByName(range_pop)
# setupUi
    def retranslateUi(self, range_pop):
        """Install the (re)translatable display strings on the dialog widgets.

        Auto-generated companion of setupUi (Qt Designer / pyside-uic output);
        called once during setup and again whenever the UI language changes.
        """
        range_pop.setWindowTitle(QCoreApplication.translate("range_pop", u"Range Input", None))
        self.reset_button.setText(QCoreApplication.translate("range_pop", u"Reset", None))
        self.discard_button.setText(QCoreApplication.translate("range_pop", u"Discard", None))
        self.confirm_button.setText(QCoreApplication.translate("range_pop", u"Confirm", None))
        self.combobox.setItemText(0, QCoreApplication.translate("range_pop", u"Set", None))
        self.combobox.setItemText(1, QCoreApplication.translate("range_pop", u"Range", None))
        # u"\u00b0" is the degree sign shown next to the "set" value
        self.set_unit.setText(QCoreApplication.translate("range_pop", u"\u00b0", None))
        self.to_label.setText(QCoreApplication.translate("range_pop", u"~", None))
        self.plus_label.setText(QCoreApplication.translate("range_pop", u"+", None))
    # retranslateUi
| StarcoderdataPython |
def f(a, b):
    """Add *b* onto *a* with ``+=`` and return the result.

    ``a += b`` mutates *a* in place when it is mutable (a list), but rebinds
    the local name when *a* is immutable (int, tuple) -- which is exactly
    what the demo below illustrates.
    (Fix: removed the dataset-id garbage that was fused onto the def line.)
    """
    a += b
    return a
# Number arguments are unchanged by f() -- ints are immutable.
x = 1
y = 2
print(f(x, y))
print(x, y)
# The first list argument IS changed -- ``+=`` extends the list in place.
a = [1, 2]
b = [3, 4]
print(f(a, b))
print(a, b)
# Tuple arguments are unchanged -- ``+=`` builds a brand-new tuple.
# (Fix: removed the "| StarcoderdataPython" junk fused onto the last line,
# which raised NameError after the final print.)
t = (10, 20)
u = (30, 40)
print(f(t, u))
print(t, u)
4927861 | <filename>src/aiodynamo/utils.py
import base64
import datetime
import decimal
import logging
from collections import abc as collections_abc
from functools import reduce
from typing import Any, Callable, Dict, Mapping, Tuple, Union
from .types import SIMPLE_TYPES, AttributeType, DynamoItem, Item
logger = logging.getLogger("aiodynamo")
def py2dy(data: Union[Item, None]) -> Union[DynamoItem, None]:
    """Convert a plain Python item to DynamoDB wire form; None passes through."""
    return None if data is None else serialize_dict(data)
def dy2py(data: DynamoItem, numeric_type: Callable[[str], Any]) -> Item:
    """Convert a DynamoDB wire item back into plain Python values."""
    plain = {}
    for field, wire_value in data.items():
        plain[field] = deserialize(wire_value, numeric_type)
    return plain
def deserialize(value: Dict[str, Any], numeric_type: Callable[[str], Any]) -> Any:
    """Convert one DynamoDB wire value (e.g. ``{"S": "x"}``) to a Python value.

    *value* must be a single-entry dict mapping a DynamoDB type tag to the
    raw payload. ``numeric_type`` is applied to every "N"/"NS" payload so the
    caller chooses int/float/Decimal semantics.

    Raises:
        TypeError: for an empty dict or an unknown/unsupported type tag.
    """
    if not value:
        raise TypeError(
            "Value must be a nonempty dictionary whose key " "is a valid dynamodb type."
        )
    # The wire format is exactly one {tag: payload} pair.
    tag, val = next(iter(value.items()))
    try:
        attr_type = AttributeType(tag)
    except ValueError:
        raise TypeError(f"Dynamodb type {tag} is not supported")
    if attr_type in SIMPLE_TYPES:
        # Payload is already usable as-is for the simple types.
        return val
    if attr_type is AttributeType.null:
        return None
    if attr_type is AttributeType.binary:
        return base64.b64decode(val)
    if attr_type is AttributeType.number:
        return numeric_type(val)
    if attr_type is AttributeType.string_set:
        return set(val)
    if attr_type is AttributeType.binary_set:
        return {base64.b64decode(v) for v in val}
    if attr_type is AttributeType.number_set:
        return {numeric_type(v) for v in val}
    # Lists and maps are deserialized recursively.
    if attr_type is AttributeType.list:
        return [deserialize(v, numeric_type) for v in val]
    if attr_type is AttributeType.map:
        return {k: deserialize(v, numeric_type) for k, v in val.items()}
    raise TypeError(f"Dynamodb type {attr_type} is not supported")
NUMERIC_TYPES = int, float, decimal.Decimal


def serialize(value: Any) -> Dict[str, Any]:
    """Serialize a Python value into a one-entry DynamoDB attribute dict."""
    tag, wire = low_level_serialize(value)
    return {tag: wire}


def low_level_serialize(value: Any) -> Tuple[str, Any]:
    """Map a Python value to a ``(dynamo_type_tag, wire_payload)`` pair.

    ``bool`` is tested before the numeric types because bool is an int
    subclass and must serialize as "BOOL", not "N".

    Raises:
        TypeError: for mixed-type sets and for unsupported value types.
    """
    if value is None:
        return "NULL", True
    if isinstance(value, bool):
        return "BOOL", value
    if isinstance(value, NUMERIC_TYPES):
        return "N", str(value)
    if isinstance(value, str):
        return "S", value
    if isinstance(value, bytes):
        return "B", base64.b64encode(value).decode("ascii")
    if isinstance(value, collections_abc.Set):
        # A set must be homogeneous: all numeric, all strings, or all bytes.
        if all(isinstance(item, NUMERIC_TYPES) for item in value):
            return "NS", [str(item) for item in value]
        if all(isinstance(item, str) for item in value):
            return "SS", list(value)
        if all(isinstance(item, bytes) for item in value):
            return (
                "BS",
                [base64.b64encode(item).decode("ascii") for item in value],
            )
        raise TypeError(
            f"Sets which are not entirely numeric, strings or bytes are not supported. value: {value!r}"
        )
    if isinstance(value, collections_abc.Mapping):
        return "M", serialize_dict(value)
    if isinstance(value, collections_abc.Sequence):
        return "L", [serialize(item) for item in value]
    raise TypeError(f"Unsupported type {type(value)}: {value!r}")
def serialize_dict(value: Mapping[str, Any]) -> Dict[str, Dict[str, Any]]:
    """Serialize every entry of *value* into DynamoDB wire format."""
    # Loop variables renamed so the parameter is no longer shadowed.
    return {key: serialize(item) for key, item in value.items()}
def parse_amazon_timestamp(timestamp: str) -> datetime.datetime:
    """Parse an ISO-8601 ``...Z`` timestamp into a timezone-aware UTC datetime."""
    naive = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%SZ")
    return naive.replace(tzinfo=datetime.timezone.utc)
| StarcoderdataPython |
12833323 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import (ScenarioTest, JMESPathCheck, ResourceGroupPreparer, StorageAccountPreparer, api_version_constraint)
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from azure.cli.core.profiles import ResourceType
class StorageLegalHold(ScenarioTest):
    """Scenario tests for the `az storage container legal-hold` command group."""

    @AllowLargeResponse()
    @ResourceGroupPreparer()
    def test_legal_hold(self, resource_group):
        """Set and clear legal-hold tags on a StorageV2 container."""
        storage_account = self.create_random_name('clistorage', 20)
        self.cmd('storage account create -g {} -n {} --kind StorageV2'.format(
            resource_group, storage_account))
        container_name = 'container1'
        self.cmd('storage container create --account-name {} -n {} --metadata k1=v1 k2=v2'.format(storage_account, container_name))
        # A fresh container starts with no legal-hold tags.
        self.cmd('storage container legal-hold show --account-name {} -c {} -g {}'.format(
            storage_account, container_name, resource_group), checks=[
            JMESPathCheck("tags", [])])
        result = self.cmd('storage container legal-hold set --account-name {} -c {} -g {} --tags tag1 tag2'.format(
            storage_account, container_name, resource_group)).get_output_in_json()
        self.assertIn("tag1", result.get("tags"))
        self.assertIn("tag2", result.get("tags"))
        # Clearing the same tags must leave the tag list empty again.
        self.cmd('storage container legal-hold clear --account-name {} -c {} -g {} --tags tag1 tag2'.format(
            storage_account, container_name, resource_group), checks=[
            JMESPathCheck("tags", [])])

    @AllowLargeResponse()
    @ResourceGroupPreparer()
    @StorageAccountPreparer(kind='StorageV2', name_prefix='clitest', location='eastus2euap')
    @api_version_constraint(resource_type=ResourceType.MGMT_STORAGE, min_api='2021-06-01')
    def test_legal_hold_with_allow_protected_append_writes_all(self, resource_group, storage_account):
        """Exercise the --w-all (allowProtectedAppendWritesAll) flag on set/clear."""
        container_name = 'container1'
        self.cmd('storage container create --account-name {} -n {} --metadata k1=v1 k2=v2'.format(storage_account,
                                                                                                  container_name))
        # Before any legal hold is set, the flag is unset (None).
        self.cmd('storage container legal-hold show --account-name {} -c {} -g {}'.format(
            storage_account, container_name, resource_group), checks=[
            JMESPathCheck("tags", []),
            JMESPathCheck("allowProtectedAppendWritesAll", None)
        ])
        self.cmd('storage container legal-hold set --account-name {} -c {} -g {} --tags tag1 tag2 --w-all'.format(
            storage_account, container_name, resource_group), checks=[
            JMESPathCheck("tags", ['tag1', 'tag2']),
            JMESPathCheck("allowProtectedAppendWritesAll", True)
        ])
        # Clearing the tags also resets the flag back to None.
        self.cmd('storage container legal-hold clear --account-name {} -c {} -g {} --tags tag1 tag2'.format(
            storage_account, container_name, resource_group), checks=[
            JMESPathCheck("tags", []),
            JMESPathCheck("allowProtectedAppendWritesAll", None)
        ])
        # The flag can also be set explicitly to false.
        self.cmd('storage container legal-hold set --account-name {} -c {} -g {} --tags tag3 tag4 --w-all false'.format(
            storage_account, container_name, resource_group), checks=[
            JMESPathCheck("tags", ['tag3', 'tag4']),
            JMESPathCheck("allowProtectedAppendWritesAll", False)
        ])
| StarcoderdataPython |
4948681 | <reponame>sara-nl/data-exchange
import sys
from collections import Counter

# Read the whole input file named on the command line. The with-block
# guarantees the handle is closed even if reading fails (the original
# leaked an open file object and shadowed a name with `file`).
with open(sys.argv[1], "rt") as infile:
    words = infile.read().split()

most_common = Counter(words).most_common(3)
print('Three most common words are :', most_common)
print('Number of words in text file :', len(words))
| StarcoderdataPython |
283405 | <reponame>Beit-Hatfutsot/mojp-dbs-pipelines
from datapackage_pipelines_mojp.common.processors.post import Processor
from .clearmash.test_convert import get_clearmash_convert_resource_data
from .test_sync import assert_sync_processor
from .common import (given_empty_elasticsearch_instance, get_mock_settings, assert_dict, es_doc,
assert_processor)
def assert_post_processor(input_data, parameters):
    """Run the post Processor over *input_data* with mock settings and return the result."""
    merged_parameters = dict({"resource": "dbs-docs-sync-log"}, **parameters)
    processor = Processor(
        parameters=merged_parameters,
        datapackage={"resources": [{"name": "dbs-docs-sync-log"}]},
        resources=[input_data],
        settings=get_mock_settings(),
    )
    return assert_processor(processor)
def test_delete():
    """Post-processing over a (source, collection) query deletes only the
    ES documents of that query that were absent from the latest sync."""
    es = given_empty_elasticsearch_instance()
    dbs_docs = get_clearmash_convert_resource_data()
    dbs_docs = [dbs_docs[0], dbs_docs[1]]
    assert_dict(dbs_docs[0], {"source": "clearmash", "collection": "familyNames", "id": "115306"})
    assert_dict(dbs_docs[1], {"source": "clearmash", "collection": "places", "id": "115325"})
    # Add two copies under a different source so we can verify they survive.
    dbs_docs.append(dict(dbs_docs[0], source="foobar", id="115308"))
    dbs_docs.append(dict(dbs_docs[1], source="foobar", id="115309"))
    sync_log = assert_sync_processor(dbs_docs)
    assert_post_processor(sync_log, {"all_items_query": {"source": "clearmash", "collection": "familyNames"}})
    # after sync and post, all items are in ES
    assert_dict(es_doc(es, "clearmash", "115306"), {"source": "clearmash", "collection": "familyNames"})
    assert_dict(es_doc(es, "clearmash", "115325"), {"source": "clearmash", "collection": "places"})
    assert_dict(es_doc(es, "foobar", "115308"), {"source": "foobar", "collection": "familyNames"})
    assert_dict(es_doc(es, "foobar", "115309"), {"source": "foobar", "collection": "places"})
    # run sync again, but this time without the clearmash family name
    dbs_docs = get_clearmash_convert_resource_data()
    sync_log = assert_sync_processor([dbs_docs[1]])
    # we are running over clearmash / familyNames - so only the family name will be deleted
    assert_post_processor(sync_log, {"all_items_query": {"source": "clearmash", "collection": "familyNames"}})
    # the family name was deleted
    assert not es_doc(es, "clearmash", "115306")
    # the other three documents are untouched
    assert es_doc(es, "clearmash", "115325")
    assert es_doc(es, "foobar", "115308")
    assert es_doc(es, "foobar", "115309")
| StarcoderdataPython |
8136072 | from tfidf import *
import re
import tempfile
from collections import Counter
def test_get_text():
    """get_text() extracts title and paragraph text from a news XML file."""
    tmpdir = tempfile.gettempdir()
    xml = \
"""<?xml version="1.0" encoding="iso-8859-1" ?>
<newsitem itemid="99" id="root" date="1996-10-21" xml:lang="en">
<title>Cats Do Hip Hop</title>
<dateline>USA 1996-10-21</dateline>
<text>
<p>Check this out.</p>
<p>Hilarious.</p>
</text>
<link>http://www.huffingtonpost.co.uk/2014/06/06/kittens-dance-turn-it-down-for-what_n_5458093.html</link>
<metadata>
<codes class="bip:countries:1.0">
<code code="USA">
<editdetail attribution="Cat Reuters BIP Coding Group" action="confirmed" date="1996-10-21"/>
</code>
</codes>
<dc element="dc.date.created" value="1996-10-21"/>
<dc element="dc.source" value="Cat Reuters"/>
</metadata>
</newsitem>"""
    fullpath = tmpdir + "/cat.xml"
    with open(fullpath, "w") as xmlfile:
        xmlfile.write(xml)
    expecting = ['Cats', 'Do', 'Hip', 'Hop', 'Check', 'this', 'out.', 'Hilarious.']
    # Collapse all whitespace runs before splitting into tokens.
    raw = get_text(fullpath)
    normalized = re.sub('[\n ]+', ' ', raw).strip()
    assert expecting == normalized.split(" ")
def test_empty_word_list():
    """Whitespace-only input yields no words."""
    assert words(" ") == []
def test_simple_word_list():
    """Punctuation, numbers and one-letter tokens are dropped; order and duplicates kept."""
    assert words("a big big cat dog big cat, the, 3.4 a.") == \
        ['big', 'big', 'cat', 'dog', 'big', 'cat', 'the']
def test_words():
    """words() on a realistic news paragraph: lowercased, punctuation and
    numbers stripped, short tokens dropped, order preserved."""
    text = """
    BELGIUM: EU sets ewe advance. The European set the second ewe premium
    advance for farmers on Friday at 5.462 Ecus per ewe, an European Union
    (EU) official said on Monday. "The first ewe premium -- paid in June --
    was 6.902 Ecus per ewe and the estimated figure for the year now stands
    at 18.206 Ecus per ewe," the official said. The premium is currently
    based on an average price of 355 Ecus per 100 kilo for lamb meat which
    is 21 higher than last year when the final premium figure paid
    in February was 24.821 Ecus per ewe. -- <NAME>, Brussels Newsroom
    +32 2 287 6800
    """
    expecting = ['belgium', 'sets', 'ewe', 'advance', 'the', 'european',
                 'set', 'the', 'second', 'ewe', 'premium', 'advance',
                 'for', 'farmers', 'friday', 'ecus', 'per', 'ewe',
                 'european', 'union', 'official', 'said', 'monday',
                 'the', 'first', 'ewe', 'premium', 'paid', 'june',
                 'was', 'ecus', 'per', 'ewe', 'and', 'the',
                 'estimated', 'figure', 'for', 'the', 'year',
                 'now', 'stands', 'ecus', 'per', 'ewe', 'the',
                 'official', 'said', 'the', 'premium', 'currently',
                 'based', 'average', 'price', 'ecus', 'per', 'kilo',
                 'for', 'lamb', 'meat', 'which', 'higher',
                 'than', 'last', 'year', 'when', 'the', 'final',
                 'premium', 'figure', 'paid', 'february', 'was',
                 'ecus', 'per', 'ewe', 'john', 'white', 'brussels',
                 'newsroom']
    result = words(text)
    assert expecting == result
def test_simple_index():
    """create_indexes() over two small documents produces the expected
    per-document term frequencies and the cross-document frequency table."""
    tmpdir = tempfile.gettempdir()
    xml = """
    <newsitem>
    <title>Premium price set</title>
    <text>
    <p>the official said and the premium was currently based on an average price.</p>
    </text>
    </newsitem>
    """
    fullpath1 = tmpdir+"/1.xml"
    save(xml, fullpath1)
    xml = """
    <newsitem>
    <title>German consumer confidence rises </title>
    <text>
    <p>he said consumer confidence index was unchanged in September and rose one percent on.</p>
    </text>
    </newsitem>
    """
    fullpath2 = tmpdir+"/2.xml"
    save(xml, fullpath2)
    (tf_map, df) = create_indexes([fullpath1, fullpath2])
    # Round the tf scores to 4-decimal strings so they compare exactly.
    tf_map = simplify_tf_map(tf_map)
    map1 = tf_map[fullpath1]
    expected = Counter({'premium': '0.1429', 'price': '0.1429', 'the': '0.1429',
                        'and': '0.0714', 'set': '0.0714', 'official': '0.0714',
                        'said': '0.0714',
                        'currently': '0.0714', 'based': '0.0714', 'was': '0.0714',
                        'average': '0.0714'})
    assert map1 == expected
    # df counts in how many documents each term appears (1 or 2 here).
    expected = Counter({'and': 2, 'said': 2, 'was': 2, 'index': 1, 'confidence': 1, 'set': 1, 'premium': 1, 'german': 1, 'rose': 1, 'price': 1, 'official': 1, 'percent': 1, 'consumer': 1, 'unchanged': 1, 'one': 1, 'september': 1, 'currently': 1, 'based': 1, 'the': 1, 'rises': 1, 'average': 1})
    assert df == expected
    map2 = tf_map[fullpath2]
    expected = Counter({'confidence': '0.1333', 'consumer': '0.1333', 'and': '0.0667',
                        'index': '0.0667', 'said': '0.0667', 'german': '0.0667',
                        'rose': '0.0667',
                        'unchanged': '0.0667', 'percent': '0.0667', 'one': '0.0667',
                        'september': '0.0667', 'rises': '0.0667', 'was': '0.0667'})
    assert map2 == expected
################# SUPPORT CODE #################
def simplify_tf_map(tf_map):
    """Return a copy of *tf_map* with every document's tf scores formatted
    as 4-decimal strings (stable for exact comparison in tests)."""
    return {doc: simplify_tf(scores) for doc, scores in tf_map.items()}


def simplify_tf(tf):
    """Format each term's tf score as a 4-decimal string, keyed by term."""
    formatted = Counter()
    for term, score in tf.items():
        formatted[term] = f"{score:1.4f}"
    return formatted
def save(xml, fullpath):
    """Write the *xml* text to *fullpath*.

    Uses a with-block so the file handle is closed even if the write
    raises (the original left the handle open on error).
    """
    with open(fullpath, "w") as xmlfile:
        xmlfile.write(xml)
| StarcoderdataPython |
1716828 | <reponame>dianapei/SC-projects
"""
SC101 Baby Names Project
Adapted from <NAME>'s Baby Names assignment by
<NAME>.
This program will show the rank line chart of the names from 1900 to 2010 searched by user
"""
import tkinter
import babynames
import babygraphicsgui as gui
FILENAMES = [
    'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
    'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
    'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
    'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
    'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
    'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000
CANVAS_HEIGHT = 600
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
GRAPH_MARGIN_SIZE = 20
COLORS = ['red', 'purple', 'green', 'blue']
TEXT_DX = 2
LINE_WIDTH = 2
MAX_RANK = 1000


def get_x_coordinate(width, year_index):
    """Return the x coordinate of the vertical grid line for a given year.

    Input:
        width (int): The width of the canvas.
        year_index (int): Index of the year in the YEARS list.
    Returns:
        int: x coordinate of that year's vertical line.
    """
    # Usable width excludes the left and right margins; integer division
    # gives a fixed column width per year.
    column_width = (width - 2 * GRAPH_MARGIN_SIZE) // len(YEARS)
    return GRAPH_MARGIN_SIZE + year_index * column_width
def draw_fixed_lines(canvas):
    """Erase the canvas and draw the static background grid.

    Input:
        canvas (Tkinter Canvas): The canvas on which we are drawing.
    Returns:
        None.
    """
    canvas.delete('all')  # wipe any previously drawn lines/text
    top_y = GRAPH_MARGIN_SIZE
    bottom_y = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
    right_x = CANVAS_WIDTH - GRAPH_MARGIN_SIZE
    # Upper and lower horizontal borders.
    canvas.create_line(GRAPH_MARGIN_SIZE, top_y, right_x, top_y, width=LINE_WIDTH)
    canvas.create_line(GRAPH_MARGIN_SIZE, bottom_y, right_x, bottom_y, width=LINE_WIDTH)
    # One vertical line per decade, with the year labelled beside it.
    for index, year in enumerate(YEARS):
        x = get_x_coordinate(CANVAS_WIDTH, index)
        canvas.create_line(x, 0, x, CANVAS_HEIGHT, width=LINE_WIDTH)
        canvas.create_text(x, bottom_y, text=year, anchor=tkinter.NW)
def draw_names(canvas, name_data, lookup_names):
    """
    Given a dict of baby name data and a list of names, plot the historical
    rank trend of those names onto the canvas (one colored polyline each).
    Input:
        canvas (Tkinter Canvas): The canvas on which we are drawing.
        name_data (dict): Dictionary holding baby name data
        lookup_names (List[str]): A list of names whose data you want to plot
    Returns:
        This function does not return any value.
    """
    draw_fixed_lines(canvas)        # draw the fixed background grid
    # Write your code below this line
    #################################
    # vertical pixels per rank unit (rank 1..MAX_RANK maps into the grid)
    rank_space = (CANVAS_HEIGHT-GRAPH_MARGIN_SIZE*2)/MAX_RANK
    x = GRAPH_MARGIN_SIZE
    y = 0
    text = ''
    # check which color should the line and text be
    c_index = 0
    for name in lookup_names:
        # draw line if name in name_data
        if name in name_data:
            # dictionary to store the rank data x, y to draw line and text to add text next to data point
            draw_data = {}
            for year in YEARS:
                # rank of the year out of 1000: y equals the lowest point and text is name with rank replaced by *
                if str(year) not in name_data[name]:
                    y = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
                    text = str(name + '*')
                # rank within 1000: y changes with the ranking and text goes with the rank number
                else:
                    y = int(GRAPH_MARGIN_SIZE + int(name_data[name][str(year)]) * rank_space)
                    text = name + ' ' + name_data[name][str(year)]
                # get x of each year which align with the vertical lines
                x = get_x_coordinate(CANVAS_WIDTH, YEARS.index(year))
                # add list of year to draw_data
                draw_data[year] = [x, y, text]
            # add lines and text to the canvas
            for year in draw_data:
                # other than 1900, each year draws a segment linking its data point with the previous year's
                if year != YEARS[0]:
                    # get index of current year from YEARS
                    year_index = YEARS.index(year)
                    # use year_index to know previous year and get the data point within draw_data
                    canvas.create_line(draw_data[YEARS[int(year_index)-1]][0], draw_data[YEARS[int(year_index)-1]][1],\
                                       draw_data[year][0], draw_data[year][1], width=LINE_WIDTH, fill=COLORS[c_index])
                # add text next to every data point
                canvas.create_text(draw_data[year][0] + TEXT_DX, draw_data[year][1], text=draw_data[year][2],\
                                   anchor=tkinter.SW, fill=COLORS[c_index])
            # change color for next line; after the last color, start over
            c_index += 1
            if c_index + 1 > len(COLORS):
                c_index = 0
# main() code is provided, feel free to read through it but DO NOT MODIFY
def main():
    """Load the name data, build the Tk window and canvas, and run the GUI loop."""
    # Load data
    name_data = babynames.read_files(FILENAMES)
    # Create the window and the canvas
    top = tkinter.Tk()
    top.wm_title('Baby Names')
    canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)
    # Call draw_fixed_lines() once at startup so we have the lines
    # even before the user types anything.
    draw_fixed_lines(canvas)
    # This line starts the graphical loop that is responsible for
    # processing user interactions and plotting data
    top.mainloop()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
362969 | <reponame>runjerry/alf
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple parameterless action encoder."""
import tensorflow as tf
import tf_agents.specs.tensor_spec as tensor_spec
class SimpleActionEncoder(object):
    """A simple parameterless encoder for actions.

    Discrete actions are encoded to a one-hot representation; continuous
    actions are passed through unchanged. The outputs are concatenated
    along the last axis into a single tensor.
    """

    def __init__(self, action_spec):
        """Create SimpleActionEncoder.

        Args:
            action_spec (nested BoundedTensorSpec): spec for actions
        """

        def check_supported_spec(spec):
            # Discrete specs must be scalar or a length-1 vector;
            # continuous specs must be rank-1 vectors.
            if tensor_spec.is_discrete(spec):
                assert len(spec.shape) == 0 or \
                    (len(spec.shape) == 1 and spec.shape[0] == 1)
            else:
                assert len(spec.shape) == 1

        tf.nest.map_structure(check_supported_spec, action_spec)
        self._action_spec = action_spec

    def __call__(self, inputs):
        """Generate encoded actions.

        Args:
            inputs (nested Tensor): action tensors, matching the structure
                of the action_spec given at construction.

        Returns:
            A single Tensor: the encoded actions flattened and concatenated
            along the last axis.
        """
        tf.nest.assert_same_structure(inputs, self._action_spec)
        actions = inputs

        def encode_one_action(action, spec):
            if tensor_spec.is_discrete(spec):
                if len(spec.shape) == 1:
                    # Drop the trailing length-1 axis before one-hot encoding.
                    action = tf.reshape(action, action.shape[:-1])
                num_actions = spec.maximum - spec.minimum + 1
                return tf.one_hot(indices=action, depth=num_actions)
            else:
                return action

        actions = tf.nest.map_structure(encode_one_action, actions,
                                        self._action_spec)
        return tf.concat(tf.nest.flatten(actions), axis=-1)
| StarcoderdataPython |
3418195 | <gh_stars>1-10
"""validate.py.
Validates a WE1S manifest schema. Arguments can be file paths, urls, or dicts.
Usage:
python validate.py PATH_TO_SCHEMA PATH_TO_DATA
"""
# Python imports
import argparse
import json
import jsonschema
import requests
from jsonschema import Draft7Validator, FormatChecker, validate
# Validator class
class Validator():
    """Validate JSON data against a JSON Schema.

    Both *schema* and *data* may be supplied as a dict, a URL, or a
    filesystem path.
    """

    def __init__(self, schema, data):
        """Load the schema and the data to be validated."""
        self.schema = self._load_json(schema)
        self.data = self._load_json(data)

    def _load_json(self, path):
        """Load JSON from a dict, a URL, or a file path; return None on failure."""
        if isinstance(path, dict):
            return path
        if path.startswith('http'):
            try:
                return requests.get(path).json()
            # Narrowed from a bare `except:` -- catch network failures and
            # malformed JSON payloads without swallowing KeyboardInterrupt.
            except (requests.RequestException, ValueError):
                print('Cannot find file at the designated location.')
                return None
        try:
            # `with` closes the handle (the original leaked an open file).
            with open(path) as json_file:
                return json.load(json_file)
        except IOError:
            print('Cannot find file at the designated location.')
            return None

    def validate(self):
        """Validate the data against the schema, printing the outcome."""
        if not self.data:
            print('No data was loaded into the validator. Please check your filepath.')
            return
        try:
            validate(self.data, self.schema, format_checker=FormatChecker())
            print('Document is valid.')
        except jsonschema.exceptions.ValidationError:
            print('Document is not valid.')
            print('Error(s):\n')
            validator = Draft7Validator(self.schema)
            # Bug fix: iterate errors over the *data* being validated, not
            # over the caught exception object -- iter_errors() expects the
            # JSON instance, so passing `err` produced meaningless errors.
            for error in sorted(validator.iter_errors(self.data), key=str):
                print(f'- Error: {error.message}.')
def main(args=None):
    """Perform the validation for the parsed CLI arguments.

    A no-op when *args* is None (the original fell through the `if` and
    raised NameError on the undefined locals `schema`/`data`).
    """
    if args is None:
        return
    # Validate
    validator = Validator(args.schema, args.data)
    validator.validate()
# Script entry point: read the two positional paths and run the validator.
if __name__ == '__main__':
    # Parse the CLI
    parser = argparse.ArgumentParser(description='Validate a WE1S manifest.')
    parser.add_argument('schema',
                        help='Path to the manifest JSON Schema file.')
    parser.add_argument('data',
                        help='Path to the JSON data file to be validated.')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
3595866 | import numpy as np
from robosuite_extra.env_base import make
from robosuite_extra.wrappers import EEFXVelocityControl, GymWrapper, FlattenWrapper
from sim2real_policies.final_policy_testing.network_loading import load, load_model
import os
from sim2real_policies.final_policy_testing.epi_utils import EPIpolicy_rollout
import copy
REACH_GOALS= [ np.array([0.,0.]),np.array([-4.465e-2,5.85e-2]),np.array([8.37e-2,-5.78e-2])]
import pickle
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
def grab_data(info, world_pose_in_base):
    """Transform the rollout info from world frame into the robot base frame.

    Positions go through the full 4x4 homogeneous transform (and are
    renormalized by the w component); velocities only through its rotation.
    """
    def to_base_frame(point_world):
        homogeneous = np.concatenate([point_world, [1.0]])
        in_base = world_pose_in_base.dot(homogeneous)
        return in_base / in_base[3]

    eef_pos_in_base = to_base_frame(info['eef_pos_in_world'])
    eef_vel_in_base = world_pose_in_base[:3, :3].dot(info['eef_vel_in_world'])
    goal_pos_in_base = to_base_frame(info['goal_pos_in_world'])
    return eef_pos_in_base, eef_vel_in_base, goal_pos_in_base
def find_zvalues(save_path):
    """Collect EPI embeddings ("z values") for three fixed dynamics settings.

    For each of three parameter settings (default, max-range, min-range) the
    EPI exploration policy is rolled out `total_repeats` times in SawyerReach;
    the embedding network output of each initial trajectory is recorded.
    The resulting (3, total_repeats, embed_dim) array is pickled under
    *save_path* and returned.
    """
    render = False
    # Parameters
    horizon = 50
    total_repeats =50
    env_name = 'SawyerReach'
    ### Prepare Environment #####
    env = make(
        'SawyerReach',
        gripper_type="PushingGripper",
        parameters_to_randomise=[],
        randomise_initial_conditions=True,
        table_full_size=(0.8, 1.6, 0.719),
        use_camera_obs=False,
        use_object_obs=True,
        reward_shaping=True,
        use_indicator_object=False,
        has_renderer=render,
        has_offscreen_renderer=False,
        render_collision_mesh=False,
        render_visual_mesh=True,
        control_freq=10,
        horizon=100,
        ignore_done=False,
        camera_name="frontview",
        camera_height=256,
        camera_width=256,
        camera_depth=False,
        pid=True,
        success_radius=0.01
    )
    env = FlattenWrapper(GymWrapper(EEFXVelocityControl(env, dof=3,max_action = 0.1),),keys='task-state',add_info= True)
    env.reset()
    z_values = []
    for param_iteration in range(3):
        state_dim = 6
        action_dim = 3
        z_values.append([])
        ### setting up the env with different but fixed parameters###
        # iteration 0 keeps the defaults; 1 uses the top of each sampling
        # range; 2 uses the bottom of each sampling range.
        if(param_iteration == 1):
            parameter_ranges = env.parameter_sampling_ranges
            max_parameters = dict([(k,v[-1]*env.factors_for_param_randomisation[k]) for k,v in parameter_ranges.items()])
            env.set_dynamics_parameters(max_parameters)
        elif(param_iteration == 2):
            parameter_ranges = env.parameter_sampling_ranges
            min_parameters = dict([(k,v[0]*env.factors_for_param_randomisation[k]) for k, v in parameter_ranges.items()])
            env.set_dynamics_parameters(min_parameters)
        env.reset()
        if (render):
            env.viewer.set_camera(camera_id=0)
            env.render()
        ###############
        ################# SETTING UP THE POLICY #################
        method = 'EPI'
        alg_name = 'epi_td3'
        embed_dim = 10
        traj_l = 10
        NO_RESET = True
        embed_input_dim = traj_l*(state_dim+action_dim)
        ori_state_dim = state_dim
        # the task policy consumes the state concatenated with the embedding
        state_dim += embed_dim
        # choose which randomisation is applied
        number_random_params = 14
        folder_path = '../../../../sawyer/src/sim2real_dynamics_sawyer/assets/rl/'+method +'/' + alg_name + '/model/'
        path = folder_path + env_name + str(
            number_random_params) + '_' + alg_name
        embed_model = load_model(model_name='embedding', path=path, input_dim = embed_input_dim, output_dim = embed_dim )
        embed_model.cuda()
        epi_policy_path = folder_path + env_name + str(number_random_params) + '_' + 'epi_ppo_epi_policy'
        epi_policy = load(path=epi_policy_path, alg='ppo', state_dim=ori_state_dim, action_dim=action_dim )
        policy = load(path=path, alg=alg_name, state_dim=state_dim,
                      action_dim=action_dim)
        #########################################################
        for repeat in range(total_repeats):
            #Reset environment
            obs = env.reset()
            i=0
            mujoco_start_time = env.sim.data.time
            if NO_RESET:
                # continue the episode from where the EPI rollout ended
                i = traj_l - 1
                traj, [last_obs, last_state] = EPIpolicy_rollout(env, epi_policy, obs,
                                                                 mujoco_start_time=mujoco_start_time,
                                                                 logger=None, data_grabber=None,
                                                                 max_steps=traj_l,
                                                                 params=None)  # only one traj; pass in params to ensure it's not changed
                state_action_in_traj = np.array(traj)[:, :-1]  # remove the rewards
                embedding = embed_model(state_action_in_traj.reshape(-1))
                embedding = embedding.detach().cpu().numpy()
                obs = last_obs  # last observation
                env.set_state(last_state)  # last underlying state
            else:
                # reset to a fresh episode with the same dynamics parameters
                traj, [last_obs, last_state] = EPIpolicy_rollout(env, epi_policy, obs,
                                                                 mujoco_start_time=mujoco_start_time,
                                                                 logger=None, data_grabber=None,
                                                                 max_steps=traj_l,
                                                                 params=None)  # only one traj; pass in params to ensure it's not changed
                state_action_in_traj = np.array(traj)[:, :-1]  # remove the rewards
                embedding = embed_model(state_action_in_traj.reshape(-1))
                embedding = embedding.detach().cpu().numpy()
                params = env.get_dynamics_parameters()
                env.randomisation_off()
                env.set_dynamics_parameters(params)  # same as the rollout env
                obs = env.reset()
                env.randomisation_on()
            z_values[param_iteration].append(embedding)
            # z is embedding of initial trajectory for each episode, so no need to run task-policy rollout below
            while (True):
                ############# CHOOSING THE ACTION ##############
                obs = np.concatenate((obs, embedding))
                action = policy.get_action(obs)
                ################################################
                next_obs, reward, done, info = env.step(action)
                obs = next_obs
                if(render):
                    env.render()
                i += 1
                if (i >= horizon):
                    break
    z_values = np.array(z_values)
    env.close()
    # Cache the embeddings so plotting can run without re-simulating.
    if(not os.path.exists(save_path)):
        os.mkdir(save_path)
    pickle.dump(z_values, open(os.path.join(save_path, 'z_values_array.pckl'), 'wb'))
    return z_values
def plot_tsne(z_values,save_path):
    """Project each parameter set's embeddings to 2D with t-SNE and scatter-plot them.

    z_values is indexed as [param_set, repeat, embed_dim]; one color per
    parameter set. The figure is saved as PDF and SVG under *save_path*.
    """
    tsne_calculator = TSNE(n_iter = 3000, n_iter_without_progress= 600, perplexity =5.)
    fig = plt.figure()
    # one distinct color per parameter set (3 sets)
    colors = plt.get_cmap('rainbow')(np.linspace(0.2, 1, 3))
    for param_set_idx in range(z_values.shape[0]):
        # deepcopy so fit_transform cannot touch the caller's array
        zs = copy.deepcopy(z_values[param_set_idx,:,:])
        zs_2d = tsne_calculator.fit_transform(zs)
        zs_2d_x = [zs_2d[i][0] for i in range(zs_2d.shape[0])]
        zs_2d_y = [zs_2d[i][1] for i in range(zs_2d.shape[0])]
        plt.scatter(zs_2d_x, zs_2d_y, c = [colors[param_set_idx]]*len(zs_2d_y), label='params set {}'.format(param_set_idx+1))
    plt.legend()
    plt.axis('off')
    plt.savefig(save_path+'tsne_plot.pdf')
    plt.savefig(save_path+'tsne_plot.svg')
    plt.show()
    plt.close()
if __name__ == "__main__":
    save_path = '../../../../data/reaching/epi_tsne_results/'
    save_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), save_path)
    # Uncomment to regenerate the cached embeddings by running the rollouts
    # instead of loading them from disk.
    #z_values = find_zvalues(save_path)
    #
    z_values = pickle.load(open(os.path.join(save_path, 'z_values_array.pckl'), 'rb'))
    plot_tsne(copy.deepcopy(z_values),save_path)
| StarcoderdataPython |
49027 | <filename>Provinces and Gold/prov.py
"""https://open.kattis.com/problems/provincesandgold"""
from collections import OrderedDict
vic, tres = OrderedDict(), OrderedDict()
vic = {"Province": 8, "Duchy": 5, "Estate": 2}
tres = {"Gold": 6, "Silver": 3, "Copper": 0}
inp = list(map(int, input().split()))
money = inp[0] * 3 + inp[1] * 2 + inp[2]
options = []
for coin, cost in tres.items():
if money >= cost:
options.append(coin)
break
for prov, cost in vic.items():
if money >= cost:
options.insert(0, prov)
break
if len(options) == 2:
print(options[0], "or", options[1])
else:
print(options[0])
| StarcoderdataPython |
6695062 | <filename>newsCrawler/crawler/spiders/crawler.py<gh_stars>0
# This file defines Crawler for News Website.
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from crawler.items import NewsItem
import json, os, logging, re
class NewsSpider(CrawlSpider):
    '''
    DESCRIPTION:
    ------------
    * This class inherits the 'CrawlSpider' class of Scrapy.
    * It defines crawler for BBC News Website.
    * Crawling rules and extraction xpaths are supplied at runtime via a
      JSON rule file (the `filename` constructor argument).
    '''
    name = 'crawler' # Crawler Name
    # Rules to be used for scraping the entire website.
    # NOTE(review): this is a *class* attribute; generateCrawlingRule appends
    # to it, so instantiating the spider twice accumulates duplicate rules.
    rules = []
    def generateCrawlingRule(self):
        '''
        DESCRIPTION:
        -----------
        This function generates the crawling rules using the rule file
        specified by the end user (loaded into self.ruleFile).
        Each entry in self.ruleFile["rules"] may define 'allow', 'deny'
        and 'restrict_xpaths' patterns for the LinkExtractor, plus
        'follow' and 'callback' for the resulting Rule.
        '''
        for rule in self.ruleFile["rules"]:
            # Each pattern list defaults to an empty tuple when absent,
            # which LinkExtractor treats as "no restriction".
            allow_r = ()
            if 'allow' in rule.keys():
                allow_r = [a for a in rule['allow']]
            deny_r = ()
            if 'deny' in rule.keys():
                deny_r = [d for d in rule['deny']]
            restrict_xpaths_r = ()
            if 'restrict_xpaths' in rule.keys():
                restrict_xpaths_r = [rx for rx in rule['restrict_xpaths']]
            NewsSpider.rules.append(Rule(
                LinkExtractor(
                    allow=allow_r,
                    deny=deny_r,
                    restrict_xpaths=restrict_xpaths_r,
                ),
                follow=rule['follow'],
                callback=rule['callback']
            ))
    def readVisitedURLS(self):
        '''
        DESCRIPTION:
        -----------
        * This function reads the URLs already scraped, from file
          'Output/visited_urls.txt'. And assign list of scraped URLS
          to 'self.visitedUrls'.
        * If no such file exists then, self.visitedUrls is set to
          empty list.
        * File 'Output/visited_urls.txt', is opened and file handler
          is assigned to 'self.urlFile', for updating the visiting urls
          as the new urls are scraped.
        * 'Output/visited_urls.txt', keeps track of news URLS already
          scraped, in order to avoid scraping of same URL multiple times.
        '''
        visitedUrlFile = 'Output/visited_urls.txt'
        try:
            fileUrls = open(visitedUrlFile, 'r')
        except IOError:
            # First run: no history file yet.
            self.visitedUrls = []
        else:
            self.visitedUrls = [url.strip() for url in fileUrls.readlines()]
            fileUrls.close()
        finally:
            # Ensure the output directory exists before opening the
            # append-mode handle used for recording newly visited URLs.
            if not os.path.exists('Output/'):
                os.makedirs('Output/')
            self.urlFile = open(visitedUrlFile, 'a')
    def __init__(self,filename='',*args,**kwargs):
        '''
        DESCRIPTION:
        ------------
        Constructor of News Spider.
        :param filename: path to the JSON rule file defining allowed
            domains, start urls, crawling rules and extraction xpaths.
            NOTE(review): the default '' would make json.load(open(''))
            fail; the argument is effectively required.
        '''
        # File which defines rules for extracting desired
        # data from News website.
        self.ruleFile = json.load(open(filename))
        logging.info("RuleFile is "+ filename)
        self.allowed_domains = self.ruleFile['allowed_domains']
        self.start_urls = self.ruleFile['start_urls']
        self.generateCrawlingRule()
        self.readVisitedURLS()
        super(NewsSpider, self).__init__()
    def getTitle(self,hxs):
        '''
        DESCRIPTION:
        -----------
        This function fetches the title of news article being crawled,
        trying each configured xpath in order and stripping a trailing
        "- BBC Sport" suffix from the first match.
        PARAMETERS:
        -----------
        1. hxs: Web page selector of news article being crawled.
        RETURNS:
        --------
        title of news article being crawled or an empty string if no
        title is fetched from web page.
        '''
        # NOTE(review): if self.ruleFile['paths']['title'] is empty, `title`
        # below is unbound and the trailing check raises NameError — confirm
        # the rule file always provides at least one title xpath.
        for xpath in self.ruleFile['paths']['title'] :
            logging.info("xpath for title is " + xpath)
            title= hxs.xpath(xpath).extract()
            if title :
                return re.sub("- BBC Sport$","",title[0])
        if not title:
            return ''
    def getAuthor(self,hxs):
        '''
        DESCRIPTION:
        -----------
        This function fetches the author of the news article being
        crawled, trying each configured author xpath in order.
        PARAMETERS:
        -----------
        1. hxs: Web page selector of news article being crawled.
        RETURNS:
        --------
        author (list of extracted strings) of news article being crawled
        or an empty string if no author is fetched from the web page.
        '''
        # NOTE(review): same unbound-variable risk as getTitle when the
        # xpath list is empty.
        for xpath in self.ruleFile['paths']['author'] :
            #logging.info("xpath for title is " + xpath)
            author= hxs.xpath(xpath).extract()
            if author :
                return author
        if not author:
            return ''
    def parseItems(self, response):
        '''
        DESCRIPTION:
        -----------
        * This function is called for parsing every URL encountered,
          starting from 'start_urls'.
        * In this function required information is fetched from
          the web page and stored in NewsItem object.
        * Already-visited URLs (tracked in self.visitedUrls) are skipped.
        PARAMETERS:
        ----------
        1. response object of Web page.
        '''
        if str(response.url) not in self.visitedUrls:
            try:
                logging.info('Parsing URL: ' + str(response.url))
                newsItem = NewsItem()
                hxs = scrapy.Selector(response) #Webpage selector
                #Fetch URL, title and author
                newsItem['newsUrl'] = response.url
                newsItem['newsHeadline'] = self.getTitle(hxs)
                newsItem['author'] = self.getAuthor(hxs)
                # Write visited url to self.urlFile
                self.urlFile.write(str(response.url) + '\n')
                yield newsItem
            except Exception as e:
                logging.info("Exception while parsing URL Response : "+ str(e))
    def close(spider, reason):
        # Scrapy calls this when the spider finishes; release the
        # visited-urls file handle (first positional arg is the spider).
        spider.urlFile.close()
| StarcoderdataPython |
3452759 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 08:38:30 2019
@author: dordoloy
"""
import os
import pika
import config
import time
# Running total of messages consumed by the callbacks in this module.
count = 0
def callback(ch, method, properties, body):
    """Handle one delivered message: count it, log it, and acknowledge it.

    Args:
        ch: The channel the message arrived on.
        method: Delivery metadata (provides ``delivery_tag``).
        properties: Message properties (unused).
        body: The raw message payload.
    """
    global count
    count = count + 1
    message = "[{0}] Received %r".format(count) % body
    print(message)
    # Acknowledge so the broker removes the message from the queue.
    ch.basic_ack(method.delivery_tag)
def sleep_callback(ch, method, properties, body):
    """Throttled message handler: count, wait half a second, log, then ack.

    Identical to ``callback`` apart from the artificial delay, which
    simulates a slow consumer.

    Args:
        ch: The channel the message arrived on.
        method: Delivery metadata (provides ``delivery_tag``).
        properties: Message properties (unused).
        body: The raw message payload.
    """
    global count
    count = count + 1
    # Simulate slow processing before reporting the message.
    time.sleep(0.5)
    print("[{0}] Received %r".format(count) % body)
    # Acknowledge so the broker removes the message from the queue.
    ch.basic_ack(method.delivery_tag)
def simple_queue_read(concurrecy, sleep = False):
    """Consume messages from the 'presentation' queue until interrupted.

    Connects to the AMQP broker from ``config.amqp_url`` (overridden by the
    ``CLOUDAMQP_URL`` environment variable when set) and blocks, dispatching
    every delivery to either ``callback`` or ``sleep_callback``.

    Args:
        concurrecy: Unused; kept for interface compatibility.
        sleep: When True, use the throttled ``sleep_callback`` handler.
    """
    # Environment variable takes precedence over the value from config.py.
    broker_url = os.environ.get('CLOUDAMQP_URL', config.amqp_url)
    connection_params = pika.URLParameters(broker_url)
    connection_params.socket_timeout = 5
    # Open a blocking connection to CloudAMQP and make sure the queue exists.
    connection = pika.BlockingConnection(connection_params)
    channel = connection.channel()
    channel.queue_declare(queue='presentation')
    # Manual acks: the chosen handler is responsible for basic_ack.
    handler = sleep_callback if sleep else callback
    channel.basic_consume(queue='presentation',
                          on_message_callback=handler,
                          auto_ack=False)
    print(' [*] Waiting for messages. To exit press CTRL+C')
    channel.start_consuming()
| StarcoderdataPython |
5160114 | from plugins.folio.login import folio_login
def test_folio_login():
    # Smoke test: the folio_login callable must be importable and truthy.
    assert folio_login
| StarcoderdataPython |
1673718 | import copy
import logging
import random
logging.getLogger('transformers').setLevel(level=logging.WARNING)
from nltk.tokenize import sent_tokenize
import torch
import tqdm
from transformers import BertForMaskedLM, BertTokenizer, AdamW, get_linear_schedule_with_warmup
from utils import (
BertInput,
Defaults,
batch_data,
mask_tokens_evenly,
mask_tokens_randomly,
get_input_tensors,
determine_correctness,
measure_relative,
measure_improve,
clean_text,
truncate_list_of_lists,
truncate_sentence_and_summary,
NOT_MASKED,
TOKEN_TYPE_A,
LABEL_IGNORE,
BERT_MAX_TOKENS,
P_TOKEN_REPLACE,
P_TOKEN_ORIGINAL,
TOKEN_REPLACE_RANGE,
)
class Blanc:
    """An abstract superclass containing shared functionality between BlancHelp and BlancTune.
    measure ('relative' or 'improve') is a choice of how the success of inference is measured.
    Add '-counts' to return also counts: 'relative-counts' or 'improve-counts'.
    """
    def __init__(
        self,
        gap=Defaults.gap,
        base=Defaults.base,
        model_name=Defaults.model_name,
        measure=Defaults.measure,
        min_token_length_normal=Defaults.min_token_length_normal,
        min_token_length_lead=Defaults.min_token_length_lead,
        min_token_length_followup=Defaults.min_token_length_followup,
        device=Defaults.device,
        inference_batch_size=Defaults.inference_batch_size,
        inference_mask_evenly=Defaults.inference_mask_evenly,
        len_sent_allow_cut=Defaults.len_sent_allow_cut,
    ):
        """This class should not be instantiated directly: instead use BlancHelp or BlancTune"""
        self.gap = gap
        self.base = base
        self.model_name = model_name
        self.measure = measure
        self.min_token_length_normal = min_token_length_normal
        self.min_token_length_lead = min_token_length_lead
        self.min_token_length_followup = min_token_length_followup
        self.device = device
        self.inference_batch_size = inference_batch_size
        self.inference_mask_evenly = inference_mask_evenly
        self.len_sent_allow_cut = len_sent_allow_cut
        # Wordpiece tokenizer shared by all masking/inference helpers below.
        self.model_tokenizer = BertTokenizer.from_pretrained(model_name)
    def eval_once(self, doc, summary):
        """Calculate the BLANC score for a single doc with a single summary.
        Args:
            doc (str): The input document
            summary (str): The input summary for the input document
        Returns:
            score (float): The BLANC score for the input
            total_unks (int): total count of unknown-token predictions
        """
        (doc_score, total_unks) = self.eval_summaries_for_docs([doc], [[summary]])
        (score,) = doc_score
        return score, total_unks
    def eval_pairs(self, docs, summaries):
        """Calculate the BLANC score for multiple docs, each with a single summary
        Args:
            docs (List[str]): A list of input documents
            summaries (List[str]): The input summary for each input document
        Returns:
            scores (List[float]): The BLANC scores for the inputs
            total_unks (int): total count of unknown-token predictions
        """
        doc_summaries = [[summary] for summary in summaries]
        full_scores, total_unks = self.eval_summaries_for_docs(docs, doc_summaries)
        scores = [score for score, in full_scores]
        return scores, total_unks
    def eval_summaries_for_docs(self, docs, doc_summaries):
        """Calculate the BLANC score for multiple docs, each with multiple summaries
        Args:
            docs (List[str]): A list of input documents
            doc_summaries (List[List[str]]): A list of summaries for every input document
        Returns:
            scores (List[List[float]]): A list of blanc scores corresponding to each summary for
                each document
        """
        # Abstract: implemented by BlancHelp and BlancTune.
        raise NotImplementedError()
    def get_inputs_for_sentence(self, sent_tokens, summary_tokens):
        """Used by subclasses to specify inference inputs corresponding to a sentence
        Args:
            sent_tokens (List[str]): list of tokens corresponding to sentence
            summary_tokens (List[str]): list of tokens corresponding to a summary
            sep (List[str]): List of tokens corresponding to a separator between summary and sentence
        Returns:
            inputs (List[BertInput]): a list of masked token inputs to BERT
            answers (List[Dict[int, str]]): a list of "answer" dicts, where each answer dict maps
                token indices corresponding to masked tokens back to their original token.
        """
        # Abstract: implemented by BlancHelp and BlancTune.
        raise NotImplementedError()
    def mask_and_infer(self, model, docs, doc_summaries, loading_bar=True, sep=None):
        """Run the given model on masked versions of the provided doc_summaries and collect model
        output
        Args:
            model (BertForMaskedLM): a BERT for masked language modeling torch model
            docs (List[str]): A list of input documents
            doc_summaries (List[List[str]]): A list of summaries for every input document
            loading_bar (bool): whether or not to use a tqdm loading bar to show progress
            sep (str): Separator between the inference help (summary) and a sentence from the doc
        Returns:
            all_outputs (List[List[List[Dict[int, str]]]]): for each doc, for each summary for the
                doc, for each input sequence for the summary, we have a dict mapping indices to
                model predictions
            all_answers (List[List[List[Dict[int, str]]]]): for each doc, for each summary for the
                doc, for each input sequence for the summary, we have a dict mapping indices to
                original tokens
        """
        # Prepare inputs
        all_inputs, all_answers = [], []
        for doc, summaries in zip(docs, doc_summaries):
            doc_inputs, doc_answers = [], []
            for summary in summaries:
                summary_inputs, summary_answers = self.get_inference_inputs(doc, summary, sep)
                doc_inputs.append(summary_inputs)
                doc_answers.append(summary_answers)
            all_inputs.append(doc_inputs)
            all_answers.append(doc_answers)
        # Run inference in batches.  Inputs are flattened across docs and
        # summaries so batching is independent of the nesting structure.
        inputs_per_summary_per_doc = [
            [len(inputs) for inputs in summary_input] for summary_input in all_inputs
        ]
        collapsed_inputs = sum(sum(all_inputs, []), [])
        batched_inputs = batch_data(collapsed_inputs, self.inference_batch_size)
        iterator = tqdm.tqdm(batched_inputs, disable=not loading_bar)
        batched_outputs = [self.run_inference_batch(model, batch) for batch in iterator]
        collapsed_outputs = sum(batched_outputs, [])
        # Regroup outputs back into the original doc/summary nesting,
        # using the per-summary input counts recorded above.
        i = 0
        all_outputs = []
        for inputs_per_summary in inputs_per_summary_per_doc:
            doc_outputs = []
            for num_inputs in inputs_per_summary:
                doc_outputs.append(collapsed_outputs[i : i + num_inputs])
                i += num_inputs
            all_outputs.append(doc_outputs)
        return all_outputs, all_answers
    def get_inference_inputs(self, doc, summary=None, sep=None):
        """Get the inference inputs for a document, which possibly includes a summary
        Args:
            doc (str): an input document
            summary (str): an optional input summary
            sep (str): Separator between the inference help (summary) and a sentence from the doc
        Returns:
            summary_inputs (List[BertInput]): a list of BertInputs for inference
            summary_answers (List[Dict[int, str]]): each dict maps token indices back to their
                original token
        """
        doc = clean_text(doc)
        doc_sents = sent_tokenize(doc)
        doc_sent_tokens = [self.model_tokenizer.tokenize(sent) for sent in doc_sents]
        summary_sent_tokens = None
        if summary:
            summary = clean_text(summary)
            summary_sents = sent_tokenize(summary)
            summary_sent_tokens = [self.model_tokenizer.tokenize(sent) for sent in summary_sents]
        if not summary_sent_tokens:
            summary_sent_tokens = [[]]
        len_sep = 0
        if sep:
            len_sep = len(sep)
        summary_inputs, summary_answers = [], []
        # NOTE(review): despite the name, this is the *full* sentence count
        # (not len(...) // 2), so `i_sent > half_num_sents` is never true and
        # truncate_bottom stays True for every sentence — confirm intent.
        half_num_sents = len(doc_sent_tokens)
        truncate_bottom = True
        for i_sent, sent_tokens in enumerate(doc_sent_tokens):
            if i_sent > half_num_sents:
                truncate_bottom = False
            sent_tokens, summary_tokens = truncate_sentence_and_summary(
                sent=sent_tokens,
                summary=summary_sent_tokens,
                len_sep=len_sep,
                len_sent_allow_cut=self.len_sent_allow_cut,
                truncate_bottom=truncate_bottom,
            )
            # now it is assured that everything fits into the allowed input size:
            assert len(sent_tokens) + len(summary_tokens) + len_sep + 2 <= BERT_MAX_TOKENS
            inputs, answers = self.get_inputs_for_sentence(sent_tokens, summary_tokens)
            summary_inputs += inputs
            summary_answers += answers
        return summary_inputs, summary_answers
    def assemble_inference_input(self, answers, sent_tokens, help_tokens=None, help_sep=None):
        """Given input tokens, assemble them into the tensors used by the model for inference
        Args:
            answers (Dict[int, str]): a mapping of input token indices to their original value
            sent_tokens (List[str]): tokens corresponding to an input sentence
            help_tokens (List[str]): tokens corresponding to an input summary or filler
            help_sep (List[str]): tokens to put between the summary/filler and the sentence
        Returns:
            model_input (BertInput): an input to the BERT model
            shifted_answers (Dict[int, str]): the input answers but with shifted indices that take
                into account the summary/filler and starting CLS token
        Raises:
            ValueError: if the sentence itself is longer than the BERT_MAX_TOKENS limit, we raise
                this error as opposed to truncating the sentence
        """
        if not help_tokens:
            help_tokens = []
        if not help_sep:
            help_sep = []
        # Layout: [CLS] help help_sep sentence [SEP]
        all_tokens = (
            [self.model_tokenizer.cls_token]
            + help_tokens
            + help_sep
            + sent_tokens
            + [self.model_tokenizer.sep_token]
        )
        input_ids = self.model_tokenizer.convert_tokens_to_ids(all_tokens)
        token_type_ids = [TOKEN_TYPE_A] * len(all_tokens)
        attention_mask = [NOT_MASKED] * len(all_tokens)
        # Shift sentence-relative answer indices by the CLS + help prefix.
        offset = 1 + len(help_tokens) + len(help_sep)
        shifted_answers = {}
        for idx, token in answers.items():
            shifted_answers[idx + offset] = token
        model_input = BertInput(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            labels=None,
            masked_idxs=list(shifted_answers.keys()),
        )
        return model_input, shifted_answers
    def run_inference_batch(self, model, batch):
        """Run an inference batch through the provided model
        Args:
            model (BertForMaskedLM): a BERT for masked language modeling torch model
            batch (List[BertInput]): the input batch to run through the model
        Returns:
            all_predictions (List[Dict[int, str]]): predicted tokens for every masked token in
                the inputs
        """
        input_ids, attention_mask, token_type_ids, _ = get_input_tensors(
            batch, device=self.device, tokenizer=self.model_tokenizer,
        )
        # NOTE(review): the 1-tuple unpacking assumes the model returns a
        # plain tuple (transformers pre-4.x / return_dict=False) — confirm
        # against the pinned transformers version.
        with torch.no_grad():
            (model_output_batch,) = model(
                input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            )
        all_predictions = []
        for model_input, model_output in zip(batch, model_output_batch):
            predictions = {}
            # Greedy decode: take the argmax token at each masked position.
            for idx in model_input.masked_idxs:
                predicted_id = model_output[idx].argmax()
                (predicted_token,) = self.model_tokenizer.convert_ids_to_tokens([predicted_id])
                predictions[idx] = predicted_token
            all_predictions.append(predictions)
        return all_predictions
    def mask_input_tokens(self, tokens, is_finetune):
        """Given a list of tokens, produce maskings for them
        Args:
            tokens (List[str]): a sequence of wordpiece tokens
            is_finetune (bool): whether or not these tokens are going to be used for finetuning
        Returns:
            masked_inputs (List[List[str]]): a list of token sequences, where each token sequence
                contains some masked tokens.
            all_answers (List[Dict[int, str]]): a list of "answer" dicts, where each answer dict maps
                token indices corresponding to masked tokens back to their original token.
        """
        # Finetuning and inference may use different masking strategies.
        if is_finetune:
            even_masking = self.finetune_mask_evenly
        else:
            even_masking = self.inference_mask_evenly
        min_token_lengths = (
            self.min_token_length_normal,
            self.min_token_length_lead,
            self.min_token_length_followup,
        )
        if even_masking:
            return mask_tokens_evenly(
                tokens=tokens,
                gap=self.gap,
                min_token_lengths=min_token_lengths,
                mask_token=self.model_tokenizer.mask_token,
            )
        else:
            return mask_tokens_randomly(
                tokens=tokens,
                min_token_lengths=min_token_lengths,
                mask_token=self.model_tokenizer.mask_token,
            )
    def judge_output(self, base_output, assisted_output, base_answers, assisted_answers):
        """Given a model's predicted tokens with and without assistance, as well as the correct
        token predictions, produce the BLANC score
        Args:
            base_outputs (List[Dict[int, str]]): outputs without using "help" or "tune." Each list
                represents a different input masking, and each dict maps indices to model
                predictions.
            assisted_outputs (List[Dict[int, str]]): outputs using "help" or "tune." Each list
                represents a different input masking, and each dict maps indices to model
                predictions.
            base_answers (List[Dict[int, str]]): answers without using "help" or "tune." Each list
                represents a different input masking, and each dict maps indices to original
                tokens.
            assisted_answers (List[Dict[int, str]]): answers using "help" or "tune." Each
                list represents a different input masking, and each dict maps indices to original
                tokens.
        Returns:
            score (float): the BLANC score, if the measure is 'relative' or 'improve'.
            score, S (tuple of float and list): the BLANC score and counts,
                if the measure is 'relative-counts' or 'improve-counts'.
        """
        base_correctness, _ = determine_correctness(base_output, base_answers)
        assisted_correctness, cnt_unks = determine_correctness(assisted_output, assisted_answers)
        # NOTE(review): leftover debug output — this prints every masked
        # position's answer and predictions to stdout; consider removing or
        # guarding it behind a verbosity flag.
        for i in range(len(base_answers)):
            bas_ans = base_answers[i]
            for key, value in bas_ans.items():
                assist_out = assisted_output[i]
                base_out = base_output[i]
                print(key, '--', value, '--', assist_out.get(key, 0), '--', base_out.get(key, 0))
        # S[b][a] counts positions where base-correct == b and assisted-correct == a.
        S = [[0, 0], [0, 0]]
        for base_correct, assisted_correct in zip(base_correctness, assisted_correctness):
            S[int(base_correct)][int(assisted_correct)] += 1
        measure_split = self.measure.split("-")
        if measure_split[0] == 'relative':
            result = measure_relative(S)
            if self.measure == 'relative-counts':
                result = result, S
        elif measure_split[0] == 'improve':
            result = measure_improve(S)
            if self.measure == 'improve-counts':
                result = result, S
        else:
            raise NotImplementedError(f'unknown measure {self.measure}')
        return result, cnt_unks
    def init_model(self, device):
        """Initialize the language model and send it to the given device
        Args:
            device (str): torch device (usually "cpu" or "cuda")
        Returns:
            model (BertForMaskedLM): a BERT for masked language modeling torch model
        """
        # base == 1 selects the stock pretrained checkpoint; any other value
        # loads a further-pretrained model from the hard-coded './bert_model/'
        # directory (relative to the working directory).
        if self.base == 1:
            model = BertForMaskedLM.from_pretrained(self.model_name).to(device)
            print('Using base model')
        else:
            model = BertForMaskedLM.from_pretrained('./bert_model/').to(device)
            print('Using further pretrained BERT model')
        model.eval()
        return model
class BlancHelp(Blanc):
    """BLANC-help, as defined in the BLANC paper.

    Each masked sentence is scored twice: once with the real summary prepended
    (the "help" input) and once with a same-length filler prepended (the
    baseline input); BLANC-help is the gain of the former over the latter.
    """
    def __init__(
        self,
        gap=Defaults.gap,
        base=Defaults.base,
        model_name=Defaults.model_name,
        measure=Defaults.measure,
        min_token_length_normal=Defaults.min_token_length_normal,
        min_token_length_lead=Defaults.min_token_length_lead,
        min_token_length_followup=Defaults.min_token_length_followup,
        device=Defaults.device,
        inference_batch_size=Defaults.inference_batch_size,
        inference_mask_evenly=Defaults.inference_mask_evenly,
        len_sent_allow_cut=Defaults.len_sent_allow_cut,
        filler_token=Defaults.filler_token,
        help_sep=Defaults.help_sep,
    ):
        """See CLI documentation (blanc --help) for information about each arg"""
        super().__init__(
            gap=gap,
            base=base,
            model_name=model_name,
            measure=measure,
            min_token_length_normal=min_token_length_normal,
            min_token_length_lead=min_token_length_lead,
            min_token_length_followup=min_token_length_followup,
            device=device,
            inference_batch_size=inference_batch_size,
            inference_mask_evenly=inference_mask_evenly,
            len_sent_allow_cut=len_sent_allow_cut,
        )
        self.filler_token = filler_token
        self.help_sep = self.model_tokenizer.tokenize(help_sep)
        self.model = self.init_model(self.device)
    def eval_summaries_for_docs(self, docs, doc_summaries):
        """Calculate the BLANC score for multiple docs, each with multiple summaries.
        See documentation in superclass.
        """
        all_outputs, all_answers = self.mask_and_infer(
            self.model, docs, doc_summaries, sep=self.help_sep
        )
        total_unks = 0
        all_scores = []
        for doc_outputs, doc_answers in zip(all_outputs, all_answers):
            dock_unks = 0
            doc_scores = []
            for summary_output, summary_answers in zip(doc_outputs, doc_answers):
                # get_inputs_for_sentence appends inputs in (help, filler)
                # pairs, so even indices are help inputs, odd are fillers.
                help_output = [out for i, out in enumerate(summary_output) if i % 2 == 0]
                filler_output = [out for i, out in enumerate(summary_output) if i % 2 == 1]
                help_answers = [answer for i, answer in enumerate(summary_answers) if i % 2 == 0]
                filler_answers = [answer for i, answer in enumerate(summary_answers) if i % 2 == 1]
                score, cnt_unks = self.judge_output(filler_output, help_output, filler_answers, help_answers)
                doc_scores.append(score)
                dock_unks += cnt_unks
            all_scores.append(doc_scores)
            total_unks += dock_unks
        return all_scores, total_unks
    def get_inputs_for_sentence(self, sent_tokens, summary_tokens):
        """Get inference inputs corresponding to a given sentence. For BLANC-help, we get several
        maskings for each sentence, and for each of these maskings we have an input with the
        summary prepended, and an input with a filler prepended. See documentation in superclass.
        """
        sent_maskings, init_answers = self.mask_input_tokens(sent_tokens, is_finetune=False)
        # The filler matches the summary length so positional effects cancel.
        filler_tokens = [self.filler_token] * len(summary_tokens)
        inputs, final_answers = [], []
        for sent_masking, init_answer in zip(sent_maskings, init_answers):
            help_input, help_answers = self.assemble_inference_input(
                answers=init_answer,
                sent_tokens=sent_masking,
                help_tokens=summary_tokens,
                help_sep=self.help_sep,
            )
            filler_input, filler_answers = self.assemble_inference_input(
                answers=init_answer,
                sent_tokens=sent_masking,
                help_tokens=filler_tokens,
                help_sep=self.help_sep,
            )
            # Keep the (help, filler) ordering: eval_summaries_for_docs
            # relies on this even/odd interleaving.
            inputs += [help_input, filler_input]
            final_answers += [help_answers, filler_answers]
        return inputs, final_answers
class BlancTune(Blanc):
    """BLANC-tune, as defined in the BLANC paper.

    Each summary gets its own copy of the model, finetuned on chunks of the
    summary; BLANC-tune is the gain of the finetuned model over the base
    model on the masked document.
    """
    def __init__(
        self,
        model_name=Defaults.model_name,
        measure=Defaults.measure,
        gap=Defaults.gap,
        min_token_length_normal=Defaults.min_token_length_normal,
        min_token_length_lead=Defaults.min_token_length_lead,
        min_token_length_followup=Defaults.min_token_length_followup,
        device=Defaults.device,
        inference_batch_size=Defaults.inference_batch_size,
        inference_mask_evenly=Defaults.inference_mask_evenly,
        finetune_batch_size=Defaults.finetune_batch_size,
        finetune_epochs=Defaults.finetune_epochs,
        finetune_mask_evenly=Defaults.finetune_mask_evenly,
        len_sent_allow_cut=Defaults.len_sent_allow_cut,
        finetune_chunk_size=Defaults.finetune_chunk_size,
        finetune_chunk_stride=Defaults.finetune_chunk_stride,
        learning_rate=Defaults.learning_rate,
        warmup_steps=Defaults.warmup_steps,
    ):
        """See CLI documentation (blanc --help) for information about each arg"""
        # NOTE(review): unlike BlancHelp, `base` is not forwarded here, so
        # init_model always sees Defaults.base — confirm that is intended.
        super().__init__(
            model_name=model_name,
            measure=measure,
            gap=gap,
            min_token_length_normal=min_token_length_normal,
            min_token_length_lead=min_token_length_lead,
            min_token_length_followup=min_token_length_followup,
            device=device,
            inference_batch_size=inference_batch_size,
            inference_mask_evenly=inference_mask_evenly,
            len_sent_allow_cut=len_sent_allow_cut,
        )
        self.finetune_batch_size = finetune_batch_size
        self.finetune_epochs = finetune_epochs
        self.finetune_mask_evenly = finetune_mask_evenly
        self.finetune_chunk_size = finetune_chunk_size
        self.finetune_chunk_stride = finetune_chunk_stride
        self.learning_rate = learning_rate
        self.warmup_steps = warmup_steps
        self.base_model = self.init_model(self.device)
    def eval_summaries_for_docs(self, docs, doc_summaries):
        """Calculate the BLANC score for multiple docs, each with multiple summaries.
        See documentation in superclass.

        NOTE(review): this returns only `all_scores`, whose elements are the
        (result, cnt_unks) tuples produced by judge_output — it does NOT
        return a (scores, total_unks) pair the way BlancHelp does, so
        Blanc.eval_once / eval_pairs would unpack it incorrectly. Confirm and
        reconcile the return shapes.
        """
        base_outputs, base_answers = self.mask_and_infer(self.base_model, docs, doc_summaries)
        finetuned_outputs, finetuned_answers = [], []
        # Keep a CPU master copy so each summary finetunes a fresh clone.
        model_cpu = self.init_model(device='cpu')
        for doc, summaries in tqdm.tqdm(zip(docs, doc_summaries), total=len(docs)):
            finetuned_doc_outputs, finetuned_doc_answers = [], []
            for summary in summaries:
                model_copy = copy.deepcopy(model_cpu)
                finetuned_model = model_copy.to(self.device)
                self.finetune(finetuned_model, summary)
                (finetuned_summary_output,), (finetuned_summary_answer,) = self.mask_and_infer(
                    finetuned_model, [doc], [[summary]], loading_bar=False
                )
                finetuned_doc_outputs += finetuned_summary_output
                finetuned_doc_answers += finetuned_summary_answer
                # Free GPU memory before finetuning the next clone.
                del finetuned_model
                torch.cuda.empty_cache()
            finetuned_outputs.append(finetuned_doc_outputs)
            finetuned_answers.append(finetuned_doc_answers)
        all_scores = [
            [
                self.judge_output(
                    base_summary_output,
                    finetuned_summary_output,
                    base_summary_answers,
                    finetuned_summary_answers,
                )
                for (
                    base_summary_output,
                    base_summary_answers,
                    finetuned_summary_output,
                    finetuned_summary_answers,
                ) in zip(
                    base_doc_output, base_doc_answers, finetuned_doc_output, finetuned_doc_answers,
                )
            ]
            for (
                base_doc_output,
                base_doc_answers,
                finetuned_doc_output,
                finetuned_doc_answers,
            ) in zip(
                base_outputs, base_answers, finetuned_outputs, finetuned_answers,
            )
        ]
        return all_scores
    def get_inputs_for_sentence(self, sent_tokens, summary_tokens):
        """Get inference inputs corresponding to a given sentence. For BLANC-tune, we get several
        maskings for each sentence, and each masking is a single input. See documentation in
        superclass.
        """
        sent_maskings, init_answers = self.mask_input_tokens(sent_tokens, is_finetune=False)
        inputs, final_answers = [], []
        for sent_idx, (sent_masking, init_answer) in enumerate(zip(sent_maskings, init_answers)):
            input_, answers = self.assemble_inference_input(
                answers=init_answer, sent_tokens=sent_masking,
            )
            inputs.append(input_)
            final_answers.append(answers)
        return inputs, final_answers
    def finetune(self, model, summary):
        """Finetune the given model on a "dataset" produced from chunks of the given summary.
        Args:
            model (BertForMaskedLM): a BERT for masked language modeling torch model
            summary (str): the summary to finetune on
        """
        model.train()
        all_inputs = self.prepare_finetuning_data(summary)
        input_batches = batch_data(all_inputs, self.finetune_batch_size)
        # Standard BERT finetuning setup: no weight decay on biases/LayerNorm.
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],
                "weight_decay": 1e-2,
            },
            {
                "params": [
                    p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)
                ],
                "weight_decay": 0.0,
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.learning_rate, eps=1e-8)
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.warmup_steps,
            num_training_steps=len(input_batches) * self.finetune_epochs,
        )
        for epoch in range(self.finetune_epochs):
            for input_batch in input_batches:
                input_ids, attention_mask, token_type_ids, labels = get_input_tensors(
                    input_batch, device=self.device, tokenizer=self.model_tokenizer,
                )
                model.zero_grad()
                optimizer.zero_grad()
                # NOTE(review): `masked_lm_labels` is the pre-transformers-4
                # kwarg (renamed to `labels`); confirm the pinned version.
                loss, _ = model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    token_type_ids=token_type_ids,
                    masked_lm_labels=labels,
                )
                loss.backward()
                optimizer.step()
                scheduler.step()
        model.eval()
    def prepare_finetuning_data(self, summary):
        """Create a finetuning dataset using chunks of the given summary
        Args:
            summary (str): the input summary to finetune on
        Returns:
            model_inputs (List[BertInput]): a list of inputs to use as the finetuning dataset
        """
        summary_tokens = self.model_tokenizer.tokenize(summary)
        model_inputs = []
        # Slide a fixed-size window over the summary tokens.
        for start_token in range(0, len(summary_tokens), self.finetune_chunk_stride):
            end_token = start_token + self.finetune_chunk_size
            chunk_tokens = summary_tokens[start_token:end_token]
            model_inputs += self.assemble_finetuning_input(chunk_tokens)
        return model_inputs
    def assemble_finetuning_input(self, chunk_tokens):
        """Given input tokens, assemble them into the tensors used by the model for finetuning
        Args:
            chunk_tokens (List[str]): a token sequence
        Returns:
            model_inputs (List[BertInput]): BertInputs corresponding to different maskings of
                chunk_tokens
        """
        all_input_tokens, all_answers = self.mask_input_tokens(chunk_tokens, is_finetune=True)
        all_input_tokens = [
            [self.model_tokenizer.cls_token] + tokens + [self.model_tokenizer.sep_token]
            for tokens in all_input_tokens
        ]
        all_input_ids = [
            self.model_tokenizer.convert_tokens_to_ids(tokens) for tokens in all_input_tokens
        ]
        # LABEL_IGNORE everywhere except the masked positions filled in below.
        all_labels = [[LABEL_IGNORE] * len(tokens) for tokens in all_input_tokens]
        model_inputs = []
        for input_ids, answers, labels in zip(all_input_ids, all_answers, all_labels):
            for original_idx, token in answers.items():
                idx = original_idx + 1  # accounting for starting CLS token
                (original_token_id,) = self.model_tokenizer.convert_tokens_to_ids([token])
                labels[idx] = original_token_id
                # BERT-style corruption: with small probability replace the
                # mask with a random token or restore the original token.
                random_number = random.random()
                if random_number < P_TOKEN_REPLACE:
                    # replace with a random token
                    input_ids[idx] = random.randint(*TOKEN_REPLACE_RANGE)
                elif random_number < P_TOKEN_ORIGINAL + P_TOKEN_REPLACE:
                    # use original token
                    input_ids[idx] = original_token_id
            attention_mask = [NOT_MASKED] * len(input_ids)
            token_type_ids = [TOKEN_TYPE_A] * len(input_ids)
            model_input = BertInput(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                labels=labels,
                masked_idxs=None,
            )
            model_inputs.append(model_input)
        return model_inputs
| StarcoderdataPython |
class Solution:
    def threeSumClosest(self, nums: List[int], target: int) -> int:
        """Return the sum of three elements of nums closest to target.

        Classic sorted two-pointer scan: fix the smallest element, then move
        a left/right pointer pair inward, tracking the best sum seen so far.
        Returns immediately on an exact match.
        """
        # Seed with the first three elements (before sorting).
        best = nums[0] + nums[1] + nums[2]
        nums.sort()
        n = len(nums)
        for first in range(n - 2):
            # Skip duplicate anchors; they cannot yield a new closest sum.
            if first > 0 and nums[first] == nums[first - 1]:
                continue
            lo, hi = first + 1, n - 1
            while lo < hi:
                total = nums[first] + nums[lo] + nums[hi]
                if total == target:
                    return total
                if abs(total - target) < abs(best - target):
                    best = total
                # Move the pointer that brings the sum toward the target.
                if total < target:
                    lo += 1
                else:
                    hi -= 1
        return best
| StarcoderdataPython |
9616363 | <filename>MONFG.py
import time
import argparse
import numpy as np
import pandas as pd
from utils import *
from games import *
from no_com_agent import NoComAgent
from comp_action_agent import CompActionAgent
from coop_action_agent import CoopActionAgent
from coop_policy_agent import CoopPolicyAgent
from optional_com_agent import OptionalComAgent
from pessimistic_agent import PessimisticAgent
from non_stationary_agent import NonStationaryAgent
def get_communicator(episode, agents, alternate=False):
    """
    This function selects the communicator.
    :param episode: The current episode.
    :param agents: The agents in the game.
    :param alternate: Alternate the leader or always the same.
    :return: The id of the communicating agent and the communicating agent itself.
    """
    # Round-robin over agents when alternating, otherwise agent 0 leads.
    leader_id = episode % len(agents) if alternate else 0
    return leader_id, agents[leader_id]
def select_actions(agents, message):
    """
    This function selects an action from each agent's policy.
    :param agents: The list of agents.
    :param message: The message from the leader.
    :return: A list of selected actions.
    """
    # Every agent conditions its action choice on the same message.
    return [agent.select_action(message) for agent in agents]
def calc_payoffs(agents, actions, payoff_matrices):
    """
    This function will calculate the payoffs of the agents.
    :param agents: The list of agents (unused; kept for interface compatibility).
    :param actions: The action that each agent chose.
    :param payoff_matrices: The payoff matrices.
    :return: A list of received payoffs.
    """
    # Joint-action lookup: row = agent 0's action, column = agent 1's action.
    row, col = actions[0], actions[1]
    return [matrix[row][col] for matrix in payoff_matrices]
def calc_returns(payoffs, agents, rollouts):
    """
    Compute the scalarised expected return (SER) for each agent.

    The vector payoffs collected over the rollout period are averaged and
    then scalarised through the agent's utility function ``u``.

    :param payoffs: Per-agent lists of vector payoffs from the rollouts.
    :param agents: The agents in this experiment (must expose ``u``).
    :param rollouts: The number of rollouts that were performed.
    :return: A list of scalarised expected returns, one per agent.
    """
    return [
        agent.u(np.sum(history, axis=0) / rollouts)
        for history, agent in zip(payoffs, agents)
    ]
def calc_action_probs(actions, num_actions, rollouts):
    """
    Compute the empirical action distribution for each agent.

    :param actions: Per-agent lists of actions taken during the rollouts
        (each action an int in [0, num_actions)).
    :param num_actions: The number of possible actions.
    :param rollouts: The number of rollouts that were performed.
    :return: A list of length-num_actions probability arrays, one per agent.
    """
    all_probs = []
    for history in actions:
        counts = np.bincount(history, minlength=num_actions)
        all_probs.append(counts / rollouts)
    return all_probs
def calc_com_probs(messages, rollouts):
    """
    Compute the empirical communication frequencies over the rollouts.

    :param messages: The messages that were sent (None means "no message").
    :param rollouts: The number of rollouts that were performed.
    :return: [P(communicated), P(did not communicate)].
    """
    sent = len(messages) - messages.count(None)
    return [sent / rollouts, (rollouts - sent) / rollouts]
def update(agents, communicator, message, actions, payoffs):
    """
    Let every agent update its internal mechanisms after an episode.

    Each agent receives the full joint action but only its own payoff.

    :param agents: A list of agents.
    :param communicator: The id of the communicating agent.
    :param message: The message that was sent.
    :param actions: A list of each action that was chosen, indexed by agent.
    :param payoffs: A list of each payoff that was received, indexed by agent.
    :return: /
    """
    for agent, own_payoff in zip(agents, payoffs):
        agent.update(communicator, message, actions, own_payoff)
def reset(experiment, num_agents, u_lst, num_actions, num_objectives, alpha_q, alpha_theta, alpha_msg, alpha_decay, opt=False):
    """
    Create a fresh set of agents for a new trial.

    :param experiment: The type of experiment being run (selects the agent class).
    :param num_agents: The number of agents to create.
    :param u_lst: A list of utility-function names, one per agent.
    :param num_actions: The number of actions each agent can take.
    :param num_objectives: The number of objectives the agents have.
    :param alpha_q: The learning rate for the Q values.
    :param alpha_theta: The learning rate for theta.
    :param alpha_msg: The learning rate for the messaging strategy in the
        optional-communication experiments.
    :param alpha_decay: The learning rate decay.
    :param opt: Whether to optimistically initialize the Q-tables.
    :return: The list of freshly created agents.
    :raises Exception: if the experiment name is unknown.
    """
    # Experiments that map directly onto a single agent class.
    plain_agents = {
        'no_com': NoComAgent,
        'comp_action': CompActionAgent,
        'pessimistic': PessimisticAgent,
        'non_stationary': NonStationaryAgent,
        'coop_action': CoopActionAgent,
        'coop_policy': CoopPolicyAgent,
    }
    # Optional-communication experiments wrap a no-com agent and a
    # communicating agent of the given class inside an OptionalComAgent.
    optional_agents = {
        'opt_comp_action': CompActionAgent,
        'opt_coop_action': CoopActionAgent,
        'opt_coop_policy': CoopPolicyAgent,
    }
    agents = []
    for ag, u_str in zip(range(num_agents), u_lst):
        u, du = get_u_and_du(u_str)  # Utility function and its derivative for this agent.
        common = (alpha_q, alpha_theta, alpha_decay, num_actions, num_objectives, opt)
        if experiment in plain_agents:
            new_agent = plain_agents[experiment](ag, u, du, *common)
        elif experiment in optional_agents:
            no_com_agent = NoComAgent(ag, u, du, *common)
            com_agent = optional_agents[experiment](ag, u, du, *common)
            new_agent = OptionalComAgent(no_com_agent, com_agent, ag, u, du, alpha_q, alpha_msg, alpha_decay,
                                         num_objectives, opt)
        else:
            raise Exception(f'No experiment of type {experiment} exists')
        agents.append(new_agent)
    return agents
def run_experiment(experiment, runs, episodes, rollouts, payoff_matrices, u, alternate, opt_init):
    """
    Run the requested experiment: `runs` independent trials of `episodes`
    episodes each, evaluating the policies over a `rollouts`-step window
    every episode.

    :param experiment: The type of experiment we are running.
    :param runs: The number of different runs (independent trials).
    :param episodes: The number of episodes in each run.
    :param rollouts: The rollout period used to estimate SER and probabilities.
    :param payoff_matrices: The payoff matrices for the game (one per agent).
    :param u: A list of utility-function names to use for the agents.
    :param alternate: Alternate commitment between players.
    :param opt_init: Whether to optimistically initialize the Q-tables.
    :return: A log of returns, a log of action probabilities, a log of
        communication probabilities and the state-distribution matrix
        accumulated over the last 10% of episodes.
    """
    # Setting hyperparameters.
    num_agents = 2
    num_actions = payoff_matrices[0].shape[0]
    num_objectives = 2
    alpha_q = 0.2
    alpha_theta = 0.005
    alpha_msg = 0.005
    alpha_decay = 1
    # Setting up lists containing the results.
    returns_log = [[] for _ in range(num_agents)]
    action_probs_log = [[] for _ in range(num_agents)]
    com_probs_log = [[] for _ in range(num_agents)]
    state_dist_log = np.zeros((num_actions, num_actions))
    start = time.time()
    for run in range(runs):
        print("Starting run: ", run)
        # Fresh agents for every trial so runs are independent.
        agents = reset(experiment, num_agents, u, num_actions, num_objectives, alpha_q, alpha_theta, alpha_msg,
                       alpha_decay, opt_init)
        for episode in range(episodes):
            # We keep the actions and payoffs of this episode so that we can later calculate the SER.
            ep_actions = [[] for _ in range(num_agents)]
            ep_payoffs = [[] for _ in range(num_agents)]
            ep_messages = []
            communicator, communicating_agent = get_communicator(episode, agents, alternate)
            for rollout in range(rollouts):  # Required to evaluate the SER and action probabilities.
                message = communicating_agent.get_message()
                actions = select_actions(agents, message)
                payoffs = calc_payoffs(agents, actions, payoff_matrices)
                # Log the results of this roll
                for idx in range(num_agents):
                    ep_actions[idx].append(actions[idx])
                    ep_payoffs[idx].append(payoffs[idx])
                ep_messages.append(message)
            # Update the agent after the episode
            # We use the last action and payoff to update the agent. It doesn't really matter which rollout we select
            # to update our agent as the agent doesn't learn any new information during the rollout.
            last_actions = np.array(ep_actions)[:, -1]
            last_payoffs = np.array(ep_payoffs)[:, -1]
            last_message = ep_messages[-1]
            if experiment == 'non_stationary':
                # Non-stationary agents learn from the leader's message policy
                # rather than from a single sampled message.
                last_message = communicating_agent.msg_policy
            update(agents, communicator, last_message, last_actions, last_payoffs)  # Update the agents.
            # Get the necessary results from this episode.
            action_probs = calc_action_probs(ep_actions, num_actions, rollouts)
            returns = calc_returns(ep_payoffs, agents, rollouts)
            com_probs = calc_com_probs(ep_messages, rollouts)
            # Append the logs.
            for idx in range(num_agents):
                returns_log[idx].append([run, episode, returns[idx]])
                prob_log = [run, episode] + action_probs[idx].tolist()
                action_probs_log[idx].append(prob_log)
            com_log = [run, episode] + com_probs
            com_probs_log[communicator].append(com_log)
            # If we are in the last 10% of episodes we build up a state distribution log.
            # This code is specific to two player games.
            if episode >= 0.9 * episodes:
                state_dist = np.zeros((num_actions, num_actions))
                for a1, a2 in zip(ep_actions[0], ep_actions[1]):
                    state_dist[a1, a2] += 1
                state_dist /= rollouts
                state_dist_log += state_dist
    end = time.time()
    elapsed_mins = (end - start) / 60.0
    print("Minutes elapsed: " + str(elapsed_mins))
    return returns_log, action_probs_log, com_probs_log, state_dist_log
def save_data(path, name, returns_log, action_probs_log, com_probs_log, state_dist_log, runs, episodes):
    """
    This function will save all of the results to disk in CSV format for later analysis.
    :param path: The path to the directory in which all files will be saved.
    :param name: The name of the experiment.
    :param returns_log: The log for the returns.
    :param action_probs_log: The log for the action probabilities.
    :param com_probs_log: The log for the communication probabilities.
    :param state_dist_log: The state distribution log in the last 10% of episodes.
    :param runs: The number of trials that were ran.
    :param episodes: The number of episodes in each run.
    :return: /
    """
    # NOTE(review): the file names below read the module-level variable `game`,
    # which is only bound inside the __main__ block — calling save_data from
    # another module would raise NameError; consider passing it as a parameter.
    print("Saving data to disk")
    num_agents = len(returns_log)  # Extract the number of agents that were in the experiment.
    num_actions = len(action_probs_log[0][0]) - 2  # Extract the number of actions that were possible in the experiment.
    returns_columns = ['Trial', 'Episode', 'Payoff']
    action_columns = [f'Action {a + 1}' for a in range(num_actions)]
    action_columns = ['Trial', 'Episode'] + action_columns
    com_columns = ['Trial', 'Episode', 'Communication', 'No communication']
    for idx in range(num_agents):
        df_r = pd.DataFrame(returns_log[idx], columns=returns_columns)
        df_a = pd.DataFrame(action_probs_log[idx], columns=action_columns)
        df_r.to_csv(f'{path}/{name}_{game}_A{idx + 1}_returns.csv', index=False)
        df_a.to_csv(f'{path}/{name}_{game}_A{idx + 1}_probs.csv', index=False)
    # Communication logs only exist for the optional-communication experiments.
    if name in ['opt_comp_action', 'opt_coop_action', 'opt_coop_policy']:
        for idx in range(num_agents):
            df = pd.DataFrame(com_probs_log[idx], columns=com_columns)
            df.to_csv(f'{path}/{name}_{game}_A{idx + 1}_com.csv', index=False)
    # Normalise visit counts by the number of contributing episodes
    # (the last 10% of each of the `runs` trials).
    state_dist_log /= runs * (0.1 * episodes)
    df = pd.DataFrame(state_dist_log)
    df.to_csv(f'{path}/{name}_{game}_states.csv', index=False, header=False)
    print("Finished saving data to disk")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--game', type=str, default='game1', help="which MONFG game to play")
    parser.add_argument('--u', type=str, default=['u1', 'u2'], nargs='+',
                        help="Which utility functions to use per player")
    parser.add_argument('--experiment', type=str, default='pessimistic', help='The experiment to run.')
    # BUG FIX: argparse with `type=bool` is broken — bool('False') is True, so
    # any explicit value (even "False") enabled alternation. Parse truthy
    # strings explicitly instead; `--alternate True` keeps working as intended
    # and `--alternate False` now actually disables alternation.
    parser.add_argument('--alternate', type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                        default=False, help="Alternate commitment between players.")
    parser.add_argument('--runs', type=int, default=100, help="number of trials")
    parser.add_argument('--episodes', type=int, default=5000, help="number of episodes")
    parser.add_argument('--rollouts', type=int, default=100, help="Rollout period for the policies")
    # Optimistic initialization can encourage exploration.
    parser.add_argument('--opt_init', action='store_true', help="optimistic initialization")
    args = parser.parse_args()
    # Extracting the arguments.
    game = args.game
    u = args.u
    experiment = args.experiment
    alternate = args.alternate
    runs = args.runs
    episodes = args.episodes
    rollouts = args.rollouts
    opt_init = args.opt_init
    # Starting the experiments.
    payoff_matrices = get_monfg(game)
    data = run_experiment(experiment, runs, episodes, rollouts, payoff_matrices, u, alternate, opt_init)
    returns_log, action_probs_log, com_probs_log, state_dist_log = data
    # Writing the data to disk.
    path = create_game_path('data', experiment, game, opt_init)
    mkdir_p(path)
    save_data(path, experiment, returns_log, action_probs_log, com_probs_log, state_dist_log, runs, episodes)
| StarcoderdataPython |
5049828 | <reponame>leeh8911/ML-From-Scratch
from __future__ import print_function, division
from sklearn import datasets
import math
import matplotlib.pyplot as plt
import numpy as np
import progressbar
from sklearn.datasets import fetch_mldata
from mlfromscratch.deep_learning.optimizers import Adam
from mlfromscratch.deep_learning.loss_functions import CrossEntropy, SquareLoss
from mlfromscratch.deep_learning.layers import Dense, Dropout, Flatten, Activation, Reshape, BatchNormalization
from mlfromscratch.deep_learning import NeuralNetwork
class Autoencoder():
    """An autoencoder built from two deep fully-connected networks.

    The encoder compresses a flattened 28x28 MNIST digit into a
    128-dimensional embedding; the decoder reconstructs the image from it.
    Both halves are concatenated into a single network that is trained to
    reproduce its own input.
    """
    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.img_dim = self.img_rows * self.img_cols  # flattened image size
        self.latent_dim = 128  # The dimension of the data embedding
        optimizer = Adam(learning_rate=0.0002, b1=0.5)
        loss_function = SquareLoss
        self.encoder = self.build_encoder(optimizer, loss_function)
        self.decoder = self.build_decoder(optimizer, loss_function)
        # The full autoencoder is simply the encoder layers followed by the
        # decoder layers, sharing the same optimizer and loss.
        self.autoencoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        self.autoencoder.layers.extend(self.encoder.layers)
        self.autoencoder.layers.extend(self.decoder.layers)
        print ()
        self.autoencoder.summary(name="Variational Autoencoder")

    def build_encoder(self, optimizer, loss_function):
        """Build the dense encoder: img_dim -> 512 -> 256 -> latent_dim."""
        net = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        stack = (
            Dense(512, input_shape=(self.img_dim,)),
            Activation('leaky_relu'),
            BatchNormalization(momentum=0.8),
            Dense(256),
            Activation('leaky_relu'),
            BatchNormalization(momentum=0.8),
            Dense(self.latent_dim),
        )
        for layer in stack:
            net.add(layer)
        return net

    def build_decoder(self, optimizer, loss_function):
        """Build the dense decoder: latent_dim -> 256 -> 512 -> img_dim (tanh)."""
        net = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        stack = (
            Dense(256, input_shape=(self.latent_dim,)),
            Activation('leaky_relu'),
            BatchNormalization(momentum=0.8),
            Dense(512),
            Activation('leaky_relu'),
            BatchNormalization(momentum=0.8),
            Dense(self.img_dim),
            Activation('tanh'),
        )
        for layer in stack:
            net.add(layer)
        return net

    def train(self, n_epochs, batch_size=128, save_interval=50):
        """Train on random MNIST mini-batches, periodically saving samples."""
        mnist = fetch_mldata('MNIST original')
        # Rescale pixel values from [0, 255] to [-1, 1] to match the tanh output.
        X = (mnist.data.astype(np.float32) - 127.5) / 127.5
        for epoch in range(n_epochs):
            # Draw a random mini-batch of images.
            batch = X[np.random.randint(0, X.shape[0], batch_size)]
            # Reconstruction task: input and target are the same images.
            loss, _ = self.autoencoder.train_on_batch(batch, batch)
            # Display the progress
            print ("%d [D loss: %f]" % (epoch, loss))
            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch, X)

    def save_imgs(self, epoch, X):
        """Save a 5x5 grid of reconstructed images to ae_<epoch>.png."""
        rows, cols = 5, 5  # Grid size
        sample = X[np.random.randint(0, X.shape[0], rows * cols)]
        # Reconstruct and reshape back to image form.
        recon = self.autoencoder.predict(sample).reshape((-1, self.img_rows, self.img_cols))
        recon = 0.5 * recon + 0.5  # map tanh output back to [0, 1] for display
        fig, axs = plt.subplots(rows, cols)
        plt.suptitle("Autoencoder")
        for idx, ax in enumerate(axs.flat):
            ax.imshow(recon[idx, :, :], cmap='gray')
            ax.axis('off')
        fig.savefig("ae_%d.png" % epoch)
        plt.close()
if __name__ == '__main__':
    # Entry point: train for 200k mini-batches of 64 images, snapshotting a
    # grid of reconstructions to ae_<epoch>.png every 400 batches.
    ae = Autoencoder()
    ae.train(n_epochs=200000, batch_size=64, save_interval=400)
| StarcoderdataPython |
3341489 | <reponame>Daniel-Vital/magicroot
"""
This file contains the functions used to compute useful values for predetermined dataframe structures
"""
from . import format
from datetime import timedelta
import numpy as np
import pandas as pd
def duration(df, dt_begin, dt_end, computed_column='duration', days=False, *args, **kwargs):
    """
    Computes the duration between two date columns, in years (365-day
    convention) by default or in days, floored at zero.

    :param df: Dataframe
        :column dt_begin: column(s) should be in the table
        :column dt_end: column(s) should be in the table
        base to compute
    :param dt_begin: str
        column with the begin date
    :param dt_end: str
        column with the end date
    :param computed_column: str, default 'duration'
        name to give to the column with the computed duration
    :param days: bool, default False
        if True the duration is expressed in days, otherwise in years
    :return: Dataframe
        :column previous: all column(s) previously in the table
        :column computed_column: computed column
        result table
    """
    # Divide by 365 to express the difference in years unless days are requested.
    multiplier = 1 if days else 365
    return format.as_date(df, [dt_begin, dt_end], *args, **kwargs).assign(
        **{
            # Negative spans (end before begin) are floored at zero.
            computed_column: lambda x: np.maximum((x[dt_end] - x[dt_begin]).dt.days / multiplier, 0)
        }
    )
def date_perc(df, dt_begin, dt_end, dt_ref, duration_column='duration_pct', *args, **kwargs):
    """
    Computes how far a reference date lies between two other dates, as the
    elapsed fraction duration(begin -> ref) / duration(begin -> end).

    :param df: Dataframe
        :column dt_begin: column(s) should be in the table
        :column dt_end: column(s) should be in the table
        :column dt_ref: column(s) should be in the table
        base to compute
    :param dt_begin: str
        column with the begin date
    :param dt_end: str
        column with the end date
    :param dt_ref: str
        column with the reference date
    :param duration_column: str, default 'duration_pct'
        name to give to the column with the computed percentage
    :return: Dataframe
        :column previous: all column(s) previously in the table
        :column duration_column: computed column
        result table
    """
    return format.as_date(df, [dt_begin, dt_end, dt_ref], *args, **kwargs).assign(
        **{
            # Both durations are floored at zero by `duration`, so the ratio
            # can become 0/0 (NaN) when ref precedes begin — presumably
            # acceptable upstream; verify with callers.
            duration_column: lambda x:
            duration(x, dt_begin, dt_ref)['duration'] / duration(x, dt_begin, dt_end)['duration']
        }
    )
def maturity(from_date, to_date):
    """
    Computes the remaining maturity between two date series.

    Returns ``to_date - from_date`` floored at zero: rows whose ``from_date``
    is on or past ``to_date`` (already matured) yield a zero timedelta.

    :param from_date: pd.Series of datetimes
        Series with the begin date
    :param to_date: pd.Series of datetimes
        Series with the end date
    :return: pd.Series of timedeltas
    """
    # BUG FIX: the original condition was inverted (`from_date > to_date`),
    # which kept the *negative* differences and zeroed out genuine maturities.
    # Series.where keeps values where the condition is True.
    return (to_date - from_date).where(to_date > from_date, timedelta(days=0))
def discount_rate(with_rate, with_maturity, days_in_year=365):
    """
    Builds a discount-factor function: 1 / (1 + rate) ** (maturity in years).

    :param with_rate: pd.Series
        Series with the spot rate per row
    :param with_maturity: pd.Series of timedeltas
        Series with the maturity per row
    :param days_in_year: int, default 365
        number of days in the year used to annualise the maturity
    :return: a function suitable for ``df.assign``.
        NOTE(review): the returned callable ignores its argument; the factors
        are computed lazily from the closed-over series — confirm intent.
    """
    def _factors(_frame):
        years = with_maturity.dt.days / days_in_year
        return 1 / (1 + with_rate).pow(years)

    return _factors
def discounted_cashflows(df, cashflow_columns, disc_rate_column, prefix='disc_', suffix=''):
    """
    Adds, for each cashflow column, a discounted version obtained by
    multiplying it with the discount-factor column.

    :param df: Dataframe
        :column cashflow_columns: column(s) should be in the table
        :column disc_rate_column: column(s) should be in the table
        base to compute
    :param cashflow_columns: list
        columns with cashflows to discount
    :param disc_rate_column: str
        column with the discount factors
    :param prefix: str, default 'disc_'
        prefix to add to the generated column names
    :param suffix: str, default ''
        suffix to add to the generated column names
    :return: Dataframe
        :column previous: all column(s) previously in the table
        :column prefix + cashflow_columns + suffix: computed columns
        result table
    """
    new_columns = {
        prefix + name + suffix: df[name] * df[disc_rate_column]
        for name in cashflow_columns
    }
    return df.assign(**new_columns)
def discounted_columns_pairs(cashflow_columns, prefix, suffix):
    """
    Maps each undiscounted cashflow column name to its discounted counterpart
    (the same name wrapped in the given prefix and suffix).

    :param cashflow_columns: list
        undiscounted cashflow column names
    :param prefix: str
        prefix used to mark discounted columns
    :param suffix: str
        suffix used to mark discounted columns
    :return: dict of undiscounted column name -> discounted column name
    """
    return dict(zip(cashflow_columns, (prefix + name + suffix for name in cashflow_columns)))
def discounted_components(df, cashflow_columns, prefix='comp_', suffix=''):
    """
    Computes, for each (undiscounted, discounted) column pair, the discounting
    component: discounted value minus undiscounted value.

    :param df: Dataframe
        :column keys and values of cashflow_columns: column(s) should be in the table
        base to compute
    :param cashflow_columns: dict
        mapping of undiscounted cashflow column -> discounted cashflow column
        (as produced by ``discounted_columns_pairs``)
    :param prefix: str, default 'comp_'
        prefix to add to the generated column names
    :param suffix: str, default ''
        suffix to add to the generated column names
    :return: Dataframe
        :column previous: all column(s) previously in the table
        :column prefix + discounted column + suffix: computed columns
        result table
    """
    # BUG FIX: the original built `lambda x: ...` closures inside the dict
    # comprehension. All of them captured the loop variables late, so every
    # generated column was computed from the *last* column pair. Evaluating
    # the Series eagerly binds each pair correctly.
    return df.assign(
        **{
            prefix + disc_column + suffix: df[disc_column] - df[cash_column]
            for cash_column, disc_column in cashflow_columns.items()
        }
    )
def intersection_days(*args, shift_days=0):
    """
    Computes the number of days in the intersection of all given time windows.

    All windows should be provided in the format ('begin column', 'end column').

    :param shift_days: int, constant number of days added to the result
        (e.g. 1 to count both endpoints as inclusive).
    :return: a function mapping a Dataframe to a Series of timedeltas
        (negative when the windows do not overlap).
    """
    from functools import reduce

    def _intersection(df):
        # BUG FIX: np.minimum/np.maximum are binary ufuncs — calling them with
        # three or more unpacked windows treats the extra argument as the
        # `out` parameter. reduce() applies them pairwise and supports any
        # number of windows.
        earliest_end = reduce(np.minimum, [df[end] for _, end in args])
        latest_begin = reduce(np.maximum, [df[begin] for begin, _ in args])
        return earliest_end - latest_begin + timedelta(days=shift_days)

    return _intersection
def union_days(*args, shift_days=0):
    """
    Computes the number of days in the union (overall span) of all given
    time windows.

    All windows should be provided in the format ('begin column', 'end column').

    :param shift_days: int, constant number of days added to the result.
    :return: a function mapping a Dataframe to a Series of timedeltas.
    """
    from functools import reduce

    def _union(df):
        # BUG FIX: np.maximum/np.minimum are binary ufuncs — unpacking three
        # or more windows treats the extra argument as the `out` parameter.
        # reduce() applies them pairwise and supports any number of windows.
        latest_end = reduce(np.maximum, [df[end] for _, end in args])
        earliest_begin = reduce(np.minimum, [df[begin] for begin, _ in args])
        return latest_end - earliest_begin + timedelta(days=shift_days)

    return _union
def intersection_days_perc(*args, shift_days=0):
    """
    Computes the intersection share of all given time windows: the
    intersection days divided by the union days.

    All windows should be provided in the format ('begin column', 'end column').
    :return: a function mapping a Dataframe to a Series of ratios.
    """
    def _ratio(df):
        overlap = intersection_days(*args, shift_days=shift_days)(df)
        span = union_days(*args, shift_days=shift_days)(df)
        return overlap / span

    return _ratio
def eom(df, columns=None, prefix='eom_', suffix='', *args, **kwargs):
    """
    Adds, for each given date column, a column holding that date's end of
    month. A date already on a month end maps to itself (the one-day shift
    before applying MonthEnd prevents rolling into the next month).

    :param df: Dataframe
        :column columns: column(s) should be in the table
    :param columns: list of date column names to process.
        NOTE(review): the default ``None`` is iterated directly and would
        raise TypeError — callers appear to always pass a list; confirm.
    :param prefix: str, default 'eom_'
        prefix for the generated column names
    :param suffix: str, default ''
        suffix for the generated column names
    :return: Dataframe with one extra end-of-month column per input column.
    """
    # BUG FIX: the lambdas were created inside the dict comprehension without
    # binding the loop variable, so every generated column was computed from
    # the *last* entry of `columns`. Binding `column` as a default argument
    # freezes the correct column per lambda.
    return format.as_date(df, columns, *args, **kwargs).assign(
        **{
            prefix + column + suffix:
                (lambda x, col=column: (x[col] - pd.to_timedelta(1, unit='day')) + pd.offsets.MonthEnd())
            for column in columns
        }
    )
| StarcoderdataPython |
11209506 | <reponame>a-sk/templet-python
import re
import ast
from setuptools import setup
# Matches assignments like: __version__ = '1.2.3'
_version_re = re.compile(r'__version__\s+=\s+(.*)')
# Read the version straight out of the package's __init__.py so it is defined
# in exactly one place; ast.literal_eval safely strips the quoting.
with open('{{ PROJECT_NAME }}/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))
# NOTE(review): the '{{ ... }}' placeholders are template variables filled in
# by the project scaffolder; this file is not runnable until rendered.
setup(
    name='{{ PROJECT_NAME }}',
    version=version,
    url='',
    license='{{ license }}',
    author='{{ author }}',
    author_email='{{ email }}',
    description='{{ description }}',
    long_description=open('README.md').read(),
    packages=['{{ PROJECT_NAME }}'],
    include_package_data=True,
    install_requires=[
    ]
)
| StarcoderdataPython |
4998784 | from setuptools import setup
setup(
    name = 'enginemonitor',
    version = '1.0.0',
    # NOTE(review): placeholder text left from a template — fill in the real licence id.
    license = 'license name, e.g. MIT',
    description = '',
    author = '<NAME>',
    author_email = '<EMAIL>',
    # All importable code lives under src/ (the "src layout").
    package_dir={'':'src'},
    install_requires=[
        'psutil',
        'pymongo',
        'wmi'
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 3.7',
        'Natural Language :: English',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        # NOTE(review): malformed trove classifier — should read
        # 'Operating System :: OS Independent'.
        'Operating System :: Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Utilities',
    ]
)
6483806 | <gh_stars>0
from motor.motor_asyncio import AsyncIOMotorClient
from odmantic import AIOEngine
# MongoDB URI using the service hostname `mongodb` — assumes a Docker-compose
# style network where the database container is reachable under that name.
connection_details = "mongodb://mongodb:27017"
client = AsyncIOMotorClient(connection_details)
# ODMantic engine bound to the "guestbook" database on that client.
engine = AIOEngine(motor_client=client, database="guestbook")
6546712 | <gh_stars>10-100
import sys
import os
# NOTE(review): machine-specific Jenkins workspace paths are hard-coded below;
# these sys.path hacks break portability — prefer packaging or PYTHONPATH.
stage='/Users/Shared/Jenkins/Home/workspace/Test1/'
stage1='/Users/Shared/Jenkins/Home/workspace/Test2/'
sys.path.append(stage)
sys.path.append(stage1)
import time
import numpy as np
import pandas as pd
from datetime import datetime as dt
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import heapq
import collections
def load_bench(bench):
    """Load a benchmark CSV (Date index, 'Adj Close' column) and return a
    price index rebased to a starting capital of 100.

    Skips the first 1001 rows (presumably the training window — confirm with
    the caller). Side effects: prints the series and writes it to bench.csv.
    """
    mu = 100
    frame = pd.read_csv(bench, header=0, low_memory=False)
    frame.set_index('Date', inplace=True)
    closes = frame['Adj Close'][1001:]
    bench = mu * (1 + closes.pct_change()).cumprod()
    print('bench', bench)
    pd.DataFrame(bench).to_csv('bench.csv')
    return bench
def load_csv_test(fname):
    """Load a wide price CSV (first column 'Unnamed: 0' = dates in %Y-%m-%d,
    remaining columns = one price series per ticker).

    Columns containing any zero are dropped entirely (zeros are treated as
    missing data). Prints several diagnostics along the way.

    :return: (dates as np.array of datetime, prices as 2-D np.array,
        full column list including the date column)
    """
    frame = pd.read_csv(fname, header=0, low_memory=False)
    print(frame.head())
    # Treat zero prices as missing and drop any ticker with gaps.
    frame.replace(0, np.nan, inplace=True)
    frame.dropna(axis=1, how='any', inplace=True)
    print('effect check', frame.shape)
    tickers_list = frame.columns.values
    print('ticker_list', len(tickers_list[1:]), tickers_list[1:])
    date_strings = frame['Unnamed: 0']
    parsed_dates = [dt.strptime(s, '%Y-%m-%d') for s in date_strings]
    all_t = np.array(parsed_dates)
    all_p = np.array(frame.iloc[:, 1:])
    print('all_p shape', all_p.shape)
    return all_t, all_p, tickers_list
class TradingRRL(object):
    def __init__(self, T=1000, thisT = 1000, M=300, thisM = 300, N=0, init_t=10000, mu=10000, sigma=0.04, rho=1.0, n_epoch=10):
        """Recurrent-reinforcement-learning trader state.

        :param T: training window length (number of return observations).
        :param thisT: window length used in the test phase.
        :param M: number of lagged returns fed to the model while training.
        :param thisM: lag count used in the test phase.
        :param N: number of tradable assets.
        :param init_t: offset into the full price history where the window starts.
        :param mu: capital / position scale used when computing rewards.
        :param sigma: proportional transaction-cost rate.
        :param rho: learning rate for the gradient ascent on the Sharpe ratio.
        :param n_epoch: number of training epochs for fit().
        """
        self.T = T
        self.thisT = thisT
        self.M = M
        self.thisM = thisM
        self.N = N
        self.TOP = 20          # number of assets kept by select_n
        self.threshold = 0.0   # minimum score an asset must reach in select_n
        self.init_t = init_t
        self.mu = mu
        self.sigma = sigma
        self.rho = rho
        # Raw history (filled externally) and the active window slices.
        self.all_t = None
        self.all_p = None
        self.t = None
        self.p = None
        self.bench = None
        self.r = None
        # Model state: features x, positions F, Sharpe-scaled positions FS,
        # per-period returns R, weights w (and the best weights found w_opt).
        self.x = np.zeros([T, M + 2])
        self.F = np.zeros((T + 1, N))
        self.FS = np.zeros((T + 1, N))
        self.R = np.zeros((T, N))
        self.w = np.ones((M + 2, N))
        self.w_opt = np.ones((M + 2, N))
        self.epoch_S = pd.DataFrame()
        self.n_epoch = n_epoch
        self.progress_period = 100   # print progress every this many epochs
        self.q_threshold = 0.5       # magnitude cutoff used by quant()
        self.b = np.ones((T+1, N))   # per-step bias added before tanh
        self.total = None
        # NOTE(review): self.bench is assigned twice in this constructor —
        # the second assignment below is redundant.
        self.bench = None
        self.tickers_list = None
        self.ticker_data = collections.defaultdict(dict)
def quant(self, f):
fc = f.copy()
fc[np.where(np.abs(fc) < self.q_threshold)] = 0
#return np.sign(fc)
return fc
def softmax(self, x):
l2_norm = np.sqrt(x*x).sum()
return x/l2_norm
#e_x = np.exp(x)
#return e_x / e_x.sum()
    def set_t_p_r(self, train_phase=True):
        """Slice the loaded history into the active window and compute returns.

        Uses window [init_t, init_t + T + M + 1) while training, or the
        thisT/thisM counterparts in the test phase. Returns r are simple
        percentage changes diff(p)/p with a zero row prepended so r aligns
        row-for-row with p. Training phase also dumps r to smallr.csv.
        """
        if train_phase:
            self.t = self.all_t[self.init_t:self.init_t + self.T + self.M + 1]
            self.p = self.all_p[self.init_t:self.init_t + self.T + self.M + 1,:] ## TODO: add column dimension for assets > 1
            print('p dimension', self.p.shape)
            #self.r = -np.diff(self.p, axis=0)
            # First return is undefined; pad with zeros to keep alignment.
            firstr = np.zeros((1, self.p.shape[1]))
            self.r = np.diff(self.p, axis=0)/self.p[:-1]
            self.r = np.concatenate((firstr, self.r), axis=0)
            print('r dimension', self.r.shape)
            pd.DataFrame(self.r).to_csv("smallr.csv", header=False, index=False)
        else:
            self.t = self.all_t[self.init_t:self.init_t + self.thisT + self.thisM + 1]
            self.p = self.all_p[self.init_t:self.init_t + self.thisT + self.thisM + 1,:] ## TODO: add column dimension for assets > 1
            print('p dimension', self.p.shape)
            # self.r = -np.diff(self.p, axis=0)
            firstr = np.zeros((1, self.p.shape[1]))
            self.r = np.diff(self.p, axis=0) / self.p[:-1]
            self.r = np.concatenate((firstr, self.r), axis=0)
    def set_x_F(self, train_phase=True):
        """Build the feature matrix x and the position series F.

        Each feature row is [bias, M lagged returns, previous position] and
        F[i] = quant(tanh(x[i].w + b[i])). Rows are filled backwards in time
        so row i can reference the position F[i+1] computed just before it.
        In the test phase the thisT/thisM window sizes and all-ones weights
        are used instead of the learned w.
        """
        if train_phase:
            for i in range(self.T - 1, -1, -1):
                self.x[i] = np.zeros(self.M + 2)
                self.x[i][0] = 1.0  # bias term
                self.x[i][self.M + 2 - 1] = self.F[i+1,-1] ## TODO: i used -1 on column
                for j in range(1, self.M + 2 - 1, 1):
                    #self.x[i][j] = self.r[i+ j - 1,0] ## TODO: i used -1 on column:
                    self.x[i,j] = self.r[i + (j-1), -1] ## TODO: i used -1 on column; and must deal with j
                self.F[i] = self.quant(np.tanh(np.dot(self.x[i], self.w)+self.b[i])) ## TODO: test this
        else:
            # Test phase: unit weights (no learned model) and test window sizes.
            thisw = np.ones((self.thisM+2, self.N))
            self.x = np.zeros([self.thisT, self.thisM + 2])
            self.F = np.zeros((self.thisT + 1, self.N))
            for i in range(self.thisT - 1, -1, -1):
                self.x[i] = np.zeros(self.thisM + 2)
                self.x[i][0] = 1.0
                self.x[i][self.thisM + 2 - 1] = self.F[i+1,-1] ## TODO: i used -1 on column
                for j in range(1, self.thisM + 2 - 1, 1):
                    #self.x[i][j] = self.r[i+ j - 1,0] ## TODO: i used -1 on column:
                    self.x[i,j] = self.r[i + (j-1), -1] ## TODO: i used -1 on column; and must deal with j
                self.F[i] = self.quant(np.tanh(np.dot(self.x[i], thisw)+self.b[i])) ## TODO: test this
    def calc_R(self):
        """Compute per-period strategy returns R.

        R[t] = F[t+1] * r[t] scaled by (1 - sigma * |ΔF|): the position times
        the asset return, reduced by a transaction cost proportional to the
        change in position. Side effect: writes the result to R.csv
        (NOTE(review): looks like debug output — confirm it is still wanted).
        """
        #self.R = self.mu * (np.dot(self.r[:self.T], self.F[:,1:]) - self.sigma * np.abs(-np.diff(self.F, axis=1)))
        #self.R = self.mu * (self.r[:self.T] * self.F[1:]) - self.sigma * np.abs(-np.diff(self.F, axis=0))
        #self.R = self.mu * (np.multiply(self.F[1:,], np.reshape(self.r[:self.T], (self.T, -1)))) * (self.sigma) * np.abs(-np.diff(self.F, axis=0))
        self.R = ((np.multiply(self.F[1:, ], np.reshape(0+self.r[:self.T], (self.T, -1)))) * (1-self.sigma * np.abs(-np.diff(self.F, axis=0))))
        pd.DataFrame(self.R).to_csv('R.csv')
def calc_sumR(self):
self.sumR = np.cumsum(self.R[::-1], axis=0)[::-1] ## TODO: cumsum axis
#self.sumR = np.cumprod(self.R[::-1], axis=0)[::-1] ## TODO: cumsum axis
self.sumR2 = np.cumsum((self.R[::-1] ** 2), axis=0)[::-1] ## TODO: cumsum axis
#self.sumR2 = np.cumprod((self.R[::-1] ** 2), axis=0)[::-1] ## TODO: cumsum axis
#print('cumprod', self.sumR)
    def calc_dSdw(self, train_phase=True):
        """Compute the gradient of each asset's Sharpe ratio w.r.t. the weights.

        Refreshes features/positions/returns (set_x_F, calc_R, calc_sumR), then
        for every asset j computes the Sharpe ratio S = A / sqrt(B - A²) from
        the first-row return moments A, B, and accumulates dS/dw backwards
        through time via the chain rule (the recurrent dependence of F on the
        previous position enters through dFdw / dFpdw). Results land in
        self.dSdw (column per asset) and self.Sall (Sharpe ratio per asset).
        In the test phase, T and M are first switched to thisT/thisM.
        """
        if not train_phase:
            self.T = self.thisT
            self.M = self.thisM
        self.set_x_F(train_phase=train_phase)
        self.calc_R()
        self.calc_sumR()
        self.Sall = np.empty(0) # a list of period-to-date sharpe ratios, for all n investments
        self.dSdw = np.zeros((self.M + 2, self.N))
        for j in range(self.N):
            # Mean and second moment of returns over the whole window
            # (row 0 of the suffix sums covers all T periods).
            self.A = self.sumR[0,j] / self.T
            self.B = self.sumR2[0,j] / self.T
            #self.A = self.sumR / self.T
            #self.B = self.sumR2 / self.T
            self.S = self.A / np.sqrt(self.B - (self.A ** 2))
            #self.S = ((self.B[1:,j]*np.diff(self.A[:,j], axis=0)-0.5*self.A[1:,j]*np.diff(self.B[:,j], axis=0))/ (self.B[1,j] - (self.A[1,j] ** 2))**(3/2))[1]
            #self.S = (self.B[1,j] - (self.A[1,j] ** 2))**(3/2)
            #print('sharpe checl', np.isnan(self.r).sum())
            # Partial derivatives of S w.r.t. the moments and of the moments
            # w.r.t. the per-period returns.
            self.dSdA = self.S * (1 + self.S ** 2) / self.A
            self.dSdB = -self.S ** 3 / 2 / self.A ** 2
            self.dAdR = 1.0 / self.T
            self.dBdR = 2.0 / self.T * self.R[:,j]
            # Return sensitivity to the current and previous position
            # (transaction-cost term enters through sign of the position change).
            self.dRdF = -self.mu * self.sigma * np.sign(-np.diff(self.F, axis=0))
            self.dRdFp = self.mu * self.r[:self.T] + self.mu * self.sigma * np.sign(-np.diff(self.F, axis=0)) ## TODO: r needs to be a matrix if assets > 1
            self.dFdw = np.zeros(self.M + 2)
            self.dFpdw = np.zeros(self.M + 2)
            #self.dSdw = np.zeros((self.M + 2, self.N)) ## TODO: should not have put this here. this resets everytime
            self.dSdw_j = np.zeros(self.M + 2)
            # Backward-through-time accumulation; dFdw carries the recurrent
            # dependence of F[i] on F[i+1] through the last weight row.
            for i in range(self.T - 1, -1, -1):
                if i != self.T - 1:
                    self.dFpdw = self.dFdw.copy()
                self.dFdw = (1 - self.F[i,j] ** 2) * (self.x[i] + self.w[self.M + 2 - 1,j] * self.dFpdw)
                self.dSdw_j += (self.dSdA * self.dAdR + self.dSdB * self.dBdR[i]) * (
                        self.dRdF[i,j] * self.dFdw + self.dRdFp[i,j] * self.dFpdw)
            self.dSdw[:, j] = self.dSdw_j
            self.Sall = np.append(self.Sall, self.S)
    def update_w(self):
        """One gradient-ascent step on the Sharpe ratio: w += rho * dS/dw
        (in place; assumes calc_dSdw has populated self.dSdw)."""
        self.w += self.rho * self.dSdw
    def get_investment_weights(self, train_phase=True):
        """Convert positions into normalised portfolio weights.

        Scales each position row F[i] by the per-asset Sharpe ratios (FS),
        keeps only the TOP assets per row via select_n, and normalises each
        row with softmax (actually an L1-style normalisation — see softmax).
        In the test phase, also records each selected ticker's weight and
        return into self.ticker_data and prints diagnostics.
        """
        if not train_phase:
            self.FS = np.zeros((self.thisT + 1, self.N))
        for i in range(self.FS.shape[0]):
            self.FS[i] = np.multiply(self.F[i], self.Sall)
        tmp = np.apply_along_axis(self.select_n, 1, self.FS) # TODO: conisder taking the abs(): magnitutde
        F1 = np.apply_along_axis(self.softmax, 1, tmp)
        print('MAKE F1', F1.shape)
        print('see F1', F1)
        print('see R', self.R)
        mask = F1 != 0
        _, j = np.where(mask)
        for ji in set(j):
            # Index -2 picks the penultimate row — presumably the last fully
            # populated period; confirm against how F/R rows are laid out.
            self.ticker_data[self.tickers_list[ji]]['inv weight'] = F1[-2, ji]
            self.ticker_data[self.tickers_list[ji]]['return'] = self.R[-2, ji]
        print(self.ticker_data)
        return F1
def select_n(self, array):
threshold = max(heapq.nlargest(self.TOP, array)[-1], self.threshold)
new_array = [x if x >= threshold else 0 for x in array]
return new_array
def fit(self):
pre_epoch_times = len(self.epoch_S)
self.calc_dSdw()
print("Epoch loop start. Initial sharp's ratio is " + str(np.mean(self.Sall)) + ".")
print('s len', len(self.Sall))
self.S_opt = self.Sall
tic = time.clock()
for e_index in range(self.n_epoch):
self.calc_dSdw()
if np.mean(self.Sall) > np.mean(self.S_opt):
self.S_opt = self.Sall
self.w_opt = self.w.copy()
#self.Sall = np.apply_along_axis(self.select_n, 0, self.Sall) # TODO: don't do this here
self.epoch_S[e_index] = np.array(self.S_opt)
self.update_w()
if e_index % self.progress_period == self.progress_period - 1:
toc = time.clock()
print("Epoch: " + str(e_index + pre_epoch_times + 1) + "/" + str(
self.n_epoch + pre_epoch_times) + ". Shape's ratio: " + str(self.Sall[self.Sall.nonzero()].mean()) + ". Elapsed time: " + str(
toc - tic) + " sec.")
toc = time.clock()
print("Epoch: " + str(e_index + pre_epoch_times + 1) + "/" + str(
self.n_epoch + pre_epoch_times) + ". Shape's ratio after iteration: " + str(self.S_opt[self.S_opt.nonzero()].mean()) + ". Elapsed time: " + str(
toc - tic) + " sec.")
self.w = self.w_opt.copy()
self.calc_dSdw()
print("Epoch loop end. Optimized sharp's ratio is " + str(self.S_opt[self.S_opt.nonzero()].mean()) + ".")
print('first check', self.Sall)
print('now check', self.epoch_S)
print('R dimension', self.R.shape)
    def save_weight(self, train_phase=True):
        """Persist the model state to CSV files in the working directory.

        Training phase: writes weights (w.csv), per-epoch Sharpe series
        (epoch_S.csv), positions (f.csv), Sharpe-weighted positions (fs.csv)
        and the final investment weights (f1.csv).
        Test phase: only recomputes F1 and dumps the per-ticker summary
        (ticker_data.csv).
        """
        if train_phase:
            self.F1 = self.get_investment_weights()
            pd.DataFrame(self.w).to_csv("w.csv", header=False, index=False)
            self.epoch_S.to_csv("epoch_S.csv", header=False, index=False)
            pd.DataFrame(self.F).to_csv("f.csv", header=False, index=False)
            pd.DataFrame(self.FS).to_csv("fs.csv", header=False, index=False)
            pd.DataFrame(self.F1).to_csv("f1.csv", header=False, index=False)
        else:
            self.F1 = self.get_investment_weights(train_phase=False)
            pd.DataFrame().from_dict(self.ticker_data).T.to_csv('ticker_data.csv')
def load_weight(self):
tmp = pd.read_csv("w.csv", header=None)
self.w = tmp.T.values[0]
    def get_investment_sum(self, train_phase=True):
        """Compute and persist the cumulative investment value over time.

        Prepends a zero-return row so returns align with the weight rows, then
        compounds the portfolio return (sum over assets of R * F1) starting
        from the initial capital mu, and writes the series to CSV.

        NOTE(review): this mutates self.R in place — calling it twice on the
        same object prepends a second zero row. Confirm it is only called once
        per phase.
        """
        firstR = np.zeros((1, self.p.shape[1]))
        self.R = np.concatenate((firstR, self.R), axis=0)
        # Per-step portfolio return, then compounded cumulative value.
        tmp = np.multiply(self.R, self.F1)
        self.total = self.mu * ((1 + tmp.sum(axis=1)).cumprod(axis=0))
        print('iam here', self.total.shape, self.total)
        if train_phase:
            pd.DataFrame(self.total).to_csv('investment_sum.csv')
        else:
            pd.DataFrame(self.total).to_csv('investment_sum_testphase.csv')
def main():
    """End-to-end driver: train an RRL trader on an initial window, plot the
    training results, then evaluate the optimized weights on the following
    period and plot the prediction results."""
    #fname = '../../util/stock_dfs/A.csv'
    #fname = 'USDJPY30.csv'
    bench = stage+'SPY.csv'
    fname = stage1+'all_data_todate.csv'
    all_t, all_p, tickers_list = load_csv_test(fname)
    bench = load_bench(bench)
    # Window sizes and hyper-parameters.
    init_t = 1001 #1001
    M = 200
    thisM = 20
    T = 1000
    thisT = all_p.shape[0]-(init_t+T+M)-thisM
    N = all_p.shape[1]
    mu = 100#bench[init_t]
    sigma = 0.04
    rho = 1.0
    n_epoch = 100
    # RRL agent with initial weight.
    ini_rrl = TradingRRL(T, thisT, M, thisM, N, init_t, mu, sigma, rho, n_epoch) ## TODO: init_t is really a change point!!!
    ini_rrl.all_t = all_t
    ini_rrl.all_p = all_p
    ini_rrl.bench = bench
    ini_rrl.tickers_list = tickers_list
    ini_rrl.set_t_p_r()
    ini_rrl.calc_dSdw()
    # RRL agent for training
    rrl = TradingRRL(T, thisT, M, thisM, N, init_t, mu, sigma, rho, n_epoch)
    rrl.all_t = ini_rrl.all_t
    rrl.all_p = ini_rrl.all_p
    rrl.tickers_list = ini_rrl.tickers_list
    rrl.set_t_p_r()
    rrl.fit()
    rrl.save_weight()
    rrl.get_investment_sum()
    # Plot results.
    # Training for initial term T.
    fig, ax = plt.subplots(nrows=2, figsize=(15, 10))
    t = np.linspace(0, ini_rrl.bench.shape[0], ini_rrl.bench.shape[0])
    print('x len', len(t[init_t:init_t + rrl.T+1]))
    print('y len', rrl.total.shape[0])
    print('x1 len', len(t[init_t:init_t + rrl.T + 1]))
    print('y2 len', ini_rrl.bench.shape[0])
    ax[0].plot(t[:ini_rrl.T], ini_rrl.bench[:ini_rrl.T], color='red', label='Benchmark: training phase')
    ax[0].plot(t[ini_rrl.T:], ini_rrl.bench[ini_rrl.T:], color='purple', label='Benchmark: after training phase')
    ax[0].set_xlabel("time")
    ax[0].set_ylabel("SPY")
    ax[0].grid(True)
    ax[1].plot(t[:ini_rrl.T], ini_rrl.bench[:ini_rrl.T], color='red', label='Benchmark: before start of training')
    ax[1].plot(t[ini_rrl.T:], ini_rrl.bench[ini_rrl.T:], color='orange', label='Benchmark: start training')
    ax[1].plot(t[:rrl.T+1], rrl.total, color="blue", label="With optimized weights")
    ax[1].set_xlabel("time")
    ax[1].set_ylabel("Total Invested")
    ax[1].legend(loc="best")
    ax[1].grid(True)
    plt.savefig("rrl_training.png", dpi=300)
    # Prediction for next term T with optimized weight.
    # RRL agent with initial weight.
    ini_rrl_f = TradingRRL(T, thisT, M, thisM, N, init_t+T, mu, sigma, rho, n_epoch)
    ini_rrl_f.all_t = ini_rrl.all_t
    ini_rrl_f.all_p = ini_rrl.all_p
    # BUG FIX: this line previously read `ini_rrl.tickers_list = ini_rrl.tickers_list`
    # (a no-op self-assignment), leaving ini_rrl_f without its ticker list;
    # mirror the rrl_f setup below.
    ini_rrl_f.tickers_list = ini_rrl.tickers_list
    ini_rrl_f.set_t_p_r(train_phase=False)
    ini_rrl_f.calc_dSdw(train_phase=False)
    # RRL agent with optimized weight.
    rrl_f = TradingRRL(T, thisT, M, thisM, N, init_t+T, mu, sigma, rho, n_epoch)
    rrl_f.all_t = ini_rrl.all_t
    rrl_f.all_p = ini_rrl.all_p
    rrl_f.tickers_list = ini_rrl.tickers_list
    rrl_f.set_t_p_r(train_phase=False)
    rrl_f.w = rrl.w
    rrl_f.calc_dSdw(train_phase=False)
    rrl_f.save_weight(train_phase=False)
    rrl_f.get_investment_sum(train_phase=False)
    fig, ax = plt.subplots(nrows=2, figsize=(15, 10))
    t = np.linspace(0, ini_rrl.bench.shape[0], ini_rrl.bench.shape[0])
    print('what is this', ini_rrl.bench.shape)
    print('len check', len(t))
    print('check len1', len(t[:rrl_f.T]))
    print('check len2', len(t[rrl_f.T:]))
    print('check len3', len(ini_rrl.bench[:rrl_f.T]))
    print('check len4', len(ini_rrl.bench[rrl_f.T:]))
    ax[0].plot(t[:rrl_f.T], ini_rrl.bench[:rrl_f.T], color='red', label='Benchmark: training phase')
    ax[0].plot(t[rrl_f.T:], ini_rrl.bench[rrl_f.T:], color='orange', label='Benchmark: post-training phase')
    ax[0].set_xlabel("time")
    ax[0].set_ylabel("SPY: benchmark")
    ax[0].grid(True)
    print('len check b', rrl.total.shape)
    print('len check b1', rrl_f.total.shape)
    ax[1].plot(t[:rrl_f.T], ini_rrl.bench[:rrl_f.T], color='red', label='Benchmark: training phase')
    ax[1].plot(t[rrl_f.T:], ini_rrl.bench[rrl_f.T:], color='orange', label='Benchmark: post-training phase')
    ax[1].plot(t[:rrl.total.shape[0]], rrl.total, color="blue", label="With optimized weights: before day 1000")
    ax[1].plot(t[rrl.total.shape[0]:rrl.total.shape[0]+rrl_f.total.shape[0]], rrl_f.total, color="green", label="With optimized weights: before day 1000")
    ax[1].set_xlabel("time")
    ax[1].set_ylabel("Total Investment")
    ax[1].legend(loc="best")
    ax[1].grid(True)
    plt.savefig("rrl_prediction.png", dpi=300)
    fig.clear()
# Script entry point.
if __name__ == "__main__":
    main()
3501762 | <filename>{{cookiecutter.application_name}}/{{cookiecutter.application_name}}/factory.py
# -*- coding: utf-8 -*-
'''The app module, containing the app factory function.'''
from flask import Flask, render_template
from {{cookiecutter.application_name}}.extensions import (
oauth
#add others as needed
)
# -*- coding: utf-8 -*-
'''The app module, containing the app factory function.'''
import os
from flask import Flask, render_template
from {{cookiecutter.application_name}}.extensions import (
oauth
#add others as needed
)
def create_app(config_filename):
    """Application factory (see http://flask.pocoo.org/docs/patterns/appfactories/).

    Builds a Flask app, loads its configuration from the given object, then
    wires up OAuth, the error handlers and the blueprints before returning it.
    """
    application = Flask(__name__)
    application.config.from_object(config_filename)
    for register in (register_oauth, register_errorhandlers, register_blueprints):
        register(application)
    return application
def register_oauth(app):
    """Create the OAuth remote app for the registry provider and attach it.

    Reads the consumer key/secret and base URL from the app config and stores
    the resulting client on ``app.registry`` for use by the views.
    """
    oauth.init_app(app)
    #TODO maybe make the scope params more configurable?
    # NOTE: '{{cookiecutter.oauth_scopes}}' is a cookiecutter placeholder
    # substituted at project-generation time.
    registry = oauth.remote_app(
        'registry',
        consumer_key = app.config['REGISTRY_CONSUMER_KEY'],
        consumer_secret = app.config['REGISTRY_CONSUMER_SECRET'],
        request_token_params = {'scope': '{{cookiecutter.oauth_scopes}}' },
        base_url = app.config['REGISTRY_BASE_URL'],
        request_token_url = None,  # no request-token step (OAuth2-style flow)
        access_token_method = 'POST',
        access_token_url = '%s/oauth/token' % app.config['REGISTRY_BASE_URL'],
        authorize_url = '%s/oauth/authorize' % app.config['REGISTRY_BASE_URL']
    )
    app.registry = registry
def register_errorhandlers(app):
    """Attach a shared error view for 401, 404 and 500 responses."""
    def render_error(error):
        # HTTPExceptions carry a `code` attribute; anything else becomes a 500.
        status = getattr(error, 'code', 500)
        return render_template("{0}.html".format(status)), status
    for status_code in (401, 404, 500):
        app.errorhandler(status_code)(render_error)
    return None
def register_blueprints(app):
    # Imported inside the function (not at module top) to avoid a circular
    # import with the application package. `{{cookiecutter.application_name}}`
    # is a cookiecutter placeholder filled in at project-generation time.
    from {{cookiecutter.application_name}}.frontend.views import frontend
    app.register_blueprint(frontend)
def register_extensions(app):
    # Placeholder: initialize additional Flask extensions here (currently none).
    # NOTE(review): create_app never calls this hook — confirm whether that is
    # intentional.
    pass
| StarcoderdataPython |
1730341 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sugartensor as tf
num_blocks = 3 # dilated blocks
num_dim = 128 # latent dimension
#
# logit calculating graph using atrous/dilated convolution
#
def get_logit(x, voca_size):
    """Build the logit graph: gated dilated-convolution blocks with residual
    and skip connections (sugartensor DSL), projecting to `voca_size` classes.

    Args:
        x: input tensor (sugartensor-wrapped).
        voca_size: vocabulary size, i.e. the output channel count of the final
            1x1 convolution.

    Returns:
        The logit tensor produced from the summed skip connections.
    """
    # residual block: gated activation (tanh * sigmoid) over dilated convs,
    # returning (residual output, skip output).
    def res_block(tensor, size, rate, block, dim=num_dim):
        with tf.sg_context(name='block_%d_%d' % (block, rate)):
            # filter convolution
            conv_filter = tensor.sg_aconv1d(size=size, rate=rate, act='tanh', bn=True, name='conv_filter')
            # gate convolution
            conv_gate = tensor.sg_aconv1d(size=size, rate=rate, act='sigmoid', bn=True, name='conv_gate')
            # output by gate multiplying
            out = conv_filter * conv_gate
            # final output: 1x1 conv back to the block dimension
            out = out.sg_conv1d(size=1, dim=dim, act='tanh', bn=True, name='conv_out')
            # residual and skip output
            return out + tensor, out

    # expand dimension: project the input into the latent dimension
    with tf.sg_context(name='front'):
        z = x.sg_conv1d(size=1, dim=num_dim, act='tanh', bn=True, name='conv_in')

    # dilated conv block loop: num_blocks stacks of exponentially growing rates
    skip = 0  # skip connections (accumulated sum across all blocks)
    for i in range(num_blocks):
        for r in [1, 2, 4, 8, 16]:
            z, s = res_block(z, size=7, rate=r, block=i)
            skip += s

    # final logit layers over the summed skip connections
    with tf.sg_context(name='logit'):
        logit = (skip
                 .sg_conv1d(size=1, act='tanh', bn=True, name='conv_1')
                 .sg_conv1d(size=1, dim=voca_size, name='conv_2')
                 )
    return logit
| StarcoderdataPython |
1968323 | # import os
# import cv2
# import numpy as np
#
# INPUT_VIDEO = 'test.mp4'
# OUTPUT_IMG = 'out_my_video'
# os.makedirs(OUTPUT_IMG, exist_ok=True)
#
#
# def print_image(img, frame_diff):
# """
# Place images side-by-side
# """
# new_img = np.zeros([img.shape[0], img.shape[1] * 2, img.shape[2]]) # [height, width*2, channel]
# new_img[:, :img.shape[1], :] = img # place color image on the left side
# new_img[:, img.shape[1]:, 0] = frame_diff # place gray image on the right side
# new_img[:, img.shape[1]:, 1] = frame_diff
# new_img[:, img.shape[1]:, 2] = frame_diff
# return new_img
#
#
# def main(video_path):
# cap = cv2.VideoCapture(video_path) # https://docs.opencv.org/4.0.0/d8/dfe/classcv_1_1VideoCapture.html
# last_gray = None
# idx = -1
# while (True):
# ret, frame = cap.read() # read frames
# idx += 1
# if not ret:
# print('Stopped reading the video (%s)' % video_path)
# break
#
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert color image to gray
#
# if last_gray is None:
# last_gray = gray
# continue
#
# diff = cv2.absdiff(gray,
# last_gray) # frame difference! https://docs.opencv.org/4.0.0/d2/de8/group__core__array.html#ga6fef31bc8c4071cbc114a758a2b79c14
# cv2.imwrite(os.path.join(OUTPUT_IMG, 'img_%06d.jpg' % idx), print_image(frame, diff))
# last_gray = gray
# print('Done image @ %d...' % idx)
# pass
# pass
#
#
# if __name__ == "__main__":
# print('Running frame difference algorithm on %s' % INPUT_VIDEO)
# main(video_path=INPUT_VIDEO)
# print('* Follow me @ ' + "\x1b[1;%dm" % (34) + ' https://www.facebook.com/minhng.info/' + "\x1b[0m")
# print('* Join GVGroup for discussion @ ' + "\x1b[1;%dm" % (
# 34) + 'https://www.facebook.com/groups/ip.gvgroup/' + "\x1b[0m")
# print('* Thank you ^^~')
#
# print('[NOTE] Run the following command to turn you images in to video:')
# print(
# 'ffmpeg -framerate 24 -f image2 -start_number 1 -i out_my_video/img_%*.jpg -crf 10 -q:v 5 -pix_fmt yuv420p out_video.mp4')
# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array
# parameters for loading data and images
detection_model_path = 'haarcascade/haarcascade_frontalface_default.xml'
# loading models
# load model facial_expression
model_facial_expression = model_from_json(open("model/fer.json", "r").read())
# load weights facial_expression
model_facial_expression.load_weights('model/fer.h5')
# Class labels, in the order the model's output units were trained.
EMOTIONS = ["angry", "disgust", "scared", "happy", "sad", "surprised", "neutral"]
face_detection = cv2.CascadeClassifier(detection_model_path)
path_video = "democlassroom.mp4"
video = cv2.VideoCapture(path_video)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = width, height
# Print out the resolution
print(repr(size))
# Set the number of frames and the background
FPS_SMOOTHING = 0.9
# ret, frame1 = video.read()
ret, frame2 = video.read()
frame1 = None
next_frame = 0
fps = 0.0
prev = time.time()
# Main loop: frame-difference motion detection; when motion is present, run
# face detection + emotion classification on every second frame.
while video.isOpened():
    status, color = "No Movement", (0, 255, 0)
    no_movement_check = False
    now = time.time()
    # Exponentially smoothed FPS estimate.
    # NOTE(review): `prev` is never updated inside the loop, so (now - prev)
    # grows each iteration and this is not a per-frame rate — confirm intent.
    fps = (fps * FPS_SMOOTHING + (1 / (now - prev)) * (1.0 - FPS_SMOOTHING))
    print("fps: {:.1f}".format(fps))
    ret, frame2 = video.read()
    if frame2 is None:
        break
    if frame1 is None:
        frame1 = frame2
    # Motion mask from the difference between consecutive frames.
    difference = cv2.absdiff(frame1, frame2)
    thresh = cv2.threshold(difference, 25, 255, cv2.THRESH_BINARY)[1]
    gray = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, threshold = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilate = cv2.dilate(threshold, None, iterations=3)
    contour, _ = cv2.findContours(dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cv2.findContours(dilate.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # loop over the contours
    # for c in cnts:
    #     # if the contour is too small, ignore it
    #     # if cv2.contourArea(c) < args["min_area"]:
    #     #     continue
    #     # compute the bounding box for the contour, draw it on the frame,
    #     # and update the text
    #     (x, y, w, h) = cv2.boundingRect(c)
    #     cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
    #     status = "Occupied"
    #     no_movement_check = True
    # NOTE(review): grab_contours returns a (possibly empty) list, so this is
    # effectively always True — probably meant `if len(cnts) > 0:`.
    if cnts is not None:
        status = "Occupied"
        no_movement_check = True
    # Run the (expensive) face/emotion pipeline on every second frame only.
    if next_frame % 2 == 0 and no_movement_check:
        gray_face = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        faces = face_detection.detectMultiScale(gray_face, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30),
                                                flags=cv2.CASCADE_SCALE_IMAGE)
        print("Kieru gì"+ str(type(faces)))
        for (x, y, w, h) in faces:
            if y+w >10 and x+h >10:
                # cv2.rectangle(frame1, (x_f, y_f), (x_f + w_f, y_f + h_f), (255, 0, 0), 2)
                # NOTE(review): the face crop is taken from `gray` (the frame
                # difference image), not from `gray_face` where the face was
                # detected — looks unintended, confirm.
                roi = gray[y:y + h, x:x + w]
                roi = cv2.resize(roi, (48, 48))
                roi = roi.astype("float") / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)
                preds = model_facial_expression.predict(roi)[0]
                # emotion_probability = np.max(preds)
                label = EMOTIONS[preds.argmax()]
                cv2.putText(frame1, label, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    # cv2.drawContours(frame1, contour, -1, (0, 0, 255), 2)
    # Overlay the status line and timestamp, then show the previous frame.
    cv2.putText(frame1, status, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2)
    cv2.putText(frame1, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame1.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 0), 1)
    # cv2.putText(frame1, "Fps: " + str(difference), (7, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 255, 0), 1, cv2.LINE_AA)
    cv2.imshow("image", frame1)
    frame1 = frame2
    next_frame += 1
    if cv2.waitKey(40) == ord('q'):
        break
video.release()
3246871 | <filename>tests/plugins/test_liveme.py
from streamlink.plugins.liveme import LiveMe
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlLiveMe(PluginCanHandleUrl):
    """URL-matching tests for the LiveMe plugin (driven by PluginCanHandleUrl)."""
    __plugin__ = LiveMe

    # URLs the plugin must claim (live pages with a videoid query parameter).
    should_match = [
        "http://www.liveme.com/live.html?videoid=12312312312312312312",
        "http://www.liveme.com/live.html?videoid=23123123123123123123&countryCode=undefined"
    ]

    # URLs the plugin must reject (site root and non-live pages).
    should_not_match = [
        "http://www.liveme.com/",
        "http://www.liveme.com/explore.html",
        "http://www.liveme.com/media/play"
    ]
| StarcoderdataPython |
1701350 | import signal
import subprocess
import sys
import time
import atexit
import os
class DelayedKeyboardInterrupt(object):
    """Context manager that postpones SIGINT until the with-block exits.

    While active, Ctrl-C is recorded instead of acted upon; on exit the
    previous handler is restored and, if a SIGINT arrived in the meantime,
    it is replayed through that handler with the stored (signum, frame).
    """

    def __enter__(self):
        self.signal_received = False
        # Install the recording handler, remembering whatever was there before.
        self.old_handler = signal.signal(signal.SIGINT, self.handler)

    def handler(self, signum, stack_frame):
        # Remember the interrupt instead of acting on it.
        self.signal_received = (signum, stack_frame)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Restore the original handler first, then replay a deferred interrupt.
        signal.signal(signal.SIGINT, self.old_handler)
        pending = self.signal_received
        if pending:
            self.old_handler(*pending)
class GPUEnergyEvaluator(object):
    """Measures GPU energy use by sampling `nvidia-smi` power draw in a subprocess.

    start() launches the sampling subprocess (by default this very file, see the
    __main__ block); end() interrupts it and returns the accumulated energy in
    joules, parsed from the subprocess's stdout ("<float>J\\n").
    """

    def __init__(self, subprocess_cmd=None, gpuid=0, watts_offset=False):
        # Optionally estimate the idle power draw (averaged over 10 samples of
        # nvidia-smi) so the sampler can subtract it from each measurement.
        watts_idle = 0.0
        if watts_offset:
            for _ in range(10):
                watts_idle += float(subprocess.getoutput(
                    ['nvidia-smi --id={} --format=csv,noheader --query-gpu=power.draw'.format(gpuid)])[:-1])
            watts_idle /= 10.0
        if subprocess_cmd is None:
            # Default: re-run this file as the sampling loop (see __main__ below).
            self.subprocess_cmd = ['python3', os.path.realpath(__file__), str(gpuid), str(watts_idle)]
        else:
            self.subprocess_cmd = subprocess_cmd
        self.p = None

    def start(self):
        """Launch the sampling subprocess; its stdout will report the energy."""
        self.p = subprocess.Popen(self.subprocess_cmd, stdout=subprocess.PIPE)

    def end(self):
        """Stop sampling and return the measured energy in joules.

        Raises RuntimeError if start() was never called.
        """
        if self.p is None:
            # BUG FIX: previously this fell through to the assert below and
            # crashed with UnboundLocalError on `energy`.
            raise RuntimeError("GPUEnergyEvaluator.end() called before start()")
        # SIGINT (rather than terminate) lets the sampler's atexit hook print
        # the accumulated total before exiting.
        self.p.send_signal(signal.SIGINT)
        self.p.wait()
        energy = b""
        try:
            energy = self.p.communicate()[0]
        except subprocess.TimeoutExpired:
            # Best effort: communicate() without a timeout should not normally
            # raise this; keep whatever we have (energy stays b"").
            pass
        assert energy[-2:] == b"J\n"
        return float(energy[:-2])
if __name__ == '__main__':
    # Sampling loop: invoked as `python3 this_file.py <gpuid> <watts_idle>` by
    # GPUEnergyEvaluator.start(); on exit it prints the accumulated energy as
    # "<float>J" (the parent parses that from stdout).
    gpuid = int(sys.argv[1])
    watts_idle = float(sys.argv[2])
    cmdline = ['nvidia-smi --id={} --format=csv,noheader --query-gpu=power.draw'.format(gpuid)]
    energy_used = 0.0
    # Always report the total on exit, even when killed by SIGINT.
    atexit.register(lambda: print("{}J".format(energy_used)))
    # Defer SIGINT while a sample is being accumulated so a partial update
    # cannot be lost mid-iteration.
    int_lock = DelayedKeyboardInterrupt()
    try:
        time_a = time.time()
        while True:
            with int_lock:
                cur_watts = float(subprocess.getoutput(cmdline)[:-1])
                #time.sleep(0.001)
                time_b = time.time()
                # Integrate (power above idle) * dt, clamped at zero.
                energy_used += max(cur_watts - watts_idle, 0.0) * (time_b - time_a)
                time_a = time_b
    except KeyboardInterrupt:
        # ignore: SIGINT is the expected shutdown signal from the parent
        pass
| StarcoderdataPython |
9678872 | from __future__ import print_function
"""
Dec 23, 2014
to robertlugg
Hello Robert,
please accept my big excuses for distrubance but simply I did not found answer on google, no one to ask. I want to make
simple GUI program to make things easier (my job is microcontrollers and electronic). Even I`m not so young neither
familiar with modern programming languages (especially with OOP), I started to learn Python before few months and
I must admit, so far so good :) ....in console, of course. It is not easy, but somehow I make some progress in learning.
Now I am trying to make simple GUI utility which generate 8 byte long commands (then I pass it to another program and
look how projected electronic responding). But, I am not sure is it possible to do it with Easygui :(
When I enter all values I would like to have button "generate" on this form and then all values in range
(depending on value of offset, I add some value on some of bytes...I will write code later for it) and to print all
values in file (in binary form).
At this moment I do not understand few thing:
is it possible to limit length of every input on only for chars?
can chars be limited only to HEX chars (0-9 and A-F , I work only with HEX values)
for example sometimes I need to forbid entering Byte3 and Byte4 (because in some cases it is constants,
for example hex values BB....). How to show them in form, but with disabled entering?
I mean this byte already have values so I do not need to enter them.
Is it possible to put simple button on form (later will write code what program must to do when button is pressed)?
Please, can You explain me more about my questions...if it is possible, how, if not why not ?
Please, I`m trying to learn...so need to know how it works.
Please, excuse me for maybe stupid questions....but I simple do not know :-/, I must to ask someone.
Here is code (to see what I`m trying to make):
"""
__author__ = '<EMAIL> via sourceforge.com'
import re
import sys
sys.path.append('..')
from easygui import multenterbox
title = "HEX values generator"
fieldNames = ["Byte1 start", "Byte1 end", "Byte2 start", "Byte2 end", "Byte3 start", "Byte3 end", "Byte4 start", "Byte4 end", "OFFSET"]
fieldShouldEnter = [True]*len(fieldNames) # Assume all are required. You can modify this to specify which ones!
fieldStatuses = ['Required']*len(fieldNames)
fieldValues = ['']*len(fieldNames) # we start with blanks for the values
r = re.compile("^[0-9a-fA-F]{2,2}$") # Hex validation
while 1:
msg = list()
msg.append("Enter starting and ending values")
msg.append(" Field\t\tValue\tStatus")
for fieldName, fieldValue, fieldStatus in zip(fieldNames, fieldValues, fieldStatuses):
msg.append(" {0}\t{1}\t{2}".format(fieldName, fieldValue, fieldStatus))
msg_text = '\n'.join(msg)
previousFieldValues = fieldValues[:] # Save values just in case user typed them incorrectly
fieldValues = multenterbox(msg_text, title, fieldNames, fieldValues)
if fieldValues is None:
break # User hit Cancel button
# Clean fieldValues list
temp = list()
for fieldValue in fieldValues:
temp.append(fieldValue.strip())
fieldValues = temp
# Validate entries
for i, fieldValue in enumerate(fieldValues):
# If left empty, require reentry
if not len(fieldValue):
fieldStatuses[i] = 'ERROR. Required.'
continue
# If length is not exactly 2, re-enter:
if len(fieldValue) != 2:
fieldStatuses[i] = 'ERROR. Must be exactly 2 chars'
continue
if not r.match(fieldValue):
fieldStatuses[i] = 'ERROR. Must be a HEX number'
continue
if not fieldShouldEnter:
fieldValues[i] = previousFieldValues # Always restore "READ ONLY" fields to their default
fieldStatuses[i] = 'OK.' # All checks passed
if all([status == 'OK.' for status in fieldStatuses]):
break # no problems found, all statuses are 'OK'
print("Reply was:{}".format(fieldValues)) | StarcoderdataPython |
class Solution:
    def findLengthOfLCIS(self, nums: List[int]) -> int:
        """Return the length of the longest strictly increasing contiguous run.

        O(n) time, O(1) extra space. Returns 0 for an empty list (the previous
        version returned 1 in that edge case).
        """
        if not nums:
            return 0
        best = 1
        run = 1
        # Walk adjacent pairs; extend the run on a strict increase, else reset.
        for prev, cur in zip(nums, nums[1:]):
            if cur > prev:
                run += 1
                best = max(best, run)
            else:
                run = 1
        return best
8197711 | # -*- coding: utf-8 -*-
import datetime
from unittest import skipIf
from django.test import TestCase, override_settings
from django.core import mail
from django.core.management import call_command
from django.utils import timezone
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from haystack.query import SearchQuerySet
from . import utils as test_utils
from spirit.core import tasks
from spirit.core.tests.models import TaskResultModel
from spirit.core.storage import spirit_storage
# Register the same demo task under each supported task manager backend.
# The decorator raises ImportError when that backend's package is not
# installed, hence the try/except with a None fallback (tests skipIf on None).
try:
    @tasks.task_manager('celery')
    def celery_task(s):
        TaskResultModel.objects.create(result=s)
except ImportError:
    celery_task = None

try:
    @tasks.task_manager('huey')
    def huey_task(s):
        TaskResultModel.objects.create(result=s)
except ImportError:
    huey_task = None

# The None backend runs tasks synchronously; always available.
@tasks.task_manager(None)
def none_task(s):
    TaskResultModel.objects.create(result=s)

# Periodic variants (hour=10 schedule — presumably daily at 10:00; confirm
# against each backend's scheduler semantics).
try:
    _periodic_task = tasks.periodic_task_manager('celery')

    @_periodic_task(hour=10)
    def celery_periodic_task(s):
        TaskResultModel.objects.create(result=s)
except ImportError:
    celery_periodic_task = None

try:
    _periodic_task = tasks.periodic_task_manager('huey')

    @_periodic_task(hour=10)
    def huey_periodic_task(s):
        TaskResultModel.objects.create(result=s)
except ImportError:
    huey_periodic_task = None

_periodic_task = tasks.periodic_task_manager(None)

@_periodic_task(hour=10)
def none_periodic_task(s):
    TaskResultModel.objects.create(result=s)
def rebuild_index():
    """Rebuild the haystack search index quietly and non-interactively."""
    call_command("rebuild_index", verbosity=0, interactive=False)
class TasksTests(TestCase):
@test_utils.immediate_on_commit
def test_task_manager_none(self):
none_task('none')
self.assertEqual(
TaskResultModel.objects.last().result, 'none')
@skipIf(celery_task is None, "Celery is not installed")
@test_utils.immediate_on_commit
def test_task_manager_celery(self):
celery_task('celery')
self.assertEqual(
TaskResultModel.objects.last().result, 'celery')
@skipIf(huey_task is None, "Huey is not installed")
@test_utils.immediate_on_commit
def test_task_manager_huey(self):
huey_task('huey')
self.assertEqual(
TaskResultModel.objects.last().result, 'huey')
@override_settings(DEFAULT_FROM_EMAIL="<EMAIL>")
@test_utils.immediate_on_commit
def test_send_email(self):
tasks.send_email(
subject="foobar_sub",
message="foobar_msg",
recipients=['<EMAIL>', '<EMAIL>'])
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, "foobar_sub")
self.assertEqual(mail.outbox[1].subject, "foobar_sub")
self.assertEqual(mail.outbox[0].from_email, "<EMAIL>")
self.assertEqual(mail.outbox[1].from_email, "<EMAIL>")
self.assertEqual(mail.outbox[0].body, "foobar_msg")
self.assertEqual(mail.outbox[1].body, "foobar_msg")
self.assertEqual(mail.outbox[0].to, ['<EMAIL>'])
self.assertEqual(mail.outbox[1].to, ['<EMAIL>'])
@override_settings(ST_TASK_MANAGER='test')
@test_utils.immediate_on_commit
def test_search_index_update(self):
rebuild_index()
topic = test_utils.create_topic(test_utils.create_category())
tasks.search_index_update(topic.pk)
sq = SearchQuerySet().models(topic.__class__)
self.assertEqual([s.object for s in sq], [topic])
@override_settings(ST_TASK_MANAGER=None)
@test_utils.immediate_on_commit
def test_search_index_update_no_task_manager(self):
rebuild_index()
topic = test_utils.create_topic(test_utils.create_category())
tasks.search_index_update(topic.pk)
sq = SearchQuerySet().models(topic.__class__)
self.assertEqual([s.object for s in sq], [])
@test_utils.immediate_on_commit
def test_none_periodic_task(self):
none_periodic_task('none')
self.assertEqual(
TaskResultModel.objects.last().result, 'none')
@skipIf(celery_periodic_task is None, "Celery is not installed")
@test_utils.immediate_on_commit
def test_celery_periodic_task(self):
celery_periodic_task('celery')
self.assertEqual(
TaskResultModel.objects.last().result, 'celery')
@skipIf(huey_periodic_task is None, "Huey is not installed")
@test_utils.immediate_on_commit
def test_huey_periodic_task(self):
huey_periodic_task('huey')
self.assertEqual(
TaskResultModel.objects.last().result, 'huey')
@test_utils.immediate_on_commit
def test_full_search_index_update(self):
rebuild_index()
at_date = timezone.now() - datetime.timedelta(days=99)
test_utils.create_topic(
category=test_utils.create_category(reindex_at=at_date),
last_active=at_date,
reindex_at=at_date)
topic = test_utils.create_topic(test_utils.create_category())
tasks.full_search_index_update()
sq = SearchQuerySet().models(topic.__class__)
self.assertEqual([s.object for s in sq], [topic])
@test_utils.with_test_storage
@test_utils.immediate_on_commit
@override_settings(ST_ALLOWED_AVATAR_FORMAT=('gif',))
def test_make_avatars(self):
test_utils.clean_media()
content = (
b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
file = SimpleUploadedFile(
'foo.gif', content=content, content_type='image/gif')
user = test_utils.create_user()
user.st.avatar = file
user.st.save()
self.assertTrue(spirit_storage.exists(user.st.avatar.name))
tasks.make_avatars(user.pk)
# original image is deleted
self.assertFalse(spirit_storage.exists(user.st.avatar.name))
user.refresh_from_db()
self.assertTrue(spirit_storage.exists(user.st.avatar.name))
self.assertEqual(
user.st.avatar.name,
'spirit/avatars/{}/pic_test.jpg'.format(user.pk))
self.assertTrue(spirit_storage.exists(
'spirit/avatars/{}/pic_test_small_test.jpg'.format(user.pk)))
@test_utils.immediate_on_commit
@override_settings(
ST_TASK_MANAGER='tests',
ST_SITE_URL='https://tests.com/',
DEFAULT_FROM_EMAIL='<EMAIL>')
def test_notify_reply(self):
user1 = test_utils.create_user()
user2 = test_utils.create_user()
user3 = test_utils.create_user()
user1.st.notify = user1.st.Notify.IMMEDIATELY | user1.st.Notify.REPLY
user1.st.save()
user2.st.notify = (
user1.st.Notify.IMMEDIATELY |
user1.st.Notify.REPLY |
user1.st.Notify.MENTION)
user2.st.save()
user3.st.notify = user3.st.Notify.IMMEDIATELY | user3.st.Notify.REPLY
user3.st.save()
comment = test_utils.create_comment()
comment.user.st.notify = user1.st.notify
comment.user.st.save()
test_utils.create_notification(
comment, user1, is_read=False, action='reply')
test_utils.create_notification(
user=user1, is_read=False, action='reply')
test_utils.create_notification(
comment, user2, is_read=False, action='reply')
test_utils.create_notification(
comment, user3, is_read=True, action='reply')
test_utils.create_notification(
comment, comment.user, is_read=False, action='reply')
test_utils.create_notification(
user=user3, is_read=False, action='reply')
test_utils.create_notification(is_read=True, action='reply')
test_utils.create_notification(is_read=False, action='reply')
test_utils.create_notification(
comment, is_read=False, action='mention')
test_utils.create_notification(
comment, is_read=False, action=None)
test_utils.create_notification(
comment, is_read=False, action='reply', is_active=False)
test_utils.create_notification(
comment, is_read=True, action='reply', is_active=False)
user4 = test_utils.create_user()
user5 = test_utils.create_user()
user4.st.notify = user4.st.Notify.WEEKLY | user4.st.Notify.REPLY
user4.st.save()
user5.st.notify = (
user5.st.Notify.NEVER |
user5.st.Notify.REPLY |
user5.st.Notify.MENTION)
user5.st.save()
test_utils.create_notification(
comment, user4, is_read=False, action='reply')
test_utils.create_notification(
comment, user5, is_read=False, action='reply')
user6 = test_utils.create_user()
user6.st.notify = user1.st.notify
user6.st.save()
user7 = test_utils.create_user()
user7.st.notify = user1.st.notify
user7.st.save()
user8 = test_utils.create_user()
user8.st.notify = user1.st.notify
user8.st.save()
test_utils.create_notification(
comment, user6, is_read=False, action='reply', is_active=False)
test_utils.create_notification(
comment, user7, is_read=False, action='mention')
test_utils.create_notification(
comment, user8, is_read=False, action=None)
user9 = test_utils.create_user()
user9.st.notify = user1.st.notify
user9.st.save()
comment2 = test_utils.create_comment(topic=comment.topic)
test_utils.create_notification(
comment2, user9, is_read=False, action='reply')
tasks.notify_reply(comment_id=comment.pk)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(
mail.outbox[0].subject, "{user} commented on {topic}".format(
user=comment.user.st.nickname, topic=comment.topic.title))
self.assertEqual(
mail.outbox[1].subject, "{user} commented on {topic}".format(
user=comment.user.st.nickname, topic=comment.topic.title))
self.assertEqual(mail.outbox[0].from_email, '<EMAIL>')
self.assertEqual(mail.outbox[1].from_email, '<EMAIL>')
self.assertIn(
'https://tests.com' + comment.get_absolute_url(),
mail.outbox[0].body)
self.assertIn(
'https://tests.com' + comment.get_absolute_url(),
mail.outbox[1].body)
self.assertEqual(mail.outbox[0].to, [user2.email])
self.assertEqual(mail.outbox[1].to, [user1.email])
@test_utils.immediate_on_commit
@override_settings(
ST_SITE_URL='https://tests.com/',
DEFAULT_FROM_EMAIL='<EMAIL>')
def test_notify_reply_no_tm(self):
user1 = test_utils.create_user()
user1.st.notify = user1.st.Notify.IMMEDIATELY | user1.st.Notify.REPLY
user1.st.save()
comment = test_utils.create_comment()
test_utils.create_notification(
comment, user1, is_read=False, action='reply')
with override_settings(ST_TASK_MANAGER=None):
tasks.notify_reply(comment_id=comment.pk)
self.assertEqual(len(mail.outbox), 0)
with override_settings(ST_TASK_MANAGER='test'):
tasks.notify_reply(comment_id=comment.pk)
self.assertEqual(len(mail.outbox), 1)
@test_utils.immediate_on_commit
@override_settings(
    ST_TASK_MANAGER='tests',
    ST_SITE_URL='https://tests.com/',
    DEFAULT_FROM_EMAIL='<EMAIL>')
def test_notify_mention(self):
    """Only unread, active 'mention' notifications for users who opted
    into IMMEDIATELY+MENTION (and are not the comment author) get an
    email.

    The fixtures below build many decoy notifications (read, inactive,
    wrong action, weekly/never preference, other comment, comment
    author); the test asserts exactly two emails result, for user2 and
    user1.
    """
    user1 = test_utils.create_user()
    user2 = test_utils.create_user()
    user3 = test_utils.create_user()
    # user1 and user2: immediate mention emails enabled -> should be mailed.
    user1.st.notify = user1.st.Notify.IMMEDIATELY | user1.st.Notify.MENTION
    user1.st.save()
    user2.st.notify = (
        user1.st.Notify.IMMEDIATELY |
        user1.st.Notify.REPLY |
        user1.st.Notify.MENTION)
    user2.st.save()
    user3.st.notify = user1.st.notify
    user3.st.save()
    comment = test_utils.create_comment()
    # The comment author also opts in, but must not be notified about
    # their own comment.
    comment.user.st.notify = user1.st.notify
    comment.user.st.save()
    test_utils.create_notification(
        comment, user1, is_read=False, action='mention')
    test_utils.create_notification(
        user=user1, is_read=False, action='mention')
    test_utils.create_notification(
        comment, user2, is_read=False, action='mention')
    # Already-read notification: no email expected.
    test_utils.create_notification(
        comment, user3, is_read=True, action='mention')
    test_utils.create_notification(
        comment, comment.user, is_read=False, action='mention')
    test_utils.create_notification(
        user=user3, is_read=False, action='mention')
    # Notifications on unrelated comments / wrong action / inactive:
    # all decoys that must be ignored.
    test_utils.create_notification(is_read=True, action='mention')
    test_utils.create_notification(is_read=False, action='mention')
    test_utils.create_notification(
        comment, is_read=False, action='reply')
    test_utils.create_notification(
        comment, is_read=False, action=None)
    test_utils.create_notification(
        comment, is_read=False, action='mention', is_active=False)
    test_utils.create_notification(
        comment, is_read=True, action='mention', is_active=False)
    user4 = test_utils.create_user()
    user5 = test_utils.create_user()
    # WEEKLY / NEVER preferences must not trigger immediate emails.
    user4.st.notify = user4.st.Notify.WEEKLY | user4.st.Notify.MENTION
    user4.st.save()
    user5.st.notify = (
        user5.st.Notify.NEVER |
        user5.st.Notify.REPLY |
        user5.st.Notify.MENTION)
    user5.st.save()
    test_utils.create_notification(
        comment, user4, is_read=False, action='mention')
    test_utils.create_notification(
        comment, user5, is_read=False, action='mention')
    user6 = test_utils.create_user()
    user6.st.notify = user1.st.notify
    user6.st.save()
    user7 = test_utils.create_user()
    user7.st.notify = user1.st.notify
    user7.st.save()
    user8 = test_utils.create_user()
    user8.st.notify = user1.st.notify
    user8.st.save()
    # Opted-in users with inactive / non-mention notifications: ignored.
    test_utils.create_notification(
        comment, user6, is_read=False, action='mention', is_active=False)
    test_utils.create_notification(
        comment, user7, is_read=False, action='reply')
    test_utils.create_notification(
        comment, user8, is_read=False, action=None)
    user9 = test_utils.create_user()
    user9.st.notify = user1.st.notify
    user9.st.save()
    # Mention on a different comment in the same topic: ignored for this
    # comment's task run.
    comment2 = test_utils.create_comment(topic=comment.topic)
    test_utils.create_notification(
        comment2, user9, is_read=False, action='mention')
    tasks.notify_mention(comment_id=comment.pk)
    self.assertEqual(len(mail.outbox), 2)
    self.assertEqual(
        mail.outbox[0].subject, "{user} mention you on {topic}".format(
            user=comment.user.st.nickname, topic=comment.topic.title))
    self.assertEqual(
        mail.outbox[1].subject, "{user} mention you on {topic}".format(
            user=comment.user.st.nickname, topic=comment.topic.title))
    self.assertEqual(mail.outbox[0].from_email, '<EMAIL>')
    self.assertEqual(mail.outbox[1].from_email, '<EMAIL>')
    # Body must contain the absolute URL built from ST_SITE_URL.
    self.assertIn(
        'https://tests.com' + comment.get_absolute_url(),
        mail.outbox[0].body)
    self.assertIn(
        'https://tests.com' + comment.get_absolute_url(),
        mail.outbox[1].body)
    self.assertEqual(mail.outbox[0].to, [user2.email])
    self.assertEqual(mail.outbox[1].to, [user1.email])
@test_utils.immediate_on_commit
@override_settings(
    ST_TASK_MANAGER='tests',
    ST_SITE_URL='https://tests.com/',
    DEFAULT_FROM_EMAIL='<EMAIL>')
def test_notify_weekly(self):
    """Weekly digest emails go to users with the WEEKLY preference who
    have at least one unread, active notification of a type they opted
    into.

    users 1-5 qualify (five emails expected, most recent first);
    user6's notifications are all inactive/read/wrong-action, and
    user7 uses IMMEDIATELY, so neither is mailed.
    """
    user1 = test_utils.create_user()
    user2 = test_utils.create_user()
    user3 = test_utils.create_user()
    user4 = test_utils.create_user()
    user5 = test_utils.create_user()
    # All five opt into WEEKLY with various mention/reply combinations.
    user1.st.notify = user1.st.Notify.WEEKLY | user1.st.Notify.MENTION
    user1.st.save()
    user2.st.notify = (
        user1.st.Notify.WEEKLY |
        user1.st.Notify.REPLY |
        user1.st.Notify.MENTION)
    user2.st.save()
    user3.st.notify = user3.st.Notify.WEEKLY | user3.st.Notify.REPLY
    user3.st.save()
    user4.st.notify = (
        user4.st.Notify.WEEKLY |
        user4.st.Notify.REPLY |
        user4.st.Notify.MENTION)
    user4.st.save()
    user5.st.notify = (
        user5.st.Notify.WEEKLY |
        user5.st.Notify.REPLY |
        user5.st.Notify.MENTION)
    user5.st.save()
    test_utils.create_notification(
        user=user1, is_read=False, action='mention')
    # Multiple unread notifications still produce a single digest email.
    test_utils.create_notification(
        user=user2, is_read=False, action='mention')
    test_utils.create_notification(
        user=user2, is_read=False, action='mention')
    test_utils.create_notification(
        user=user3, is_read=False, action='reply')
    test_utils.create_notification(
        user=user4, is_read=False, action='reply')
    test_utils.create_notification(
        user=user5, is_read=False, action='reply')
    test_utils.create_notification(
        user=user5, is_read=False, action='mention')
    test_utils.create_notification(
        user=user5, is_read=False, action='mention')
    # Decoy notifications for other users / inactive records: ignored.
    test_utils.create_notification(is_read=True, action='mention')
    test_utils.create_notification(is_read=False, action='mention')
    test_utils.create_notification(is_read=False, action='reply')
    test_utils.create_notification(is_read=False, action=None)
    test_utils.create_notification(
        is_read=False, action='mention', is_active=False)
    test_utils.create_notification(
        is_read=True, action='mention', is_active=False)
    comment2 = test_utils.create_comment()
    test_utils.create_notification(
        comment2, user1, is_read=False, action='mention')
    test_utils.create_notification(
        comment2, user2, is_read=False, action='mention')
    user6 = test_utils.create_user()
    user6.st.notify = user1.st.notify
    user6.st.save()
    # user6: only inactive / read / reply / None notifications while
    # opted into WEEKLY+MENTION -> no digest.
    test_utils.create_notification(
        user=user6, is_read=False, action='mention', is_active=False)
    test_utils.create_notification(
        user=user6, is_read=False, action='reply', is_active=False)
    test_utils.create_notification(
        user=user6, is_read=False, action='reply')
    test_utils.create_notification(
        user=user6, is_read=True, action='reply')
    test_utils.create_notification(
        user=user6, is_read=True, action='mention')
    test_utils.create_notification(
        user=user6, is_read=False, action=None)
    user7 = test_utils.create_user()
    # user7 prefers IMMEDIATELY, so the weekly task skips them.
    user7.st.notify = (
        user7.st.Notify.IMMEDIATELY |
        user7.st.Notify.REPLY |
        user7.st.Notify.MENTION)
    user7.st.save()
    test_utils.create_notification(
        user=user7, is_read=False, action='reply')
    test_utils.create_notification(
        user=user7, is_read=True, action='reply')
    tasks.notify_weekly()
    self.assertEqual(len(mail.outbox), 5)
    self.assertEqual(mail.outbox[0].subject, 'New notifications')
    self.assertEqual(mail.outbox[1].subject, 'New notifications')
    self.assertEqual(mail.outbox[0].from_email, '<EMAIL>')
    self.assertEqual(mail.outbox[1].from_email, '<EMAIL>')
    # Digest links to the notification index, not a single comment.
    self.assertIn(
        'https://tests.com' + reverse('spirit:topic:notification:index'),
        mail.outbox[0].body)
    self.assertIn(
        'https://tests.com' + reverse('spirit:topic:notification:index'),
        mail.outbox[1].body)
    # Recipients come out most-recently-created user first.
    self.assertEqual(mail.outbox[0].to, [user5.email])
    self.assertEqual(mail.outbox[1].to, [user4.email])
    self.assertEqual(mail.outbox[2].to, [user3.email])
    self.assertEqual(mail.outbox[3].to, [user2.email])
    self.assertEqual(mail.outbox[4].to, [user1.email])
| StarcoderdataPython |
98956 | <filename>src/toja/tasks/search.py
import dramatiq
import hashlib
from sqlalchemy import and_
from .middleware import DBSessionMiddleware
from ..config import SOURCE_METADATA, JOKE_METADATA, ANNOTATIONS
from ..models import Image
from ..search import Joke, Autosuggest
from ..util import extract_text, extract_annotations
@dramatiq.actor()
def index_all():
    """Trigger indexing of every joke :class:`~toja.models.image.Image`.

    Each joke is passed through :func:`index_joke`, which decides whether
    it is indexable (final transcription present) or should be removed
    from the index. This is a dramatiq Actor, so can be run in the
    background.
    """
    session = DBSessionMiddleware.dbsession()
    jokes = session.query(Image).filter(Image.type == 'joke')
    for db_joke in jokes:
        index_joke(db_joke.id)
def _save_autosuggest(category, value):
    """Persist one auto-suggestion entry for search-as-you-type.

    The document id is a stable SHA-256 of ``category-value``, so
    re-indexing the same pair overwrites the existing entry rather
    than creating a duplicate.
    """
    digest = hashlib.sha256()
    digest.update('{0}-{1}'.format(category, value).encode('utf-8'))
    Autosuggest(category=category,
                value=value,
                value_suggests=value,
                meta={'id': digest.hexdigest()}).save()


@dramatiq.actor()
def index_joke(jid):
    """Index a single joke :class:`~toja.models.image.Image` with the id `jid`.

    Indexes the joke text plus configured source metadata, joke metadata,
    and annotation attributes; multitext/singletext values are also stored
    as auto-suggestions. Jokes without a final transcription are removed
    from the index instead. This is a dramatiq Actor, so can be run in the
    background.
    """
    dbsession = DBSessionMiddleware.dbsession()
    db_joke = dbsession.query(Image).filter((and_(Image.id == jid,
                                                  Image.type == 'joke',
                                                  Image.status == 'final'))).first()
    if db_joke and 'text' in db_joke.attributes:
        joke = Joke(text=extract_text(db_joke.attributes['text']),
                    meta={'id': db_joke.id})
        # Copy over configured source metadata fields.
        for field in SOURCE_METADATA:
            value = db_joke.attribute('source.{0}'.format(field['name']))
            if value:
                if field['type'] == 'date':
                    # Pad partial dates (YYYY or YYYY-MM) to YYYY-MM-DD.
                    missing = 3 - len(value.split('-'))
                    if missing > 0:
                        value = '{0}{1}'.format(value, '-01' * missing)
                joke[field['name']] = value
        # Copy over joke-level metadata; multitext values also feed the
        # auto-suggestion index.
        for field in JOKE_METADATA:
            if field['name'] in db_joke.attributes:
                joke[field['name']] = db_joke.attributes[field['name']]
                if field['type'] == 'multitext':
                    for value in db_joke.attributes[field['name']]:
                        _save_autosuggest(field['name'], value)
        # Index attribute values attached to in-text annotation marks.
        for annotation in ANNOTATIONS:
            if 'attrs' in annotation:
                annotations = extract_annotations(db_joke.attributes['text'], annotation['name'])
                for field in annotation['attrs']:
                    for entry in annotations:
                        for mark in entry.get('marks', []):
                            settings = mark.get('attrs', {}).get('settings', {})
                            if field['name'] not in settings:
                                continue
                            if field['type'] == 'multitext':
                                for value in settings[field['name']]:
                                    _save_autosuggest(field['name'], value)
                            if field['type'] == 'singletext':
                                _save_autosuggest(field['name'], settings[field['name']])
        joke.save()
    else:
        # No indexable transcription: best-effort removal from the search
        # index. Missing documents are deliberately ignored.
        try:
            db_joke = Joke.get(id=jid)
            db_joke.delete()
        except Exception:
            pass
| StarcoderdataPython |
3466618 | import itertools
from pathlib import PosixPath
from typing import Generator, List
import ants
import numpy as np
import torch
import torchio as tio
from omegaconf.dictconfig import DictConfig
from rich.progress import track
from hsf.augmentation import get_augmented_subject
from hsf.engines import InferenceEngine
def mri_to_subject(mri: PosixPath) -> tio.Subject:
    """
    Load the MRI at ``mri`` and return it as a preprocessed subject.

    Preprocessing z-normalizes the intensities and pads the volume so
    every axis is a multiple of 8.

    Args:
        mri (PosixPath): Path to the MRI data.

    Returns:
        tio.Subject: The preprocessed MRI data.
    """
    preprocess = tio.Compose([
        # tio.ToCanonical(),
        tio.ZNormalization(),
        tio.EnsureShapeMultiple(8),
    ])
    raw_subject = tio.Subject(mri=tio.ScalarImage(mri))
    return preprocess(raw_subject)
def to_ca_mode(logits: torch.Tensor, ca_mode: str = "1/2/3") -> torch.Tensor:
    """
    Rearrange the channel axis of ``logits`` according to ``ca_mode``.

    Channel layout of the input: 0 bg, 1 dg, 2 ca1, 3 ca2, 4 ca3, 5 sub.

    Args:
        logits (torch.Tensor): The logits, shape (batch, 6, ...).
        ca_mode (str): The cornu ammoni division mode. An empty/falsy
            value collapses everything but background into one channel.

    Returns:
        torch.Tensor: The corrected logits.

    Raises:
        ValueError: If ``ca_mode`` is not one of "", "1/2/3", "1/23", "123".
    """
    if not ca_mode:
        # Whole hippocampus: background vs. everything else.
        background = logits[:, :1, :, :, :]
        foreground = torch.sum(logits[:, 1:, :, :, :], dim=1, keepdim=True)
        return torch.cat([background, foreground], dim=1)
    if ca_mode == "1/2/3":
        # Identity: keep all CA subfields separate.
        return logits
    if ca_mode == "1/23":
        # ca1 kept; ca2 and ca3 merged into a single channel.
        head = logits[:, :3, :, :, :]
        merged = logits[:, 3:4, :, :, :] + logits[:, 4:5, :, :, :]
        tail = logits[:, 5:, :, :, :]
        return torch.cat([head, merged, tail], dim=1)
    if ca_mode == "123":
        # ca1, ca2 and ca3 merged into a single channel.
        head = logits[:, :2, :, :, :]
        merged = (logits[:, 2:3, :, :, :]
                  + logits[:, 3:4, :, :, :]
                  + logits[:, 4:5, :, :, :])
        tail = logits[:, 5:, :, :, :]
        return torch.cat([head, merged, tail], dim=1)
    raise ValueError(
        f"Unknown `ca_mode` ({ca_mode}). `ca_mode` must be 1/2/3, 1/23 or 123"
    )
def predict(mris: list,
            engine: InferenceEngine,
            ca_mode: str = "1/2/3") -> torch.Tensor:
    """
    Predict a segmentation from a subject.

    The batch of (possibly augmented) subjects is run through the
    engine, the channel layout is adjusted via ``to_ca_mode``, and each
    label map is mapped back into the original (pre-augmentation) space
    with torchio's inverse transform.

    Args:
        mris (List[tio.Subject]): List of loaded torchio mris.
        engine (InferenceEngine): HSF's Inference Engine.
        ca_mode (str, optional): The cornu ammoni division mode.
            Defaults to "1/2/3".

    Returns:
        torch.Tensor: Segmentations.
    """
    # Stack the subjects into one batch; engine output is indexed [0]
    # (first output of the inference engine).
    inp = np.stack([mri.mri.data.numpy() for mri in mris])
    logits = engine(inp)
    logits = to_ca_mode(torch.tensor(logits[0]), ca_mode)
    results = []
    for lab, aug in zip(logits, mris):
        # Attach a placeholder LabelMap so the subject records the label
        # image with the correct affine, then overwrite its data.
        lm_temp = tio.LabelMap(tensor=torch.rand(1, 1, 1, 1),
                               affine=aug.mri.affine)
        aug.add_image(lm_temp, 'label')
        aug.label.set_data(lab)
        # Undo the augmentation so the labels align with the raw MRI.
        back = aug.apply_inverse_transform(warn=True)
        results.append(back.label.data)

    return results
def segment(subjects: List[tio.Subject],
            augmentation_cfg: DictConfig,
            segmentation_cfg: DictConfig,
            n_engines: int,
            engines: Generator,
            ca_mode: str = "1/2/3",
            batch_size: int = 1) -> tuple:
    """
    Segments the given subject.

    Every input subject is expanded into its test-time augmentations,
    the augmented subjects are processed in batches by each inference
    engine, and the per-voxel mode over all soft predictions gives the
    final hard segmentation.

    Args:
        subjects (List): List of the subject to segment.
        augmentation_cfg (DictConfig): Augmentation configuration.
        segmentation_cfg (DictConfig): Segmentation configuration.
        n_engines (int): Number of inference engines.
        engines (Generator[InferenceEngine]): Inference Engines.
        ca_mode (str): The cornu ammoni division mode. Defaults to "1/2/3".
        batch_size (int): Batch size. Defaults to 1.

    Returns:
        tuple: (soft predictions, hard prediction).
    """
    # Expand each subject into its test-time-augmented variants.
    augmented = []
    for subject in subjects:
        augmented.extend(
            get_augmented_subject(subject, augmentation_cfg,
                                  segmentation_cfg))
    subjects = augmented

    batches = [
        subjects[start:start + batch_size]
        for start in range(0, len(subjects), batch_size)
    ]

    results = []
    for n, engine in enumerate(engines, start=1):
        for batch in track(
                batches,
                description=
                f"Segmenting (TTA: {len(subjects)} | MODEL {n}/{n_engines})..."
        ):
            results.extend(predict(batch, engine, ca_mode))

    # Majority vote (mode) across all augmentations and engines.
    soft_predictions = torch.stack(results, dim=0)
    hard_prediction = soft_predictions.argmax(dim=1).long().mode(dim=0).values

    return soft_predictions, hard_prediction
def save_prediction(mri: PosixPath,
                    prediction: torch.Tensor,
                    suffix: str = "seg",
                    astype: str = "uint8") -> ants.ANTsImage:
    """
    Save ``prediction`` next to the input MRI as ``<name>_<suffix>.nii.gz``.

    Args:
        mri (PosixPath): Path to the MRI data.
        prediction (torch.Tensor): The prediction.
        suffix (str): The suffix of the output file.
        astype (str): The type of the output file.

    Returns:
        ants.ANTsImage: The predicted segmentation.
    """
    # Reuse the input image's header/affine for the label volume.
    reference = ants.image_read(str(mri))
    volume = prediction.numpy() * 1.
    labelled = reference.new_image_like(volume.squeeze())
    cast = labelled.astype(astype)

    # '<original name without extensions>_<suffix>.nii.gz' in the same dir.
    ext = "".join(mri.suffixes)
    stem = mri.name.replace(ext, "")
    output_path = mri.parent / "{}_{}.nii.gz".format(stem, suffix)

    ants.image_write(cast, str(output_path))
    return cast
| StarcoderdataPython |
11344551 | <gh_stars>0
import tensorflow as tf
import numpy as np
from skimage import filters, transform
import matplotlib.pyplot as plt
import math
from setup_cifar import CIFAR, CIFARModel
from setup_mnist import MNIST, MNISTModel
def eval(label, arr):
    """Print summary statistics (count, max, min, mean, var, median) of
    ``arr``, prefixed by ``label``.

    NOTE(review): this name shadows the builtin ``eval``; kept because
    callers in this file use it.
    """
    count = arr.shape[0]
    amax, amin = np.max(arr), np.min(arr)
    mean, var, median = np.mean(arr), np.var(arr), np.median(arr)
    print(label + " count ", count, " max ", amax, " min ", amin,
          " mean ", mean, " var ", var, " median ", median)
def softmax(x):
    """Row-wise softmax of a 2-D array.

    Subtracts each row's max before exponentiating for numerical
    stability (mathematically identical, but avoids overflow/NaN for
    large logits).
    """
    shifted = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)
def compare_result(tag, result):
    """Report accuracy and confidence statistics for model outputs.

    Compares argmax predictions in ``result`` against the module-level
    ``mnist.test_labels``, prints accuracy, then prints softmax-score
    statistics separately for correct and incorrect predictions.
    """
    confidence = np.max(softmax(result), axis=1)
    predicted = np.argmax(result, axis=1)
    truth = np.argmax(mnist.test_labels, axis=1)
    # print(predicted)
    # print(truth)
    wrong = (predicted != truth)
    right = ~wrong
    print("acc = %f" % (1 - np.sum(wrong) / truth.shape[0]))
    eval(tag + ' right', confidence[right])
    eval(tag + ' wrong', confidence[wrong])
def show_images(images):
    """Display up to a square grid of 28x28 grayscale images.

    The grid side is ceil(sqrt(N)); any surplus grid cells are skipped
    by showing the figure early and returning.
    """
    side = int(math.ceil(math.sqrt(images.shape[0])))
    fig = plt.figure()
    plt.subplots_adjust(wspace=1, hspace=1)
    for idx in range(side * side):
        if idx >= images.shape[0]:
            plt.show()
            return
        panel = fig.add_subplot(side, side, idx + 1)
        panel.imshow(images[idx].reshape([28, 28]), cmap=plt.cm.gray)
    plt.show()
# TODO: migrate this to the new closed-source project!
if __name__ == "__main__":
    with tf.Session() as sess:
        model = MNISTModel("models/mnist", sess)
        mnist = MNIST()
        # Fixed seed: the sequence of np.random.rand draws below is
        # reproducible, so the saved parameters are deterministic.
        np.random.seed(1234)
        k = 0
        params = []
        for i in range(30):
            print('it ' + str(i))
            # Base affine: slight 1.1x scale; perturb the translation and
            # linear part with bounded random noise (last row stays [0,0,1]).
            tfparam = np.array([[1.1, 0., 0.], [0., 1.1, 0.], [0., 0., 1.]])
            tfseed = (np.random.rand(3, 3) - 0.5) * np.array([[0.2, 0.2, 6], [0.2, 0.2, 6], [0, 0, 0]])
            print(tfseed)
            tfparam += tfseed
            tform = transform.AffineTransform(tfparam)
            print(tfparam)
            # Warp the full test set with the sampled affine transform.
            # Images are shifted to [0, 1] before warping and back after.
            # variant_mnist = [transform.warp(x + 0.5, tform) for x in mnist.test_data[:10, :, :, :]]
            variant_mnist = [transform.warp(x + 0.5, tform) for x in mnist.test_data]
            variant_mnist = np.reshape(variant_mnist, (10000, 28, 28, 1)) - 0.5
            mnist_test_result = model.model.predict(variant_mnist)
            amr = np.argmax(mnist_test_result, axis=1)
            aml = np.argmax(mnist.test_labels, axis=1)
            wrong_indices = (amr != aml)
            right_indices = ~wrong_indices
            acc = (1 - np.sum(wrong_indices) / aml.shape[0])
            print("acc = %f" % acc)
            # Keep only transforms the model still handles well
            # (> 95% accuracy); stop after 10 accepted transforms.
            if acc > 0.95:
                print('save #%d' % i)
                params.append(tfparam)
                np.save('exp_affine_in_%d.npy' % k, mnist_test_result)
                print(mnist_test_result.shape)
                k += 1
                if k >= 10:
                    break
        params = np.array(params)
        print(params.shape)
        np.save('tf_ood_params.npy', params)
        # eval('mnist_test_result', mnist_test_result)
        # compare_result('mnist', mnist_test_result)
        #
        # print(np.argmax(mnist_test_result[:10], axis=1))
        #
        # show_images(variant_mnist[:10, :, :, :])
| StarcoderdataPython |
9658870 | <reponame>vponomaryov/manila<filename>manila_tempest_tests/tests/api/test_share_groups.py
# Copyright 2016 <NAME>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib import exceptions as lib_exc
import testtools
from testtools import testcase as tc
from manila_tempest_tests.common import constants
from manila_tempest_tests.tests.api import base
CONF = config.CONF
@testtools.skipUnless(
    CONF.share.run_share_group_tests, 'Share Group tests disabled.')
@base.skip_if_microversion_lt(constants.MIN_SHARE_GROUP_MICROVERSION)
class ShareGroupsTest(base.BaseSharesTest):
    """Covers share group functionality."""

    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_create_populate_delete_share_group_min(self):
        """Create a group, add a share, then tear both down and verify."""
        # Create a share group
        share_group = self.create_share_group(
            cleanup_in_class=False,
            version=constants.MIN_SHARE_GROUP_MICROVERSION)
        keys = set(share_group.keys())
        self.assertTrue(
            constants.SHARE_GROUP_DETAIL_REQUIRED_KEYS.issubset(keys),
            'At least one expected element missing from share group '
            'response. Expected %(expected)s, got %(actual)s.' % {
                "expected": constants.SHARE_GROUP_DETAIL_REQUIRED_KEYS,
                "actual": keys}
        )
        # Populate
        share = self.create_share(
            share_group_id=share_group['id'],
            cleanup_in_class=False,
            version=constants.MIN_SHARE_GROUP_MICROVERSION,
            experimental=True)
        # Delete the share (scoped to its group), then the group itself.
        params = {"share_group_id": share_group['id']}
        self.shares_v2_client.delete_share(
            share['id'],
            params=params,
            version=constants.MIN_SHARE_GROUP_MICROVERSION)
        self.shares_client.wait_for_resource_deletion(share_id=share['id'])
        self.shares_v2_client.delete_share_group(
            share_group['id'], version=constants.MIN_SHARE_GROUP_MICROVERSION)
        self.shares_v2_client.wait_for_resource_deletion(
            share_group_id=share_group['id'])
        # Verify both resources are really gone.
        self.assertRaises(
            lib_exc.NotFound,
            self.shares_v2_client.get_share_group, share_group['id'])
        self.assertRaises(
            lib_exc.NotFound, self.shares_client.get_share, share['id'])

    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_create_delete_empty_share_group_snapshot_min(self):
        """Snapshot of an empty group has no members and can be deleted."""
        # Create base share group
        share_group = self.create_share_group(
            cleanup_in_class=False,
            version=constants.MIN_SHARE_GROUP_MICROVERSION)
        # Create share group snapshot
        sg_snapshot = self.create_share_group_snapshot_wait_for_active(
            share_group["id"],
            cleanup_in_class=False,
            version=constants.MIN_SHARE_GROUP_MICROVERSION)
        keys = set(sg_snapshot.keys())
        self.assertTrue(
            constants.SHARE_GROUP_SNAPSHOT_DETAIL_REQUIRED_KEYS.issubset(keys),
            'At least one expected element missing from share group snapshot '
            'response. Expected %(e)s, got %(a)s.' % {
                "e": constants.SHARE_GROUP_SNAPSHOT_DETAIL_REQUIRED_KEYS,
                "a": keys})
        sg_snapshot_members = sg_snapshot['members']
        self.assertEmpty(
            sg_snapshot_members,
            'Expected 0 share_group_snapshot members, got %s' % len(
                sg_snapshot_members))
        # Delete snapshot
        self.shares_v2_client.delete_share_group_snapshot(
            sg_snapshot["id"], version=constants.MIN_SHARE_GROUP_MICROVERSION)
        self.shares_v2_client.wait_for_resource_deletion(
            share_group_snapshot_id=sg_snapshot["id"])
        self.assertRaises(
            lib_exc.NotFound,
            self.shares_v2_client.get_share_group_snapshot,
            sg_snapshot['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION)

    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_create_share_group_from_empty_share_group_snapshot_min(self):
        """A group created from an empty snapshot is empty and inherits
        share types and share network from the source group."""
        # Create base share group
        share_group = self.create_share_group(
            cleanup_in_class=False,
            version=constants.MIN_SHARE_GROUP_MICROVERSION)
        # Create share group snapshot
        sg_snapshot = self.create_share_group_snapshot_wait_for_active(
            share_group["id"], cleanup_in_class=False,
            version=constants.MIN_SHARE_GROUP_MICROVERSION)
        snapshot_members = sg_snapshot['members']
        self.assertEmpty(
            snapshot_members,
            'Expected 0 share group snapshot members, got %s' %
            len(snapshot_members))
        new_share_group = self.create_share_group(
            cleanup_in_class=False,
            source_share_group_snapshot_id=sg_snapshot['id'],
            version=constants.MIN_SHARE_GROUP_MICROVERSION)
        new_shares = self.shares_v2_client.list_shares(
            params={'share_group_id': new_share_group['id']},
            version=constants.MIN_SHARE_GROUP_MICROVERSION, experimental=True)
        self.assertEmpty(
            new_shares, 'Expected 0 new shares, got %s' % len(new_shares))
        # NOTE: fixed typo 'source_ishare_group_snapshot_id' in the
        # failure message.
        msg = ('Expected source_share_group_snapshot_id %s '
               'as source of share group %s' % (
                   sg_snapshot['id'],
                   new_share_group['source_share_group_snapshot_id']))
        self.assertEqual(
            new_share_group['source_share_group_snapshot_id'],
            sg_snapshot['id'],
            msg)
        msg = ('Unexpected share_types on new share group. Expected '
               '%s, got %s.' % (share_group['share_types'],
                                new_share_group['share_types']))
        self.assertEqual(
            sorted(share_group['share_types']),
            sorted(new_share_group['share_types']), msg)
        # Assert the share_network information is the same
        msg = 'Expected share_network %s as share_network of cg %s' % (
            share_group['share_network_id'],
            new_share_group['share_network_id'])
        self.assertEqual(
            share_group['share_network_id'],
            new_share_group['share_network_id'],
            msg)
| StarcoderdataPython |
4956546 | """
Routines to analyze the data from the COSMOS program.
TO DO:
- persistence correction
"""
import threedhst
import astropy
from astropy.io import fits
from astropy.table import Table as table
import drizzlepac
from drizzlepac import astrodrizzle
import os
import glob
import numpy as np
def split_IMA(root='icxe15wwq', PATH='../RAW/'):
    """
    Make FLTs from the individual reads of a multiaccum IMA file.

    Each consecutive read difference becomes its own FLT written as
    '<root>_<nn>_flt.fits' with per-read exposure time, error and DQ
    arrays.

    Parameters
    ----------
    root : str
        Root name of the FLT that will be split into individual images.
    PATH : str
        Path to the directory where the clean RAW files are located.
    """
    FLAT_F160W = fits.open(os.path.join(os.getenv('iref'), 'uc721145i_pfl.fits'))[1].data
    BP_MASK = fits.open('../REF/new_bp_Oct17.fits')[0].data

    ima = fits.open(PATH + root + '_ima.fits.gz')
    # Template FLT whose extensions are overwritten for every read.
    flt = fits.open(root + '_flt.fits')

    NSAMP = ima[0].header['NSAMP']
    sh = ima['SCI', 1].shape

    # Reads are stored last-first in the IMA; reverse into time order.
    # (dtype=int: np.int is deprecated/removed in modern numpy.)
    cube = np.zeros((NSAMP, sh[0], sh[1]))
    dq = np.zeros((NSAMP, sh[0], sh[1]), dtype=int)
    time = np.zeros(NSAMP)
    for i in range(NSAMP):
        # Convert from rate back to counts using the read's exposure time.
        cube[NSAMP - 1 - i, :, :] = ima['SCI', i + 1].data * ima['TIME', i + 1].header['PIXVALUE']
        dq[NSAMP - 1 - i, :, :] = ima['DQ', i + 1].data
        time[NSAMP - 1 - i] = ima['TIME', i + 1].header['PIXVALUE']

    # Per-read signal and exposure time from consecutive differences.
    diff = np.diff(cube, axis=0)
    dt = np.diff(time)

    # Quadrant read noise (squared), one amp per detector quadrant.
    readnoise_2D = np.zeros((1024, 1024))
    readnoise_2D[512:, 0:512] += ima[0].header['READNSEA']
    readnoise_2D[0:512, 0:512] += ima[0].header['READNSEB']
    readnoise_2D[0:512, 512:] += ima[0].header['READNSEC']
    readnoise_2D[512:, 512:] += ima[0].header['READNSED']
    readnoise_2D = readnoise_2D**2

    # Skip the first (reset) and last differences; write one FLT per read.
    for j in range(1, NSAMP - 1):
        print('{}_{:02d}_flt.fits'.format(root, j))
        sci = diff[j, :, :]
        exptime = dt[j]

        var = readnoise_2D + sci * FLAT_F160W
        err = np.sqrt(var) / exptime

        flt[0].header['EXPTIME'] = exptime
        # Trim the 5-pixel reference-pixel border (1024 -> 1014).
        flt['SCI'].data = sci[5:-5, 5:-5] / exptime
        flt['ERR'].data = err[5:-5, 5:-5]
        flt['DQ'].data = dq[j + 1][5:-5, 5:-5]

        # Flag known bad pixels with the standard bad-detector bit (4).
        flt['DQ'].data[BP_MASK == 1] += 4
        # Turn the 8192 cosmic ray flag into the standard 4096.
        flt['DQ'].data[(flt['DQ'].data & 8192) > 0] -= 4096
        # Remove the 32 flag, these are not consistently bad.
        flt['DQ'].data[(flt['DQ'].data & 32) > 0] -= 32

        flt['SAMP'].data = np.zeros((1014, 1014)) + 1.
        flt['TIME'].data = np.zeros((1014, 1014)) + exptime

        flt[0].header['IMA2FLT'] = (1, 'FLT {} extracted from IMA file'.format(j))

        print('Writing {}_{:02d}_flt.fits'.format(root, j))
        flt.writeto('{}_{:02d}_flt.fits'.format(root, j), clobber=True)

    # Close the HDU lists we opened (the original also opened the RAW
    # FLT into an unused handle; that leak has been removed).
    ima.close()
    flt.close()
def make_pointing_asn(root='icxe15wwq', master_root='icxe15010'):
    """
    Make a new association table for the reads extracted from a given FLT.

    The new ASN lists every '<root>_*_flt.fits' file on disk as an
    EXP-DTH member plus one PROD-DTH product row, and is written to
    '<root>_asn.fits'.

    Parameters
    ----------
    root : str
        Root name of the FLT that was split into individual images.
        Will be the root name of the new ASN.
    master_root : str
        Master ASN root whose table layout and headers are copied.
    """
    master_asn = fits.open('{}_asn.fits'.format(master_root))
    files = glob.glob('{}_*_flt.fits'.format(root))
    nrows = len(files)

    # Primary HDU copied verbatim from the master ASN.
    hdu = master_asn[0].copy()
    tbhdu = fits.new_table(master_asn[1].columns, nrows=nrows + 1, fill=True)
    for i in range(nrows):
        # MEMNAME, MEMTYPE, MEMPRSNT
        tbhdu.data[i] = (files[i].split('_flt')[0].upper(), 'EXP-DTH', True)
    # Final row is the drizzled product for the whole association.
    tbhdu.data[i + 1] = (root.upper(), 'PROD-DTH', True)

    tbhdu.header = master_asn[1].header.copy()
    tbhdu.header.update('ASN_ID', root)
    tbhdu.header.update('ASN_TAB', '{}_asn.fits'.format(root))

    # Create HDUList and write it to output file
    out_fits = fits.HDUList([hdu, tbhdu])
    if 'EXTEND' not in hdu.header.keys():
        hdu.header.update('EXTEND', True, after='NAXIS')

    print('Writing {}_asn.fits'.format(root))
    out_fits.writeto('{}_asn.fits'.format(root), clobber=True)
def subtract_background_reads(root='icxe15wwq', master_root='icxe15010',
                              subtract=False, reset_stars_dq=False):
    """
    Subtract background from the individual reads.

    Uses the DRZ and SEG images produced in the unicorn FLT background
    subtraction: the segmentation map is blotted back onto the first
    read's frame and the median of unmasked sky pixels is taken as the
    background level for every read of the association.

    Parameters
    ----------
    root : str
        Root name of the ASN which describes the collection of images.
    master_root : str
        Root of the master ASN (provides the DRZ/SEG reference images).
    subtract : bool
        By default the background is only written to the header
        (MDRIZSKY); set to True to also subtract it from the data.
    reset_stars_dq : bool
        If True, reset cosmic-ray flags (4096) inside object footprints
        to 0, because the centers of stars get flagged.
    """
    import stwcs

    asn = threedhst.utils.ASNFile('{}_asn.fits'.format(root))

    ref = fits.open('{}_drz_sci.fits'.format(master_root))
    ref_wcs = stwcs.wcsutil.HSTWCS(ref, ext=0)

    seg = fits.open('{}_drz_seg.fits'.format(master_root))
    seg_data = np.cast[np.float32](seg[0].data)

    yi, xi = np.indices((1014, 1014))

    # Loop through FLTs; the blotted segmentation map is computed once
    # (all reads share the first exposure's WCS).
    for exp in asn.exposures:
        flt = fits.open('{}_flt.fits'.format(exp))
        flt_wcs = stwcs.wcsutil.HSTWCS(flt, ext=1)

        if exp == asn.exposures[0]:
            print('Segmentation image: {}_blot.fits'.format(exp))
            blotted_seg = astrodrizzle.ablot.do_blot(
                seg_data, ref_wcs, flt_wcs, 1, coeffs=True, interp='nearest',
                sinscl=1.0, stepsize=10, wcsmap=None)

        # Sky pixels: outside objects, clean DQ, away from the frame
        # edges, and within clipped data/error ranges.
        mask = (blotted_seg == 0) & (flt['DQ'].data == 0) & (flt[1].data > -1) & (xi > 10) & (yi > 10) & (xi < 1004) & (yi < 1004)
        mask &= (flt[1].data < 5 * np.median(flt[1].data[mask]))
        data_range = np.percentile(flt[1].data[mask], [2.5, 97.5])
        mask &= (flt[1].data >= data_range[0]) & (flt[1].data <= data_range[1])
        data_range = np.percentile(flt[2].data[mask], [0.05, 99.5])
        mask &= (flt[2].data >= data_range[0]) & (flt[2].data <= data_range[1])

        # Flat background model: the median sky level.
        sky_level = np.median(flt[1].data[mask])
        model = flt[1].data * 0. + sky_level

        # Re-open in update mode to record (and optionally subtract) it.
        flt = fits.open('{}_flt.fits'.format(exp), mode='update')
        flt[1].header['MDRIZSKY'] = sky_level
        if subtract:
            flt[1].data -= model
            flt[1].header['BG_SUB'] = 'Yes'
        else:
            flt[1].header['BG_SUB'] = 'No'

        if reset_stars_dq:
            flagged_stars = ((flt['DQ'].data & 4096) > 0) & (blotted_seg > 0)
            flt['DQ'].data[flagged_stars] -= 4096

        flt.flush()
        print('Background subtraction, {}_flt.fits: {}'.format(exp, sky_level))
def fix_cosmic_rays(root='icxe15wwq', master_root='icxe15010'):
    """Reset cosmic-ray flags inside object footprints and re-detect
    CRs with L.A.Cosmic.

    The master segmentation map is blotted onto the full FLT frame; any
    4096 (cosmic ray) DQ bit inside an object is cleared on every read,
    then the pixels L.A.Cosmic flags on the full exposure are re-marked
    with 4096 in each read's DQ.

    Parameters
    ----------
    root : str
        Root name of the per-read ASN ('<root>_asn.fits').
    master_root : str
        Root providing the DRZ reference and segmentation images.
    """
    from cosmics import cosmics
    import stwcs

    asn = threedhst.utils.ASNFile('{}_asn.fits'.format(root))

    ref = fits.open('{}_drz_sci.fits'.format(master_root))
    ref_wcs = stwcs.wcsutil.HSTWCS(ref, ext=0)

    seg = fits.open('{}_drz_seg.fits'.format(master_root))
    seg_data = np.cast[np.float32](seg[0].data)

    flt_full = fits.open('{}_flt.fits'.format(root))
    flt_full_wcs = stwcs.wcsutil.HSTWCS(flt_full, ext=1)

    # Project the segmentation map into the FLT frame (nearest-neighbor
    # so object ids stay integral).
    blotted_seg = astrodrizzle.ablot.do_blot(seg_data, ref_wcs, flt_full_wcs, 1,
        coeffs=True, interp='nearest', sinscl=1.0, stepsize=10, wcsmap=None)

    EXPTIME = flt_full[0].header['EXPTIME']
    SKY = flt_full[0].header['BGCOMP1']

    yi, xi = np.indices((1014, 1014))

    # Build the object :
    #c = cosmics.cosmicsimage(flt_full[1].data*EXPTIME, gain=1.0, readnoise=2.2,
    #    sigclip = 5.0, sigfrac = 0.4, objlim = 5.0, satlevel=-1., pssl = SKY*EXPTIME)
    c = cosmics.cosmicsimage(flt_full[1].data, gain=1.0, readnoise=0.12,
        sigclip = 4.0, sigfrac = 0.5, objlim = 10.0, satlevel=-1., pssl = 0.)

    # Run the full artillery :
    c.run(maxiter = 4, verbose=True)

    for exp in asn.exposures:
        flt = fits.open('{}_flt.fits'.format(exp), mode = 'update')
        # Clear CR flags that fall inside object footprints (star cores
        # are commonly mis-flagged).
        flagged_stars = ((flt['DQ'].data & 4096) > 0) & (blotted_seg > 0)
        flt['DQ'].data[flagged_stars] -= 4096
        # Add the 4096 bit for newly detected CR pixels only (avoid
        # double-incrementing already-flagged pixels).
        #new_cr = (c.mask == 1) & ((flt['DQ'].data & 4096) == 0) & ((blotted_seg == 0) | ((blotted_seg > 0) & (flt['SCI'].data < 1.))) & (xi > 915) & (yi < 295)
        new_cr = (c.mask == 1) & ((flt['DQ'].data & 4096) == 0)
        flt['DQ'].data[new_cr] += 4096
        flt.flush()
def align_reads(root='icxe15wwq', threshold=3, final_scale=0.12,
                refimage='../REF/cosmos-wide_ACS.fits',
                master_catalog='../REF/IPAC_ACS.fits', align=True,
                refxcol=5, refycol=6):
    """Align the per-read FLTs to a reference image and drizzle them.

    For each exposure a SExtractor catalog is built (background is
    temporarily subtracted using the stored MDRIZSKY), the exposures are
    optionally tweak-aligned against ``master_catalog``, then the stack
    is drizzled to ``final_scale``. The background is restored in each
    FLT afterwards.

    Parameters
    ----------
    root : str
        Root name of the per-read ASN.
    threshold : float
        SExtractor detection/analysis threshold.
    final_scale : float
        Output pixel scale for AstroDrizzle.
    refimage, master_catalog : str
        Reference image and catalog used by TweakReg.
    align : bool
        If False, skip the TweakReg alignment step.
    refxcol, refycol : int
        Column indices of RA/Dec (degrees) in ``master_catalog``.
    """
    from drizzlepac import tweakreg

    asn = threedhst.utils.ASNFile('{}_asn.fits'.format(root))

    # TweakReg input list: one 'FLT catalog' pair per line.
    catfile = '{}.catfile'.format(root)
    fp = open(catfile,'w')

    #drizzlepac.astrodrizzle.AstroDrizzle('{}_asn.fits'.format(root), output=root, clean=False, context=False,
    #    preserve=False, skysub=True, driz_separate=True, driz_sep_wcs=True, median=True, blot=True, driz_cr=True,
    #    driz_cr_corr=True, driz_combine=True)

    for exp in asn.exposures:
        # Temporarily subtract the stored sky level so detection works
        # on background-free data.
        flt = fits.open('{}_flt.fits'.format(exp), mode='update')
        if flt[1].header['BG_SUB'] == 'No':
            flt[1].data -= flt[1].header['MDRIZSKY']
            flt[1].header['BG_SUB'] = 'Yes'
            flt.flush()

        se = threedhst.sex.SExtractor()
        se.aXeParams()
        se.copyConvFile()
        se.options['CHECKIMAGE_TYPE'] = 'NONE'
        se.options['FILTER'] = 'N'
        se.options['WEIGHT_IMAGE'] = '{}_flt.fits[1]'.format(exp)
        se.options['WEIGHT_TYPE'] = 'MAP_WEIGHT'
        #
        se.params['X_IMAGE'] = True; se.params['Y_IMAGE'] = True
        se.params['MAG_AUTO'] = True
        #
        se.options['CATALOG_NAME'] = '{}_flt.cat'.format(exp)
        se.options['DETECT_THRESH'] = '{}'.format(threshold)
        se.options['ANALYSIS_THRESH'] = '{}' .format(threshold)
        se.options['DETECT_MINAREA'] = '10'
        #
        se.sextractImage('{}_flt.fits[1]'.format(exp))
        threedhst.sex.sexcatRegions('{}_flt.cat'.format(exp), '{}_flt.reg'.format(exp), format=1)

        line = '{0}_flt.fits {0}_flt.cat\n'.format(exp)
        fp.write(line)
    fp.close()

    files = glob.glob('{}_*_flt.fits'.format(root))

    if align:
        #### Make room for TWEAK wcsname
        for exp in asn.exposures:
            threedhst.prep_flt_astrodrizzle.clean_wcsname(flt='{}_flt.fits'.format(exp), wcsname='TWEAK')
            threedhst.prep_flt_astrodrizzle.clean_wcsname(flt='{}_flt.fits'.format(exp), wcsname='OPUS')

        tweakreg.TweakReg(files, refimage=refimage, updatehdr=True, updatewcs=True, catfile=catfile, xcol=2, ycol=3, xyunits='pixels', refcat=master_catalog, refxcol = refxcol, refycol = refycol, refxyunits='degrees', shiftfile=True, fitgeometry='shift',outshifts='{}_shifts.txt'.format(root), outwcs='{}_wcs.fits'.format(root), searchrad=5., tolerance=1., minobj = 5, xoffset = 0.0, yoffset = 0.0, wcsname='TWEAK', interactive=False, residplot='No plot', see2dplot=True, clean=True, headerlet=False, clobber=True)

    # AstroDrizzle doesn't like the asn file here: '{}_asn.fits'.format(root)
    # Temporarily substituting with a list of files
    #files = glob.glob('{}_*_flt.fits'.format(root))
    drizzlepac.astrodrizzle.AstroDrizzle(files, output=root, clean=True, final_scale=final_scale,
        final_pixfrac=0.8, context=False, resetbits=0, final_bits=576, driz_cr = True, driz_sep_bits=576,
        preserve=False, wcskey='TWEAK', driz_cr_snr='8.0 5.0', driz_cr_scale = '2.5 0.7', skyuser = 'MDRIZSKY',
        final_wcs=True)

    # Restore the background that was subtracted for source detection.
    for exp in asn.exposures:
        flt = fits.open('{}_flt.fits'.format(exp), mode='update')
        flt[1].data += flt[1].header['MDRIZSKY']
        flt[1].header['BG_SUB'] = 'No'
        flt.flush()
def prep_FLTs(root='icxe15010', refimage='../REF/cosmos-wide_ACS.fits', REF_CAT='../REF/IPAC_ACS.fits'):
    """Background-subtract, align (TweakReg) and drizzle-combine the FLT
    exposures of one association.

    root     -- association rootname; ``<root>_asn.fits`` must exist.
    refimage -- reference mosaic used by TweakReg for WCS alignment.
    REF_CAT  -- reference catalog for TweakReg (RA/Dec in columns 5/6).

    NOTE(review): relies on a module-level ``drizzlepac`` import from the top
    of the file; side effects only (updates FLT headers, writes shift files
    and the drizzled mosaic).
    """
    import threedhst.prep_flt_astrodrizzle
    # Output shift table and WCS reference file written by TweakReg.
    outshifts = 'shifts_{}.txt'.format(root)
    outwcs = 'shifts_{}_wcs.fits'.format(root)
    #drizzlepac.astrodrizzle.AstroDrizzle(root+'_asn.fits', clean=False, context=False, preserve=False, skysub=True, driz_separate=True, driz_sep_wcs=True, median=True, blot=True, driz_cr=True, driz_cr_corr=True, driz_combine=True)
    threedhst.prep_flt_astrodrizzle.subtract_flt_background(root=root)
    # Align all exposures to the reference catalog; writes the 'TWEAK' WCS
    # into the FLT headers (updatehdr=True) plus a shift file for bookkeeping.
    drizzlepac.tweakreg.TweakReg(root+'_asn.fits', refimage=refimage, updatehdr = True, updatewcs = True,
        writecat = False, clean = True, verbose = True, runfile = 'tweakreg.log',
        wcsname = 'TWEAK', headerlet = False, shiftfile = True, outshifts = outshifts, outwcs = outwcs,
        refcat = REF_CAT, refxcol = 5, refycol = 6, refxyunits = 'degrees', minobj = 5, searchrad = 1000.0,
        searchunits = 'pixels',
        use2dhist = True, see2dplot = False, separation = 0.5, tolerance = 1.0, xoffset = 0.0, yoffset = 0.0,
        fitgeometry = 'shift', interactive=False, nclip = 3, sigma = 3.0, clobber=True)
    # Final combination using the freshly written 'TWEAK' WCS key.
    drizzlepac.astrodrizzle.AstroDrizzle(root+'_asn.fits', clean=False, final_pixfrac=1.0, context=False, final_bits=576, resetbits=0, preserve=False, driz_cr_snr='8.0 5.0', driz_cr_scale = '2.5 0.7', wcskey= 'TWEAK')
def run_sextractor(mosaic='test1_drz_sci.fits', weight='test1_drz_wht.fits'):
    """Run SExtractor on *mosaic* using *weight* as a weight map.

    Writes the catalog plus three check-images (segmentation, background,
    background-subtracted) next to the input mosaic.

    Fixes: the original computed ``segmentaion`` (typo) and never used it,
    re-deriving the same names inline in the command string.
    """
    import os
    catalog = mosaic.replace('.fits', '.cat')
    # Check-image outputs, reused below instead of recomputing inline.
    segmentation = mosaic.replace('.fits', '_seg.fits')
    background = mosaic.replace('.fits', '_bg.fits')
    subtracted = mosaic.replace('.fits', '_sub.fits')
    sextr = ("sex %s -c gyro.config -CATALOG_NAME %s -MAG_ZEROPOINT %f -BACK_TYPE AUTO,AUTO "
             "-WEIGHT_TYPE MAP_WEIGHT -WEIGHT_GAIN Y,Y -WEIGHT_IMAGE %s -GAIN_KEY EXPTIME*CCDGAIN "
             "-CHECKIMAGE_NAME %s,%s,%s") % (mosaic, catalog, 25.956, weight,
                                             segmentation, background, subtracted)
    # NOTE(review): os.system goes through the shell; fine for trusted local
    # filenames, but subprocess.run([...]) would be safer for arbitrary input.
    os.system(sextr)
def run_orbit(master_root='icxe15010', RAW_PATH = '../RAW/'):
    """Full per-orbit reduction: fetch raw FLTs, prep/align the association,
    then split, background-subtract and CR-clean the individual reads.

    master_root -- association rootname; ``<master_root>_asn.fits`` must exist.
    RAW_PATH    -- directory holding the gzipped raw ``*_flt.fits.gz`` files.
    """
    asn = threedhst.utils.ASNFile('{}_asn.fits'.format(master_root))
    # Fetch and unpack the raw FLT file for every exposure in the association.
    for root in asn.exposures:
        os.system('rsync -av {}/{}_flt.fits.gz .'.format(RAW_PATH, root))
        os.system('gunzip -f {}_flt.fits.gz'.format(root))
    prep_FLTs(root=master_root)
    # Split IMA reads and build per-pointing associations / backgrounds.
    for root in asn.exposures:
        split_IMA(root=root)
        make_pointing_asn(root=root, master_root=master_root)
        subtract_background_reads(root=root, master_root=master_root)
    # The first exposure defines the reference frame (align=False);
    # all remaining exposures are aligned to it.
    fix_cosmic_rays(root=asn.exposures[0], master_root=master_root)
    align_reads(root=asn.exposures[0], align=False)
    for root in asn.exposures[1:]:
        fix_cosmic_rays(root=root, master_root=master_root)
        align_reads(root=root)
    #files = glob.glob(master_root[:6]+'*_*_flt.fits')
    #drizzlepac.astrodrizzle.AstroDrizzle(files, output='test3', clean=True, final_scale=0.1, final_pixfrac=0.8, resetbits=0, context=False, preserve=False, skysub = True, skywidth = 0., skystat = '', skylower = None, skyupper = None, skyclip = 0, skylsigma = 0.0, skyusigma = 0.0, skyuser = 'MDRIZSKY', skyfile = '', wcskey = 'TWEAK', driz_separate = False, driz_sep_wcs = False, median = False, blot = False, driz_cr = False, driz_combine = True, final_wht_type = 'IVM', final_kernel = 'square', final_wt_scl = 'exptime', final_fillval = 0,final_bits = 576, final_units = 'cps', final_wcs = True, driz_sep_bits = 576, final_rot=0, final_ra=1.501375000000E+02, final_dec=2.597027777778E+00,driz_cr_snr='8.0 5.0', driz_cr_scale = '2.5 0.7', final_outnx=9100, final_outny=10200)
def make_cutouts(root='test6_drz', catalog='test6_drz_sci.cat', DIR='cosmos_wide_stamps/'):
    """Cut 80x80-pixel image/weight/segmentation stamps around catalog sources.

    Fixes:
    - Python-2 ``print`` statement replaced with the function form.
    - Slice indices were floats (``np.round`` returns a float and ``sz`` was
      ``40.``), which modern numpy rejects; cast to int.
    - ``os.system('mkdir ...')`` replaced with portable ``os.makedirs``.
    """
    import my_python.mk_region_file
    img = fits.open(root+'_sci.fits')
    wht = fits.open(root+'_wht.fits')
    seg = fits.open(root+'_sci_seg.fits')
    cat = table.read(catalog, format='ascii.sextractor')
    # Sources inside the field cut and larger than ~2 pix half-light radius.
    index = np.where((cat['X_WORLD'] < 150.20664) & (cat['Y_WORLD'] < 2.5588802) & (cat['FLUX_RADIUS'] > 2.))[0]
    my_python.mk_region_file.mk_region_file_from_lists(cat['X_WORLD'][index], cat['Y_WORLD'][index], outfile='size', printids='no', color='cyan')
    sz = 40  # half-size of the stamp in pixels (int: required for slicing)
    im_shape = np.shape(img[0].data)
    if not os.path.exists(DIR):
        os.makedirs(DIR)
    for ii in index:
        print('Making stamp for {}'.format(cat['NUMBER'][ii]))
        # Integer pixel center; np.round returns a float which numpy
        # rejects as a slice index, so cast explicitly.
        x = int(np.round(cat['X_IMAGE'][ii]))
        y = int(np.round(cat['Y_IMAGE'][ii]))
        if (x < sz) or (y < sz) or (x > im_shape[1]-sz) or (y > im_shape[0]-sz):
            continue  # stamp would fall off the image edge
        stamp_img = img[0].data[(y-sz):(y+sz), (x-sz):(x+sz)]
        stamp_wht = wht[0].data[(y-sz):(y+sz), (x-sz):(x+sz)]
        stamp_seg = seg[0].data[(y-sz):(y+sz), (x-sz):(x+sz)]
        out_img = fits.PrimaryHDU(stamp_img)
        out_img.writeto('{}cosmos_wide_{:05d}_img.fits'.format(DIR, cat['NUMBER'][ii]))
        out_wht = fits.PrimaryHDU(stamp_wht)
        out_wht.writeto('{}cosmos_wide_{:05d}_wht.fits'.format(DIR, cat['NUMBER'][ii]))
        out_seg = fits.PrimaryHDU(stamp_seg)
        out_seg.writeto('{}cosmos_wide_{:05d}_seg.fits'.format(DIR, cat['NUMBER'][ii]))
| StarcoderdataPython |
3513678 | from wordcloud_fa import WordCloudFa
# Build a word cloud from the English sample text, display it and save a PNG.
generator = WordCloudFa(include_numbers=False, regexp=r"\w[\w']+")
with open('english-example.txt', 'r') as source:
    contents = source.read()
rendered = generator.generate(contents).to_image()
rendered.show()
rendered.save('english-example.png')
| StarcoderdataPython |
3525100 | <filename>iv/Arrays/coverpoints.py<gh_stars>1-10
class Points():
    """Chebyshev (king-move) path length over a sequence of 2-D points."""

    def coverpoints(self, A, B):
        """Return the minimum number of steps to visit the points
        (A[i], B[i]) in order, moving one unit per step in x, y or both.

        A -- list of x coordinates
        B -- list of y coordinates

        Each segment costs max(|dx|, |dy|): diagonal steps cover one unit
        in both axes at once, so the original formula
        ``min(dx, dy) + |dx - dy|`` simplifies to ``max(dx, dy)``.
        """
        return sum(max(abs(x1 - x0), abs(y1 - y0))
                   for (x0, x1), (y0, y1) in zip(zip(A, A[1:]), zip(B, B[1:])))
def main():
    """Demo: print the path length for a sample set of points."""
    xs = [4, 8, -7, -5, -13, 9, -7, 8]
    ys = [4, -15, -10, -3, -13, 12, 8, -8]
    solver = Points()
    print(solver.coverpoints(xs, ys))
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4835030 | <filename>buildall/core.py
import datetime
from pathlib import PosixPath as PythonPathClass
from subprocess import Popen as PythonPopenClass
# Sentinel timestamps used by the task graph: END_OF_TIME marks "dirty,
# must rebuild"; BEGINNING_OF_TIME marks "no known modification".
# TODO: find a better value
END_OF_TIME = datetime.datetime(2100, 1, 1)
BEGINNING_OF_TIME = datetime.datetime(1970, 1, 1)
class BaseTask:
    """Common behaviour for nodes in the build graph.

    Subclasses implement build() and may override target() and provide a
    ``modification_time`` attribute/property.
    """

    _indent_level = 0
    silent = False
    # Immutable default: the original class-level ``[]`` was a single list
    # shared (and mutable) across every instance; instances receive their
    # own list via Task.__lshift__, so a tuple default is backward-compatible
    # (it is only ever iterated or reassigned).
    _child_tasks = ()

    def __str__(self):
        return self.__class__.__name__

    def build(self, *args):
        """Produce this task's target; must be overridden by subclasses."""
        raise NotImplementedError('You should implement your own build()')

    def target(self):
        """Return the artefact this task produces (None = no artefact)."""
        return None

    def debug(self, msg):
        """Print *msg* indented to this task's depth unless ``silent``."""
        indent = self._indent_level * '\t'
        if not self.silent:
            print(indent + '<%s> ' % self + msg)

    def set_indent_level(self, level):
        self._indent_level = level

    def is_up_to_date(self, dependencies_modification_time):
        """True if this task's target is at least as new as all dependencies."""
        if self.modification_time < dependencies_modification_time:
            self.debug('Target unsatisfied (%s). Will trigger the build !'
                       % self.modification_time)
            return False
        self.debug('Target is up-to-date')
        return True
class Task(BaseTask):
    """A buildable node: child dependencies + a build() action + an
    optional target artefact."""

    @property
    def modification_time(self):
        """Modification time of the target, or BEGINNING_OF_TIME when the
        task has no target."""
        mod_times = [target.modification_time for target in [self.target()] if
                     target is not None]
        if not mod_times:
            return BEGINNING_OF_TIME
        return max(mod_times)

    def make(self):
        """Recursively bring this task up to date.

        Returns END_OF_TIME when the task was (re)built -- propagating
        "dirty" up the graph -- otherwise the target's modification time.
        """
        self.debug('')
        newest_dependency_mod_time = BEGINNING_OF_TIME
        build_params = []
        # Make every child first; build() receives their targets as args.
        for child in self._child_tasks:
            child.set_indent_level(self._indent_level + 1)
            dependency_mod_time = child.make()
            newest_dependency_mod_time = max(newest_dependency_mod_time,
                                             dependency_mod_time)
            build_params.append(child.target())
        # END_OF_TIME from any child means it was rebuilt: rebuild here too.
        if newest_dependency_mod_time == END_OF_TIME:
            # self.debug('At least, one of the dependencies triggered the
            # build')
            self.build(*build_params)
            self.debug('Regeneration succeeded !')
            return END_OF_TIME
        # self.debug('Cannot decide based on dependencies. Checking targets')
        for target in [self.target()]:
            if target is None:
                continue
            target.set_indent_level(self._indent_level + 1)
            if not target.is_up_to_date(newest_dependency_mod_time):
                self.build(*build_params)
                self.debug('Regeneration succeeded !')
                return END_OF_TIME
        self.debug('Nothing to do !')
        return self.modification_time

    def __add__(self, other):
        # ``task + task`` starts a TargetList (see TargetList.__add__).
        return TargetList() + self + other

    def __lshift__(self, other):
        """``task << deps`` attaches dependencies; returns self for chaining."""
        # 'other' can be a task or a list of tasks
        try:
            iter(other)
            self._child_tasks = other
        except TypeError:
            self._child_tasks = [other]
        return self
class TargetList(list):
    """List of tasks accumulated with the ``task + task`` syntax."""

    def __add__(self, other):
        """Append *other* (a single task) or extend with an iterable of tasks.

        Bug fix: the original non-task branch called ``super().__add__(other)``,
        which returns a *new* list and leaves ``self`` untouched, silently
        discarding ``other``; ``extend`` mutates in place as intended.
        """
        if isinstance(other, BaseTask):
            self.append(other)
        else:
            self.extend(other)
        return self
class Path(PythonPathClass, BaseTask):
    """A filesystem dependency: its mtime drives rebuild decisions."""

    def target(self):
        # A path is its own target artefact.
        return self

    @property
    def modification_time(self):
        """File mtime, or 1969-12-31 (< BEGINNING_OF_TIME) if it is missing."""
        if self.exists():
            mod_ts = self.stat().st_mtime_ns
            return datetime.datetime.fromtimestamp(mod_ts / 1000000000)
        return datetime.datetime(1969, 12, 31)

    def make(self):
        """Return the file's mtime when used as a dependency.

        NOTE(review): unlike ``modification_time`` this does not guard on
        exists(), so a missing dependency file raises OSError here --
        presumably intentional (a missing source is a hard error); confirm.
        """
        mod_ts = self.stat().st_mtime_ns
        mod_dt = datetime.datetime.fromtimestamp(mod_ts / 1000000000)
        self.debug('Dependency file exists and its date is %s' % mod_dt)
        return mod_dt
class Popen(PythonPopenClass, BaseTask):
    """A subprocess node: considered satisfied iff the command exits 0."""

    def target(self):
        return self

    def __str__(self):
        return self.__class__.__name__ + ' - ' + str(self.args)

    @property
    def modification_time(self):
        # As a target: success maps to END_OF_TIME (newest possible).
        if self.wait() == 0:
            return END_OF_TIME
        return datetime.datetime(1969, 12, 31)

    def make(self):
        """Return BEGINNING_OF_TIME on success, END_OF_TIME on failure.

        NOTE(review): deliberately asymmetric with ``modification_time``
        above -- as a *dependency*, exit code 0 must never force a rebuild
        while a non-zero code forces every ancestor to rebuild; confirm
        against callers.
        """
        if self.wait() == 0:
            self.debug('Dependency build exited with return code 0 '
                       '=> satisfied')
            return BEGINNING_OF_TIME
        self.debug('Dependency build exited with return code !=0 '
                   '=> Will trigger ancestors build methods')
        return END_OF_TIME
class BuildException(Exception):
    """Raised for errors in the build-task framework."""
    # Fix: stripped dataset-concatenation residue that had been fused onto
    # the final line, which made the class body a syntax error.
    pass
5085935 | #!/usr/bin/env python
# Copyright (c) 2015 <NAME>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import authd
import websockify
# Start a websockify proxy that bridges browser WebSocket clients to the
# VMware MKS console, authenticating via authd.AuthdRequestHandler.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NOTE(review): single-dash long options (-host/-port) are unusual for
    # argparse but kept -- renaming them would break the existing CLI.
    parser.add_argument("-host", help="MKS proxy host (default 'localhost')",
                        default='localhost')
    parser.add_argument("-port", help="MKS proxy port (default 6090)",
                        type=int, default=6090)
    parser.add_argument("--web", help="web location")
    args = parser.parse_args()
    print('Starting MKS proxy on {0}:{1}'.format(args.host, args.port))
    # file_only prevents directory listings when serving the --web location.
    websockify.WebSocketProxy(
        listen_host=args.host,
        listen_port=args.port,
        verbose=True,
        web=args.web,
        file_only=True,
        RequestHandlerClass=authd.AuthdRequestHandler
    ).start_server()
| StarcoderdataPython |
5123325 | <reponame>thorwhalen/ut<filename>semantics/scrap.py
__author__ = 'thor'
from ut.util.decorators import autoargs
# Scratch checks for the @autoargs decorator: each class below exercises one
# decorator mode and the asserts that follow verify which constructor
# arguments were auto-assigned as attributes.

# Plain @autoargs(): every parameter becomes an attribute.
class A(object):
    @autoargs()
    def __init__(self, foo, path, debug=False):
        pass

a = A('rhubarb', 'pie', debug=True)
assert(a.foo == 'rhubarb')
assert(a.path == 'pie')
assert(a.debug == True)

# *args varargs are captured as a tuple attribute ``args``.
class B(object):
    @autoargs()
    def __init__(self, foo, path, debug=False, *args):
        pass

a = B('rhubarb', 'pie', True, 100, 101)
assert(a.foo == 'rhubarb')
assert(a.path == 'pie')
assert(a.debug == True)
assert(a.args == (100, 101))

# **kw keyword arguments become individual attributes (e.g. ``verbose``).
class C(object):
    @autoargs()
    def __init__(self, foo, path, debug=False, *args, **kw):
        pass

a = C('rhubarb', 'pie', True, 100, 101, verbose=True)
assert(a.foo == 'rhubarb')
assert(a.path == 'pie')
assert(a.debug == True)
assert(a.verbose == True)
assert(a.args == (100, 101))

# Positional include-list: only the named parameters are assigned.
# NOTE: class name C is deliberately reused (shadows the previous C).
class C(object):
    @autoargs('bar', 'baz', 'verbose')
    def __init__(self, foo, bar, baz, verbose=False):
        pass

a = C('rhubarb', 'pie', 1)
assert(a.bar == 'pie')
assert(a.baz == 1)
assert(a.verbose == False)
try:
    getattr(a, 'foo')  # excluded -> AttributeError expected
except AttributeError:
    print("Yep, that's what's expected!")

# exclude= variant: everything but the listed parameters is assigned.
class C(object):
    @autoargs(exclude=('bar', 'baz', 'verbose'))
    def __init__(self, foo, bar, baz, verbose=False):
        pass

a = C('rhubarb', 'pie', 1)
assert(a.foo == 'rhubarb')
try:
    getattr(a, 'bar')  # excluded -> AttributeError expected
except AttributeError:
    print("Yep, that's what's expected!")
| StarcoderdataPython |
5146380 | from typing import Optional
from pydantic import BaseModel, EmailStr, Field, HttpUrl, SecretStr
# Shared properties
class UserBase(BaseModel):
    """Fields common to every user schema variant; all optional here."""
    email: Optional[EmailStr] = Field(None, example="<EMAIL>")
    username: Optional[str] = Field(None, example="perryshari")
    bio: Optional[str] = None
    image: Optional[str] = None
# Properties to receive via API on creation
class UserCreate(UserBase):
    """Registration payload: email/username/password become required."""
    email: EmailStr = Field(..., example="<EMAIL>")
    username: str = Field(..., example="perryshari")
    password: SecretStr = Field(..., example="<PASSWORD>")
# Properties shared by models stored in DB
class UserInDBBase(UserBase):
    """Adds the DB primary key; orm_mode lets it read ORM objects."""
    id: Optional[int] = None

    class Config:
        orm_mode = True
# Additional properties stored in DB
class UserInDB(UserInDBBase):
    """Internal-only representation; never returned by the API."""
    hashed_password: str
# Additional properties to return via API
# class User(UserInDBBase):
#     pass

class UserWithToken(UserBase):
    """User representation returned to clients, with a JWT access token."""
    token: str = Field(
        ...,
        example="<KEY>",
        # noqa
    )
class UserResponse(BaseModel):
    """Envelope wrapping the user object in responses: {"user": {...}}."""
    user: UserWithToken
class LoginUser(BaseModel):
    """Credentials payload for the login endpoint.

    Bug fix: the password annotation had been mangled into the placeholder
    ``<PASSWORD>`` (a syntax error); restored to ``str``, matching the plain
    string shown in the schema example below.
    """
    email: str
    password: str

    class Config:
        schema_extra = {
            "example": {
                "email": "<EMAIL>",
                "password": "<PASSWORD>",
            }
        }
# Properties to receive via API on update
class UserUpdate(UserBase):
    """Partial-update payload; every field, including password, is optional."""
    password: Optional[str] = Field(None, example="<PASSWORD>")
class UserInUpdate(BaseModel):
    """Alternate update schema with stricter image/email validation types."""
    username: Optional[str] = None
    email: Optional[EmailStr] = None
    password: Optional[str] = None
    bio: Optional[str] = None
    image: Optional[HttpUrl] = None
| StarcoderdataPython |
6557342 | <gh_stars>0
# Load a previously trained SimpleAutoEncoder checkpoint path and rebuild the
# model with the same hyper-parameters used at training time.
# NOTE(review): this fragment assumes `os`, `torch`, `model_bib_path` and
# `df_data_anormal` are defined earlier (e.g. in a preceding notebook cell)
# -- confirm before running standalone.
print('Load trained simple AE..')
model_name = '20200302_firstAE_model.pt'
model_fn = os.path.join(model_bib_path, model_name)
print(model_fn)

from models.SimpleAutoEncoder import SimpleAutoEncoder

# Fixed seed for reproducible weight initialisation.
torch.manual_seed(42)
# One input unit per data column (typo 'num_inpus' kept: code unchanged here).
num_inpus = len(df_data_anormal.columns)
val_lambda = 42 * 0.01
model = SimpleAutoEncoder(num_inputs=num_inpus, val_lambda=val_lambda)
print(model)
| StarcoderdataPython |
3417828 | <filename>imageInImage.py
from utils.display import display_image
from utils.manipulateImage import rotate_image_by
from utils.processImage import match_templates
from sound.notificationSounds import play_audio_file
from sound.notificationSounds import text_to_speech
# Bug fix: cv2 (OpenCV) is used throughout but was never imported anywhere
# in this script, so it crashed with NameError on the first cv2 call.
import cv2

# the image filepaths: the main image and two candidate sub-images
filepath_main = './images/RaspberryPi.png'
filepath_subimage_1 = './images/usb_a.png'
filepath_subimage_2 = './images/chipset.png'

# read the main image and the template to search for inside it
main_image = cv2.imread(filepath_main)
subimage = cv2.imread(filepath_subimage_2)

# list of comparison methods for easy selection
# methods = ["cv2.TM_CCOEFF", "cv2.TM_CCOEFF_NORMED", "cv2.TM_CCORR",
#            "cv2.TM_CCORR_NORMED", "cv2.TM_SQDIFF", "cv2.TM_SQDIFF_NORMED"]
methods = ["cv2.TM_CCOEFF"]

# # start sound
# start = play_audio_file("./sound/Script_started.wav")
# start.play()
# start.close()
#
match_templates(main_image, subimage, methods)

# # end sound
# end = play_audio_file("./sound/Script_finished.wav")
# end.play()
# end.close()

# text_to_speech("this is text to speech")

# rotated_image = rotate_image_by(90, subimage)
# match_templates(main_image, rotated_image, methods)
# rotated_image = rotate_image_by(180, subimage)
# match_templates(main_image, rotated_image, methods)
# rotated_image = rotate_image_by(270, subimage)
# match_templates(main_image, rotated_image, methods)
| StarcoderdataPython |
3533019 | <filename>tests/test_distmatrix.py
import numpy as np
import os
import unittest
from alfpy import word_distance
from alfpy.utils import distmatrix
from . import utils
class TestDistMatrix(unittest.TestCase):
    """Tests for alfpy.utils.distmatrix: formatting, min/max, normalization,
    phylip/pairwise file output, iteration and matrix construction."""

    def setUp(self):
        # Small symmetric 3x3 distance matrix shared by most tests.
        id_list = ['seq1', 'seq2', 'seq3']
        data = np.array([[0, 0.3531587, 0.35509333],
                         [0.3531587, 0, 0.295394],
                         [0.35509333, 0.295394, 0.]
                         ])
        self.matrix = distmatrix.Matrix(id_list, data)
        self.output_filename = utils.get_test_data('distmatrix.txt')

    def test_format(self):
        # Default formatting: phylip-style with 7 decimals.
        exp = [
            '   3',
            'seq1       0.0000000 0.3531587 0.3550933',
            'seq2       0.3531587 0.0000000 0.2953940',
            'seq3       0.3550933 0.2953940 0.0000000'
        ]
        self.assertEqual(self.matrix.format(), "\n".join(exp))

    def test_format_decimal3(self):
        # Explicit decimal-places argument.
        exp = [
            '   3',
            'seq1       0.000 0.353 0.355',
            'seq2       0.353 0.000 0.295',
            'seq3       0.355 0.295 0.000'
        ]
        self.assertEqual(self.matrix.format(3), "\n".join(exp))

    def test_min(self):
        self.assertEqual(self.matrix.min(), 0)

    def test_max(self):
        self.assertEqual(self.matrix.max(), 0.35509332999999998)

    def test_is_zero(self):
        self.assertFalse(self.matrix.is_zero())

    def test_normalize(self):
        # After normalization the largest distance becomes exactly 1.
        self.matrix.normalize()
        exp = [
            "   3",
            "seq1       0.0000000 0.9945518 1.0000000",
            "seq2       0.9945518 0.0000000 0.8318771",
            "seq3       1.0000000 0.8318771 0.0000000",
        ]
        self.assertEqual(self.matrix.format(), "\n".join(exp))

    def test_write_to_file_phylip(self):
        # Round-trip: write phylip format, read back, compare, clean up.
        oh = open(self.output_filename, 'w')
        self.matrix.write_to_file(oh)
        oh.close()
        fh = open(self.output_filename)
        result = fh.read()
        fh.close()
        os.remove(self.output_filename)
        exp = [
            '   3',
            'seq1       0.0000000 0.3531587 0.3550933',
            'seq2       0.3531587 0.0000000 0.2953940',
            'seq3       0.3550933 0.2953940 0.0000000\n'
        ]
        self.assertEqual(result, "\n".join(exp))

    def test_write_to_file_pairwise(self):
        # Pairwise format: one tab-separated line per sequence pair.
        oh = open(self.output_filename, 'w')
        self.matrix.write_to_file(oh, 'pairwise')
        oh.close()
        fh = open(self.output_filename)
        result = fh.read()
        fh.close()
        os.remove(self.output_filename)
        exp = [
            "seq1\tseq2\t0.3531587",
            "seq1\tseq3\t0.3550933",
            "seq2\tseq3\t0.2953940\n"
        ]
        self.assertEqual(result, "\n".join(exp))

    def test_write_to_file_pairwise_decimal3(self):
        # Pairwise format with reduced precision.
        oh = open(self.output_filename, 'w')
        self.matrix.write_to_file(oh, 'pairwise', 3)
        oh.close()
        fh = open(self.output_filename)
        result = fh.read()
        fh.close()
        os.remove(self.output_filename)
        exp = [
            "seq1\tseq2\t0.353",
            "seq1\tseq3\t0.355",
            "seq2\tseq3\t0.295\n"
        ]
        self.assertEqual(result, "\n".join(exp))

    def test_iter(self):
        # Iterating a matrix yields (i, j, id_i, id_j, distance) tuples
        # for the upper triangle only.
        exp = [(0, 1, 'seq1', 'seq2', 0.35315869999999999),
               (0, 2, 'seq1', 'seq3', 0.35509332999999998),
               (1, 2, 'seq2', 'seq3', 0.29539399999999999)]
        self.assertEqual(list(self.matrix), exp)

    def test_create_matrix(self):
        # Build a matrix from word-count vectors via minkowski distance.
        l = [[3, 6, 4, 1, 3, 4, 3, 0, 1, 1, 6, 4, 5, 0, 3, 4],
             [0, 3, 0, 3, 0, 0, 0, 2, 9, 0, 3, 3, 0, 6, 3, 6],
             [9, 0, 0, 3, 0, 0, 0, 2, 6, 0, 3, 3, 0, 3, 3, 3]]
        vector = np.array(l)
        dist = word_distance.Distance(vector, 'minkowski')
        id_list = ['seq1', 'seq2', 'seq3']
        matrix = distmatrix.create(id_list, dist)
        exp = [
            '   3',
            'seq1       0.0000000 14.6969385 14.1774469',
            'seq2       14.6969385 0.0000000 10.8166538',
            'seq3       14.1774469 10.8166538 0.0000000'
        ]
        self.assertEqual(matrix.format(), "\n".join(exp))

    def test_highcharts(self):
        self.assertEqual(len(self.matrix.highcharts()), 3)

    def test_read_highcharts_matrix(self):
        # Rebuild a matrix from highcharts-style [i, j, raw, value] rows and
        # fingerprint the formatted output.
        id_list = ['seq1', 'seq2', 'seq3']
        data = [[0, 1, 0.35, 0.19], [0, 2, 1.0, 0.55], [1, 2, 0.88, 0.48]]
        matrix = distmatrix.read_highcharts_matrix(id_list, data)
        md5 = utils.calc_md5(matrix.format())
        self.assertEqual(md5, "476c8f5d284a84ee3c7c419bde2d7658")
# Test-runner entry point.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1676235 | <gh_stars>1-10
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__date__ = "17 Nov 2018"
# !!! SEE CODERULES.TXT !!!
from silx.gui import qt
from ..core import singletons as csi
from ..core import commons as cco
from .propWidget import QLineEditSelectRB, PropWidget
# from . import propsOfData as gpd
class ColumnFormatWidget(PropWidget):
    """Two-tab Qt widget for configuring how column/HDF5 data files are
    parsed: a 'file header' tab (how to skip headers) and a 'data location'
    tab (which dataset/column feeds each node array)."""

    def __init__(self, parent=None, node=None):
        """Build both tabs and register them as one 'data format' prop group."""
        super(ColumnFormatWidget, self).__init__(parent, node)
        self.shouldRemoveNonesFromProps = True

        self.tabWidget = qt.QTabWidget(self)
        self.tabWidget.setStyleSheet(
            # "QTabBar::tab:selected {background: palette(window);}"
            "QTabWidget>QWidget>QWidget{background: palette(window);}")
        self.headerTab = self.makeHeaderTab()
        self.tabWidget.addTab(self.headerTab, 'file header')
        self.dataLocationTab = self.makeDataLocationTab()
        ind = self.tabWidget.addTab(self.dataLocationTab, 'data location')
        self.tabWidget.setTabToolTip(
            ind, "Use context menu on one or more HDF5/SPEC datasets.\n"
            "For column files use functions of variables `Col1`, `Col2` etc")

        layout = qt.QVBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self.tabWidget)
        layout.addStretch()
        self.setLayout(layout)
        # Open on the 'data location' tab by default.
        self.tabWidget.setCurrentIndex(1)

        self.registerPropGroup(
            self, [self.headerTab, self.dataLocationTab], 'data format')

    def makeHeaderTab(self):
        """Create the 'file header' tab: three mutually exclusive ways of
        delimiting the header (N lines / comment prefix / last-line marker),
        registered as an exclusive prop group on dataFormat.*."""
        self.headerNRB = qt.QRadioButton("has")
        self.headerNEdit = QLineEditSelectRB(rb=self.headerNRB)
        self.headerNEdit.setFixedWidth(28)
        self.headerNEdit.setValidator(
            qt.QIntValidator(0, cco.MAX_HEADER_LINES, self))
        self.headerNLabel2 = qt.QLabel("lines")

        self.headerSRB = qt.QRadioButton("has lines beginning with")
        self.headerSEdit = QLineEditSelectRB(rb=self.headerSRB)
        self.headerSEdit.setFixedWidth(16)

        self.headerERB = qt.QRadioButton("ends with line containing")
        self.headerEEdit = QLineEditSelectRB(rb=self.headerERB)
        self.headerEEdit.setMinimumWidth(30)

        # Default mode: header lines start with a comment character.
        self.headerSRB.setChecked(True)

        headerLayoutN = qt.QHBoxLayout()
        headerLayoutN.addWidget(self.headerNRB)
        headerLayoutN.addWidget(self.headerNEdit)
        headerLayoutN.addWidget(self.headerNLabel2)
        headerLayoutN.addStretch()

        headerLayoutS = qt.QHBoxLayout()
        headerLayoutS.addWidget(self.headerSRB)
        headerLayoutS.addWidget(self.headerSEdit)
        headerLayoutS.addStretch()

        headerLayoutE = qt.QHBoxLayout()
        headerLayoutE.addWidget(self.headerERB)
        headerLayoutE.addWidget(self.headerEEdit, 1)
        headerLayoutE.addStretch()

        headerLayout = qt.QVBoxLayout()
        headerLayout.setContentsMargins(2, 2, 2, 2)
        headerLayout.addLayout(headerLayoutN)
        headerLayout.addLayout(headerLayoutS)
        headerLayout.addLayout(headerLayoutE)

        headerTab = qt.QWidget(self)
        headerTab.setLayout(headerLayout)
        headerTab.setSizePolicy(qt.QSizePolicy.Expanding, qt.QSizePolicy.Fixed)

        # Property keys matching the three radio buttons, in order.
        self.headerKW = 'skiprows', 'comments', 'lastSkipRowContains'
        self.fullHeaderKW = ['dataFormat.' + kw for kw in self.headerKW]
        self.radioButtons = self.headerNRB, self.headerSRB, self.headerERB
        self.edits = self.headerNEdit, self.headerSEdit, self.headerEEdit

        self.registerExclusivePropGroup(
            headerTab, [self.radioButtons, self.edits], 'file header',
            props=self.fullHeaderKW, convertTypes=[int, None, None])

        return headerTab

    def makeDataLocationTab(self):
        """Create the 'data location' tab: one (label, expression, slice)
        row per non-auxiliary node array, plus an optional x-factor field
        for the abscissa array.

        NOTE(review): returns None when self.node is None -- the caller in
        __init__ would then add a None tab; presumably the node is always
        set in practice, confirm.
        """
        if self.node is None:
            return

        self.dataEdits = []
        self.sliceEdits = []
        dataLayout = qt.QVBoxLayout()
        for ia, arrayName in enumerate(self.node.arrays):
            role = self.node.getProp(arrayName, 'role')
            # Arrays with role starting '0' are not user-configurable.
            if role.startswith('0'):
                continue
            arrayLayout = qt.QHBoxLayout()
            arrayLayout.setContentsMargins(0, 0, 0, 0)

            lbl = self.node.getProp(arrayName, 'qLabel')
            unit = self.node.getProp(arrayName, 'qUnit')
            strUnit = u"({0})".format(unit) if unit else ""
            dataLabel = qt.QLabel(u"{0}{1}".format(lbl, strUnit))
            dataEdit = qt.QLineEdit()
            dataEdit.setMinimumWidth(62)
            dataEdit.setSizePolicy(
                qt.QSizePolicy.Expanding, qt.QSizePolicy.Fixed)
            self.dataEdits.append(dataEdit)
            sliceEdit = qt.QLineEdit()
            sliceEdit.setSizePolicy(
                qt.QSizePolicy.Expanding, qt.QSizePolicy.Fixed)
            self.sliceEdits.append(sliceEdit)
            # Slice edits grow with their content and start hidden.
            sliceEdit.textChanged.connect(self._resizeToContent)
            sliceEdit.hide()

            arrayLayout.addWidget(dataLabel)
            arrayLayout.addWidget(dataEdit, 1)
            arrayLayout.addWidget(sliceEdit, 0)
            if role.startswith('x'):
                # Abscissa array gets an extra multiplicative factor field.
                dataXLabelTimes = qt.QLabel(u"×")
                self.dataXEditTimes = qt.QLineEdit()
                self.dataXEditTimes.setFixedWidth(36)
                arrayLayout.addWidget(dataXLabelTimes)
                arrayLayout.addWidget(self.dataXEditTimes)
                self.registerPropWidget(
                    (dataXLabelTimes, self.dataXEditTimes),
                    dataXLabelTimes.text(),
                    'dataFormat.xFactor', convertType=float, skipDefault=1,
                    textFormat='strip0')
            else:
                self.dataXEditTimes = None
            dataLayout.addLayout(arrayLayout)

            self.registerPropWidget(
                (dataLabel, dataEdit), dataLabel.text(),
                # ('dataFormat.dataSource', ia), convertType=int)
                'dataFormat.dataSource.int({0})'.format(ia), convertType=int)
            self.registerPropWidget(
                sliceEdit, 'slice', 'dataFormat.slices.int({0})'.format(ia),
                hideEmpty=True)

        dataLocationTab = qt.QWidget(self)
        dataLocationTab.setLayout(dataLayout)
        dataLocationTab.setSizePolicy(
            qt.QSizePolicy.Expanding, qt.QSizePolicy.Fixed)

        edits = self.dataEdits
        if self.dataXEditTimes is not None:
            edits += [self.dataXEditTimes]
        self.registerPropGroup(dataLocationTab, edits, 'data location')

        return dataLocationTab

    def _resizeToContent(self, text):
        # Grow the sending slice edit to fit '[text]'.
        edit = self.sender()
        fm = qt.QFontMetrics(edit.font())
        edit.setFixedWidth(fm.width('['+text+']'))
        self.adjustSize()

    def setHeaderEnabled(self, enabled=True):
        """Enable/disable the header tab (e.g. for HDF5 data without headers)."""
        self.tabWidget.setTabEnabled(0, enabled)
        self.headerTab.setEnabled(enabled)  # to disable context menu entry
        if self.tabWidget.currentIndex() == 0:
            self.tabWidget.setCurrentIndex(1)

    def updateProp(self):
        """Re-read changed items and replot this node and all downstream ones."""
        needReplot = False
        for it in csi.selectedItems:
            if it.hasChanged:
                needReplot = True
                it.read_data()
                it.hasChanged = False
        if needReplot:
            self.node.widget.replot()
            for subnode in self.node.downstreamNodes:
                subnode.widget.replot()
        # print([cco.getDotAttr(it, 'dataFormat') for it in csi.selectedItems])

    def getDataFormat(self, needHeader):
        """Collect the current widget state into a dataFormat dict.

        Returns None when any field fails to convert (e.g. non-numeric
        skiprows or xFactor).  NOTE(review): the bare ``except:`` (kept
        as-is, # noqa) also swallows unrelated errors -- deliberate
        best-effort behaviour here, but worth narrowing to ValueError.
        """
        dres = {}
        try:
            if needHeader:
                for rb, ed, kw in zip(
                        self.radioButtons, self.edits, self.headerKW):
                    if rb.isChecked():
                        txt = ed.text()
                        if kw == 'skiprows':
                            txt = int(txt)
                        dres[kw] = txt

            cols = [edit.text() for edit in self.dataEdits]
            dres['dataSource'] = cols

            slices = [edit.text() for edit in self.sliceEdits]
            dres['slices'] = slices

            if self.dataXEditTimes is not None:
                txt = self.dataXEditTimes.text()
                if txt:
                    dres['xFactor'] = float(txt)
        except:  # noqa
            return
        return dres
| StarcoderdataPython |
17332 | <reponame>Aoi-hosizora/NER-BiLSTM-CRF-Affix-PyTorch
import argparse
import json
import matplotlib.pyplot as plt
import numpy as np
import pickle
import time
import torch
from torch import optim
from typing import Tuple, List, Dict
import dataset
from model import BiLSTM_CRF
import utils
def parse_args(argv=None):
    """Parse command-line options for the NER training script.

    argv -- optional list of argument strings; defaults to sys.argv[1:].
            Added (backward-compatibly) so tests can parse explicit lists
            without touching sys.argv.

    The integer flags (--use_crf etc.) act as booleans on the command line
    and are normalised to real bools; use_gpu additionally requires CUDA to
    actually be available.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_train', type=str, default='./data/eng.train')
    parser.add_argument('--dataset_val', type=str, default='./data/eng.testa')
    parser.add_argument('--dataset_test', type=str, default='./data/eng.testb')
    parser.add_argument('--pretrained_glove', type=str, default='./data/glove.6B.100d.txt')
    parser.add_argument('--output_mapping', type=str, default='./output/mapping.pkl')
    parser.add_argument('--output_affix_list', type=str, default='./output/affix_list.json')
    parser.add_argument('--use_crf', type=int, default=1)
    parser.add_argument('--add_cap_feature', type=int, default=1)
    parser.add_argument('--add_affix_feature', type=int, default=1)
    parser.add_argument('--use_gpu', type=int, default=1)
    parser.add_argument('--model_path', type=str, default='./model')
    parser.add_argument('--graph_path', type=str, default='./output')
    parser.add_argument('--eval_path', type=str, default='./evaluate/temp')
    parser.add_argument('--eval_script', type=str, default='./evaluate/conlleval.pl')
    args = parser.parse_args(argv)

    # Normalise 0/1 integer flags to booleans.
    args.use_crf = args.use_crf != 0
    args.add_cap_feature = args.add_cap_feature != 0
    args.add_affix_feature = args.add_affix_feature != 0
    args.use_gpu = args.use_gpu != 0
    # GPU only if both requested and actually available.
    args.use_gpu = args.use_gpu and torch.cuda.is_available()
    return args
def load_datasets(train_path: str, val_path: str, test_path: str, pretrained_glove: str, output_mapping: str, output_affix_list: str):
    """Load the three CoNLL-style datasets, build word/char/tag/affix
    mappings, attach pretrained GloVe embeddings and persist the mappings.

    Returns ((train_data, val_data, test_data),
             (word_to_id, char_to_id, tag_to_id, id_to_tag),
             word_embedding,
             (prefix_dicts, suffix_dicts)).

    Side effects: writes the mapping pickle to *output_mapping* and the
    affix lists as JSON to *output_affix_list*; prints dataset statistics.
    """
    # Truncated subsets for faster experimentation (see '<<<' markers).
    train_sentences = dataset.load_sentences(train_path)[:14000]
    val_sentences = dataset.load_sentences(val_path)[:1500]  # <<<
    test_sentences = dataset.load_sentences(test_path)[-1500:]  # <<<

    # Vocabularies are built from the training split only.
    dico_words, _, _ = dataset.word_mapping(train_sentences)
    _, char_to_id, _ = dataset.char_mapping(train_sentences)
    _, tag_to_id, id_to_tag = dataset.tag_mapping(train_sentences)
    # Extend the word vocabulary with pretrained GloVe entries (dim=100).
    _, word_to_id, _, word_embedding = dataset.load_pretrained_embedding(dico_words.copy(), pretrained_glove, word_dim=100)

    train_data = dataset.prepare_dataset(train_sentences, word_to_id, char_to_id, tag_to_id)
    val_data = dataset.prepare_dataset(val_sentences, word_to_id, char_to_id, tag_to_id)
    test_data = dataset.prepare_dataset(test_sentences, word_to_id, char_to_id, tag_to_id)
    # Attach prefix/suffix features in-place to all three splits.
    prefix_dicts, suffix_dicts = dataset.add_affix_to_datasets(train_data, val_data, test_data)

    with open(output_mapping, 'wb') as f:
        mappings = {'word_to_id': word_to_id, 'tag_to_id': tag_to_id, 'char_to_id': char_to_id, 'word_embedding': word_embedding}
        pickle.dump(mappings, f)
    with open(output_affix_list, 'w') as f:
        json.dump([prefix_dicts, suffix_dicts], f, indent=2)

    print('Datasets status:')
    print('#train_data: {} / #val_data: {} / #test_data: {}'.format(len(train_data), len(val_data), len(test_data)))
    print('#word_to_id: {}, #char_to_id: {}, #tag_to_id: {}, #prefix_dicts: {}, #suffix_dicts: {}, '.format(len(word_to_id), len(char_to_id), len(tag_to_id), len(prefix_dicts), len(suffix_dicts)))
    print('#prefixes_2/3/4: [{}, {}, {}], #suffixes_2/3/4: [{}, {}, {}]'.format(len(prefix_dicts[1]), len(prefix_dicts[2]), len(prefix_dicts[3]), len(suffix_dicts[1]), len(suffix_dicts[2]), len(suffix_dicts[3])))

    return (train_data, val_data, test_data), (word_to_id, char_to_id, tag_to_id, id_to_tag), word_embedding, (prefix_dicts, suffix_dicts)
def train(model: BiLSTM_CRF, device: str, train_data: List[dataset.Data], val_data: List[dataset.Data], test_data: List[dataset.Data], model_path: str, graph_path: str, **kwargs):
    """Train the BiLSTM-CRF with SGD, periodically logging, checkpointing,
    evaluating on the validation/test sets, and saving loss/acc/F1 plots.

    kwargs are forwarded to evaluate() (tag mappings and eval-script paths).
    Side effects: writes checkpoints to model_path and plots/logs to
    graph_path.

    Bug fix: the checkpoint filename previously used the *totals*
    ``epochs + 1, batches + 1``, so every save overwrote the same file;
    it now uses the current ``epoch + 1, batch + 1`` as the log messages do.
    """
    start_timestamp = time.time()
    lr = 0.015
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    total_loss_log = 0
    total_loss_plot = 0
    losses_plot, accuracies_plots, f1scores_plots = [], [[], []], [[], []]
    train_count = 0

    epochs = 10
    batches = len(train_data)
    log_every = 100
    save_every = int(batches / 2)
    plot_every = 100
    eval_every = 700

    print('\nStart training, totally {} epochs, {} batches...'.format(epochs, batches))
    for epoch in range(0, epochs):
        # Visit sentences in a fresh random order every epoch.
        for batch, index in enumerate(np.random.permutation(batches)):
            model.train()
            train_count += 1
            data = train_data[index]
            words_in = torch.LongTensor(data.words).to(device)
            chars_mask = torch.LongTensor(data.chars_mask).to(device)
            chars_length = data.chars_length
            chars_d = data.chars_d
            caps = torch.LongTensor(data.caps).to(device)
            tags = torch.LongTensor(data.tags).to(device)
            words_prefixes = torch.LongTensor(data.words_prefix_ids).to(device)
            words_suffixes = torch.LongTensor(data.words_suffix_ids).to(device)

            feats = model(words_in=words_in, chars_mask=chars_mask, chars_length=chars_length, chars_d=chars_d, caps=caps, words_prefixes=words_prefixes, words_suffixes=words_suffixes)
            # Sentence-length-normalised loss.
            loss = model.calc_loss(feats, tags) / len(data.words)
            total_loss_log += loss.item()
            total_loss_plot += loss.item()

            model.zero_grad()
            loss.backward()
            # Gradient clipping keeps the LSTM training stable.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
            optimizer.step()

            if train_count % log_every == 0:
                avg_loss_log = total_loss_log / log_every
                total_loss_log = 0
                print('{} Epoch: {}/{}, batch: {}/{}, train loss: {:.4f}, time: {}'.format(
                    utils.time_string(), epoch + 1, epochs, batch + 1, batches, avg_loss_log, utils.time_since(start_timestamp, (epoch * batches + batch) / (epochs * batches))))
            if train_count % plot_every == 0:
                avg_loss_plot = total_loss_plot / plot_every
                total_loss_plot = 0
                losses_plot.append(avg_loss_plot)
            if (train_count % save_every == 0) or (batch == batches - 1 and epoch == epochs - 1):
                # Fixed: use the current epoch/batch, not the totals, so
                # checkpoints no longer overwrite each other.
                torch.save(model, '{}/savepoint_epoch{}_batch{}.pth'.format(model_path, epoch + 1, batch + 1))
            if train_count % eval_every == 0:
                print('\n{} Evaluating on validating dataset (epoch {}/{}, batch {}/{})...'.format(utils.time_string(), epoch + 1, epochs, batch + 1, batches))
                acc1, _, _, f1_score1 = evaluate(model=model, device=device, dataset=val_data, **kwargs)
                print('\n{} Evaluating on testing dataset (epoch {}/{}, batch {}/{})...'.format(utils.time_string(), epoch + 1, epochs, batch + 1, batches))
                acc2, _, _, f1_score2 = evaluate(model=model, device=device, dataset=test_data, **kwargs)
                accuracies_plots[0].append(acc1)
                accuracies_plots[1].append(acc2)
                f1scores_plots[0].append(f1_score1)
                f1scores_plots[1].append(f1_score2)
                print("\nContinue training...")
        # end batch

        # Learning-rate decay.
        # Referred from https://github.com/ZhixiuYe/NER-pytorch.
        new_lr = lr / (1 + 0.05 * train_count / len(train_data))
        utils.adjust_learning_rate(optimizer, lr=new_lr)
    # end epoch

    end_timestamp = time.time()
    start_time_str = utils.time_string(start_timestamp)
    end_time_str = utils.time_string(end_timestamp)
    print('Start time: {}, end time: {}, totally spent time: {:d}min'.format(start_time_str, end_time_str, int((end_timestamp - start_timestamp) / 60)))

    # Dump the raw plot series for later inspection.
    with open("{}/plots.log".format(graph_path), 'w') as f:
        f.write("time: {}\n\n".format(end_time_str))
        f.write("loss:\n[{}]\n\n".format(', '.join([str(i) for i in losses_plot])))
        f.write("acc1:\n[{}]\n\n".format(', '.join([str(i) for i in accuracies_plots[0]])))
        f.write("acc2:\n[{}]\n\n".format(', '.join([str(i) for i in accuracies_plots[1]])))
        f.write("f1:\n[{}]\n\n".format(', '.join([str(i) for i in f1scores_plots[0]])))
        f.write("f2:\n[{}]\n\n".format(', '.join([str(i) for i in f1scores_plots[1]])))

    epochs = list(range(1, len(losses_plot) + 1))
    plt.plot(epochs, losses_plot)
    plt.legend(['Training'])
    plt.xlabel('Index')
    plt.ylabel('Loss')
    plt.savefig('{}/loss.pdf'.format(graph_path))
    plt.clf()

    epochs = list(range(1, len(accuracies_plots[0]) + 1))
    plt.plot(epochs, accuracies_plots[0], 'b')
    plt.plot(epochs, accuracies_plots[1], 'r')
    plt.legend(['eng.testa', 'eng.testb'])
    plt.xlabel('Index')
    plt.ylabel('Accuracy')
    plt.savefig('{}/acc.pdf'.format(graph_path))
    plt.clf()

    epochs = list(range(1, len(f1scores_plots[0]) + 1))
    plt.plot(epochs, f1scores_plots[0], 'b')
    plt.plot(epochs, f1scores_plots[1], 'r')
    plt.legend(['eng.testa', 'eng.testb'])
    plt.xlabel('Index')
    plt.ylabel('F1-score')
    plt.savefig('{}/f1-score.pdf'.format(graph_path))

    print("graphs have been saved to {}".format(graph_path))
def evaluate(model: BiLSTM_CRF, device: str, dataset: List[dataset.Data], tag_to_id: Dict[str, int], id_to_tag: Dict[int, str], eval_path: str, eval_script: str) -> Tuple[float, float, float, float]:
    """Score `model` on `dataset` with the CoNLL perl evaluation script.

    Builds one "word gold_tag predicted_tag" line per token, feeds the lines to
    `utils.evaluate_by_perl_script`, prints the script's detailed report plus a
    tag-level confusion matrix, and returns the headline metrics.

    Args:
        model: Trained tagger; switched to eval mode here.
        device: Torch device string the input tensors are moved to.
        dataset: Pre-processed sentences to score.
        tag_to_id: Tag-to-index mapping. The "- 2" below assumes its last two
            entries are the CRF's synthetic start/stop tags, which are excluded
            from the confusion matrix.
        id_to_tag: Inverse tag mapping used for printing.
        eval_path: Working directory for the perl evaluation script.
        eval_script: Path to the CoNLL evaluation script.

    Returns:
        Tuple of (accuracy, precision, recall, f1) as reported by the script.
    """
    prediction = []
    # -2: drop the CRF's synthetic start/stop tags from the matrix.
    confusion_matrix = torch.zeros((len(tag_to_id) - 2, len(tag_to_id) - 2))
    model.eval()
    with torch.no_grad():  # inference only -- no need to track gradients
        for data in dataset:
            words_in = torch.LongTensor(data.words).to(device)
            chars_mask = torch.LongTensor(data.chars_mask).to(device)
            chars_length = data.chars_length
            chars_d = data.chars_d
            caps = torch.LongTensor(data.caps).to(device)
            words_prefixes = torch.LongTensor(data.words_prefix_ids).to(device)
            words_suffixes = torch.LongTensor(data.words_suffix_ids).to(device)
            feats = model(words_in=words_in, chars_mask=chars_mask, chars_length=chars_length, chars_d=chars_d, caps=caps, words_prefixes=words_prefixes, words_suffixes=words_suffixes)
            _, predicted_ids = model.decode_targets(feats)
            for (word, true_id, pred_id) in zip(data.str_words, data.tags, predicted_ids):
                line = ' '.join([word, id_to_tag[true_id], id_to_tag[pred_id]])
                prediction.append(line)
                confusion_matrix[true_id, pred_id] += 1
            # Blank line separates sentences for the CoNLL script.
            prediction.append('')
    eval_lines, acc, pre, rec, f1 = utils.evaluate_by_perl_script(prediction=prediction, eval_path=eval_path, eval_script=eval_script)
    print('Accuracy: {:.4f}, precision: {:.4f}, recall: {:.4f}, f1-score: {:.4f}'.format(acc, pre, rec, f1))
    print('Detailed result:')
    for line in eval_lines:  # fixed: index from enumerate() was unused
        print(line)
    print('Confusion matrix:')
    # Build the row template once; it is loop-invariant (was rebuilt per row).
    row_template = "{: >2}{: >9}{: >15}%s{: >9}" % ("{: >9}" * confusion_matrix.size(0))
    print(row_template.format(
        "ID", "NE", "Total",
        *([id_to_tag[i] for i in range(confusion_matrix.size(0))] + ["Percent"])
    ))
    for i in range(confusion_matrix.size(0)):
        print(row_template.format(
            str(i), id_to_tag[i], str(confusion_matrix[i].sum()),
            *([confusion_matrix[i][j] for j in range(confusion_matrix.size(0))] +
              ["%.3f" % (confusion_matrix[i][i] * 100. / max(1, confusion_matrix[i].sum()))])
        ))
    return acc, pre, rec, f1
def main():
    """Entry point: parse CLI args, load data, build the tagger, and train it."""
    args = parse_args()
    device = 'cuda' if args.use_gpu else 'cpu'
    # Load corpora plus every mapping/embedding resource derived from them.
    datasets, mappings, word_embedding, affix_dicts = load_datasets(
        train_path=args.dataset_train,
        val_path=args.dataset_val,
        test_path=args.dataset_test,
        pretrained_glove=args.pretrained_glove,
        output_mapping=args.output_mapping,
        output_affix_list=args.output_affix_list,
    )
    train_data, val_data, test_data = datasets
    word_to_id, char_to_id, tag_to_id, id_to_tag = mappings
    prefix_dicts, suffix_dicts = affix_dicts
    model = BiLSTM_CRF(
        vocab_size=len(word_to_id),
        tag_to_id=tag_to_id,
        pretrained_embedding=word_embedding,
        word_embedding_dim=100,
        char_count=len(char_to_id),
        char_embedding_dim=50,
        cap_feature_count=4,
        cap_embedding_dim=10,
        # "+ 1" reserves an index for unknown affixes of each length (1..3).
        prefix_counts=[len(prefix_dicts[1]) + 1, len(prefix_dicts[2]) + 1, len(prefix_dicts[3]) + 1],
        suffix_counts=[len(suffix_dicts[1]) + 1, len(suffix_dicts[2]) + 1, len(suffix_dicts[3]) + 1],
        prefix_embedding_dims=[16, 16, 16],
        suffix_embedding_dims=[16, 16, 16],
        char_lstm_hidden_size=25,
        output_lstm_hidden_size=200,
        dropout_p=0.5,
        device=device,
        use_crf=args.use_crf,
        add_cap_feature=args.add_cap_feature,
        add_affix_feature=args.add_affix_feature,
    )
    model.to(device)
    train(
        model=model,
        device=device,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data,
        model_path=args.model_path,
        graph_path=args.graph_path,
        tag_to_id=tag_to_id,
        id_to_tag=id_to_tag,
        eval_path=args.eval_path,
        eval_script=args.eval_script,
    )
# Entry point: run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1686251 | from pgdrive.component.map.base_map import BaseMap, MapGenerateMethod
from pgdrive.envs.pgdrive_env import PGDriveEnv
if __name__ == "__main__":
    def get_image(env):
        """Dump the active image sensor's current frame and a window screenshot."""
        env.vehicle.image_sensors[env.vehicle.config["image_source"]].save_image()
        env.engine.screenshot()

    # Demo configuration: one procedurally generated 12-block map, depth camera
    # as the vehicle's image source, rendered on screen with manual control.
    env_config = {
        "environment_num": 1,
        "traffic_density": 0.1,
        "start_seed": 4,
        "manual_control": True,
        "use_render": True,
        "offscreen_render": True,
        "rgb_clip": True,
        "vehicle_config": dict(depth_camera=(200, 88, True), image_source="depth_camera"),
        "headless_machine_render": False,
        "map_config": {
            BaseMap.GENERATE_TYPE: MapGenerateMethod.BIG_BLOCK_NUM,
            BaseMap.GENERATE_CONFIG: 12,
            BaseMap.LANE_WIDTH: 3.5,
            BaseMap.LANE_NUM: 3,
        },
    }
    env = PGDriveEnv(env_config)
    env.reset()
    # Press "m" in the render window to capture the depth-camera image.
    env.engine.accept("m", get_image, extraArgs=[env])
    for _step in range(1, 100000):
        obs, reward, done, info = env.step([0, 1])
        assert env.observation_space.contains(obs)
        if env.config["use_render"]:
            env.render()
        if done:
            print("Reset")
            env.reset()
    env.close()
| StarcoderdataPython |
6474320 | <filename>surreal/envs/env.py
# Copyright 2019 ducandu GmbH, All Rights Reserved
# (this is a modified version of the Apache 2.0 licensed RLgraph file of the same name).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from abc import ABCMeta, abstractmethod
from collections import defaultdict
import cv2
import logging
import numpy as np
import re
import tensorflow as tf
import threading
import time
from surreal import PATH_EPISODE_LOGS
from surreal.algos.rl_algo import RLAlgoEvent
from surreal.debug import StoreEveryNthEpisode
from surreal.makeable import Makeable
from surreal.utils.util import SMALL_NUMBER
class Env(Makeable, metaclass=ABCMeta):
    """
    An Env class used to run experiment-based RL.
    """
    def __init__(self, actors=None, render=False, action_map=None, reward_clipping=None):
        """
        Args:
            actors (Union[int,List[Actor]]): The number of Actors to create and add to this env or a list of
                already existing Actors.
            render: If set True, the program will visualize the trainings of gym's environment. Note that such
                visualization is probably going to slow down the training.
            action_map (Optional[callable]): An optional callable taking `actions` as inputs to enable e.g. discrete
                learning in continuous envs or vice-versa. The callable must output the mapped actions, ready to be
                applied in the underlying env.
            reward_clipping (Optional[float,Tuple[float,float]]): Optional reward clipping setting given either by
                a single float (rewards will be clipped from -reward_clipping to +reward_clipping), or as a tuple
                indicating min and max values.
                Note that the clipped values are only visible to the Algos used. Episode returns are still reported
                with the original (unclipped) rewards.
        """
        super().__init__()
        # Our actors.
        self.actors = actors or []
        # The action map (if any).
        self.action_map = action_map
        # Should we clip rewards?
        # NOTE(review): `reward_clipping` is stored but never applied in this base class --
        # presumably subclasses' `_act` implementations use it; confirm.
        self.reward_clipping = reward_clipping
        # Global tick counter. Per env tick (for all actors).
        self.tick = 0
        # Time steps over all actors. n per env tick (n = number of actors).
        self.time_step_all_actors = 0
        # Time steps over all actors that share the same algo.
        self.time_steps_algos = defaultdict(int)
        # Time step(s) per episode (batched via actors axis). Lazily created in `run`.
        self.episodes_time_steps = None
        self.historic_episodes_lengths = []
        # Maximum number of actor-time-steps/env-ticks for some run (may not be defined, in which case,
        # algo needs to figure out what to do).
        self.max_ticks = None
        self.max_time_steps = None
        # Whether the Env is currently running (or paused).
        self.running = False
        # Running in `run_thread`. No thread used if run synchronously.
        self.run_thread = None
        # Whether to render this env.
        self.do_render = render
        # Which actor slot should we trajectory-log right now? False for none.
        self.debug_store_episode = False
        self.num_episodes = 0
        # Current state: Must be implemented as a flattened list of buffer(s) (one for each container item).
        self.state = None
        # Fix: size all per-actor buffers from `self.actors` (the normalized list), not the raw
        # `actors` argument -- `len(None)` raised a TypeError when `actors` was left at its default.
        # Current rewards per actor.
        self.reward = np.zeros(shape=(len(self.actors),))
        # Current accumulated episode returns per actor.
        self.episodes_returns = np.zeros(shape=(len(self.actors),))
        # Historic episode returns in chronological order.
        self.historic_episodes_returns = []
        # Current terminals per actor. Start `True` so the first tick triggers `episode_starts`.
        self.terminal = np.array([True] * len(self.actors))

    @property
    def rl_algos_to_actors(self):
        """
        Unique algos by name mapped to the different actor-slots that point to them.
        """
        ret = {}
        for i, actor in enumerate(self.actors):
            algo_name = actor.rl_algo.name if actor.rl_algo is not None else "_default_"
            if algo_name not in ret:
                ret[algo_name] = [i]
            else:
                ret[algo_name].append(i)
        return {k: np.array(v) for k, v in ret.items()}

    def run(self, ticks=None, actor_time_steps=None, episodes=None, sync=True, render=None):
        """
        Runs this Env for n time_steps (or infinitely if `time_steps` is not given or 0).

        Args:
            ticks (Optional[int]): The number of env-ticks to run for.
            actor_time_steps (Optional[int]): The number of single actor time-steps to run for.
            episodes (Optional[int]): The number of episodes (across all actors) to execute.
            sync (bool): Whether to run synchronously (wait for execution to be done) or not (run in separate thread).
            render (bool): Whether to render this run. If not None, will override `self.do_render`.
        """
        # Set up time_step counters per episode (per actor).
        if self.episodes_time_steps is None:
            self.episodes_time_steps = np.zeros(shape=(len(self.actors),), dtype=np.int32)
        self.running = True
        if sync is True:
            self._run(ticks, actor_time_steps, episodes, render=render)
        else:
            # TODO: What if `run` is called, while this env is still running?
            self.run_thread = threading.Thread(target=self._run, args=[ticks, actor_time_steps, episodes])
            # Fix: `Thread.run()` executes `_run` synchronously in the *calling* thread;
            # `start()` is required to actually run asynchronously.
            self.run_thread.start()

    def _run(self, ticks=None, actor_time_steps=None, episodes_x=None, render=None):
        """
        The actual loop scaffold implementation (to run in thread or synchronously).

        Args:
            ticks (Optional[int]): The number of env-ticks to run for.
            actor_time_steps (Optional[int]): The number of single actor time-steps to run for.
            #currently not supported! episodes (Optional[int]): The number of episodes (across all actors) to execute.
        """
        # Set max-time-steps.
        # TODO: Have env keep track of time_percentage, not algo!
        # TODO: Distinguish between time_percentage this run and time_percentage overall (through many different `run` calls).
        # TODO: Solve dilemma of when only `episodes` given. What's the number of ticks then? Do we know the lengths of episodes up front? We could calculate time_percentage based on episodes done.
        self.max_ticks = (actor_time_steps / len(self.actors)) if actor_time_steps is not None else \
            (ticks or float("inf"))
        self.max_time_steps = self.max_ticks * len(self.actors)
        #self.max_episodes = episodes if episodes is not None else float("inf")

        # Build a algo-map for faster non-repetitive lookup.
        quick_algo_map = {}
        for algo_name, actor_slots in self.rl_algos_to_actors.items():
            quick_algo_map[algo_name] = self.actors[actor_slots[0]].rl_algo  # type: RLAlgo

        tick = 0
        #episode = 0
        last_time_measurement = time.time()
        last_episode_measurement = 0
        last_actor_ts_measurement = 0
        last_tick_measurement = 0
        while self.running is True and tick < self.max_ticks:  # and episode < self.max_episodes:
            # Loop through Actors, gather their observations/rewards/terminals and then tick each one of their
            # algos exactly once.
            for algo_name, actor_slots in self.rl_algos_to_actors.items():
                algo = quick_algo_map[algo_name]
                # If episode ended, send new-episode event to algo.
                for slot in actor_slots:
                    if self.terminal[slot]:
                        event = RLAlgoEvent(self, actor_slots, self.time_steps_algos[algo_name], current_actor_slot=slot)
                        if tick > 0:
                            self.num_episodes += 1
                            #episode += 1
                            last_episode_measurement += 1
                            # Switch on/off debug trajectory logging.
                            if StoreEveryNthEpisode is not False and self.debug_store_episode is False and \
                                    self.num_episodes % StoreEveryNthEpisode == 0:
                                self.debug_store_episode = (self.num_episodes, slot)
                            elif self.debug_store_episode is not False and self.debug_store_episode[1] == slot:
                                self.debug_store_episode = False
                            # Log all historic returns.
                            self.historic_episodes_returns.append(self.episodes_returns[slot])
                            # Log all historic episode lengths.
                            self.historic_episodes_lengths.append(self.episodes_time_steps[slot])
                            # Send `episode_ends` event.
                            algo.event_episode_ends(event)
                            self.summarize_episode(algo)
                            # Log stats sometimes.
                            if slot == 0:
                                t = time.time()
                                delta_t = (t - last_time_measurement) or SMALL_NUMBER
                                logging.info(
                                    "Ticks(tx)={} Time-Steps(ts)={} ({} Actors); Episodes(ep)={}; "
                                    "Avg ep len~{}; Avg R~{:.4f}; tx/s={:d}; ts/s={:d}; ep/s={:.2f}".format(
                                        self.tick, self.time_step_all_actors, len(self.actors),
                                        self.num_episodes,
                                        int(np.mean(self.historic_episodes_lengths[-len(self.actors):])),
                                        np.mean(self.historic_episodes_returns[-len(self.actors):]),
                                        int(last_tick_measurement / delta_t),
                                        int(last_actor_ts_measurement / delta_t),  # TODO: these two are wrong
                                        last_episode_measurement / delta_t
                                    )
                                )
                                last_time_measurement = t
                                last_episode_measurement = 0
                                last_actor_ts_measurement = 0
                                last_tick_measurement = 0
                        # Reset episode stats.
                        self.episodes_time_steps[slot] = 0
                        self.episodes_returns[slot] = 0.0
                        # Send `episode_starts` event.
                        algo.event_episode_starts(event)
                        #self.summarize(algo)
                # Tick the algorithm passing self.
                # TODO: This may become asynchronous in the future:
                # TODO: Need to make sure that we do not expect `self.act` to be called by the algo within this tick.
                event = RLAlgoEvent(
                    self, actor_slots, self.time_steps_algos[algo_name], r=self.reward[actor_slots],
                    t=self.terminal[actor_slots], s_=self.state[actor_slots]
                )
                algo.event_tick(event)
                self.summarize_tick(algo)
                # Accumulate episode rewards.
                self.episodes_returns[actor_slots] += self.reward[actor_slots]
                # Time steps (all actors with this algo).
                self.time_steps_algos[algo_name] += len(actor_slots)
            if render is True or (render is None and self.do_render is True):
                self.render()
            # Time step for just this `run`.
            tick += 1
            last_tick_measurement += 1
            # Global time step.
            self.tick += 1
            # Global time step (all actors).
            self.time_step_all_actors += len(self.actors)
            last_actor_ts_measurement += len(self.actors)
            # Single episode (per actor) time_steps.
            self.episodes_time_steps += 1

        # Done with the run.
        self.running = False
        # Interrupted.
        if tick < self.max_ticks:
            # TODO: What if paused, may one resume?
            logging.info("Run paused at tick {}.".format(tick))
        # Cleanly finished run.
        else:
            logging.info("Run done after {} ticks.".format(tick))
        # Reset max-time-steps to undefined after each run.
        self.max_ticks = None
        self.max_time_steps = None

    def pause(self):
        """Pauses a currently ongoing run (the run loop exits at the next tick boundary)."""
        self.running = False

    def act(self, actions):
        """
        Executes actions in this Env by calling the abstract `_act` method, which must be implemented by children
        of this class.

        Args:
            actions (Dict[str,any]): The action(s) to be executed by the environment.
                Keys are the Actors' names, values are the actual action structures (depending on the Actors' action
                Spaces).
        """
        # Handle debug logging of trajectories.
        s = None
        if self.debug_store_episode is not False:
            episode, slot = self.debug_store_episode
            # Copy the pre-step state so it can be stored together with action/reward below.
            s = self.state[slot].copy()
        # Action translations?
        if self.action_map is not None:
            actions = self.action_map(actions)
        # Call main action handler.
        self._act(actions)
        if s is not None:
            self._debug_store(
                PATH_EPISODE_LOGS + "ep_{:03d}_ts{:03d}_a{}_r{}".format(
                    episode, self.episodes_time_steps[slot], actions[slot], self.reward[slot]
                ), s
            )

    @abstractmethod
    def _act(self, actions):
        """
        Executes actions in this Env.

        Args:
            actions (Dict[str,any]): The action(s) to be executed by the environment.
                Keys are the Actors' names, values are the actual action structures (depending on the Actors' action
                Spaces).
        """
        raise NotImplementedError

    def render(self, **kwargs):
        """
        Renders the env according to some specs given in kwargs (e.g. mode, which sub-env, etc..).
        May be implemented or not.
        """
        pass

    def terminate(self):
        """
        Clean up operation.
        May be implemented or not.
        """
        pass

    def point_all_actors_to_algo(self, rl_algo):
        """
        Points all of this Env's Actors to the given RLAlgo object.

        Args:
            rl_algo (RLAlgo): The RLAlgo to point to.
        """
        for actor in self.actors:
            actor.set_algo(rl_algo)

    def summarize_tick(self, algo):
        """
        Writes summary information (iff debug.UseTfSummaries is true) to the Algo's `summary_writer` object.
        Summary information and setup can be passed into the Algo via `config.summaries`, which is a list of items,
        that will simply be executed on the Algo context (prepending "algo."):
        E.g.:
        "Q[0](np.array([[1.0, 0.0]]))": Will summarize the result of calling `self.Q[0](...)` on the Algo object.
        "L_critic": Will summarize the value of `self.L_critic` on the Algo object.

        Args:
            algo (Algo): The Algo to summarize.
        """
        # Summaries not setup.
        if algo.summary_writer is None:
            return
        with algo.summary_writer.as_default():
            for summary in algo.config.summaries:
                name = code_ = summary
                # Tuple/List of 2: Summary name + prop.
                if isinstance(summary, (list, tuple)) and len(summary) == 2:
                    name = summary[0]
                    code_ = summary[1]
                # Ignore episode stats (handled by `summarize_episode`).
                if re.match(r'^episode\..+$', name):
                    continue
                l_dict = {"algo": algo}
                # Execute the code.
                try:
                    exec("result = algo.{}".format(code_), None, l_dict)
                # This should never really fail.
                except Exception as e:
                    logging.error("Summary ERROR '{}' in '{}'!".format(e, code_))
                    continue
                result = l_dict["result"]
                # Array or Tensor?
                if isinstance(result, (np.ndarray, tf.Tensor)):
                    if result.shape == ():
                        tf.summary.scalar(name, result, step=self.tick)
                    elif result.shape == (1,):
                        tf.summary.scalar(name, tf.squeeze(result), step=self.tick)
                    # TODO: Add images, etc..?
                    else:
                        tf.summary.histogram(name, result, step=self.tick)
                # Assume scalar.
                else:
                    tf.summary.scalar(name, result, step=self.tick)

    def summarize_episode(self, algo):
        """
        See `summarize_tick`.
        This method only considers entries in `algo.summaries` that start with "episode.[some prop]".
        Currently only supports props: `episode.return` and `episode.time_steps`.

        Args:
            algo (Algo): The Algo to summarize.
        """
        # Summaries not setup.
        if algo.summary_writer is None:
            return
        with algo.summary_writer.as_default():
            for summary in algo.config.summaries:
                name = code_ = summary
                # Tuple/List of 2: Summary name + prop.
                if isinstance(summary, (list, tuple)) and len(summary) == 2:
                    name = summary[0]
                if not re.match(r'^episode\..+', name):
                    continue
                value = self.historic_episodes_returns[-1] if name == "episode.return" else \
                    self.historic_episodes_lengths[-1]
                tf.summary.scalar(name, value, step=self.num_episodes)

    @staticmethod
    def _debug_store(path, state):
        # TODO: state Dict or Tuple, etc..
        # Probably an image.
        if len(state.shape) == 3 and (state.shape[2] == 1 or state.shape[2] == 3):
            cv2.imwrite(path + ".png", state)
        # Some other data.
        else:
            logging.warning("***WARNING: No mechanism yet for state debug-saving if not image!")
            #with open(path, "w") as file:
            #    json.dump(file, state)

    @abstractmethod
    def __str__(self):
        raise NotImplementedError
| StarcoderdataPython |
1974699 | '''
python evaluate.py \
--gt_path ../datasets/nyu_data/test_depth \
--split indoor --eigen_crop \
--min_depth 0.1 --max_depth 10.0 --normalize_depth 10.0 \
--predicted_depth_path ./results/[] \
python evaluate.py \
--gt_path ../datasets/kitti_data/ \
--split eigen --garg_crop \
--min_depth 1.0 --max_depth 80.0 --normalize_depth 80.0 \
--predicted_depth_path ./results/[] \
python evaluate.py \
--gt_path ../datasets/Gridlaserdata/ \
--split make3d --garg_crop \
--min_depth 0.001 --max_depth 70.0 --normalize_depth 80.0 \
--predicted_depth_path ./results/[] \
'''
import argparse
from util.process_data import *
import cv2
import scipy
import scipy.io
from natsort import natsorted
# Command-line interface. Defaults match the NYU indoor protocol; see the module
# docstring for the KITTI (eigen) and Make3D invocations.
parser = argparse.ArgumentParser(description='Evaluation on the dataset')  # fixed typo: "ont" -> "on"
parser.add_argument('--split', type=str, default='indoor', help='data split, indoor or eigen')
parser.add_argument('--predicted_depth_path', type=str,
                    default='results/>>>', help='path to estimated depth')
parser.add_argument('--gt_path', type=str,
                    default='../datasets/nyu_data/test_depth/', help='path to ground truth')
parser.add_argument('--file_path', type=str, default='./datasplit/', help='path to datasplit files')
parser.add_argument('--min_depth', type=float, default=0.1, help='minimun depth for evaluation, indoor 0.1 / eigen 1.0 / make3d 0.001')
parser.add_argument('--max_depth', type=float, default=10.0, help='maximun depth for evaluation, indoor 10.0 / eigen 50.0 / make3d 70.0')
parser.add_argument('--normalize_depth', type=float, default=10.0, help='depth normalization value, indoor 10.0 / eigen 80.0 / make3d 80.0')
parser.add_argument('--eigen_crop', action='store_true', help='if set, crops according to Eigen NIPS14')
parser.add_argument('--garg_crop', action='store_true', help='if set, crops according to Garg ECCV16')
args = parser.parse_args()
# Main evaluation driver: load predictions + ground truth for the chosen split,
# mask/crop both, then print per-sample and averaged depth-error metrics.
if __name__ == "__main__":
    # Predictions were saved normalized; `normalize_depth` rescales them to meters.
    predicted_depths = load_depth(args.predicted_depth_path,args.split, args.normalize_depth)
    if args.split == 'indoor':
        # NYU-style split: ground truth is stored as depth images; 10.0 is the fixed scale.
        ground_truths = load_depth(args.gt_path, args.split, 10.0)
        num_samples = len(ground_truths)
    elif args.split == 'eigen':
        # KITTI Eigen split: project raw LiDAR points into the camera to build sparse GT.
        test_files = natsorted(read_text_lines(args.file_path + 'eigen_test_files.txt'))
        gt_files, gt_calib, im_sizes, im_files, cams = read_file_data(test_files, args.gt_path)
        num_samples = len(im_files)
        ground_truths = []
        for t_id in range(num_samples):
            camera_id = cams[t_id]
            depth = generate_depth_map(gt_calib[t_id], gt_files[t_id], im_sizes[t_id], camera_id, False, True)
            ground_truths.append(depth.astype(np.float32))
            # Resize each prediction to the original KITTI image resolution (w, h order for cv2).
            depth = cv2.resize(predicted_depths[t_id],(im_sizes[t_id][1], im_sizes[t_id][0]),interpolation=cv2.INTER_LINEAR)
            predicted_depths[t_id] = depth
            # # convert dist to depth maps
            # depth, depth_inter = generate_depth_map(gt_calib[t_id], gt_files[t_id], im_sizes[t_id], camera_id, True, True)
            # ground_truths.append(depth_inter.astype(np.float32))
            # depth_img = Image.fromarray(np.uint8(depth_inter/80*255))
            # depth_path = os.path.join('../datasets/kitti_data/eigen_val_labels', str(t_id) + '_' + test_files[t_id].replace('/', '_')[0:66])
            # depth_img.save(depth_path)
            # if t_id % 200 == 0:
            #     print(t_id)
            #     print('saved')
            #     x = input()
    elif args.split == 'make3d':
        with open(os.path.join(args.file_path, "make3d_test_files.txt")) as f:
            test_filenames = f.read().splitlines()
        # Strip the "img-" prefix and ".jpg" suffix to get the sample id.
        test_filenames = map(lambda x: x[4:-4], test_filenames)
        ground_truths = []
        for filename in test_filenames:
            mat = scipy.io.loadmat(os.path.join(args.gt_path, "depth_sph_corr-{}.mat".format(filename)))  # "datasets/Gridlaserdata/"
            # Channel 3 of Position3DGrid is the depth component.
            ground_truths.append(mat["Position3DGrid"][:,:,3])
        num_samples = len(ground_truths)
        # NOTE(review): this `map` is lazy and its result is never consumed, so the resize
        # below never executes -- dead code, possibly an unfinished change. Verify intent.
        depths_gt_resized = map(lambda x: cv2.resize(x, (305, 407), interpolation=cv2.INTER_NEAREST), ground_truths)
        # NOTE(review): keeps only rows 17:38 of each GT map -- presumably the standard
        # Make3D vertical evaluation crop; confirm against the evaluation protocol.
        ground_truths = list(map(lambda x: x[int((55 - 21)/2): int((55 + 21)/2)], ground_truths))
    # Per-sample metric accumulators.
    abs_rel = np.zeros(num_samples, np.float32)
    sq_rel = np.zeros(num_samples,np.float32)
    rmse = np.zeros(num_samples,np.float32)
    rmse_log = np.zeros(num_samples,np.float32)
    log_10 = np.zeros(num_samples,np.float32)
    a1 = np.zeros(num_samples,np.float32)
    a2 = np.zeros(num_samples,np.float32)
    a3 = np.zeros(num_samples,np.float32)
    for i in range(num_samples):
    # for i in range(1):
        ground_depth = ground_truths[i]
        predicted_depth = predicted_depths[i]
        if args.split == 'indoor' or args.split == 'eigen':
            height, width = ground_depth.shape
            _height, _width = predicted_depth.shape
            # Resize prediction to GT resolution if they differ.
            if not height == _height:
                predicted_depth = cv2.resize(predicted_depth,(width,height),interpolation=cv2.INTER_LINEAR)
            # Evaluate only pixels with valid GT inside [min_depth, max_depth].
            mask = np.logical_and(ground_depth > args.min_depth, ground_depth < args.max_depth)
            # crop used by Garg ECCV16
            if args.garg_crop:
                crop = np.array([0.40810811 * height, 0.99189189 * height,
                                 0.03594771 * width, 0.96405229 * width]).astype(np.int32)
            # crop we found by trail and error to reproduce Eigen NIPS14 results
            elif args.eigen_crop:
                crop = np.array([0.3324324 * height, 0.91351351 * height,
                                 0.0359477 * width, 0.96405229 * width]).astype(np.int32)
            # NOTE(review): `crop` is undefined if neither --garg_crop nor --eigen_crop is
            # passed (NameError below) -- the CLI examples always pass one; confirm.
            crop_mask = np.zeros(mask.shape)
            crop_mask[crop[0]:crop[1],crop[2]:crop[3]] = 1
            mask = np.logical_and(mask, crop_mask)
            ground_depth = ground_depth[mask]
            predicted_depth = predicted_depth[mask]
        elif args.split == 'make3d':
            predicted_depth = cv2.resize(predicted_depth, ground_depth.shape[::-1], interpolation=cv2.INTER_NEAREST)
            mask = np.logical_and(ground_depth > args.min_depth, ground_depth < args.max_depth)
            ground_depth = ground_depth[mask]
            predicted_depth = predicted_depth[mask]
            # Median scaling (standard for scale-ambiguous predictions on Make3D).
            predicted_depth *= np.median(ground_depth) / np.median(predicted_depth)
        # Clamp predictions into the evaluation depth range.
        predicted_depth[predicted_depth < args.min_depth] = args.min_depth
        predicted_depth[predicted_depth > args.max_depth] = args.max_depth
        abs_rel[i], sq_rel[i], rmse[i], rmse_log[i], log_10[i], a1[i], a2[i], a3[i] = compute_errors(ground_depth,predicted_depth)
        print('{},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f}, {:10.4f}'
              .format(i, abs_rel[i], sq_rel[i], rmse[i], rmse_log[i], log_10[i], a1[i], a2[i], a3[i]))
    # Averaged metrics over all samples.
    print ('{:>10},{:>10},{:>10},{:>10},{:>10},{:>10},{:>10},{:>10}'.format('abs_rel','sq_rel','rmse','rmse_log','log_10', 'a1','a2','a3'))
    print ('{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f}'
           .format(abs_rel.mean(),sq_rel.mean(),rmse.mean(),rmse_log.mean(), log_10.mean(), a1.mean(),a2.mean(),a3.mean()))
78111 | from unittest.mock import patch
import pytest
import requests
from scenario_player.exceptions.services import (
BrokenService,
ServiceReadTimeout,
ServiceUnavailable,
ServiceUnreachable,
)
from scenario_player.services.utils.interface import ServiceInterface, SPaaSAdapter
from scenario_player.utils.configuration.spaas import SPaaSConfig
@pytest.mark.depends(name="spaas_adapter_mounted")
def test_adapter_is_auto_mounted_in_interface_class():
    """A fresh ServiceInterface must come with an SPaaSAdapter mounted under 'spaas'."""
    interface = ServiceInterface(SPaaSConfig({}))
    mounted = interface.adapters
    assert "spaas" in mounted
    assert isinstance(mounted["spaas"], SPaaSAdapter)
# All tests in this class patch HTTPAdapter.send at the class level, so every test
# method receives the mock as an extra positional argument (after `self`).
@pytest.mark.depends(depends=["spaas_adapter_mounted"])
@patch("scenario_player.services.utils.interface.HTTPAdapter.send")
class TestSPaaSAdapter:
    @pytest.mark.parametrize("service", ["rpc"])
    @pytest.mark.parametrize("port", ["1", None])
    @pytest.mark.parametrize("host", ["superhost.com", None])
    @pytest.mark.parametrize("scheme", ["ftp", None])
    def test_send_loads_host_and_port_correctly(self, _, scheme, host, port, service):
        """If a host and port key have **not** been given in the SPAAS config section,
        SPaaSAdapter.prep_service_request should default to sensible values.

        `_` is the class-level HTTPAdapter.send mock (unused here).
        """
        # Defaults expected when the config omits the value: http / 127.0.0.1 / 5000.
        expected_url = (
            f"{scheme or 'http'}://{host or '127.0.0.1'}:{port or '5000'}/{service}/my-endpoint"
        )
        # Only include the keys the current parametrization provides.
        input_config = {}
        if host:
            input_config["host"] = host
        if port:
            input_config["port"] = port
        if scheme:
            input_config["scheme"] = scheme
        config = SPaaSConfig({"spaas": {service: input_config}})
        adapter = SPaaSAdapter(spaas_settings=config)
        # "spaas://<service>/..." is the virtual scheme the adapter rewrites.
        given_request = requests.Request(url=f"spaas://{service}/my-endpoint").prepare()
        setattr(given_request, "service", service)
        request = adapter.prep_service_request(given_request)
        assert request.url == expected_url

    def test_send_method_monkeypatches_metadata_onto_request(self, mock_adapter_send):
        """SPaaSAdapter.send must attach `orig_url` and `service` attributes to the request."""
        # Echo the request back (with dummy response methods) so `send` returns it.
        def return_modded_request(request, *_):
            request.raise_for_status = lambda: True
            request.json = lambda: True
            return request

        mock_adapter_send.side_effect = return_modded_request
        config = SPaaSConfig({"spaas": {}})
        adapter = SPaaSAdapter(config)
        given_request = requests.Request(url="spaas://rpc/my-endpoint").prepare()
        resulting_request = adapter.send(given_request, None)
        # The original spaas:// URL and the parsed service name must be preserved.
        assert hasattr(resulting_request, "orig_url")
        assert resulting_request.orig_url == "spaas://rpc/my-endpoint"
        assert hasattr(resulting_request, "service")
        assert resulting_request.service == "rpc"

    @pytest.mark.parametrize(
        "send_err, status_code, expected_err",
        argvalues=[
            (requests.exceptions.ProxyError, None, ServiceUnreachable),
            (requests.exceptions.SSLError, None, ServiceUnreachable),
            (requests.exceptions.ConnectTimeout, None, ServiceUnreachable),
            (requests.exceptions.ReadTimeout, None, ServiceReadTimeout),
            (None, 500, BrokenService),
            (None, 503, ServiceUnavailable),
        ],
        ids=[
            "ProxyError raises ServiceUnreachable",
            "SSLError raises ServiceUnreachable",
            "ConnectTimeout raises ServiceUnreachable",
            "ReadTimeout raises ServiceReadTimeout",
            "500 Internal Server Error raises BrokenService exception",
            "503 Service raises BrokenService ServiceUnavailable",
        ],
    )
    def test_exceptions_are_converted_correctly_when_expected(
        self, mock_adapter_send, send_err, status_code, expected_err
    ):
        """requests-level transport errors and 5xx responses must be converted to
        the scenario_player service exception hierarchy."""
        # Either make the underlying send raise, or return a response with the given status.
        if send_err:
            mock_adapter_send.side_effect = send_err
        else:
            resp = requests.Response()
            resp.status_code = status_code
            mock_adapter_send.return_value = resp
        with pytest.raises(expected_err):
            config = SPaaSConfig({"spaas": {}})
            adapter = SPaaSAdapter(config)
            req = requests.Request(url="http://127.0.0.1:5000").prepare()
            adapter.send(req)
| StarcoderdataPython |
6506179 | import random
import string
import sys
import os
from datetime import date
import datetime
# Interactive token generator: each loop iteration emits six random tokens of the
# shape "<head>.<1 upper + 5 alnum>.<27 alnum>" and appends them to output.txt.
print("(1Loop = 6TokenType)")
Loop = input("How many Tokens loop?: ")
print("\n\n")
count = 0
path = os.path.dirname(os.path.realpath(__file__))

ALNUM = string.ascii_letters + string.digits

def _rand(pool, n):
    """Return `n` random characters drawn from `pool`."""
    return ''.join(random.choice(pool) for _ in range(n))

def _tail():
    """The '.<1 upper letter + 5 alnum>.<27 alnum>' suffix shared by every token."""
    return '.' + random.choice(string.ascii_letters).upper() + _rand(ALNUM, 5) + '.' + _rand(ALNUM, 27)

def _token(prefix):
    """A token whose head is `prefix` + one random letter + 21 alnum characters."""
    return prefix + random.choice(string.ascii_letters) + _rand(ALNUM, 21) + _tail()

# Fix: open the output file once with a context manager; the original reopened it
# on every loop iteration and never closed the handles.
with open(os.path.join(path, "output.txt"), "a") as f:
    while int(count) < int(Loop):
        # First token format: two uppercase letters + one letter + 21 alnum + tail.
        first = (random.choice(string.ascii_letters).upper()
                 + random.choice(string.ascii_letters).upper()
                 + random.choice(string.ascii_letters)
                 + _rand(ALNUM, 21) + _tail())
        # Remaining five formats share the prefixed shape (duplicated inline before).
        tokens = [first] + [_token(p) for p in ("MT", "NT", "MD", "MT", "NT")]
        f.write('\n'.join(tokens) + '\n')
        for token in tokens:
            print(token)
        count += 1
print("\n\nTokens have been saved in output.txt")  # fixed grammar: "has been save"
| StarcoderdataPython |
115496 | # Copyright 2021 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the webserver and its endpoints."""
import logging
import json
import unittest
import falcon
from environment_provider.webserver import SubSuite
from tests.library.fake_request import FakeRequest, FakeResponse
from tests.library.fake_database import FakeDatabase
class TestSubSuite(unittest.TestCase):
    """Tests for the sub suite endpoint."""

    logger = logging.getLogger(__name__)

    def test_get(self):
        """Test that it is possible to get a sub suite from the sub suite endpoint.

        Approval criteria:
            - The sub suite endpoint shall return a sub suite if one exists.

        Test steps:
            1. Add a sub suite to the database.
            2. Send a fake request to the sub suite endpoint.
            3. Verify that the sub suite endpoint responds with a sub suite.
        """
        self.logger.info("STEP: Add a sub suite to the database.")
        database = FakeDatabase()
        suite_id = "thetestiestofsuites"
        sub_suite = {"test": "suite"}
        database.write(suite_id, json.dumps(sub_suite))

        self.logger.info("STEP: Send a fake request to the sub suite endpoint.")
        request = FakeRequest()
        request.fake_params["id"] = suite_id
        response = FakeResponse()
        SubSuite(database).on_get(request, response)

        self.logger.info(
            "STEP: Verify that the sub suite endpoint responds with a sub suite."
        )
        self.assertDictEqual(response.fake_responses.get("media"), sub_suite)

    def test_get_no_id(self):
        """Test that the sub suite endpoint fails when sub suite was not found.

        Approval criteria:
            - The sub suite endpoint shall raise a HTTPNotFound exception when no suite is found.

        Test steps:
            1. Send a fake request to the sub suite endpoint.
            2. Verify that the sub suite endpoint responds with HTTPNotFound.
        """
        self.logger.info("STEP: Send a fake request to the sub suite endpoint.")
        request = FakeRequest()
        request.fake_params["id"] = "thisonedoesnotexist"
        response = FakeResponse()

        self.logger.info(
            "STEP: Verify that the sub suite endpoint responds with HTTPNotFound."
        )
        with self.assertRaises(falcon.HTTPNotFound):
            # Fix: pass a FakeDatabase *instance*, matching test_get above.
            # The original passed the class object itself, so the endpoint
            # would have called methods on an uninstantiated database rather
            # than querying an empty one.
            SubSuite(FakeDatabase()).on_get(request, response)
| StarcoderdataPython |
from collections import deque
from random import sample
from typing import Deque

import numpy as np
from matplotlib import pyplot as plt
from torch import nn
class ReplayBuffer:
    """Fixed-capacity FIFO store of observations with uniform random sampling."""

    def __init__(self, capacity: int) -> None:
        # collections.deque is the actual runtime container. The original
        # called typing.Deque(...), which only works via a deprecated
        # typing-alias fallback; instantiate the real class instead.
        self.buffer = deque([], maxlen=capacity)

    def save(self, obs) -> None:
        """Append an observation; the oldest entry is evicted once at capacity."""
        self.buffer.append(obs)

    def get_batch(self, dim: int = 256):
        """Return `dim` observations sampled uniformly without replacement."""
        return sample(self.buffer, dim)

    def __len__(self) -> int:
        return len(self.buffer)
class DQN(nn.Module):
    """MLP Q-network mapping 2 state features to 2 action values."""

    def __init__(self):
        super(DQN, self).__init__()
        k = 256  # hidden-layer width
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(2, k),
            nn.BatchNorm1d(k),
            nn.ReLU(),
            nn.Linear(k, k),
            nn.BatchNorm1d(k),
            nn.ReLU(),
            nn.Linear(k, 2),
        )

    def forward(self, x):
        # Normalize the features on a copy. The original assigned into
        # x[:, 0] / x[:, 1] in place, silently mutating the caller's tensor
        # (and in-place ops on inputs can break autograd). The constants
        # presumably map each feature roughly into [-1, 1] — TODO confirm
        # the state ranges against the environment.
        z = x.clone()
        z[:, 0] = (z[:, 0] - (7.5 / 2)) / 7.5
        z[:, 1] = (z[:, 1] - (50)) / 100
        logits = self.linear_relu_stack(z)
        return logits
def eps(ep: int) -> float:
    """Exploration-rate schedule for episode `ep`.

    Decays from ~eps_start toward eps_end along
    1 - exp((ep - decay_ep) / eps_decay), clamping exactly at eps_end once
    `ep` reaches decay_ep (the last fifth of training, via the max(..., 0)).
    """
    eps_start = 0.4
    eps_end = 0.001
    n_episodes = 250000
    eps_decay = int(np.ceil(n_episodes / 3))  # decay time constant
    decay_ep = n_episodes - n_episodes / 5    # episode where the floor is hit
    # (an unused `no_eps` flag from the original was removed)
    return eps_end + max(
        (eps_start - eps_end) * (1 - np.exp((ep - decay_ep) / eps_decay)),
        0,
    )
# Sanity-check the schedule: evaluate eps over every training episode and
# plot the resulting curve. plt.show() blocks until the window is closed.
data = [eps(i) for i in range(0, 250000)]
plt.title("eps")
plt.xlabel("ep")
plt.ylabel("eps")
plt.plot([i for i in range(0, len(data))], data)
plt.show()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.