id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6490755 | <gh_stars>1-10
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
import rclpy
from gazebo_msgs.srv import SpawnEntity
from ament_index_python.packages import get_package_prefix
from ament_index_python.packages import get_package_share_directory
import xacro
def main(args=None):
    """Spawn the rrbot robot model into a running Gazebo simulation.

    Resolves the robot's xacro file from its ROS package, extends the
    Gazebo model/plugin search paths so Gazebo can find the package's
    resources, renders the xacro to URDF XML and calls the
    ``/spawn_entity`` service with it.
    """
    rclpy.init(args=args)
    node = rclpy.create_node('minimal_client')
    cli = node.create_client(SpawnEntity, '/spawn_entity')

    # Robot model: configure as needed
    robot_model_file = 'rrbot.xacro'
    robot_model_relative_path = 'urdf/'
    robot_model_package = 'rrbot_description'
    xacro_file = os.path.join(get_package_share_directory(robot_model_package),
                              robot_model_relative_path, robot_model_file)
    assert os.path.exists(xacro_file), \
        "The file "+str(robot_model_file)+" doesnt exist in "+str(xacro_file)

    # Make the package's share/ and lib/ directories visible to Gazebo.
    install_dir = get_package_prefix(robot_model_package)
    if 'GAZEBO_MODEL_PATH' in os.environ:
        os.environ['GAZEBO_MODEL_PATH'] = os.environ['GAZEBO_MODEL_PATH'] + \
            ':' + install_dir + '/share'
    else:
        os.environ['GAZEBO_MODEL_PATH'] = install_dir + "/share"
    if 'GAZEBO_PLUGIN_PATH' in os.environ:
        os.environ['GAZEBO_PLUGIN_PATH'] = os.environ['GAZEBO_PLUGIN_PATH'] + \
            ':' + install_dir + '/lib'
    else:
        os.environ['GAZEBO_PLUGIN_PATH'] = install_dir + '/lib'

    # Render the xacro template to plain URDF XML for the spawn request.
    robot_description_config = xacro.process_file(xacro_file)
    robot_desc = robot_description_config.toxml()

    req = SpawnEntity.Request()
    req.name = "robot"
    req.xml = robot_desc
    req.robot_namespace = ""
    req.reference_frame = "world"

    # Block until the Gazebo spawn service is up, then call it once.
    while not cli.wait_for_service(timeout_sec=1.0):
        node.get_logger().info('service not available, waiting again...')
    future = cli.call_async(req)
    rclpy.spin_until_future_complete(node, future)
    if future.result() is not None:
        node.get_logger().info(
            'Result ' + str(future.result().success) + " " + future.result().status_message)
    else:
        node.get_logger().info('Service call failed %r' % (future.exception(),))

    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
129678 | <gh_stars>1-10
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import numpy as np
from braket.circuits import AsciiCircuitDiagram, Circuit, Gate, Instruction, Observable, Operator
def test_empty_circuit():
    """An empty circuit renders as an empty diagram."""
    assert AsciiCircuitDiagram.build_diagram(Circuit()) == ""
def test_one_gate_one_qubit():
    """A single H gate on a single qubit."""
    circ = Circuit().h(0)
    expected = ("T  : |0|", "        ", "q0 : -H-", "", "T  : |0|")
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_qubit_width():
    """Qubit labels of different widths are padded to stay aligned."""
    circ = Circuit().h(0).h(100)
    expected = (
        "T    : |0|",
        "          ",
        "q0   : -H-",
        "          ",
        "q100 : -H-",
        "",
        "T    : |0|",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_gate_width():
    """A multi-character gate symbol widens its whole time column."""
    class Foo(Gate):
        def __init__(self):
            super().__init__(qubit_count=1, ascii_symbols=["FOO"])

        def to_ir(self, target):
            return "foo"

    circ = Circuit().h(0).h(1).add_instruction(Instruction(Foo(), 0))
    expected = (
        "T  : |0| 1 |",
        "            ",
        "q0 : -H-FOO-",
        "            ",
        "q1 : -H-----",
        "",
        "T  : |0| 1 |",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_time_width():
    """Two-digit time indices widen their columns in the header and body."""
    circ = Circuit()
    num_qubits = 15
    # Chain CNOTs down the register; stop before the last qubit.
    for qubit in range(num_qubits):
        if qubit == num_qubits - 1:
            break
        circ.cnot(qubit, qubit + 1)
    expected = (
        "T    : |0|1|2|3|4|5|6|7|8|9|10|11|12|13|",
        "                                        ",
        "q0   : -C-------------------------------",
        "        |                               ",
        "q1   : -X-C-----------------------------",
        "          |                             ",
        "q2   : ---X-C---------------------------",
        "            |                           ",
        "q3   : -----X-C-------------------------",
        "              |                         ",
        "q4   : -------X-C-----------------------",
        "                |                       ",
        "q5   : ---------X-C---------------------",
        "                  |                     ",
        "q6   : -----------X-C-------------------",
        "                    |                   ",
        "q7   : -------------X-C-----------------",
        "                      |                 ",
        "q8   : ---------------X-C---------------",
        "                        |               ",
        "q9   : -----------------X-C-------------",
        "                          |             ",
        "q10  : -------------------X-C-----------",
        "                            |           ",
        "q11  : ---------------------X--C--------",
        "                               |        ",
        "q12  : ------------------------X--C-----",
        "                                  |     ",
        "q13  : ---------------------------X--C--",
        "                                      | ",
        "q14  : ------------------------------X--",
        "",
        "T    : |0|1|2|3|4|5|6|7|8|9|10|11|12|13|",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_connector_across_two_qubits():
    """A CNOT draws a vertical connector between adjacent qubits."""
    circ = Circuit().cnot(3, 4).h(range(2, 6))
    expected = (
        "T  : |0|1|",
        "          ",
        "q2 : -H---",
        "          ",
        "q3 : -C-H-",
        "      |   ",
        "q4 : -X-H-",
        "          ",
        "q5 : -H---",
        "",
        "T  : |0|1|",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_overlapping_qubits():
    """Overlapping two-qubit gates share a time column with pass-through bars."""
    circ = Circuit().cnot(0, 2).cnot(1, 3).h(0)
    expected = (
        "T  : | 0 |1|",
        "            ",
        "q0 : -C---H-",
        "      |     ",
        "q1 : -|-C---",
        "      | |   ",
        "q2 : -X-|---",
        "        |   ",
        "q3 : ---X---",
        "",
        "T  : | 0 |1|",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_overlapping_qubits_angled_gates():
    """Parameterized (angled) gates widen the shared, overlapping column."""
    circ = Circuit().zz(0, 2, 0.15).cnot(1, 3).h(0)
    expected = (
        "T  : |    0     |1|",
        "                   ",
        "q0 : -ZZ(0.15)---H-",
        "      |            ",
        "q1 : -|--------C---",
        "      |        |   ",
        "q2 : -ZZ(0.15)-|---",
        "               |   ",
        "q3 : ----------X---",
        "",
        "T  : |    0     |1|",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_connector_across_gt_two_qubits():
    """A connector spanning more than two qubits passes through the middle one."""
    circ = Circuit().h(4).cnot(3, 5).h(4).h(2)
    expected = (
        "T  : | 0 |1|",
        "            ",
        "q2 : -H-----",
        "            ",
        "q3 : ---C---",
        "        |   ",
        "q4 : -H-|-H-",
        "        |   ",
        "q5 : ---X---",
        "",
        "T  : | 0 |1|",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_connector_across_non_used_qubits():
    """Unused qubits between connector endpoints are omitted from the diagram."""
    circ = Circuit().h(4).cnot(3, 100).h(4).h(101)
    expected = (
        "T    : | 0 |1|",
        "              ",
        "q3   : ---C---",
        "          |   ",
        "q4   : -H-|-H-",
        "          |   ",
        "q100 : ---X---",
        "              ",
        "q101 : -H-----",
        "",
        "T    : | 0 |1|",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_ignore_non_gates():
    """Operators that are not gates are excluded from the diagram."""
    class Foo(Operator):
        @property
        def name(self) -> str:
            return "foo"

        def to_ir(self, target):
            return "foo"

    circ = Circuit().h(0).h(1).cnot(1, 2).add_instruction(Instruction(Foo(), 0))
    expected = (
        "T  : |0|1|",
        "          ",
        "q0 : -H---",
        "          ",
        "q1 : -H-C-",
        "        | ",
        "q2 : ---X-",
        "",
        "T  : |0|1|",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_result_types_target_none():
    """A targetless result type is drawn on every used qubit."""
    circ = Circuit().h(0).h(100).probability()
    expected = (
        "T    : |0|Result Types|",
        "                       ",
        "q0   : -H-Probability--",
        "          |            ",
        "q100 : -H-Probability--",
        "",
        "T    : |0|Result Types|",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_result_types_target_some():
    """A targeted result type is drawn only on its target qubits."""
    circ = (
        Circuit()
        .h(0)
        .h(1)
        .h(100)
        .expectation(observable=Observable.Y() @ Observable.Z(), target=[0, 100])
    )
    expected = (
        "T    : |0| Result Types  |",
        "                          ",
        "q0   : -H-Expectation(Y@Z)-",
        "          |                ",
        "q1   : -H-|----------------",
        "          |                ",
        "q100 : -H-Expectation(Y@Z)-",
        "",
        "T    : |0| Result Types  |",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_additional_result_types():
    """Global result types are listed below the diagram, not drawn in it."""
    circ = Circuit().h(0).h(1).h(100).state_vector().amplitude(["110", "001"])
    expected = (
        "T    : |0|",
        "          ",
        "q0   : -H-",
        "          ",
        "q1   : -H-",
        "          ",
        "q100 : -H-",
        "",
        "T    : |0|",
        "",
        "Additional result types: StateVector, Amplitude(110,001)",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_multiple_result_types():
    """Several result types share the Result Types section of the diagram."""
    circ = (
        Circuit()
        .cnot(0, 2)
        .cnot(1, 3)
        .h(0)
        .variance(observable=Observable.Y(), target=0)
        .expectation(observable=Observable.Y(), target=2)
        .sample(observable=Observable.Y())
    )
    expected = (
        "T  : | 0 |1|   Result Types    |",
        "                                ",
        "q0 : -C---H-Variance(Y)----Sample(Y)-",
        "      |                    |         ",
        "q1 : -|-C------------------Sample(Y)-",
        "      | |                  |         ",
        "q2 : -X-|---Expectation(Y)-Sample(Y)-",
        "        |                  |         ",
        "q3 : ---X------------------Sample(Y)-",
        "",
        "T  : | 0 |1|   Result Types    |",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_multiple_result_types_with_state_vector_amplitude():
    """Targeted result types render in-diagram while global ones are listed below."""
    circ = (
        Circuit()
        .cnot(0, 2)
        .cnot(1, 3)
        .h(0)
        .variance(observable=Observable.Y(), target=0)
        .expectation(observable=Observable.Y(), target=3)
        .expectation(observable=Observable.Hermitian(np.array([[1.0, 0.0], [0.0, 1.0]])), target=1)
        .amplitude(["0001"])
        .state_vector()
    )
    expected = (
        "T  : | 0 |1|     Result Types     |",
        "                                   ",
        "q0 : -C---H-Variance(Y)------------",
        "      |                            ",
        "q1 : -|-C---Expectation(Hermitian)-",
        "      | |                          ",
        "q2 : -X-|--------------------------",
        "        |                          ",
        "q3 : ---X---Expectation(Y)---------",
        "",
        "T  : | 0 |1|     Result Types     |",
        "",
        "Additional result types: Amplitude(0001), StateVector",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_multiple_result_types_with_custom_hermitian_ascii_symbol():
    """A Hermitian observable's display_name is used as its diagram symbol."""
    herm_matrix = (Observable.Y() @ Observable.Z()).to_matrix()
    circ = (
        Circuit()
        .cnot(0, 2)
        .cnot(1, 3)
        .h(0)
        .variance(observable=Observable.Y(), target=0)
        .expectation(observable=Observable.Y(), target=3)
        .expectation(
            observable=Observable.Hermitian(
                matrix=herm_matrix,
                display_name="MyHerm",
            ),
            target=[1, 2],
        )
    )
    expected = (
        "T  : | 0 |1|  Result Types   |",
        "                              ",
        "q0 : -C---H-Variance(Y)---------",
        "      |                         ",
        "q1 : -|-C---Expectation(MyHerm)-",
        "      | |   |                   ",
        "q2 : -X-|---Expectation(MyHerm)-",
        "        |                       ",
        "q3 : ---X---Expectation(Y)------",
        "",
        "T  : | 0 |1|  Result Types   |",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_noise_1qubit():
    """Single-qubit noise renders with its abbreviated symbol and parameter."""
    circ = Circuit().h(0).x(1).bit_flip(1, 0.1)
    expected = (
        "T  : |    0    |",
        "                ",
        "q0 : -H---------",
        "                ",
        "q1 : -X-BF(0.1)-",
        "",
        "T  : |    0    |",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
def test_noise_2qubit():
    """Two-qubit Kraus noise draws a connector through intermediate qubits."""
    circ = Circuit().h(1).kraus((0, 2), [np.eye(4)])
    expected = (
        "T  : | 0  |",
        "           ",
        "q0 : ---KR-",
        "        |  ",
        "q1 : -H-|--",
        "        |  ",
        "q2 : ---KR-",
        "",
        "T  : | 0  |",
    )
    expected = "\n".join(expected)
    assert AsciiCircuitDiagram.build_diagram(circ) == expected
| StarcoderdataPython |
1825691 | """Collection of algebraic objects extending :mod:`~qnet.algebra.core`"""
| StarcoderdataPython |
6463432 | <filename>array/computernumer.py
def compute(instructions):
    """Simulate a simple block-laying machine and return its 9-digit hex display.

    The machine has 9 stage counters, each capped at 15 (0xF).
    Instructions:
      'P' -- pick up a block and return to stage 0
      'M' -- move one stage right (clamped at the last stage)
      'L' -- lay the held block on the current stage, incrementing its counter

    Returns the counters rendered as a string of 9 uppercase hex digits.
    """
    stack = [0] * 9
    haveblock = False
    currentstage = 0
    for ac in instructions:
        if ac == 'P':
            haveblock = True
            currentstage = 0
        elif ac == 'M':
            # Clamp at the final stage.  The original bound (!= 9) allowed
            # currentstage to reach 9 and a later 'L' to raise IndexError
            # on the 9-element list.
            if currentstage < len(stack) - 1:
                currentstage += 1
        elif ac == 'L':
            # A block is laid only if one is held and the counter is not full.
            if stack[currentstage] != 15 and haveblock:
                haveblock = False
                stack[currentstage] += 1
    # format(n, 'X') yields '0'-'9' / 'A'-'F', matching the original lookup table.
    return ''.join(format(num, 'X') for num in stack)
print(compute("PLPLPLPLPLPLPLPLPLPL")) | StarcoderdataPython |
11354509 | <reponame>vromanuk/data-driven-web-app<gh_stars>0
from typing import List, Optional
from application.pypi_org.nosql.packages import Package
from application.pypi_org.nosql.releases import Release
def get_latest_releases(limit=10) -> List[Release]:
    """Return the ``limit`` most recently created releases, newest first."""
    releases = Release.objects(). \
        order_by("-created_date"). \
        limit(limit). \
        all()
    return releases
def get_package_count() -> int:
    """Return the total number of packages stored."""
    return Package.objects().count()
def get_release_count() -> int:
    """Return the total number of releases stored."""
    return Release.objects().count()
def get_package_by_id(package_id: str) -> Optional[Package]:
    """Look up a package by id; returns None for empty ids or no match."""
    if not package_id:
        return None

    # Normalize the key (ids are presumably stored lowercase -- confirm
    # against the write path).
    package_id = package_id.strip().lower()
    package = Package.objects().filter(id=package_id).first()
    return package
def all_packages(limit: int) -> List[Package]:
    """Return up to ``limit`` packages as a list."""
    return list(Package.objects().limit(limit))
def get_packages_by_ids(package_ids: List[str]) -> List[Package]:
    """Return all packages whose id is in ``package_ids``."""
    return list(Package.objects(id__in=package_ids))
def get_latest_release_for_package(package_id: str) -> Optional[Release]:
    """Return the newest release of ``package_id``, or None if it has none."""
    return Release.objects(package_id=package_id).order_by('-created_date').first()
#
# def test():
# """
# My test function for practicing sqlalchemy queries
# """
# session = db_session.create_session()
# result = session.query(Package). \
# filter(
# or_(
# Package.id.like("flask"),
# Package.id == "boto3")) \
# .all()
#
# print(result)
# return {'message': result}
| StarcoderdataPython |
6473220 | # Copyright (c) 2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This Python script is intended for the creation of autotuned configurations
for the supported rocPRIM algorithms based on benchmark results. The script
does not update the configurations automatically, the user is responsible for
installation and the correctness of the files
"""
import json
import re
import argparse
import os
def tokenize_test_name(input_name, name_regex):
    """Split a benchmark name into its named fields.

    Applies ``name_regex`` (which must use named capture groups) to
    ``input_name`` and returns the match's group dictionary.
    """
    return re.search(name_regex, input_name).groupdict()
class BenchmarksOfArchitecture:
    """Collects benchmark measurements for a single GPU architecture.

    Measurements are grouped by the benchmarked datatype so the
    best-performing configuration can be chosen per datatype, plus one
    overall fallback configuration.
    """

    def __init__(self, arch_name):
        # Maps datatype name -> list of measurement dicts.
        self.datatypes = {}
        self.arch_name = arch_name

    def add_measurement(self, data_entry):
        """Record one measurement dict under its 'datatype' key."""
        # setdefault replaces the original membership-test-then-append dance.
        self.datatypes.setdefault(data_entry['datatype'], []).append(data_entry)

    @property
    def name(self):
        return self.arch_name

    @property
    def base_config_case(self):
        # Fallback: the best 'int' measurement.  Raises KeyError when no
        # 'int' measurements were recorded -- same as the original behavior.
        return max(self.datatypes['int'], key=lambda x: x['items_per_second'])

    @property
    def specialized_config_cases(self):
        """Best measurement (highest items_per_second) for each datatype."""
        return {
            datatype: max(entries, key=lambda x: x['items_per_second'])
            for datatype, entries in self.datatypes.items()
        }
class Algorithm:
    """
    Aggregates the data for a algorithm, including the generation of
    the configuration file
    """

    def __init__(self, algorithm_name):
        self.name = algorithm_name
        # Maps architecture name -> BenchmarksOfArchitecture.
        self.architectures = {}
        # One generated C++ template instantiation per entry.
        self.configuration_lines = []

    def architecture_exists(self, architecture_name):
        return architecture_name in self.architectures.keys()

    def add_new_architecture(self, architecture_name):
        benchmarks_of_architecture = BenchmarksOfArchitecture(architecture_name)
        self.architectures[architecture_name] = benchmarks_of_architecture

    def get_architecture(self, architecture_name):
        return self.architectures[architecture_name]

    def add_measurement(self, single_benchmark_data):
        # Lazily create the per-architecture container on first sight.
        architecture_name = single_benchmark_data['arch']
        if not self.architecture_exists(architecture_name):
            self.add_new_architecture(architecture_name)
        self.get_architecture(architecture_name).add_measurement(single_benchmark_data)

    def create_config_file_content(self):
        """
        Generate the content of the configuration file, including license
        and header guards, based on general template file
        """
        generated_config_file_content=""
        self.set_configurations()
        configuration= '\n'.join(self.configuration_lines)
        # Resolve the template next to this script, independent of the CWD.
        abs_path_to_script_dir=os.path.dirname(os.path.abspath(__file__))
        path_to_template=(os.path.join(abs_path_to_script_dir, "config_template"))
        with open(path_to_template) as template_file:
            template_file_content = template_file.read()
        generated_config_file_content=template_file_content.format(guard=self.name.upper(), config_body=configuration)
        return generated_config_file_content

    def set_configurations(self):
        """
        Generate each line of configuration, where configuration
        is a valid cpp template instantiation
        """
        # Subclasses supply the _create_* hooks for their specific algorithm.
        self.configuration_lines.append(self._create_general_base_case())
        for benchmarks_of_architecture in self.architectures.values():
            self.configuration_lines.append(self._create_base_case_for_arch(benchmarks_of_architecture))
            self.configuration_lines += self._create_specialized_cases_for_arch(benchmarks_of_architecture)
class AlgorithmDeviceReduce(Algorithm):
    """Emits C++ ``default_reduce_config`` template specializations."""

    def _create_general_base_case(self):
        #Hardcode some configurations in case non of the specializations can be instantiated
        return "template<unsigned int arch, class Value> struct default_reduce_config : reduce_config<256, 4, ::rocprim::block_reduce_algorithm::using_warp_reduce> { };"

    def _create_base_case_for_arch(self, arch):
        # Per-architecture fallback, built from the architecture's base case.
        measurement = arch.base_config_case
        return f"template<class Value> struct default_reduce_config<{arch.name}, Value> : reduce_config<{measurement['block_size']}, {measurement['items_per_thread']}, ::rocprim::block_reduce_algorithm::using_warp_reduce> {{ }};"

    def _create_specialized_cases_for_arch(self, arch):
        # One fully-specialized config per (architecture, datatype) pair.
        out = []
        for key, measurement in arch.specialized_config_cases.items():
            out.append(f"template<> struct default_reduce_config<{arch.name}, {key}> : reduce_config<{measurement['block_size']}, {measurement['items_per_thread']}, ::rocprim::block_reduce_algorithm::using_warp_reduce> {{ }};")
        return out
class AlgorithmFactory:
    """Creates the Algorithm subclass matching an algorithm name."""

    def create_algorithm(self, algorithm_name):
        """Return a new Algorithm for ``algorithm_name``.

        Raises:
            KeyError: if no autotune generator exists for the algorithm.
        """
        if algorithm_name == 'device_reduce':
            return AlgorithmDeviceReduce(algorithm_name)
        # Name the offending key instead of the original bare ``raise(KeyError)``.
        raise KeyError(f"no autotune configuration generator for algorithm '{algorithm_name}'")
class BenchmarkDataManager:
    """
    Aggregates the data from multiple benchmark files containing single benchmark runs
    with different configurations.
    """

    def __init__(self):
        # Maps algorithm name -> Algorithm instance.
        self.algorithms={}
        self.algo_factory = AlgorithmFactory()

    def add_run(self, benchmark_run_file_path, arch):
        """Ingest one benchmark JSON results file for architecture ``arch``."""
        benchmark_run_data = {}
        with open(benchmark_run_file_path) as file_handle:
            benchmark_run_data = json.load(file_handle)
        # The results file embeds the regex used to parse its own test names.
        name_regex = benchmark_run_data['context']['autotune_config_pattern']
        for single_benchmark in benchmark_run_data['benchmarks']:
            tokenized_name = tokenize_test_name(single_benchmark['name'], name_regex)
            # Merge the parsed name fields and the architecture into the entry.
            single_benchmark=dict(single_benchmark, **tokenized_name)
            single_benchmark['arch'] = arch
            self.__add_measurement(single_benchmark)

    def write_configs_to_files(self, base_dir):
        """Write one generated configuration file per algorithm into ``base_dir``."""
        data = self.__generate_configuration()
        for algo_name, config in data.items():
            path_str=os.path.join(base_dir, algo_name)
            with open(path_str, "w") as outfile:
                outfile.write(config)

    def add_new_algorithm(self, algo_name):
        self.algorithms[algo_name] = self.algo_factory.create_algorithm(algo_name)

    def algorithm_exists(self, algo_name):
        return algo_name in self.algorithms.keys()

    def get_algorithm(self, algo_name):
        return self.algorithms[algo_name]

    def __add_measurement(self, single_benchmark_data):
        # Lazily create the Algorithm container on first measurement.
        algorithm_name = single_benchmark_data['algo']
        if not self.algorithm_exists(algorithm_name):
            self.add_new_algorithm(algorithm_name)
        self.get_algorithm(algorithm_name).add_measurement(single_benchmark_data)

    def __generate_configuration(self):
        # Maps algorithm name -> full generated config file content.
        out = {}
        for key, algo in self.algorithms.items():
            out[key] = algo.create_config_file_content()
        return out
def main():
    """CLI entry point: parse benchmark files and write generated configs."""
    parser = argparse.ArgumentParser(description="Tool for generating optimized launch parameters for rocPRIM based on benchmark results")
    parser.add_argument('-b','--benchmark_files', nargs='+', help="Benchmarked architectures listed int the form <arch-id>:<path_to_benchmark>.json")
    parser.add_argument("-p", "--out_basedir", type=str, help="Base dir for the output files, for each algorithm a new file will be created in this directory", required=True)
    args = parser.parse_args()

    benchmark_manager = BenchmarkDataManager()

    # Each -b argument is "<arch-id>:<path>", e.g. "gfx908:results.json".
    for benchmark_run_file_and_arch in args.benchmark_files:
        arch_id, bench_path = benchmark_run_file_and_arch.split(":")
        benchmark_manager.add_run(bench_path, arch_id)

    benchmark_manager.write_configs_to_files(args.out_basedir)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6406301 | import logging
import sys
import uuid
from copy import deepcopy
from joblib import hash as hashy
import graphviz
import ipywidgets as ipy
import networkx as nx
import pandas as pd
from IPython.display import display
from pipy.interactive import InteractiveDict
from pipy.parameters import Iterable, PandasParam
# Route INFO-level log records from the root logger to stdout.
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
def get_label(s):
    """Wrap ``s`` in a bold HTML widget, used as a section heading."""
    return ipy.HTML(f"<b>{s}</b>")
class Step:
    """Base class for one pipeline transformation step.

    Subclasses declare their expected columns in ``_columns`` and their
    tunable parameters in ``_params``; instances copy and override these
    per construction.  Fitted state lives in ``coeffs``.
    """

    # Class-level declarations, specialized by subclasses.
    _columns = {}
    _params = {}
    coeffs = {}

    @property
    def name(self):
        return type(self).__name__

    def __init__(self, params: dict = None, columns: dict = None):
        self.uuid = str(uuid.uuid4())
        self.columns = self._init_columns(columns or {})
        self.params = self._init_params(params or {})

    def _init_columns(self, columns):
        # Deep-copy class defaults so instances do not share mutable state,
        # then overlay any caller-supplied overrides.
        return {k: deepcopy(v).update(columns.get(k)) for k, v in self._columns.items()}

    def _init_params(self, params):
        # PandasParam defaults are updated in place; plain values are
        # replaced by the caller's override (or kept as the default).
        return {
            k: (
                v.update(params.get(k))
                if isinstance(v, PandasParam)
                else params.get(k, v)
            )
            for k, v in self._params.items()
        }

    def _ipython_display_(self):
        display(self.render())

    def _iter(self):
        """Yield (params, input-column) pairs, expanding Iterable params."""
        iter_params = {k for k, v in self.params.items() if isinstance(v, Iterable)}
        if not iter_params:
            for c in self.columns.get("in", []):
                yield self.params, c
        else:
            # One combination per iterable value per input column.
            for k in iter_params:
                for p in self.params[k]:
                    for c in self.columns["in"]:
                        yield {k: p}, c

    def get_columns_in(self):
        # Every declared column except the outputs.
        return [c for k, l in self.columns.items() for c in l if k != "out"]

    def get_columns_out(self):
        """Derive output column names as '<input>|<StepName>(<params>)'."""
        def get_param_str(p):
            param_str = ",".join("{}={}".format(k, v) for k, v in p.items())
            return "({})".format(param_str) if param_str else param_str

        return [
            "{}|{}{}".format(c, self.name, get_param_str(p)) for p, c in self._iter()
        ]

    def get_dag(self):
        """Build the input->output column dependency graph for this step."""
        dag = nx.DiGraph()
        for (p, i), o in zip(self._iter(), self.get_columns_out()):
            dag.add_node(o, params=p)
            dag.add_edge(i, o)
        return dag

    def update_available_columns(self, columns):
        # Offer the upstream columns as选 options on every non-output selector.
        for key, c in self.columns.items():
            if key == "out":
                continue
            c.options = columns

    def render(self):
        """Build the ipywidgets panel showing this step's columns and params."""
        widgets = []
        hspace = ipy.Box([], layout=ipy.Layout(min_width="20px"))
        column_widgets = InteractiveDict(self.columns).render()
        if column_widgets.children:
            widgets.append(get_label("Columns:"))
            widgets.append(ipy.HBox([hspace, column_widgets]))
        param_widgets = InteractiveDict(self.params).render()
        if param_widgets.children:
            widgets.append(get_label("Parameters:"))
            widgets.append(ipy.HBox([hspace, param_widgets]))
        return ipy.VBox(widgets)

    def fit(self, df: pd.DataFrame) -> None:
        # No-op by default; stateful steps override.
        pass

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        # Identity by default; transforming steps override.
        return df

    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
        self.fit(df)
        return self.transform(df)
class Pipeline(Step):
    """A Step that chains a sequence of child steps over a shared DataFrame."""

    _params = {"steps": []}

    def __init__(self, params: dict = None, columns: dict = None):
        super(Pipeline, self).__init__(params, columns)
        self.dag = self.get_dag()
        self.df = pd.DataFrame()
        self.update_available_columns()

    def update_available_columns(self, columns: list = None):
        # Each step may consume any column produced by the steps before it.
        all_columns = []
        for step in self.params["steps"]:
            step.update_available_columns(all_columns.copy())
            all_columns += step.get_columns_out()

    def get_dag(self):
        # Union of every child step's column-dependency graph.
        dag = nx.DiGraph()
        for step in self.params["steps"]:
            dag.update(step.get_dag())
        return dag

    def display_dag(self):
        """Render the pipeline's column/step graph with graphviz."""
        dag = graphviz.Digraph(
            graph_attr={"fixedsize": "false", "outputorder": "edgesfirst"},
            node_attr={
                "height": "0.4",
                "fontsize": "11",
                "style": "filled",
                "color": "white",
            },
            edge_attr={"arrowsize": "0.6"},
        )
        for step in self.params["steps"]:
            columns_in = step.get_columns_in()
            columns_out = step.get_columns_out()
            dag.node(step.name, shape="box", color="lightblue")
            # Keep a step's outputs on the same rank for readability.
            with dag.subgraph() as s:
                for c in columns_out:
                    s.attr(rank="same")
                    s.node(c, shape="box", height="0.2")
            dag.edges([(c, step.name) for c in columns_in])
            dag.edges([(step.name, c) for c in columns_out])
        display(dag)

    def _ipython_display_(self):
        # Tab 1: per-step settings; tab 2: the rendered dependency graph.
        steps = [s.render() for s in self.params["steps"]]
        widget = ipy.Accordion(steps)
        widget.selected_index = None
        for n, s in enumerate(self.params["steps"]):
            widget.set_title(n, s.name)
        output = ipy.Output(
            layout=ipy.Layout(overflow="auto", _webkit_overflow_y="auto")
        )
        with output:
            self.display_dag()
        tabs = ipy.Tab([widget, output])
        tabs.set_title(0, "Steps")
        tabs.set_title(1, "Blueprint")
        display(tabs)

    def fit(self, df: pd.DataFrame) -> None:
        for s in self.params["steps"]:
            s.fit(df)

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        # NOTE(review): transforms chain over self.df, not the df argument --
        # presumably intentional (the pipeline owns its frame); confirm.
        for s in self.params["steps"]:
            self.df = s.transform(self.df)
        return self.df

    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
        # Fit and transform each step in order so later steps see the
        # columns produced by earlier ones.
        for s in self.params["steps"]:
            s.fit(self.df)
            self.df = s.transform(self.df)
        return self.df

    def run(self):
        self.df = self.fit_transform(self.df)
        return self.df
class Skippy(Pipeline):
    """Pipeline that skips recomputation of columns whose inputs are unchanged.

    Column hashes from the previous run are kept in ``coeffs['hashes']``;
    on the next run only changed columns and their dependents are reprocessed.
    """

    coeffs = {"hashes": pd.Series()}

    def get_dependents(self, column):
        """Yield every (transitive) downstream column of ``column`` in the DAG."""
        for dependent in self.dag.successors(column):
            yield dependent
            yield from self.get_dependents(dependent)

    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
        old_hashes = self.coeffs["hashes"]
        if old_hashes.empty:
            # First run: nothing to compare against, process everything.
            df_ = df
        else:
            now_hashes = df.apply(hashy, axis=0)
            # Align both hash series on the full DAG column set before comparing.
            columns = list(self.dag.nodes())
            now_hashes = now_hashes.reindex(columns)
            old_hashes = old_hashes.reindex(columns)
            if now_hashes.equals(old_hashes):
                logger.info("No changes detected - skipping.")
                return df
            changed_columns = now_hashes.index[now_hashes != old_hashes].tolist()
            if len(changed_columns) < now_hashes.shape[0]:
                logger.info(
                    "Changes detected - rerunning pipeline for {} only.".format(
                        changed_columns
                    )
                )
                # Restrict to the changed columns' transitive dependents.
                dependents = {
                    d for c in changed_columns for d in self.get_dependents(c)
                }
                df_ = df[dependents]
            else:
                df_ = df
        df_ = super(Skippy, self).fit_transform(df_)
        # Remember the new hashes (keeping old ones for untouched columns).
        new_hashes = df_.apply(hashy, axis=0)
        self.coeffs["hashes"] = old_hashes.combine_first(new_hashes)
        self.df.update(df_)
        return self.df
| StarcoderdataPython |
6652931 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-19 10:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: narrows the 'division' choice lists
    # on Vehicle/HistoricalVehicle and sets the Incident date verbose name.

    dependencies = [
        ('fleet_management', '0013_auto_20180110_1349'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historicalvehicle',
            name='division',
            field=models.CharField(choices=[(b'all', b'All'), (b'marketing', b'Marketing'), (b'sales', b'Sales')], max_length=120),
        ),
        migrations.AlterField(
            model_name='incident',
            name='incident_date',
            field=models.DateTimeField(verbose_name=b'Incident Date'),
        ),
        migrations.AlterField(
            model_name='vehicle',
            name='division',
            field=models.CharField(choices=[(b'all', b'All'), (b'marketing', b'Marketing'), (b'sales', b'Sales')], max_length=120),
        ),
    ]
| StarcoderdataPython |
89172 | <filename>mlp.py<gh_stars>0
import time
# only required to run python3 examples/cvt_arm.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import Dataset
import numpy as np
import math
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 64 # batch size in every epoch
class CustomDataset(Dataset):
    """Minimal map-style dataset pairing samples with labels.

    Optionally applies ``transform`` to each sample and
    ``target_transform`` to each label at access time.
    """

    def __init__(self, data, targets, transform=None, target_transform=None):
        self.data = data
        self.targets = targets
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.targets)

    def __getitem__(self, idx):
        sample, label = self.data[idx], self.targets[idx]
        sample = sample if self.transform is None else self.transform(sample)
        label = label if self.target_transform is None else self.target_transform(label)
        return sample, label
def trainloader():
    """
    Build the MNIST training DataLoader (normalized, shuffled batches).
    """
    train_dataset = datasets.MNIST(root = 'data/', train=True, download=False,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       # Canonical MNIST mean/std normalization.
                                       transforms.Normalize((0.1307,), (0.3081,))
                                   ]))

    # training set
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=4,
                                               drop_last=True)
    return train_loader
def standard_testloader():
    """
    Build the standard MNIST test-set DataLoader.
    """
    test_dataset = datasets.MNIST(root = 'data/', train=False, download=False,
                                  transform=transforms.Compose([
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.1307,), (0.3081,))
                                  ]))

    # test set
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=100,
                                              shuffle=True,
                                              num_workers=4,
                                              drop_last=False)
    return test_loader
def invariance_testloader():
    """
    Load precomputed invariance adversarial examples with their human labels.
    """
    inv_adv_examples = np.load("invariance_examples/final_l0/inv_adv_examples.npy") # visualize this for sanity check
    human_labels = np.load("invariance_examples/final_l0/human_labels.npy")

    inv_eg_dataset = CustomDataset(data=inv_adv_examples,
                                   targets=human_labels,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.1307,), (0.3081,))
                                   ]))
    inv_eg_dataloader = torch.utils.data.DataLoader(inv_eg_dataset,
                                                    batch_size=10,
                                                    shuffle=True,
                                                    num_workers=4,
                                                    drop_last=False)
    return inv_eg_dataloader
def train(model, epochs, train_loader):
    """Train ``model`` with SGD + cross-entropy for ``epochs`` passes; return it."""
    model = model.to(device)

    # specify loss function
    criterion = nn.CrossEntropyLoss()
    # specify optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    model.train() # prep model for training
    start = time.process_time()
    for epoch in range(epochs):
        # monitor training loss
        train_loss = 0.0
        ###################
        # train the model #
        ###################
        for data, target in train_loader:
            data, target = data.to(device), target.to(device)
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the loss
            loss = criterion(output, target)
            # backward pass: compute gradient of the loss with respect to model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update running training loss
            train_loss += loss.item()*data.size(0)
    end = time.process_time()
    print("time taken for training: {}".format(end - start))
    return model
def test(model, test_loader, mode="standard"):
    """
    Evaluate ``model`` on ``test_loader`` and return overall accuracy (percent).
    """
    model = model.to(device)
    # initialize lists to monitor test loss and accuracy
    test_loss = 0.0
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))

    # specify loss function
    criterion = nn.CrossEntropyLoss()

    model.eval() # prep model for *evaluation*
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update test loss
        test_loss += loss.item()*data.size(0)
        # convert output probabilities to predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to true label
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        # calculate test accuracy for each object class
        for i in range(len(target)):
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1

    # # calculate and print avg test loss
    # test_loss = test_loss/len(test_loader.dataset)
    # print('Test Loss: {:.6f}\n'.format(test_loss))
    # for i in range(10):
    # if class_total[i] > 0:
    # print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
    # str(i), 100 * class_correct[i] / class_total[i],
    # np.sum(class_correct[i]), np.sum(class_total[i])))
    # else:
    # print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))

    test_accuracy = 100. * np.sum(class_correct) / np.sum(class_total)
    # if mode=="standard":
    # print('\n Standard test accuracy: %2d%% (%2d/%2d)' % (test_accuracy,
    # np.sum(class_correct),
    # np.sum(class_total)))
    # elif mode=="robust":
    # print('\n Robust test accuracy: %2d%% (%2d/%2d)' % (test_accuracy,
    # np.sum(class_correct),
    # np.sum(class_total)))
    return test_accuracy
def evaluate_from_scratch(model, epochs, mode="standard"):
    """Train `model` from scratch, then evaluate it on the requested test set.

    Args:
        model: the (untrained) network to train and evaluate.
        epochs (int): number of training epochs passed to `train`.
        mode (str): "standard" for the regular test set, "robust" for the
            invariance test set.

    Returns:
        tuple: (trained model, test accuracy in percent).

    Raises:
        ValueError: if `mode` is neither "standard" nor "robust".
    """
    train_loader = trainloader()
    model = train(model, epochs, train_loader)
    if mode == "standard":
        test_loader = standard_testloader()
    elif mode == "robust":
        test_loader = invariance_testloader()
    else:
        # Previously an unknown mode fell through and raised a confusing
        # NameError on test_loader; fail fast with a clear message instead.
        raise ValueError("mode must be 'standard' or 'robust', got %r" % (mode,))
    testaccuracy = test(model, test_loader, mode=mode)
    return model, testaccuracy
def evaluate_from_pretrained_weights(model, mode="standard"):
    """Evaluate an already-trained `model` on the requested test set.

    Args:
        model: a trained network (weights already loaded).
        mode (str): "standard" for the regular test set, "robust" for the
            invariance test set.

    Returns:
        float: test accuracy in percent.

    Raises:
        ValueError: if `mode` is neither "standard" nor "robust".
    """
    if mode == "standard":
        test_loader = standard_testloader()
    elif mode == "robust":
        test_loader = invariance_testloader()
    else:
        # Previously an unknown mode fell through and raised a confusing
        # NameError on test_loader; fail fast with a clear message instead.
        raise ValueError("mode must be 'standard' or 'robust', got %r" % (mode,))
    testaccuracy = test(model, test_loader, mode=mode)
    return testaccuracy
280285 | <reponame>brennanmcfarland/gan-comparison
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Dense, Conv2D, Conv2DTranspose, UpSampling2D, BatchNormalization, Dropout, \
Activation, GaussianNoise, Reshape, Add, Flatten, LeakyReLU, Input
from tensorflow.keras.constraints import min_max_norm
from tensorflow.keras.models import Model
from tensorflow.keras.utils import plot_model
import numpy as np
import tensorflow as tf
# TODO: clean up this mess
class ToChoices(Layer):
    """Stacks real and fake images into per-sample (fake, real) choice pairs
    and shuffles the pair order according to a supplied index tensor.

    NOTE(review): the implementation hard-codes several shapes (see the
    commented ensure_shape calls and apply_gather's indexing) — presumably it
    assumes batch 32 and 28x28x1 images; confirm before reuse.
    """
    def call(self, inputs): # inputs = (reals, fakes, shuffle indices)
        reals, fakes, shuffle_indices = inputs
        # New axis -2 holds the two choices: index 0 = fake, index 1 = real.
        choice_images = tf.stack((fakes, reals), axis=-2)
        shuffle_indices = tf.cast(shuffle_indices, dtype=tf.int64)
        def apply_gather(x):
            # x stacks [choice_images, broadcast indices]; pull the per-sample
            # permutation out of the broadcast index channel and reorder the
            # choice axis with it.
            indices = x[1,0,0,:,0]
            return tf.gather(x, tf.cast(indices, tf.int32), axis=-2)
        choice_images = tf.cast(choice_images, dtype=tf.float32)
        shuffle_indices = tf.cast(shuffle_indices, dtype=tf.float32)
        # Broadcast the (batch, 2) index tensor to the full image shape so it
        # can be stacked alongside the images and carried through map_fn.
        for i in fakes.shape[1:-1]:
            shuffle_indices = tf.stack([shuffle_indices for _ in range(i)], axis=1)
        shuffle_indices = tf.stack([shuffle_indices for _ in range(fakes.shape[-1])], axis=-1)
        stacked = tf.stack([choice_images, shuffle_indices], axis=1)
        # gather per-sample, then drop the index channel again
        gathered = tf.map_fn(apply_gather, stacked)
        gathered = gathered[:,0]
        choice_images = tf.stack(gathered, axis=0)
        return choice_images
    # output shape is verified to be correct
    def compute_output_shape(self, input_shapes): # (reals, fakes, shuffle indices) shapes
        reals_shape, fakes_shape, _ = input_shapes
        output_shape = reals_shape[:-1].concatenate(reals_shape[-1] + fakes_shape[-1]).concatenate(reals_shape[-1])
        return output_shape
    # TODO: this sucks — batch size 32 is hard-coded; parameterize it.
    def ensure_batch_size(self, input):
        return tf.concat(tf.constant((32,)), (input.shape[1:]))
# concatenate a batch of images with their batch of distributions, as extra channels?
class ConcatWithEncoded(Layer):
    """Concatenates each image with a spatially-broadcast copy of its encoding.

    The (batch, features) encoded tensor is broadcast across the image's
    spatial dimensions and appended as extra channels, yielding a
    (batch, H, W, C + features) tensor.
    """
    def call(self, inputs): # inputs = (images, encoded)
        images, encoded = inputs
        # TODO: batch size and image/encoding shapes are hard-coded; parameterize.
        images, encoded = tf.ensure_shape(images, (32, 28, 28, 1)), tf.ensure_shape(encoded, (32, 100))
        # (batch, features) -> (batch, 1, 1, features) so it can tile spatially.
        encoded = tf.expand_dims(tf.expand_dims(encoded, axis=-2), axis=-2)
        encoded = tf.tile(encoded, (1,) + images.shape[1:])
        output = tf.concat([images, encoded], axis=-1)
        return output
    def compute_output_shape(self, input_shapes): # (images, encoded) shapes
        images_shape, encoded_shape = input_shapes
        # Bug fix: the computed shape was previously built but never returned
        # (the method implicitly returned None); return it explicitly.
        return images_shape[:-1].concatenate(encoded_shape[-1] + images_shape[-1])
    # TODO: this sucks — batch size 32 is hard-coded; parameterize it.
    @tf.function
    def ensure_batch_size(self, input):
        return tf.concat((tf.constant((32,)), input.shape[1:]), axis=0)
# TODO: just copy pasted this from online make it nice, and maybe we can use it
class SpectralNorm(Layer):
    """Spectral normalization via power iteration.

    Divides the (flattened) input weight tensor by an estimate of its largest
    singular value; `u` is a persistent, non-trainable power-iteration vector
    updated on every call. NOTE(review): copy-pasted from an online source
    (see file TODO); `tf.control_dependencies` only orders ops inside
    tf.function/graph contexts — confirm the `u` update actually runs in the
    intended execution mode.
    """
    def build(self, input_shape):
        # Persistent estimate of the leading left-singular vector, shape (1, last_dim).
        self.u = self.add_weight(
            name='u',
            shape=tf.stack((1, input_shape[-1])),
            initializer=tf.random_normal_initializer(),
            trainable=False
        )
        self.built = True
    def call(self, inputs):
        iteration = 1
        w_shape = inputs.shape
        # Flatten to a 2-D matrix (rows collapsed, last axis kept).
        inputs = tf.reshape(inputs, [-1, w_shape[-1]])
        w = inputs
        u_hat = self.u
        v_hat = None
        for i in range(iteration):
            """
            power iteration
            Usually iteration = 1 will be enough
            """
            v_ = tf.matmul(u_hat, tf.transpose(w))
            v_hat = tf.nn.l2_normalize(v_)
            u_ = tf.matmul(v_hat, w)
            u_hat = tf.nn.l2_normalize(u_)
        # Singular-vector estimates must not receive gradients.
        u_hat = tf.stop_gradient(u_hat)
        v_hat = tf.stop_gradient(v_hat)
        # sigma approximates the spectral norm (largest singular value) of w.
        sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
        with tf.control_dependencies([self.u.assign(u_hat)]):
            w_norm = w / sigma
            # Restore the original shape, with a free leading dimension (-1).
            w_norm = tf.reshape(w_norm, tf.pad(w_shape[1:], [[1, 0]], constant_values=-1))
        return w_norm
class EqualizedDense(Dense):
    """Dense layer with equalized learning rate: every weight tensor is passed
    through scale_weights() at call time before being applied."""
    def call(self, inputs):
        result = K.dot(inputs, scale_weights(self.kernel))
        if self.use_bias:
            result = K.bias_add(result, scale_weights(self.bias), data_format='channels_last')
        # Apply the configured activation last, exactly as Dense does.
        if self.activation is None:
            return result
        return self.activation(result)
class EqualizedConv2D(Conv2D):
    """Conv2D layer with equalized learning rate: kernel and bias are passed
    through scale_weights() at call time before being applied."""
    def call(self, inputs):
        scaled_kernel = scale_weights(self.kernel)
        result = K.conv2d(
            inputs,
            scaled_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        if self.use_bias:
            result = K.bias_add(
                result,
                scale_weights(self.bias),
                data_format=self.data_format)
        # Apply the configured activation last, exactly as Conv2D does.
        if self.activation is None:
            return result
        return self.activation(result)
class EqualizedLayer(Layer):
    """Attempted generic wrapper that retrofits weight scaling onto an
    arbitrary sublayer by monkey-patching its build().

    NOTE(review): this class appears broken/unfinished and is likely unused:
    __new__ returns the sublayer (so EqualizedLayer itself is never
    instantiated), and set_weights() is handed `property` objects where Keras
    expects weight arrays, which would raise at build time. new_f also builds
    one property per weight but ignores `w`. Flagging rather than guessing at
    the intended fix.
    """
    def __new__(cls, sublayer, **kwargs):
        # intercept and modify the sublayer's build call
        old_build_f = sublayer.build
        def new_f(obj, old_f, input_shape):
            old_f(input_shape)
            obj.set_weights([
                property(
                    lambda self: scale_weights(self),
                    lambda self, value: setattr(self, self.__name, value)
                ) for w in sublayer.get_weights()])
        new_build_f = lambda input_shape: new_f(sublayer, old_build_f, input_shape)
        sublayer.build = new_build_f
        return sublayer
class PixelNorm(Layer):
    """Pixelwise feature-vector normalization (ProGAN): normalizes each
    feature vector along the channel axis to unit average magnitude."""
    def __init__(self, eps=1e-8, **kwargs):
        # eps: small constant under the square root for numerical stability.
        self.eps = eps
        super(PixelNorm, self).__init__(**kwargs)
    def compute_output_shape(self, input_shape):
        # Normalization is elementwise along channels; shape is unchanged.
        return input_shape
    def call(self, x):
        # Bug fix: the original used TF1-only APIs (tf.variable_scope and the
        # removed `keep_dims` kwarg); this file targets tf.keras / TF2.
        return x / tf.sqrt(tf.reduce_mean(tf.square(x), axis=-1, keepdims=True) + self.eps)
class MiniBatchStdDev(Layer):
    """Minibatch standard-deviation layer (ProGAN discriminator).

    Appends one extra feature map whose every element is the mean (over all
    features) of the per-feature standard deviation across the batch.
    """
    def __init__(self, **kwargs):
        super(MiniBatchStdDev, self).__init__(**kwargs)
    def compute_output_shape(self, input_shape):
        # Bug fix: was input_shape[:-2] + (channels + 1,), which dropped a
        # spatial dimension; only the channel axis grows by one.
        return input_shape[:-1] + (input_shape[-1] + 1,)
    def call(self, x):
        # Per-feature std across the batch, reduced to a single scalar.
        std = tf.keras.backend.std(x, axis=0)
        std = tf.math.reduce_mean(std)
        # Shape of a single input channel (batch, ..., 1), used to size the new map.
        std_shape = tf.shape(tf.expand_dims(tf.unstack(x, axis=-1)[0], axis=-1))
        constant_std_tensor = tf.fill(std_shape, std)
        output_tensor = tf.concat([x, constant_std_tensor], axis=-1)
        return output_tensor
class AdaIN(Layer):
    """Adaptive Instance Normalization (StyleGAN-style).

    Re-normalizes the image's per-channel statistics to match the style's
    mean/variance. Accepts either (image, style_image) or
    (image, style_mean, style_var) as inputs.
    """
    def __init__(self, data_format=None, eps=1e-7, **kwargs):
        # data_format: 'channels_last' (default) or 'channels_first'.
        # eps: variance floor for numerical stability.
        super(AdaIN, self).__init__(**kwargs)
        self.data_format = normalize_data_format(data_format)
        # Axes averaged over when computing instance statistics.
        self.spatial_axis = [1, 2] if self.data_format == 'channels_last' else [2, 3]
        self.eps = eps
    def compute_output_shape(self, input_shape):
        # Output has the shape of the image input (first element).
        return input_shape[0]
    def call(self, inputs):
        image = inputs[0]
        if len(inputs) == 2:
            # Style supplied as an image: derive its statistics.
            # Bug fix: tf.nn.moments' kwarg is `keepdims` in TF2
            # (`keep_dims` was removed).
            style = inputs[1]
            style_mean, style_var = tf.nn.moments(style, self.spatial_axis, keepdims=True)
        else:
            # Style supplied directly as (mean, var) vectors; add spatial dims.
            style_mean = tf.expand_dims(K.expand_dims(inputs[1], self.spatial_axis[0]), self.spatial_axis[1])
            style_var = tf.expand_dims(K.expand_dims(inputs[2], self.spatial_axis[0]), self.spatial_axis[1])
        image_mean, image_var = tf.nn.moments(image, self.spatial_axis, keepdims=True)
        # Whiten with the image statistics, then re-color with the style's.
        out = tf.nn.batch_normalization(image, image_mean,
                                        image_var, style_mean,
                                        tf.sqrt(style_var), self.eps)
        return out
class NoiseInput(Layer):
    """Layer that emits learned Gaussian noise.

    Samples are drawn per call with trainable per-element `means` and
    `variances` weights; the incoming tensor is used only to determine the
    output's dynamic shape.
    """
    def __init__(self, shape, **kwargs):
        self.shape = shape
        super(NoiseInput, self).__init__(**kwargs)
    def compute_output_shape(self, _):
        return self.shape
    def build(self, _):
        # Trainable noise parameters share the output shape minus the batch dim.
        param_shape = self.shape[1:]
        self.means = self.add_weight(name='means',
                                     shape=param_shape,
                                     initializer='random_normal',
                                     trainable=True)
        self.variances = self.add_weight(name='variances',
                                         shape=param_shape,
                                         initializer='random_normal',
                                         trainable=True)
        super(NoiseInput, self).build(self.shape) # Be sure to call this at the end
    def call(self, inputs):
        noise = K.random_normal(shape=K.shape(inputs),
                                mean=0.,
                                stddev=self.variances)
        return noise + self.means
    def get_config(self):
        # `shape` must round-trip through serialization.
        base_config = super(NoiseInput, self).get_config()
        return dict(list(base_config.items()) + [('shape', self.shape)])
class EqualizedNoiseInput(NoiseInput):
    """NoiseInput variant whose means/variances receive equalized-learning-rate
    scaling (scale_weights) before sampling."""
    def call(self, inputs):
        scaled_std = scale_weights(self.variances)
        scaled_mean = scale_weights(self.means)
        return K.random_normal(shape=K.shape(inputs), mean=0., stddev=scaled_std) + scaled_mean
class ScaleLayer(Layer):
    """Multiplies its input elementwise by a fixed scalar factor."""
    def __init__(self, scale_factor, **kwargs):
        super(ScaleLayer, self).__init__(**kwargs)
        self.scale_factor = scale_factor
    def compute_output_shape(self, input_shape):
        # Elementwise scaling keeps the shape.
        return input_shape
    def call(self, x):
        return tf.math.scalar_mul(self.scale_factor, x)
def normalize_data_format(value):
    """Return the canonical Keras data-format string.

    None defaults to 'channels_last'; any casing of the two valid values is
    accepted and lower-cased.

    Raises:
        ValueError: if value names neither of the two Keras data formats.
    """
    if value is None:
        value = 'channels_last'
    data_format = value.lower()
    if data_format in ('channels_first', 'channels_last'):
        return data_format
    raise ValueError('The `data_format` argument must be one of '
                     '"channels_first", "channels_last". Received: ' +
                     str(value))
def scale_weights(w):
    """He-style runtime weight scaling: multiply `w` by sqrt(2 / fan), where
    fan is taken to be the total element count of `w`."""
    fan = tf.size(w, out_type=tf.float32)
    return w * tf.sqrt(2.0 / fan)
3474421 | <filename>lakeshore/model_155.py<gh_stars>1-10
"""Implements functionality unique to the Lake Shore 155 Precision Source"""
from time import sleep
import itertools
from .xip_instrument import XIPInstrument, RegisterBase, StatusByteRegister, StandardEventRegister
class PrecisionSourceOperationRegister(RegisterBase):
    """Class object representing the operation status register"""
    # Register bit names ordered by bit position (LSB first); empty strings
    # mark bits the instrument leaves unused/reserved. The order is defined by
    # the instrument hardware and must not be changed.
    bit_names = [
        "",
        "",
        "",
        "",
        "",
        "waiting_for_trigger_event",
        "waiting_for_arm_event",
        "",
        "",
        "",
        "trigger_model_is_idle",
        "",
        "interlock_is_open"
    ]
    def __init__(self,
                 waiting_for_trigger_event,
                 waiting_for_arm_event,
                 trigger_model_is_idle,
                 interlock_is_open):
        """Stores one boolean flag per named bit of the operation register."""
        self.waiting_for_trigger_event = waiting_for_trigger_event
        self.waiting_for_arm_event = waiting_for_arm_event
        self.trigger_model_is_idle = trigger_model_is_idle
        self.interlock_is_open = interlock_is_open
class PrecisionSourceQuestionableRegister(RegisterBase):
    """Class object representing the questionable status register"""
    # Register bit names ordered by bit position (LSB first); empty strings
    # mark bits the instrument leaves unused/reserved. The order is defined by
    # the instrument hardware and must not be changed.
    bit_names = [
        "voltage_source_in_current_limit",
        "current_source_in_voltage_compliance",
        "",
        "",
        "",
        "",
        "",
        "",
        "calibration_error",
        "inter_processor_communication_error"
    ]
    def __init__(self,
                 voltage_source_in_current_limit,
                 current_source_in_voltage_compliance,
                 calibration_error,
                 inter_processor_communication_error):
        """Stores one boolean flag per named bit of the questionable register."""
        self.voltage_source_in_current_limit = voltage_source_in_current_limit
        self.current_source_in_voltage_compliance = current_source_in_voltage_compliance
        self.calibration_error = calibration_error
        self.inter_processor_communication_error = inter_processor_communication_error
class PrecisionSource(XIPInstrument):
    """A class object representing a Lake Shore 155 precision I/V source"""

    # USB (vendor id, product id) pairs used by the base class for discovery.
    vid_pid = [(0x1FB9, 0x0103)]

    def __init__(self,
                 serial_number=None,
                 com_port=None,
                 baud_rate=115200,
                 flow_control=False,
                 timeout=2.0,
                 ip_address=None,
                 tcp_port=7777,
                 **kwargs):
        # Call the parent init, then fill in values specific to the 155
        XIPInstrument.__init__(self, serial_number, com_port, baud_rate, flow_control, timeout,
                               ip_address, tcp_port, **kwargs)
        # Register classes the base class uses to decode status queries.
        self.status_byte_register = StatusByteRegister
        self.standard_event_register = StandardEventRegister
        self.operation_register = PrecisionSourceOperationRegister
        self.questionable_register = PrecisionSourceQuestionableRegister

    def _sweep_output(self, function_mode, dwell_time, offset_values, amplitude_values,
                      frequency_values):
        """Shared implementation behind sweep_voltage and sweep_current.

        Args:
            function_mode (str):
                "VOLTAGE" or "CURRENT"; selects which SCPI subsystem the
                offset/amplitude commands address.
            dwell_time (float):
                Seconds to wait at each parameter combination.
            offset_values (list):
                DC offset values to sweep over, or None to leave unchanged.
            amplitude_values (list):
                Peak amplitude values to sweep over, or None to leave unchanged.
            frequency_values (list):
                Frequency values in hertz to sweep over, or None to leave unchanged.
        """
        self.command("SOURCE:FUNCTION:MODE " + function_mode)
        # Configure the instrument to output a sine wave
        self.command("SOURCE:FUNCTION:SHAPE SIN")
        # Turn on the output
        self.command("OUTPUT ON")
        # Parameters that were not supplied become a single "skip" entry so
        # that itertools.product still iterates over the others.
        if offset_values is None:
            offset_values = [None]
        if amplitude_values is None:
            amplitude_values = [None]
        if frequency_values is None:
            frequency_values = [None]
        # Step through every combination of the three parameter lists.
        for offset, frequency, amplitude in itertools.product(offset_values, frequency_values,
                                                              amplitude_values):
            parameter_commands = []
            if offset is not None:
                parameter_commands.append("SOURCE:" + function_mode + ":OFFSET " + str(offset))
            if frequency is not None:
                parameter_commands.append("SOURCE:FREQUENCY " + str(frequency))
            if amplitude is not None:
                parameter_commands.append("SOURCE:" + function_mode + ":AMPLITUDE " + str(amplitude))
            self.command(*parameter_commands)
            sleep(dwell_time)

    def sweep_voltage(self,
                      dwell_time,
                      offset_values=None,
                      amplitude_values=None,
                      frequency_values=None):
        """Sweep source output voltage parameters based on list arguments.

        Args:
            dwell_time (float):
                The length of time in seconds to wait at each parameter combination.
                Note that the update rate will be limited by the SCPI communication response time.
                The response time is usually on the order of 10-30 milliseconds.
            offset_values (list):
                DC offset values in volts to sweep over
            amplitude_values (list):
                Peak to peak values in volts to sweep over
            frequency_values (list):
                Frequency values in Hertz to sweep over
        """
        self._sweep_output("VOLTAGE", dwell_time, offset_values, amplitude_values, frequency_values)

    def sweep_current(self,
                      dwell_time,
                      offset_values=None,
                      amplitude_values=None,
                      frequency_values=None):
        """Sweep the source output current parameters based on list arguments.

        Args:
            dwell_time (float):
                The length of time in seconds to wait at each parameter combination.
                Note that the update rate will be limited by the SCPI communication response time.
                The response time is usually on the order of 10-30 milliseconds.
            offset_values (list):
                DC offset values in amps to sweep over
            amplitude_values (list):
                Peak to peak values in amps to sweep over
            frequency_values (list):
                Frequency values in Hertz to sweep over
        """
        self._sweep_output("CURRENT", dwell_time, offset_values, amplitude_values, frequency_values)

    def enable_output(self):
        """Turns on the source output."""
        self.command("OUTPUT ON")

    def disable_output(self):
        """Turns off the source output."""
        self.command("OUTPUT OFF")

    def set_output(self, output_on):
        """Configure the source output on or off.

        Args:
            output_on (bool):
                Turns the source output on when True, off when False.
        """
        if output_on:
            self.enable_output()
        else:
            self.disable_output()

    def route_terminals(self, output_connections_location="REAR"):
        """Configures whether the source output is routed through the front or rear connections.

        Args:
            output_connections_location (str):
                * Valid options are:
                * "REAR" (Output is routed out the rear connections)
                * "FRONT" (Output is routed out the front connections)
        """
        self.command("ROUTE:TERMINALS " + output_connections_location)

    def _output_sine(self, function_mode, amplitude, frequency, offset, phase):
        """Shared implementation behind output_sine_current/output_sine_voltage.

        Args:
            function_mode (str): "VOLTAGE" or "CURRENT".
            amplitude (float): peak amplitude in the mode's unit.
            frequency (float): source frequency in hertz.
            offset (float): DC offset in the mode's unit.
            phase (float): phase shift relative to the reference out, in degrees.
        """
        self.command("SOURCE:FUNCTION:MODE " + function_mode)
        # Configure the instrument to output a sine wave
        self.command("SOURCE:FUNCTION:SHAPE SIN")
        self.command("SOURCE:" + function_mode + ":AMPLITUDE " + str(amplitude))
        self.command("SOURCE:FREQUENCY " + str(frequency))
        self.command("SOURCE:" + function_mode + ":OFFSET " + str(offset))
        self.command("SOURCE:PHASE " + str(phase))
        # Turn on the output
        self.command("OUTPUT ON")

    def output_sine_current(self, amplitude, frequency, offset=0.0, phase=0.0):
        """Configures and enables the source output to be a sine wave current source.

        Args:
            amplitude (float):
                The peak current amplitude value in amps.
            frequency (float):
                The source frequency value in hertz.
            offset (float):
                The DC offset current in amps.
            phase (float):
                Shifts the phase of the output relative to the reference out. Must be between -180 and 180 degrees.
        """
        self._output_sine("CURRENT", amplitude, frequency, offset, phase)

    def output_sine_voltage(self, amplitude, frequency, offset=0.0, phase=0.0):
        """Configures and enables the source output to be a sine wave voltage source.

        Args:
            amplitude (float):
                The peak voltage amplitude value in volts.
            frequency (float):
                The source frequency value in hertz.
            offset (float):
                The DC offset voltage in volts.
            phase (float):
                Shifts the phase of the output relative to the reference out. Must be between -180 and 180 degrees.
        """
        self._output_sine("VOLTAGE", amplitude, frequency, offset, phase)

    def _output_dc(self, function_mode, level):
        """Shared implementation behind output_dc_current/output_dc_voltage."""
        self.command("SOURCE:FUNCTION:MODE " + function_mode)
        self.command("SOURCE:FUNCTION:SHAPE DC")
        self.command("SOURCE:" + function_mode + ":AMPLITUDE " + str(level))
        # Turn on the output
        self.command("OUTPUT ON")

    def output_dc_current(self, current_level):
        """Configures the source output to be a DC current source.

        Args:
            current_level (float):
                The output current level in amps.
        """
        self._output_dc("CURRENT", current_level)

    def output_dc_voltage(self, voltage_level):
        """Configures the source output to be a DC voltage source.

        Args:
            voltage_level (float):
                The output voltage level in volts.
        """
        self._output_dc("VOLTAGE", voltage_level)

    def get_output_settings(self):
        """Returns a dictionary of the output settings."""
        mode = self.query("SOURCE:FUNCTION:MODE?")
        output_settings = {"mode": mode,
                           "output_shape": self.query("SOURCE:FUNCTION:SHAPE?"),
                           "amplitude": float(self.query("SOURCE:" + mode + ":AMPLITUDE?")),
                           "frequency": float(self.query("SOURCE:FREQUENCY?")),
                           "offset": float(self.query("SOURCE:" + mode + ":OFFSET?")),
                           "phase": float(self.query("SOURCE:PHASE?")),
                           # Bug fix: the query returns the string "0" or "1";
                           # bool() of any non-empty string is True, so parse
                           # to int before converting to bool.
                           "autorange": bool(int(self.query("SOURCE:" + mode + ":RANGE:AUTO?"))),
                           "range": self.query("SOURCE:" + mode + ":RANGE?"),
                           "limit": float(self.query("SOURCE:" + mode + ":LIMIT?")),
                           "protection": float(self.query("SOURCE:" + mode + ":PROTECTION?"))}
        return output_settings

    def enable_autorange(self):
        """Enables the instrument to automatically select the best range for the given output parameters."""
        self.command("SOURCE:VOLTAGE:RANGE:AUTO ON")
        self.command("SOURCE:CURRENT:RANGE:AUTO ON")

    def disable_autorange(self):
        """Disables automatic range selection; ranges must then be set manually."""
        self.command("SOURCE:VOLTAGE:RANGE:AUTO OFF")
        self.command("SOURCE:CURRENT:RANGE:AUTO OFF")

    def set_current_range(self, current_range="100E-3"):
        """Manually sets the current range when autorange is disabled.

        Args:
            current_range (str):
                * The range in amps. Valid ranges are:
                * "100E-3"
                * "10E-3"
                * "1E-3"
                * "100E-6"
                * "10E-6"
                * "1E-6"
        """
        self.command("SOURCE:CURRENT:RANGE " + current_range)

    def set_voltage_range(self, voltage_range="10"):
        """Manually sets the voltage range when autorange is disabled.

        Args:
            voltage_range (str):
                * The range in volts. Valid ranges are:
                * "100"
                * "10"
                * "1"
                * "0.1"
                * "0.01"
        """
        self.command("SOURCE:VOLTAGE:RANGE " + voltage_range)

    def set_current_limit(self, current_limit):
        """Sets the highest settable current output value when in current mode.

        Args:
            current_limit (float):
                The maximum settable current in amps. Must be between 0 and 100 milliamps.
        """
        self.command("SOURCE:CURRENT:LIMIT " + str(current_limit))

    def set_voltage_limit(self, voltage_limit):
        """Sets the highest settable voltage output value when in voltage mode.

        Args:
            voltage_limit (float):
                The maximum settable voltage in volts. Must be between 0 and 100 volts.
        """
        self.command("SOURCE:VOLTAGE:LIMIT " + str(voltage_limit))

    def set_current_mode_voltage_protection(self, max_voltage):
        """Sets the maximum voltage level permitted by the instrument when sourcing current.

        Args:
            max_voltage (float):
                The maximum permissible voltage. Must be between 1 and 100 volts.
        """
        self.command("SOURCE:CURRENT:PROTECTION " + str(max_voltage))

    def set_voltage_mode_current_protection(self, max_current):
        """Sets the maximum current level permitted by the instrument when sourcing voltage.

        Args:
            max_current (float):
                The maximum permissible current in amps.
        """
        self.command("SOURCE:VOLTAGE:PROTECTION " + str(max_current))

    def enable_ac_high_voltage_compliance(self):
        """Configures the current mode compliance voltage to be 100V in AC output modes."""
        self.command("SOURCE:CURRENT:AC:VRANGE 100")

    def disable_ac_high_voltage_compliance(self):
        """Configures the current mode compliance voltage to be 10V in AC output modes."""
        self.command("SOURCE:CURRENT:AC:VRANGE 10")
# Backwards-compatible alias matching the instrument's product/model number.
Model155 = PrecisionSource
| StarcoderdataPython |
1902635 | __author__ = 'bs'
import cv2
import numpy as np
from config.Const import *
from tools import Utils
from matplotlib.pyplot import figure
def simpleTextureMap():
    """Warp the ITU logo onto the ITU map via a user-selected homography.

    The user picks four point correspondences with the mouse; the logo is
    warped by the resulting homography and blended 50/50 over the map, shown
    until a key is pressed.
    """
    logo = cv2.imread(ITU_LOGO)
    itu_map = cv2.imread(ITU_MAP)
    homography, _points = Utils.getHomographyFromMouse(logo, itu_map, 4)
    map_h, map_w, _depth = itu_map.shape
    warped_logo = cv2.warpPerspective(logo, homography, (map_w, map_h))
    blended = cv2.addWeighted(itu_map, 0.5, warped_logo, 0.5, 0)
    cv2.imshow("Overlayed Image", blended)
    cv2.waitKey(0)
def textureMapGroundFloor():
    """Overlay a texture on the ground floor video with a user-chosen homography.

    The homography from the texture to the video frame is selected with the
    mouse on the first frame, then the warped texture is blended over every
    frame. Press 'q' to quit early; the capture and windows are released on
    exit.
    """
    texture = cv2.imread(TEXTURE)
    cap = cv2.VideoCapture(GROUND_FLOOR_VIDEO)
    # Let the user define the texture -> frame homography on the first frame.
    running, first_frame = cap.read()
    H, _points = Utils.getHomographyFromMouse(texture, first_frame, -1)
    h, w, _d = first_frame.shape
    # The warp depends only on the static texture and homography, so compute
    # it once here instead of once per frame inside the loop.
    overlay = cv2.warpPerspective(texture, H, (w, h))
    # Blending weights: video dominant, texture translucent; small gamma lift.
    frame_weight = 0.9
    overlay_weight = 0.1
    gamma = 9
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # End of stream. The original detected this via a bare `except`
            # around addWeighted, which also silently hid genuine errors.
            break
        blended = cv2.addWeighted(frame, frame_weight, overlay, overlay_weight, gamma)
        cv2.imshow("Overlayed Image", blended)
        if cv2.waitKey(DELAY) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def texturemapGridSequence():
    """Track a 9x6 chessboard through a video and mark four corner points.

    Detects the chessboard in each (downsampled) frame, draws the detected
    corners, and circles the four outer corners given by `idx`. Despite the
    name, no texture is mapped yet — this is the skeleton for that step.
    """
    fn = GRID_1
    cap = cv2.VideoCapture(fn)
    drawContours = True
    texture = cv2.imread(ITU_LOGO)
    texture = cv2.pyrDown(texture)
    mTex,nTex,t = texture.shape
    # read the first frame to size things and prime the loop condition
    running, imgOrig = cap.read()
    mI,nI,t = imgOrig.shape
    cv2.imshow("win2",imgOrig)
    # inner-corner grid of the chessboard pattern
    pattern_size = (9, 6)
    # indices of the four outer corners of the 9x6 corner grid
    idx = [0,8,45,53]
    while(running):
        # advance to the next frame; `running` goes False at end of stream
        running, imgOrig = cap.read()
        if(running):
            imgOrig = cv2.pyrDown(imgOrig)
            gray = cv2.cvtColor(imgOrig,cv2.COLOR_BGR2GRAY)
            found, corners = cv2.findChessboardCorners(gray, pattern_size)
            if found:
                # refine corner positions to sub-pixel accuracy before drawing
                term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
                cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
                cv2.drawChessboardCorners(imgOrig, pattern_size, corners, found)
                for t in idx:
                    cv2.circle(imgOrig,(int(corners[t,0,0]),int(corners[t,0,1])),10,(255,t,t))
            cv2.imshow("win2",imgOrig)
            cv2.waitKey(DELAY)
4872936 | <filename>6_Builtin_Functions/_dir.py<gh_stars>0
"""
dir
"""
import math
__version__ = 1.0 # module attribute; appears in the dir() output below
def test():
    """Placeholder function; defined only so its name shows up in dir()."""
    pass
print(dir()) # list the names that the current module defines
print()
print(dir(math)) # list names from the math module
print()
var = "teste"
print(dir(var)) # attributes and methods of a str instance
print()
var = 5
print(dir(var)) # attributes and methods of an int instance
| StarcoderdataPython |
163122 | <reponame>leferrad/rl-3
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""""""
__author__ = 'leferrad'
import argparse
import sys
import time
if __name__ == '__main__':
    # Parse the camera device index from the command line.
    # NOTE(review): `args` is not used in the visible portion of this script
    # (and `sys`/`time` are imported but unused) — the file looks truncated.
    parser = argparse.ArgumentParser()
    parser.add_argument('-src', '--source', dest='source', type=int,
                        default=0, help='Device index of the camera.')
    args = parser.parse_args()
4893044 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import collections
from typing import Any, Iterable, cast, DefaultDict, TYPE_CHECKING, FrozenSet
from numpy import sqrt
from cirq import _compat, devices, ops, circuits, value
from cirq.devices.grid_qubit import GridQubit
from cirq.ops import raw_types
from cirq.value import Duration
from cirq.neutral_atoms import convert_to_neutral_atom_gates
if TYPE_CHECKING:
import cirq
def _subgate_if_parallel_gate(gate: 'cirq.Gate') -> 'cirq.Gate':
    """Returns gate.sub_gate if gate is a ParallelGate, else returns gate"""
    if isinstance(gate, ops.ParallelGate):
        return gate.sub_gate
    return gate
def neutral_atom_gateset(max_parallel_z=None, max_parallel_xy=None):
    """Builds the gateset natively supported by a neutral atom device.

    Controlled gates are restricted to integer exponents; single-qubit gates
    may be applied in parallel up to the given limits (None = unlimited).
    """
    controlled_families = [
        ops.AnyIntegerPowerGateFamily(gate_type)
        for gate_type in (ops.CNotPowGate, ops.CCNotPowGate, ops.CZPowGate, ops.CCZPowGate)
    ]
    parallel_families = [
        ops.ParallelGateFamily(ops.ZPowGate, max_parallel_allowed=max_parallel_z),
        ops.ParallelGateFamily(ops.XPowGate, max_parallel_allowed=max_parallel_xy),
        ops.ParallelGateFamily(ops.YPowGate, max_parallel_allowed=max_parallel_xy),
        ops.ParallelGateFamily(ops.PhasedXPowGate, max_parallel_allowed=max_parallel_xy),
    ]
    return ops.Gateset(
        *controlled_families,
        *parallel_families,
        ops.MeasurementGate,
        ops.IdentityGate,
        unroll_circuit_op=False,
        accept_global_phase_op=False,
    )
@value.value_equality
class NeutralAtomDevice(devices.Device):
"""A device with qubits placed on a grid."""
    def __init__(
        self,
        measurement_duration: 'cirq.DURATION_LIKE',
        gate_duration: 'cirq.DURATION_LIKE',
        control_radius: float,
        max_parallel_z: int,
        max_parallel_xy: int,
        max_parallel_c: int,
        qubits: Iterable[GridQubit],
    ) -> None:
        """Initializes the description of the AQuA device.

        Args:
            measurement_duration: the maximum duration of a measurement.
            gate_duration: the maximum duration of a gate
            control_radius: the maximum distance between qubits for a controlled
                gate. Distance is measured in units of the indices passed into
                the GridQubit constructor.
            max_parallel_z: The maximum number of qubits that can be acted on
                in parallel by a Z gate
            max_parallel_xy: The maximum number of qubits that can be acted on
                in parallel by a local XY gate
            max_parallel_c: the maximum number of qubits that can be acted on in
                parallel by a controlled gate. Must be less than or equal to the
                lesser of max_parallel_z and max_parallel_xy
            qubits: Qubits on the device, identified by their x, y location.
                Must be of type GridQubit

        Raises:
            ValueError: if the wrong qubit type is provided or if invalid
                parallel parameters are provided
        """
        # Normalize the duration-like inputs to cirq Duration objects.
        self._measurement_duration = Duration(measurement_duration)
        self._gate_duration = Duration(gate_duration)
        self._control_radius = control_radius
        self._max_parallel_z = max_parallel_z
        self._max_parallel_xy = max_parallel_xy
        if max_parallel_c > min(max_parallel_z, max_parallel_xy):
            # NOTE(review): the two concatenated string literals are missing a
            # separating space ("...to themin of..."); cannot be fixed in a
            # documentation-only change since it is a runtime string.
            raise ValueError(
                "max_parallel_c must be less than or equal to the"
                "min of max_parallel_z and max_parallel_xy"
            )
        self._max_parallel_c = max_parallel_c
        # XY gates with no parallelism cap: allowed only when they cover every
        # qubit on the device (checked in validate_operation).
        self.xy_gateset_all_allowed = ops.Gateset(
            ops.ParallelGateFamily(ops.XPowGate),
            ops.ParallelGateFamily(ops.YPowGate),
            ops.ParallelGateFamily(ops.PhasedXPowGate),
            unroll_circuit_op=False,
            accept_global_phase_op=False,
        )
        # Controlled gates are native only with integer exponents.
        self.controlled_gateset = ops.Gateset(
            ops.AnyIntegerPowerGateFamily(ops.CNotPowGate),
            ops.AnyIntegerPowerGateFamily(ops.CCNotPowGate),
            ops.AnyIntegerPowerGateFamily(ops.CZPowGate),
            ops.AnyIntegerPowerGateFamily(ops.CCZPowGate),
            unroll_circuit_op=False,
            accept_global_phase_op=False,
        )
        # Full native gateset with the device's parallelism limits applied.
        self.gateset = neutral_atom_gateset(max_parallel_z, max_parallel_xy)
        for q in qubits:
            if not isinstance(q, GridQubit):
                raise ValueError(f'Unsupported qubit type: {q!r}')
        self.qubits = frozenset(qubits)
        # Metadata records adjacency (all ordered adjacent pairs) and the gateset.
        self._metadata = devices.GridDeviceMetadata(
            [(a, b) for a in self.qubits for b in self.qubits if a.is_adjacent(b)], self.gateset
        )
    @property
    def metadata(self) -> devices.GridDeviceMetadata:
        """Returns the grid-device metadata (adjacency pairs and gateset) built in __init__."""
        return self._metadata
    @_compat.deprecated(fix='Use metadata.qubit_set if applicable.', deadline='v0.15')
    def qubit_set(self) -> FrozenSet['cirq.GridQubit']:
        """Deprecated accessor for the device's qubit set; use `metadata` instead."""
        return self.qubits
def qubit_list(self):
return [qubit for qubit in self.qubits]
    @_compat.deprecated(
        fix='Use cirq.ConvertToNeutralAtomGates() instead to decompose operations.',
        deadline='v0.15',
    )
    def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:
        """Deprecated: decomposes `operation` into this device's native gates."""
        return convert_to_neutral_atom_gates.ConvertToNeutralAtomGates().convert(operation)
def duration_of(self, operation: ops.Operation):
"""Provides the duration of the given operation on this device.
Args:
operation: the operation to get the duration of
Returns:
The duration of the given operation on this device
Raises:
ValueError: If the operation provided doesn't correspond to a native
gate
"""
self.validate_operation(operation)
if isinstance(operation, ops.GateOperation):
if isinstance(operation.gate, ops.MeasurementGate):
return self._measurement_duration
return self._gate_duration
def validate_gate(self, gate: ops.Gate):
"""Raises an error if the provided gate isn't part of the native gate set.
Args:
gate: the gate to validate
Raises:
ValueError: If the given gate is not part of the native gate set.
"""
if gate not in self.gateset:
if isinstance(gate, (ops.CNotPowGate, ops.CZPowGate, ops.CCXPowGate, ops.CCZPowGate)):
raise ValueError('controlled gates must have integer exponents')
raise ValueError(f'Unsupported gate: {gate!r}')
    def validate_operation(self, operation: ops.Operation):
        """Raises an error if the given operation is invalid on this device.

        Args:
            operation: the operation to validate

        Raises:
            ValueError: If the operation is not valid
        """
        if not isinstance(operation, ops.GateOperation):
            raise ValueError(f'Unsupported operation: {operation!r}')

        # All qubits the operation acts on must be on the device
        for q in operation.qubits:
            if q not in self.qubits:
                raise ValueError(f'Qubit not on device: {q!r}')

        # The operation must be in the native gateset; the one exception is an
        # uncapped parallel XY gate that covers every qubit on the device.
        if operation not in self.gateset and not (
            operation in self.xy_gateset_all_allowed and len(operation.qubits) == len(self.qubits)
        ):
            raise ValueError(f'Unsupported operation: {operation!r}')

        # Controlled gates additionally obey the parallel-count cap and the
        # control-radius constraint between every pair of involved qubits.
        if operation in self.controlled_gateset:
            if len(operation.qubits) > self._max_parallel_c:
                raise ValueError(
                    'Too many qubits acted on in parallel by a controlled gate operation'
                )
            for p in operation.qubits:
                for q in operation.qubits:
                    if self.distance(p, q) > self._control_radius:
                        raise ValueError(f"Qubits {p!r}, {q!r} are too far away")
    def validate_moment(self, moment: circuits.Moment):
        """Raises an error if the given moment is invalid on this device.

        Args:
            moment: The moment to validate

        Raises:
            ValueError: If the given moment is invalid
        """
        super().validate_moment(moment)
        # Native gate families used to classify each operation in the moment.
        CATEGORIES = {
            'Z': (ops.ZPowGate,),
            'XY': (
                ops.XPowGate,
                ops.YPowGate,
                ops.PhasedXPowGate,
            ),
            'controlled': (
                ops.CNotPowGate,
                ops.CZPowGate,
                ops.CCXPowGate,
                ops.CCZPowGate,
            ),
            'measure': (ops.MeasurementGate,),
        }
        categorized_ops: DefaultDict = collections.defaultdict(list)
        for op in moment.operations:
            assert isinstance(op, ops.GateOperation)
            for k, v in CATEGORIES.items():
                assert isinstance(v, tuple)
                # Parallel gates are classified by the gate they parallelize.
                gate = _subgate_if_parallel_gate(op.gate)
                if isinstance(gate, v):
                    categorized_ops[k].append(op)
        # All simultaneous gates of one category must be the identical gate
        # (just applied to different qubits).
        for k in ['Z', 'XY', 'controlled']:
            if len(set(_subgate_if_parallel_gate(op.gate) for op in categorized_ops[k])) > 1:
                raise ValueError(f"Non-identical simultaneous {k} gates")
        num_parallel_xy = sum([len(op.qubits) for op in categorized_ops['XY']])
        num_parallel_z = sum([len(op.qubits) for op in categorized_ops['Z']])
        has_measurement = len(categorized_ops['measure']) > 0
        controlled_qubits_lists = [op.qubits for op in categorized_ops['controlled']]
        # Controlled gates: bounded total qubit count, exclusive with single
        # qubit gates, and no two controlled operations within control radius.
        if sum([len(l) for l in controlled_qubits_lists]) > self._max_parallel_c:
            raise ValueError("Too many qubits acted on by controlled gates")
        if controlled_qubits_lists and (num_parallel_xy or num_parallel_z):
            raise ValueError(
                "Can't perform non-controlled operations at same time as controlled operations"
            )
        if self._are_qubit_lists_too_close(*controlled_qubits_lists):
            raise ValueError("Interacting controlled operations")
        if num_parallel_z > self._max_parallel_z:
            raise ValueError("Too many simultaneous Z gates")
        # XY gates may exceed the parallel limit only when they act on every
        # qubit of the device at once.
        if num_parallel_xy > self._max_parallel_xy and num_parallel_xy != len(self.qubits):
            raise ValueError("Bad number of simultaneous XY gates")
        if has_measurement:
            if controlled_qubits_lists or num_parallel_z or num_parallel_xy:
                raise ValueError("Measurements can't be simultaneous with other operations")
def _are_qubit_lists_too_close(self, *qubit_lists: Iterable[raw_types.Qid]) -> bool:
if len(qubit_lists) < 2:
return False
if len(qubit_lists) == 2:
a, b = qubit_lists
return any(self.distance(p, q) <= self._control_radius for p in a for q in b)
return any(
self._are_qubit_lists_too_close(a, b) for a, b in itertools.combinations(qubit_lists, 2)
)
def can_add_operation_into_moment(
self, operation: ops.Operation, moment: circuits.Moment
) -> bool:
"""Determines if it's possible to add an operation into a moment.
An operation can be added if the moment with the operation added is valid.
Args:
operation: The operation being added.
moment: The moment being transformed.
Returns:
Whether or not the moment will validate after adding the operation.
Raises:
ValueError: If either of the given moment or operation is invalid
"""
if not super().can_add_operation_into_moment(operation, moment):
return False
try:
self.validate_moment(moment.with_operation(operation))
except:
return False
return True
def validate_circuit(self, circuit: circuits.AbstractCircuit):
"""Raises an error if the given circuit is invalid on this device.
A circuit is invalid if any of its moments are invalid or if there is a
non-empty moment after a moment with a measurement.
Args:
circuit: The circuit to validate
Raises:
ValueError: If the given circuit can't be run on this device
"""
super().validate_circuit(circuit)
# Measurements must be in the last non-empty moment
has_measurement_occurred = False
for moment in circuit:
if has_measurement_occurred:
if len(moment.operations) > 0:
raise ValueError("Non-empty moment after measurement")
for operation in moment.operations:
if isinstance(operation.gate, ops.MeasurementGate):
has_measurement_occurred = True
def _value_equality_values_(self) -> Any:
return (
self._measurement_duration,
self._gate_duration,
self._max_parallel_z,
self._max_parallel_xy,
self._max_parallel_c,
self._control_radius,
self.qubits,
)
    def __repr__(self) -> str:
        # Repr in constructor-call form; qubits are sorted so the output is
        # deterministic regardless of internal set ordering.
        return (
            'cirq.NeutralAtomDevice('
            f'measurement_duration={self._measurement_duration!r}, '
            f'gate_duration={self._gate_duration!r}, '
            f'max_parallel_z={self._max_parallel_z!r}, '
            f'max_parallel_xy={self._max_parallel_xy!r}, '
            f'max_parallel_c={self._max_parallel_c!r}, '
            f'control_radius={self._control_radius!r}, '
            f'qubits={sorted(self.qubits)!r})'
        )
def neighbors_of(self, qubit: 'cirq.GridQubit') -> Iterable['cirq.GridQubit']:
"""Returns the qubits that the given qubit can interact with."""
possibles = [
GridQubit(qubit.row + 1, qubit.col),
GridQubit(qubit.row - 1, qubit.col),
GridQubit(qubit.row, qubit.col + 1),
GridQubit(qubit.row, qubit.col - 1),
]
return [e for e in possibles if e in self.qubits]
def distance(self, p: 'cirq.Qid', q: 'cirq.Qid') -> float:
p = cast(GridQubit, p)
q = cast(GridQubit, q)
return sqrt((p.row - q.row) ** 2 + (p.col - q.col) ** 2)
    def __str__(self) -> str:
        # Render the device as a text grid: each qubit at its (col, row)
        # position, with lines drawn to each orthogonally adjacent neighbour.
        diagram = circuits.TextDiagramDrawer()
        for q in self.qubits:
            diagram.write(q.col, q.row, str(q))
            for q2 in self.neighbors_of(q):
                diagram.grid_line(q.col, q.row, q2.col, q2.row)
        return diagram.render(horizontal_spacing=3, vertical_spacing=2, use_unicode_characters=True)
def _repr_pretty_(self, p: Any, cycle: bool):
"""iPython (Jupyter) pretty print."""
p.text("cirq.NeutralAtomDevice(...)" if cycle else self.__str__())
| StarcoderdataPython |
9712515 | # coding: utf-8
# Goal: Rename multiple mp3 files with their properties to get them ready for iTunes
"""
Idea space:
- Ask user to delete the Cover_Images folder on end
"""
import eyed3 as d3
import os, os.path, datetime, requests, re, string, PIL.Image, youtube_dl, json
from bs4 import BeautifulSoup
# path = "C:\\Users\\Flavio\\Music\\Youtube\\Weiteres"
# os.chdir(path)
d3.log.setLevel("ERROR") # So there are no warnings for non-standard genres
# todo: Cut off playlist
def nameTracks(folderpath, genre="[Hip-Hop/Rap]"):
    """Tag and rename every "Artist - TrackName" mp3 in *folderpath* as a single.

    Each matching track gets the given genre, the current year, track number 1
    and an album name of "<Title> - Single" (features stripped), plus a cover
    image fetched from genius.com when one can be found.  The file itself is
    then renamed to "<Title>.mp3".

    Args:
        folderpath: directory containing the mp3 files to process.
        genre: ID3 genre string to assign (default "[Hip-Hop/Rap]").
    """
    for file in os.listdir(folderpath):
        # Guard clauses: skip non-mp3 files and files without "Artist - Title".
        if not file.endswith(".mp3"):
            print("File not formatted because not mp3!")
            continue
        if file.find("-") == -1:
            print("File already formatted or not named properly! ")
            continue
        filepath = folderpath + "/" + file
        trackArtist = file.partition("-")[0].strip()
        title = file.partition(" - ")[2].partition('.mp3')[0].strip()
        singleCover = findSingleCover(trackArtist, title)
        audiofile = d3.load(filepath)
        audiofile.tag.genre = genre
        audiofile.tag.recording_date = datetime.datetime.now().year
        audiofile.tag.artist = trackArtist
        audiofile.tag.track_num = 1
        # Features are stripped from the album name ("Song ft. X" -> "Song - Single").
        if title.find(" ft.") != -1:
            audiofile.tag.album = title.partition(" ft.")[0] + " - Single"
        else:
            audiofile.tag.album = title + ' - Single'
        if singleCover != "Error":
            # Bug fix: read the cover via a context manager so the file handle
            # is closed (the old open(...).read() leaked it).
            with open(singleCover, "rb") as cover_file:
                audiofile.tag.images.set(3, cover_file.read(), "image/jpeg")
        audiofile.tag.title = title
        audiofile.tag.save()
        # Rename the whole file to have just the title of the track.
        os.rename(filepath, folderpath + "/" + title + ".mp3")
    print("All Tracks managed! ")
def nameAlbum(folderpath, artist, album, genre="[Hip-Hop/Rap]"):
    """Tag every mp3 in *folderpath* as one album by *artist*.

    Track numbers are looked up in the genius.com tracklist; when a title
    cannot be matched the user is asked for the number manually.  The album
    cover is fetched once and embedded into every track.

    Args:
        folderpath: directory containing the album's mp3 files.
        artist: album artist name.
        album: album title.
        genre: ID3 genre string to assign (default "[Hip-Hop/Rap]").
    """
    trackList = generateTracklist(artist, album)
    cover = findAlbumCover(artist, album)
    for file in os.listdir(folderpath):
        if file.endswith(".mp3"):
            title = file.partition(".mp3")[0]
            audiofile = d3.load(folderpath + "/" + file)
            audiofile.tag.genre = genre
            audiofile.tag.recording_date = datetime.datetime.now().year
            audiofile.tag.artist = artist
            try:
                # Strip features before matching against the fetched tracklist.
                # TODO: compare case-insensitively and handle characters like "'".
                trackNum = trackList.index(
                    string.capwords(title.partition(" ft.")[0].partition(" feat.")[0])
                ) + 1  # tracklist is 0-based, track numbers are 1-based
                audiofile.tag.track_num = trackNum
            except (ValueError, AttributeError):
                # ValueError: title not found in the tracklist; AttributeError:
                # no tracklist could be fetched at all.  Fall back to manual
                # entry instead of the old bare `except:`.
                print("Error occured, track has to be numbered manually")
                number = input("Enter track number of " + title + " : ")
                audiofile.tag.track_num = int(number)
            audiofile.tag.album = album
            if cover != "Error":
                # Close the cover file deterministically (was a leaked handle).
                with open(cover, "rb") as cover_file:
                    audiofile.tag.images.set(3, cover_file.read(), "image/jpeg")
            audiofile.tag.title = title
            audiofile.tag.save()
    print("Album finished! ")
def generateTracklist(artist, album):
    """Fetch the tracklist of *album* by *artist* from genius.com.

    Returns:
        A list of capwords-normalised track titles (trailing "Lyrics" label,
        features and anything in brackets stripped).  An empty list is
        returned when nothing is found or the page cannot be parsed — the old
        code fell through to an implicit None, which crashed callers that
        expect a list.
    """
    base = "https://genius.com/albums"
    url = base + "/" + artist.replace(" ", "-") + "/" + album.replace(" ", "-")
    raw = requests.get(url)
    soup = BeautifulSoup(raw.text, "html.parser")
    try:
        titles = soup.findAll(class_="chart_row-content-title")
        for i in range(len(titles)):
            # Drop the "Lyrics" suffix and collapse newline/&nbsp; artifacts.
            titles[i] = re.sub(
                " +", " ",
                titles[i].text.partition("Lyrics")[0].replace("\n", "").replace("\xa0", " "),
            ).replace("’", "").strip()
            # Cut features / bracketed extras off for better comparison.
            titles[i] = string.capwords(re.sub("[(\[].*?[)\]]", "", titles[i]).strip())
        if len(titles) == 0:
            print("Could not find titles to album")
        return titles
    except Exception:
        # Page layout changed or parsing failed: behave as "nothing found".
        print("Could not find titles to album")
        return []
def findAlbumCover(artist, album):
    """Download the cover of *album* by *artist* from genius.com.

    The image is stored as a 1000x1000 jpeg under ./Cover_Images (the folder
    is created on demand).

    Returns:
        The local path of the downloaded image, or the string "Error" when no
        cover could be found or downloaded.
    """
    base = "https://genius.com/albums"
    url = base + "/" + artist.replace(" ", "-") + "/" + album.replace(" ", "-")
    raw = requests.get(url)
    imagePath = getcwdFormat() + "/" + "Cover_Images/"
    if not os.path.exists("Cover_Images"):
        os.mkdir("Cover_Images")
    soup = BeautifulSoup(raw.text, "html.parser")
    try:
        # The first entry of the cover's srcset attribute is the image URL.
        imageURL = soup.findAll(class_="cover_art-image")[0]['srcset'].split(" ")[0]
        splittedLink = imageURL.split("/")
        # Request the image in 1000x1000 resolution.
        splittedLink[4] = "1000x1000"
        imageURL = "/".join(splittedLink)
        coverRaw = requests.get(imageURL, stream=True)
        filename = artist + "_" + album + ".jpg"
        with open(imagePath + filename, "wb") as outfile:
            for block in coverRaw.iter_content(1024):
                if not block:
                    break
                outfile.write(block)
        print("Cover found! Resolution is: " +
              str(PIL.Image.open(imagePath + filename).size))
        return imagePath + filename
    except Exception:
        # Narrowed from a bare `except:` — any network/parsing failure is
        # reported via the sentinel "Error", but Ctrl-C still interrupts.
        print("Error, cover not found")
        return "Error"
def findSingleCover(artist, single):
    """Download the cover art for *single* by *artist* from genius.com.

    Mirrors findAlbumCover: the image is stored as a 1000x1000 jpeg under
    ./Cover_Images (created on demand).

    Returns:
        The local path of the downloaded image, or the string "Error" when no
        cover could be found or downloaded.
    """
    base = "https://genius.com/"
    url = base + artist.replace(" ", "-") + "-" + single.replace(",", "").replace(" ", "-") + "-lyrics"
    raw = requests.get(url)
    imagePath = getcwdFormat() + "/" + "Cover_Images/"
    if not os.path.exists("Cover_Images"):
        os.mkdir("Cover_Images")
    soup = BeautifulSoup(raw.text, "html.parser")
    try:
        imageURL = soup.findAll(class_="cover_art-image")[0]["src"]
        splittedLink = imageURL.split("/")
        # Request the image in 1000x1000 resolution.
        splittedLink[4] = "1000x1000"
        imageURL = "/".join(splittedLink)
        coverRaw = requests.get(imageURL, stream=True)
        filename = artist + "_" + single + ".jpg"
        with open(imagePath + filename, "wb") as outfile:
            for block in coverRaw.iter_content(1024):
                if not block:
                    break
                outfile.write(block)
        print("Cover found for track " + single)
        return imagePath + filename
    except Exception:
        # Narrowed from a bare `except:`; failures yield the "Error" sentinel.
        print("Error, cover not found for track " + single)
        return "Error"
# Download lsit of URLs
def downLoadTracks(trackList, folder=""):
    """Download each YouTube URL in *trackList* as a 192kbps mp3.

    Downloaded files are then moved from the working directory into *folder*
    with their youtube-dl id and bracketed suffixes stripped (see
    renameDownloadTrack).

    Args:
        trackList: list of video URLs to download.
        folder: name of the sub-folder the renamed files are moved into.
    """
    ydl_opts = {
        'format': 'bestaudio',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
    }
    for track_url in trackList:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            try:
                ydl.download([track_url])
            except Exception:
                # Narrowed from a bare `except:`; one failed download should
                # not abort the rest of the queue.
                print("Could not download track!")
    for file in os.listdir(getcwdFormat()):
        if file.endswith(".mp3"):
            try:
                os.rename(getcwdFormat() + "/" + file,
                          getcwdFormat() + "/" + folder + "/" + renameDownloadTrack(file))
            except (OSError, ValueError):
                # OSError: target already exists / cannot be moved;
                # ValueError: file name has no "-" for renameDownloadTrack.
                print("File already exists!")
# Deletes the url id of youtube_dl and cuts off things in brackets like (Audio) because no one wants this
def renameDownloadTrack(trackName):
    """Strip the trailing youtube-dl video id and any bracketed text such as
    "(Audio)" from a downloaded file name, returning it with ".mp3" appended."""
    # Everything after the last "-" is the youtube-dl video id.
    base = trackName[:trackName.rindex("-")]
    # Remove "(...)" / "[...]" segments.
    base = re.sub("[(\[].*?[)\]]", "", base).strip()
    # Collapse the runs of spaces left behind by the removals.
    return re.sub(" +", " ", base) + ".mp3"
# Make os path suitable for python chdir
def pathReplace(path):
    """Return *path* with every backslash converted to a forward slash,
    making a Windows path usable with os.chdir and friends."""
    return "/".join(path.split("\\"))
# Get cwd with right format
def getcwdFormat():
    """Return the current working directory with forward slashes only."""
    return os.getcwd().replace("\\", "/")
# Get new foldername
def getnewFolder(folder):
    """Return a non-existing variant of *folder*, named "folder(i)".

    Counts i upwards from 1 until "folder(i)" does not exist yet, so an
    already-taken folder name gets a fresh numbered alternative.

    Args:
        folder: the base folder name/path that is already taken.

    Returns:
        The first name of the form "folder(i)" that does not exist on disk.
    """
    i = 1
    # Bug fix: probe the same "folder(i)" name that is returned.  The old
    # loop checked "folder1" but returned "folder(1)", so existing numbered
    # folders were never detected and mkdir could fail.
    while os.path.exists(folder + "(" + str(i) + ")"):
        i += 1
    return folder + "(" + str(i) + ")"
# Mainloop: a small REPL that drives the download/tagging helpers above.
print("Welcome to YouTunes!")
while True:
    question = input("Download Tracks or Album? Or just json ")
    # --- Branch 1: download and tag a set of loose tracks ("singles") ---
    if question in ["Tracks", "Track", "t", "T", "tr"]:
        # One dated folder per run; getnewFolder picks a numbered variant
        # when the folder already exists.
        folderName = "Singles - " + str(datetime.date.today())
        if os.path.exists(folderName):
            folderName = getnewFolder(folderName)
            os.mkdir(folderName)
        else:
            os.mkdir(folderName)
        folderPath = getcwdFormat() + "/" + folderName
        track_urls = []
        # Collect URLs until the user types one of the "finish" keywords.
        question = input("Enter a song url or \"finish\": ")
        while question not in ["f", "finished", "fi", "finish"]:
            track_urls.append(question)
            question = input("Enter a song url or \"finish\": ")
        downLoadTracks(track_urls, folderName)
        print("Make sure every Track is named like Artist - TrackName Features")
        print("Example: Drake - Sneakin feat. 21 Savage")
        print("If Track has correct name just press enter, otherwise enter correct name and then enter")
        # Let the user confirm or correct each downloaded file name.
        for mp3 in (os.listdir(folderPath)):
            if mp3.endswith(".mp3"):
                print(mp3)
                newname = input("Enter or new name: ")
                if newname == "":
                    pass
                else:
                    os.rename(getcwdFormat() + "/" + folderName + "/" + mp3, getcwdFormat() + "/" + folderName + "/" + newname + ".mp3")
                    print("Saved new name!")
        print("Every file in folder " + folderName + " has been named.")
        print("Next up: Setting the stats for iTunes") # todo Manage different genres here
        nameTracks(folderPath)
        print("You can quit now or download more tracks or albums: ")
    # --- Branch 2: download and tag a full album ---
    elif question in ["Album", "a", "A", "al"]:
        albumArtist = input("Which Artist? ")
        albumName = input("Which Album? ")
        folderName = albumArtist + " - " + albumName
        if os.path.exists(folderName):
            folderName = getnewFolder(folderName)
            os.mkdir(folderName)
        else:
            os.mkdir(folderName)
        folderPath = getcwdFormat() + "/" + folderName
        track_urls = []
        question = input("Enter a song url or \"finish\": ")
        while question not in ["f", "finished", "fi", "finish", "fin"]:
            track_urls.append(question)
            question = input("Enter a album song url or \"finish\": ")
        downLoadTracks(track_urls, folderName)
        print("Make sure every Track is named like: TrackName feat. Features")
        print("Example: Sneakin feat. 21 Savage (as an Drake album)")
        print("If Track has correct name just press enter, otherwise enter correct name and then enter")
        for mp3 in (os.listdir(folderPath)):
            if mp3.endswith(".mp3"):
                print(mp3)
                newname = input("Enter or new name: ")
                if newname != "":
                    os.rename(getcwdFormat() + "/" + folderName + "/" + mp3, getcwdFormat() + "/" + folderName + "/" + newname + ".mp3")
                    print("Saved new name!")
        specialGenre = input("Name a genre (default: [Hip-Hop/Rap]): ")
        print("Now doing the iTunes stats")
        if specialGenre != "":
            nameAlbum(folderPath, albumArtist, albumName, specialGenre)
        else:
            nameAlbum(folderPath, albumArtist, albumName)
        print("You can quit now or download more tracks or albums: ")
    # --- Branch 3: bulk download from a json file with "url"/"title" keys ---
    elif question in ["json","j","js"]:
        format = input("Is your json an album or just tracks? ")
        if format in ["a","album","al"]:
            albumArtist = input("Which artist? ")
            albumName = input("Which album name? ")
            format = "album"
        elif format in ["tr","track","tracks","single","singles"]:
            format = "tracks"
            pass
        else:
            break
        trackUrls = []
        tracksList = []
        filename = input("Enter the name of the json file without \".json\" (has to be in same directory as this program): ")
        # todo: Take the titles out of the json to name the files before asking the user
        try:
            with open(filename+".json", "r") as jsonfile:
                jfile = json.load(jsonfile)
        except:
            print("Error occured, could not open file")
            break
        folderName = input("What should be new foldername? ")
        if os.path.exists(folderName):
            folderName = getnewFolder(folderName)
            os.mkdir(folderName)
        else:
            os.mkdir(folderName)
        folderPath = getcwdFormat() + "/" + folderName
        # Each json entry is expected to provide a "url" and a "title".
        for i in range(len(jfile)):
            trackUrls.append(jfile[i]["url"])
            tracksList.append(jfile[i]["title"])
        downLoadTracks(trackUrls, folderName)
        if format == "tracks":
            print("\n\n\n")
            print("Make sure every track ist named like: Artist - Trackname feat. Feature")
            print("Example: Drake - Sneakin feat. 21 Savage")
            print("If Track has correct name just press enter, otherwise enter correct name and then enter")
        else:
            print("\n\n\n")
            print("Make sure every track ist named like: Trackname feat. Feature")
            print("Example: Sneakin feat. 21 Savage (as an Album of Drake)")
            print("If Track has correct name just press enter, otherwise enter correct name and then enter")
        for mp3 in (os.listdir(folderPath)):
            if mp3.endswith(".mp3"):
                if mp3.find("-") != -1: # cut off the artist (if existing) because it's not needed for an album
                    newname = mp3[mp3.find("-")+1:].strip()
                    while newname.find("-") != -1: # the filename may contain several "-", so keep stripping
                        newname = renameDownloadTrack(newname)
                    print(newname)
                else:
                    newname = mp3
                    print(mp3)
                namechange = input("Enter or new name: ")
                if namechange != "":
                    newname = namechange
                os.rename(getcwdFormat() + "/" + folderName + "/" + mp3, getcwdFormat() + "/" + folderName + "/" + newname + ".mp3")
                print("Saved new name!")
        print("Now doing the iTunes stats")
        if format == "tracks":
            nameTracks(folderPath)
        elif format == "album":
            specialGenre = input("Name a genre (default: [Hip-Hop/Rap]): ")
            if specialGenre != "":
                nameAlbum(folderPath, albumArtist, albumName, specialGenre)
            else:
                nameAlbum(folderPath, albumArtist, albumName)
        else:
            pass
        print("You can quit now or download more tracks or albums: ")
    # --- Branch 4: quit ---
    elif question in ["exit","e"]:
        exit()
    else:
        print("Please enter valid answer or \"exit\" to exit! ")
| StarcoderdataPython |
3339398 | """
Linear Regression example.
Use 1 layer linear regression model to calculate add operation.
ex) feature = [3, 5] then output should be 8
If cost graph doesn't converge, then change learning rate more smaller
"""
from Linear_Regression.model import *
from matplotlib.pyplot import *
# NOTE(review): the bare `print` statement below is Python 2 syntax; this
# script will not run under Python 3 without adding parentheses.
feature = [[1, 3, 5], [1, 5, 4], [1, 7, 9], [1, 2, 5]] # 4 training examples with 3 features(bias, operand1, operand2)
output = [8, 9, 16, 7] # 4 training examples output
linear = linear_regression(feature, output, 100, 0.00015) # Create linear regression model with 100 iterations and learning rate 0.00015
costs = linear.training() # Training the linear regression model
print linear.forward_propagation([1, 5, 18]) # We can see the linear regression model successfully calculate 5 + 18
plot(range(len(costs)), costs) # Draw cost graph per # of iterations
show()
11383375 | <reponame>Asurada2015/TFAPI_translation
import tensorflow as tf
"""tf.einsum(equation, *inputs)
功能:通过equation进行矩阵乘法。
输入:equation:乘法算法定义。
# 矩阵乘
>>> einsum('ij,jk->ik', m0, m1) # output[i,k] = sum_j m0[i,j] * m1[j, k]
# 点乘
>>> einsum('i,i->', u, v) # output = sum_i u[i]*v[i]
# 向量乘
>>> einsum('i,j->ij', u, v) # output[i,j] = u[i]*v[j]
# 转置
>>> einsum('ij->ji', m) # output[j,i] = m[i,j]
# 批量矩阵乘
>>> einsum('aij,ajk->aik', s, t) # out[a,i,k] = sum_j s[a,i,j] * t[a, j, k]"""
# Two 2x2 integer matrices used to demonstrate the einsum contractions.
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 6], [7, 8]])
z = tf.einsum('ij,jk->ik', a, b)  # matrix multiplication
z1 = tf.einsum('ij,ij->ij', a, b)  # element-wise product; both operands must have identical shapes
z2 = tf.einsum('ij,ij->', a, b)  # dot (inner) product: multiply element-wise, then sum to a scalar
sess = tf.Session()  # TF1-style session (this example predates eager execution)
print(sess.run(z))
print(sess.run(z1))
print(sess.run(z2))
sess.close()
# Expected output:
# [[19 22]
#  [43 50]]
# [[ 5 12]
#  [21 32]]
# 70
| StarcoderdataPython |
6515197 | from django.conf.urls import url
from django.utils.translation import ugettext_lazy as _
from accounts import views as account_views
# URL routes for the accounts app.  Each pattern is wrapped in ugettext_lazy
# (imported as `_`) so the URL path itself can be translated per language.
urlpatterns = [
    url(_(r'^register/$'),
        account_views.UserRegisterView.as_view(), name='register'),
    url(_(r'^login/$'),
        account_views.UserLoginView.as_view(), name='login'),
    # activation_key is a lowercase hex/hyphen slug captured from the URL.
    url(_(r'^confirm/(?P<activation_key>[0-9a-z-]+)'),
        account_views.UserConfirmEmailView.as_view(),
        name='confirm_email'),
]
| StarcoderdataPython |
4976446 | <reponame>IsaPeter/PythonProjects
#!/usr/bin/env python3
import os, sys
# Resolve the directory this script lives in and the application root one
# level above it, then put both on sys.path so the local `lib` package can
# be imported regardless of the current working directory.
runpath = os.path.dirname(os.path.realpath(__file__))
approot = os.path.abspath(os.path.join(runpath, os.pardir))
sys.path.append(os.path.join(runpath,'..'))
sys.path.append(approot)
import lib.address_pool as ap
from lib.tabCompleter import tabCompleter
import readline
# Shared Variables
connected_clients = []
loaded_modules = []
addressPool = ap.addressPool() # create an address pool
tab_complete = tabCompleter()
current_session = None
# Settings
# Use only TAB (not the default word delimiters) to trigger completion.
readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
def isperfect(lst, l):
    """Print "PERFECT" when the first *l* entries of *lst* read the same
    forwards and backwards (a palindrome), otherwise print "NOT PERFECT".

    Args:
        lst: sequence of values to check.
        l: length of the sequence being checked.
    """
    # Compare mirrored pairs; all() short-circuits on the first mismatch,
    # replacing the manual flag-and-break loop of the original.
    if all(lst[i] == lst[-(i + 1)] for i in range(l // 2)):
        print("PERFECT")
    else:
        print("NOT PERFECT")
# Read the number of test cases, then for each case read the array length
# and the array itself, and report whether it is a palindrome.
T = int(input())
for i in range(T):
    n = int(input())
    arr = list(map(int, input().split()))
    isperfect(arr, n)
| StarcoderdataPython |
9646255 | import logging
from typing import Callable
import requests
from attr import dataclass
from returns.functions import tap
from returns.pipeline import flow
from returns.pointfree import alt, bind, rescue
from returns.result import ResultE, safe
from typing_extensions import final
@final
@dataclass(frozen=True, slots=True)
class GetOrganization(object):
    """Fetch a single organization's data from the brreg API by number."""
    # Base URL of the brreg REST API.
    _brreg_base_url: str
    # Injected HTTP GET: url -> Result-wrapped requests.Response.
    _get: Callable[[str], ResultE[requests.Response]]
    _entities = 'enheter/'
    _sub_entities = 'underenheter/'
    def __call__(
        self,
        organization_number: str,
    ) -> ResultE[dict]:
        """Look up *organization_number*, retrying the sub-entity endpoint on
        failure, and return the decoded JSON body wrapped in a Result."""
        return flow(
            # URL for the main entity endpoint.
            self._brreg_base_url + self._entities + organization_number,
            # Request data (io).
            self._get,
            # On failure retry against the sub-entity endpoint instead.
            rescue(  # type: ignore
                lambda _: self._get(
                    self._brreg_base_url
                    + self._sub_entities  # noqa: W503
                    + organization_number,  # noqa: W503
                ),
            ),
            # Decode the successful response body as JSON.
            bind(self._get_json),
        )
    @safe
    def _get_json(self, response: requests.Response) -> dict:
        """Decode the response as JSON; @safe turns raised errors into Failure."""
        return response.json()
@final
@dataclass(frozen=True, slots=True)
class GetOrganizations(object):
    """Fetch one page of organizations matching the given search criteria."""
    # Base URL of the brreg REST API.
    _brreg_base_url: str
    # Injected HTTP GET accepting url, url_params and timeout arguments.
    _get: Callable
    _entities = 'enheter/'
    _sub_entities = 'underenheter/'
    _log = logging.getLogger('api.brreg.GetOrganizations')
    def __call__(
        self,
        search_criteria: dict,
        timeout: int = 20,
    ) -> ResultE[list]:
        """Query the entity search endpoint and return the decoded JSON body.
        NOTE(review): despite the ResultE[list] annotation, callers such as
        GetAllOrganizations treat the result as a dict with 'page' and
        '_embedded' keys — confirm before tightening the types.
        """
        return flow(
            # Get data from brreg (io).
            self._get(
                self._brreg_base_url + self._entities,
                url_params=search_criteria,
                timeout=timeout,
            ),
            # Log on failure, passing the failure through unchanged.
            alt(tap(self._log.warning)),
            # Decode the successful response body as JSON.
            bind(self._get_json),
        )
    @safe
    def _get_json(self, response: requests.Response) -> list:
        """Decode the response as JSON; @safe turns raised errors into Failure."""
        return response.json()
@final
@dataclass(frozen=True, slots=True)
class GetAllOrganizations(object):
    """Accumulate organizations across all pages and criteria combinations."""
    # Injected single-page fetcher (typically a GetOrganizations instance).
    _get_organizations: Callable
    _log = logging.getLogger('api.brreg.GetAllOrganizations')
    # brreg refuses to page past this many results for a single query.
    _brreg_max_enterprises: int = 10000
    @safe
    def __call__(
        self,
        search_criteria: dict,
        timeout: int = 20,
    ) -> list:
        """Fan out over every (kommune, business-type) pair and collect results.
        The comma-separated 'naeringskode' and 'kommunenummer' values in
        *search_criteria* are split and queried pairwise.  Note that
        *search_criteria* is mutated in place while iterating.
        """
        business_types = search_criteria['naeringskode'].split(',')
        kommune_numbers = search_criteria['kommunenummer'].split(',')
        search_criteria['naeringskode'] = business_types[0]
        all_orgs: list = []
        for kommune_number in kommune_numbers:
            for business_type in business_types:
                # Reset paging and narrow the criteria to one pair per query.
                search_criteria['page'] = 0
                search_criteria['naeringskode'] = business_type
                search_criteria['kommunenummer'] = kommune_number
                self._log.info(
                    'Current search criteria %s', search_criteria,
                )
                # NOTE(review): failures here are printed rather than logged,
                # unlike the rest of this module — confirm that is intended.
                self._paginator(search_criteria).map(
                    all_orgs.extend,
                ).alt(
                    tap(print),
                )
        return all_orgs
    @safe
    def _paginator(self, search_criteria) -> list:
        """Fetch every page of one query and return the combined entity list.
        Raises:
            RuntimeError: when the result count exceeds the brreg paging limit.
        """
        aggregated_data = self._get_organizations(search_criteria).unwrap()
        page = aggregated_data.get('page')
        total_elements = page.get('totalElements')
        self._log.info(
            '%s elements in %s pages',
            page.get('totalElements'),
            page.get('totalPages'),
        )
        if total_elements < 1:
            return []
        if total_elements > self._brreg_max_enterprises:
            error = 'Number of results exceedes 10 000'
            raise RuntimeError(error)
        # Page 0 is already in aggregated_data; append the remaining pages.
        for page_number in range(1, page.get('totalPages')):
            search_criteria['page'] = page_number
            self._call_api(search_criteria).map(
                aggregated_data['_embedded']['enheter'].extend,
            ).alt(
                tap(self._log.warning),
            )
        return aggregated_data['_embedded']['enheter']
    def _call_api(self, search_criteria) -> ResultE[list]:
        """Fetch one page and extract its '_embedded.enheter' entity list."""
        return flow(
            # Search payload.
            search_criteria,
            # Get organizations (io).
            self._get_organizations,
            # Return list of organizations in `enheter`.
            bind(safe(lambda resp: resp.get('_embedded').get('enheter'))),
            # Log warning on failure.
            alt(tap(self._log.warning)),
        )
| StarcoderdataPython |
385079 | <gh_stars>0
"""
The dictionaries are blocks and they the present by '{}' , inside every block is
represented by two elements, one key and one value, separated by ':' and with ','
separated every block
Example: name = {key1:value1, key2:value2,.......}
And inside the values you can have tuple, list, numbers, strings, all types of data
"""
# A dict literal: each block is "key: value"; values may be any type.
Users = {"name": "Carlos", "surname": "Perez", "years": 20}
"""
values() is a method that return all values that has the dictionaries
keys() is a method that return all keys that has the dictionaries
"""
# .values() and .keys() return live view objects over the dict's contents.
print(Users.values())
print(Users.keys())
"""
If you need a values specific, you can used the method get() and inside specific the key
"""
# .get() avoids a KeyError when the key is missing (it returns None instead).
print(Users.get("name"))
"""
clear() is a method for empty the dictionaries
copy() is a method for copied the block of a dictionaries in other dictionaries
"""
# .copy() makes a shallow copy; .clear() empties the original in place,
# leaving the copy untouched.
UsersCopy = Users.copy()
print(UsersCopy)
Users.clear()
print(Users)
"""
update() is a method for update the dictionaries, you can add a new block placing
other key or change the value of a block, placing the same key
"""
# .update() merges another mapping in, overwriting values of existing keys.
UserNewData = {"name": "Andres"}
UsersCopy.update(UserNewData)
print(UsersCopy)
| StarcoderdataPython |
1823138 | """
Copyright (c) 2021 BEAM CONNECTIVITY LIMITED
Use of this source code is governed by an MIT-style
license that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
Download dashboards from a Grafana web instance.
"""
import json
import logging
from pathlib import Path
from typing import Dict
import rich
import typer
from rich.tree import Tree
from .api import grafana
from .dashboard import update_dashlist_folder_ids
from .tree import walk_directory
app = typer.Typer()
logger = logging.getLogger()
@app.command()
def all(
    destination_dir: Path = typer.Option(
        ...,
        exists=True,
        file_okay=False,
        dir_okay=True,
        writable=True,
        readable=True,
        resolve_path=True,
    )
):  # pylint: disable=redefined-builtin
    """
    Download folder-structured dashboards and write them as json files under destination_dir
    """
    logger.info(f"Pulling all dashboards into {destination_dir}...")
    # Get all dashboards within each folder and write them to files.
    folders = grafana.api.get("folders")
    logger.info(f"Folders found: {[x['title'] for x in folders]}")
    for folder in folders:
        logger.info(f"Getting dashboards from the {folder['title']} folder..")
        # Find all dashboards in the folder
        _write_dashboards_to_local_folder_from_grafana_folder(folder, destination_dir)
    # Special case: the General folder is not returned by the folders API.
    _write_dashboards_to_local_folder_from_grafana_folder({"title": "General"}, destination_dir)
    # Show the user the resulting on-disk directory tree.
    tree = Tree(
        f":open_file_folder: [link file://{destination_dir}]{destination_dir}",
        guide_style="bold bright_blue",
    )
    rich.print(walk_directory(destination_dir, tree))
    logger.info("✅")
# =============
def _write_dashboards_to_local_folder_from_grafana_folder(folder: Dict, destination_dir: Path) -> None:
    """
    Finds dashboards within a given folder and writes them to destination_dir/<folder title>
    """
    # The General folder (id=0) is special and is not part of the Folder API
    # so query for it separately.
    if folder["title"] == "General":
        folder_id = 0
    else:
        folder_id = folder["id"]
    for dashboard in grafana.api.get(f"search?folderIds={folder_id}&type=dash-db"):
        logger.info(f"Found {dashboard['title']} dashboard in folder {folder['title']}")
        try:
            dashboard_definition = grafana.api.get(f"dashboards/uid/{dashboard['uid']}")
            # Update references in dashboard pickers to folder ids, as they
            # are auto generated per Grafana instance.
            dashboard_definition = update_dashlist_folder_ids(dashboard_definition)
            # Write it to a file named after the lowercased dashboard title.
            dashboard_file: Path = (
                destination_dir / folder["title"] / f"{dashboard['title'].lower().replace(' ', '_')}.json"
            )
            dashboard_file.parent.mkdir(parents=True, exist_ok=True)
            dashboard_file.write_text(json.dumps(dashboard_definition["dashboard"], indent=2))
            logger.info(f"Successfully saved {dashboard['title']} dashboard to {dashboard_file}")
        except Exception:
            # One broken dashboard should not abort the whole export.
            logger.exception(f"❌ An exception occurred with {dashboard['title']}")
| StarcoderdataPython |
4909371 | <filename>tests/unit/test_webhook.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Tests for our webhooks: HTTP handling and helper methods."""
import json
import os
import unittest
from unittest.mock import patch
import flask
import pytest
from requests.exceptions import HTTPError
from requests.models import Response
import webcompat
from webcompat.db import Site
from webcompat.helpers import to_bytes
from webcompat.webhooks import helpers
from webcompat.webhooks.model import WebHookIssue
# The key is being used for testing and computing the signature.
# The key needs to be a bytes object
key = to_bytes(webcompat.app.config['HOOK_SECRET_KEY'])
# Some machinery for opening our test files
def event_data(filename):
    """Load a webhook fixture file and return (normalized_json, sha1_signature)."""
    fixture_path = os.path.join(
        os.path.realpath(os.curdir), 'tests/fixtures/webhooks', filename)
    with open(fixture_path, 'r') as fixture:
        # Round-trip through json to normalize the payload's whitespace.
        json_event = json.dumps(json.load(fixture))
    signature = 'sha1={sig}'.format(
        sig=helpers.get_payload_signature(key, json_event))
    return json_event, signature
class TestWebhook(unittest.TestCase):
    """Tests for our WebHook code."""
    def setUp(self):
        """Set up tests."""
        # sets a more detailed message when testing.
        self.longMessage = True
        self.maxDiff = None
        webcompat.app.config['TESTING'] = True
        self.app = webcompat.app.test_client()
        self.headers = {'content-type': 'application/json'}
        self.test_url = '/webhooks/labeler'
        # Canned issue bodies covering the metadata combinations under test.
        self.issue_body = """
        <!-- @browser: Firefox 55.0 -->
        <!-- @ua_header: Mozilla/5.0 (what) Gecko/20100101 Firefox/55.0 -->
        <!-- @reported_with: web -->
        <!-- @extra_labels: type-media, type-stylo -->
        <!-- @public_url: https://foo.example.org/issues/1 -->
        **URL**: https://www.example.com/
        **Browser / Version**: Firefox 55.0
        <!-- @browser: Chrome 48.0 -->
        """
        self.issue_body2 = """
        <!-- @browser: Foobar -->
        <!-- @extra_labels: type-foobar -->
        """
        self.issue_body3 = """
        **URL**: https://www.example.com/
        <!-- @browser: Firefox Mobile (Tablet) 40.0 -->
        """
        self.issue_body4 = """
        **URL**: https://www.example.com/
        <!-- @browser: Firefox Mobile (Tablet) 40.0 -->
        """
        self.issue_body5 = """
        <!-- @browser: Android 8.1.0 -->
        <!-- @ua_header: Mozilla/5.0 (Android 8.1.0; Mobile VR; rv:65.0) Gecko/65.0 Firefox/65.0 -->
        <!-- @reported_with: browser-fxr -->
        <!-- @extra_labels: browser-firefox-reality, type-media -->
        **URL**: https://vrporn.com/closing-shift-shaft/
        **Browser / Version**: Android 8.1.0
        **Operating System**: Android 8.1.0
        **Tested Another Browser**: Yes
        """ # noqa
        self.issue_body6 = """
        **URL**: https://not-gecko.example.com/
        <!-- @browser: Safari 99.0 -->
        """
        self.issue_body7 = """
        **URL**: https://not-gecko.example.com/
        <!-- @public_url: http://test.example.org/issues/1 -->
        """
        # Pre-parsed webhook payloads for the event actions under test.
        self.issue_info1 = {
            'action': 'foobar',
            'state': 'open',
            'body': '<!-- @browser: Firefox 55.0 -->\n'
                    '<!-- @ua_header: Mozilla/5.0 (X11; Linux x86_64; rv:55.0) '
                    'Gecko/20100101 Firefox/55.0 -->\n'
                    '<!-- @reported_with: web -->\n'
                    '\n'
                    '**URL**: https://www.netflix.com/',
            'domain': 'www.netflix.com',
            'number': 600,
            'original_labels': [],
            'public_url': '',
            'repository_url':
                'https://api.github.com/repos/webcompat/webcompat-tests',
            'title': 'www.netflix.com - test invalid event'}
        self.issue_info2 = {
            'action': 'milestoned',
            'state': 'open',
            'milestoned_with': 'accepted',
            'milestone': 'accepted',
            'body': '<!-- @browser: Firefox 55.0 -->\n'
                    '<!-- @ua_header: Mozilla/5.0 (X11; Linux x86_64; rv:55.0) '
                    'Gecko/20100101 Firefox/55.0 -->\n'
                    '<!-- @reported_with: web -->\n'
                    '<!-- @public_url: '
                    'https://github.com/webcompat/webcompat-tests/issues/1 -->\n'
                    '\n'
                    '**URL**: https://www.netflix.com/',
            'domain': 'www.netflix.com',
            'number': 600,
            'original_labels': ['action-needsmoderation'],
            'public_url':
                'https://github.com/webcompat/webcompat-tests/issues/1',
            'repository_url':
                'https://api.github.com/repos/webcompat/webcompat-tests-private', # noqa
            'title': 'www.netflix.com - test private issue accepted'}
        self.issue_info3 = {
            'action': 'closed',
            'state': 'closed',
            'body': '<!-- @browser: Firefox 55.0 -->\n'
                    '<!-- @ua_header: Mozilla/5.0 (X11; Linux x86_64; rv:55.0) '
                    'Gecko/20100101 Firefox/55.0 -->\n'
                    '<!-- @reported_with: web -->\n'
                    '<!-- @public_url: '
                    'https://github.com/webcompat/webcompat-tests/issues/1 -->\n'
                    '\n'
                    '**URL**: https://www.netflix.com/',
            'domain': 'www.netflix.com',
            'number': 600,
            'original_labels': ['action-needsmoderation'],
            'public_url':
                'https://github.com/webcompat/webcompat-tests/issues/1',
            'repository_url':
                'https://api.github.com/repos/webcompat/webcompat-tests-private', # noqa
            'title': 'www.netflix.com - test private issue accepted'}
        self.issue_info4 = {
            'action': 'opened',
            'state': 'open',
            'milestoned_with': '',
            'milestone': '',
            'body': '<!-- @browser: Firefox 55.0 -->\n'
                    '<!-- @ua_header: Mozilla/5.0 (X11; Linux x86_64; rv:55.0) '
                    'Gecko/20100101 Firefox/55.0 -->\n'
                    '<!-- @reported_with: web -->\n'
                    '<!-- @public_url: '
                    'https://github.com/webcompat/webcompat-tests/issues/1 -->\n'
                    '\n'
                    '**URL**: https://www.netflix.com/',
            'domain': 'www.netflix.com',
            'number': 600,
            'original_labels': ['action-needsmoderation'],
            'public_url':
                'https://github.com/webcompat/webcompat-tests/issues/1',
            'repository_url':
                'https://api.github.com/repos/webcompat/webcompat-tests-private', # noqa
            'title': 'www.netflix.com - test valid event'}
    def tearDown(self):
        """Tear down tests."""
        pass
    def test_forbidden_get(self):
        """GET is forbidden on labeler webhook."""
        rv = self.app.get(self.test_url, headers=self.headers)
        self.assertEqual(rv.status_code, 404)
    def test_fail_on_missing_signature(self):
        """POST without signature on labeler webhook is forbidden."""
        self.headers.update({'X-GitHub-Event': 'ping'})
        rv = self.app.post(self.test_url, headers=self.headers)
        self.assertEqual(rv.status_code, 401)
        self.assertEqual(rv.data, b'Nothing to see here')
        self.assertEqual(rv.mimetype, 'text/plain')
    def test_fail_on_bogus_signature(self):
        """POST with bogus signature on labeler webhook is forbidden."""
        json_event, signature = event_data('new_event_valid.json')
        self.headers.update({'X-GitHub-Event': 'ping',
                             'X-Hub-Signature': 'Boo!'})
        rv = self.app.post(self.test_url,
                           data=json_event,
                           headers=self.headers)
        self.assertEqual(rv.status_code, 401)
        self.assertEqual(rv.data, b'Nothing to see here')
        self.assertEqual(rv.mimetype, 'text/plain')
    def test_fail_on_invalid_event_type(self):
        """POST with event not being 'issues' or 'ping' fails."""
        json_event, signature = event_data('new_event_valid.json')
        self.headers.update({'X-GitHub-Event': 'failme',
                             'X-Hub-Signature': signature})
        rv = self.app.post(self.test_url,
                           data=json_event,
                           headers=self.headers)
        self.assertEqual(rv.status_code, 403)
        self.assertEqual(rv.mimetype, 'text/plain')
        self.assertEqual(rv.data, b'Not an interesting hook')
    def test_success_on_ping_event(self):
        """POST with PING events just return a 200 and contains pong."""
        json_event, signature = event_data('new_event_valid.json')
        self.headers.update({'X-GitHub-Event': 'ping',
                             'X-Hub-Signature': signature})
        rv = self.app.post(self.test_url,
                           data=json_event,
                           headers=self.headers)
        self.assertEqual(rv.status_code, 200)
        self.assertIn(b'pong', rv.data)
    def test_fails_on_not_known_action(self):
        """POST with an unknown action fails."""
        json_event, signature = event_data('new_event_invalid.json')
        self.headers.update({'X-GitHub-Event': 'issues',
                             'X-Hub-Signature': signature})
        rv = self.app.post(self.test_url,
                           data=json_event,
                           headers=self.headers)
        self.assertEqual(rv.status_code, 403)
        self.assertEqual(rv.mimetype, 'text/plain')
        self.assertEqual(rv.data, b'Not an interesting hook')
    def test_extract_metadata(self):
        """Extract dictionary of metadata for an issue body."""
        expected = {'reported_with': 'web',
                    'extra_labels': 'type-media, type-stylo',
                    'ua_header': ('Mozilla/5.0 (what) Gecko/20100101 '
                                  'Firefox/55.0'),
                    'browser': 'Firefox 55.0',
                    'public_url': 'https://foo.example.org/issues/1'}
        actual = helpers.extract_metadata(self.issue_body)
        self.assertEqual(expected, actual)
    def test_extract_browser_label(self):
        """Extract browser label name."""
        metadata_tests = [
            ({'browser': 'Firefox'}, 'browser-fixme'),
            ({'browser': 'Firefox Mobile'}, 'browser-fixme'),
            ({'browser': 'Firefox99.0'}, 'browser-fixme'),
            ({'browser': 'Firefox (tablet)'}, 'browser-fixme'),
            ({'browser': 'Firefox 30.0'}, 'browser-firefox'),
            ({'browser': 'Firefox Mobile 30.0'}, 'browser-firefox-mobile'),
            ({'browser': 'Firefox Mobile (Tablet) 88.0'}, 'browser-firefox-tablet'), # noqa
            ({'browser': 'Firefox Mobile Nightly 59.0a1 (2017-12-04)'}, 'browser-firefox-mobile'), # noqa
            ({'browser': 'Mozilla/5.0 (Android 8.0.0; Mobile; rv:58.0) Gecko/58.0 Firefox/58.0'}, 'browser-fixme'), # noqa
            ({'browser': 'Firefox Developer Edition 60.0b14 (64-bit)'}, 'browser-firefox'), # noqa
            ({'browser': 'Firefox Mobile Nightly 61.0 & Firefox PC Nightly'}, 'browser-firefox-mobile'), # noqa
            ({'browser': 'LOL Mobile 55.0'}, 'browser-fixme'),
            ({'browser': 'LOL Mobile 55.0',
              'extra_labels': 'browser-focus-geckoview'}, 'browser-fixme'),
            ({'browser': 'Firefox 30.0',
              'extra_labels': 'browser-focus-geckoview'}, 'browser-firefox'),
            ({}, 'browser-fixme'),
        ]
        for metadata_dict, expected in metadata_tests:
            actual = helpers.extract_browser_label(metadata_dict)
            self.assertEqual(expected, actual)
    def test_extract_extra_labels(self):
        """Extract 'extra' label."""
        metadata_tests = [
            ({'extra_labels': 'type-media'}, ['type-media']),
            ({'extra_labels': 'browser-focus-geckoview'},
             ['browser-focus-geckoview']),
            ({'extra_labels': 'cool, dude'}, ['cool', 'dude']),
            ({'extra_labels': 'weather-☁'}, ['weather-☁']),
            ({'extra_labels': 'weather-É'}, ['weather-é']),
            ({'burgers': 'french fries'}, None),
        ]
        for metadata_dict, expected in metadata_tests:
            actual = helpers.extract_extra_labels(metadata_dict)
            self.assertEqual(expected, actual)
    def test_extract_priority_label(self):
        """Extract priority label."""
        with patch('webcompat.db.site_db.query') as db_mock:
            db_mock.return_value.filter_by.return_value = [
                Site('google.com', 1, '', 1)]
            priority_label = helpers.extract_priority_label(self.issue_body3)
            self.assertEqual(priority_label, 'priority-critical')
        priority_label_none = helpers.extract_priority_label(self.issue_body)
        self.assertEqual(priority_label_none, None)
    def test_get_issue_labels(self):
        """Extract list of labels from an issue body."""
        labels_tests = [
            (self.issue_body, ['browser-firefox', 'type-media', 'type-stylo',
                               'engine-gecko']),
            (self.issue_body2, ['browser-fixme', 'type-foobar']),
            (self.issue_body3, ['browser-firefox-tablet', 'engine-gecko']),
            (self.issue_body5, ['browser-firefox-reality', 'engine-gecko',
                                'type-media']),
            (self.issue_body6, ['browser-safari']),
        ]
        for issue_body, expected in labels_tests:
            actual = helpers.get_issue_labels(issue_body)
            self.assertEqual(sorted(expected), sorted(actual))
    def test_is_github_hook_missing_x_github_event(self):
        """Validation tests for GitHub Webhooks: Missing X-GitHub-Event."""
        json_event, signature = event_data('new_event_invalid.json')
        # Lack the X-GitHub-Event
        with self.app as client:
            headers = self.headers.copy()
            headers.update({'X-Hub-Signature': signature})
            client.post(self.test_url,
                        data=json_event,
                        headers=headers)
            webhook_request = helpers.is_github_hook(flask.request)
            self.assertFalse(webhook_request, 'X-GitHub-Event is missing')
    def test_is_github_hook_missing_x_hub_signature(self):
        """Validation tests for GitHub Webhooks: Missing X-Hub-Signature."""
        json_event, signature = event_data('new_event_invalid.json')
        # Lack the X-Hub-Signature
        with self.app as client:
            headers = self.headers.copy()
            headers.update({'X-GitHub-Event': 'issues'})
            client.post(self.test_url,
                        data=json_event,
                        headers=headers)
            webhook_request = helpers.is_github_hook(flask.request)
            self.assertFalse(webhook_request, 'X-Hub-Signature is missing')
    def test_is_github_hook_wrong_signature(self):
        """Validation tests for GitHub Webhooks: Wrong X-Hub-Signature."""
        json_event, signature = event_data('new_event_invalid.json')
        # X-Hub-Signature is wrong
        with self.app as client:
            headers = self.headers.copy()
            headers.update({'X-GitHub-Event': 'issues',
                            'X-Hub-Signature': 'failme'})
            client.post(self.test_url,
                        data=json_event,
                        headers=headers)
            webhook_request = helpers.is_github_hook(flask.request)
            self.assertFalse(webhook_request, 'X-Hub-Signature is wrong')
    def test_is_github_hook_everything_ok(self):
        """Validation tests for GitHub Webhooks: Everything ok."""
        json_event, signature = event_data('new_event_invalid.json')
        # Everything is fine
        with self.app as client:
            headers = self.headers.copy()
            headers.update({'X-GitHub-Event': 'issues',
                            'X-Hub-Signature': signature})
            client.post(self.test_url,
                        data=json_event,
                        headers=headers)
            webhook_request = helpers.is_github_hook(flask.request)
            self.assertTrue(webhook_request,
                            'X-GitHub-Event and X-Hub-Signature are correct')
    def test_signature_check(self):
        """Test the signature check function for WebHooks."""
        payload = 'A body'
        key = 'SECRET'
        post_signature = 'sha1=abacb5cff87d9e0122683d0d1d18a150809ac700'
        self.assertTrue(helpers.signature_check(key, post_signature, payload))
        post_signature = 'abacb5cff87d9e0122683d0d1d18a150809ac700'
        self.assertFalse(helpers.signature_check(key, post_signature, payload))
        post_signature = 'sha1='
        self.assertFalse(helpers.signature_check(key, post_signature, payload))
        post_signature = 'sha1=wrong'
        self.assertFalse(helpers.signature_check(key, post_signature, payload))
    def test_repo_scope_public(self):
        """Test the public scope of the repository."""
        url = 'https://api.github.com/repos/webcompat/webcompat-tests'
        expected = 'public'
        actual = helpers.repo_scope(url)
        self.assertEqual(expected, actual)
    def test_repo_scope_private(self):
        """Test the private scope of the repository."""
        url = 'https://api.github.com/repos/webcompat/webcompat-tests-private'
        expected = 'private'
        actual = helpers.repo_scope(url)
        self.assertEqual(expected, actual)
    def test_repo_scope_unknown(self):
        """Test the unknown scope of the repository."""
        url = 'https://api.github.com/repos/webcompat/webcompat-foobar'
        expected = 'unknown'
        actual = helpers.repo_scope(url)
        self.assertEqual(expected, actual)
    def test_prepare_rejected_issue(self):
        """Test we prepare the right payload for the rejected issue."""
        expected = {'body': "<p>The content of this issue doesn't meet our\n"
                            '<a href="https://webcompat.com/terms#acceptable-use">acceptable use</a>\n' # noqa
                            'guidelines. Its original content has been deleted.</p>',
                    'labels': ['status-notacceptable'],
                    'milestone': 8,
                    'state': 'closed',
                    'title': 'Issue rejected.'}
        actual = helpers.prepare_rejected_issue()
        self.assertEqual(type(actual), dict)
        self.assertEqual(actual, expected)
if __name__ == '__main__':
    # Allow running this test module directly (outside the test runner).
    unittest.main()
| StarcoderdataPython |
315954 | # pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
from unittest import TestCase
from src.game import Board
from src.exceptions import ValidationError
class BoardTest(TestCase):
    """Unit tests for Board construction, immutability and neighbour lookup.

    The ``around`` tests use a 3x3 board whose cells are addressed by the
    flat index ``row * 3 + col``::

        0 1 2
        3 4 5
        6 7 8
    """
    def test_start_cells(self):
        board = Board(2, 2, [True, True, False, False])
        self.assertEqual(board.cells, 4)
    def test_rows_is_immutable(self):
        board = Board(1, 1, [True])
        with self.assertRaises(AttributeError):
            board.rows = 2
    def test_cols_is_immutable(self):
        board = Board(1, 1, [True])
        with self.assertRaises(AttributeError):
            board.cols = 2
    def test_generation_puts_mines(self):
        board = Board(1, 1, [True])
        self.assertTrue(board.at(0,0).has_mine())
    def test_generation_puts_nothing(self):
        board = Board(1, 1, [False])
        self.assertFalse(board.at(0,0).has_mine())
    def test_generation_raises_when_less_values(self):
        with self.assertRaises(ValidationError):
            Board(2, 2, [True, True, False])
    def test_generation_raises_when_more_values(self):
        with self.assertRaises(ValidationError):
            Board(2, 1, [True, True, False])
    def test_generation_raises_when_invalid_input(self):
        with self.assertRaises(ValidationError):
            Board(2, 2, [True, False, True, 'False'])
    def test_around_not_cornered_cell(self):
        board = around_board()
        compare = filter_cells(board, 0, 1, 2, 3, 5, 6, 7, 8)
        self.assertEqual(compare, board.around(1, 1))
    def test_around_first_row(self):
        board = around_board()
        compare = filter_cells(board, 0, 2, 3, 4, 5)
        self.assertEqual(compare, board.around(0, 1))
    def test_around_last_row(self):
        board = around_board()
        compare = filter_cells(board, 3, 4, 5, 6, 8)
        self.assertEqual(compare, board.around(2, 1))
    def test_around_first_column(self):
        board = around_board()
        compare = filter_cells(board, 0, 1, 4, 6, 7)
        # Bug fix: this used assertTrue(compare, ...), where the second
        # argument is only the failure message — the neighbourhood was
        # never actually compared. Use assertEqual like the sibling tests.
        self.assertEqual(compare, board.around(1, 0))
    def test_around_last_column(self):
        board = around_board()
        compare = filter_cells(board, 1, 2, 4, 7, 8)
        self.assertEqual(compare, board.around(1, 2))
    def test_arount_first_row_first_column(self):
        board = around_board()
        compare = filter_cells(board, 1, 3, 4)
        self.assertEqual(compare, board.around(0, 0))
    def test_around_first_row_last_column(self):
        board = around_board()
        compare = filter_cells(board, 1, 4, 5)
        self.assertEqual(compare, board.around(0, 2))
    def test_around_last_row_first_column(self):
        board = around_board()
        compare = filter_cells(board, 3, 4, 7)
        self.assertEqual(compare, board.around(2, 0))
    def test_around_last_row_last_column(self):
        board = around_board()
        compare = filter_cells(board, 4, 5, 7)
        self.assertEqual(compare, board.around(2, 2))
def filter_cells(board, *indexes):
    """Return the board cells located at the given flat *indexes*.

    Helper used by the ``around`` tests to build the expected list of
    neighbour cells from a board laid out in row-major order.
    """
    # Comprehension replaces the manual append loop (same order, same result).
    return [board.at_index(index) for index in indexes]
def around_board():
    """Build the 3x3 checkerboard fixture used by the ``around`` tests.

    Mines sit on every even flat index (corners and center).
    """
    mines = [index % 2 == 0 for index in range(9)]
    return Board(3, 3, mines)
| StarcoderdataPython |
9735627 | <reponame>supercatex/ML_Lesson
#
# Copyright (c) Microsoft Corporation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for details.
#
def training(X, y, model, epochs=10, batch_size=128):
    """Fit *model* on (X, y) and return the trained model and its history.

    Assumes *model* exposes a Keras-style ``fit`` API — TODO confirm.
    20% of the data is held out as a validation split.
    """
    history = model.fit(
        x=X,
        y=y,
        validation_split=0.2, # Separate data to training set and validation set.
        epochs=epochs, # Repeat times.
        batch_size=batch_size, # Each epoch data input size.
        verbose=1 # 0 = no log. 1 = log. 2 = log when epoch ending.
    )
    return model, history
| StarcoderdataPython |
6400398 | <filename>plugins/module_utils/zpa_application_server.py
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.willguibr.zpacloud.plugins.module_utils.zpa_client import (
ZPAClientHelper,
delete_none,
)
class ApplicationServerService:
    """CRUD wrapper around the ZPA application-server REST endpoints."""
    def __init__(self, module, customer_id):
        """Bind the Ansible module, tenant id and REST client."""
        self.module = module
        self.customer_id = customer_id
        self.rest = ZPAClientHelper(module)
    def getByIDOrName(self, id, name):
        """Look the server up by id first, falling back to name; None if absent."""
        application_server = None
        if id is not None:
            application_server = self.getByID(id)
        if application_server is None and name is not None:
            application_server = self.getByName(name)
        return application_server
    def getByID(self, id):
        """Fetch a single server by id, mapped to module fields; None on non-200."""
        response = self.rest.get(
            "/mgmtconfig/v1/admin/customers/%s/server/%s" % (self.customer_id, id)
        )
        status_code = response.status_code
        if status_code != 200:
            return None
        return self.mapRespJSONToApp(response.json)
    def getAll(self):
        """Return every application server for this customer, mapped."""
        # NOTE(review): "list" shadows the builtin; rename when touching this.
        list = self.rest.get_paginated_data(
            base_url="/mgmtconfig/v1/admin/customers/%s/server" % (self.customer_id),
            data_key_name="list",
        )
        application_servers = []
        for application_server in list:
            application_servers.append(self.mapRespJSONToApp(application_server))
        return application_servers
    def getByName(self, name):
        """Linear scan over all servers for an exact name match; None if absent."""
        application_servers = self.getAll()
        for application_server in application_servers:
            if application_server.get("name") == name:
                return application_server
        return None
    @delete_none
    def mapRespJSONToApp(self, resp_json):
        """Translate an API response dict into snake_case module fields."""
        if resp_json is None:
            return {}
        return {
            "id": resp_json.get("id"),
            "address": resp_json.get("address"),
            "config_space": resp_json.get("configSpace"),
            "name": resp_json.get("name"),
            "description": resp_json.get("description"),
            "enabled": resp_json.get("enabled"),
            "app_server_group_ids": resp_json.get("appServerGroupIds"),
        }
    @delete_none
    def mapAppToJSON(self, application_server):
        """Translate module snake_case fields back into the API's camelCase."""
        if application_server is None:
            return {}
        return {
            "id": application_server.get("id"),
            "address": application_server.get("address"),
            "configSpace": application_server.get("config_space"),
            "name": application_server.get("name"),
            "description": application_server.get("description"),
            "enabled": application_server.get("enabled"),
            "appServerGroupIds": application_server.get("app_server_group_ids"),
        }
    def unlinkAttachedServerGroups(self, appID):
        """Detach all server groups from the server (required before delete)."""
        server = self.getByID(appID)
        if server is None:
            return None
        if len(server.get("app_server_group_ids", [])) > 0:
            self.module.log(
                "[INFO] Removing server group ID/s from application server: %s"
                % (appID)
            )
            server["app_server_group_ids"] = []
            return self.update(server)
        return server
    def create(self, application_server):
        """Create a new application server; None on HTTP error (>299)."""
        ApplicationServerJson = self.mapAppToJSON(application_server)
        response = self.rest.post(
            "/mgmtconfig/v1/admin/customers/%s/server" % (self.customer_id),
            data=ApplicationServerJson,
        )
        status_code = response.status_code
        if status_code > 299:
            return None
        return self.mapRespJSONToApp(response.json)
    def update(self, application_server):
        """Update an existing application server; None on HTTP error (>299)."""
        ApplicationServerJson = self.mapAppToJSON(application_server)
        response = self.rest.put(
            "/mgmtconfig/v1/admin/customers/%s/server/%s"
            % (self.customer_id, ApplicationServerJson.get("id")),
            data=ApplicationServerJson,
        )
        status_code = response.status_code
        if status_code > 299:
            return None
        return application_server
    def delete(self, id):
        """Delete the application server, detaching server groups first."""
        self.unlinkAttachedServerGroups(id)
        response = self.rest.delete(
            "/mgmtconfig/v1/admin/customers/%s/server/%s" % (self.customer_id, id)
        )
        return response.status_code
| StarcoderdataPython |
11318099 | import base64
import os
import pickle
import subprocess
import sys
from gym3.util import call_func
def run_test_with_mpi(fn_path, kwargs=None, nproc=2, timeout=30):
    """Re-run the function at *fn_path* under mpiexec with *nproc* ranks.

    The function path and its keyword arguments are pickled and
    base64-encoded so they survive being passed on the command line.
    """
    if kwargs is None:
        kwargs = {}
    serialized_fn = base64.b64encode(pickle.dumps((fn_path, kwargs)))
    cmd = [
        "mpiexec",
        "-n",
        str(nproc),
        sys.executable,
        "-m",
        "gym3.internal.test_with_mpi",
        serialized_fn,
    ]
    subprocess.check_call(cmd, env=os.environ, timeout=timeout)
if __name__ == "__main__":
fn_path, kwargs = pickle.loads(base64.b64decode(sys.argv[1]))
call_func(fn_path, **kwargs)
| StarcoderdataPython |
5114077 | from webdnn.backend.webgl.kernels import abs
from webdnn.backend.webgl.kernels import average_pooling_2d
from webdnn.backend.webgl.kernels import broadcast
from webdnn.backend.webgl.kernels import clipped_relu
from webdnn.backend.webgl.kernels import col2im
from webdnn.backend.webgl.kernels import concat
from webdnn.backend.webgl.kernels import convert_r_to_rgba
from webdnn.backend.webgl.kernels import convert_rgba_to_r
from webdnn.backend.webgl.kernels import depth2space
from webdnn.backend.webgl.kernels import elementwise
from webdnn.backend.webgl.kernels import elementwise_add
from webdnn.backend.webgl.kernels import elementwise_div
from webdnn.backend.webgl.kernels import elementwise_mul
from webdnn.backend.webgl.kernels import elementwise_pow
from webdnn.backend.webgl.kernels import elu
from webdnn.backend.webgl.kernels import exp
from webdnn.backend.webgl.kernels import hard_sigmoid
from webdnn.backend.webgl.kernels import im2col
from webdnn.backend.webgl.kernels import leaky_relu
from webdnn.backend.webgl.kernels import max
from webdnn.backend.webgl.kernels import max_pooling_2d
from webdnn.backend.webgl.kernels import min
from webdnn.backend.webgl.kernels import partial_im2col
from webdnn.backend.webgl.kernels import reduce
from webdnn.backend.webgl.kernels import reinterpret_axis
from webdnn.backend.webgl.kernels import relu
from webdnn.backend.webgl.kernels import reshape
from webdnn.backend.webgl.kernels import rsqrt
from webdnn.backend.webgl.kernels import scalar_add
from webdnn.backend.webgl.kernels import scalar_affine
from webdnn.backend.webgl.kernels import scalar_mul
from webdnn.backend.webgl.kernels import scalar_pow
from webdnn.backend.webgl.kernels import sgemm
from webdnn.backend.webgl.kernels import sigmoid
from webdnn.backend.webgl.kernels import softplus
from webdnn.backend.webgl.kernels import softsign
from webdnn.backend.webgl.kernels import space2depth
from webdnn.backend.webgl.kernels import split_axis
from webdnn.backend.webgl.kernels import sum
from webdnn.backend.webgl.kernels import tanh
from webdnn.backend.webgl.kernels import threshold_relu
from webdnn.backend.webgl.kernels import transpose
from webdnn.backend.webgl.kernels import util
| StarcoderdataPython |
3360730 | <gh_stars>0
import urllib.request
# Poll the local "mana" service 500 times and print each raw response.
for num in range(0, 500):
    url = 'http://localhost:11003/mana/getport'
    req = urllib.request.Request(url)
    # Close each response promptly; the original leaked one open
    # response object (and its socket) per iteration.
    with urllib.request.urlopen(req) as response:
        data = response.read()
    print(data, '-->', num)
3322921 | <filename>No_0925_Long Pressed Name/by_two-pointers_and_iteration.py
'''
Description:
Your friend is typing his name into a keyboard. Sometimes, when typing a character c, the key might get long pressed, and the character will be typed 1 or more times.
You examine the typed characters of the keyboard. Return True if it is possible that it was your friends name, with some characters (possibly none) being long pressed.
Example 1:
Input: name = "alex", typed = "aaleex"
Output: true
Explanation: 'a' and 'e' in 'alex' were long pressed.
Example 2:
Input: name = "saeed", typed = "ssaaedd"
Output: false
Explanation: 'e' must have been pressed twice, but it wasn't in the typed output.
Example 3:
Input: name = "leelee", typed = "lleeelee"
Output: true
Example 4:
Input: name = "laiden", typed = "laiden"
Output: true
Explanation: It's not necessary to long press any character.
Constraints:
1 <= name.length <= 1000
1 <= typed.length <= 1000
The characters of name and typed are lowercase letters.
'''
class Solution:
def isLongPressedName(self, name: str, typed: str) -> bool:
idx_src= 0
size_src, size_type = len(name), len(typed)
for idx_type, char_type in enumerate(typed):
if idx_src < size_src and name[idx_src] == char_type:
# current type char is matched with friend's name char
idx_src += 1
elif idx_type == 0 or typed[idx_type] != typed[idx_type-1]:
# If first character mismatch, or it is not long-pressed repeated characters
# Reject
return False
# Accept if all character is matched with friend name
return idx_src == size_src
# t : the character length of the input string, typed.
## Time Complexity: O( t )
#
# The overhead in time is the cost of a linear scan over typed, which is O( t ).
## Space Complexity: O( 1 )
#
# The overhead in space is the storage for the two pointers, which is O( 1 ).
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'friend_name typed_name')


def test_bench():
    """Run Solution.isLongPressedName over the sample cases, printing results."""
    test_data = [
        TestEntry(friend_name="alex", typed_name="aaleex"),
        TestEntry(friend_name="saeed", typed_name="ssaaedd"),
        TestEntry(friend_name="leelee", typed_name="lleeelee"),
        TestEntry(friend_name="laiden", typed_name="laiden"),
    ]
    # expected output: True, False, True, True
    for entry in test_data:
        print(Solution().isLongPressedName(
            name=entry.friend_name, typed=entry.typed_name))
    return


if __name__ == '__main__':
    test_bench()
1940259 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Squashed migration: stands in for emailApp migrations 0001 through 0009.
    replaces = [(b'emailApp', '0001_initial'), (b'emailApp', '0002_email_textcleaned'), (b'emailApp', '0003_email_removedcontent'), (b'emailApp', '0004_auto_20150329_0757'), (b'emailApp', '0005_auto_20150329_1216'), (b'emailApp', '0006_auto_20150329_1251'), (b'emailApp', '0007_auto_20150329_1252'), (b'emailApp', '0008_auto_20150403_1346'), (b'emailApp', '0009_dashboard')]
    dependencies = [
    ]
    operations = [
        # Email: one row per received message, storing raw and cleaned
        # plain/HTML bodies plus a manual classification category.
        migrations.CreateModel(
            name='Email',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('messageId', models.SlugField(unique=True, max_length=100)),
                ('sender', models.EmailField(max_length=254)),
                ('timeStamp', models.DateTimeField()),
                ('subject', models.CharField(max_length=998, null=True)),
                ('textPlain', models.TextField(null=True, blank=True)),
                ('textHtml', models.TextField(null=True, blank=True)),
                ('removedContentHtml', models.TextField(null=True, blank=True)),
                ('removedContentPlain', models.TextField(null=True, blank=True)),
                ('textCleanHtml', models.TextField(null=True, blank=True)),
                ('textCleanPlain', models.TextField(null=True, blank=True)),
                ('category', models.CharField(default=b'', max_length=15, choices=[(b'NULL', b'Not categorized'), (b'promotional', b'Promotional'), (b'spam', b'Spam'), (b'human', b'Human'), (b'notification', b'Notification'), (b'others', b'Others')])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Dashboard: derived data with an expiry, linked 1:1 to its Email.
        migrations.CreateModel(
            name='Dashboard',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('data', models.TextField(null=True, blank=True)),
                ('timeStamp', models.DateTimeField()),
                ('validTill', models.DateTimeField()),
                ('source', models.OneToOneField(to='emailApp.Email')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| StarcoderdataPython |
1838880 | <reponame>alvarlagerlof/ball-pid
import numpy as np
import cv2
import imutils
import copy
class Process:
    """Frame post-processing helpers (resize, crop, threshold, greyscale)."""
    def __init__(self):
        """Log construction; the pipeline itself holds no state."""
        print("[init] Post process")
    def run(self, frame):
        """Apply the enabled post-processing steps to *frame*.

        All steps are currently commented out, so the frame passes
        through unchanged.
        """
        #frame = self.resize(frame, 600)
        #frame = self.cropSquare(frame)
        return frame
    def resize(self, frame, size):
        """Resize *frame* to *size* pixels wide (aspect ratio kept by imutils)."""
        return imutils.resize(frame, width=size)
    def cropSquare(self, frame):
        """Crop the horizontally-centered square (height x height) from *frame*.

        NOTE(review): assumes width >= height — confirm behaviour for
        portrait frames.
        """
        size = frame.shape[:2]
        # r = [x_offset, y_offset, width, height] of the crop rectangle.
        r = [(size[1]-size[0])/2, 0, size[0], size[0]]
        return frame[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
    def bw(self, frame):
        """Return a binary mask of near-black pixels (all channels <= 100).

        NOTE(review): bounds use dtype uint16 on what is presumably an
        8-bit BGR frame — confirm the intended dtype.
        """
        l = 0
        u = 100
        lower_black = np.array([l,l,l], dtype = "uint16")
        upper_black = np.array([u,u,u], dtype = "uint16")
        return cv2.inRange(frame, lower_black, upper_black)
    def greyScale(self, frame):
        """Convert a BGR frame to single-channel greyscale."""
        return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
| StarcoderdataPython |
1952078 | import string
print(string.punctuation)

txt = """
Deserunt, minim! fugiat^$ adipisi*&*cing mollit et proident. Id qui magna ad proident proident elit esse elit amet nostrud irure sit. In anim magna culpa nostrud. Elit qui commodo mollit Lorem nostrud esse labore sunt est officia sint. Enim incididunt anim fugiat tempor nisi culpa dolore est enim est occaecat magna tempor commodo.
"""

# Characters to remove from every word: punctuation and digits.
# Bug fix: the original referenced the misspelled ``string.punctuatio``
# and crashed with AttributeError; it also listed the digits by hand.
_strip_chars = string.punctuation + string.digits

for word in txt.split():
    # Keep only the characters that are neither punctuation nor digits.
    new_word = ''.join(c for c in word if c not in _strip_chars)
    print(new_word.lower())
9633864 | <reponame>fgitmichael/SelfSupevisedSkillDiscovery
import os
import torch
from diayn_original_tb.algo.algo_diayn_tb import DIAYNTorchOnlineRLAlgorithmTb
from latent_with_splitseqs.main_all_in_one_horizon_step_collector import create_experiment
from latent_with_splitseqs.post_epoch_funcs.algo_saving \
import config_name, file_extension, algo_name
def load_algo(
        algo_creator_fun: create_experiment,
        base_dir='.',
) -> DIAYNTorchOnlineRLAlgorithmTb:
    """Restore a saved algorithm from *base_dir*.

    Locates the single saved algo checkpoint, loads the pickled config
    stored next to it, rebuilds the algorithm with *algo_creator_fun*
    and restores the checkpointed state. Removes the redundant
    ``base_dir = base_dir`` self-assignment of the original.
    """
    # The parsed epoch number is not needed here, only the file name.
    algo_file_name, _ = get_numbered_file(algo_name, base_dir=base_dir)
    _, algo_file_extension = os.path.splitext(algo_file_name)
    assert algo_file_extension == file_extension

    # The config object is torch-pickled alongside the algo checkpoint.
    config = torch.load(os.path.join(base_dir, config_name + file_extension))
    start_algo = algo_creator_fun(
        config=config,
        config_path_name=None,
    )
    start_algo.load(
        file_name=algo_file_name,
        base_dir=base_dir,
    )
    return start_algo
def get_numbered_file(
        base_name,
        base_dir='.',
):
    """Return (file_name, number) for the single file in *base_dir* whose
    name starts with *base_name*.

    *number* is the integer formed by concatenating every digit that
    appears anywhere in the matched file name. Exactly one match is
    expected; anything else trips the assertion.
    """
    matches = []
    with os.scandir(base_dir) as entries:
        for entry in entries:
            _, file_name = os.path.split(entry.path)
            if file_name.startswith(base_name):
                matches.append(file_name)
        assert len(matches) == 1
        found = matches[0]
        number = int(''.join(ch for ch in found if ch.isdigit()))
        return found, number
| StarcoderdataPython |
1769281 | <gh_stars>1-10
from decouple import config, Csv
from .base import *
# Local development overrides layered on top of the shared base settings.
SECRET_KEY = '*xtipyb*z!q*! # wnca_q-2063m)+*80r2n=x)0i5sf=tafj21z'
# NOTE(review): the " # " embedded in SECRET_KEY looks like an artifact of
# stripping an inline marker — confirm the real key before relying on it.
ALLOWED_HOSTS = []
DEBUG = True
CAPTCHA_TEST_MODE = True  # captcha always validates in dev/tests
# Print outgoing mail to the console instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Simple local SQLite database for development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
| StarcoderdataPython |
5055192 | <gh_stars>1-10
"""Just a conftest."""
from typing import Any, Callable
import httpbin as Httpbin
import pytest
from request_session import RequestSession
@pytest.fixture(scope="function")
def request_session(httpbin):
# type: (Httpbin) -> Callable
def inner(*args, **kwargs):
# type: (*Any, **Any) -> RequestSession
return RequestSession( # type: ignore
*args, host=httpbin.url, request_category="test", **kwargs
)
return inner
| StarcoderdataPython |
1924274 | from apscheduler.schedulers.background import BackgroundScheduler
def init_tasks(app, engine):
    """Schedule ``engine.start`` to fire at 09:30 on weekdays.

    *app* is currently unused — presumably kept for signature symmetry
    with other init hooks; confirm before removing.
    """
    scheduler = BackgroundScheduler()
    scheduler.add_job(
        engine.start,
        "cron",
        day_of_week="mon-fri",
        hour=9,
        minute=30
    )
    # The scheduler runs in a daemon thread for the life of the process.
    scheduler.start()
| StarcoderdataPython |
class BaseItem:
    """Minimal item record: a name, its id, and its wiki page URL."""

    def __init__(self, name, item_id, page_url):
        """Store the identifying fields.

        Bug fix: the original method was misspelled ``__init_`` (single
        trailing underscore) and therefore never ran as the constructor —
        instantiating BaseItem with arguments raised TypeError.
        """
        self.name = name
        self.id = item_id
        self.page_url = page_url
class ItemDrop:
    """Drop-availability record for an item.

    Fields are stored verbatim from the parser; their exact semantics
    (e.g. the level bounds) are defined by the caller — confirm there.
    """
    def __init__(self, enabled, level, max_level, leagues, areas, text):
        self.enabled = enabled
        self.level = level
        self.max_level = max_level
        self.leagues = leagues
        self.areas = areas
        self.text = text
class Requirements:
    """Stat/level requirements parsed from wiki strings; non-numeric values become None."""

    @staticmethod
    def _to_int(raw):
        """Return int(raw) when *raw* is a pure digit string, else None."""
        return int(raw) if raw.isdigit() else None

    def __init__(self, dex, stren, intel, level):
        self.dex = self._to_int(dex)
        self.str = self._to_int(stren)
        self.int = self._to_int(intel)
        # Level must additionally be >= 1 to count as a real requirement.
        parsed_level = self._to_int(level)
        self.level = parsed_level if parsed_level is not None and parsed_level >= 1 else None

    @property
    def has_reqs(self):
        """True when at least one requirement field is set."""
        return any([self.dex, self.str, self.int, self.level])
class Item:
    """Generic item record holding the fields parsed from the wiki export."""

    def __init__(self, base, item_class, name, rarity, size, drop, requirements, lore, help_text, is_corrupted,
                 is_relic, alt_art, quality, implicits, explicits, tags, icon, influences, *args):
        # Identity / classification.
        self.base = base
        self.item_class = item_class
        self.name = name
        self.rarity = rarity
        self.size = size
        self.tags = tags
        self.influences = influences
        # Acquisition and requirements.
        self.drop = drop
        self.requirements = requirements
        # Text fields.
        self.lore = lore
        self.help_text = help_text
        # State flags and visuals.
        self.is_corrupted = is_corrupted
        self.is_relic = is_relic
        self.alt_art = alt_art
        self.quality = quality
        self.icon = icon
        # Mods.
        self.implicits = implicits
        self.explicits = explicits
        # Populated later by callers, if at all.
        self.enchant = None
        self.sockets = None

    def __repr__(self):
        return "<Item: name={} rarity={}>".format(self.name, self.rarity)
class DivCard(Item):
    """Divination card; extends Item with the card-specific fields of *div_data*."""

    def __init__(self, base, item_class, name, rarity, size, drop, requirements, lore, help_text, is_corrupted,
                 is_relic, alt_art, quality, implicits, explicits, tags, icon, influences, div_data):
        super().__init__(base, item_class, name, rarity, size, drop, requirements, lore, help_text, is_corrupted,
                         is_relic, alt_art, quality, implicits, explicits, tags, icon, influences)
        # Card payload pulled out of the div_data mapping.
        self.reward = div_data['reward']
        self.reward_flavor = div_data['reward_flavor']
        self.stack_size = div_data['stack_size']
        self.card_art = div_data['card_art']
class Prophecy(Item):
    """Prophecy item; extends Item with prediction/objective/seal-cost fields."""

    def __init__(self, base, item_class, name, rarity, size, drop, requirements, lore, help_text, is_corrupted,
                 is_relic, alt_art, quality, implicits, explicits, tags, icon, influences, proph_data):
        super().__init__(base, item_class, name, rarity, size, drop, requirements, lore, help_text, is_corrupted,
                         is_relic, alt_art, quality, implicits, explicits, tags, icon, influences)
        # Prophecy payload pulled out of the proph_data mapping.
        self.prediction = proph_data['prediction text']
        self.objective = proph_data['objective']
        self.seal_cost = proph_data['seal cost']
class Weapon(Item):
    """Weapon item; damage, speed and crit fields come from *weapon_stats*."""

    def __init__(self, base, item_class, name, rarity, size, drop, requirements, lore, help_text, is_corrupted,
                 is_relic, alt_art, quality, implicits, explicits, tags, icon, influences, weapon_stats):
        super().__init__(base, item_class, name, rarity, size, drop, requirements, lore, help_text, is_corrupted,
                         is_relic, alt_art, quality, implicits, explicits, tags, icon, influences)
        self.attack_speed = weapon_stats['attack speed']
        # Per-damage-type min/max values, straight from the stats mapping.
        self.chaos_min = weapon_stats['chaos damage min']
        self.chaos_max = weapon_stats['chaos damage max']
        self.cold_min = weapon_stats['cold damage min']
        self.cold_max = weapon_stats['cold damage max']
        self.fire_min = weapon_stats['fire damage min']
        self.fire_max = weapon_stats['fire damage max']
        self.lightning_min = weapon_stats['lightning damage min']
        self.lightning_max = weapon_stats['lightning damage max']
        self.physical_min = weapon_stats['physical damage min']
        self.physical_max = weapon_stats['physical damage max']
        self.range = f"{weapon_stats['weapon range']}"
        self.critical_chance = f"{weapon_stats['critical strike chance']}"
        # Weapons are always represented at 20% quality.
        self.quality = 20

    @staticmethod
    def _span(low, high):
        """Return "low-high", or None when the max is zero (no damage of that type)."""
        if high == "0" or high == 0:
            return None
        return f"{low}-{high}"

    @property
    def chaos_damage(self):
        return self._span(self.chaos_min, self.chaos_max)

    @property
    def cold_damage(self):
        return self._span(self.cold_min, self.cold_max)

    @property
    def fire_damage(self):
        return self._span(self.fire_min, self.fire_max)

    @property
    def lightning_damage(self):
        return self._span(self.lightning_min, self.lightning_max)

    @property
    def physical_damage(self):
        return self._span(self.physical_min, self.physical_max)
class Armour(Item):
    """Armour item; defence numbers come from *armour_stats* ('0' means absent)."""

    def __init__(self, base, item_class, name, rarity, size, drop, requirements, lore, help_text, is_corrupted,
                 is_relic, alt_art, quality, implicits, explicits, tags, icon, influences, armour_stats):
        super().__init__(base, item_class, name, rarity, size, drop, requirements, lore, help_text, is_corrupted,
                         is_relic, alt_art, quality, implicits, explicits, tags, icon, influences)
        # A literal '0' in the export means the stat is absent -> store None.
        self.armour = armour_stats['armour'] if armour_stats['armour'] != '0' else None
        self.evasion = armour_stats['evasion'] if armour_stats['evasion'] != '0' else None
        self.energy_shield = (armour_stats['energy shield']
                              if armour_stats['energy shield'] != '0' else None)
        # Block is optional and may be missing or falsy even when present.
        self.block = armour_stats.get('block range average') or None
        # Armour pieces are always represented at 20% quality.
        self.quality = 20
class Mod:
    """A single item modifier as described by the wiki export."""

    def __init__(self, mod_id, name, group, mod_type, domain, gen_type, level_requirement, stat_text):
        # Identity.
        self.mod_id = mod_id
        self.name = name
        # Classification: group / type / domain / generation type.
        self.group = group
        self.type = mod_type
        self.domain = domain
        self.gen_type = gen_type
        # Gameplay data.
        self.level_requirement = level_requirement
        self.stat_text = stat_text
class PassiveSkill:
    """A passive-tree node; falsy wiki fields are normalised to None."""

    def __init__(self, asc_class, flavor_text, icon, is_keystone, is_notable, name, reminder_text, stat_text, int_id):
        # Flags and name are stored verbatim.
        self.is_keystone = is_keystone
        self.is_notable = is_notable
        self.name = name
        # Tags are filled in later by callers.
        self.tags = []
        # Empty strings / zero / None from the export collapse to None.
        self.asc_class = asc_class or None
        self.flavor_text = flavor_text or None
        self.icon = icon or None
        self.reminder_text = reminder_text or None
        self.stat_text = stat_text or None
        self.int_id = int_id or None
class Gem:
    """A skill gem record built from the wiki export fields."""

    def __init__(self, gem_id, cast_time, description, name, weapon_type_restriction, stat_text, quality_bonus,
                 radius, radius_description, radius_secondary, radius_secondary_description, radius_tertiary,
                 radius_tertiary_description, skill_icon, skill_screenshot, inventory_icon, gem_tags, tags,
                 stats_per_level, is_aura, vendors, requirements):
        # Identity and text.
        self.id = gem_id
        self.name = name
        self.description = description
        self.stat_text = stat_text
        # Casting / usage restrictions.
        self.cast_time = cast_time
        self.weapon_type_restriction = weapon_type_restriction
        self.quality_bonus = quality_bonus
        # Radius data (primary / secondary / tertiary).
        self.radius = radius
        self.radius_description = radius_description
        self.radius_secondary = radius_secondary
        self.radius_secondary_description = radius_secondary_description
        self.radius_tertiary = radius_tertiary
        self.radius_tertiary_description = radius_tertiary_description
        # Art assets: note ``icon`` is the inventory icon, not the skill icon.
        self.skill_icon = skill_icon
        self.icon = inventory_icon
        self.skill_screenshot = skill_screenshot
        # Tags, progression and trade data.
        self.gem_tags = gem_tags
        self.tags = tags
        self.stats_per_level = stats_per_level
        self.is_aura = is_aura
        self.vendors = vendors
        self.requirements = requirements
        # Populated later by callers, if at all.
        self.base = None

    def __repr__(self):
        return "<Gem: name={}>".format(self.name)
| StarcoderdataPython |
49081 |
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
import radical.utils as ru
from .base import LaunchMethod
# ------------------------------------------------------------------------------
#
class DPlace(LaunchMethod):
    """
    Launch method wrapper around ``dplace``, the process placement / job
    launcher found on SGI systems (e.g. Blacklight).
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, cfg, session):
        LaunchMethod.__init__(self, cfg, session)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # dplace: job launcher for SGI systems (e.g. on Blacklight)
        self.launch_command = ru.which('dplace')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, t, launch_script_hop):
        """
        Build the dplace command line for task *t*.

        Returns:
            (dplace_command, None): the full launch command string; the hop
            command is unused for this launch method.

        Raises:
            RuntimeError: when *t*'s slots carry no 'task_offsets' entry.
        """
        slots = t['slots']
        td = t['description']
        task_exec = td['executable']
        task_cores = td['cpu_processes'] # FIXME: also use cpu_threads
        task_args = td.get('arguments') or []
        task_argstr = self._create_arg_string(task_args)
        if 'task_offsets' not in slots :
            raise RuntimeError('insufficient information to launch via %s: %s'
                              % (self.name, slots))
        # FIXME: This is broken due to changes lot structure
        task_offsets = slots['task_offsets']
        # Exactly one contiguous offset is supported here.
        assert(len(task_offsets) == 1)
        dplace_offset = task_offsets[0]
        task_command = "%s %s" % (task_exec, task_argstr)
        # Pins the task to the core range [offset, offset + cores - 1].
        dplace_command = "%s -c %d-%d %s" % (self.launch_command, dplace_offset,
                                             dplace_offset + task_cores - 1,
                                             task_command)
        return dplace_command, None
# ------------------------------------------------------------------------------
| StarcoderdataPython |
6614289 | """
This module computes finite size supercell charge corrections for
defects in anistropic systems using extended Freysoldt (or Kumagai) method
developed by Kumagai and Oba.
Kumagai method includes
a) anisotropic PC energy
b) potential alignment by atomic site averaging at Wigner Seitz cell
edge
If you use the corrections implemented in this module, cite
a) Kumagai and Oba, Phys. Rev. B. 89, 195205 (2014) and
b) Freysoldt, Neugebauer, and Van <NAME>,
Phys. Status Solidi B. 248, 1067-1076 (2011) and
in addition to the pycdt paper
"""
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
import math
import logging
import numpy as np
from pymatgen.io.vasp.outputs import Locpot, Outcar
from pymatgen.core.lattice import Lattice
from pycdt.corrections.utils import *
from pycdt.utils.units import hart_to_ev
import warnings
norm = np.linalg.norm
warnings.warn("Replacing PyCDT usage of Kumagai base classes with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
def kumagai_init(structure, dieltens):
    """
    Prepare the lattice / dielectric quantities shared by the Kumagai
    correction terms.

    Args:
        structure: bulk pymatgen Structure
        dieltens: dielectric tensor; a scalar, a 3-vector of diagonal
            entries, or a full 3x3 matrix (normalised to 3x3 here)

    Returns:
        (angset, bohrset, vol, determ, invdiel): lattice vectors in
        Angstrom, lattice vectors in Bohr, cell volume (Bohr^3),
        determinant and inverse of the dielectric tensor.
    """
    logger = logging.getLogger(__name__)
    angset = structure.lattice.get_cartesian_coords(1)

    # Normalise the dielectric input to a full 3x3 tensor.
    dieltens = np.array(dieltens)
    if not len(dieltens.shape):
        dieltens = dieltens * np.identity(3)
    elif len(dieltens.shape) == 1:
        dieltens = np.diagflat(dieltens)

    logger.debug('Lattice constants (in Angs): ' + str(cleanlat(angset)))
    [a1, a2, a3] = ang_to_bohr * angset  # convert to bohr
    bohrset = [a1, a2, a3]
    vol = np.dot(a1, np.cross(a2, a3))
    logger.debug('Lattice constants (in Bohr): ' + str(cleanlat([a1, a2, a3])))

    determ = np.linalg.det(dieltens)
    invdiel = np.linalg.inv(dieltens)
    logger.debug('inv dielectric tensor: ' + str(invdiel))

    return angset, bohrset, vol, determ, invdiel
warnings.warn("Replacing PyCDT usage of Kumagai base classes with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
def real_sum(a1, a2, a3, r, q, dieltens, gamma, tolerance):
    """
    Direct (real-space) part of the anisotropic Ewald-like summation.

    Sums erfc-screened Coulomb terms over real-space translations
    i*a1 + j*a2 + k*a3, growing the translation shell until two consecutive
    shells agree to within *tolerance*.

    Args:
        a1, a2, a3: lattice vectors (Bohr)
        r: evaluation point relative to the defect; a zero vector evaluates
            the lattice self-energy (the i=j=k=0 origin term is skipped)
        q: point charge (units of e+)
        dieltens: dielectric tensor
        gamma: screening/convergence parameter
        tolerance: convergence criterion in eV (converted to Hartree below)

    Returns:
        The converged real-space sum (in Hartree), or None when convergence
        is not reached before the shell cutoff (a warning is logged).
    """
    invdiel = np.linalg.inv(dieltens)
    determ = np.linalg.det(dieltens)
    realpre = q / np.sqrt(determ)
    tolerance /= hart_to_ev
    #Real space sum by converging with respect to real space vectors
    #create list of real space vectors that satisfy |i*a1+j*a2+k*a3|<=N
    Nmaxlength = 40 #tolerance for stopping real space sum convergence
    N = 2
    r_sums = []
    while N < Nmaxlength:
        r_sum = 0.0
        if norm(r):
            # Off-origin evaluation: every translation contributes.
            for i in range(-N, N+1):
                for j in range(-N, N+1):
                    for k in range(-N, N+1):
                        r_vec = i*a1 + j*a2 + k*a3 - r
                        loc_res = np.dot(r_vec, np.dot(invdiel, r_vec))
                        nmr = math.erfc(gamma * np.sqrt(loc_res))
                        dmr = np.sqrt(determ * loc_res)
                        r_sum += nmr / dmr
        else:
            # r = 0: skip the singular origin term.
            for i in range(-N, N+1):
                for j in range(-N, N+1):
                    for k in range(-N, N+1):
                        if i == j == k == 0:
                            continue
                        else:
                            r_vec = i*a1 + j*a2 + k*a3
                            loc_res = np.dot(r_vec, np.dot(invdiel, r_vec))
                            nmr = math.erfc(gamma * np.sqrt(loc_res))
                            dmr = np.sqrt(determ * loc_res)
                            r_sum += nmr / dmr
        r_sums.append([N, realpre * r_sum])

        if N == Nmaxlength-1:
            # Ran out of shells before converging; caller receives None.
            logging.getLogger(__name__).warning(
                'Direct part could not converge with real space translation '
                'tolerance of {} for gamma {}'.format(Nmaxlength-1, gamma))
            return
        elif len(r_sums) > 3:
            # Compare the last two shell totals for convergence.
            if abs(abs(r_sums[-1][1]) - abs(r_sums[-2][1])) < tolerance:
                r_sum = r_sums[-1][1]
                logging.debug("gamma is {}".format(gamma))
                logging.getLogger(__name__).debug(
                    "convergence for real summatin term occurs at step {} "
                    "where real sum is {}".format(N, r_sum * hart_to_ev))
                break

        N += 1
    return r_sum
warnings.warn("Replacing PyCDT usage of Kumagai base classes with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
def get_g_sum_at_r(g_sum, structure, dim, r):
    """
    Look up the precomputed reciprocal-space sum at the grid point nearest *r*.

    Args:
        g_sum: reciprocal summation array from KumagaiBulkInit.reciprocal_sum
        structure: bulk pymatgen Structure
        dim: NGXF FFT grid dimensions
        r: position relative to the defect (cartesian coordinates)

    Returns:
        g_sum value at the grid indices corresponding to *r*.
    """
    frac_coords = structure.lattice.get_fractional_coords(r)
    ix, iy, iz = getgridind(structure, dim, frac_coords)
    return g_sum[ix, iy, iz]
warnings.warn("Replacing PyCDT usage of Kumagai base classes with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
def anisotropic_madelung_potential(structure, dim, g_sum, r, dieltens, q,
                                   gamma, tolerance):
    """
    Anisotropic Madelung potential at a point r != 0 (in eV).

    For r = (0, 0, 0) use anisotropic_pc_energy instead.

    Args:
        structure: bulk pymatgen Structure
        dim: NGXF FFT grid dimensions
        g_sum: precomputed reciprocal sum for all r vectors
        r: r vector (cartesian) relative to the defect; must be nonzero
        dieltens: dielectric tensor
        q: point charge (units of e+)
        gamma: convergence parameter
        tolerance: tolerance for numerical convergence (eV)
    """
    angset, [a1, a2, a3], vol, determ, invdiel = kumagai_init(
        structure, dieltens)

    # Reciprocal-space contribution is a table lookup; real-space part is
    # summed directly.
    recip_term = q * get_g_sum_at_r(g_sum, structure, dim, r)
    direct_term = real_sum(a1, a2, a3, r, q, dieltens, gamma, tolerance)

    # Self-interaction correction term.
    self_term = q * np.pi / (vol * (gamma ** 2))
    logging.getLogger(__name__).debug('self interaction piece is {}'.format(
        self_term * hart_to_ev))

    return hart_to_ev * (direct_term + recip_term - self_term)
warnings.warn("Replacing PyCDT usage of Kumagai base classes with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
def anisotropic_pc_energy(structure, g_sum, dieltens, q, gamma, tolerance):
    """
    Anisotropic periodic point-charge interaction energy (in eV).

    Args:
        structure: bulk pymatgen Structure
        g_sum: reciprocal summation array from KumagaiBulkInit
        dieltens: dielectric tensor
        q: point charge (units of e+)
        gamma: convergence parameter optimized in KumagaiBulkInit
        tolerance: convergence tolerance (eV) forwarded to the real-space sum
    """
    angset, [a1, a2, a3], vol, determ, invdiel = kumagai_init(
        structure, dieltens)

    # The four pieces of the PC energy: reciprocal, real, self-interaction
    # and surface terms (surface term applies at the origin only).
    recip_term = q * g_sum[0, 0, 0]
    real_term = real_sum(a1, a2, a3, [0, 0, 0], q, dieltens, gamma, tolerance)
    self_term = q * np.pi / (vol * (gamma ** 2))
    surface_term = 2 * gamma * q / np.sqrt(np.pi * determ)

    logger = logging.getLogger(__name__)
    logger.debug('reciprocal part: {}'.format(recip_term * hart_to_ev))
    logger.debug('real part: {}'.format(real_term * hart_to_ev))
    logger.debug('self interaction part: {}'.format(self_term * hart_to_ev))
    logger.debug('surface term: {}'.format(surface_term * hart_to_ev))

    pc_energy = -(q * 0.5 * hart_to_ev) * (
        real_term + recip_term - self_term - surface_term)
    logging.debug('Final PC Energy term: {} eV'.format(pc_energy))
    return pc_energy
warnings.warn("Replacing PyCDT usage of Kumagai base classes with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
def getgridind(structure, dim, r, gridavg=0.0):
    """
    Computes the index of a point, r, in the locpot grid
    Args:
        structure:
            Pymatgen structure object
        dim:
            dimension of FFT grid (NGXF dimension list in VASP)
        r:
            Relative co-ordinates with respect to abc lattice vectors
        gridavg:
            If you want to do atomic site averaging, set gridavg to
            the radius of the atom at r
    Returns:
        [i,j,k]: Indices as list
    TODO: Once final, remove the getgridind inside disttrans function

    NOTE(review): *r* is mutated in place below (wrapped into [0, 1) and
    scaled to cartesian length per axis) — callers passing a list they
    reuse should pass a copy.
    """
    abc = structure.lattice.abc
    grdind = []

    if gridavg:
        radvals = [] #radius in terms of indices
        dxvals = []

    for i in range(3):
        # Wrap the fractional coordinate into [0, 1), then scale by the
        # lattice length so comparisons below are in Angstrom.
        if r[i] < 0:
            while r[i] < 0:
                r[i] += 1
        elif r[i] >= 1:
            while r[i] >= 1:
                r[i] -= 1
        r[i] *= abc[i]
        num_pts = dim[i]
        # Grid-point positions along this axis, then pick the nearest one.
        x = [now_num / float(num_pts) * abc[i] for now_num in range(num_pts)]
        dx = x[1] - x[0]
        x_rprojection_delta_abs = np.absolute(x - r[i])
        ind = np.argmin(x_rprojection_delta_abs)
        if x_rprojection_delta_abs[ind] > dx*1.1: #to avoid numerical errors
            logger = logging.getLogger(__name__)
            logger.error("Input position not within the locpot grid")
            logger.error("%d, %d, %f", i, ind, r)
            logger.error("%f", x_rprojection_delta_abs)
            raise ValueError("Input position is not within the locpot grid")
        grdind.append(ind)
        if gridavg:
            radvals.append(int(np.ceil(gridavg/dx)))
            dxvals.append(dx)

    if gridavg:
        # Collect every grid point within the sampling sphere of radius
        # *gridavg* around the nearest grid point, with periodic wrapping.
        grdindfull = []
        for i in range(-radvals[0], radvals[0]+1):
            for j in range(-radvals[1], radvals[1]+1):
                for k in range(-radvals[2], radvals[2]+1):
                    dtoc = [i*dxvals[0], j*dxvals[1], k*dxvals[2]]
                    if norm(dtoc) < gridavg:
                        ival = (i+grdind[0]) % dim[0]
                        jval = (j+grdind[1]) % dim[1]
                        kval = (k+grdind[2]) % dim[2]
                        grdindfull.append((ival, jval, kval))
        grdind = grdindfull

    return grdind
warnings.warn("Replacing PyCDT usage of Kumagai base classes with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
def disttrans(struct, defstruct, defpos=None):
    """
    To calculate distance from defect to each atom and finding NGX grid
    pts at each atom.
    Args:
        struct: Bulk structure object
        defstruct: Defect structure object
        defpos: (if known) defect position as a pymatgen Site object within bulk supercell

    Returns:
        dict keyed by bulk site index with per-site entries:
        'dist' (distance to defect), 'cart', 'cart_reldef', 'siteobj',
        'bulk_site_index', 'def_site_index'; or None when the defect site
        cannot be determined.
    """
    #Find defect location in bulk and defect cells
    blksite, defsite = find_defect_pos(struct, defstruct, defpos=defpos)
    logger = logging.getLogger(__name__)
    if blksite is None and defsite is None:
        logger.error('Not able to determine defect site')
        return
    # Exactly one of the two being None identifies the defect type.
    if blksite is None:
        logger.debug('Found defect to be Interstitial type at %s',
                     repr(defsite))
    elif defsite is None:
        logger.debug('Found defect to be Vacancy type at %s', repr(blksite))
    else:
        logger.debug('Found defect to be antisite/subsitution type at %s ' \
                     ' in bulk, and %s in defect cell',
                     repr(blksite), repr(defsite))
    # Fall back to the available position so both coordinates are defined.
    if blksite is None:
        blksite = defsite
    elif defsite is None:
        defsite = blksite
    def_ccoord = blksite[:]
    defcell_def_ccoord = defsite[:]
    # Iterate over the larger cell's site list (defect cell for interstitials).
    if len(struct.sites) >= len(defstruct.sites):
        sitelist = struct.sites[:]
    else: #for interstitial list
        sitelist = defstruct.sites[:]
    #better image getter since pymatgen wasnt working well for this
    def returnclosestr(vec):
        """Return [dist, r-to-defect, transvec] of the closest periodic image."""
        from operator import itemgetter
        listvals = []
        abclats = defstruct.lattice.matrix
        trylist = [-1, 0, 1]
        for i in trylist:
            for j in trylist:
                for k in trylist:
                    transvec = i*abclats[0] + j*abclats[1] + k*abclats[2]
                    rnew = vec - (defcell_def_ccoord + transvec)
                    listvals.append([norm(rnew), rnew, transvec])
        listvals.sort(key=itemgetter(0))
        return listvals[0] #will return [dist,r to defect, and transvec for defect]
    grid_sites = {}  # dictionary with indices keys in order of structure list
    for i in sitelist:
        if np.array_equal(i.coords, def_ccoord):
            logging.debug('Site {} is defect! Skipping '.format(i))
            continue
        blksite, defsite = closestsites(struct, defstruct, i.coords)
        blkindex = blksite[-1]
        defindex = defsite[-1]
        dcart_coord = defsite[0].coords
        closeimage = returnclosestr(dcart_coord)
        cart_reldef = closeimage[1]
        defdist = closeimage[0]
        # Sanity-check the minimum-image search (tolerance 0.1 Angstrom).
        if abs(norm(cart_reldef) - defdist) > 0.1:
            logger.warning('Image locater issue encountered for site = %d',
                           blkindex)
            logger.warning('In defect supercell')
            logger.warning('Distance should be %f', defdist)
            logger.warning('But, calculated distance is %f', norm(cart_reldef))
        if blkindex in grid_sites:
            logger.warning('Index %d already exists in potinddict!', blkindex)
            logger.warning('Overwriting information.')
        grid_sites[blkindex] = {
            'dist': defdist,
            'cart': dcart_coord,
            'cart_reldef': cart_reldef,
            'siteobj': [i.coords, i.frac_coords, i.species_string],
            'bulk_site_index': blkindex,
            'def_site_index': defindex}
    return grid_sites
warnings.warn("Replacing PyCDT usage of Kumagai base classes with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
def wigner_seitz_radius(structure):
    """
    Wigner-Seitz radius for the given structure: the smallest distance from
    the cell center to a facet midpoint of the Wigner-Seitz cell.

    Args:
        structure: pymatgen Structure object
    """
    facets = structure.lattice.get_wigner_seitz_cell()
    midpoint_dists = [np.linalg.norm(np.mean(np.array(facet), axis=0))
                      for facet in facets]
    return min(midpoint_dists)
warnings.warn("Replacing PyCDT usage of Kumagai base classes with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
def read_ES_avg_fromlocpot(locpot):
    """
    Atomic-site-averaged electrostatic potential from a pymatgen Locpot.

    Each site's potential is the mean of all grid values within a 1 Angstrom
    sampling sphere around the site.

    Returns:
        dict with 'sampling_radii' (per-species radius map), 'ngxf_dims'
        (FFT grid dimensions) and 'potential' (per-site averaged values).
    """
    structure = locpot.structure
    radii = {specie: 1.0 for specie in set(structure.species)}
    # TODO: The above radii could be smarter (related to ENAUG?)
    # but turns out you get a similar result to Outcar differences
    # when taking locpot avgd differences
    ES_data = {'sampling_radii': radii, 'ngxf_dims': locpot.dim}

    site_potentials = []
    for site in structure.sites:
        grid_indices = getgridind(structure, locpot.dim, site.frac_coords,
                                  gridavg=radii[site.specie])
        values = [locpot.data["total"][u][v][w] for u, v, w in grid_indices]
        site_potentials.append(np.mean(values))

    ES_data.update({'potential': site_potentials})
    return ES_data
warnings.warn("Replacing PyCDT usage of Kumagai base classes with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
class KumagaiBulkInit(object):
    """
    Compute the anisotropic madelung potential array from the bulk
    locpot. This helps in evaluating the bulk supercell related part
    once to speed up the calculations.
    """
    def __init__(self, structure, dim, epsilon, encut=520, tolerance=0.0001,
                 optgamma=False):
        """
        Args
            structure:
                Pymatgen structure object of bulk cell
            dim:
                Fine FFT grid dimensions as a list
                For vasp this is NGXF grid dimensions
            epsilon:
                Dielectric tensor
            encut (float):
                Energy cutoff for optimal gamma
            tolerance (float):
                Accuracy parameter
            optgamma:
                if you know optimized gamma, give its value.
                Otherwise it will be computed.
        """
        self.structure = structure
        self.dim = dim
        self.epsilon = epsilon
        self.encut = encut
        self.tolerance = tolerance
        #self.silence = silence
        # Gamma balances the real- vs reciprocal-space convergence; compute
        # it unless the caller supplied a known value.
        if not optgamma:
            self.gamma = self.find_optimal_gamma()
        else:
            self.gamma = optgamma
        # Precompute the reciprocal-space sum once for the whole FFT grid.
        self.g_sum = self.reciprocal_sum()
        logging.getLogger(__name__).info('optimized gamma: %f', self.gamma)

    def find_optimal_gamma(self):
        """
        Find optimal gamma by evaluating the brute force reciprocal
        summation and seeing when the values are on the order of 1,
        This calculation is the anisotropic Madelung potential at r = (0,0,0).
        Note this only requires the STRUCTURE not the LOCPOT object.

        Returns:
            the converged gamma, or None when optimization fails.
        """
        angset, [a1, a2, a3], vol, determ, invdiel = kumagai_init(
            self.structure, self.epsilon)
        optgam = None
        #do brute force recip summation
        def get_recippart(encut, gamma):
            # Brute-force reciprocal-space sum up to *encut*; imaginary part
            # is identically zero here (returned for interface symmetry).
            recippart = 0.0
            for rec in genrecip(a1, a2, a3, encut):
                Gdotdiel = np.dot(rec, np.dot(self.epsilon, rec))
                summand = math.exp(-Gdotdiel / (4 * (gamma ** 2))) / Gdotdiel
                recippart += summand
            recippart *= 4*np.pi/vol
            return recippart, 0.0
        def do_summation(gamma):
            # Do recip sum until it is bigger than 1eV
            # First do Recip space sum convergence with respect to encut for
            # this gamma
            encut = 20 #start with small encut for expediency
            recippartreal1, recippartimag1 = get_recippart(encut, gamma)
            encut += 10
            recippartreal, recippartimag = get_recippart(encut, gamma)
            converge = [recippartreal1, recippartreal]
            logger = logging.getLogger(__name__)
            # Raise encut by 10 eV until two consecutive sums agree.
            while abs(abs(converge[0]) - abs(converge[1])) * hart_to_ev > \
                    self.tolerance:
                encut += 10
                recippartreal, recippartimag = get_recippart(encut, gamma)
                converge.reverse()
                converge[1] = recippartreal
                if encut > self.encut:
                    msg = 'Optimal gamma not found at {} eV cutoff'.format(
                        self.encut)
                    logger.error(msg)
                    raise ValueError(msg)
            if abs(recippartimag) * hart_to_ev > self.tolerance:
                logger.error("Imaginary part of reciprocal sum not converged.")
                logger.error("Imaginary sum value is {} (eV)".format(
                    recippartimag * hart_to_ev))
                return None, None
            logger.debug('Reciprocal sum converged to %f eV',
                         recippartreal * hart_to_ev)
            logger.debug('Convergin encut = %d eV', encut)
            # A sum below 1 eV means gamma is too small; signal a retry.
            if (abs(converge[1]) * hart_to_ev < 1 and not optgam):
                logger.warning('Reciprocal summation value is less than 1 eV.')
                logger.warning('Might lead to errors')
                logger.warning('Change gamma.')
                return None, 'Try Again'
            return recippartreal, gamma
        logger = logging.getLogger(__name__)
        #start with gamma s.t. gamma*L=5 (this is optimal)
        #optimizing gamma for the reciprocal sum to improve convergence
        gamma = 5.0/(vol ** (1/3.0))
        optimal_gamma_found = False
        while not optimal_gamma_found:
            recippartreal, optgamma = do_summation(gamma)
            if optgamma == gamma:
                logger.debug('optimized gamma found to be %f', optgamma)
                optimal_gamma_found = True
            elif 'Try Again' in optgamma:
                # Grow gamma by 50% and retry the summation.
                gamma *= 1.5
            else:
                logger.error('Had problem in gamma optimization process.')
                return None
            if gamma > 50:
                logger.error('Could not optimize gamma before gamma = %d', 50)
                return None
        return optgamma

    def reciprocal_sum(self):
        """
        Compute the reciprocal summation in the anisotropic Madelung
        potential.
        TODO: Get the input to fft cut by half by using rfft instead of fft

        Returns:
            real part of the FFT'd reciprocal-space array (shape self.dim).
        """
        logger = logging.getLogger(__name__)
        logger.debug('Reciprocal summation in Madeling potential')
        over_atob = 1.0 / ang_to_bohr
        atob3 = ang_to_bohr ** 3
        latt = self.structure.lattice
        vol = latt.volume * atob3 # in Bohr^3
        reci_latt = latt.reciprocal_lattice
        [b1, b2, b3] = reci_latt.get_cartesian_coords(1)
        b1 = np.array(b1) * over_atob # In 1/Bohr
        b2 = np.array(b2) * over_atob
        b3 = np.array(b3) * over_atob
        nx, ny, nz = self.dim
        logging.debug('nx: %d, ny: %d, nz: %d', nx, ny, nz)
        # Build FFT-ordered index arrays: [0..n/2, -n/2..-1] per axis.
        ind1 = np.arange(nx)
        for i in range(int(nx/2), nx):
            ind1[i] = i - nx
        ind2 = np.arange(ny)
        for i in range(int(ny/2), ny):
            ind2[i] = i - ny
        ind3 = np.arange(nz)
        for i in range(int(nz/2), nz):
            ind3[i] = i - nz
        g_array = np.zeros(self.dim, np.dtype('c16'))
        gamm2 = 4*(self.gamma**2)
        # Fill the Gaussian-screened 1/(G.eps.G) kernel; G = 0 is skipped.
        for i in ind1:
            for j in ind2:
                for k in ind3:
                    g = i*b1 + j*b2 + k*b3
                    g_eps_g = np.dot(g, np.dot(self.epsilon, g))
                    if i == j == k == 0:
                        continue
                    else:
                        g_array[i,j,k] = math.exp(-g_eps_g/gamm2) / g_eps_g
        # A single FFT converts the kernel to the real-space sum for every
        # grid point at once.
        r_array = np.fft.fftn(g_array)
        over_vol = 4*np.pi/vol # Multiply with q later
        r_array *= over_vol
        r_arr_real = np.real(r_array)
        r_arr_imag = np.imag(r_array)
        max_imag = r_arr_imag.max()
        logger.debug('Max imaginary part found to be %f', max_imag)
        return r_arr_real
warnings.warn("Replacing PyCDT usage of Kumagai base classes and plotting with calls to "
"corresponding objects in pymatgen.analysis.defects.corrections\n"
"All core Kumagai code will be removed with Version 2.5 of PyCDT."
" (note these functions all exist in pymatgen)",
DeprecationWarning)
class KumagaiCorrection(object):
"""
Extended freysoldt correction developed by Kumagai and Oba.
"""
def __init__(self, dielectric_tensor, q, gamma, g_sum, bulk_structure,
defect_structure, energy_cutoff=520, madetol=0.0001,
lengths=None, **kw):
"""
Args:
dielectric_tensor:
Macroscopic dielectric tensor
Include ionic also if defect is relaxed, othewise ion clamped.
Can be a matrix array or scalar.
q:
Charge associated with the defect. Typically integer
gamma:
Convergence parameter. Obtained from KumagaiBulkPart
g_sum:
value that is dependent on the Bulk only.
Obtained from KumagaiBulkPart
bulk_structure:
bulk Pymatgen structure object. Need to specify this if
using Outcar method for atomic site avg.
(If you specify outcar files for bulk_file_path but dont
specify structure then code will break)
(TO DO: resolve this dumb dependency by being smarter
about where structure comes from?)
defect_structure:
defect structure. Needed if using Outcar method
energy_cutoff:
Energy for plane wave cutoff (in eV).
If not given, Materials Project default 520 eV is used.
madetol:
Tolerance for convergence of energy terms in eV
lengths:
Lengths of axes, for speeding up plotting slightly
keywords:
1) bulk_locpot: Bulk Locpot file path OR Bulk Locpot
defect_locpot: Defect Locpot file path or defect Locpot
2) (Or) bulk_outcar: Bulk Outcar file path
defect_outcar: Defect outcar file path
3) defect_position: Defect position as a pymatgen Site object in the bulk supercell structure
NOTE: this is optional but recommended, if not provided then analysis is done to find
the defect position; this analysis has been rigorously tested, but has broken in an example with
severe long range relaxation
(at which point you probably should not be including the defect in your analysis...)
"""
if isinstance(dielectric_tensor, int) or \
isinstance(dielectric_tensor, float):
self.dieltens = np.identity(3) * dielectric_tensor
else:
self.dieltens = np.array(dielectric_tensor)
if 'bulk_locpot' in kw:
if isinstance(kw['bulk_locpot'], Locpot):
self.locpot_blk = kw['bulk_locpot']
else:
self.locpot_blk = Locpot.from_file(kw['bulk_locpot'])
if isinstance(kw['defect_locpot'], Locpot):
self.locpot_def = kw['defect_locpot']
else:
self.locpot_def = Locpot.from_file(kw['defect_locpot'])
self.dim = self.locpot_blk.dim
self.outcar_blk = None
self.outcar_def = None
self.do_outcar_method = False
if 'bulk_outcar' in kw:
self.outcar_blk = Outcar(str(kw['bulk_outcar']))
self.outcar_def = Outcar(str(kw['defect_outcar']))
self.do_outcar_method = True
self.locpot_blk = None
self.locpot_def = None
self.dim = self.outcar_blk.ngf
if 'defect_position' in kw:
self._defpos = kw['defect_position']
else:
self._defpos = None
self.madetol = madetol
self.q = q
self.encut = energy_cutoff
self.structure = bulk_structure
self.defstructure = defect_structure
self.gamma = gamma
self.g_sum = g_sum
self.lengths=lengths
def correction(self, title=None, partflag='All'):
"""
Computes the extended Freysoldt correction for anistropic systems
developed by <NAME> and <NAME> (Ref: PRB 89, 195205 (2014)
Args:
title:
If plot of potential averaging process is wanted set title
partflag:
Specifies the part of correction computed
'pc': periodic interaction of defect charges (point charge) only
'potalign': potential alignmnet correction only,
'All' (default): pc and potalign combined into one value,
'AllSplit' for correction in form [PC, potterm, full]
"""
logger = logging.getLogger(__name__)
logger.info('This is Kumagai Correction.')
if not self.q:
if partflag == 'AllSplit':
return [0., 0., 0.]
else:
return 0.0
if partflag != 'potalign':
energy_pc = self.pc()
if partflag != 'pc':
potalign = self.potalign(title=title)
#logger.info('Kumagai Correction details:')
#if partflag != 'potalign':
# logger.info('PCenergy (E_lat) = %f', round(energy_pc, 5))
#if partflag != 'pc':
# logger.info('potential alignment (-q*delta V) = %f',
# round(potalign, 5))
if partflag in ['All','AllSplit']:
logger.info('Total Kumagai correction = %f',
round(energy_pc+potalign, 5))
if partflag == 'pc':
return round(energy_pc, 5)
elif partflag == 'potalign':
return round(potalign, 5)
elif partflag == 'All':
return round(energy_pc+potalign, 5)
else:
return map(lambda x: round(x, 5),
[energy_pc, potalign, energy_pc+potalign])
def pc(self):
energy_pc = anisotropic_pc_energy(
self.structure, self.g_sum, self.dieltens, self.q,
self.gamma, self.madetol)
logger = logging.getLogger(__name__)
logger.info('PC energy determined to be %f eV (%f Hartree)',
energy_pc, energy_pc/hart_to_ev)
return energy_pc
def potalign(self, title=None, output_sr=False):
"""
Potential alignment for Kumagai method
Args:
title: Title for the plot. None will not generate the plot
output_sr allows for output of the short range potential
(Good for delocalization analysis)
"""
logger = logging.getLogger(__name__)
logger.info('\nRunning potential alignment (atomic site averaging)')
angset, [a1, a2, a3], vol, determ, invdiel = kumagai_init(
self.structure, self.dieltens)
potinddict = disttrans(self.structure, self.defstructure, defpos=self._defpos)
minlat = min(norm(a1), norm(a2), norm(a3))
lat_perc_diffs = [100 * abs(norm(a1) - norm(lat)) / minlat for lat \
in [a2, a3]]
lat_perc_diffs.append(100 * abs(norm(a2) - norm(a3)) / minlat)
if not all(i < 45 for i in lat_perc_diffs):
logger.warning('Detected that cell was not very cubic.')
logger.warning('Sampling atoms outside wigner-seitz cell may '\
'not be optimal')
wsrad = wigner_seitz_radius(self.structure)
logger.debug('wsrad %f', wsrad)
for i in potinddict.keys():
logger.debug("Atom %d, distance: %f", i, potinddict[i]['dist'])
if potinddict[i]['dist'] > wsrad:
potinddict[i]['OutsideWS'] = True
else:
potinddict[i]['OutsideWS'] = False
if not self.do_outcar_method:
puredat = read_ES_avg_fromlocpot(self.locpot_blk)
defdat = read_ES_avg_fromlocpot(self.locpot_def)
else:
puredat = {'potential': self.outcar_blk.electrostatic_potential}
defdat = {'potential': self.outcar_def.electrostatic_potential}
jup = 0
for i in potinddict.keys():
jup += 1
if (not title and not potinddict[i]['OutsideWS']):
#dont need to calculate inside WS if not printing plot
continue
j = potinddict[i]['def_site_index'] #assuming zero defined
k = potinddict[i]['bulk_site_index']
v_qb = defdat['potential'][j] - puredat['potential'][k]
cart_reldef = potinddict[i]['cart_reldef']
v_pc = anisotropic_madelung_potential(
self.structure, self.dim, self.g_sum, cart_reldef,
self.dieltens, self.q, self.gamma, self.madetol)
v_qb *= -1 #change charge sign convention
potinddict[i]['Vpc'] = v_pc
potinddict[i]['Vqb'] = v_qb
logger.debug('Atom: %d, anisotropic madelung potential: %f',
i, v_pc)
logger.debug('Atom: %d, bulk/defect difference = %f', i, v_qb)
if title:
fullspecset = self.structure.species
specset = list(set(fullspecset))
shade, forplot = {}, {}
for i in specset:
shade[i.symbol] = {'r': [], 'Vpc': [], 'Vqb': []}
forplot[i.symbol] = {'r': [], 'Vpc': [], 'Vqb': [],'sites':[]}
forcorrection = []
for i in potinddict.keys():
if (not title and not potinddict[i]['OutsideWS']):
continue
if potinddict[i]['OutsideWS']:
forcorrection.append(potinddict[i]['Vqb']-potinddict[i]['Vpc'])
if title:
elt = fullspecset[i].symbol
shade[elt]['r'].append(potinddict[i]['dist'])
shade[elt]['Vpc'].append(potinddict[i]['Vpc'])
shade[elt]['Vqb'].append(potinddict[i]['Vqb'])
if title:
elt = fullspecset[i].symbol
forplot[elt]['r'].append(potinddict[i]['dist'])
forplot[elt]['Vpc'].append(potinddict[i]['Vpc'])
forplot[elt]['Vqb'].append(potinddict[i]['Vqb'])
forplot[elt]['sites'].append(potinddict[i]['siteobj'])
potalign = np.mean(forcorrection)
if title:
forplot['EXTRA'] = {'wsrad': wsrad, 'potalign': potalign}
try:
forplot['EXTRA']['lengths']=self.structure.lattice.abc
except:
forplot['EXTRA']['lengths']=self.lengths
if title != 'written':
KumagaiCorrection.plot(forplot, title=title)
else:
#TODO: use a more descriptive fname that describes the defect
from monty.serialization import dumpfn
from monty.json import MontyEncoder
fname = 'KumagaiData.json'
dumpfn(forplot, fname, cls=MontyEncoder)
logger.info('potential alignment (site averaging): %f',
np.mean(forcorrection))
logger.info('Potential correction energy: %f eV',
-self.q * np.mean(forcorrection))
if output_sr:
outpot = {'sampled': forcorrection, 'alldata':potinddict}
return ((-self.q * np.mean(forcorrection)), outpot) #pot align energy correction (eV)
else:
return (-self.q * np.mean(forcorrection)) #pot align energy correction (eV)
@classmethod
def plot(cls, forplot, title):
"""
Plotting of locpot data
TODO: Rename forplot to a more descriptive name
"""
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
plt.figure()
plt.clf()
collis = ['b', 'g', 'c', 'm', 'y', 'w', 'k']
ylis = []
rlis = []
for i in range(len(forplot.keys())):
inkey = list(forplot.keys())[i]
if inkey == 'EXTRA':
continue
for k in forplot[inkey]['r']:
rlis.append(k)
for k in ['Vqb', 'Vpc']:
for u in forplot[inkey][k]:
ylis.append(u)
plt.plot(forplot[inkey]['r'], forplot[inkey]['Vqb'],
color=collis[i], marker='^', linestyle='None',
label=str(inkey) + ': $V_{q/b}$')
plt.plot(forplot[inkey]['r'], forplot[inkey]['Vpc'],
color=collis[i], marker='o', linestyle='None',
label=str(inkey) + ': $V_{pc}$')
full = []
for i in forplot.keys():
if i == 'EXTRA':
continue
for k in range(len(forplot[i]['Vpc'])):
full.append([
forplot[i]['r'][k],
forplot[i]['Vqb'][k] - forplot[i]['Vpc'][k]
])
realfull = sorted(full, key=lambda x: x[0])
r, y = [], []
for i in realfull:
r.append(i[0])
y.append(i[1])
wsrad = forplot['EXTRA']['wsrad']
potalign = forplot['EXTRA']['potalign']
plt.plot(r, y, color=collis[-1], marker='x', linestyle='None',
label='$V_{q/b}$ - $V_{pc}$')
plt.xlabel('Distance from defect ($\AA$)',fontsize=20)
plt.ylabel('Potential (V)',fontsize=20)
x = np.arange(wsrad, max(forplot['EXTRA']['lengths']), 0.01)
plt.fill_between(x, min(ylis) - 1, max(ylis) + 1, facecolor='red',
alpha=0.15, label='sampling region')
plt.axhline(y=potalign, linewidth=0.5, color='red',
label='pot. align. / q')
fontP = FontProperties()
fontP.set_size('small')
plt.legend(bbox_to_anchor=(1.05, 0.5), prop=fontP)
plt.axhline(y=0, linewidth=0.2, color='black')
plt.ylim([min(ylis) - 0.5, max(ylis) + 0.5])
plt.xlim([0, max(rlis) + 3])
plt.title('%s atomic site potential plot' % title)
plt.savefig('%s_kumagaisiteavgPlot.pdf' % title)
@classmethod
def plot_from_datfile(cls, name='KumagaiData.json', title='default'):
"""
Takes data file called 'name' and does plotting.
Good for later plotting of locpot data after running run_correction()
"""
from monty.serialization import loadfn
from monty.json import MontyDecoder
forplot = loadfn(name, cls=MontyDecoder)
cls.plot(forplot, title=title)
| StarcoderdataPython |
4814308 | <filename>tests/mocks.py
import datetime
from random import randint
from telegram.chat import Chat
from telegram.message import Message
from telegram.user import User
class MockBot:
    """Minimal stand-in for telegram.Bot that records the last payload per chat."""

    def __init__(self):
        # Per-instance store: chat_id -> last text/photo sent.  (The original
        # used a shared class-level dict, which leaked state between tests.)
        self.last_message = {}

    def send_message(self, chat_id, text, **kwargs):
        self.last_message[chat_id] = text

    def sendMessage(self, *args, **kwargs):
        # camelCase alias matching the python-telegram-bot API surface.
        self.send_message(*args, **kwargs)

    def send_photo(self, chat_id, photo, *args, **kwargs):
        self.last_message[chat_id] = photo

    def sendPhoto(self, *args, **kwargs):
        self.send_photo(*args, **kwargs)
class MockChat(Chat):
    """Chat stand-in: defaults to a private chat with a random id."""

    def __init__(self, id=None, type=Chat.PRIVATE):
        chat_id = id if id else randint(1, 1000)
        super().__init__(id=chat_id, type=type)
class MockUser(User):
    """User stand-in: random id and placeholder name unless given."""

    def __init__(self, id=None, first_name=None, is_bot=False):
        user_id = id or randint(1, 1000)
        name = first_name or "MockUser"
        super().__init__(id=user_id, first_name=name, is_bot=is_bot or False)
class MockMessage(Message):
    """telegram.Message stand-in with a random id and sensible mock defaults."""

    def __init__(
        self,
        text="",
        reply_to_message=None,
        from_user=None,
        chat=None,
        date=None,
    ):
        message_id = randint(1, 1000)
        chat = chat or MockChat()
        from_user = from_user or MockUser()
        # Evaluate the timestamp per call: the original default
        # ``date=datetime.datetime.now()`` was computed once at
        # class-definition time and shared by every message created.
        date = date or datetime.datetime.now()
        super().__init__(
            message_id=message_id,
            from_user=from_user,
            date=date,
            chat=chat,
            text=text,
            reply_to_message=reply_to_message,
        )
class MockUpdate:
    """Container pairing a message with an update, like telegram.Update."""

    message = None

    def __init__(self, message=None):
        # Build the default lazily: a constructed default argument
        # (``message=MockMessage()``) would be evaluated once at
        # class-definition time and shared across all updates.
        self.message = message if message is not None else MockMessage()
"""
def PaDondeHoy(bot, update):
day = datetime.date.today().weekday()
@lru_cache()
def cached_response(day_of_week, chat):
with open('res/texts/days.pickle', 'rb') as f:
days = pickle.load(f)
return random.choice(days[day_of_week])
response = cached_response(day, update.message.chat_id)
bot.sendMessage(chat_id=update.message.chat_id, text=response)
"""
| StarcoderdataPython |
207872 | <reponame>ryanwersal/pyinfra<gh_stars>1-10
from __future__ import division
import math
import os
import platform
import sys
from collections import deque
from contextlib import contextmanager
from threading import Event, Thread
from time import sleep
import pyinfra
IS_WINDOWS = platform.system() == 'Windows'

# Spinner animation: redraw interval (seconds) and the frame characters,
# rotated one step per redraw.
WAIT_TIME = 1 / 5
WAIT_CHARS = deque(('-', '/', '|', '\\'))

# Hacky way of getting terminal size (so can clear lines)
# Source: http://stackoverflow.com/questions/566746
IS_TTY = sys.stdout.isatty() and sys.stderr.isatty()
TERMINAL_WIDTH = 0

if IS_TTY:
    try:
        TERMINAL_WIDTH = os.get_terminal_size().columns
    except AttributeError:
        # Python 2 has no os.get_terminal_size; fall back to ``stty size``
        # (not available on Windows, where the width stays 0).
        if not IS_WINDOWS:
            terminal_size = os.popen('stty size', 'r').read().split()
            if len(terminal_size) == 2:
                TERMINAL_WIDTH = int(terminal_size[1])
def _print_spinner(stop_event, progress_queue):
    """Spinner-thread body: redraw ``<spinner> <progress>`` every WAIT_TIME
    seconds until ``stop_event`` is set.

    No-op when stdout isn't a TTY or progress display is disabled via the
    ``PYINFRA_PROGRESS=off`` environment variable.
    """
    if not IS_TTY or os.environ.get('PYINFRA_PROGRESS') == 'off':
        return
    progress = ''
    text = ''
    while True:
        # Stop when asked too
        if stop_event.is_set():
            break
        WAIT_CHARS.rotate(1)
        # Show the most recent progress message, if any has been queued;
        # otherwise keep displaying the previous one.
        try:
            progress = progress_queue[-1]
        except IndexError:
            pass
        text = ' {0}'.format(
            ' '.join((WAIT_CHARS[0], progress)),
        )
        # Carriage return (no newline) so the next redraw overwrites in place.
        text = '{0}\r'.format(text)
        sys.stdout.write(text)
        sys.stdout.flush()
        # In pyinfra_cli's __main__ we set stdout & stderr to be line buffered,
        # so write this escape code (clear line) into the buffer but don't flush,
        # such that any next print/log/etc clear the line first.
        if not IS_WINDOWS:
            sys.stdout.write('\033[K')
            sys.stderr.write('\033[K')
        sleep(WAIT_TIME)
@contextmanager
def progress_spinner(items, prefix_message=None):
    """Context manager showing a CLI progress spinner over ``items``.

    Yields a ``progress(complete_item)`` callable; call it as each item
    finishes to update the displayed percentage.  Outside CLI mode a
    no-op callable is yielded instead.
    """
    # If there's no pseudo state we're not in CLI mode, so just return a noop
    # handler and exit.
    if not pyinfra.is_cli:
        yield lambda complete_item: None
        return
    if not isinstance(items, set):
        items = set(items)
    total_items = len(items)
    stop_event = Event()
    def make_progress_message(include_items=True):
        # Build "NN% (done/total) - prefix - {remaining items}", truncated
        # to fit the detected terminal width.
        message_bits = []
        # If we only have 1 item, don't show %
        if total_items > 1:
            percentage_complete = 0
            complete = total_items - len(items)
            percentage_complete = int(math.floor(complete / total_items * 100))
            message_bits.append('{0}% ({1}/{2})'.format(
                percentage_complete,
                complete,
                total_items,
            ))
        if prefix_message:
            message_bits.append(prefix_message)
        if include_items and items:
            # Plus 3 for the " - " joining below
            message_length = sum((len(message) + 3) for message in message_bits)
            # -8 for padding left+right, -2 for {} wrapping
            items_allowed_width = TERMINAL_WIDTH - 10 - message_length
            if items_allowed_width > 0:
                items_string = '{%s}' % (', '.join('{0}'.format(i) for i in items))
                if len(items_string) >= items_allowed_width:
                    items_string = '%s...}' % (
                        # -3 for the ...
                        items_string[:items_allowed_width - 3],
                    )
                message_bits.append(items_string)
        return ' - '.join(message_bits)
    progress_queue = deque((make_progress_message(),))
    def progress(complete_item):
        # Mark one item as finished and push an updated message for the
        # spinner thread to display.
        if complete_item not in items:
            raise ValueError('Invalid complete item: {0} not in {1}'.format(
                complete_item, items,
            ))
        items.remove(complete_item)
        progress_queue.append(make_progress_message())
    # Kick off the spinner thread
    spinner_thread = Thread(
        target=_print_spinner,
        args=(stop_event, progress_queue),
    )
    spinner_thread.daemon = True
    spinner_thread.start()
    # Yield allowing the actual code the spinner waits for to run
    yield progress
    # Finally, stop the spinner
    stop_event.set()
    spinner_thread.join()
| StarcoderdataPython |
5197397 | # coding: utf-8
"""
This file was created by Backlog APIGenerator
"""
from __future__ import unicode_literals, absolute_import
from deprecated import deprecated
from BacklogPy.base import BacklogBase
class Statuses(BacklogBase):
    """Client for the (deprecated) space-wide Backlog status endpoint."""

    def __init__(self, space_id, api_key):
        super(Statuses, self).__init__(space_id, api_key)

    @deprecated(reason="This API has been deprecated and is no longer recommended for use. Please replace it with Get Status List of Project.https://developer.nulab.com/docs/backlog/api/2/get-status-list-of-project/")
    def get_status_list(self):
        """
        Returns list of statuses. ※ Deprecated API. https://developer.nulab.com/docs/backlog/api/2/get-status-list-of-project/

        :return: requests Response object
        :rtype: requests.Response
        """
        return self._request('/statuses', method='GET')
| StarcoderdataPython |
9677270 | <filename>mplop/__init__.py
from .mplop import show
from .mplop import figure

# NOTE(review): __all__ lists the submodule name rather than the imported
# helpers (``show``, ``figure``) -- confirm this is intended before relying
# on ``from mplop import *``.
__all__ = ["mplop"]
| StarcoderdataPython |
6695356 | # this file is supposed to show the fibonacci sequence only with recursion.
# for a more user friendly presentation of the fibonacci sequence there is 'src/fibonacci_sequence.py'
# only functionality and error handling is to be expected from this file
def fibonacci_recursion(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1) by naive recursion.

    Runs in exponential time; kept deliberately recursive because this file
    exists to demonstrate the purely recursive definition.
    """
    if n < 2:
        # Base cases: F(0) = 0, F(1) = 1.  (The original also special-cased
        # n == 2, but F(2) = F(1) + F(0) = 1 falls out of the recursion.)
        return n
    return fibonacci_recursion(n - 1) + fibonacci_recursion(n - 2)
def main():
    """Interactive loop: prompt for N and print F[N]; enter -1 to quit."""
    n = 0
    while n != -1:
        try:
            n = int(input('enter N of the fibonacci sequence '))
            if n == -1:
                continue
        except ValueError:
            # Only malformed numbers are recoverable; the original bare
            # ``except:`` also swallowed Ctrl-C / EOF.
            print('you must put a number between 0 and above')
        else:
            print(f'F[{n}]= {fibonacci_recursion(n)}')
if __name__ == '__main__':
main() | StarcoderdataPython |
5166591 | #!/usr/bin/env python3
# 2019-5-5
class Spice:
    """A spice item with a name, unit price and stock quantity.

    Attributes are exposed through simple pass-through properties, matching
    the original public interface.
    """

    def __init__(self, name, price, quantity):
        self.__name = name
        self.__price = price
        self.__quantity = quantity

    def __str__(self):
        return self.name

    def __repr__(self):
        # Fixed: the original defined a plain ``repr`` method, almost
        # certainly a typo for ``__repr__`` (built-in repr() otherwise fell
        # back to the default object representation).
        return self.name

    # Backward-compatible alias for callers that used the old method name.
    repr = __repr__

    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, name):
        self.__name = name

    @property
    def price(self):
        return self.__price

    @price.setter
    def price(self, price):
        self.__price = price

    @property
    def quantity(self):
        return self.__quantity

    @quantity.setter
    def quantity(self, quantity):
        self.__quantity = quantity
3220407 | <gh_stars>0
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
    """Root endpoint: returns a plain-text greeting."""
    return "Hello World"
@app.route("/create")
def create():
    """Create endpoint (stub).

    Originally registered at "/", the same rule as hello(); Flask dispatches
    to the first matching registration, so this view was unreachable.  It
    now has its own URL.
    """
    return "create"
@app.route("/update")
def update():
    """Update endpoint (stub).

    Originally registered at "/", shadowed by the earlier hello() rule and
    therefore unreachable; moved to its own URL.
    """
    return "update"
@app.route("/remove")
def remove():
    """Remove endpoint (stub).

    Originally registered at "/", shadowed by the earlier hello() rule and
    therefore unreachable; moved to its own URL.
    """
    return "remove"
@app.route("/get")
def get():
    """Get endpoint (stub).

    Originally registered at "/", shadowed by the earlier hello() rule and
    therefore unreachable; moved to its own URL.  (The function name shadows
    the ``get`` builtin-style name but is kept: Flask uses it as the
    endpoint name.)
    """
    return "get"
if __name__ == "__main__":
app.run() | StarcoderdataPython |
3300968 |
from sklearn import model_selection
from sklearn.tree import DecisionTreeClassifier
from ml_factory.helper import Helper
class DecisionTreeClassifierService:
    """Runs 10-fold cross-validation of a decision tree on a remote dataset."""

    def __init__(self, url, columns, scoring):
        self._url = url
        self._columns = columns
        self._scoring = scoring

    def predict(self):
        """Fetch the data, cross-validate a DecisionTreeClassifier and
        return a summary dict of the resulting scores."""
        print('Preparing the data')
        helper = Helper()
        self._X, self._Y = helper.fetch_data(self._url, self._columns)

        print('Predicting with Decision Tree Classifier')
        folds = model_selection.KFold(n_splits=10)
        scores = model_selection.cross_val_score(
            DecisionTreeClassifier(), self._X, self._Y,
            cv=folds, scoring=self._scoring)

        return {
            'Algorithm': 'Decision Tree Classifier',
            'Mean Accuracy': scores.mean(),
            'Standard deviation Accuracy': scores.std(),
        }
class DecisionTreeClassifierServiceBuilder:
    """Memoizing factory: builds the service once, then returns the same
    instance on every subsequent call (extra kwargs are ignored)."""

    def __init__(self):
        self._instance = None

    def __call__(self, source_url, names, scoring, **_ignored):
        if self._instance is None:
            self._instance = DecisionTreeClassifierService(
                source_url, names, scoring)
        return self._instance
| StarcoderdataPython |
11272425 | import requests
from bs4 import BeautifulSoup
import http.cookiejar as HC
import json
import html
# Shared HTTP session with a persistent LWP cookie jar on disk.
session = requests.session()
session.cookies = HC.LWPCookieJar(filename='secret/cookies')
try:
    # Reuse previously saved cookies when the jar file exists and parses.
    session.cookies.load(ignore_discard=True)
except OSError:
    # Missing or malformed cookie file (http.cookiejar.LoadError is an
    # OSError subclass): start with an empty jar.  The original bare
    # ``except`` also swallowed unrelated fatal errors.
    pass
def detectInfo(nextpage=False):
    """Fetch a page of general announcements from info2021.tsinghua.edu.cn.

    nextpage=False fetches page 1, True fetches page 2.  Retries once after
    refreshing the session/CSRF cookie; raises on repeated failure or when
    the response contains no items.
    """
    error_count = 0
    page = 1 if not nextpage else 2
    while error_count <= 1:
        try:
            # The POST requires the XSRF token held in the cookie jar.
            csrf = session.cookies._cookies['info2021.tsinghua.edu.cn']['/']['XSRF-TOKEN'].value
            # NOTE: restored '&currentPage' -- the checked-in text had been
            # HTML-entity mangled ('&curren' rendered as a currency sign),
            # which corrupted the pagination parameter.
            url = ('https://info2021.tsinghua.edu.cn/b/info/xxfb_fg/xnzx/template/more'
                   f'?oType=mr&lmid=all&lydw=&currentPage={page}&length=30&_csrf={csrf}')
            res = session.post(url, timeout=(5, 10)).text
            res = json.loads(res)
            assert res['result'] == 'success'
        except Exception:
            error_count += 1
            # Refresh the session cookies (fresh CSRF token) and retry once.
            session.get(
                'https://info2021.tsinghua.edu.cn/f/info/xxfb_fg/xnzx/template/more?lmid=all', timeout=(5, 10))
            session.cookies.save(ignore_discard=True)
            continue
        news = []
        for x in res['object']['dataList']:
            news.append({
                'title': html.unescape(x['bt']),
                'source': x['dwmc_show'],
                'date': x['time'].split(' ')[0].replace('-', '.'),
                'url': 'https://info2021.tsinghua.edu.cn' + x['url']})
        if len(news) == 0:
            raise Exception(url)
        return news
    raise Exception('error in detectInfo')
def detectInfoAcademic(nextpage=False):
    """Fetch the two academic-activity columns from info2021.tsinghua.edu.cn.

    The endpoint is not paginated, so a second-page request returns [].
    Retries once after refreshing the CSRF cookie; raises on repeated failure.
    """
    if nextpage:
        return []
    error_count = 0
    while error_count <= 1:
        try:
            # The POST requires the XSRF token held in the session cookie jar.
            csrf = session.cookies._cookies['info2021.tsinghua.edu.cn']['/']['XSRF-TOKEN'].value
            url = f'https://info2021.tsinghua.edu.cn/b/hdrc_fg/api/xxfb?_csrf={csrf}'
            res = session.post(url, timeout=(5, 10)).text
            res = json.loads(res)
            assert res['result'] == 'success'
        except Exception:
            # Refresh the session (and CSRF cookie) before retrying once.
            error_count += 1
            session.get(
                'https://info2021.tsinghua.edu.cn/f/info/xxfb_fg/xnzx/template/more?lmid=all', timeout=(5, 10))
            session.cookies.save(ignore_discard=True)
            continue
        news = []
        # Two activity columns ('lm_1' and 'lm_2'), each with its own list.
        for x in res['object']['lm_1']['hdrcList']:
            news.append({
                'title': html.unescape(x['bt']),
                'source': res['object']['lm_1']['hdlxmc'],
                'date': x['hdrq'].replace('-', '.'),
                'url': f'https://info2021.tsinghua.edu.cn' + x['url']})
        for x in res['object']['lm_2']['hdrcList']:
            news.append({
                'title': html.unescape(x['bt']),
                'source': res['object']['lm_2']['hdlxmc'],
                'date': x['hdrq'].replace('-', '.'),
                'url': f'https://info2021.tsinghua.edu.cn' + x['url']})
        if len(news) == 0:
            raise Exception(url)
        return news
    raise Exception('error in detectInfoAcademic')
def detectLibrary(url, nextpage=False):
    """Scrape a lib.tsinghua.edu.cn notice-list page into news dicts.

    With nextpage=True, follows the "next page" link when present and
    returns [] when there is no further page.
    """
    # NOTE(review): this local ``html`` shadows the imported ``html`` module
    # (only within this function).
    html = requests.get(url, timeout=(5, 10)).content
    bs = BeautifulSoup(html, 'lxml', from_encoding='utf-8')
    if nextpage:
        __url = bs.select(
            'body > div.main > div > div > div > span.p_pages > span.p_next.p_fun > a')
        if len(__url) == 0:
            return []
        # Resolve the relative "next" href against the current directory.
        url = url[::-1].split('/', 1)[1][::-1] + '/' + __url[0].get('href')
        html = requests.get(url, timeout=(5, 10)).content
        bs = BeautifulSoup(html, 'lxml', from_encoding='utf-8')
    content = bs.select(
        'body > div.main > div > div > ul > li > div.notice-list-tt > a')
    bs_date = bs.select(
        'body > div.main > div > div > ul > li > div.notice-date')
    # Page banner heading gives the notice category/source.
    source = bs.select('body > div.ban > h3')[0].get_text().strip()
    news = []
    for each, date in zip(content, bs_date):
        news.append({
            'title': each.get_text(),
            'source': '图书馆'+source,
            'date': date.get_text().strip()[:10].replace('/', '.'),
            'url': 'https://lib.tsinghua.edu.cn/'+each.get('href').replace('../', '')})
    if len(news) == 0:
        raise Exception(url)
    return news
def detectMyhome(nextpage=False):
    """Scrape the myhome.tsinghua.edu.cn news/notice list (page 1 or 2)."""
    url = 'http://myhome.tsinghua.edu.cn/Netweb_List/News_notice.aspx'
    if nextpage:
        url += '?page=2'
    # NOTE(review): local ``html`` shadows the imported ``html`` module
    # (only within this function).
    html = requests.get(url, timeout=(5, 10)).content
    bs = BeautifulSoup(html, 'lxml', from_encoding='utf-8')
    content = bs.select(
        'table > tr > td:nth-child(2) > div > div.blueline.margin5 > div > table > tr > td:nth-child(2) > a')
    # [1:] skips the table header row for the date/source columns.
    dates = bs.select(
        'table > tr > td:nth-child(2) > div > div.blueline.margin5 > div > table > tr > td:nth-child(3)')[1:]
    sources = bs.select(
        'table > tr > td:nth-child(2) > div > div.blueline.margin5 > div > table > tr > td:nth-child(4)')[1:]
    news = []
    for each, date, source in zip(content, dates, sources):
        news.append({
            'title': each.get('title').strip(),
            'source': source.get_text().strip(),
            'date': date.get_text().strip()[:10].replace('-', '.'),
            'url': 'http://myhome.tsinghua.edu.cn/Netweb_List/'+each.get('href')})
    if len(news) == 0:
        raise Exception(url)
    return news
def detectNews(nextpage=False):
    """Scrape the www.tsinghua.edu.cn latest-news list (page 1 or 2)."""
    url = 'https://www.tsinghua.edu.cn/news/zxdt.htm'
    # NOTE(review): local ``html`` shadows the imported ``html`` module
    # (only within this function).
    html = requests.get(url, timeout=(5, 10)).content
    bs = BeautifulSoup(html, 'lxml', from_encoding='utf-8')
    if nextpage:
        # Follow the "next page" link and re-parse.
        __url = bs.select(
            'body > div.rem12 > div.left > div.fanye.pcfyt > ul > div > span.p_pages > span.p_next.p_fun > a')[0]
        url = url[::-1].split('/', 1)[1][::-1] + '/' + __url.get('href')
        html = requests.get(url, timeout=(5, 10)).content
        bs = BeautifulSoup(html, 'lxml', from_encoding='utf-8')
    titles = bs.select(
        'body > div.rem12 > div.left > ul > li > a > div.tit > p')
    dates = bs.select('body > div.rem12 > div.left > ul > li > a > div.sj')
    urls = bs.select('body > div.rem12 > div.left > ul > li > a')
    news = []
    for title, date, url in zip(titles, dates, urls):
        # Date element appears to render as two lines (day / year.month);
        # recombine into "year.month.day".
        _date = date.get_text().strip().split('\n')
        news.append({
            'title': title.get_text().strip(),
            'source': '清华新闻网',
            'date': _date[1] + '.' + _date[0],
            'url': url.get('href').replace('../', 'http://www.tsinghua.edu.cn/')})
    if len(news) == 0:
        raise Exception(url)
    return news
def detectOffice(url, nextpage=False):
    """Scrape a xxbg.cic.tsinghua.edu.cn (administrative office) notice page."""
    if nextpage:
        url += '&pageno=2'
    # NOTE(review): local ``html`` shadows the imported ``html`` module
    # (only within this function).
    html = requests.get(url, timeout=(5, 10)).content
    bs = BeautifulSoup(html, 'lxml', from_encoding='utf-8')
    # The page header cell names the office (used as the news source).
    source = bs.select(
        'body > table:nth-child(2) > tr > td > table > tr > td:nth-child(2)')[0].get_text().strip()
    content = bs.select(
        'body > table:nth-child(3) > tr:nth-child(2) > td > table > tr > td:nth-child(2) > table > tr:nth-child(1) > td > table:nth-child(2) > tr > td:nth-child(2) > a')
    titles = [x.get('title') for x in content]
    urls = [x.get('href') for x in content]
    dates = bs.select(
        'body > table:nth-child(3) > tr:nth-child(2) > td > table > tr > td:nth-child(2) > table > tr:nth-child(1) > td > table:nth-child(2) > tr > td:nth-child(2) > font')
    news = []
    for title, date, url in zip(titles, dates, urls):
        # Strip the enclosing characters (e.g. brackets) around the date and
        # normalise '-' separators to '.'.
        date = date.get_text().strip().replace('-', '.')[1:-1]
        if date != '':
            news.append({
                'title': title,
                'source': source,
                'date': date,
                'url': 'http://xxbg.cic.tsinghua.edu.cn/oath/' + url})
    if len(news) == 0:
        raise Exception(url)
    return news
| StarcoderdataPython |
1754516 | import enum
from numpy import e, nested_iters, printoptions
from numpy.lib.npyio import load
from sklearn.model_selection import cross_val_score
from sklearn.datasets import load_iris
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
import csv
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
TRAIN_PATH = './Processed_Data/one_hot_Data/training_one_hot.csv'
TEST_PATH = './Processed_Data/one_hot_Data/test_one_hot.csv'
def load_training_data():
    """Read the training CSV (skipping the header row) and split each row
    into features (all but the last column) and the label (last column)."""
    features, labels = [], []
    with open(TRAIN_PATH, 'r') as f:
        reader = csv.reader(f)
        next(reader)  # drop header row
        for row in reader:
            features.append(row[:-1])
            labels.append(row[-1])
    return (features, labels)
def load_test_data():
    """Read the test CSV, skipping the header row, and return rows as lists."""
    with open(TEST_PATH, 'r') as f:
        reader = csv.reader(f)
        next(reader)  # drop header row
        return list(reader)
# Train on the full training set and print the in-sample accuracy.
X, y = load_training_data()
clf = LogisticRegression(random_state=0, max_iter=1000).fit(X, y)
print(clf.score(X, y))
test_data = load_test_data()
predictions = clf.predict(test_data)
# store into the result
# Two-column output: 1-based row ID and the predicted label.
pre_res = [['ID', 'Prediction']]
for index, value in enumerate(predictions):
    pre_res.append([index+1, value])
with open('./Results/logistic.csv', 'w') as f:
    csv_f = csv.writer(f, lineterminator='\n')
    csv_f.writerows(pre_res)
| StarcoderdataPython |
11250319 | # -*- coding: utf-8 -*-
# @Author: anh-tuan.vu
# @Date: 2021-01-27 07:50:00
# @Last Modified by: anh-tuan.vu
# @Last Modified time: 2021-01-27 20:03:29
import vtt2text
if __name__ == '__main__':
    filepath = "files/transports_en_commun.vtt"

    # get clean content
    content = vtt2text.clean(filepath)
    print(content)

    # save clean content to text file
    # (presumably written next to the source .vtt -- confirm vtt2text's
    # default output path)
    print()
    vtt2text.to_file(filepath)
11389294 | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
# DQM harvester summarising data/emulator comparisons for Stage-2 calo
# layer 2, reading histograms from the given monitor directory.
l1tStage2CaloLayer2DEClientSummary = DQMEDHarvester("L1TStage2CaloLayer2DEClientSummary",
                  monitorDir = cms.untracked.string('L1TEMU/L1TStage2CaloLayer2/L1TdeStage2CaloLayer2')
)
| StarcoderdataPython |
11366544 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
# DEPRECATED MODULE!!!!
# This module is deprecated avoid to use anything from it.
# Use sardana.release module instead
####
"""Release data for the Spock project.
"""
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'spock'
# For versions with substrings (like 0.6.16.svn), use an extra . to separate
# the new substring. We have to avoid using either dashes or underscores,
# because bdist_rpm does not accept dashes (an RPM) convention, and
# bdist_deb does not accept underscores (a Debian convention).
revision = '1'
#version = '0.8.1.svn.r' + revision.rstrip('M')
version = '2.1.1'
description = "An enhanced interactive Macro Server shell."
long_description = \
    """
Spock provides an interactive environment for interacting with the Tango
MacroServer Device. It is completely based on IPython which itself provides a
replacement for the interactive Python interpreter with extra functionality.
"""
license = 'GNU'
# Author placeholders are anonymised in this copy of the file.
authors = {'Tiago': ('<NAME>', '<EMAIL>'),
           'Reszela': ('<NAME>', '<EMAIL>'),
           'Pascual-Izarra': ('<NAME>', '<EMAIL>')}
url = ''
download_url = ''
platforms = ['Linux', 'Windows XP/2000/NT', 'Windows 95/98/ME']
keywords = ['Sardana', 'Interactive', 'MacroServer', 'Tango', 'Shell']
| StarcoderdataPython |
12800552 | from alpha_vantage.timeseries import TimeSeries
from numpy.lib.index_tricks import _diag_indices_from
from pandas.core.frame import DataFrame
from sqlalchemy import create_engine
from urllib.parse import quote
from datetime import date
import mysql.connector
import pymysql
import pandas as pd
import pandas as pd
import requests
import calendar
import xlwt
import csv
class Alpha_VantageAPI(object):
    def __init__(self, apikey : str):
        """Store the Alpha Vantage API key used by all requests."""
        self.apikey = apikey
# Get Stock Information
# daily stock price
    def GetDailyStockPrice(self, stock_id : str) -> DataFrame:
        """Return the full daily OHLCV history for ``stock_id``.

        Columns: symbol, datetime, open, high, low, close, volume;
        sorted newest-first.
        """
        ts = TimeSeries( key = self.apikey )
        #data, meta_data = ts.get_daily_adjusted(stock_id, outputsize='full')
        data, meta_data = ts.get_daily(stock_id, outputsize='full')
        symbol_df = pd.DataFrame.from_dict( data, orient = 'index' )
        symbol_df = symbol_df.apply(pd.to_numeric)
        symbol_df.index = pd.to_datetime( symbol_df.index )
        #symbol_df.columns = [ 'open', 'high', 'low', 'close', 'adjusted_close', 'volume', 'dividend_amt','split_coef' ]
        symbol_df.columns = [ 'open', 'high', 'low', 'close', 'volume' ]
        symbol_df = symbol_df.sort_index( ascending = False )
        #symbol_df = symbol_df.drop('split_coef', axis = 1)
        symbol_df = symbol_df.rename_axis('datetime').reset_index()
        # Prepend a 'symbol' column so frames for several stocks can be
        # concatenated by GetMultiStockPrice.
        col_name = symbol_df.columns.tolist()
        col_name.insert(0,'symbol')
        symbol_df = symbol_df.reindex(columns=col_name)
        symbol_df['symbol'] = stock_id
        return symbol_df
# weekly stock price
    def GetWeeklyStockPrice(self, stock_id : str) -> DataFrame:
        """Return the full weekly adjusted price history for ``stock_id``.

        Columns: symbol, datetime, open, high, low, close, adjusted_close,
        volume, dividend_amt; sorted newest-first.
        """
        ts = TimeSeries( key = self.apikey )
        data, meta_data = ts.get_weekly_adjusted(stock_id)
        symbol_df = pd.DataFrame.from_dict( data, orient = 'index' )
        symbol_df = symbol_df.apply(pd.to_numeric)
        symbol_df.index = pd.to_datetime( symbol_df.index )
        symbol_df.columns = [ 'open', 'high', 'low', 'close', 'adjusted_close', 'volume', 'dividend_amt' ]
        symbol_df = symbol_df.sort_index( ascending = False )
        symbol_df = symbol_df.rename_axis('datetime').reset_index()
        # Prepend a 'symbol' column so multi-stock frames can be stacked.
        col_name = symbol_df.columns.tolist()
        col_name.insert(0,'symbol')
        symbol_df = symbol_df.reindex(columns=col_name)
        symbol_df['symbol'] = stock_id
        return symbol_df
# monthly stock price
    def GetMonthlyStockPrice(self, stock_id : str) -> DataFrame:
        """Return the full monthly adjusted price history for ``stock_id``.

        Columns: symbol, datetime, open, high, low, close, adjusted_close,
        volume, dividend_amt; sorted newest-first.
        """
        ts = TimeSeries( key = self.apikey )
        data, meta_data = ts.get_monthly_adjusted(stock_id)
        symbol_df = pd.DataFrame.from_dict( data, orient = 'index' )
        symbol_df = symbol_df.apply(pd.to_numeric)
        symbol_df.index = pd.to_datetime( symbol_df.index )
        symbol_df.columns = [ 'open', 'high', 'low', 'close', 'adjusted_close', 'volume', 'dividend_amt' ]
        symbol_df = symbol_df.sort_index( ascending = False )
        symbol_df = symbol_df.rename_axis('datetime').reset_index()
        # Prepend a 'symbol' column so multi-stock frames can be stacked.
        col_name = symbol_df.columns.tolist()
        col_name.insert(0,'symbol')
        symbol_df = symbol_df.reindex(columns=col_name)
        symbol_df['symbol'] = stock_id
        return symbol_df
# intraday stock price - most recent 1 to 2 months data
    def GetIntradayStockPrice(self, stock_id : str) -> DataFrame:
        """Return 5-minute intraday OHLCV bars for ``stock_id`` (the API's
        'full' output covers roughly the most recent 1-2 months),
        sorted newest-first with a leading 'symbol' column."""
        ts = TimeSeries( key = self.apikey )
        data, meta_data = ts.get_intraday( stock_id, interval = '5min', outputsize = 'full')
        symbol_df = pd.DataFrame.from_dict( data, orient = 'index' )
        symbol_df = symbol_df.apply(pd.to_numeric)
        symbol_df.index = pd.to_datetime( symbol_df.index )
        symbol_df.columns = [ 'open', 'high', 'low', 'close', 'volume']
        symbol_df = symbol_df.sort_index( ascending = False )
        symbol_df = symbol_df.rename_axis('datetime').reset_index()
        # Prepend a 'symbol' column so multi-stock frames can be stacked.
        col_name = symbol_df.columns.tolist()
        col_name.insert(0,'symbol')
        symbol_df = symbol_df.reindex(columns=col_name)
        symbol_df['symbol'] = stock_id
        return symbol_df
# Get more stocks price - no more than 5 stocks due to API call limits
    def GetMultiStockPrice(self, stocks_id : list) -> DataFrame:
        """Interactively fetch prices for several symbols and concatenate them.

        Prompts on stdin for the interval (daily/weekly/monthly/intraday);
        returns None when nothing was fetched (unknown interval).
        NOTE(review): interactive ``input()`` inside a library method makes
        this hard to automate -- consider taking the interval as a parameter.
        Keep the symbol list small (<= ~5) because of API rate limits.
        """
        df = pd.DataFrame()
        function_use = input('Choose the stock price time interval you want (daily, weekly, monthly, intraday) : ')
        if function_use.lower() == 'daily':
            for stock in stocks_id:
                df = df.append(self.GetDailyStockPrice(stock))
        elif function_use.lower() == 'weekly':
            for stock in stocks_id:
                df = df.append(self.GetWeeklyStockPrice(stock))
        elif function_use.lower() == 'monthly':
            for stock in stocks_id:
                df = df.append(self.GetMonthlyStockPrice(stock))
        elif function_use.lower() == 'intraday':
            for stock in stocks_id:
                df = df.append(self.GetIntradayStockPrice(stock))
        else:
            print('We do not have this function to use')
        # Check DataFrame empty
        if df.empty:
            return None
        else:
            return df
# Company Information
# Currency, GrossProfit in last 5 years - from 2016/12/31 to 2020/12/31, Total Revenue, NetIncome
    def GetIncomeStatement(self, stock_id : str) -> DataFrame:
        """Return annual income-statement rows for ``stock_id``.

        Columns: Symbol, fiscalDateEnding, reportedCurrency, grossProfit,
        totalRevenue, netIncome (one row per fiscal year reported).
        """
        base_url = 'https://www.alphavantage.co/query?'
        df = pd.DataFrame()
        df_new = pd.DataFrame()
        params = {'function': 'INCOME_STATEMENT', 'symbol': stock_id, 'apikey': self.apikey}
        response = requests.get(base_url, params=params)
        data = response.json() # dict
        data_annual = data['annualReports']
        # NOTE(review): the loop variable shadows the built-in ``dict``;
        # rename when next touching this code.
        for dict in data_annual:
            df = df.append(pd.DataFrame([dict]))
        df_new = df.loc[:,['fiscalDateEnding','reportedCurrency','grossProfit', 'totalRevenue', 'netIncome']]
        # Prepend a 'Symbol' column.
        col_name = df_new.columns.tolist()
        col_name.insert(0,'Symbol')
        df_new = df_new.reindex(columns=col_name)
        df_new['Symbol'] = stock_id
        return df_new
# Symbol, Name, Exchange, Country, Sector, Industry, Fiscal year end, 52 Week high, 52 Week low, 50DayMovingAverage, 200DayMovingAverage,
    def GetCompanyOverview(self, stock_id : str) -> DataFrame:
        """Return a one-row frame of company overview fields: name, exchange,
        country, sector, industry, fiscal year end, 52-week high/low and the
        50/200-day moving averages."""
        base_url = 'https://www.alphavantage.co/query?'
        df_new = pd.DataFrame()
        params = {'function': 'OVERVIEW', 'symbol': stock_id, 'apikey': self.apikey}
        response = requests.get(base_url, params=params)
        data = response.json() # dict
        df = pd.DataFrame([data])
        df_new = df.loc[:,['Symbol', 'Name','Exchange','Country', 'Sector', 'Industry', 'FiscalYearEnd', '52WeekHigh', '52WeekLow','50DayMovingAverage', '200DayMovingAverage']]
        return df_new
# Symbol, Name, Exchange, AssetType, IPO Date, Delisting Date, Status
# This is the old version of function
    def GetListingDelistingStatus_bkp(self) -> DataFrame:
        """Legacy listing/delisting fetch, kept for reference.

        Downloads the LISTING_STATUS CSV and parses it with csv.reader;
        the header row becomes the DataFrame columns.
        """
        CSV_URL ='https://www.alphavantage.co/query?function=LISTING_STATUS&apikey=' + self.apikey
        data_lst = []
        with requests.Session() as s:
            download = s.get(CSV_URL)
            decoded_content = download.content.decode('utf-8')
            cr = csv.reader(decoded_content.splitlines(), delimiter=',')
            my_list = list(cr)
            for row in my_list:
                data_lst.append(row)
        # First row is the CSV header, the rest are data rows.
        df = pd.DataFrame(columns=data_lst[0], data = data_lst[1:])
        return df
# Symbol, Name, Exchange, AssetType, IPO Date, Delisting Date, Status
# This is the new version of function
def GetListingDelistingStatus(self) -> DataFrame:
CSV_URL ='https://www.alphavantage.co/query?function=LISTING_STATUS&apikey=' + self.apikey
r = requests.get(CSV_URL)
decoded_content = r.content.decode('utf-8')
df = pd.DataFrame()
for i in decoded_content.splitlines():
data_list = i.split(',')
df = df.append(pd.DataFrame(data_list).T, ignore_index=True)
df = df.rename(columns=df.iloc[0])
df = df.drop(df.index[0])
df.loc[(df["delistingDate"] == "null"), "delistingDate"] = "1970-01-01"
return df
# Symbol, Name, Type, Region, MarketOpen, MarketClose, Timezone, Currency, MatchScore
def GetSearchEndpoint(self, find_stock : str) -> DataFrame:
ts = TimeSeries( key = self.apikey )
data = ts.get_symbol_search(find_stock)
data = data[0]
df = pd.DataFrame()
for dict in data:
df = df.append(pd.DataFrame([dict]))
df.columns = ['Symbol', 'Name', 'Type', 'Region', 'MarketOpen', 'MarketClose', 'Timezone', 'Currency', 'MatchScore']
return df
# Find IPO companies in the next three months
# 'symbol', 'name', 'ipoDate', 'priceRangeLow', 'priceRangeHigh', 'currency', 'exchange'
# This is the old version
    def FindIPOCalender_bkp(self) -> DataFrame:
        """Legacy IPO-calendar fetch, kept for reference.

        Downloads the IPO_CALENDAR CSV and parses it with csv.reader;
        the header row becomes the DataFrame columns.
        """
        CSV_URL = 'https://www.alphavantage.co/query?function=IPO_CALENDAR&apikey=' + self.apikey
        data_lst = []
        with requests.Session() as s:
            download = s.get(CSV_URL)
            decoded_content = download.content.decode('utf-8')
            cr = csv.reader(decoded_content.splitlines(), delimiter=',')
            my_list = list(cr)
            for row in my_list:
                data_lst.append(row)
        # First row is the CSV header, the rest are data rows.
        df = pd.DataFrame(columns=data_lst[0], data = data_lst[1:])
        return df
# 'symbol', 'name', 'ipoDate', 'priceRangeLow', 'priceRangeHigh', 'currency', 'exchange'
# This is the new version
def FindIPOCalender(self) -> DataFrame:
CSV_URL = 'https://www.alphavantage.co/query?function=IPO_CALENDAR&apikey=' + self.apikey
r = requests.get(CSV_URL)
decoded_content = r.content.decode('utf-8')
df = pd.DataFrame()
for i in decoded_content.splitlines():
data_list = i.split(',')
df = df.append(pd.DataFrame(data_list).T, ignore_index=True)
df = df.rename(columns=df.iloc[0])
df = df.drop(df.index[0])
return df
# CSV file - Filter data
def CSV_Output(self, stock_id : str) -> DataFrame:
workbook = xlwt.Workbook()
workbook.add_sheet('Daily Price')
workbook.add_sheet('Weekly Price')
workbook.add_sheet('Monthly Price')
workbook.add_sheet('Intraday Price')
workbook.add_sheet('Income Statement Annual Reports')
workbook.add_sheet('Company Overview')
workbook.add_sheet('Search Endpoint Results')
workbook.add_sheet('US ListingDelisting Status')
workbook.add_sheet('IPO Calender')
workbook.save('Filter_Data.xlsx')
writer = pd.ExcelWriter('Filter_Data.xlsx', engine='xlsxwriter')
self.GetDailyStockPrice(stock_id).to_excel(writer, sheet_name='Daily Price')
self.GetWeeklyStockPrice(stock_id).to_excel(writer, sheet_name='Weekly Price')
self.GetMonthlyStockPrice(stock_id).to_excel(writer, sheet_name='Monthly Price')
self.GetIntradayStockPrice(stock_id).to_excel(writer, sheet_name='Intraday Price')
self.GetIncomeStatement(stock_id).to_excel(writer, sheet_name='Income Statement Annual Reports')
self.GetCompanyOverview(stock_id).to_excel(writer, sheet_name='Company Overview')
self.GetSearchEndpoint(stock_id).to_excel(writer, sheet_name='Search Endpoint Results')
self.GetListingDelistingStatus().to_excel(writer, sheet_name='US ListingDelisting Status')
self.FindIPOCalender().to_excel(writer, sheet_name='IPO Calender')
writer.save()
# CSV file - Original data
    def GetDailyStockPrice_Original(self, stock_id : str) -> DataFrame:
        """Fetch the full adjusted daily price history for *stock_id*.

        Returns a DataFrame with a 'Symbol' column, a 'Datetime' column
        (newest first) and OHLC/adjusted-close/volume/dividend/split fields.
        """
        ts = TimeSeries( key = self.apikey )
        data, meta_data = ts.get_daily_adjusted(stock_id, outputsize='full')
        symbol_df = pd.DataFrame.from_dict( data, orient = 'index' )
        # Coerce every column to a numeric dtype.
        symbol_df = symbol_df.apply(pd.to_numeric)
        symbol_df.index = pd.to_datetime( symbol_df.index )
        symbol_df.columns = [ 'open', 'high', 'low', 'close', 'adjusted_close', 'volume', 'dividend_amt','split_coef' ]
        # Newest rows first, then move the date index into a 'Datetime' column.
        symbol_df = symbol_df.sort_index( ascending = False )
        symbol_df = symbol_df.rename_axis('Datetime').reset_index()
        # Prepend an (initially NaN) 'Symbol' column and fill it.
        col_name = symbol_df.columns.tolist()
        col_name.insert(0,'Symbol')
        symbol_df = symbol_df.reindex(columns=col_name)
        symbol_df['Symbol'] = stock_id
        return symbol_df
def GetIncomeStatement_Original(self, stock_id : str) -> DataFrame:
base_url = 'https://www.alphavantage.co/query?'
df_annual = pd.DataFrame()
df_quarterly = pd.DataFrame()
params = {'function': 'INCOME_STATEMENT', 'symbol': stock_id, 'apikey': self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
data_annual = data['annualReports']
data_quarterly = data['quarterlyReports']
for dict_1 in data_annual:
df_annual = df_annual.append(pd.DataFrame([dict_1]))
col_name = df_annual.columns.tolist()
col_name.insert(0,'Symbol')
df_annual = df_annual.reindex(columns=col_name)
df_annual['Symbol'] = stock_id
for dict_2 in data_quarterly:
df_quarterly = df_quarterly.append(pd.DataFrame([dict_2]))
col_name = df_quarterly.columns.tolist()
col_name.insert(0,'Symbol')
df_quarterly = df_quarterly.reindex(columns=col_name)
df_quarterly['Symbol'] = stock_id
return df_annual, df_quarterly
def GetCompanyOverview_Original(self, stock_id : str) -> DataFrame:
base_url = 'https://www.alphavantage.co/query?'
df_new = pd.DataFrame()
params = {'function': 'OVERVIEW', 'symbol': stock_id, 'apikey': self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
df = pd.DataFrame([data])
return df
# Company overview combine with the IPO date information in the listing&delisting data
def Database_Center(self, stock_id : str) -> DataFrame:
df_income_statement = self.GetListingDelistingStatus()
df_company_overview = self.GetCompanyOverview_Original(stock_id)
df_company_overview = df_company_overview.loc[:,['Symbol', 'AssetType', 'Name', 'Exchange','Country', 'Sector', 'Industry']]
df_company_IPO_date = df_income_statement.loc[df_income_statement['symbol'] == stock_id]
df_company_overview['IpoDate'] = str(df_company_IPO_date['ipoDate'].values[0])
df_company_overview['DelistingDate'] = str(df_company_IPO_date['delistingDate'].values[0])
df_company_overview.loc[(df_company_overview["DelistingDate"] == "null"), "DelistingDate"] = "1970-01-01"
df_company_overview['Status'] = str(df_company_IPO_date['status'].values[0])
return df_company_overview
def CSV_Output_Original(self, stock_id : str) -> DataFrame:
workbook = xlwt.Workbook()
workbook.add_sheet('Daily Price')
workbook.add_sheet('Weekly Price')
workbook.add_sheet('Monthly Price')
workbook.add_sheet('Intraday Price')
workbook.add_sheet('Income Statement Annual')
workbook.add_sheet('Income Statement Quarterly')
workbook.add_sheet('Company Overview')
workbook.add_sheet('Search Endpoint Results')
workbook.add_sheet('US ListingDelisting Status')
workbook.add_sheet('IPO Calender')
workbook.save('Original_Data.xlsx')
df = self.GetIncomeStatement_Original(stock_id)
writer = pd.ExcelWriter('Original_Data.xlsx', engine='xlsxwriter')
self.GetDailyStockPrice_Original(stock_id).to_excel(writer, sheet_name='Daily Price')
self.GetWeeklyStockPrice(stock_id).to_excel(writer, sheet_name='Weekly Price')
self.GetMonthlyStockPrice(stock_id).to_excel(writer, sheet_name='Monthly Price')
self.GetIntradayStockPrice(stock_id).to_excel(writer, sheet_name='Intraday Price')
df[0].to_excel(writer, sheet_name='Income Statement Annual')
df[1].to_excel(writer, sheet_name='Income Statement Quarterly')
self.GetCompanyOverview_Original(stock_id).to_excel(writer, sheet_name='Company Overview')
self.GetSearchEndpoint(stock_id).to_excel(writer, sheet_name='Search Endpoint Results')
self.GetListingDelistingStatus().to_excel(writer, sheet_name='US ListingDelisting Status')
self.FindIPOCalender().to_excel(writer, sheet_name='IPO Calender')
writer.save()
# only for this project
class Info_Collected(object):
    """Project-specific facade that forwards data pulls to an API-port object."""
    def __init__(self, apikey : str, api_port : classmethod) -> None:
        # NOTE(review): `api_port` is used as an API-client instance below;
        # the `classmethod` annotation looks wrong -- confirm intended type.
        self.apikey = apikey
        self.api_port = api_port
    def Get_Stock_Price(self, stock_id:str) -> DataFrame:
        """Return price frames for *stock_id*.

        NOTE(review): actually returns a (daily, weekly, monthly, intraday)
        4-tuple, not a single DataFrame.
        """
        # Stock price
        df_daily = self.api_port.GetDailyStockPrice(stock_id)
        df_weekly = self.api_port.GetWeeklyStockPrice(stock_id)
        df_monthly = self.api_port.GetMonthlyStockPrice(stock_id)
        df_intraday = self.api_port.GetIntradayStockPrice(stock_id)
        return df_daily, df_weekly, df_monthly, df_intraday
    def Get_Stock_Status(self, stock_id:str) -> DataFrame:
        """Return the full listing/delisting table (*stock_id* is unused here)."""
        # listing and delisting status
        df_status = self.api_port.GetListingDelistingStatus()
        return df_status
    def Get_IPO_Calender(self) -> DataFrame:
        """Return IPOs expected in the next three months."""
        # IPO list in next three months
        df_ipo = self.api_port.FindIPOCalender()
        return df_ipo
    def Search_Endpoint_Backup(self, stock_id:str) -> DataFrame:
        """Return symbol-search results for *stock_id*."""
        # Search endpoint data backup
        df_search = self.api_port.GetSearchEndpoint(stock_id)
        return df_search
    def Database_Center(self, stock_id : str) -> DataFrame:
        """Company overview merged with IPO/delisting dates from the listing table.

        NOTE(review): duplicates the Database_Center defined earlier in this
        module on the API-port class; consider delegating instead.
        """
        # Company overview combine with the IPO date information in the listing&delisting data
        # NOTE(review): despite its name, this local holds the listing table.
        df_income_statement = self.api_port.GetListingDelistingStatus()
        df_company_overview = self.api_port.GetCompanyOverview_Original(stock_id)
        df_company_overview = df_company_overview.loc[:,['Symbol', 'AssetType', 'Name', 'Exchange','Country', 'Sector', 'Industry']]
        df_company_IPO_date = df_income_statement.loc[df_income_statement['symbol'] == stock_id]
        df_company_overview['IpoDate'] = str(df_company_IPO_date['ipoDate'].values[0])
        df_company_overview['DelistingDate'] = str(df_company_IPO_date['delistingDate'].values[0])
        df_company_overview.loc[(df_company_overview["DelistingDate"] == "null"), "DelistingDate"] = "1970-01-01"
        df_company_overview['Status'] = str(df_company_IPO_date['status'].values[0])
        return df_company_overview
| StarcoderdataPython |
1680867 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tarfile
import pandas as pd
from libs.mppandas import mp_apply
# =============================================================================
# CLASSES
# =============================================================================
class Filter2Bands(object):
    """Flags objects whose light-curve tarball contains both I and V bands."""
    def __call__(self, df):
        """Add/overwrite the "two_bands" column on *df* in place and return it."""
        df["two_bands"] = df.ID.apply(self.extract)
        return df
    def extract(self, oid):
        """Return a one-element Series saying whether *oid*'s tar holds both bands."""
        print("Filtering {}...".format(oid))
        o_path = os.path.join('data', "lc", "{}.tar".format(oid))
        try:
            with tarfile.TarFile(o_path) as tfp:
                tfp.getmember("./{}.I.dat".format(oid))
                tfp.getmember("./{}.V.dat".format(oid))
        # Bug fix: was a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit.  Missing archive (OSError), corrupt tar (TarError)
        # or absent member (KeyError) all mean "not two-band".
        except (OSError, KeyError, tarfile.TarError):
            return pd.Series({"two_bands": False})
        return pd.Series({"two_bands": True})
# =============================================================================
# FUNCTIONS
# =============================================================================
def main():
    """Build data/ogle3_2bc outputs; no-op if the pickle already exists."""
    if os.path.exists("data/ogle3_2bc.pkl"):
        return
    filter2Bands = Filter2Bands()
    df = pd.read_pickle("data/ogle3.pkl")
    df2bands = mp_apply(df, filter2Bands)
    df2bands.to_pickle("data/ogle3_2bc.pkl")
    # Bug fix: the original called to_pickle() here, writing a binary pickle
    # under a .csv extension; emit real CSV instead.
    df2bands.to_csv("data/ogle3_2bc.csv")
# =============================================================================
# MAIN
# =============================================================================
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
11297986 | <filename>docs/ETC/Modes/S - Cone Scope/info.py<gh_stars>1-10
# Metadata for the "S - Cone Scope" ETC mode (shown in the mode browser).
name = "S - Cone Scope"
description = "Line oscilloscope with angle"
# Knob-to-parameter assignments for this mode.
knob1 = "X Position"
knob2 = "Angle"
knob3 = "Line Width"
knob4 = "Color"
released = "March 21 2017"
| StarcoderdataPython |
5045836 | <gh_stars>1-10
# coding: utf-8
#
import os
import sys
#
def list_uniq(item_s):
    """Return *item_s* with duplicates dropped, keeping first-seen order.

    dict.fromkeys is O(n) versus the old per-item list-membership scan's
    O(n^2); every call site in this module passes hashable strings.
    """
    return list(dict.fromkeys(item_s))
# "exe" means executable, not just paths ending with ".exe"
def find_exe_paths(prog):
    """Resolve *prog* to executable file paths, Windows-style.

    Mirrors cmd.exe lookup: every directory on PATH (plus a leading empty
    entry, which lets *prog* be a relative or absolute path) is probed both
    for *prog* itself (when it already ends in one of the PATHEXT
    extensions) and for *prog* with each PATHEXT extension appended.
    Returns an ordered, de-duplicated list of matches; empty when PATHEXT
    is unset or nothing matches.
    """
    def _uniq(seq):
        # Order-preserving de-duplication (same contract as list_uniq).
        kept = []
        for entry in seq:
            if entry not in kept:
                kept.append(entry)
        return kept

    pathext = os.environ.get('PATHEXT', None)
    if not pathext:
        # Without PATHEXT we cannot know what counts as executable.
        return []

    # Normalise the extension list: strip, drop empties, lowercase, uniquify.
    exts = _uniq([e.strip().lower() for e in pathext.split(os.pathsep) if e.strip()])

    path_var = os.environ.get('PATH', None)
    search_dirs = path_var.split(os.pathsep) if path_var else []
    # The leading empty dir makes a bare/relative/absolute *prog* work as-is.
    search_dirs.insert(0, '')
    search_dirs = _uniq(search_dirs)

    # Does *prog* already carry an executable extension? (case-insensitive)
    has_ext = prog.lower().endswith(tuple(exts))

    found = []
    for directory in search_dirs:
        candidate = prog if directory == '' else os.path.join(directory, prog)
        if has_ext and os.path.isfile(candidate):
            found.append(candidate)
        # Also try each PATHEXT extension appended, covering the case where
        # the caller omitted the extension.
        for ext in exts:
            with_ext = candidate + ext
            if os.path.isfile(with_ext):
                found.append(with_ext)
    return _uniq(found)
#
def main():
    """CLI entry: print executable paths found for the single PROG argument.

    Exit codes: 1 for bad usage, 2 when nothing is found, 0 on success.
    """
    # 9mlJlKg
    # If not exactly one command argument is given
    if len(sys.argv) != 2:
        # 7rOUXFo
        # Print program usage
        usage = r"""Usage: aoikwinwhich PROG
#/ PROG can be either name or path
aoikwinwhich notepad.exe
aoikwinwhich C:\Windows\notepad.exe
#/ PROG can be either absolute or relative
aoikwinwhich C:\Windows\notepad.exe
aoikwinwhich Windows\notepad.exe
#/ PROG can be either with or without extension
aoikwinwhich notepad.exe
aoikwinwhich notepad
aoikwinwhich C:\Windows\notepad.exe
aoikwinwhich C:\Windows\notepad"""
        print(usage)
        # 3nqHnP7
        return 1
    #
    assert len(sys.argv) == 2
    # 9m5B08H
    # Get executable name or path
    prog = sys.argv[1]
    # 8ulvPXM
    # Find executable paths
    exe_path_s = find_exe_paths(prog)
    # 5fWrcaF
    # If has found none
    if not exe_path_s:
        # 3uswpx0
        return 2
    # If has found some
    else:
        # 9xPCWuS
        # Print result
        print('\n'.join(exe_path_s))
        # 4s1yY1b
        return 0
    #
    assert 0
# 4zKrqsC
# Program entry: exit status is main()'s return code (0 found, 1 usage, 2 none).
if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
8155342 | import torch
import pydestruct.input
class Dict:
    """Bidirectional token<->id vocabulary with optional special symbols.

    Reserved symbols receive the lowest ids in insertion order: pad
    ("**pad**"), then bos/eos ("**bos**"/"**eos**"), then the unk token,
    then the vocabulary words.  With lower=True all lookups are
    case-insensitive.
    """
    def __init__(self, words, unk=None, boundaries=False, pad=False, lower=False):
        self._boundaries = boundaries
        self._unk = unk
        self._lower = lower
        self._word_to_id = dict()
        self._id_to_word = list()
        if pad:
            if "**pad**" in words:
                raise RuntimeError("Pad is already in dict")
            self.pad_index = self._add_word("**pad**")
        if boundaries:
            if "**bos**" in words or "**eos**" in words:
                raise RuntimeError("Boundaries ids are already in dict")
            self._bos = self._add_word("**bos**")
            self._eos = self._add_word("**eos**")
        if unk in words:
            raise RuntimeError("UNK word exists in vocabulary")
        if unk is not None:
            self.unk_index = self._add_word(unk)
        if lower:
            words = set(w.lower() for w in words)
        for word in words:
            self._add_word(word)
    # for internal use only!
    def _add_word(self, word):
        # Assigns the next free id to *word*; assumes word is not present yet.
        if self._lower:
            word = word.lower()
        id = len(self._id_to_word)
        self._word_to_id[word] = id
        self._id_to_word.append(word)
        return id
    def contains(self, word):
        """Return True when *word* (lowercased if lower=True) is in the vocabulary."""
        if self._lower:
            word = word.lower()
        return word in self._word_to_id
    def word_to_id(self, word):
        """Map *word* to its id; falls back to the unk id when one was configured,
        otherwise raises KeyError for unknown words."""
        if self._lower:
            word = word.lower()
        if self._unk is not None:
            return self._word_to_id.get(word, self.unk_index)
        else:
            return self._word_to_id[word]
    def id_to_word(self, id):
        """Inverse mapping: id -> token string."""
        return self._id_to_word[id]
    def __len__(self):
        # Number of entries, special symbols included.
        return len(self._word_to_id)
    def has_unk(self):
        """True when an unk token was configured."""
        return self._unk is not None
    def has_boundaries(self):
        """True when bos/eos boundary tokens were added."""
        return self._boundaries
    def bos_id(self):
        # Only valid when constructed with boundaries=True.
        return self._bos
    def eos_id(self):
        # Only valid when constructed with boundaries=True.
        return self._eos
def build_dictionnaries(data, boundaries=False, char_boundaries=False, postag="tags"):
    """Collect vocabulary Dicts (chars, words, tags, labels) from *data*.

    data: iterable of sentence dicts with a "words" list and, optionally,
    *postag* and "constituents" entries.  Constituents of length 3 (or with
    a negative third element) feed "labels"; length-5 ones feed
    "disc_labels"; anything else is rejected.

    Returns a dict of Dict objects keyed by chars / words / labels /
    disc_labels / tags.
    """
    dict_labels = set()
    dict_disc_labels = set()
    dict_tags = set()
    dict_words = set()
    dict_chars = set()
    for sentence in data:
        dict_words.update(sentence["words"])
        if postag in sentence:
            dict_tags.update(sentence[postag])
        for word in sentence["words"]:
            dict_chars.update(word)
        if "constituents" in sentence:
            for cst in sentence["constituents"]:
                if len(cst) == 3 or cst[2] < 0:
                    dict_labels.add(cst[0])
                elif len(cst) == 5:
                    dict_disc_labels.add(cst[0])
                else:
                    # Bug fix: the original instantiated this RuntimeError
                    # without raising it, silently skipping bad constituents.
                    raise RuntimeError("Weird constituent: ", cst)
    dict_chars = Dict(dict_chars, boundaries=char_boundaries)
    dict_words = Dict(dict_words, unk="#UNK#", boundaries=boundaries, pad=True)
    dict_tags = Dict(dict_tags, boundaries=boundaries, pad=True)
    dict_labels = Dict(dict_labels)
    dict_disc_labels = Dict(dict_disc_labels)
    return {"chars": dict_chars, "words": dict_words, "labels": dict_labels, "disc_labels": dict_disc_labels, "tags": dict_tags}
| StarcoderdataPython |
# Feature-extraction hyperparameters (presumably HOG orientation bins, cell
# geometry, spatial-binning size and histogram bin count -- confirm against
# the consumer of this dict).
feat_settings = {
    "orient": 18, # 9 for rbf
    "pix_per_cell": 8,
    "cell_per_block": 2,
    "spatial_size": (16, 16),
    "n_bins": 68
}
| StarcoderdataPython |
194654 | import numpy as np
from .other import clip_boxes
from .text_proposal_graph_builder import TextProposalGraphBuilder
class TextProposalConnector:
    """Groups text proposals into text lines and emits per-line corner boxes."""
    def __init__(self):
        self.graph_builder=TextProposalGraphBuilder()
    def group_text_proposals(self, text_proposals, scores, im_size):
        """Cluster proposals into connected sub-graphs (one group per line)."""
        graph=self.graph_builder.build_graph(text_proposals, scores, im_size)
        return graph.sub_graphs_connected()
    def fit_y(self, X, Y, x1, x2):
        """Fit a degree-1 polynomial through (X, Y) and evaluate at x1 and x2."""
        # Bug fix: this was a bare `len(X)!=0` expression with no effect;
        # make the precondition an actual assertion.
        assert len(X) != 0
        # If X holds a single distinct value the fit is degenerate: y = Y[0].
        if np.sum(X==X[0])==len(X):
            return Y[0], Y[0]
        p=np.poly1d(np.polyfit(X, Y, 1))
        return p(x1), p(x2)
    def get_text_lines(self, text_proposals, scores, im_size):
        """Merge proposals into lines.

        Returns an (n, 9) float array: the four corner points
        (x1,y1,x2,y2,x3,y3,x4,y4) of each axis-aligned line box plus the
        averaged proposal score.
        """
        # tp=text proposal
        tp_groups=self.group_text_proposals(text_proposals, scores, im_size)
        text_lines=np.zeros((len(tp_groups), 5), np.float32)
        for index, tp_indices in enumerate(tp_groups):
            text_line_boxes=text_proposals[list(tp_indices)]
            x0=np.min(text_line_boxes[:, 0])
            x1=np.max(text_line_boxes[:, 2])
            # Shrink the evaluation range by half a proposal width so the
            # fitted top/bottom edges are sampled inside the line.
            offset=(text_line_boxes[0, 2]-text_line_boxes[0, 0])*0.5
            lt_y, rt_y=self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 1], x0+offset, x1-offset)
            lb_y, rb_y=self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 3], x0+offset, x1-offset)
            # the score of a text line is the average score of the scores
            # of all text proposals contained in the text line
            score=scores[list(tp_indices)].sum()/float(len(tp_indices))
            text_lines[index, 0]=x0
            text_lines[index, 1]=min(lt_y, rt_y)
            text_lines[index, 2]=x1
            text_lines[index, 3]=max(lb_y, rb_y)
            text_lines[index, 4]=score
        text_lines=clip_boxes(text_lines, im_size)
        # Bug fix: the deprecated alias np.float was removed in NumPy 1.24;
        # the builtin float yields the same float64 dtype.
        text_recs = np.zeros((len(text_lines), 9), float)
        # enumerate() replaces the manual `index = index + 1` counter.
        for index, line in enumerate(text_lines):
            xmin,ymin,xmax,ymax=line[0],line[1],line[2],line[3]
            text_recs[index, 0] = xmin
            text_recs[index, 1] = ymin
            text_recs[index, 2] = xmax
            text_recs[index, 3] = ymin
            text_recs[index, 4] = xmin
            text_recs[index, 5] = ymax
            text_recs[index, 6] = xmax
            text_recs[index, 7] = ymax
            text_recs[index, 8] = line[4]
        return text_recs
| StarcoderdataPython |
9642093 | <reponame>ONSdigital/ras-frontstage<filename>frontstage/views/account/account_survey_share.py
import json
import logging
from flask import flash, render_template, request
from flask import session as flask_session
from flask import url_for
from structlog import wrap_logger
from werkzeug.utils import redirect
from frontstage import app
from frontstage.common.authorisation import jwt_authorization
from frontstage.controllers import party_controller, survey_controller
from frontstage.controllers.party_controller import (
get_business_by_id,
get_list_of_business_for_party,
get_surveys_listed_against_party_and_business_id,
get_user_count_registered_against_business_and_survey,
register_pending_shares,
)
from frontstage.exceptions.exceptions import ShareSurveyProcessError
from frontstage.models import (
AccountSurveySelectBusinessForm,
AccountSurveyShareRecipientEmailForm,
ConfirmEmailChangeForm,
)
from frontstage.views.account import account_bp
logger = wrap_logger(logging.getLogger(__name__))
@account_bp.route("/share-surveys", methods=["GET"])
@jwt_authorization(request)
def share_survey_overview(session):
    """Landing page of the share-surveys flow; resets all flow session state."""
    # 'share_survey_data' holds business and surveys selected for share
    flask_session.pop("share_survey_data", None)
    # 'share_survey_recipient_email_address' holds the recipient email address
    flask_session.pop("share_survey_recipient_email_address", None)
    # 'validation_failure_share_surveys_list' holds the surveys which failed
    # max-share validation; used to show a red mark on the UI
    flask_session.pop("validation_failure_share_surveys_list", None)
    # 'share_surveys_selected_list' holds the surveys the user ticked, so the
    # boxes stay checked after a validation error
    flask_session.pop("share_surveys_selected_list", None)
    return render_template("surveys/surveys-share/overview.html")
@account_bp.route("/share-surveys/business-selection", methods=["GET"])
@jwt_authorization(request)
def share_survey_business_select(session):
    """Render the business-selection step, clearing later-step session state."""
    flask_session.pop("share_survey_recipient_email_address", None)
    flask_session.pop("validation_failure_share_surveys_list", None)
    flask_session.pop("share_surveys_selected_list", None)
    form = AccountSurveySelectBusinessForm(request.values)
    party_id = session.get_party_id()
    businesses = get_list_of_business_for_party(party_id)
    return render_template("surveys/surveys-share/business-select.html", businesses=businesses, form=form)
@account_bp.route("/share-surveys/business-selection", methods=["POST"])
@jwt_authorization(request)
def share_survey_post_business_select(session):
    """Store the chosen business ids in the session and move to survey selection."""
    flask_session.pop("share_survey_data", None)
    share_survey_business_selected = request.form.getlist("checkbox-answer")
    if len(share_survey_business_selected) == 0:
        flash("Select an answer")
        return redirect(url_for("account_bp.share_survey_business_select"))
    # Map each selected business id to an (initially empty) survey list.
    flask_session["share_survey_data"] = {k: [] for k in share_survey_business_selected}
    return redirect(url_for("account_bp.share_survey_survey_select"))
@account_bp.route("/share-surveys/survey-selection", methods=["GET"])
@jwt_authorization(request)
def share_survey_survey_select(session):
    """Render the survey-selection step for every business chosen earlier."""
    party_id = session.get_party_id()
    share_dict = {}
    for business_id in flask_session["share_survey_data"]:
        # get_business_by_id is indexed with [0] below -- it returns a
        # sequence whose first element is the business record.
        selected_business = get_business_by_id(business_id)
        surveys = get_surveys_listed_against_party_and_business_id(business_id, party_id)
        share_dict[selected_business[0]["id"]] = {"name": selected_business[0]["name"], "surveys": surveys}
    error = request.args.get("error", "")
    # Replay earlier validation failures / selections so the template can mark
    # errors and keep the user's choices after a round-trip.
    failed_surveys_list = flask_session.get("validation_failure_share_surveys_list")
    selected_survey_list = flask_session.get("share_surveys_selected_list")
    return render_template(
        "surveys/surveys-share/survey-select.html",
        share_dict=share_dict,
        error=error,
        failed_surveys_list=failed_surveys_list if failed_surveys_list is not None else [],
        selected_survey_list=selected_survey_list if selected_survey_list is not None else [],
    )
def validate_max_shared_survey(business_id: str, share_survey_surveys_selected: list) -> bool:
    """
    Validate that none of the selected surveys has reached the maximum number
    of registered users (app.config["MAX_SHARED_SURVEY"]).  Failing survey ids
    are stored in session key 'validation_failure_share_surveys_list' for the UI.
    param: business_id : business id str
    param: share_survey_surveys_selected : selected survey ids for this business
    return: True when every survey is under the limit
    """
    is_valid = True
    failed_surveys_list = []
    for survey_selected in share_survey_surveys_selected:
        logger.info(
            "Getting count of users registered against business and survey",
            business_id=business_id,
            survey_id=survey_selected,
        )
        user_count = get_user_count_registered_against_business_and_survey(business_id, survey_selected, False)
        if user_count > app.config["MAX_SHARED_SURVEY"]:
            is_valid = False
            failed_surveys_list.append(survey_selected)
    flask_session["validation_failure_share_surveys_list"] = failed_surveys_list
    return is_valid
def get_selected_businesses() -> list:
    """
    This function returns the list of business objects for the business ids
    held in the flask session (each element is the raw get_business_by_id
    result, itself a sequence indexed with [0] by callers).
    return: list
    """
    selected_businesses = []
    for business_id in flask_session["share_survey_data"]:
        selected_businesses.append(get_business_by_id(business_id))
    return selected_businesses
def set_surveys_selected_list(selected_businesses, form) -> None:
    """
    This function sets the flask session key 'share_surveys_selected_list' with the user's selection
    param: selected_businesses : list of businesses
    param: form : request form
    return: None
    """
    flask_session.pop("share_surveys_selected_list", None)
    share_surveys_selected_list = []
    for business in selected_businesses:
        share_surveys_selected_list.append(form.getlist(business[0]["id"]))
    # Flatten the per-business selections into a single survey-id list.
    flask_session["share_surveys_selected_list"] = [item for sublist in share_surveys_selected_list for item in sublist]
def is_surveys_selected_against_selected_businesses(selected_businesses, form) -> bool:
    """
    This function validates that every selected business has at least one
    survey selected, flashing one message per failing business (keyed by id).
    param: selected_businesses : list of businesses
    param: form : request form
    return: True when at least one business has NO surveys selected
    """
    surveys_not_selected = False
    for business in selected_businesses:
        share_surveys_selected_against_business = form.getlist(business[0]["id"])
        if len(share_surveys_selected_against_business) == 0:
            flash("Select an answer", business[0]["id"])
            surveys_not_selected = True
    return surveys_not_selected
def is_max_share_survey_exceeded(selected_businesses, form) -> bool:
    """
    This function validates that the selected surveys have not exceeded the
    max-share limit, flashing one message per failing business (keyed by id).
    param: selected_businesses : list of businesses
    param: form : request form
    return: True when any business has a survey over the limit
    """
    is_max_share_survey = False
    for business in selected_businesses:
        share_surveys_selected_against_business = form.getlist(business[0]["id"])
        if not validate_max_shared_survey(business[0]["id"], share_surveys_selected_against_business):
            flash(
                "You have reached the maximum amount of emails you can enroll on one or more surveys", business[0]["id"]
            )
            is_max_share_survey = True
    return is_max_share_survey
@account_bp.route("/share-surveys/survey-selection", methods=["POST"])
@jwt_authorization(request)
def share_survey_post_survey_select(session):
    """Validate and store the survey selection for every chosen business."""
    share_dictionary_copy = flask_session["share_survey_data"]
    flask_session.pop("validation_failure_share_surveys_list", None)
    selected_businesses = get_selected_businesses()
    set_surveys_selected_list(selected_businesses, request.form)
    # Validation is done in two passes so the UI can show per-business error
    # messages: first that every business has surveys selected, then the
    # max-share-survey limit.
    if is_surveys_selected_against_selected_businesses(selected_businesses, request.form):
        return redirect(url_for("account_bp.share_survey_survey_select", error="surveys_not_selected"))
    if is_max_share_survey_exceeded(selected_businesses, request.form):
        return redirect(url_for("account_bp.share_survey_survey_select", error="max_share_survey_exceeded"))
    # Both checks passed: record the surveys chosen for each business.
    for business in selected_businesses:
        share_surveys_selected_against_business = request.form.getlist(business[0]["id"])
        share_dictionary_copy[business[0]["id"]] = share_surveys_selected_against_business
    flask_session.pop("validation_failure_share_surveys_list", None)
    flask_session.pop("share_surveys_selected_list", None)
    flask_session.pop("share", None)
    flask_session["share_survey_data"] = share_dictionary_copy
    return redirect(url_for("account_bp.share_survey_email_entry"))
@account_bp.route("/share-surveys/recipient-email-address", methods=["GET"])
@jwt_authorization(request)
def share_survey_email_entry(session):
    """Render the recipient-email form, clearing any previously stored address."""
    form = AccountSurveyShareRecipientEmailForm(request.values)
    flask_session["share_survey_recipient_email_address"] = None
    return render_template("surveys/surveys-share/recipient-email-address.html", form=form, errors=form.errors)
@account_bp.route("/share-surveys/recipient-email-address", methods=["POST"])
@jwt_authorization(request)
def share_survey_post_email_entry(session):
    """Validate the recipient email (must not be the respondent's own) and store it."""
    form = AccountSurveyShareRecipientEmailForm(request.values)
    party_id = session.get_party_id()
    respondent_details = party_controller.get_respondent_party_by_id(party_id)
    # Reject invalid addresses and self-sharing (case-insensitive comparison).
    if not form.validate() or respondent_details["emailAddress"].lower() == form.data["email_address"].lower():
        if respondent_details["emailAddress"].lower() == form.data["email_address"].lower():
            errors = {"email_address": ["You can not share surveys with yourself."]}
        else:
            errors = form.errors
        return render_template("surveys/surveys-share/recipient-email-address.html", form=form, errors=errors)
    flask_session["share_survey_recipient_email_address"] = form.data["email_address"]
    return redirect(url_for("account_bp.send_instruction_get"))
@account_bp.route("/share-surveys/send-instruction", methods=["GET"])
@jwt_authorization(request)
def send_instruction_get(session):
    """Show the confirmation page summarising what will be shared and with whom."""
    email = flask_session["share_survey_recipient_email_address"]
    share_dict = {}
    for business_id in flask_session["share_survey_data"]:
        selected_business = get_business_by_id(business_id)
        surveys = []
        # Resolve each selected survey id to its full survey record.
        for survey_id in flask_session["share_survey_data"][business_id]:
            surveys.append(survey_controller.get_survey(app.config["SURVEY_URL"], app.config["BASIC_AUTH"], survey_id))
        share_dict[selected_business[0]["id"]] = {"name": selected_business[0]["name"], "surveys": surveys}
    return render_template(
        "surveys/surveys-share/send-instructions.html",
        email=email,
        share_dict=share_dict,
        form=ConfirmEmailChangeForm(),
    )
def build_payload(respondent_id) -> str:
    """
    This method builds the JSON payload (as a string) required by the party
    endpoint to register new pending shares: one entry per selected
    business/survey pair, addressed to the recipient email held in the session.
    TODO: The logic should change for multiple business once the story is in play.
    payload example:
    { pending_shares: [{
        "business_id": "business_id"
        "survey_id": "survey_id",
        "email_address": "email_address",
        "shared_by": "party_uuid"
    },
    {
        "business_id": "business_id":
        "survey_id": "survey_id",
        "email_address": "email_address",
        "shared_by": "party_uuid"
    }]
    }
    """
    email = flask_session["share_survey_recipient_email_address"]
    payload = {}
    pending_shares = []
    share_dictionary = flask_session["share_survey_data"]
    for business_id in share_dictionary:
        for survey in share_dictionary[business_id]:
            pending_share = {
                "business_id": business_id,
                "survey_id": survey,
                "email_address": email,
                "shared_by": respondent_id,
            }
            pending_shares.append(pending_share)
    payload["pending_shares"] = pending_shares
    return json.dumps(payload)
@account_bp.route("/share-surveys/send-instruction", methods=["POST"])
@jwt_authorization(request)
def send_instruction(session):
    """Submit the pending shares to the party service and show the result page."""
    form = ConfirmEmailChangeForm(request.values)
    email = flask_session["share_survey_recipient_email_address"]
    party_id = session.get_party_id()
    respondent_details = party_controller.get_respondent_party_by_id(party_id)
    # Defensive check: the posted email must match the one held in the session.
    if form["email_address"].data != email:
        raise ShareSurveyProcessError("Process failed due to session error")
    json_data = build_payload(respondent_details["id"])
    response = register_pending_shares(json_data)
    # A 400 from the party service indicates a duplicate still-pending share
    # (see the flash message below); other statuses fall through to success.
    if response.status_code == 400:
        flash(
            "You have already transferred or shared these surveys with someone with this email address. They have 72 "
            "hours to accept your request. If you have made an error then wait for the transfer/share to expire or "
            "contact us.",
        )
        return redirect(url_for("account_bp.send_instruction_get"))
    return render_template("surveys/surveys-share/almost-done.html")
@account_bp.route("/share-surveys/done", methods=["GET"])
@jwt_authorization(request)
def share_survey_done(session):
    """Final step: clear residual share state and return to the to-do survey list."""
    flask_session.pop("share", None)
    flask_session.pop("share_survey_recipient_email_address", None)
    return redirect(url_for("surveys_bp.get_survey_list", tag="todo"))
| StarcoderdataPython |
128361 | <reponame>PeterRydberg/rl_peg_solitaire
from .Actor import Actor
from .Critic import Critic
from game.PegGame import PegGame
import itertools
import matplotlib.pyplot as plt
class ReinforcementLearner:
    """Actor-critic reinforcement learner for peg solitaire.

    Wires together a PegGame environment, an Actor (policy + eligibilities)
    and a Critic (value function, table- or NN-based) and runs episodic
    TD(lambda)-style training.
    """
    def __init__(
        self,
        episodes,
        game_settings,
        critic_settings,
        actor_settings
    ):
        # episodes: number of training episodes to run.
        # game_settings / critic_settings / actor_settings: plain dicts of
        # hyper-parameters (keys prefixed c_/a_ for critic/actor).
        self.episodes = episodes
        self.game_settings = game_settings
        self.critic = Critic(
            critic_settings["c_type"],
            critic_settings["c_learning_rate"],
            critic_settings["c_eligibility_decay"],
            critic_settings["c_discount_factor"],
            critic_settings["c_nn_layers"],
            self.get_board_shape()
        )
        self.actor = Actor(
            actor_settings["a_learning_rate"],
            actor_settings["a_eligibility_decay"],
            actor_settings["a_discount_factor"],
            actor_settings["a_e_greediness"],
            episodes
        )
    def train_model(self):
        """Run all training episodes; return the per-episode list of filled
        holes at game end (lower is better for peg solitaire)."""
        performance = []
        # Iterate through all episodes
        for episode in range(self.episodes):
            # Initializes the episode game
            current_game, board_state, legal_moves = self.init_game(
                display_game=(episode+1) in self.game_settings["display_game"],
                game_name=f'Episode {episode+1}'
            )
            # Initializes eligibilities at start of episode, using states
            self.init_eligibilities(board_state, legal_moves)
            actions_taken = []
            # Loops until game is lost/won
            while legal_moves:
                # Handles new board state, if actor/critic needs inits
                self.actor.handle_board_state(board_state, legal_moves)
                self.critic.handle_board_state(board_state)
                # Makes move and parses results
                prev_state, reward, board_state, legal_moves = \
                    self.make_game_choice(
                        board_state, current_game, actions_taken
                    )
                # Update critic temporal difference
                temporal_diff = self.critic.calc_temp_diff(
                    reward,
                    board_state,
                    prev_state
                )
                # Updates critic values, actor policy and eligibilities
                # for each state action pair
                self.value_policy_update(
                    actions_taken,
                    temporal_diff
                )
            # Anneal exploration after each finished episode.
            self.actor.increase_greediness(self.episodes)
            performance.append(current_game.get_filled_holes())
        return performance
    # Handles updating of policy and critic values
    def value_policy_update(self, actions_taken, temporal_diff):
        """Propagate one TD error through every (state, action) visited so far."""
        self.critic.actions_update(actions_taken, temporal_diff)
        self.actor.actions_update(actions_taken, temporal_diff)
    # Make an action choice and parse the results
    def make_game_choice(self, board_state, current_game, actions_taken):
        """Pick a move via the actor, apply it, record it in actions_taken and
        return (prev_state, reward, new_state_string, legal_moves)."""
        # Get and make the next move
        prev_state = board_state
        prev_action = self.actor.get_move(board_state, training=True)
        result = current_game.try_move(prev_action, return_reward=True)
        actions_taken.append((prev_state, prev_action))
        # Parse move result
        reward, board_state, legal_moves = result
        board_state = self.convert_flat_state_string(board_state)
        return prev_state, reward, board_state, legal_moves
    # Initializes eligibilities before each episode
    def init_eligibilities(self, board_state, legal_moves):
        # NOTE(review): board_state/legal_moves are accepted but unused here —
        # the resets are unconditional.
        # Reset all eligibilities before episode
        self.critic.reset_eligibilities()
        self.actor.reset_eligibilities()
    # Converts the Peghole object state to bitstring (label)
    def convert_flat_state_string(self, board_state):
        """Flatten a 2D board of Peghole objects to a '10…' bitstring
        (1 = filled, 0 = empty); other contents are skipped entirely."""
        state_string = ""
        for peghole in list(itertools.chain(*board_state)):
            if(peghole.content == "filled"):
                state_string += "1"
            elif(peghole.content == "empty"):
                state_string += "0"
        return state_string
    # Displays the performance graph
    def display_performance_graph(self, performance):
        """Plot pegs remaining per episode (blocking matplotlib window)."""
        plt.plot(performance)
        plt.ylabel('Amount of pegs left')
        plt.xlabel('Episode number')
        plt.show()
    # Gets the board shape for critic input
    def get_board_shape(self):
        """Instantiate a throwaway game to measure the flattened state length
        (input size for an NN critic)."""
        game = PegGame(
            self.game_settings["board_type"],
            self.game_settings["board_size"],
            self.game_settings["initial_empty"],
            self.game_settings["live_update_frequency"],
            False,
            None
        )
        return len(self.convert_flat_state_string(game.board.board_content))
    # Initializes new game
    def init_game(
        self,
        display_game=False,
        game_name="Peg solitaire"
    ):
        """Create a PegGame from game_settings and return
        (game, initial_state_string, initial_legal_moves)."""
        # Initializes new game using game settings
        current_game = PegGame(
            self.game_settings["board_type"],
            self.game_settings["board_size"],
            self.game_settings["initial_empty"],
            self.game_settings["live_update_frequency"],
            display_game,
            game_name
        )
        # Gets initial board and move states
        board_state = self.convert_flat_state_string(
            current_game.get_board_state()
        )
        legal_moves = current_game.get_legal_moves(True)
        return current_game, board_state, legal_moves
    # Runs a single game using greedy on-policy strategy
    def run_game(self):
        """Play one game greedily (no exploration, no learning updates)."""
        self.actor.set_greedy()  # Makes actor fully greedy
        # Initializes the game
        current_game, board_state, legal_moves = self.init_game(
            True,
            'Peg solitaire'
        )
        while legal_moves:
            # Get and make the next move
            # NOTE(review): here legal_moves is passed positionally where the
            # training path passes training=True — confirm Actor.get_move's
            # second parameter accepts this.
            action = self.actor.get_move(board_state, legal_moves)
            result = current_game.try_move(action, return_reward=False)
            # Parse move result
            board_state, legal_moves = result
            board_state = self.convert_flat_state_string(board_state)
| StarcoderdataPython |
1968233 | <gh_stars>0
# Experiment that generates several sets of networks of varying CH-divergence types
# then trains an msbm of a single type in a "consensus" type of way. Then we report the
# average rand_index and average entropy of the z variables, which are indicators of how well
# the algorithm is learning the true model.
import os, sys
import pickle
import pdb
import pandas as pd
from ggplot import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
def main():
    """Load the saved one-network experiment statistics and render plots.

    Produces:
      * a density plot of the per-node Z entropies (ggplot, PDF),
      * a line plot of the ELBO trajectory over iterations (matplotlib, PDF),
      * heatmaps of the true vs. inferred block edge probabilities (SVG),
        where each axis is partitioned into communities proportionally to the
        (actual / estimated) mixing weights gamma.
    """
    stats_url = os.path.join('stats', 'stats_' + 'one_net.pickle')
    print("generating plots from: {}".format(stats_url))
    statistics = pickle.load(open(stats_url, 'rb'), encoding='latin1')
    #density plot of entropies
    data = pd.DataFrame( statistics['entro_list'][0] , columns = ['Z_entropy'])
    plot1 = ggplot(data, aes('Z_entropy')) + geom_density() + ggtitle("Density of Z entropies")
    plot1_file = os.path.join('plots', 'plot_' + 'entropies.pdf')
    plot1.save(plot1_file)
    #line plot of ELBO
    elbos = statistics['elbo_seq']  # fix: 'elbos' was referenced below but never defined (NameError)
    plt.plot(list(range(len(elbos))), elbos, '-o')
    plt.xlabel(r'time, $t$', fontsize=12)
    # fix: \mathfrac is not a valid (La)TeX command and made the renderer fail
    plt.ylabel(r'ELBO, $\mathcal{L}$', fontsize=12)
    plt.title(r"Evolution of Elbo", fontsize=16)
    plt.grid(True)
    plot2_file = os.path.join('plots', 'plot_' + 'elbos.pdf')
    plt.savefig(plot2_file, format="pdf")  # fix: file has a .pdf name but was written as SVG
    plt.clf()  # fix: clear the line plot so it does not bleed into the heatmaps below
    #True vs Obtained Gamma, Pi in single rectangle:
    N = len(statistics['entro_list'][0])
    data3 = np.zeros((N, N))
    data4 = np.zeros((N, N))
    for i in range(N):  #rows
        for j in range(N):  #columns
            # Map pixel (i, j) to the community pair (q, r) whose cumulative
            # gamma interval contains i/N resp. j/N, for both the actual and
            # the estimated mixing weights.
            q_order = list(i/N >= np.cumsum(statistics['actual_gamma']))
            q = q_order.index(0)
            q_prime_order = list(i/N >= np.cumsum(statistics['resulting_gamma']))
            q_prime = q_prime_order.index(0)
            r_order = list(j/N >= np.cumsum(statistics['actual_gamma']))
            r_prime_order = list(j/N >= np.cumsum(statistics['resulting_gamma']))
            r = r_order.index(0)
            r_prime = r_prime_order.index(0)
            data3[i, j] = statistics['actual_pi'][q, r]
            data4[i, j] = statistics['resulting_pi'][q_prime, r_prime]
    plt.imshow(data3, origin="upper", norm=colors.PowerNorm(0.5), cmap='Blues', interpolation='nearest')
    plt.colorbar()
    plt.title(r"Real edge probabilities, $\alpha$", fontsize= 16)
    plt.rc('text', usetex=True)  # NOTE(review): requires a LaTeX install; affects only later figures
    plt.rc('font', family='serif')
    plot_file = os.path.join('plots', 'plot_' + 'actual_pi.svg')
    plt.savefig(plot_file, format="svg")
    plt.clf()
    #and we plot the resulting ones
    plt.imshow(data4, origin="upper", norm=colors.PowerNorm(0.5), cmap='Blues', interpolation='nearest')
    plt.colorbar()
    plt.title("Predicted edge probabilities", fontsize= 16)
    plot_file = os.path.join('plots', 'plot_' + 'pred_pi.svg')
    plt.savefig(plot_file, format="svg")
    sys.exit()
if __name__ == '__main__':
    main()
3420651 | <reponame>PavelTkachen/lost
from datetime import datetime
#from py3nvml.py3nvml import *
import sys
from lost.db import model, state, dtype
import json
import lost
from lost.logic.pipeline import pipe_model
import os
import shutil
from lost.logic.file_man import FileMan
from lost.logic import anno_task as at_man
from lost.pyapi import script as script_api
import subprocess
from lost.logic import script as script_man
from lost.logic.anno_task import update_anno_task
from distutils import dir_util
import importlib
import traceback
import lost.logic.log
import logging
from celery.utils.log import get_task_logger
from celery import task
from lost.db.access import DBMan
from lost.logic.config import LOSTConfig
from lost.logic.pipeline.worker import WorkerMan, CurrentWorker
from lost.logic import email
class PipeEngine(pipe_model.PipeEngine):
    """Drives one LOST pipeline: walks the pipeline graph, dispatches scripts
    to celery workers, releases annotation tasks, advances loops and reports
    errors. One instance operates on one Pipe row."""
    def __init__(self, dbm, pipe, lostconfig):
        '''
        :type dbm: lost.db.access.DBMan
        :type pipe: lost.db.model.Pipe
        '''
        super().__init__(dbm=dbm, pipe=pipe)
        self.lostconfig = lostconfig #type: lost.logic.config.LOSTConfig
        self.file_man = FileMan(self.lostconfig)
        # self.logger = lost.logic.log.get_file_logger(
        #     'Executor: {}'.format(self.lostconfig.env),
        #     self.file_man.app_log_path)
        self.logger = get_task_logger(__name__)
    def process_annotask(self, pipe_e):
        """Advance the annotation task behind *pipe_e*: close it when the
        current iteration requested no annotations, or release it to the
        annotators when it is still PENDING. FINISHED is set by the
        annotation tool, not here."""
        anno_task = self.dbm.get_anno_task(pipe_element_id=pipe_e.idx)
        if anno_task.state == state.AnnoTask.IN_PROGRESS or \
           anno_task.state == state.AnnoTask.PAUSED:
            # Nothing was requested this iteration -> close the task directly.
            if not at_man.has_annotation_in_iteration(self.dbm, anno_task.idx, pipe_e.iteration):
                at_man.set_finished(self.dbm, anno_task.idx)
                self.logger.warning('No Annotations have been requested for AnnoTask {}'\
                    .format(anno_task.idx))
                self.logger.warning("%d: AnnoTask has been finished (ID: %d, Name: %s)"\
                    %(self.pipe.idx, anno_task.idx, anno_task.name))
        # if pipe_e.anno_task.dtype == dtype.AnnoTask.MIA:
        #     if anno_task.progress is None:
        #         anno_task.progress = 0.0
        #     if anno_task.progress >= 100.0:
        #         anno_task.state = state.AnnoTask.FINISHED
        #         self.dbm.add(anno_task)
        #         pipe_e.state = state.PipeElement.FINISHED
        #         self.dbm.add(pipe_e)
        #         self.dbm.commit()
        #         print("%d: AnnoTask has been finished (ID: %d, Name: %s)"\
        #             %(self.pipe.idx, anno_task.idx, anno_task.name))
        #     else:
        #         return
        # state = finished will be set in annotation tool
        if anno_task.state == state.AnnoTask.PENDING:
            anno_task.state = state.AnnoTask.IN_PROGRESS
            self.dbm.save_obj(anno_task)
            self.logger.info("%d: AnnoTask IN_PROGRESS (ID: %d, Name: %s)"\
                %(self.pipe.idx, anno_task.idx, anno_task.name))
    def __gen_run_cmd(self, program, pipe_e):
        """Build the shell command that sources the Python env and runs the
        element's script with its pipe-element id."""
        # script = self.dbm.get_script(pipe_e.script_id)
        script_path = os.path.join(self.lostconfig.project_path, pipe_e.script.path)
        cmd = self.lostconfig.py3_init + " && "
        cmd += program + " " + script_path + " --idx " + str(pipe_e.idx)
        return cmd
    def make_debug_session(self, pipe_e):
        """Write a debug.sh that starts the element's script under pudb3 and
        store the how-to string on the pipe element (shown in the web UI)."""
        debug_path = self.file_man.create_debug_path(pipe_element=pipe_e)
        debug_file_path = os.path.join(debug_path, 'debug.sh')
        # init = self.lostconfig.py3_init + '\n'
        cmd = self.__gen_run_cmd('pudb3', pipe_e)
        # script_content = init + cmd
        script_content = cmd
        with open(debug_file_path, 'w') as dfile:
            dfile.write(script_content)
        script_path = os.path.join(self.lostconfig.project_path, pipe_e.script.path)
        dsession_str = "For DEBUG start: bash " + debug_file_path
        dsession_str += "<br>If you want to EDIT go to: " + script_path
        pipe_e.debug_session = dsession_str
        self.dbm.save_obj(pipe_e)
        self.logger.info('Created debug script: {}'.format(debug_file_path))
        self.logger.info(pipe_e.debug_session)
    def __release_loop_iteration(self, pipe_e):
        """Start the next loop iteration: bump the loop counter and reset every
        element inside the loop span back to PENDING for a fresh pass."""
        pipe_e.loop.iteration += 1
        self.logger.info('{}: Run loop with id {} in iteration {}'.format(self.pipe.idx,
            pipe_e.loop.idx,
            pipe_e.loop.iteration))
        loop_pes = self.get_loop_pes(pipe_e.loop.pe_jump, pipe_e)
        for pe in loop_pes:
            pe.iteration += 1
            pe.state = state.PipeElement.PENDING
            if pe.dtype == dtype.PipeElement.ANNO_TASK:
                pe.anno_task.state = state.AnnoTask.PENDING
            elif pe.dtype == dtype.PipeElement.SCRIPT:
                pe.progress = 0.0
            elif pe.dtype == dtype.PipeElement.LOOP:
                # Check for loop in loop case; Set iteration of all inner loops
                # to zero.
                if pe is not pipe_e:
                    pe.loop.iteration = 0
            self.set_to_visit(pe)
            self.dbm.add(pe)
    def process_loop(self, pipe_e):
        """Handle a LOOP element: finish it when broken or when max_iteration
        is reached, otherwise release the next iteration."""
        if pipe_e.loop.break_loop:
            pipe_e.state = state.PipeElement.FINISHED
            self.dbm.add(pipe_e)
            self.logger.info('{}: Break loop with id {}'.format(self.pipe.idx,
                pipe_e.loop.idx))
            return
        if pipe_e.loop.max_iteration is not None:
            if pipe_e.loop.iteration is None:
                pipe_e.loop.iteration = 0
            if pipe_e.loop.iteration < pipe_e.loop.max_iteration-1:
                self.__release_loop_iteration(pipe_e)
            else:
                pipe_e.state = state.PipeElement.FINISHED
                self.logger.info('{}: Loop ({}) terminated. Max iterations = {}'\
                    .format(self.pipe.idx, pipe_e.loop.idx,
                        pipe_e.loop.max_iteration))
        else:
            # No max_iteration -> loop runs until break_loop is set.
            self.__release_loop_iteration(pipe_e)
        self.dbm.add(pipe_e)
    def select_env_for_script(self, pipe_e):
        '''Select an environment where the script should be executed'''
        # Returns a celery queue name, or None when no registered worker
        # offers one of the environments the script requires.
        w_man = WorkerMan(self.dbm, self.lostconfig)
        if pipe_e.script.envs is not None:
            script_envs = json.loads(pipe_e.script.envs)
            if len(script_envs) == 0:
                return 'celery'
        else:
            script_envs = list()
            return 'celery' # Return default queue
        worker_envs = w_man.get_worker_envs()
        for script_env in script_envs:
            if script_env in worker_envs:
                return script_env
        self.logger.warning('No suitable env to execute script: {}'.format(pipe_e.script.path))
        return None
    def process_pipe_element(self):
        """Main dispatch loop: repeatedly take the next ready element and act
        on it according to its dtype until no element is ready."""
        pipe_e = self.get_next_element()
        while (pipe_e is not None):
            # if pipe_e is None:
            #     return
            if pipe_e.dtype == dtype.PipeElement.SCRIPT:
                if pipe_e.state != state.PipeElement.SCRIPT_ERROR:
                    # if pipe_e.is_debug_mode:
                    #     pipe_e.state = state.PipeElement.IN_PROGRESS
                    #     self.dbm.save_obj(pipe_e)
                    #     self.make_debug_session(pipe_e)
                    # else:
                    if pipe_e.state == state.PipeElement.PENDING:
                        env = self.select_env_for_script(pipe_e)
                        if env is None:
                            # No worker can run this script -> stop dispatching.
                            return
                        celery_exec_script.apply_async(args=[pipe_e.idx], queue=env)
            elif pipe_e.dtype == dtype.PipeElement.ANNO_TASK:
                if pipe_e.state == state.PipeElement.PENDING:
                    update_anno_task(self.dbm, pipe_e.anno_task.idx)
                    # Email failures must not block the pipeline.
                    try:
                        email.send_annotask_available(self.dbm, pipe_e.anno_task)
                    except:
                        msg = "Could not send Email. \n"
                        msg += traceback.format_exc()
                        self.logger.error(msg)
                    pipe_e.state = state.PipeElement.IN_PROGRESS
                    self.dbm.save_obj(pipe_e)
                self.process_annotask(pipe_e)
            elif pipe_e.dtype == dtype.PipeElement.DATASOURCE:
                pipe_e.state = state.PipeElement.FINISHED
                self.dbm.save_obj(pipe_e)
            elif pipe_e.dtype == dtype.PipeElement.VISUALIZATION:
                pipe_e.state = state.PipeElement.FINISHED
                self.dbm.save_obj(pipe_e)
            elif pipe_e.dtype == dtype.PipeElement.DATA_EXPORT:
                pipe_e.state = state.PipeElement.FINISHED
                self.dbm.save_obj(pipe_e)
            elif pipe_e.dtype == dtype.PipeElement.LOOP:
                self.process_loop(pipe_e)
            self.dbm.commit()
            pipe_e = self.get_next_element()
    def process_pipeline(self):
        """Entry point per scheduling tick. Takes a soft lock on the Pipe row
        so concurrent executors skip it, then advances the state machine.
        The lock is always released, even on exceptions."""
        try:
            p = self.pipe
            # print('Process pipe: {}'.format(self.pipe.name))
            if p.is_locked is None:
                p.is_locked = False
            if not p.is_locked:
                p.is_locked = True
                self.dbm.save_obj(p)
            else:
                # Another executor is already working on this pipe.
                return
            if p.state == state.Pipe.PENDING:
                p.state = state.Pipe.IN_PROGRESS
                self.dbm.save_obj(p)
                self.process_pipe_element()
            elif p.state == state.Pipe.IN_PROGRESS:
                self.process_pipe_element()
            elif p.state == state.Pipe.FINISHED:
                return
            elif p.state == state.Pipe.ERROR:
                self.__report_error(p)
            else:
                raise Exception("Unknown PipeState!")
            p.is_locked = False
            self.dbm.save_obj(p)
        except:
            p.is_locked = False
            self.dbm.save_obj(p)
            raise
    def get_next_element(self):
        """Return the next pipe element that is ready to be processed, or None.

        A None candidate marks the pipeline's end; when all final elements are
        FINISHED the pipe itself is closed and a notification mail is sent.
        """
        pe_wait = None
        for candidate in self.get_to_visit():
            if candidate is None:
                if self.pipe_finished():
                    self.pipe.state = state.Pipe.FINISHED
                    self.pipe.timestamp_finished = datetime.now()
                    self.dbm.save_obj(self.pipe)
                    self.logger.info("%d: Task is finished (Name: %s)"%(self.pipe.idx,
                        self.pipe.name))
                    try:
                        email.send_pipeline_finished(self.pipe)
                    except:
                        msg = "Could not send Email. \n"
                        msg += traceback.format_exc()
                        self.logger.error(msg)
                    return None
                else:
                    continue
            else:
                pe = self.check_candiate(candidate)
                if pe is None:
                    continue
                #If there is a loop under candidates, it should be executed as
                #last possible element. Since a loop will set all other elements
                #within the loop to pending when processed. So if the last element
                #before the loop has subsequent elements. These elements would never
                #be executed since the loop would set the last element in the loop
                #to pending.
                elif pe.dtype == dtype.PipeElement.LOOP:
                    pe_wait = pe
                    continue
                else:
                    self.set_visited(pe)
                    return pe
        return pe_wait
    def pipe_finished(self):
        """True when every final (sink) element of the pipeline is FINISHED."""
        for pe in self.get_final_pes():
            if pe.state != state.PipeElement.FINISHED:
                return False
        return True
    def check_candiate(self, candidate):
        """Return *candidate* when all its predecessors are FINISHED (or, for a
        start element, when it is PENDING); otherwise None."""
        # If all prev elements are finished return candidate
        for pe_prev in self.get_prev_pes(candidate):
            if pe_prev is not None:
                if pe_prev.state != state.PipeElement.FINISHED:
                    return None
                # if pe_prev.state == state.PipeElement.FINISHED:
                #     if candidate.state == state.PipeElement.PENDING:
                #         return candidate
                #     elif candidate.dtype == dtype.PipeElement.ANNOTATION_TASK and\
                #     candidate.state == state.PipeElement.IN_PROGRESS:
                #         return candidate
            else:
                # if pe_prev is None and candidate.state == PENDING
                if candidate.state == state.PipeElement.PENDING:
                    return candidate
        return candidate
    def __report_error(self, pipe):
        """Mail the owner about every unreported script error of *pipe* and
        flag the elements as reported (even when the mail itself fails)."""
        for pipe_element in self.dbm.get_script_errors(pipe.idx):
            # Send mail to inform user about script error.
            try:
                email.send_script_error(pipe, pipe_element)
                pipe_element.error_reported = True
                self.dbm.add(pipe_element)
                self.dbm.commit()
            except:
                pipe_element.error_reported = True
                pipe_element.error_msg += traceback.format_exc()
                self.dbm.add(pipe_element)
                self.dbm.commit()
def gen_run_cmd(program, pipe_e, lostconfig):
    """Build the two-line shell snippet that initializes the Python env and
    then runs the pipe element's script with its element id.

    :param program: interpreter/launcher to use, e.g. 'python3' or 'pudb3'.
    :param pipe_e: pipe element providing script.path and idx.
    :param lostconfig: config providing project_path and py3_init.
    """
    # script = self.dbm.get_script(pipe_e.script_id)
    full_script_path = os.path.join(lostconfig.project_path, pipe_e.script.path)
    return "{}\n{} {} --idx {}".format(
        lostconfig.py3_init, program, full_script_path, pipe_e.idx)
@task
def celery_exec_script(pipe_element_id):
    '''Celery task: execute the script of a pipe element in a subprocess.

    :param pipe_element_id: ID of the PipeElement whose script should run.

    Marks the element IN_PROGRESS, writes a start.sh/debug.sh pair into its
    instance folder, runs start.sh and, on a non-zero exit code, reports the
    script error back to the pipeline.
    '''
    logger = get_task_logger(__name__)
    # fix: pre-bind so the except-handler below cannot hit an unbound name
    # when the failure happens before these are assigned (the old handler
    # raised NameError and masked the original exception in that case).
    dbm = None
    pipe_e = None
    try:
        # Collect context information for celery task
        lostconfig = LOSTConfig()
        dbm = DBMan(lostconfig)
        pipe_e = dbm.get_pipe_element(pipe_e_id=pipe_element_id)
        worker = CurrentWorker(dbm, lostconfig)
        if not worker.enough_resources(pipe_e.script):
            logger.warning('Not enough resources! Rejected {} (PipeElement ID {})'.format(pipe_e.script.path, pipe_e.idx))
            return
        pipe_e.state = state.PipeElement.IN_PROGRESS
        dbm.save_obj(pipe_e)
        file_man = FileMan(lostconfig)
        pipe = pipe_e.pipe
        # Write a pudb3 helper script next to the regular start script.
        cmd = gen_run_cmd("pudb3", pipe_e, lostconfig)
        debug_script_path = os.path.join(file_man.get_instance_path(pipe_e), 'debug.sh')
        with open(debug_script_path, 'w') as sfile:
            sfile.write(cmd)
        cmd = gen_run_cmd("python3", pipe_e, lostconfig)
        start_script_path = os.path.join(file_man.get_instance_path(pipe_e), 'start.sh')
        with open(start_script_path, 'w') as sfile:
            sfile.write(cmd)
        p = subprocess.Popen('bash {}'.format(start_script_path), stdout=subprocess.PIPE,
            stderr=subprocess.PIPE, shell=True)
        logger.info("{} ({}): Started script\n{}".format(pipe.name, pipe.idx, cmd))
        worker.add_script(pipe_e, pipe_e.script)
        out, err = p.communicate()
        # Release the resource booking before the exit code is evaluated.
        worker.remove_script(pipe_e, pipe_e.script)
        if p.returncode != 0:
            raise Exception(err.decode('utf-8'))
        logger.info('{} ({}): Executed script successful: {}'.format(pipe.name,
            pipe.idx, pipe_e.script.path))
        dbm.close_session()
    except Exception:  # fix: was a bare except, which also swallowed SystemExit
        msg = traceback.format_exc()
        logger.error(msg)
        if pipe_e is not None:
            pipe = pipe_e.pipe
            logger.info('{} ({}): Exception occurred in script: {}'.format(pipe.name,
                pipe.idx, pipe_e.script.path))
            script_api.report_script_err(pipe_e, pipe, dbm, msg)
        if dbm is not None:
            dbm.close_session()
| StarcoderdataPython |
11365232 | '''
Classes from the 'CVNLP' framework.
'''
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
# Auto-generated bindings: one module-level name per Objective-C class in the
# CVNLP framework. Each resolves to the ObjC class object, or None when the
# Objective-C runtime (or the class) is unavailable on this platform.
CVNLPTextDecodingContext = _Class('CVNLPTextDecodingContext')
CVNLPDecodingLexicon = _Class('CVNLPDecodingLexicon')
CVNLPCaptionRuntimeReplacements = _Class('CVNLPCaptionRuntimeReplacements')
CVNLPCaptionRuntimeExcludeGenderTrigger = _Class('CVNLPCaptionRuntimeExcludeGenderTrigger')
CVNLPCTCBeamState = _Class('CVNLPCTCBeamState')
CVNLPTextDecodingPath = _Class('CVNLPTextDecodingPath')
CVNLPCTCTextDecodingPath = _Class('CVNLPCTCTextDecodingPath')
CVNLPCaptionPerformance = _Class('CVNLPCaptionPerformance')
CVNLPCaptionPerformanceResult = _Class('CVNLPCaptionPerformanceResult')
CVNLPTokenIDConverter = _Class('CVNLPTokenIDConverter')
CVNLPTextDecoderUtilities = _Class('CVNLPTextDecoderUtilities')
CVNLPTextDecodingResult = _Class('CVNLPTextDecodingResult')
CVNLPTextDecodingResultCandidate = _Class('CVNLPTextDecodingResultCandidate')
CVNLPCaptionRuntimeParameters = _Class('CVNLPCaptionRuntimeParameters')
CVNLPTextDecoder = _Class('CVNLPTextDecoder')
CVNLPCTCTextDecoder = _Class('CVNLPCTCTextDecoder')
CVNLPTextDecodingConfiguration = _Class('CVNLPTextDecodingConfiguration')
CVNLPTextDecodingBeamSearchConfiguration = _Class('CVNLPTextDecodingBeamSearchConfiguration')
BundleHelper = _Class('BundleHelper')
CVNLPCaptionPostProcessingHandler = _Class('CVNLPCaptionPostProcessingHandler')
CVNLPLexiconCursors = _Class('CVNLPLexiconCursors')
CVNLPLexiconCursor = _Class('CVNLPLexiconCursor')
CVNLPActivationMatrix = _Class('CVNLPActivationMatrix')
CVNLPCaptionModelBase = _Class('CVNLPCaptionModelBase')
CVNLPVisionRequestHandler = _Class('CVNLPVisionRequestHandler')
CVNLPCaptionDecoderBlock = _Class('CVNLPCaptionDecoderBlock')
CVNLPCaptionDecoder = _Class('CVNLPCaptionDecoder')
CVNLPCaptionDecoderLSTM = _Class('CVNLPCaptionDecoderLSTM')
CVNLPCaptionDecoderTransformer = _Class('CVNLPCaptionDecoderTransformer')
CVNLPCaptionEncoder = _Class('CVNLPCaptionEncoder')
CVNLPCaptionEncoderTransformer = _Class('CVNLPCaptionEncoderTransformer')
CVNLPCaptionEncoderLSTM = _Class('CVNLPCaptionEncoderLSTM')
CVNLPLanguageResourceBundle = _Class('CVNLPLanguageResourceBundle')
CVNLPInformationStream = _Class('CVNLPInformationStream')
CVNLPDecodingLexicons = _Class('CVNLPDecodingLexicons')
CVNLPDecodingLanguageModel = _Class('CVNLPDecodingLanguageModel')
CVNLPCaptionSensitiveImageParameters = _Class('CVNLPCaptionSensitiveImageParameters')
CVNLPTextDecodingToken = _Class('CVNLPTextDecodingToken')
| StarcoderdataPython |
1704376 | <filename>2019/Day 13/13.py
from icc import ICC
ID_EMPTY = 0
ID_WALL = 1
ID_BLOCK = 2
ID_PADDLE = 3
ID_BALL = 4
def gen_output():
    """Drain the intcode computer: run it until it halts and return every
    value it emitted, in order."""
    collected = []
    value = icc.run()
    while value is not None:
        collected.append(value)
        value = icc.run()
    return collected
def play():
    """Play the breakout game to completion, steering the paddle toward the
    ball, and return the highest score the game reported (the score only
    grows as blocks break, so this is the final score)."""
    triple = []
    paddle_col, ball_col = 0, 0
    joystick = 0
    best_score = 0
    while True:
        out = icc.run(joystick)
        if out is None:
            return best_score
        triple.append(out)
        if len(triple) < 3:
            continue
        # The machine emits output in (x, y, tile_id) triples.
        col, row, tile = triple
        triple = []
        if col == -1 and row == 0:
            # The (-1, 0) pseudo-tile carries the current score.
            best_score = max(best_score, tile)
        if tile == ID_PADDLE:
            paddle_col = col
        elif tile == ID_BALL:
            ball_col = col
        # Tracking policy: push the joystick toward the ball's column.
        if paddle_col > ball_col:
            joystick = -1
        elif paddle_col < ball_col:
            joystick = 1
        else:
            joystick = 0
# Shared intcode computer; gen_output() and play() use this module-level instance.
with open("input.txt") as file:
    icc = ICC(file.read())
# part 1: outputs arrive as (x, y, tile_id) triples, so every third value is a tile id
ids = gen_output()[2::3]
print(ids.count(ID_BLOCK))
# part 2
icc.restart()
icc.intcode[0] = 2 # play for free
score = play()
print(score)
9732802 | <filename>pulse/uix/vtk/vtkSymbols.py<gh_stars>10-100
import vtk
import numpy as np
from pulse.uix.vtk.actor.actorArrow import ActorArrow
from pulse.uix.vtk.actor.actorSpring import ActorSpring
class vtkSymbols:
    """Factory for VTK actors that visualize nodal entities of the structural
    model: elastic links, local element axes, lumped springs/dampers, and
    arrow glyphs for prescribed displacements, loads, rotations and moments.

    Symbol sizes are derived from the project's element size and from the
    structure's principal diagonal, so they scale with the model.
    """
    def __init__(self, project):
        # Project facade: provides the preprocessor (geometry info) and
        # get_element_size() used to scale all symbols.
        self.project = project
    def getElasticLink(self, nodeA, nodeB):
        """Return a single line actor (teal) connecting the two given nodes,
        representing an elastic nodal link."""
        source = vtk.vtkLineSource()
        source.SetPoint1(nodeA.x, nodeA.y, nodeA.z)
        source.SetPoint2(nodeB.x, nodeB.y, nodeB.z)
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(source.GetOutputPort())
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        actor.GetProperty().SetColor(0/255,255/255,152/255)
        return actor
    def arrow(self, start, end):
        """Return an arrow actor placed at *start* and oriented toward *end*,
        scaled by the project's element size.

        Builds an orthonormal frame whose X axis points from start to end
        (Y/Z chosen via a seeded random vector) and applies it as the actor's
        user matrix.
        """
        base_length = self.project.preprocessor.structure_principal_diagonal/10
        arrowSource = vtk.vtkArrowSource()
        startPoint = start
        endPoint = end
        rng = vtk.vtkMinimalStandardRandomSequence()
        rng.SetSeed(8775070) # For testing.
        normalizedX = [0] * 3
        normalizedY = [0] * 3
        normalizedZ = [0] * 3
        vtk.vtkMath.Subtract(endPoint, startPoint, normalizedX)
        length = vtk.vtkMath.Norm(normalizedX)
        vtk.vtkMath.Normalize(normalizedX)
        # Any vector not parallel to X works to seed the cross products below.
        arbitrary = [0] * 3
        for i in range(0, 3):
            rng.Next()
            arbitrary[i] = rng.GetRangeValue(-10, 10)
        vtk.vtkMath.Cross(normalizedX, arbitrary, normalizedZ)
        vtk.vtkMath.Normalize(normalizedZ)
        vtk.vtkMath.Cross(normalizedZ, normalizedX, normalizedY)
        matrix = vtk.vtkMatrix4x4()
        matrix.Identity()
        for i in range(0, 3):
            matrix.SetElement(i, 0, normalizedX[i])
            matrix.SetElement(i, 1, normalizedY[i])
            matrix.SetElement(i, 2, normalizedZ[i])
        transform = vtk.vtkTransform()
        transform.Translate(startPoint)
        transform.Concatenate(matrix)
        #transform.Scale(length, length, length)
        transform.Scale(self.project.get_element_size(), self.project.get_element_size(), self.project.get_element_size())
        # NOTE(review): transformPD is configured but its output is never used;
        # the mapper is fed the raw arrow source and the transform is applied
        # through the actor's user matrix instead.
        transformPD = vtk.vtkTransformPolyDataFilter()
        transformPD.SetTransform(transform)
        transformPD.SetInputConnection(arrowSource.GetOutputPort())
        mapper = vtk.vtkPolyDataMapper()
        actor = vtk.vtkActor()
        mapper.SetInputConnection(arrowSource.GetOutputPort())
        actor.SetUserMatrix(transform.GetMatrix())
        actor.SetMapper(mapper)
        return actor
    def getElementAxe(self, element):
        """Return three arrow actors (red/green/blue = local x/y/z) drawn at
        the element's center along its local coordinate system."""
        center, direction = element.get_local_coordinate_system_info()
        arrows = []
        x = self.arrow(center, center+direction[0])
        x.GetProperty().SetColor(1,0,0)
        y = self.arrow(center, center+direction[1])
        y.GetProperty().SetColor(0,1,0)
        z = self.arrow(center, center+direction[2])
        z.GetProperty().SetColor(0,0,1)
        arrows.append(x)
        arrows.append(y)
        arrows.append(z)
        return arrows
    def getSpring(self, node, u_def=[]):
        """Return spring actors for the translational DOFs that carry a lumped
        stiffness at *node*; [] when none is set.

        v encodes, per DOF, the axis (1/2/3 = x/y/z), its sign (negative
        stiffness flips the symbol) and 0 for "no symbol". Table-valued
        (np.ndarray) stiffnesses keep the default axis entry.
        """
        a = self.getReal(node.get_lumped_stiffness())
        base_length = self.project.preprocessor.structure_principal_diagonal/10
        element_length = self.project.get_element_size()
        # Offset the symbol from the node by the smaller of the two measures.
        if base_length/10 < element_length*1.5:
            shift = base_length/10
        else:
            shift = element_length*1.5
        v = [1,2,3]
        for i in range(0,3):
            try:
                if a[i] is None or a[i] == 0:
                    v[i] = 0
                elif a[i] < 0:
                    v[i] = -1*v[i]
            except Exception as log_error:
                if isinstance(a[i], np.ndarray):
                    pass
                else:
                    print(str(log_error))
        if v.count(0) == 3:
            return []
        arrows = []
        for i in range(3):
            if v[i] == 0:
                continue
            b = ActorSpring(node, self.project.get_element_size(), base_length, xyz=v[i], u_def=u_def)
            b.setShiftValue(shift)
            # NOTE(review): the first color is immediately overridden by the
            # second call -- the orange wins; confirm the magenta line is dead.
            b.setNormalizedColor([1,0,1])
            b.setNormalizedColor([242/255,121/255,0])
            b.build()
            arrows.append(b.getActor())
        return arrows
    def getDamper(self, node, u_def=[]):
        """Return damper actors (arrows without a tip) for the translational
        DOFs that carry a lumped damping at *node*; [] when none is set."""
        a = self.getReal(node.get_lumped_dampings())
        base_length = self.project.preprocessor.structure_principal_diagonal/10
        element_length = self.project.get_element_size()
        if base_length/20 < element_length*1.5:
            shift = base_length/20
        else:
            shift = element_length/2
        # Axis/sign encoding per DOF, same scheme as in getSpring.
        v = [1,2,3]
        for i in range(0,3):
            try:
                if a[i] is None or a[i] == 0:
                    v[i] = 0
                elif a[i] < 0:
                    v[i] = -1*v[i]
            except Exception as log_error:
                if isinstance(a[i], np.ndarray):
                    pass
                else:
                    print(str(log_error))
        if v.count(0) == 3:
            return []
        arrows = []
        for i in range(3):
            if v[i] == 0:
                continue
            a = ActorArrow(node, self.project.get_element_size(), base_length, xyz=v[i], u_def=u_def)
            # NOTE(review): first color is overridden by the second call --
            # the magenta wins here (reverse of getSpring); confirm intent.
            a.setNormalizedColor([242/255,121/255,0])
            a.setNormalizedColor([1,0,1])
            a.setShiftValue(shift)
            a.removeTipLenght()
            a.build()
            arrows.append(a.getActor())
        return arrows
    def getArrowBC(self, node, u_def=[]):
        """Return green shaft-less arrows for the prescribed translational
        displacements at *node*; [] when none is set.

        Unlike the force/spring symbols, a value of 0 is NOT skipped here --
        a zero prescribed displacement (fixed DOF) still gets a symbol; only
        None (unconstrained) is skipped.
        """
        a = self.getReal(node.getStructuralBondaryCondition())
        base_length = self.project.preprocessor.structure_principal_diagonal/10
        element_length = self.project.get_element_size()
        if base_length/20 < element_length/2:
            shift = base_length/20
        else:
            shift = element_length/2
        v = [1,2,3]
        for i in range(0,3):
            try:
                if a[i] is None:
                    v[i] = 0
                elif a[i] < 0:
                    v[i] = -1*v[i]
            except Exception as log_error:
                if isinstance(a[i], np.ndarray):
                    pass
                else:
                    print(str(log_error))
        if v.count(0) == 3:
            return []
        arrows = []
        for i in range(3):
            if v[i] == 0:
                continue
            a = ActorArrow(node, self.project.get_element_size(), base_length, xyz=v[i], u_def=u_def)
            a.removeShaftRadius()
            a.setNormalizedColor([0,1,0])
            a.setShiftValue(shift)
            a.build()
            arrows.append(a.getActor())
        return arrows
    def getArrowForce(self, node):
        """Return red arrows for the prescribed nodal forces (translational
        load components) at *node*; [] when none is set."""
        a = self.getReal(node.get_prescribed_loads())
        base_length = self.project.preprocessor.structure_principal_diagonal/10
        element_length = self.project.get_element_size()
        if base_length/20 < element_length/2:
            shift = base_length/20
        else:
            shift = element_length/2
        v = [1,2,3]
        for i in range(0,3):
            try:
                if a[i] is None or a[i] == 0:
                    v[i] = 0
                elif a[i] < 0:
                    v[i] = -1*v[i]
            except Exception as log_error:
                if isinstance(a[i], np.ndarray):
                    pass
                else:
                    print(str(log_error))
        if v.count(0) == 3:
            return []
        arrows = []
        for i in range(3):
            if v[i] == 0:
                continue
            a = ActorArrow(node, self.project.get_element_size(), base_length, xyz=v[i])
            a.setNormalizedColor([1,0,0])
            a.setShiftValue(shift)
            a.build()
            arrows.append(a.getActor())
        return arrows
    def getArrowRotation(self, node):
        """Return cyan double-headed (two stacked shaft-less arrows) symbols
        for the prescribed rotational DOFs (indices 3..5) at *node*; [] when
        none is set."""
        a = self.getReal(node.getStructuralBondaryCondition())
        base_length = self.project.preprocessor.structure_principal_diagonal/10
        element_length = self.project.get_element_size()
        # Two shifts: one per arrowhead of the double-arrow glyph.
        if base_length/20 < element_length/2:
            shift1 = base_length/20
            shift2 = 2.7*shift1
        else:
            shift1 = element_length/2
            shift2 = 6.5*shift1
        v = [1,2,3]
        for i in range(3,6):
            try:
                if a[i] is None or a[i] == 0:
                    v[i-3] = 0
                elif a[i] < 0:
                    v[i-3] = -1*v[i-3]
            except Exception as log_error:
                if isinstance(a[i], np.ndarray):
                    pass
                else:
                    print(str(log_error))
        if v.count(0) == 3:
            return []
        arrows = []
        for i in range(3):
            if v[i] == 0:
                continue
            a = ActorArrow(node, self.project.get_element_size(), base_length, xyz=v[i])
            a.removeShaftRadius()
            a.setNormalizedColor([0, 1, 1])
            a.setShiftValue(shift1)
            a.build()
            arrows.append(a.getActor())
            b = ActorArrow(node, self.project.get_element_size(), base_length, xyz=v[i])
            b.removeShaftRadius()
            b.setNormalizedColor([0,1,1])
            b.setShiftValue(shift2)
            b.build()
            arrows.append(b.getActor())
        return arrows
    def getArrowMomento(self, node):
        """Return blue double-headed symbols for the prescribed moments
        (rotational load components, indices 3..5) at *node*; [] when none
        is set. The second arrow of each pair has no shaft."""
        a = self.getReal(node.get_prescribed_loads())
        base_length = self.project.preprocessor.structure_principal_diagonal/10
        element_length = self.project.get_element_size()
        if base_length/20 < element_length/2:
            shift1 = base_length/20
            shift2 = 2.7*shift1
        else:
            shift1 = element_length/2
            shift2 = 6.5*shift1
        v = [1,2,3]
        for i in range(3,6):
            try:
                if a[i] is None or a[i] == 0:
                    v[i-3] = 0
                elif a[i] < 0:
                    v[i-3] = -1*v[i-3]
            except Exception as log_error:
                if isinstance(a[i], np.ndarray):
                    pass
                else:
                    print(str(log_error))
        if v.count(0) == 3:
            return []
        arrows = []
        for i in range(3):
            if v[i] == 0:
                continue
            a = ActorArrow(node, self.project.get_element_size(), base_length, xyz=v[i])
            a.setNormalizedColor([0,0,1])
            a.setShiftValue(shift1)
            a.build()
            arrows.append(a.getActor())
            b = ActorArrow(node, self.project.get_element_size(), base_length, xyz=v[i])
            b.setNormalizedColor([0,0,1])
            b.setShiftValue(shift2)
            b.removeShaftRadius()
            b.build()
            arrows.append(b.getActor())
        return arrows
    def getReal(self, vector):
        """Return a copy of *vector* with every complex entry replaced by its
        real part (non-complex entries, including None/ndarray, pass through)."""
        new_vector = vector.copy()
        for i in range(len(vector)):
            if type(vector[i]) == complex:
                new_vector[i] = vector[i].real
        return new_vector
391297 | <filename>subgroup_analysis/WhiteSubset/run_RandomForest.py
import os
import sys
import numpy as np
import argparse
from easydict import EasyDict as edict
from tqdm import trange
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier as rfc
# Make the fNIRS helper modules importable before the project imports below.
YOUR_PATH = os.environ['YOUR_PATH']
sys.path.insert(0, os.path.join(YOUR_PATH, 'fNIRS-mental-workload-classifiers/helpers'))
import models
import brain_data
from utils import SubgroupAnalysisWhite_GetTrainValTestSubjects, seed_everything, featurize, makedir_if_not_exist, plot_confusion_matrix, save_pickle, write_performance_info_FixedTrainValSplit
# CLI options for the white-subgroup random forest experiment.
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=0, type=int, help='random seed')
parser.add_argument('--data_dir', default='../data/Leon/Visual/size_2sec_10ts_stride_3ts/', help='folder to the train data')
parser.add_argument('--window_size', default=200, type=int, help='window size')
parser.add_argument('--result_save_rootdir', default='./experiments', help='folder to the result')
parser.add_argument('--classification_task', default='four_class', help='binary or four-class classification')
parser.add_argument('--setting', default='seed1', help='which predefined train val test split scenario')
def train_classifier(args_dict, train_subjects, val_subjects, test_subjects_URG, test_subjects_WHITE, test_subjects_ASIAN):
    """Grid-search Random Forests on pooled train subjects and evaluate per test subject.

    For every (max_features, min_samples_leaf) combination a RandomForestClassifier
    is fit on the concatenated data of all train subjects, scored on the pooled
    validation subjects, and then scored on the *second half* of each test
    subject's chunks.  Accuracies, predicted logits, confusion-matrix figures and
    pickled result dicts are written under
    ``args_dict.result_save_rootdir/<subject>/<experiment_name>/``.

    Only 'binary' classification is currently supported; anything else raises
    NameError.
    """
    #convert to string list
    train_subjects = [str(i) for i in train_subjects]
    val_subjects = [str(i) for i in val_subjects]
    test_subjects_URG = [str(i) for i in test_subjects_URG]
    test_subjects_WHITE = [str(i) for i in test_subjects_WHITE]
    test_subjects_ASIAN = [str(i) for i in test_subjects_ASIAN]
    #combine the test subjects from the three demographic groups
    test_subjects = test_subjects_URG + test_subjects_WHITE + test_subjects_ASIAN
    #parse args:
    data_dir = args_dict.data_dir
    window_size = args_dict.window_size  # NOTE: read but not otherwise used in this function
    result_save_rootdir = args_dict.result_save_rootdir
    classification_task = args_dict.classification_task
    # Expected number of chunks per subject file for this window size.
    num_chunk_this_window_size = 1488
    if classification_task == 'binary':
        data_loading_function = brain_data.read_subject_csv_binary
        confusion_matrix_figure_labels = ['0back', '2back']
    # Four-class support is currently disabled (see the argparse default above).
    # elif classification_task == 'four_class':
    #     data_loading_function = brain_data.read_subject_csv
    #     confusion_matrix_figure_labels = ['0back', '1back', '2back', '3back']
    else:
        raise NameError('not supported classification type')
    #create the group train data
    group_model_sub_train_feature_list = []
    group_model_sub_train_label_list = []
    for subject in train_subjects:
        sub_feature, sub_label = data_loading_function(os.path.join(data_dir, 'sub_{}.csv'.format(subject)), num_chunk_this_window_size=num_chunk_this_window_size)
        group_model_sub_train_feature_list.append(sub_feature)
        group_model_sub_train_label_list.append(sub_label)
    group_model_sub_train_feature_array = np.concatenate(group_model_sub_train_feature_list, axis=0).astype(np.float32)
    group_model_sub_train_label_array = np.concatenate(group_model_sub_train_label_list, axis=0)
    transformed_group_model_sub_train_feature_array = featurize(group_model_sub_train_feature_array, classification_task)
    #create the group val data
    group_model_sub_val_feature_list = []
    group_model_sub_val_label_list = []
    for subject in val_subjects:
        sub_feature, sub_label = data_loading_function(os.path.join(data_dir, 'sub_{}.csv'.format(subject)), num_chunk_this_window_size=num_chunk_this_window_size)
        group_model_sub_val_feature_list.append(sub_feature)
        group_model_sub_val_label_list.append(sub_label)
    group_model_sub_val_feature_array = np.concatenate(group_model_sub_val_feature_list, axis=0).astype(np.float32)
    group_model_sub_val_label_array = np.concatenate(group_model_sub_val_label_list, axis=0)
    transformed_group_model_sub_val_feature_array = featurize(group_model_sub_val_feature_array, classification_task)
    #hyperparameter grid for the Random Forest
    # NOTE(review): the last entry 0.1 breaks the otherwise increasing
    # 0.166/0.333/0.667 progression — possibly meant to be 1.0; confirm.
    max_features_list = [0.166, 0.333, 0.667, 0.1]
    min_samples_leaf_list = [4, 16, 64]
    for max_features in max_features_list:
        for min_samples_leaf in min_samples_leaf_list:
            experiment_name = 'MaxFeatures{}_MinSamplesLeaf{}'.format(max_features, min_samples_leaf)
            #create test subjects dict
            test_subjects_dict = dict()
            for test_subject in test_subjects:
                #load this subject's test data
                sub_feature_array, sub_label_array = data_loading_function(os.path.join(data_dir, 'sub_{}.csv'.format(test_subject)), num_chunk_this_window_size=num_chunk_this_window_size)
                sub_data_len = len(sub_label_array)
                assert sub_data_len == int(num_chunk_this_window_size/2), 'subject {} len is not {} for binary classification'.format(test_subject, int(num_chunk_this_window_size/2))
                half_sub_data_len = int(sub_data_len/2)
                print('half_sub_data_len: {}'.format(half_sub_data_len), flush=True)
                # Only the second half of each test subject's chunks is used for testing.
                sub_test_feature_array = sub_feature_array[half_sub_data_len:]
                transformed_sub_test_feature_array = featurize(sub_test_feature_array, classification_task)
                sub_test_label_array = sub_label_array[half_sub_data_len:]
                #create the dict for this subject:
                #each subject's dict has: 'transformed_sub_test_feature_array', 'sub_test_label_array',
                #                         'result_save_subjectdir', 'result_save_subject_checkpointdir',
                #                         'result_save_subject_predictionsdir', 'result_save_subject_resultanalysisdir'
                #                         'result_save_subject_trainingcurvedir', 'result_save_dict',
                test_subjects_dict[test_subject] = dict()
                test_subjects_dict[test_subject]['transformed_sub_test_feature_array'] = transformed_sub_test_feature_array
                test_subjects_dict[test_subject]['sub_test_label_array'] = sub_test_label_array
                #derived args: output directory layout for this subject/experiment
                result_save_subjectdir = os.path.join(result_save_rootdir, test_subject, experiment_name)
                result_save_subject_checkpointdir = os.path.join(result_save_subjectdir, 'checkpoint')
                result_save_subject_predictionsdir = os.path.join(result_save_subjectdir, 'predictions')
                result_save_subject_resultanalysisdir = os.path.join(result_save_subjectdir, 'result_analysis')
                result_save_subject_trainingcurvedir = os.path.join(result_save_subjectdir, 'trainingcurve')
                makedir_if_not_exist(result_save_subjectdir)
                makedir_if_not_exist(result_save_subject_checkpointdir)
                makedir_if_not_exist(result_save_subject_predictionsdir)
                makedir_if_not_exist(result_save_subject_resultanalysisdir)
                makedir_if_not_exist(result_save_subject_trainingcurvedir)
                test_subjects_dict[test_subject]['result_save_subjectdir'] = result_save_subjectdir
                test_subjects_dict[test_subject]['result_save_subject_checkpointdir'] = result_save_subject_checkpointdir
                test_subjects_dict[test_subject]['result_save_subject_predictionsdir'] = result_save_subject_predictionsdir
                test_subjects_dict[test_subject]['result_save_subject_resultanalysisdir'] = result_save_subject_resultanalysisdir
                test_subjects_dict[test_subject]['result_save_subject_trainingcurvedir'] = result_save_subject_trainingcurvedir
                test_subjects_dict[test_subject]['result_save_dict'] = dict()
            #create Random Forest classifier and fit on the pooled train data
            model = rfc(max_features=max_features, min_samples_leaf=min_samples_leaf).fit(transformed_group_model_sub_train_feature_array, group_model_sub_train_label_array)
            # val performance (accuracy in percent)
            val_accuracy = model.score(transformed_group_model_sub_val_feature_array, group_model_sub_val_label_array) * 100
            # test performance per subject
            for test_subject in test_subjects:
                test_subjects_dict[test_subject]['result_save_dict']['bestepoch_val_accuracy'] = val_accuracy
                test_accuracy = model.score(test_subjects_dict[test_subject]['transformed_sub_test_feature_array'], test_subjects_dict[test_subject]['sub_test_label_array']) * 100
                test_logits = model.predict_proba(test_subjects_dict[test_subject]['transformed_sub_test_feature_array'])
                test_class_predictions = test_logits.argmax(1)
                test_subjects_dict[test_subject]['result_save_dict']['bestepoch_test_accuracy'] = test_accuracy
                test_subjects_dict[test_subject]['result_save_dict']['bestepoch_test_logits'] = test_logits
                test_subjects_dict[test_subject]['result_save_dict']['bestepoch_test_class_labels'] = test_subjects_dict[test_subject]['sub_test_label_array']
                plot_confusion_matrix(test_class_predictions, test_subjects_dict[test_subject]['sub_test_label_array'], confusion_matrix_figure_labels, test_subjects_dict[test_subject]['result_save_subject_resultanalysisdir'], 'test_confusion_matrix.png')
                save_pickle(test_subjects_dict[test_subject]['result_save_subject_predictionsdir'], 'result_save_dict.pkl', test_subjects_dict[test_subject]['result_save_dict'])
                #write performance to txt file
                write_performance_info_FixedTrainValSplit('NA', test_subjects_dict[test_subject]['result_save_subject_resultanalysisdir'], val_accuracy, test_accuracy)
if __name__=='__main__':
    #parse args
    args = parser.parse_args()
    seed = args.seed
    data_dir = args.data_dir
    window_size = args.window_size
    result_save_rootdir = args.result_save_rootdir
    classification_task = args.classification_task
    setting = args.setting
    # Predefined train/val/test split for the White-subset subgroup analysis.
    train_subjects, val_subjects, test_subjects_URG, test_subjects_WHITE, test_subjects_ASIAN = SubgroupAnalysisWhite_GetTrainValTestSubjects(setting)
    #sanity check: echo parsed arguments
    print('data_dir: {}, type: {}'.format(data_dir, type(data_dir)))
    print('window_size: {}, type: {}'.format(window_size, type(window_size)))
    print('result_save_rootdir: {}, type: {}'.format(result_save_rootdir, type(result_save_rootdir)))
    print('classification_task: {}, type: {}'.format(classification_task, type(classification_task)))
    print('setting: {} type: {}'.format(setting, type(setting)))
    # Bundle settings into an attribute-style dict for train_classifier.
    args_dict = edict()
    args_dict.data_dir = data_dir
    args_dict.window_size = window_size
    args_dict.result_save_rootdir = result_save_rootdir
    args_dict.classification_task = classification_task
    # Seed all RNGs for reproducibility before training.
    seed_everything(seed)
    train_classifier(args_dict, train_subjects, val_subjects, test_subjects_URG, test_subjects_WHITE, test_subjects_ASIAN)
| StarcoderdataPython |
8086093 | <reponame>tirkarthi/python-cybox
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from mixbox.vendor.six import u
from cybox.objects.win_hook_object import WinHook
from cybox.test.objects import ObjectTestCase
from cybox.test.objects.win_handle_test import TestWinHandle
class TestWinHook(ObjectTestCase, unittest.TestCase):
    """Round-trip tests for the WinHook CybOX object (test logic lives in ObjectTestCase)."""
    # xsi type name of the Windows Hook object in the CybOX schema.
    object_type = "WindowsHookObjectType"
    # Binding class under test.
    klass = WinHook
    # Fully-populated dictionary representation exercised by ObjectTestCase.
    _full_dict = {
        'type': u("Test Hook"),
        'handle': TestWinHandle._full_dict,
        'hooking_function_name': u("test_function"),
        #TODO: add 'hooking_module'
        'thread_id': 2,
        'xsi:type': object_type,
    }
if __name__ == "__main__":
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| StarcoderdataPython |
9689778 | <filename>src/cogs/server_management/management_core.py<gh_stars>1-10
from __future__ import annotations
from typing import Optional
from src.single_guild_bot import SingleGuildBot as Bot
from src.custom_help_command import CommandWithDocs
from discord import Member
from discord.ext import commands, tasks
from discord.ext.commands import Context
import discord.errors
from src.cogs.access_levels import *
from .punishments import *
from .punishments import WarnPunishment
from src.collection_handlers import ActivePunishments, PunishmentRegistry
import bson
# How often (in minutes) the background task checks for punishments to lift.
UNPUNISH_LOOP_DURATION_MINUTES = 1
class View:
    """Builds the Discord embeds that report punishment statistics."""

    def active(self, amounts: dict) -> discord.Embed:
        """Embed summarising the currently active punishments per type."""
        payload = {"title": "Amount of active punishments"}
        payload |= self._fields_from_types(amounts)
        return discord.Embed.from_dict(payload)

    def registry(self, amounts: dict) -> discord.Embed:
        """Embed summarising all recorded punishments per type."""
        payload = {"title": "Amount of punishments"}
        payload |= self._fields_from_types(amounts)
        return discord.Embed.from_dict(payload)

    def registry_per_member(self, amounts: dict, member: Member) -> discord.Embed:
        """Embed summarising one member's recorded punishments, with their avatar."""
        payload = {"title": f"Amount of punishments for user {member.display_name}"}
        payload |= self._fields_from_types(amounts)
        embed = discord.Embed.from_dict(payload)
        embed.set_thumbnail(url=member.avatar_url)
        return embed

    @staticmethod
    def _fields_from_types(amounts: dict) -> dict:
        """Turn a {type: amount} mapping into the embed 'fields'/'color' payload."""
        embed_fields = []
        for punishment_type, amount in amounts.items():
            embed_fields.append({"name": punishment_type, "value": amount, "inline": True})
        return {
            "fields": embed_fields,
            "color": discord.Color.red().value,
        }
class ServerManagement(commands.Cog):
    """Moderation cog: issuing punishments, lifting expired ones, and reporting stats."""

    def __init__(
        self, bot: Bot, active: ActivePunishments, registry: PunishmentRegistry
    ):
        self.bot = bot
        self.database_info = View()
        self.registry = registry
        self.active = active
        # Start the background task that lifts expired timed punishments.
        self.lift_punishments.start()

    @tasks.loop(minutes=UNPUNISH_LOOP_DURATION_MINUTES)
    async def lift_punishments(self):
        """Periodically lift timed punishments whose duration has expired."""
        records = await self.active.get_to_deactivate()
        async for record in records:
            punishment_type = timed_punishment_from_id.get(record["punishment_id"])
            await punishment_type.unpunish(record["user_id"], self.bot)
        await self.active.deactivate()

    async def record_punishment(self, punishment: Punishment) -> None:
        """Persist a punishment (registry, and active store if timed) and log it."""
        data = punishment.encode_to_mongo()
        _id = bson.ObjectId()
        to_registry = data.get("registry")
        to_active = data.get("active")
        if to_active is not None:
            await self.active.new_punishment(_id, to_active)
        await self.registry.new_punishment(_id, to_registry)
        await self.bot.admin_log(
            f"**Punished user** <{punishment.to_punish.id}> ({punishment.to_punish.display_name}) with"
            f" {punishment.punishment_id.value}\n"
            f" **Punished by:** {punishment.punished_by.mention}\n"
            f" **Reason:** {punishment.reason}\n"
            f" **Punishment registry id:** {_id}"
        )

    @property
    async def muted_role(self) -> discord.Role:
        """The guild's configured muted role."""
        return (await self.bot.the_guild).get_role(Roles.MUTED.value)

    @commands.Cog.listener()
    async def on_member_join(self, member: discord.Member):
        """Re-apply the muted role to members who rejoin while still muted."""
        if await self.active.has_active_mute(member.id):
            await member.add_roles(await self.muted_role)

    @commands.command(cls=CommandWithDocs)
    @commands.has_any_role(*ACCESS_LEVEL_2)
    async def warn(self, ctx: Context, user: Member, *, warn: Optional[WarnPunishment]):
        """Warn a member; record and apply the warning."""
        await ctx.message.delete()
        if not warn:
            warn = WarnPunishment(user, ctx.author)
        await self.record_punishment(warn)
        await warn.punish(ctx)

    @commands.command(cls=CommandWithDocs)
    @commands.has_any_role(*ACCESS_LEVEL_2)
    async def mute(self, ctx: Context, user: Member, *, mute: Optional[MutePunishment]):
        """Mute a member; record and apply the mute."""
        await ctx.message.delete()
        if not mute:
            mute = MutePunishment(user, ctx.author)
        await self.record_punishment(mute)
        await mute.punish(ctx)

    @commands.command(cls=CommandWithDocs)
    @commands.has_any_role(*ACCESS_LEVEL_2)
    async def kick(self, ctx: Context, user: Member, *, kick: Optional[KickPunishment]):
        """Kick a member; record and apply the kick."""
        await ctx.message.delete()
        if not kick:
            kick = KickPunishment(user, ctx.author)
        await self.record_punishment(kick)
        await kick.punish(ctx)

    @commands.command(cls=CommandWithDocs)
    @commands.has_any_role(*ACCESS_LEVEL_2)
    async def ban(self, ctx: Context, user: Member, *, ban: Optional[BanPunishment]):
        """Ban a member; record and apply the ban."""
        await ctx.message.delete()
        if not ban:
            ban = BanPunishment(user, ctx.author)
        await self.record_punishment(ban)
        await ban.punish(ctx)

    @commands.command(cls=CommandWithDocs)
    @commands.has_any_role(*ACCESS_LEVEL_2)
    async def permaban(
        self, ctx: Context, user: Member, *, ban: Optional[PermaBanPunishment]
    ):
        """Permanently ban a member; record and apply the ban."""
        await ctx.message.delete()
        if not ban:
            ban = PermaBanPunishment(user, ctx.author)
        await self.record_punishment(ban)
        await ban.punish(ctx)

    @commands.group(aliases=("pun", "p", "punish"))
    @commands.has_any_role(*ACCESS_LEVEL_2)
    async def punishment(self, ctx: Context) -> None:
        """Parent group for punishment database queries."""
        if ctx.invoked_subcommand is None:
            # Fixed: the closing code fence was missing its third backtick.
            await ctx.channel.send(
                "```Options: /punishment info, /punishment active, /punishment registry```"
            )

    @punishment.command(cls=CommandWithDocs)
    async def info(self, ctx: Context, registry_id: str) -> None:
        """Show the stored registry record for a punishment id."""
        try:
            record = await self.registry.get_info(registry_id)
        except bson.errors.InvalidId:
            await ctx.channel.send(f"```dts\n# Invalid Punishment ID\n```")
        else:
            if record is None:
                await ctx.channel.send(f"```dts\n# This document doesn't exist\n```")
            else:
                embed_string = "\n".join(
                    [f"**{k}** : {v}" for k, v in record.items() if k != "_id"]
                )
                embed = discord.Embed(
                    title=f"Punishment id {registry_id}",
                    description=embed_string,
                    colour=discord.Color.red(),
                )
                await ctx.channel.send(embed=embed)

    @punishment.group()
    async def active(self, ctx: Context) -> None:
        """Group for active-punishment statistics; bare invocation shows the total."""
        if ctx.invoked_subcommand is None:
            amount = await self.active.count_total_amount()
            await ctx.channel.send(f"```\nTotal amount of punishments is {amount}\n```")

    @active.command()
    async def type(self, ctx: Context):
        """Break active punishments down by type."""
        amounts = await self.active.count_all_types()
        await ctx.send(embed=self.database_info.active(amounts))

    @punishment.group()
    async def registry(self, ctx: Context) -> None:
        """Group for registry statistics; bare invocation shows the total."""
        if ctx.invoked_subcommand is None:
            amount = await self.registry.count_total_amount()
            await ctx.channel.send(f"```\nTotal amount of punishments is {amount}\n```")

    @registry.command()
    async def user(self, ctx: Context, user: Member):
        """Break one member's recorded punishments down by type."""
        amounts = await self.registry.count_all_by_user(user.id)
        await ctx.send(embed=self.database_info.registry_per_member(amounts, user))

    @registry.command(aliases=("type", "types"))
    async def _type(self, ctx: Context):
        """Break all recorded punishments down by type."""
        amounts = await self.registry.count_all_type()
        await ctx.send(embed=self.database_info.registry(amounts))
| StarcoderdataPython |
9622846 | <filename>streamer/scripts/replay.py<gh_stars>10-100
"""
Replay tweets from stdin (or a file) with a fixed delay,
or by examining timestamps on original tweets and using a delay delta
based on time between last and next tweet.
How to do continuous play if coming from stdin?
Or is that only possible if coming from a named file?
- http://mail.python.org/pipermail/tutor/2003-May/022520.html
"""
import datetime
import json
import logging
import os
import sys
import time
logger = logging.getLogger(__name__)
# strptime/strftime pattern for Twitter's classic timestamp style,
# e.g. "Wed Aug 27 13:08:45 +0000 2008".
TWITTER_TIME_FORMAT = "%a %b %d %H:%M:%S +0000 %Y"


def parse_twitter_time(time_str):
    """Parse a Twitter ``created_at`` string into a naive datetime."""
    parsed = datetime.datetime.strptime(time_str, TWITTER_TIME_FORMAT)
    return parsed


def datetime_to_unixtime(dt):
    """Convert a datetime to a Unix timestamp (float seconds).

    NOTE(review): time.mktime interprets the time tuple in *local* time, so
    for naive UTC datetimes the result is offset by the local UTC offset —
    confirm callers expect that.
    """
    return time.mktime(dt.timetuple())


def datetime_to_twitter_time_string(dt):
    """Format a datetime back into the "Wed Aug 27 13:08:45 +0000 2008" style."""
    return dt.strftime(TWITTER_TIME_FORMAT)
def _init_logger(level):
    """Configure root logging with a timestamped format and set this module's level.

    *level* may be anything Logger.setLevel accepts: a numeric level or a level
    name such as "WARN" or "INFO".

    (Removed the unused ``from logging import _checkLevel`` — a private API that
    was never referenced.)
    """
    FORMAT = "%(asctime)-15s %(message)s"
    logging.basicConfig(format=FORMAT)
    logger.setLevel(level)
# Initialise logging from the LOGLEVEL env var (defaults to WARN).
_init_logger(os.environ.get("LOGLEVEL", "WARN"))
# Timestamp of the previously replayed tweet; None until the first is seen.
last_time = None
start_time = datetime.datetime.utcnow()
replay_mult = 10.0 # replay at x * normal speed
sleep_factor = 1.0 / replay_mult
# Replay loop: read one JSON status per line from stdin, sleep to reproduce the
# original inter-tweet gaps (scaled by sleep_factor), then echo the line.
for line in sys.stdin:
    if not line.strip():
        # Line is empty, continue.
        logger.debug("Line is empty, continuing...")
        continue
    try:
        status = json.loads(line)
    except json.JSONDecodeError:
        # logger.warn is deprecated; use logger.warning.
        logger.warning("Error parsing line '%s', continuing", line)
        continue
    except Exception:
        logger.exception("Fatal error parsing %s.", line)
        sys.exit(1)
    # If first item, emit it immediately.
    # Else, calculate time delta from last status and delay the appropriate amount of time,
    # if any is required.
    if not last_time:
        last_time = parse_twitter_time(status.get("created_at"))
    else:
        # get time delta in seconds.
        created_at = status.get("created_at")
        # Fixed: previously a missing "created_at" made the datetime last_time
        # itself the argument to parse_twitter_time(), which would raise.
        current = parse_twitter_time(created_at) if created_at else last_time
        delta = current - last_time
        sleep_time = delta.total_seconds()
        if sleep_time < 0:
            logger.warning(
                "sleep_time (%f) outside bounds for tweet id %s"
                % (sleep_time, status.get("id_str"))
            )
            sleep_time = 0
        # Clamp sleep_time to 0 <= sleep_time <= MAX_SLEEP_TIME
        # sleep_time = max(0, min(sleep_time, MAX_SLEEP_TIME))
        last_time = current
        time.sleep(sleep_time * sleep_factor)
    sys.stdout.write(line)
    sys.stdout.flush()
| StarcoderdataPython |
273579 | from django.conf.urls import url
from evemansys.backend.views import CreateEventWizard
from . import views
# URL routes: a dashboard landing page and the multi-step event-creation wizard.
urlpatterns = [
    url(regex=r'^$', view=views.dashboard, name='Dashboard'),
    url(r'^create_event/$', CreateEventWizard.as_view(), name='create-event'),
]
| StarcoderdataPython |
9678921 | # Project Imports
from typing import Optional
from pylidar_slam.common.geometry import projection_map_to_points, mask_not_null
from pylidar_slam.common.pose import Pose
from pylidar_slam.common.projection import Projector
from pylidar_slam.common.utils import check_sizes, remove_nan, modify_nan_pmap
from pylidar_slam.common.modules import _with_viz3d
from pylidar_slam.dataset import DatasetLoader
from pylidar_slam.odometry.alignment import RigidAlignmentConfig, RIGID_ALIGNMENT, RigidAlignment
from pylidar_slam.odometry.initialization import InitializationConfig, INITIALIZATION, Initialization
from pylidar_slam.odometry.odometry import *
from pylidar_slam.odometry.local_map import LOCAL_MAP, LocalMapConfig, LocalMap
from pylidar_slam.preprocessing.preprocessing import PreprocessingConfig, Preprocessing
from pylidar_slam.viz.color_map import *
if _with_viz3d:
from viz3d.window import OpenGLWindow
# ----------------------------------------------------------------------------------------------------------------------
@dataclass
class ICPFrameToModelConfig(OdometryConfig):
    """
    The Configuration for the Point-To-Plane ICP based Iterative Least Square estimation of the pose
    """
    algorithm: str = "icp_F2M"
    device: str = "cpu"
    pose: str = "euler"
    # Maximum number of ICP iterations per frame.
    max_num_alignments: int = 100
    # Config for the Initialization
    initialization: InitializationConfig = MISSING
    # Config for the Local Map
    local_map: LocalMapConfig = MISSING
    # Config for the Rigid Alignment
    alignment: RigidAlignmentConfig = MISSING
    # ICP convergence tolerance on the norm of the incremental pose parameters.
    threshold_delta_pose: float = 1.e-4
    # Map-update gates: minimum translation (norm of the translation parameters)
    # and rotation (degrees) accumulated since the last map insertion.
    threshold_trans: float = 0.1
    threshold_rot: float = 0.3
    # NOTE(review): sigma is not referenced anywhere in this file — presumably
    # consumed by the alignment component; confirm before removing.
    sigma: float = 0.1
    # The data key which is used to search into the data dictionary for the pointcloud to register onto the new frame
    data_key: str = "vertex_map"
    viz_debug: bool = False  # Whether to display the FM in a window (if exists)
    # Visualization parameters
    viz_with_edl: bool = True
    viz_num_pcs: int = 50
class ICPFrameToModel(OdometryAlgorithm):
    """
    OdometryAlgorithm based on the ICP-registration
    """
    def __init__(self, config: ICPFrameToModelConfig,
                 projector: Projector = None, pose: Pose = Pose("euler"),
                 device: torch.device = torch.device("cpu"), **kwargs):
        """Builds the odometry: loads the initialization, local map and rigid
        alignment components from the config and resets the per-sequence state."""
        OdometryAlgorithm.__init__(self, config)
        assert_debug(projector is not None)
        self.pose = pose
        self.projector = projector
        self.device = device
        # --------------------------------
        # Loads Components from the Config
        self._motion_model: Initialization = INITIALIZATION.load(self.config.initialization,
                                                                 pose=self.pose, device=device)
        self.local_map: LocalMap = LOCAL_MAP.load(self.config.local_map,
                                                  pose=self.pose, projector=projector)
        self.config.alignment.pose = self.pose.pose_type
        self.rigid_alignment: RigidAlignment = RIGID_ALIGNMENT.load(self.config.alignment, pose=self.pose)
        # self._post_processing:
        # -----------------------
        # Optimization Parameters
        self.gn_max_iters = self.config.max_num_alignments
        self._sample_pointcloud: bool = False
        # ---------------------
        # Local state variables
        self.relative_poses: list = []
        self.absolute_poses: list = []  # Absolute poses (/!\ type: np.float64)
        self.gt_poses: Optional[np.ndarray] = None  # Ground Truth poses
        self._iter = 0
        self._tgt_vmap: torch.Tensor = None
        self._tgt_pc: torch.Tensor = None
        self._tgt_nmap: torch.Tensor = None
        self._delta_since_map_update = None  # delta pose since last estimate update
        self._register_threshold_trans = self.config.threshold_trans
        self._register_threshold_rot = self.config.threshold_rot
        self.viz3d_window: Optional[OpenGLWindow] = None
        self._has_window = config.viz_debug and _with_viz3d
    def __del__(self):
        # Close the debug window (if any) when the odometry object is collected.
        if self._has_window:
            if self.viz3d_window is not None:
                self.viz3d_window.close(True)
    def init(self):
        """Initialize/ReInitialize the state of the Algorithm and its components"""
        super().init()
        self.relative_poses = []
        self.absolute_poses = []
        self.gt_poses = None
        self.local_map.init()
        self._motion_model.init()
        self._iter = 0
        self._delta_since_map_update = torch.eye(4, dtype=torch.float32, device=self.device).reshape(1, 4, 4)
        if self._has_window:
            # Recreate the visualization window from scratch.
            if self.viz3d_window is not None:
                self.viz3d_window.close(True)
                self.viz3d_window = None
            self.viz3d_window = OpenGLWindow(
                engine_config={"with_edl": self.config.viz_with_edl, "edl_strength": 1000.0})
            self.viz3d_window.init()
    # ------------------------------------------------------------------------------------------------------------------
    def do_process_next_frame(self, data_dict: dict):
        """
        Processes a new frame
        Estimates the motion for the new frame, and update the states of the different components
        (Local Map, Initialization)
        Args:
            data_dict (dict): The input frame to be processed.
                              The key 'self.config.data_key' is required
        """
        # Reads the input frame
        self._read_input(data_dict)
        if self._iter == 0:
            # Initiate the map with the first frame
            relative_pose = torch.eye(4, dtype=torch.float32,
                                      device=self._tgt_vmap.device).unsqueeze(0)
            self.local_map.update(relative_pose,
                                  new_vertex_map=self._tgt_vmap)
            self.relative_poses.append(relative_pose.cpu().numpy())
            self.absolute_poses.append(relative_pose.cpu().to(torch.float64).numpy()[0])
            self._iter += 1
            return
        # Extract initial estimate
        initial_estimate = self._motion_model.next_initial_pose(data_dict)
        sample_points = self.sample_points()
        # Registers the new frame onto the map
        new_rpose_params, new_rpose, losses = self.register_new_frame(sample_points,
                                                                      initial_estimate,
                                                                      data_dict=data_dict)
        # Update initial estimate
        self.update_initialization(new_rpose, data_dict)
        self.__update_map(new_rpose, data_dict)
        # Update Previous pose
        np_new_rpose = new_rpose.cpu().numpy()
        self.relative_poses.append(np_new_rpose)
        # Chain the new relative pose onto the last absolute pose (float64 for accuracy).
        latest_pose = self.absolute_poses[-1].dot(
            self.pose.build_pose_matrix(new_rpose_params.cpu().to(torch.float64).reshape(1, 6))[0].numpy())
        self.absolute_poses.append(latest_pose)
        tgt_np_pc = self._tgt_pc.cpu().numpy().reshape(-1, 3)
        if self._has_window:
            # Add Ground truth poses (mainly for visualization purposes)
            if DatasetLoader.absolute_gt_key() in data_dict:
                pose_gt = data_dict[DatasetLoader.absolute_gt_key()].reshape(1, 4, 4).cpu().numpy()
                self.gt_poses = pose_gt if self.gt_poses is None else np.concatenate(
                    [self.gt_poses, pose_gt], axis=0)
            # Apply absolute pose to the pointcloud
            world_points = np.einsum("ij,nj->ni", latest_pose[:3, :3].astype(np.float32), tgt_np_pc)
            world_points += latest_pose[:3, 3].reshape(1, 3).astype(np.float32)
            self.viz3d_window.set_pointcloud(self._iter % self.config.viz_num_pcs, world_points)
            # Follow Camera
            camera_pose = latest_pose.astype(np.float32).dot(np.array([[1.0, 0.0, 0.0, 0.0],
                                                                      [0.0, 1.0, 0.0, 0.0],
                                                                      [0.0, 0.0, 1.0, 60.0],
                                                                      [0.0, 0.0, 0.0, 1.0]], dtype=np.float32))
            self.viz3d_window.update_camera(camera_pose)
            if self.gt_poses is not None and len(self.gt_poses) > 0:
                # Update Pose to the pointcloud
                self.viz3d_window.set_poses(-1, self.gt_poses.astype(np.float32))
        # Update Dictionary with pointcloud and pose
        data_dict[self.pointcloud_key()] = tgt_np_pc
        data_dict[self.relative_pose_key()] = np_new_rpose.reshape(4, 4)
        self._iter += 1
    def register_new_frame(self,
                           target_points: torch.Tensor,
                           initial_estimate: Optional[torch.Tensor] = None,
                           data_dict: Optional[dict] = None,
                           **kwargs) -> (torch.Tensor, torch.Tensor, torch.Tensor):
        """
        Registers a new frame against the Local Map
        Args:
            target_points (torch.Tensor): The target Ver
            initial_estimate (Optional[torch.Tensor]): The initial motion estimate for the ICP
            data_dict (dict): The dictionary containing the data of the new frame
        Returns
            pose_matrix (torch.Tensor): The relative pose between the current frame and the map `(1, 4, 4)`
            (also the pose parameters `(1, 6)` and the list of per-iteration summed residuals)
        """
        new_pose_matrix = initial_estimate
        new_pose_params = torch.zeros(self.pose.num_params(), device=target_points.device, dtype=target_points.dtype)
        if initial_estimate is None:
            new_pose_matrix = torch.eye(4, device=target_points.device,
                                        dtype=target_points.dtype).unsqueeze(0)
        losses = []
        old_target_points = target_points
        for _ in range(self.gn_max_iters):
            # Each iteration re-transforms the *original* points by the full current estimate.
            target_points = self.pose.apply_transformation(old_target_points.unsqueeze(0), new_pose_matrix)[0]
            # Compute the nearest neighbors for the selected points
            neigh_pc, neigh_normals, tgt_pc = self.local_map.nearest_neighbor_search(target_points)
            # Compute the rigid transform alignment
            delta_pose, residuals = self.rigid_alignment.align(neigh_pc,
                                                               tgt_pc,
                                                               neigh_normals,
                                                               **kwargs)
            loss = residuals.sum()
            losses.append(loss)
            if delta_pose.norm() < self.config.threshold_delta_pose:
                break
            # Manifold normalization to keep proper rotations
            new_pose_params = self.pose.from_pose_matrix(self.pose.build_pose_matrix(delta_pose) @ new_pose_matrix)
            new_pose_matrix = self.pose.build_pose_matrix(new_pose_params)
        return new_pose_params, new_pose_matrix, losses
    def sample_points(self):
        """Returns the points sampled"""
        if not self._sample_pointcloud:
            # Take the valid (non-zero) points of the projection map.
            target_points = projection_map_to_points(self._tgt_vmap[0], dim=0)
            target_points = target_points[target_points.norm(dim=-1) > 0.0]
        else:
            target_points = self._tgt_pc[0]
        return target_points
    def get_relative_poses(self) -> np.ndarray:
        """Returns the estimated relative poses for the current sequence"""
        if len(self.relative_poses) == 0:
            return None
        return np.concatenate(self.relative_poses, axis=0)
    def update_initialization(self, new_rpose, data_dict: dict):
        """Send the frame to the initialization after registration for its state update"""
        self._motion_model.register_motion(new_rpose, data_dict)
    # ------------------------------------------------------------------------------------------------------------------
    # `Private` methods
    def _read_input(self, data_dict: dict):
        """Reads and interprets the input from the data_dict"""
        assert_debug(self.config.data_key in data_dict,
                     f"Could not find the key `{self.config.data_key}` in the input dictionary.\n"
                     f"With keys : {data_dict.keys()}). Set the parameter `slam.odometry.data_key` to the desired key")
        data = data_dict[self.config.data_key]
        self._tgt_vmap = None
        self._tgt_pc = None
        if isinstance(data, np.ndarray):
            # Raw (N, 3) numpy pointcloud: project it to a spherical vertex map.
            check_sizes(data, [-1, 3])
            self._sample_pointcloud = True
            pc_data = torch.from_numpy(data).to(self.device).unsqueeze(0)
            # Project into a spherical image
            vertex_map = self.projector.build_projection_map(pc_data)
        elif isinstance(data, torch.Tensor):
            if len(data.shape) == 3 or len(data.shape) == 4:
                # Cast the data tensor as a vertex map
                vertex_map = data.to(self.device)
                if len(data.shape) == 3:
                    vertex_map = vertex_map.unsqueeze(0)
                else:
                    assert_debug(data.shape[0] == 1, f"Unexpected batched data format.")
                check_sizes(vertex_map, [1, 3, -1, -1])
                # Flatten the vertex map into a (1, H*W, 3) pointcloud, dropping null points.
                pc_data = vertex_map.permute(0, 2, 3, 1).reshape(1, -1, 3)
                pc_data = pc_data[mask_not_null(pc_data, dim=-1)[:, :, 0]]
            else:
                assert_debug(len(data.shape) == 2)
                pc_data = data.to(self.device).unsqueeze(0)
                vertex_map = self.projector.build_projection_map(pc_data)
        else:
            raise RuntimeError(f"Could not interpret the data: {data} as a pointcloud tensor")
        self._tgt_vmap = vertex_map.to(torch.float32)  # [1, 3, -1, -1]
        self._tgt_pc = pc_data.to(torch.float32)
        # Sanitize NaNs: zero them in the vertex map, drop them from the pointcloud.
        self._tgt_vmap = modify_nan_pmap(self._tgt_vmap, 0.0)
        _tgt_pc, _ = remove_nan(self._tgt_pc[0])
        self._tgt_pc = _tgt_pc.unsqueeze(0)
    def __update_map(self, new_rpose: torch.Tensor, data_dict: dict):
        # Updates the map if the motion since last registration is large enough
        new_delta = self._delta_since_map_update @ new_rpose
        delta_params = self.pose.from_pose_matrix(new_delta)
        # Gate on accumulated translation norm and rotation (converted to degrees).
        if delta_params[0, :3].norm() > self._register_threshold_trans or \
                delta_params[0, 3:].norm() * 180 / np.pi > self._register_threshold_rot:
            new_mask = mask_not_null(self._tgt_vmap)
            new_nmap = None
            if "normal_map" in data_dict:
                new_nmap = data_dict["normal_map"]
            self.local_map.update(new_rpose,
                                  new_vertex_map=self._tgt_vmap,
                                  new_pc_data=self._tgt_pc,
                                  normal_map=new_nmap,
                                  mask=new_mask)
            self._delta_since_map_update = torch.eye(4, dtype=torch.float32, device=self.device)
        else:
            self.local_map.update(new_rpose)
            self._delta_since_map_update = new_delta
    # ------------------------------------------------------------------------------------------------------------------
| StarcoderdataPython |
9640173 | from typing import List, Tuple, Dict
import numpy as np
from itertools import product
from dwave.ComponentConverter import ComponentConverter
from dwave.Sampler import Sampler
from planner import Component
class SimpleDWavePlanner:
    """Plans rectangular item placements on a height x width grid by building a
    binary polynomial, sampling it with the D-Wave Sampler, and converting the
    result to Component objects."""
    height: int
    width: int
    item_height: int
    item_width: int
    components: List[Component] = []

    def __init__(self, height: int, width: int, item_height: int, item_width: int):
        self.height = height
        self.width = width
        self.item_width = item_width
        self.item_height = item_height
        # Pool of unique variable indices; _get_memory hands out slices of it.
        self.memory = np.arange(self.memory_size)
        self.memory_cursor = 0
        # Fixed: previously `components` was only a mutable *class* attribute,
        # shared between all instances; give each instance its own list.
        self.components = []

    @property
    def memory_size(self):
        # Two variable grids (S and O), each height * width.
        return 2 * self.height * self.width

    def _get_memory(self, shape):
        """Allocate a block of fresh variable indices with the given shape."""
        # np.product was removed in NumPy 2.0; np.prod is the supported name.
        size = np.prod(shape)
        if self.memory_cursor + size > self.memory_size:
            raise ValueError("Out of memory")
        allocated = self.memory[self.memory_cursor: self.memory_cursor + size].reshape(shape)
        self.memory_cursor += size
        return allocated

    def _get_polynomial(self, S, O):
        """Build the binary polynomial over the S variables encoding the layout
        objective (many rectangles, small area, no overlaps, inside the palette)."""
        poly: Dict[Tuple, int] = {}
        # as many rectangles as possible
        for i, j in product(range(self.width), range(self.height)):
            poly[(S[i, j],)] = -1 # some coefficient
        # as small area as possible
        for i, j in product(range(self.width), range(self.height)):
            poly[(S[i, j],)] += np.sqrt(i * j / (self.height * self.width))
        # avoid overlaps
        # and ((abs(i2 - i1) > 0) or (abs(j2 - j1) > 0))
        for i1, j1 in product(range(self.width), range(self.height)):
            for i2, j2 in product(range(self.width), range(self.height)):
                if (abs(i2 - i1) < self.item_width) and (abs(j2 - j1) < self.item_height) \
                        and ((abs(i2 - i1) > 0) or (abs(j2 - j1) > 0)):
                    poly[(S[i1, j1], S[i2, j2])] = 1
        # avoid getting out of the palette
        # NOTE(review): these assignments *overwrite* (rather than add to) the
        # coefficients set by the loops above for the border cells — confirm
        # this is intended.
        for i in range(self.width - self.item_width + 1, self.width):
            for j in range(self.height):
                poly[(S[i, j],)] = 1
        for i in range(self.width):
            for j in range(self.height - self.item_height + 1, self.height):
                poly[(S[i, j],)] = 1
        return poly

    def _post_process_data(self, data, S, O):
        """Force all O variables to zero in the sampled assignment.

        NOTE(review): nothing in this class writes constraints over O, so these
        variables are unconstrained until zeroed here — confirm O is still needed.
        """
        for x in range(O.shape[0]):
            for y in range(O.shape[1]):
                data[O[x, y]] = 0
        return data

    def plan(self):
        """Run the full pipeline: build polynomial, sample, convert to components."""
        S = self._get_memory((self.width, self.height))
        O = self._get_memory((self.width, self.height))
        poly = self._get_polynomial(S, O)
        sampler = Sampler(poly)
        result = sampler.get_results()
        result = self._post_process_data(result, S, O)
        converter = ComponentConverter(self.item_width, self.item_height, S, O)
        self.components = converter.get_components(result)
        print([c.orientation for c in self.components])

    def get_components(self):
        """Return the components produced by the last plan() call."""
        return self.components
| StarcoderdataPython |
8030946 | <gh_stars>10-100
from django.shortcuts import get_object_or_404, redirect, render
from problems.models import Problem
def problem_details(request, slug):
    """Render the detail page for the problem identified by *slug* (404 if absent)."""
    problem = get_object_or_404(Problem, slug=slug)
    return render(request, "problems/details.html", {"problem": problem})
def problem_random(request):
    """Redirect to a randomly chosen problem."""
    return redirect(Problem.objects.random())
def problem_select(request, problem_id):
    """Mark a published problem as selected, remember it in the session, redirect."""
    selected = get_object_or_404(Problem, pk=problem_id, published=True)
    selected.select()
    request.session["problem_selected"] = selected.id
    return redirect(selected)
| StarcoderdataPython |
5023647 | <reponame>whitneymichelle/class_enrollment_simulations
"""Tests for `simulation_probabilities` module."""
import pytest
from class_enrollment_simulations.simulation_probabilities import get_cv_rate, get_eng_cv_rate, \
get_retention_rate, get_eng_two_cv_rate, get_transfer_cv_rate
def test_get_cv_rate():
    """Pinned regression value: the low rate passes through, high is derived."""
    assert get_cv_rate(0.5) == {'low': 0.5, 'high': 0.5696469185597861}
def test_get_eng_cv_rate():
    """Pinned regression value for the engagement conversion-rate helper."""
    assert get_eng_cv_rate(0.5) == {'low': 0.5, 'high': 0.5870586481997327}
def test_get_retention_rate():
    """Retention rates are passed through unchanged as low/average/high."""
    assert get_retention_rate(0.5, 0.6, 0.7) == {'low': 0.5, 'average': 0.6, 'high': 0.7}
def test_get_eng_two_cv_rate():
    """Pinned regression value for the second engagement conversion rate."""
    assert get_eng_two_cv_rate(0.5) == {'low': 0.5, 'high': 0.5435293240998663}
def test_get_transfer_cv_rate():
    """Transfer rate mirrors the eng-two derivation (same pinned high value)."""
    assert get_transfer_cv_rate(0.5) == {'low': 0.5, 'high': 0.5435293240998663}
| StarcoderdataPython |
12826702 | <reponame>sridatta/mlrose<gh_stars>10-100
import numpy as np
from mlrose_hiive import QueensOpt
class QueensGenerator:
@staticmethod
def generate(seed, size=20):
np.random.seed(seed)
problem = QueensOpt(length=size)
return problem
| StarcoderdataPython |
from typing import Optional

from pydantic.dataclasses import dataclass
@dataclass
class Document:
    """A downloadable document record (pydantic-validated dataclass)."""
    name: str
    document_url: str
    # Annotated Optional so the declared type matches the None default;
    # relying on `str = None` is rejected by pydantic v2 validation.
    download_url: Optional[str] = None
| StarcoderdataPython |
31449 | <filename>solver.py
import cv2, os
import numpy as np
import sys
from utils import movingAverage, plot, computeAverage
import queue
from sklearn import linear_model
class Solver():
    """Estimates vehicle speed from dashcam video via sparse optical flow.

    Tracks road-region features between frames (Lucas-Kanade), derives a
    per-frame V/hf estimate from the flow field, then fits a single scale
    factor (hf) against ground-truth speeds with linear regression.
    """
    def __init__(self, config):
        """Open train/test videos and load tracking/ground-truth parameters."""
        self.vid = cv2.VideoCapture(config.vidpath)
        self.txtfile = config.txtfile
        self.vis = config.vis
        self.len_gt = config.len_gt
        self.test_vid = cv2.VideoCapture(config.test_vidpath)
        # Separate function to allow for different methods to be inculcated into the same class
        self.setupParams()
    def setupParams(self):
        """ initialize parameters for tracking and extracting features
        Load ground truth parameters from txt file"""
        # Lucas Kanade parameters
        self.lk_params = dict(winSize  = (21, 21),
                              maxLevel = 2,
                              criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.01))
        self.frame_idx = 0
        self.prev_pts = None
        self.detect_interval = 1
        # One predicted V/hf value per frame of the training video.
        self.temp_preds = np.zeros(int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT)))
        # Load ground truth txt file (one speed value per line)
        with open(self.txtfile, 'r') as file_:
            gt = file_.readlines()
        gt = [float(x.strip()) for x in gt]
        self.gt = np.array(gt[:self.len_gt])
        self.window = 80 # for moving average
        self.prev_gray = None
    def constructMask(self, mask = None, test=False):
        """Constructs a mask to only take into consideration a part of the frame.
        In this case it's the road. """
        vid = self.test_vid if test else self.vid
        if mask is None:
            W = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
            H = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
            mask = np.zeros(shape = (H,W), dtype = np.uint8)
            mask.fill(255)
        else:
            W = mask.shape[1]
            H = mask.shape[0]
            cv2.rectangle(mask, (0, 0), (W, H), (0, 0, 0), -1)
        # Trapezoid roughly covering the road ahead (frame assumed 640 wide).
        x_top_offset = 240
        x_btm_offset = 65
        poly_pts = np.array([[[640-x_top_offset, 250], [x_top_offset, 250], [x_btm_offset, 350], [640-x_btm_offset, 350]]], dtype=np.int32)
        cv2.fillPoly(mask, poly_pts, (255, 255, 255))
        return mask
    def processFrame(self, frame):
        """ Gaussian Blur and then apply Lucas Kanade optical flow"""
        frame = cv2.GaussianBlur(frame, (3,3), 0)
        curr_pts, _st, _err = cv2.calcOpticalFlowPyrLK(self.prev_gray, frame, self.prev_pts, None, **self.lk_params)
        # Store flow (x, y, dx, dy)
        flow = np.hstack((self.prev_pts.reshape(-1, 2), (curr_pts - self.prev_pts).reshape(-1, 2)))
        preds = []
        # NOTE(review): u/(x*y) and v/(y*y) assume planar road geometry so the
        # flow scales with V/hf — confirm derivation before changing.
        for x, y, u, v in flow:
            if v < -0.05:
                continue
            # Translate points to center
            x -= frame.shape[1]/2
            y -= frame.shape[0]/2
            # Append to preds taking care of stability issues
            if y == 0 or (abs(u) - abs(v)) > 11:
                preds.append(0)
                preds.append(0)
            elif x == 0:
                preds.append(0)
                preds.append(v / (y*y))
            else:
                preds.append(u / (x * y))
                preds.append(v / (y*y))
        return [n for n in preds if n>=0]
    def getKeyPts(self, offset_x=0, offset_y=0):
        """ return key points with offset """
        if self.prev_pts is None:
            return None
        return [cv2.KeyPoint(x=p[0][0] + offset_x, y=p[0][1] + offset_y, _size=10) for p in self.prev_pts]
    def getFeatures(self, frame_gray, mask):
        """Detect up to 30 Shi-Tomasi corners inside the road mask."""
        return cv2.goodFeaturesToTrack(frame_gray,30,0.1,10,blockSize=10,
                                       mask=mask)
    def run(self):
        """Process the training video, then fit the hf scale factor.

        Returns the fitted hf factor; prints train/validation MSE.
        """
        # Construct mask first
        mask = self.constructMask()
        prev_key_pts = None
        # fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # self.video = cv2.VideoWriter('video.avi', fourcc,29, (int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))))
        while self.vid.isOpened() and self.frame_idx<len(self.gt):
            ret, frame = self.vid.read()
            if not ret:
                break
            # Convert to B/W
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Crop to the road region of interest.
            frame_gray = frame_gray[130:350, 35:605]
            mask_vis = frame.copy() # <- For visualization
            # Process each frame
            if self.prev_pts is None:
                self.temp_preds[self.frame_idx] = 0
            else:
                # Get median of predicted V/hf values
                preds = self.processFrame(frame_gray)
                self.temp_preds[self.frame_idx] = np.median(preds) if len(preds) else 0
            # Extract features
            self.prev_pts = self.getFeatures(frame_gray, mask[130:350, 35:605])
            self.prev_gray = frame_gray
            self.frame_idx += 1
            # For visualization purposes only
            if self.vis:
                prev_key_pts = self.visualize(frame, mask_vis, prev_key_pts)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        # self.video.release()
        self.vid.release()
        # Split predictions into train and validation -
        split = self.frame_idx//10
        train_preds = self.temp_preds[:self.frame_idx-split]
        val_preds = self.temp_preds[self.frame_idx - split:self.frame_idx]
        gt_train = self.gt[:len(train_preds)]
        gt_val = self.gt[len(train_preds):self.frame_idx]
        # Fit to ground truth
        preds = movingAverage(train_preds, self.window)
        lin_reg = linear_model.LinearRegression(fit_intercept=False)
        lin_reg.fit(preds.reshape(-1, 1), gt_train)
        hf_factor = lin_reg.coef_[0]
        print("Estimated hf factor = {}".format(hf_factor))
        # estimate training error
        pred_speed_train = train_preds * hf_factor
        pred_speed_train = movingAverage(pred_speed_train, self.window)
        mse = np.mean((pred_speed_train - gt_train)**2)
        print("MSE for train", mse)
        # Estimate validation error
        pred_speed_val = val_preds * hf_factor
        pred_speed_val = movingAverage(pred_speed_val, self.window)
        mse = np.mean((pred_speed_val - gt_val)**2)
        print("MSE for val", mse)
        # plot(pred_speed_train, gt_train)
        # plot(pred_speed_val, gt_val)
        return hf_factor
    def visualize(self, frame, mask_vis, prev_key_pts, speed=None):
        """Overlay the road mask, current/previous keypoints and the speed."""
        self.constructMask(mask_vis)
        mask_vis = cv2.bitwise_not(mask_vis)
        frame_vis = cv2.addWeighted(frame, 1, mask_vis, 0.3, 0)
        # Offsets undo the ROI crop so keypoints land on the full frame.
        key_pts = self.getKeyPts(35, 130)
        cv2.drawKeypoints(frame_vis, key_pts, frame_vis, color=(0,0,255))
        cv2.drawKeypoints(frame_vis, prev_key_pts, frame_vis, color=(0,255,0))
        if speed is not None:
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame_vis, "speed {}".format(speed), (10, 35), font, 1.2, (0, 0, 255))
        cv2.imshow('test',frame_vis)
        # self.video.write(frame_vis)
        return key_pts
    def test(self, hf_factor, save_txt=False):
        """Run inference on the test video using the fitted hf factor.

        Optionally writes the per-frame predicted speeds to test.txt.
        """
        mask = self.constructMask(test=True)
        self.prev_gray = None
        test_preds = np.zeros(int(self.test_vid.get(cv2.CAP_PROP_FRAME_COUNT)))
        frame_idx = 0
        curr_estimate = 0
        prev_key_pts = None
        self.prev_pts = None
        while self.test_vid.isOpened():
            ret, frame = self.test_vid.read()
            if not ret:
                break
            # Convert to B/W
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame_gray = frame_gray[130:350, 35:605]
            mask_vis = frame.copy() # <- For visualization
            # Process each frame
            # For the first frame
            pred_speed = 0
            if self.prev_pts is None:
                test_preds[frame_idx] = 0
            else:
                # Get median of predicted V/hf values
                preds = self.processFrame(frame_gray)
                pred_speed = np.median(preds) * hf_factor if len(preds) else 0
                test_preds[frame_idx] = pred_speed
            # Extract features
            self.prev_pts = self.getFeatures(frame_gray, mask[130:350, 35:605])
            self.prev_gray = frame_gray
            frame_idx += 1
            # For visualization purposes only
            vis_pred_speed = computeAverage(test_preds, self.window//2, frame_idx)
            prev_key_pts = self.visualize(frame, mask_vis, prev_key_pts, speed=vis_pred_speed)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self.test_vid.release()
        print("Saving predicted speeds in test.txt ")
        if save_txt:
            with open("test.txt", "w") as file_:
                for item in test_preds:
                    file_.write("%s \n" % item)
| StarcoderdataPython |
4980175 | <reponame>FLamparski/RailDelay
import sys
import json
import logging
import yaml
import stomp
import rethinkdb as r
from time import sleep
from os import path
from toolz.dicttoolz import assoc
import train_movements as tm
conf_path = path.realpath(path.join(path.dirname(__file__), '..', 'conf.yml'))
print('Using configuration file {}'.format(conf_path))
HOSTS = [
('datafeeds.networkrail.co.uk', 61618)
]
class RailListener:
    """STOMP listener that stores Network Rail feed messages in RethinkDB.

    on_message relies on the module-global `conn` (opened in on_startup) and
    acks each message individually after it has been persisted and processed.
    """

    def __init__(self, mq):
        self._mq = mq
        self._logger = logging.getLogger('RailListener')

    def on_error(self, headers, message):
        """Log a broker ERROR frame and tear down the connection."""
        self._logger.error('Stomp error:\n{}\n{}'.format(headers, message))
        # BUG FIX: previously called the undefined module-global `mq`
        # (NameError at runtime); disconnect via the stored connection.
        self._mq.disconnect()

    def on_heartbeat_timeout(self):
        """Back off briefly when the broker misses a heartbeat."""
        # Logger.warn is a deprecated alias of Logger.warning.
        self._logger.warning('Heartbeat timed out.')
        sleep(1)

    def on_message(self, headers, message):
        """Persist each decoded message, hand it to train_movements, then ack."""
        self._logger.info(headers)
        decoded_messages = json.loads(message)
        self._logger.info('Received a total of {} message(s)'
                          .format(len(decoded_messages)))
        for decoded_message in decoded_messages:
            # Tag the document with the subscription name as its type.
            doc = assoc(decoded_message, 'type', headers['subscription'])
            r.table('raw_messages').insert(doc).run(conn)
            tm.process_message(doc, conn)
        self._mq.ack(id=headers['message-id'],
                     subscription=headers['subscription'])
def setup_logging(config):
    """Configure file logging at the level named in config['logging']['level'].

    Raises ValueError when the name does not resolve to a logging level.
    """
    level_name = config['logging']['level']
    resolved = getattr(logging, level_name.upper(), None)
    if not isinstance(resolved, int):
        raise ValueError('Invalid log level in logging.level: {}'
                         .format(level_name))
    logging.basicConfig(filename='stomp_listener.log', level=resolved)
def setup_mq(config):
    """Create, start and connect a STOMP connection to the Network Rail feeds.

    Heartbeats: we emit every 10s and expect the broker's every 5s.
    Credentials come from config['network_rail']['connection'].
    """
    mq = stomp.Connection(host_and_ports=HOSTS,
                          keepalive=True,
                          vhost='datafeeds.networkrail.co.uk',
                          heartbeats=(10000, 5000))
    mq.set_listener(None, RailListener(mq))
    mq.start()
    logging.getLogger('setup_mq').info('Connecting to the data feeds')
    mq.connect(wait=True, **config['network_rail']['connection'])
    return mq
def on_startup():
    """Load config, open the module-global RethinkDB connection `conn`,
    connect to STOMP and subscribe to the train-movements topic."""
    global conn
    config = None
    with open(conf_path) as conf_yml:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; the config file is local, but prefer yaml.safe_load.
        config = yaml.load(conf_yml.read())
    setup_logging(config)
    logger = logging.getLogger('main')
    conn = r.connect(**config['database'])
    mq = setup_mq(config)
    logger.info('Subscribing to the data feeds')
    mq.subscribe('/topic/TRAIN_MVT_ALL_TOC',
                 tm.TRAIN_MOVEMENTS,
                 ack='client-individual')
    return mq
def main():
    """Run the listener until the STOMP connection drops, then close the DB."""
    mq = on_startup()
    while mq.is_connected():
        sleep(0.1)
    # `conn` is the module-global opened in on_startup().
    conn.close()
# Run the listener only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
172704 | <gh_stars>1-10
import mock
import unittest
from . import testutils
from ..layers import GitReindex
from bin.commands import reindex
class TestReindex(unittest.TestCase):
    """Unit tests for `git reindex`: re-adds currently staged files,
    skipping ones that were deleted, and bails outside a git repository."""

    layer = GitReindex

    @mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
    @mock.patch('bin.commands.utils.execute.check_output')
    @mock.patch('bin.commands.utils.git.deleted_files', return_value=['file3'])
    @mock.patch('bin.commands.utils.execute.call')
    def test_reindex_noneDeleted(self, mock_call, mock_deletedfiles, mock_checkoutput, mock_isgitrepository):
        """All staged files are re-added when none of them were deleted."""
        # setup
        files = ['file1', 'file2']
        mock_checkoutput.return_value = '\n'.join(files) + '\n'

        # when
        reindex.reindex()

        # then
        mock_isgitrepository.assert_called_once_with()
        mock_checkoutput.assert_called_once_with('git diff --name-only --cached'.split())
        mock_call.assert_called_once_with(['git', 'add', '--'] + files)

    @mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
    @mock.patch('bin.commands.utils.execute.check_output')
    @mock.patch('bin.commands.utils.git.deleted_files')
    @mock.patch('bin.commands.utils.execute.call')
    def test_reindex_someDeleted(self, mock_call, mock_deletedfiles, mock_checkoutput, mock_isgitrepository):
        """Deleted staged files are excluded from the `git add` invocation."""
        # setup
        files = ['file1', 'file2', 'file3']
        mock_checkoutput.return_value = '\n'.join(files) + '\n'
        mock_deletedfiles.return_value = ['file2']

        # when
        reindex.reindex()

        # then
        mock_isgitrepository.assert_called_once_with()
        mock_checkoutput.assert_called_once_with('git diff --name-only --cached'.split())
        mock_call.assert_called_once_with(['git', 'add', '--', 'file1', 'file3'])

    @mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
    @mock.patch('bin.commands.utils.execute.check_output')
    @mock.patch('bin.commands.utils.git.deleted_files')
    @mock.patch('bin.commands.utils.execute.call')
    def test_reindex_allDeleted(self, mock_call, mock_deletedfiles, mock_checkoutput, mock_isgitrepository):
        """If every staged file was deleted, no `git add` call is made."""
        # setup
        files = ['file1', 'file2']
        mock_checkoutput.return_value = '\n'.join(files) + '\n'
        mock_deletedfiles.return_value = files

        # when
        reindex.reindex()

        # then
        mock_isgitrepository.assert_called_once_with()
        mock_checkoutput.assert_called_once_with('git diff --name-only --cached'.split())
        mock_call.assert_not_called()

    @mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
    @mock.patch('bin.commands.utils.execute.check_output', return_value = '')
    @mock.patch('bin.commands.utils.execute.call')
    def test_reindex_noFilesToIndex(self, mock_call, mock_checkoutput, mock_isgitrepository):
        """Nothing staged means no `git add` call at all."""
        # when
        reindex.reindex()

        # then
        mock_isgitrepository.assert_called_once_with()
        mock_checkoutput.assert_called_once_with('git diff --name-only --cached'.split())
        mock_call.assert_not_called()

    @mock.patch('bin.commands.utils.directories.is_git_repository', return_value=False)
    @mock.patch('bin.commands.utils.messages.error', side_effect=testutils.and_exit)
    @mock.patch('os.getcwd', return_value='/working/dir')
    def test_reindex_notAGitRepository(self, mock_getcwd, mock_error, mock_isgitrepository):
        """Outside a git repository, reindex reports an error and exits."""
        # when
        try:
            reindex.reindex()
            self.fail('expected to exit but did not')  # pragma: no cover
        except SystemExit:
            pass

        # then
        mock_isgitrepository.assert_called_once_with()
        mock_error.assert_called_once_with("'/working/dir' not a git repository")
        mock_getcwd.assert_called_once_with()
| StarcoderdataPython |
3475955 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-31 17:12
from __future__ import unicode_literals, print_function
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
#
# These 8 models use Member to map to a user and to make things easier for the
# future migration where we rename Member to UserProfile, we are crating a User
# field we can start moving over to. --JLS
#
def member_to_user(model):
    """Copy each instance's member.user onto its new `user` field and save.

    Rows with no member are left untouched (and not saved).
    """
    for record in model.objects.all():
        member = record.member
        if not member:
            continue
        record.user = member.user
        record.save()
def forwards_func(apps, schema_editor):
    """Populate the new `user` FK from `member.user` on every affected model."""
    affected_models = (
        "DailyLog",
        "Membership",
        "SentEmailLog",
        "SecurityDeposit",
        "SpecialDay",
        "MemberNote",
        "Bill",
        "Transaction",
    )
    for model_name in affected_models:
        member_to_user(apps.get_model("nadine", model_name))
def reverse_func(apps, schema_editor):
    # Intentional no-op: the forward copy leaves `member` intact, so there is
    # nothing to undo when migrating backwards.
    pass
class Migration(migrations.Migration):
    """Add a `user` FK (initially nullable) to eight Member-linked models,
    copy member.user into it via RunPython, then make the FK required."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('nadine', '0006_membershipplan_enabled'),
    ]
    operations = [
        # Phase 1: add the new FK as nullable so existing rows stay valid.
        migrations.AddField(
            model_name='bill',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='transaction',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='dailylog',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='membership',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='sentemaillog',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='specialday',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='securitydeposit',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='membernote',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='membernote',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
        # Phase 2: backfill user from member.user (no-op on reverse).
        migrations.RunPython(forwards_func, reverse_func),
        # Phase 3: tighten the FK to non-null now that data is populated.
        # (sentemaillog is intentionally left nullable.)
        migrations.AlterField(
            model_name='bill',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='dailylog',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='membership',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='securitydeposit',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='specialday',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='membernote',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| StarcoderdataPython |
1909406 | <gh_stars>10-100
"""
Module: 'pybricks.ev3devio' on LEGO EV3 v1.0.0
"""
# MCU: sysname=ev3, nodename=ev3, release=('v1.0.0',), version=('0.0.0',), machine=ev3
# Stubber: 1.3.2
class Ev3devSensor:
    # Auto-generated firmware stub (see the Stubber header above): bodies are
    # placeholders and the generated methods omit `self`; the real
    # implementations live on the EV3 brick.
    ''
    def _close_files():
        pass

    _default_mode = None
    _ev3dev_driver_name = 'none'

    def _mode():
        pass

    _number_of_values = 1

    def _open_files():
        pass

    def _value():
        pass
class Ev3devUartSensor:
    # Auto-generated firmware stub: UART sensor variant with extra reset
    # hooks; bodies are placeholders and methods omit `self`.
    ''
    def _close_files():
        pass

    _default_mode = None
    _ev3dev_driver_name = 'none'

    def _mode():
        pass

    _number_of_values = 1

    def _open_files():
        pass

    def _reset():
        pass

    def _reset_port():
        pass

    def _value():
        pass
class StopWatch:
    # Auto-generated firmware stub of the pybricks StopWatch timer API.
    ''
    def pause():
        pass

    def reset():
        pass

    def resume():
        pass

    def time():
        pass
# Auto-generated module-level stubs from the firmware dump; signatures only,
# the real implementations live on the EV3 brick.
def get_sensor_path():
    pass

def listdir():
    pass

path = None

def read_int():
    pass

def read_str():
    pass

def wait():
    pass

def write_int():
    pass

def write_str():
    pass
| StarcoderdataPython |
1933510 | <filename>display3d/display3d.py<gh_stars>1-10
import pickle
import os
import numpy as np
import viewer3d
from viewer3d import plot3d, inte_to_rgb, show_pillar_cuboid
from msic import get_corners_3d
from kitti import Object3d
# Score thresholds below which detections are discarded.
car_th = 0.5
ped_th = 0.5
# KITTI training split root (velodyne point clouds live under it).
data_dir = '/data/Machine_Learning/ImageSet/KITTI/object/training/'
# Pickled per-frame detection results for cars and pedestrians; each list is
# aligned with the ids in ImageSets/val.txt.
f = open('./results/car/step_296960/result.pkl', 'rb')
res_cars = pickle.load(f)
print(len(res_cars))
f.close()
f = open('./results/ped/step_194880/result.pkl', 'rb')
res_peds = pickle.load(f)
print(len(res_peds))
f.close()
f = open('./ImageSets/val.txt')
ids = f.readlines()
f.close()
print(len(ids))
show_sets = [ '002565']
for i, id in enumerate(ids):
    id = id.replace('\n', '')
    # if id not in show_sets:
    #     continue
    # Load the raw velodyne scan: N x (x, y, z, intensity).
    pc_path = os.path.join(data_dir,'velodyne', id+'.bin')
    print(pc_path)
    pc_velo = np.fromfile(pc_path, dtype=np.float32).reshape(-1, 4)
    print(pc_velo.shape)
    res_car = res_cars[i]
    res_ped = res_peds[i]
    results = []
    cls_list = []
    # Keep car detections above the car threshold, converting each to a
    # KITTI-label-style dict.
    for j, score in enumerate(res_car['score']):
        if score > car_th:
            result = {}
            result['type'] = res_car['name'][j]
            result['alpha'] = res_car['alpha'][j]
            result['truncated'] = res_car['truncated'][j]
            result['occluded'] = res_car['occluded'][j]
            result['bbox'] = res_car['bbox'][j]
            result['dimensions'] = res_car['dimensions'][j]
            result['location'] = res_car['location'][j]
            result['rotation_y'] = res_car['rotation_y'][j]
            results.append(result)
            cls_list.append(result['type'])
    # Same filtering for pedestrian detections.
    for j, score in enumerate(res_ped['score']):
        if score > ped_th:
            result = {}
            result['type'] = res_ped['name'][j]
            result['alpha'] = res_ped['alpha'][j]
            result['truncated'] = res_ped['truncated'][j]
            result['occluded'] = res_ped['occluded'][j]
            result['bbox'] = res_ped['bbox'][j]
            result['dimensions'] = res_ped['dimensions'][j]
            result['location'] = res_ped['location'][j]
            result['rotation_y'] = res_ped['rotation_y'][j]
            results.append(result)
            cls_list.append(result['type'])
    # p3d = plot3d()
    # points = pc_velo[:, 0:3]
    # pc_inte = pc_velo[:, 3]
    # pc_color = inte_to_rgb(pc_inte)
    # p3d.add_points(points, pc_color)
    # p3d.show()
    # Render the point cloud with the surviving detection boxes overlaid.
    show_pillar_cuboid(pc_velo, pc_path, results, id=id)
| StarcoderdataPython |
29887 | #!/usr/bin/env python3
import logging
import subprocess
import re
import boto.utils
from jinja2 import Environment, FileSystemLoader
from taupage import get_config
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
TPL_NAME = 'td-agent.conf.jinja2'
TD_AGENT_TEMPLATE_PATH = '/etc/td-agent/templates/'
TD_AGENT_OUTPUT_PATH = '/etc/td-agent/td-agent.conf'
def restart_td_agent_process():
    ''' Restart Fluentd (td-agent) via the service command.

    Raises on a non-zero exit code; wait(timeout=5) raises
    subprocess.TimeoutExpired if the restart hangs.
    '''
    process = subprocess.Popen(['service', 'td-agent', 'restart'])
    exit_code = process.wait(timeout=5)
    if exit_code:
        raise Exception("'service td-agent restart' failed with exit code: {0}".format(exit_code))
def get_scalyr_api_key():
    ''' Read Scalyr API key from Taupage config and set in template file '''
    main_config = get_config()
    config = main_config.get('logging')
    # Prefer the key inside the logging section; fall back to the top level.
    scalyr_api_key = config.get('scalyr_account_key', main_config.get('scalyr_account_key'))
    if scalyr_api_key:
        # If scalyr_api_key starts with "aws:kms:" then decrypt key
        # NOTE(review): the search is unanchored and case-insensitive, but the
        # re.sub below is case-sensitive and strips every occurrence of
        # 'aws:kms:' (not just a prefix) — confirm keys always carry the
        # lowercase prefix at the start.
        match_kms_key = re.search('aws:kms:', scalyr_api_key, re.IGNORECASE)
        if match_kms_key:
            scalyr_api_key = re.sub(r'aws:kms:', '', scalyr_api_key)
            try:
                # Shell out to the KMS decryption helper shipped with Taupage.
                scalyr_api_key = subprocess.check_output(['python3',
                                                          '/opt/taupage/bin/decrypt-kms.py',
                                                          scalyr_api_key]).decode('UTF-8').strip()
            except Exception:
                logger.error('Failed to run /opt/taupage/bin/decrypt-kms.py')
                raise SystemExit()
            # The helper reports decryption failure via stdout, not exit code.
            if scalyr_api_key == "Invalid KMS key.":
                logger.error('Failed to decrypt KMS Key')
                raise SystemExit(1)
    return scalyr_api_key
def update_configuration_from_template(s3_default):
    ''' Render the td-agent (Fluentd) configuration from the Jinja template.

    Gathers Taupage config, EC2 instance metadata and per-destination logging
    settings, then writes the rendered config to TD_AGENT_OUTPUT_PATH.

    NOTE(review): the `s3_default` parameter is never used in this function —
    confirm whether it was meant to influence the defaults below.
    '''
    # Which output plugins the template must enable.
    fluentd_destinations = dict(scalyr=False, s3=False, rsyslog=False, scalyr_s3=False)
    config = get_config()
    logging_config = config.get('logging', {})
    application_id = config.get('application_id')
    application_version = config.get('application_version')
    stack = config.get('notify_cfn', {}).get('stack')
    source = config.get('source')
    image = config.get('source').split(':', 1)[0]
    # EC2 instance identity: region and account for tagging / bucket naming.
    instance_data = boto.utils.get_instance_identity()['document']
    aws_region = instance_data['region']
    aws_account = instance_data['accountId']
    hostname = boto.utils.get_instance_metadata()['local-hostname'].split('.')[0]
    customlog = config.get('mount_custom_log')
    if config.get('rsyslog_aws_metadata'):
        scalyr_syslog_log_parser = 'systemLogMetadata'
    else:
        scalyr_syslog_log_parser = 'systemLog'
    scalyr_application_log_parser = logging_config.get('scalyr_application_log_parser', 'slf4j')
    scalyr_custom_log_parser = logging_config.get('scalyr_custom_log_parser', 'slf4j')
    # Per-log-stream destinations all default to the global log_destination.
    fluentd_log_destination = logging_config.get('log_destination', 's3')
    fluentd_syslog_destination = logging_config.get('syslog_destination', fluentd_log_destination)
    fluentd_applog_destination = logging_config.get('applog_destination', fluentd_log_destination)
    fluentd_authlog_destination = logging_config.get('authlog_destination', fluentd_log_destination)
    fluentd_customlog_destination = logging_config.get('customlog_destination', fluentd_log_destination)
    fluentd_applog_filter_exclude = logging_config.get('applog_filter_exclude', None)
    fluentd_customlog_filter_exclude = logging_config.get('customlog_filter_exclude', None)
    fluentd_loglevel = logging_config.get('fluentd_loglevel', 'error')
    fluentd_s3_raw_log_format = logging_config.get('s3_raw_log_format', 'true')
    fluentd_s3_region = logging_config.get('s3_region', aws_region)
    fluentd_s3_bucket = logging_config.get('s3_bucket', 'zalando-logging-'+aws_account+'-'+aws_region)
    fluentd_s3_timekey = logging_config.get('s3_timekey', '5m')
    fluentd_s3_acl = logging_config.get('s3_acl', 'bucket-owner-full-control')
    fluentd_rsyslog_host = logging_config.get('rsyslog_host')
    fluentd_rsyslog_port = logging_config.get('rsyslog_port', '514')
    fluentd_rsyslog_protocol = logging_config.get('rsyslog_protocol', 'tcp')
    fluentd_rsyslog_severity = logging_config.get('rsyslog_severity', 'notice')
    fluentd_rsyslog_program = logging_config.get('rsyslog_program', 'fluentd')
    fluentd_rsyslog_hostname = logging_config.get('rsyslog_hostname', hostname)
    # Mark every destination that at least one stream routes to.
    for destination in (fluentd_applog_destination,
                        fluentd_authlog_destination,
                        fluentd_customlog_destination,
                        fluentd_syslog_destination):
        fluentd_destinations[destination] = True
    # Get Scalyr key only if configured
    if fluentd_destinations.get('scalyr') or fluentd_destinations.get('scalyr_s3'):
        scalyr_api_key = get_scalyr_api_key()
    else:
        scalyr_api_key = None
    # When S3 is in use, install a cron job that verifies IAM access to the
    # bucket every 5 minutes.
    if fluentd_destinations.get('s3') or fluentd_destinations.get('scalyr_s3'):
        try:
            with open('/etc/cron.d/s3-iam-check', 'w') as file:
                file.write('#!/bin/bash\n')
                file.write('PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n')
                file.write('*/5 * * * * root /opt/taupage/bin/s3-iam-check.py test {!s}\n'.format(fluentd_s3_bucket))
        except Exception:
            logger.exception('Failed to write file /etc/cron.d/s3-iam-check')
            raise SystemExit(1)
    # Render the template with every collected value and write the config.
    env = Environment(loader=FileSystemLoader(TD_AGENT_TEMPLATE_PATH), trim_blocks=True)
    template_data = env.get_template(TPL_NAME).render(
        scalyr_api_key=scalyr_api_key,
        application_id=application_id,
        application_version=application_version,
        stack=stack,
        source=source,
        image=image,
        aws_region=aws_region,
        aws_account=aws_account,
        customlog=customlog,
        scalyr_application_log_parser=scalyr_application_log_parser,
        scalyr_syslog_log_parser=scalyr_syslog_log_parser,
        scalyr_custom_log_parser=scalyr_custom_log_parser,
        fluentd_syslog_destination=fluentd_syslog_destination,
        fluentd_applog_destination=fluentd_applog_destination,
        fluentd_applog_filter_exclude=fluentd_applog_filter_exclude,
        fluentd_authlog_destination=fluentd_authlog_destination,
        fluentd_customlog_destination=fluentd_customlog_destination,
        fluentd_customlog_filter_exclude=fluentd_customlog_filter_exclude,
        fluentd_loglevel=fluentd_loglevel,
        fluentd_s3_raw_log_format=fluentd_s3_raw_log_format,
        fluentd_s3_region=fluentd_s3_region,
        fluentd_s3_bucket=fluentd_s3_bucket,
        fluentd_s3_timekey=fluentd_s3_timekey,
        fluentd_s3_acl=fluentd_s3_acl,
        fluentd_rsyslog_host=fluentd_rsyslog_host,
        fluentd_rsyslog_port=fluentd_rsyslog_port,
        fluentd_rsyslog_protocol=fluentd_rsyslog_protocol,
        fluentd_rsyslog_severity=fluentd_rsyslog_severity,
        fluentd_rsyslog_program=fluentd_rsyslog_program,
        fluentd_rsyslog_hostname=fluentd_rsyslog_hostname,
        fluentd_destinations=fluentd_destinations
    )
    try:
        with open(TD_AGENT_OUTPUT_PATH, 'w') as f:
            f.write(template_data)
    except Exception:
        logger.exception('Failed to write file td-agent.conf')
        raise SystemExit(1)
# Script entry point: decide whether Fluentd logging is enabled, set up
# monitoring cron jobs, render the config and restart td-agent.
if __name__ == '__main__':
    hostname = boto.utils.get_instance_metadata()['local-hostname'].split('.')[0]
    config = get_config()
    logging_config = config.get('logging')
    s3_default = False
    if logging_config:
        if not logging_config.get('fluentd_enabled'):
            logger.info('Fluentd disabled; skipping Fluentd initialization')
            raise SystemExit()
    if not logging_config:
        # No logging section: fall back to S3 logging and export a Prometheus
        # textfile metric recording that the default was applied.
        logger.info('Found no logging section in senza.yaml; enable dafault logging to s3')
        s3_default = True
        try:
            with open('/var/local/textfile_collector/fluentd_default_s3.prom', 'w') as file:
                file.write('fluentd_default_s3_logging{{tag=\"td-agent\",hostname=\"{!s}\"}} 1.0\n'
                           .format(hostname))
        except Exception:
            logger.exception('Failed to write file /var/local/textfile_collector/fluentd_default_s3.prom')
            raise SystemExit(1)
    # Cron job that scrapes Fluentd metrics every minute.
    try:
        with open('/etc/cron.d/get_fluentd_metrics', 'w') as file:
            file.write('#!/bin/bash\n')
            file.write('PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n')
            file.write('* * * * * root /opt/taupage/bin/get-fluentd-metrics.sh\n')
    except Exception:
        logger.exception('Failed to write file /etc/cron.d/get_fluentd_metrics')
        raise SystemExit(1)
    update_configuration_from_template(s3_default)
    restart_td_agent_process()
| StarcoderdataPython |
4803962 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\retail\retail_commands.py
# Compiled at: 2018-03-06 02:00:39
# Size of source mod 2**32: 17737 bytes
from protocolbuffers import Consts_pb2, Dialog_pb2, UI_pb2, InteractionOps_pb2
from business.business_enums import BusinessType
from distributor import shared_messages
from distributor.rollback import ProtocolBufferRollback
from distributor.system import Distributor
from retail.retail_balance_transfer_dialog import FundsTransferDialog
from retail.retail_customer_situation import RetailCustomerSituation
from retail.retail_utils import RetailUtils
from server_commands.argument_helpers import OptionalSimInfoParam, get_optional_target, RequiredTargetParam
from sims.funds import transfer_funds
from sims4.commands import CommandType
from sims4.common import Pack
import distributor, services, sims4.commands
logger = sims4.log.Logger('Retail', default_owner='trevor')
@sims4.commands.Command('retail.get_retail_info')
def get_retail_info(_connection=None):
    """Cheat command: dump the current retail lot's business state to output.

    Prints funds, curb appeal, employees (with current vs desired salary) and,
    while the store is open, the daily sales figures.
    """
    output = sims4.commands.Output(_connection)
    business_manager = services.business_service().get_business_manager_for_zone()
    if business_manager is None:
        output("This doesn't appear to be a retail lot.")
        return False
    is_open = business_manager.is_open
    output('Funds: {}'.format(business_manager.funds.money))
    output('Curb Appeal: {}'.format(business_manager.get_curb_appeal()))
    output('Employee Count: {}'.format(business_manager.employee_count))
    output('Markup Multiplier: {}X'.format(business_manager.markup_multiplier))
    output('Median Item Price: {}'.format(business_manager.get_median_item_value()))
    output('The store is {}.'.format('OPEN' if is_open else 'CLOSED'))
    if is_open:
        output('Items Sold: {}'.format(business_manager.daily_items_sold))
        output('Gross Income: {}'.format(business_manager.funds))
    # Tabular employee listing: current career level/salary vs desired.
    format_msg = '{sim:>24} {career_level:>32} {salary:>12} {desired_salary:>12}'
    output(format_msg.format(sim='Sim', career_level='Career Level', salary='Current Salary', desired_salary='Desired Salary'))
    for employee_sim in business_manager.get_employees_gen():
        career_level = business_manager.get_employee_career_level(employee_sim)
        desired_career_level = business_manager.RETAIL_CAREER.start_track.career_levels[business_manager.get_employee_desired_career_level(employee_sim)]
        output(format_msg.format(sim=(employee_sim.full_name), career_level=(str(career_level.__name__)), salary=(career_level.simoleons_per_hour),
          desired_salary=(desired_career_level.simoleons_per_hour)))
    return True
@sims4.commands.Command('retail.show_summary_dialog', command_type=(CommandType.Live), pack=(Pack.EP01))
def show_retail_summary_dialog(_connection=None):
    """Show the retail summary dialog for the current zone's business.

    Returns True when the dialog was requested, False when the current zone
    has no retail business manager.
    """
    manager = services.business_service().get_business_manager_for_zone()
    if manager is not None:
        manager.show_summary_dialog()
        return True
    return False
@sims4.commands.Command('retail.show_retail_dialog', command_type=(CommandType.Live), pack=(Pack.EP01))
def show_retail_dialog(opt_sim: OptionalSimInfoParam=None, _connection=None):
    """Build and send the Manage Employees dialog for the current retail lot.

    Populates one entry per current employee plus a pool of hire candidates
    produced by the employee sim filter. Returns False when no hiring Sim or
    retail business manager can be resolved; otherwise sends the dialog op.
    """
    sim_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if sim_info is None:
        return False
    business_manager = services.business_service().get_business_manager_for_zone()
    if business_manager is None or business_manager.business_type != BusinessType.RETAIL:
        return False
    msg = Dialog_pb2.RetailManageEmployeesDialog()
    msg.hiring_sim_id = sim_info.sim_id

    def populate_employee_msg(sim_info, employee_msg):
        # Fill one employee/candidate entry: skills, pay and career range.
        employee_msg.sim_id = sim_info.sim_id
        for skill_type in business_manager.EMPLOYEE_SKILLS:
            with ProtocolBufferRollback(employee_msg.skill_data) as (employee_skill_msg):
                employee_skill_msg.skill_id = skill_type.guid64
                employee_skill_msg.curr_points = int(sim_info.get_stat_value(skill_type))
        if business_manager.is_employee(sim_info):
            # Existing employee: surface satisfaction plus actual career data.
            satisfaction_stat = sim_info.get_statistic(business_manager.EMPLOYEE_SATISFACTION_COMMODITY)
            statisfaction_state_index = satisfaction_stat.get_state_index()
            if statisfaction_state_index is not None:
                employee_msg.satisfaction_string = satisfaction_stat.states[statisfaction_state_index].buff.buff_type.buff_name(sim_info)
            career_level = business_manager.get_employee_career_level(sim_info)
            employee_msg.pay = career_level.simoleons_per_hour
            career = business_manager.get_employee_career(sim_info)
            employee_msg.current_career_level = career.level
            employee_msg.max_career_level = len(career.current_track_tuning.career_levels) - 1
        else:
            # Hire candidate: derive pay from the desired career level instead.
            desired_level = business_manager.get_employee_desired_career_level(sim_info)
            career_level = business_manager.RETAIL_CAREER.start_track.career_levels[desired_level]
            employee_msg.pay = career_level.simoleons_per_hour
            employee_msg.current_career_level = desired_level
            employee_msg.max_career_level = len(business_manager.RETAIL_CAREER.start_track.career_levels) - 1

    for employee_sim_info in business_manager.get_employees_gen():
        with ProtocolBufferRollback(msg.employees) as (employee_msg):
            populate_employee_msg(employee_sim_info, employee_msg)

    def get_sim_filter_gsi_name():
        return 'Retail Command: Create Employees for Hire'

    # Synchronously (allow_yielding=False) ask the sim filter service for a
    # pool of hireable Sims matching the tuned employee filter.
    results = services.sim_filter_service().submit_matching_filter(number_of_sims_to_find=(business_manager.EMPLOYEE_POOL_SIZE), sim_filter=(business_manager.EMPLOYEE_POOL_FILTER),
      requesting_sim_info=sim_info,
      allow_yielding=False,
      gsi_source_fn=get_sim_filter_gsi_name)
    for result in results:
        with ProtocolBufferRollback(msg.available_sims) as (employee_msg):
            populate_employee_msg(result.sim_info, employee_msg)
    op = shared_messages.create_message_op(msg, Consts_pb2.MSG_RETAIL_MANAGE_EMPLOYEES)
    Distributor.instance().add_op_with_no_owner(op)
@sims4.commands.Command('retail.employee_hire', command_type=(CommandType.Live), pack=(Pack.EP01))
def hire_retail_employee(sim: RequiredTargetParam, _connection=None):
    """Run the hire interaction for the targeted Sim on the current business."""
    business = services.business_service().get_business_manager_for_zone()
    if business is None:
        return False
    employee = sim.get_target(manager=services.sim_info_manager())
    if employee is None:
        return False
    return business.run_employee_interaction(business.EMPLOYEE_INTERACTION_HIRE, employee)
@sims4.commands.Command('retail.employee_fire', command_type=(CommandType.Live), pack=(Pack.EP01))
def fire_retail_employee(sim: RequiredTargetParam, _connection=None):
    """Run the fire interaction for the targeted Sim on the current business."""
    business = services.business_service().get_business_manager_for_zone()
    if business is None:
        return False
    employee = sim.get_target(manager=services.sim_info_manager())
    if employee is None:
        return False
    return business.run_employee_interaction(business.EMPLOYEE_INTERACTION_FIRE, employee)
@sims4.commands.Command('retail.employee_promote', command_type=(CommandType.Live), pack=(Pack.EP01))
def promote_retail_employee(sim: RequiredTargetParam, _connection=None):
    """Run the promote interaction for the targeted Sim on the current business."""
    business = services.business_service().get_business_manager_for_zone()
    if business is None:
        return False
    employee = sim.get_target(manager=services.sim_info_manager())
    if employee is None:
        return False
    return business.run_employee_interaction(business.EMPLOYEE_INTERACTION_PROMOTE, employee)
@sims4.commands.Command('retail.employee_demote', command_type=(CommandType.Live), pack=(Pack.EP01))
def demote_retail_employee(sim: RequiredTargetParam, _connection=None):
    """Run the demote interaction for the targeted Sim on the current business."""
    business = services.business_service().get_business_manager_for_zone()
    if business is None:
        return False
    employee = sim.get_target(manager=services.sim_info_manager())
    if employee is None:
        return False
    return business.run_employee_interaction(business.EMPLOYEE_INTERACTION_DEMOTE, employee)
@sims4.commands.Command('retail.add_funds')
def add_retail_funds(amount: int=1000, _connection=None):
    """Cheat: deposit *amount* simoleons into the current retail business.

    Returns False when not on a retail lot; otherwise returns None after
    modifying the business funds (matching the original behavior).
    """
    console = sims4.commands.Output(_connection)
    manager = services.business_service().get_business_manager_for_zone()
    if manager is None:
        console("This doesn't appear to be a retail lot.")
        return False
    manager.modify_funds(amount, from_item_sold=False)
@sims4.commands.Command('retail.sell_lot', command_type=(CommandType.Live))
def sell_retail_lot(_connection=None):
    """Prompt the player to sell the current retail lot.

    Shows a confirmation dialog quoting the expected sale value (business
    funds plus furnished lot value); the sale itself is completed by
    sell_retail_lot_response. Returns False when not on a retail lot.
    """
    output = sims4.commands.Output(_connection)
    business_manager = services.business_service().get_business_manager_for_zone()
    if business_manager is None:
        output("Trying to sell a lot that isn't a retail lot.")
        return False
    current_zone = services.current_zone()
    lot_value = current_zone.lot.furnished_lot_value
    # NOTE(review): reaches into the private _funds attribute; other commands
    # in this module use the public .funds property -- confirm intent.
    sell_value = max(0.0, business_manager._funds.money + lot_value)
    dialog = business_manager.SELL_STORE_DIALOG(current_zone)
    dialog.show_dialog(on_response=sell_retail_lot_response, additional_tokens=(
     sell_value,))
def sell_retail_lot_response(dialog):
    """Dialog callback: complete the sale of the current retail lot.

    On accept: credits the furnished lot value to the business, transfers the
    business balance to the owning household, clears lot ownership, removes
    the business owner, and notifies the client. No-op if declined.
    """
    if not dialog.accepted:
        return
    business_service = services.business_service()
    business_manager = business_service.get_business_manager_for_zone()
    current_zone = services.current_zone()
    lot = current_zone.lot
    lot_value = lot.furnished_lot_value
    business_manager.modify_funds(lot_value)
    business_manager.transfer_balance_to_household()
    zone_id = current_zone.id
    services.get_zone_manager().clear_lot_ownership(zone_id)
    business_service.remove_owner(zone_id, household_id=(business_manager.owner_household_id))
    # Tell the client the lot was sold so UI state can update.
    msg = InteractionOps_pb2.SellRetailLot()
    msg.retail_zone_id = current_zone.id
    distributor.system.Distributor.instance().add_event(Consts_pb2.MSG_SELL_RETAIL_LOT, msg)
@sims4.commands.Command('retail.toggle_for_sale_vfx', command_type=(CommandType.Live), pack=(Pack.EP01))
def toggle_for_sale_vfx(_connection=None):
    """Toggle the 'for sale' visual effect on the current retail lot."""
    manager = services.business_service().get_business_manager_for_zone()
    if manager is not None:
        manager.toggle_for_sale_vfx()
        return
    logger.error('Trying to toggle for sale VFX when not in a retail zone.', owner='tastle')
@sims4.commands.Command('retail.show_balance_transfer_dialog', command_type=(CommandType.Live), pack=(Pack.EP01))
def show_retail_balance_transfer_dialog(_connection=None):
    """Open the funds-transfer dialog between household and businesses."""
    FundsTransferDialog.show_dialog()
@sims4.commands.Command('retail.transfer_funds', command_type=(CommandType.Live))
def transfer_retail_funds(amount: int, from_zone_id: int, to_zone_id: int, _connection=None):
    """Transfer *amount* simoleons between two zones' funds.

    A zone without a business manager falls back to the owning household's
    funds, so this covers household->business, business->household and
    business->business transfers. At least one side must be a retail zone.
    Returns True on success, False on invalid input.
    """
    output = sims4.commands.Output(_connection)
    if amount < 1:
        output('You can only transfer positive, non-zero amounts.')
        return False
    from_business_manager = services.business_service().get_business_manager_for_zone(zone_id=from_zone_id)
    to_business_manager = services.business_service().get_business_manager_for_zone(zone_id=to_zone_id)
    if from_business_manager is None:
        if to_business_manager is None:
            output('Invalid transfer request. Neither zone was a retail zone. At least one retail zone is required.')
            return False
        elif from_business_manager is None:
            # Household -> business transfer.
            # NOTE(review): this elif re-tests a condition the enclosing `if`
            # already established -- looks like a decompiler artifact; the
            # branch is always taken when reached.
            household = services.household_manager().get(to_business_manager.owner_household_id)
            transfer_funds(amount, from_funds=(household.funds), to_funds=(to_business_manager.funds))
    else:
        if to_business_manager is None:
            # Business -> household transfer.
            household = services.household_manager().get(from_business_manager.owner_household_id)
            transfer_funds(amount, from_funds=(from_business_manager.funds), to_funds=(household.funds))
        else:
            # Business -> business transfer.
            transfer_funds(amount, from_funds=(from_business_manager.funds), to_funds=(to_business_manager.funds))
    # Push updated balances to any open business UI on both sides.
    if from_business_manager is not None:
        from_business_manager.send_business_funds_update()
    if to_business_manager is not None:
        to_business_manager.send_business_funds_update()
    return True
@sims4.commands.Command('retail.get_owned_lot_count_message', command_type=(CommandType.Live), pack=(Pack.EP01))
def get_owned_retail_lot_count_message(_connection=None):
    """Send the client the number of retail lots the active household owns."""
    count = 0
    household = services.active_household()
    if household is not None:
        tracker = services.business_service().get_business_tracker_for_household(household.id, BusinessType.RETAIL)
        if tracker is not None:
            count = len(tracker.business_managers)
    message = UI_pb2.OwnedRetailLotCountMessage()
    message.owned_lot_count = count
    Distributor.instance().add_op_with_no_owner(
        shared_messages.create_message_op(message, Consts_pb2.MSG_RETAIL_OWNED_LOT_COUNT))
@sims4.commands.Command('retail.get_retail_objects', command_type=(CommandType.Automation), pack=(Pack.EP01))
def get_retail_objects(_connection=None):
    """Automation: emit the object id of every retail object on the lot."""
    emit = sims4.commands.AutomationOutput(_connection)
    emit('GetRetailObjects; Status:Begin')
    for retail_obj in RetailUtils.get_all_retail_objects():
        emit('GetRetailObjects; Status:Data, ObjId:{}'.format(retail_obj.id))
    emit('GetRetailObjects; Status:End')
@sims4.commands.Command('retail.set_purchase_intents_to_almost_max', command_type=(CommandType.Automation), pack=(Pack.EP01))
def set_purchase_intents_to_almost_max(_connection=None):
    """Automation: raise every instanced Sim's purchase intent to max - 1.

    Sims whose purchase-intent statistic is absent (add=False) or already at
    or above max - 1 are left untouched.
    """
    stat_type = RetailCustomerSituation.PURCHASE_INTENT_STATISTIC
    almost_max_value = stat_type.max_value - 1
    for sim in services.sim_info_manager().instanced_sims_gen():
        stat = sim.get_statistic(stat_type, add=False)
        if stat is not None and stat.get_value() < almost_max_value:
            stat.set_value(almost_max_value)
276058 | from constants.processes import ptid_visualize_movement
from defs import *
from meta.process_base import Process
from meta.registry_exports import Exports
from providers.movement import passing_movement
from utilities import visuals, world
# Palette for movement visuals (hex RGB strings).
COLOR_WINE = "#6d213c"
COLOR_RAW_UMBER = "#946846"
COLOR_DARK_KHAKI = "#baab68"
COLOR_STRAW = "#e3c16f"
COLOR_MELLOW_YELLOW = "#faff70"
def draw_arrow(room_name: str,
               origin_x: int,
               origin_y: int,
               destination_x: int,
               destination_y: int,
               color: str,
               width: float) -> None:
    """Draw an arrow from origin to destination as a shaft line plus a head circle.

    The shaft stops at 95% of the way and the head circle sits at 90%, so the
    arrow points at the destination tile without covering it.
    """
    delta_x = destination_x - origin_x
    delta_y = destination_y - origin_y
    visuals.draw_line(room_name, origin_x, origin_y,
                      origin_x + delta_x * 0.95, origin_y + delta_y * 0.95, {
                          'opacity': 0.45,
                          'fill': color,
                          'width': width,
                      })
    visuals.draw_circle(room_name, origin_x + delta_x * 0.9, origin_y + delta_y * 0.9, {
        'opacity': 0.45,
        'fill': color,
        'radius': width * 1.5,
    })
class VisualizeMovement(Process):
    """Process that draws an arrow for every movement registered this tick.

    Reads the per-room movement intents recorded by passing_movement and
    renders each as a straw-colored arrow from origin to destination tile.
    """
    ptid = ptid_visualize_movement

    def run(self) -> None:
        data = passing_movement.get_current_movements_per_room()
        # NOTE(review): Object.keys / .entries() are JavaScript APIs -- this
        # module is evidently transpiled to JS (e.g. Transcrypt for Screeps).
        for room_name in Object.keys(data):
            room_data = data[room_name]
            if room_data is None:
                continue
            for origin_xy, (destination_xy, move_reason) in list(room_data.entries()):
                # Packed ints -> (x, y) tile coordinates.
                origin_x, origin_y = world.int_to_xy(origin_xy)
                destination_x, destination_y = world.int_to_xy(destination_xy)
                draw_arrow(room_name, origin_x, origin_y, destination_x, destination_y, COLOR_STRAW, 0.1)
# Register this process with the global registry.
exports = Exports().process(VisualizeMovement)
| StarcoderdataPython |
3388741 | <filename>src/logger_setting/my_logger.py
# Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved.
import logging
import os
from src.setting.setting import my_logger_path
# Log line layout: timestamp, level, source location and message.
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d-%(funcName)s - %(message)s "
log_parent_path = os.path.join(my_logger_path, 'log_dir')
log_path = os.path.join(log_parent_path, 'example_file_name.log')
# makedirs(exist_ok=True) replaces the previous exists()/mkdir() pair: it is
# free of the check-then-create race and also creates any missing
# intermediate directories (mkdir failed when my_logger_path did not exist).
os.makedirs(log_parent_path, exist_ok=True)
# Append-mode file logging at DEBUG level for the whole process.
logging.basicConfig(filename=log_path,
                    filemode='a',
                    format=LOG_FORMAT,
                    level=logging.DEBUG)
def get_logger():
    """Return the root logger configured by this module's basicConfig call."""
    root_logger = logging.getLogger()
    return root_logger
| StarcoderdataPython |
6441107 | <reponame>khanhnguyen21006/ViLT
import math
import torch
import torch.nn as nn
from torch.nn.modules.utils import _single
class ConvTBC(nn.Module):
    """1D convolution over an input of shape [seq_len, batch_size, in_channels].
    The implementation uses GEMM to perform the convolution. This
    implementation is faster than cuDNN for small kernel sizes. It is the same
    as torch.nn.Conv1d, except it accepts the kernel size as the first
    dimension (instead of the last) in the weight matrix. The kernel size is
    the time dimension, the number of words in the window. The in_channels is
    the input hidden size and the out_channels is the output hidden size.
    """

    def __init__(self, in_channels, out_channels, kernel_size, dropout=0,
                 padding=0, weight_norm=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # dropout is only used to scale the init std below; this module does
        # not apply dropout itself.
        self.dropout = dropout
        self.kernel_size = _single(kernel_size)
        self.padding = _single(padding)
        # Weight layout is (time, in, out) -- kernel dimension first, unlike
        # torch.nn.Conv1d's (out, in, time).
        self.weight = torch.nn.Parameter(torch.Tensor(
            self.kernel_size[0], in_channels, out_channels))
        self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
        self.weight_norm = weight_norm
        self.reset_parameters()

    def reset_parameters(self):
        # See A.3. in Gehring et al. (2017) for the justification of the
        # constant 4: https://arxiv.org/pdf/1705.03122.
        std = math.sqrt((4 * (1.0 - self.dropout)) /
                        (self.kernel_size[0] * self.in_channels))
        self.weight.data.normal_(mean=0, std=std)
        self.bias.data.fill_(0)
        # Weight normalization is a reparameterization that decouples the
        # magnitude of a weight tensor from its direction. The norm is computed
        # independently per output channel (dim 2).
        if self.weight_norm:
            nn.utils.weight_norm(self, dim=2)

    def forward(self, input):
        # conv_tbc expects a contiguous (time, batch, channel) tensor.
        return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0])

    def __repr__(self):
        s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', padding={padding}')
        # NOTE(review): self.bias is always a Parameter in __init__, so this
        # branch never fires -- confirm whether a bias=None mode was intended.
        if self.bias is None:
            s += ', bias=False'
        s += ')'
        return s.format(name=self.__class__.__name__, **self.__dict__)
class ConvBCT(nn.Conv1d):
    """1D convolution over an input of shape [batch_size, in_channels, seq_len].
    A wrapper of Conv1d with Gehring initialization and weight normalization.
    """

    def reset_parameters(self):
        # See A.3. in Gehring et al. (2017) for the justification of the
        # constant 4: https://arxiv.org/pdf/1705.03122.
        # fan_in = in_channels * prod(kernel_size)
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        std = math.sqrt(4 / n)
        self.weight.data.normal_(mean=0, std=std)
        self.bias.data.fill_(0)
        # Weight normalization is a reparameterization that decouples the
        # magnitude of a weight tensor from its direction. The norm is computed
        # independently per output channel (dim 0).
        nn.utils.weight_norm(self, dim=0)
| StarcoderdataPython |
3431136 | from snovault import (
collection,
calculated_property,
load_schema,
)
from .base import (
Item,
)
from snovault.attachment import ItemWithAttachment
@collection(
    name='images',
    unique_key='image:filename',
    properties={
        'title': 'Image',
        'description': 'Listing of portal images',
    })
class Image(ItemWithAttachment, Item):
    """Portal image item whose binary payload is stored as an attachment."""
    item_type = 'image'
    schema = load_schema('encoded:schemas/image.json')
    # Restrict attachments to browser-renderable raster formats.
    schema['properties']['attachment']['properties']['type']['enum'] = [
        'image/png',
        'image/jpeg',
        'image/gif',
    ]
    embedded = ['submitted_by']

    def unique_keys(self, properties):
        # The attachment's download filename doubles as the image:filename key.
        keys = super(Image, self).unique_keys(properties)
        value = properties['attachment']['download']
        keys.setdefault('image:filename', []).append(value)
        return keys

    @calculated_property(schema={
        "title": "Thumb Nail",
        "description": "Image url",
        "type": "string",
    })
    def thumb_nail(self, request, attachment):
        # Absolute URL of the attachment, exposed as the thumbnail.
        return self.jsonld_id(request) + attachment['href']

    @calculated_property(schema={
        "title": "Download Url",
        "description": "Download Url",
        "type": "string",
    })
    def download_url(self, request, attachment):
        return self.jsonld_id(request) + attachment['href']
1713776 | <filename>Python Fundamentals/Text Processing/Exercise/Task09.py
# Expand a run-length encoded string: each digit (or two-digit number) repeats
# the preceding, not-yet-consumed text segment, upper-cased, then report how
# many distinct characters the expansion contains.
text = input()
result = ""
consumed = 0  # number of leading characters of `text` already processed
position = 0
while position < len(text):
    if text[position].isdigit():
        # A digit pair counts as one two-digit repeat count.
        two_digit = position + 1 < len(text) and text[position + 1].isdigit()
        width = 2 if two_digit else 1
        segment = text[consumed:position]
        repeat = int(text[position:position + width])
        result += segment.upper() * repeat
        consumed = position + width
        position += width
    else:
        position += 1
# Distinct characters, in first-appearance order.
symbols = "".join(dict.fromkeys(result))
print(f"Unique symbols used: {len(symbols)}")
print(result)
| StarcoderdataPython |
11257554 | from distutils.core import setup
# see also http://docs.python.org/distutils/setupscript.html
import os
import sys
#import subprocess
import setup_conf
DEBUG = False
#DEBUG = True

# checks
#
# Refuse to install on interpreters older than 2.6.
if sys.version_info < (2 , 6):
    sys.stderr.write("FATAL: sorry, Python versions"
                     " below 2.6 are not supported\n")
    sys.exit(1)

# where modules reside:
#package_dir = {'': setup_conf.PACKAGE_NAME.lower()}
#package_dir = {'': ''}

# Package metadata is centralized in setup_conf; see also
# http://docs.python.org/distutils/setupscript.html
setup(name = setup_conf.PACKAGE_NAME,
      packages=[setup_conf.PACKAGE_NAME.lower()],
      version = setup_conf.PACKAGE_VERSION,
      description="Low frequency variant caller",
      author="<NAME>",
      author_email=setup_conf.PACKAGE_BUGREPORT,
      long_description = """LoFreq-Star is a fast and sensitive variant-caller for inferring single-nucleotide variants (SNVs) from high-throughput sequencing data""",
      # doesn't seem to work
      # requires = ['pysam (>=0.7.5)', 'scipy (>=0.12.0)', 'numpy (>=1.7.1)', 'huddel'],
      #url='https://sourceforge.net/p/lofreq/',
      scripts = [
          'scripts/lofreq2_vcfplot.py',
          'scripts/lofreq2_indel_ovlp.py'
      ],
      # http://pypi.python.org/pypi?%3Aaction=list_classifiers
      classifiers=['Environment :: Console',
                   'Intended Audience :: Science/Research',
                   'Natural Language :: English',
                   'Operating System :: Unix',
                   'Programming Language :: C',
                   'Programming Language :: Python :: 2.7',
                   'Topic :: Scientific/Engineering :: Bio-Informatics',
                   ],
      keywords='bioinformatics'
      )
6535507 | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2021 NV Access Limited, <NAME>
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import enum
class FillType(enum.IntEnum):
    """Fill styles for the inside of a control or text (UIA FillType values)."""
    NONE = 0
    COLOR = 1
    GRADIENT = 2
    PICTURE = 3
    PATTERN = 4
# Human-readable, translatable label for each FillType value.
# The "Translators:" comments below are extracted by gettext tooling; keep
# each one directly above the entry it describes. pgettext is presumably
# injected as a builtin by NVDA's translation infrastructure -- TODO confirm.
FillTypeLabels = {
    # Translators: a style of fill type (to color the inside of a control or text)
    FillType.NONE: pgettext("UIAHandler.FillType", "none"),
    # Translators: a style of fill type (to color the inside of a control or text)
    FillType.COLOR: pgettext("UIAHandler.FillType", "color"),
    # Translators: a style of fill type (to color the inside of a control or text)
    FillType.GRADIENT: pgettext("UIAHandler.FillType", "gradient"),
    # Translators: a style of fill type (to color the inside of a control or text)
    FillType.PICTURE: pgettext("UIAHandler.FillType", "picture"),
    # Translators: a style of fill type (to color the inside of a control or text)
    FillType.PATTERN: pgettext("UIAHandler.FillType", "pattern"),
}
# Some newer UIA constants that could be missing
class UIAutomationType(enum.IntEnum):
    """Data-type ids for UIA properties/parameters.

    Presumably mirrors the Windows UIAutomationType enumeration; the OUT_*
    members are the corresponding out-parameter variants -- TODO confirm
    against uiautomationcore.h.
    """
    INT = 1
    BOOL = 2
    STRING = 3
    DOUBLE = 4
    POINT = 5
    RECT = 6
    ELEMENT = 7
    ARRAY = 8
    OUT = 9
    INT_ARRAY = 10
    BOOL_ARRAY = 11
    STRING_ARRAY = 12
    DOUBLE_ARRAY = 13
    POINT_ARRAY = 14
    RECT_ARRAY = 15
    ELEMENT_ARRAY = 16
    OUT_INT = 17
    OUT_BOOL = 18
    OUT_STRING = 19
    OUT_DOUBLE = 20
    OUT_POINT = 21
    OUT_RECT = 22
    OUT_ELEMENT = 23
    OUT_INT_ARRAY = 24
    OUT_BOOL_ARRAY = 25
    OUT_STRING_ARRAY = 26
    OUT_DOUBLE_ARRAY = 27
    OUT_POINT_ARRAY = 28
    OUT_RECT_ARRAY = 29
    OUT_ELEMENT_ARRAY = 30
class WinConsoleAPILevel(enum.IntEnum):
    """
    Defines actively used Windows Console versions and the levels of custom code required
    for each. Higher values indicate more capable consoles needing fewer workarounds.
    """
    # Represents a console before microsoft/terminal#4018 was merged.
    # These consoles do not support UIA word navigation and require a number
    # of text range workarounds.
    END_INCLUSIVE = 0
    # Represents a console with microsoft/terminal#4018, but without
    # resolution of microsoft/terminal#2161 (text formatting)
    # or microsoft/terminal#6986 (extraneous empty lines).
    # This is a significant improvement over END_INCLUSIVE, so fewer workarounds
    # are required. However, these consoles lack some information
    # (such as text formatting) and require bounding, so are unsuitable for
    # usage by default.
    IMPROVED = 1
    # Represents an IMPROVED console that exposes text formatting and a
    # buffer that does not contain extraneous empty lines.
    FORMATTED = 2
| StarcoderdataPython |
# Sikuli IDE script (Jython / Python 2 syntax -- note the print statement).
# Locate the reference image on screen, then look up a value in a VDict
# (presumably Sikuli's image-keyed visual dictionary -- TODO confirm).
find("1265075160887.png")
d = VDict()
d["1265075226698.png"] = "OK"
# VDict lookups return a list of matches; print the first one.
print d["1265075226698.png"][0]
| StarcoderdataPython |
9679714 | """
GCN model for relation extraction.
"""
import copy
import math
"""
GCN model for relation extraction.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.tree import Tree, head_to_tree, tree_to_adj
from utils import constant, torch_utils
class GCNClassifier(nn.Module):
    """Relation classifier: GCNRelationModel encoder + linear output layer."""

    def __init__(self, opt, emb_matrix=None):
        super().__init__()
        self.gcn_model = GCNRelationModel(opt, emb_matrix=emb_matrix)
        self.classifier = nn.Linear(opt['hidden_dim'], opt['num_class'])
        self.opt = opt

    def forward(self, inputs):
        # Encode, then project the pooled sentence representation to logits.
        encoded, pooling_output = self.gcn_model(inputs)
        return self.classifier(encoded), pooling_output
class GCNRelationModel(nn.Module):
    """Embeds tokens, runs the AGGCN encoder, and pools sentence, subject and
    object representations into a single relation vector via an MLP."""

    def __init__(self, opt, emb_matrix=None):
        super().__init__()
        self.opt = opt
        self.emb_matrix = emb_matrix
        # create embedding layers
        self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)
        self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim']) if opt['pos_dim'] > 0 else None
        # self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim']) if opt['ner_dim'] > 0 else None
        embeddings = (self.emb, self.pos_emb)
        self.init_embeddings()
        # gcn layer
        self.gcn = AGGCN(opt, embeddings)
        # mlp output layer; input is [sentence; subject; object] -> 3 * hidden
        in_dim = opt['hidden_dim'] * 3
        layers = [nn.Linear(in_dim, opt['hidden_dim']), nn.ReLU()]
        for _ in range(self.opt['mlp_layers'] - 1):
            layers += [nn.Linear(opt['hidden_dim'], opt['hidden_dim']), nn.ReLU()]
        self.out_mlp = nn.Sequential(*layers)

    def init_embeddings(self):
        """Load pretrained word vectors (if given) and set the finetune policy."""
        if self.emb_matrix is None:
            # Random init; row 0 (PAD) keeps its default value.
            self.emb.weight.data[1:, :].uniform_(-1.0, 1.0)
        else:
            self.emb_matrix = torch.from_numpy(self.emb_matrix)
            self.emb.weight.data.copy_(self.emb_matrix)
        # decide finetuning
        if self.opt['topn'] <= 0:
            print("Do not finetune word embedding layer.")
            self.emb.weight.requires_grad = False
        elif self.opt['topn'] < self.opt['vocab_size']:
            print("Finetune top {} word embeddings.".format(self.opt['topn']))
            # Zero the gradient for all but the first `topn` embedding rows.
            self.emb.weight.register_hook(lambda x: torch_utils.keep_partial_grad(x, self.opt['topn']))
        else:
            print("Finetune all embeddings.")

    def forward(self, inputs):
        words, masks, pos, deprel, head, subj_pos, obj_pos = inputs  # unpack
        # Sequence lengths from the padding mask.
        l = (masks.data.cpu().numpy() == 0).astype(np.int64).sum(1)
        maxlen = max(l)

        def inputs_to_tree_reps(head, l):
            # Dependency heads -> undirected adjacency matrices padded to maxlen.
            trees = [head_to_tree(head[i], l[i]) for i in range(len(l))]
            adj = [tree_to_adj(maxlen, tree, directed=False).reshape(1, maxlen, maxlen) for tree in trees]
            adj = np.concatenate(adj, axis=0)
            adj = torch.from_numpy(adj)
            return Variable(adj.cuda()) if self.opt['cuda'] else Variable(adj)

        adj = inputs_to_tree_reps(head.data, l)
        h, pool_mask = self.gcn(adj, inputs)
        # pooling: sentence pool over non-pad tokens; max-pool entity spans
        subj_mask, obj_mask = subj_pos.eq(0).eq(0).unsqueeze(2), obj_pos.eq(0).eq(0).unsqueeze(2)  # invert mask
        pool_type = self.opt['pooling']
        h_out = pool(h, pool_mask, type=pool_type)
        subj_out = pool(h, subj_mask, type="max")
        obj_out = pool(h, obj_mask, type="max")
        outputs = torch.cat([h_out, subj_out, obj_out], dim=1)
        outputs = self.out_mlp(outputs)
        return outputs, h_out
class AGGCN(nn.Module):
    """Attention-guided GCN encoder.

    The first pair of sublayer blocks convolves over the dependency-tree
    adjacency; subsequent pairs convolve over multi-head attention-derived
    adjacencies. Layer outputs are concatenated and linearly aggregated.
    """

    def __init__(self, opt, embeddings):
        super().__init__()
        self.opt = opt
        self.in_dim = opt['emb_dim'] + opt['pos_dim']
        self.emb, self.pos_emb = embeddings
        self.use_cuda = opt['cuda']
        self.mem_dim = opt['hidden_dim']
        # rnn layer (optional contextualization before the GCN stack)
        if self.opt.get('rnn', False):
            self.input_W_R = nn.Linear(self.in_dim, opt['rnn_hidden'])
            self.rnn = nn.LSTM(opt['rnn_hidden'], opt['rnn_hidden'], opt['rnn_layers'], batch_first=True, \
                dropout=opt['rnn_dropout'], bidirectional=True)
            self.in_dim = opt['rnn_hidden'] * 2
            self.rnn_drop = nn.Dropout(opt['rnn_dropout'])  # use on last layer output
        self.input_W_G = nn.Linear(self.in_dim, self.mem_dim)
        self.in_drop = nn.Dropout(opt['input_dropout'])
        self.num_layers = opt['num_layers']
        self.layers = nn.ModuleList()
        self.heads = opt['heads']
        self.sublayer_first = opt['sublayer_first']
        self.sublayer_second = opt['sublayer_second']
        # gcn layer: block 0 works on the tree adjacency, later blocks on
        # attention-derived adjacencies (one per head).
        for i in range(self.num_layers):
            if i == 0:
                self.layers.append(GraphConvLayer(opt, self.mem_dim, self.sublayer_first))
                self.layers.append(GraphConvLayer(opt, self.mem_dim, self.sublayer_second))
            else:
                self.layers.append(MultiGraphConvLayer(opt, self.mem_dim, self.sublayer_first, self.heads))
                self.layers.append(MultiGraphConvLayer(opt, self.mem_dim, self.sublayer_second, self.heads))
        self.aggregate_W = nn.Linear(len(self.layers) * self.mem_dim, self.mem_dim)
        self.attn = MultiHeadAttention(self.heads, self.mem_dim)

    def encode_with_rnn(self, rnn_inputs, masks, batch_size):
        """Run the packed bidirectional LSTM over the (sorted) batch."""
        seq_lens = list(masks.data.eq(constant.PAD_ID).long().sum(1).squeeze())
        h0, c0 = rnn_zero_state(batch_size, self.opt['rnn_hidden'], self.opt['rnn_layers'])
        rnn_inputs = nn.utils.rnn.pack_padded_sequence(rnn_inputs, seq_lens, batch_first=True)
        rnn_outputs, (ht, ct) = self.rnn(rnn_inputs, (h0, c0))
        rnn_outputs, _ = nn.utils.rnn.pad_packed_sequence(rnn_outputs, batch_first=True)
        return rnn_outputs

    def forward(self, adj, inputs):
        words, masks, pos, deprel, head, subj_pos, obj_pos = inputs  # unpack
        src_mask = (words != constant.PAD_ID).unsqueeze(-2)
        word_embs = self.emb(words)
        embs = [word_embs]
        if self.opt['pos_dim'] > 0:
            embs += [self.pos_emb(pos)]
        embs = torch.cat(embs, dim=2)
        embs = self.in_drop(embs)
        if self.opt.get('rnn', False):
            embs = self.input_W_R(embs)
            gcn_inputs = self.rnn_drop(self.encode_with_rnn(embs, masks, words.size()[0]))
        else:
            gcn_inputs = embs
        gcn_inputs = self.input_W_G(gcn_inputs)
        layer_list = []
        outputs = gcn_inputs
        for i in range(len(self.layers)):
            if i < 2:
                # First two sublayer blocks use the dependency-tree adjacency.
                outputs = self.layers[i](adj, outputs)
                layer_list.append(outputs)
            else:
                # Later blocks use per-head attention scores as soft adjacency.
                attn_tensor = self.attn(outputs, outputs, src_mask)
                attn_adj_list = [attn_adj.squeeze(1) for attn_adj in torch.split(attn_tensor, 1, dim=1)]
                outputs = self.layers[i](attn_adj_list, outputs)
                layer_list.append(outputs)
        aggregate_out = torch.cat(layer_list, dim=2)
        dcgcn_output = self.aggregate_W(aggregate_out)
        # Mask marks tokens with no edges (padding) for downstream pooling.
        mask = (adj.sum(2) + adj.sum(1)).eq(0).unsqueeze(2)
        return dcgcn_output, mask
class GraphConvLayer(nn.Module):
    """ A GCN module operated on dependency graphs.

    Densely-connected GCN block: each sublayer consumes the concatenation of
    the input and all previous sublayer outputs, and the block output is
    residual-connected back to its input.
    """

    def __init__(self, opt, mem_dim, layers):
        super(GraphConvLayer, self).__init__()
        self.opt = opt
        self.mem_dim = mem_dim
        self.layers = layers
        # Each sublayer emits mem_dim // layers features.
        self.head_dim = self.mem_dim // self.layers
        self.gcn_drop = nn.Dropout(opt['gcn_dropout'])
        # dcgcn layer
        self.Linear = nn.Linear(self.mem_dim, self.mem_dim)
        self.weight_list = nn.ModuleList()
        for i in range(self.layers):
            # Input width grows with each dense connection.
            self.weight_list.append(nn.Linear((self.mem_dim + self.head_dim * i), self.head_dim))
        # NOTE(review): unconditional .cuda() here -- this layer assumes CUDA.
        self.weight_list = self.weight_list.cuda()
        self.Linear = self.Linear.cuda()

    def forward(self, adj, gcn_inputs):
        # gcn layer
        # Degree normalization (+1 for the self loop).
        denom = adj.sum(2).unsqueeze(2) + 1
        outputs = gcn_inputs
        cache_list = [outputs]
        output_list = []
        for l in range(self.layers):
            Ax = adj.bmm(outputs)
            AxW = self.weight_list[l](Ax)
            AxW = AxW + self.weight_list[l](outputs)  # self loop
            AxW = AxW / denom
            gAxW = F.relu(AxW)
            cache_list.append(gAxW)
            # Dense connectivity: feed all previous sublayer outputs forward.
            outputs = torch.cat(cache_list, dim=2)
            output_list.append(self.gcn_drop(gAxW))
        gcn_ouputs = torch.cat(output_list, dim=2)
        # Residual connection back to the block input.
        gcn_ouputs = gcn_ouputs + gcn_inputs
        out = self.Linear(gcn_ouputs)
        return out
class MultiGraphConvLayer(nn.Module):
    """ A GCN module operated on dependency graphs.

    Multi-head variant of GraphConvLayer: runs one densely-connected GCN
    block per attention head (each with its own soft adjacency) and linearly
    combines the concatenated head outputs.
    """

    def __init__(self, opt, mem_dim, layers, heads):
        super(MultiGraphConvLayer, self).__init__()
        self.opt = opt
        self.mem_dim = mem_dim
        self.layers = layers
        self.head_dim = self.mem_dim // self.layers
        self.heads = heads
        self.gcn_drop = nn.Dropout(opt['gcn_dropout'])
        # dcgcn layer
        self.Linear = nn.Linear(self.mem_dim * self.heads, self.mem_dim)
        self.weight_list = nn.ModuleList()
        # Sublayer weights are laid out head-major: index = head * layers + l.
        for i in range(self.heads):
            for j in range(self.layers):
                self.weight_list.append(nn.Linear(self.mem_dim + self.head_dim * j, self.head_dim))
        # NOTE(review): unconditional .cuda() here -- this layer assumes CUDA.
        self.weight_list = self.weight_list.cuda()
        self.Linear = self.Linear.cuda()

    def forward(self, adj_list, gcn_inputs):
        multi_head_list = []
        for i in range(self.heads):
            adj = adj_list[i]
            # Degree normalization (+1 for the self loop).
            denom = adj.sum(2).unsqueeze(2) + 1
            outputs = gcn_inputs
            cache_list = [outputs]
            output_list = []
            for l in range(self.layers):
                index = i * self.layers + l
                Ax = adj.bmm(outputs)
                AxW = self.weight_list[index](Ax)
                AxW = AxW + self.weight_list[index](outputs)  # self loop
                AxW = AxW / denom
                gAxW = F.relu(AxW)
                cache_list.append(gAxW)
                # Dense connectivity within the head.
                outputs = torch.cat(cache_list, dim=2)
                output_list.append(self.gcn_drop(gAxW))
            gcn_ouputs = torch.cat(output_list, dim=2)
            # Residual connection back to the block input.
            gcn_ouputs = gcn_ouputs + gcn_inputs
            multi_head_list.append(gcn_ouputs)
        final_output = torch.cat(multi_head_list, dim=2)
        out = self.Linear(final_output)
        return out
def pool(h, mask, type='max'):
    """Pool hidden states over dim 1, excluding positions flagged by *mask*.

    type='max' uses masked max; 'avg' divides the masked sum by the number of
    unmasked positions; any other value returns the masked sum.
    """
    if type == 'max':
        # Masked positions are pushed to -inf so they can never win the max.
        return torch.max(h.masked_fill(mask, -constant.INFINITY_NUMBER), 1)[0]
    zeroed = h.masked_fill(mask, 0)
    if type == 'avg':
        valid_counts = mask.size(1) - mask.float().sum(1)
        return zeroed.sum(1) / valid_counts
    return zeroed.sum(1)
def rnn_zero_state(batch_size, hidden_dim, num_layers, bidirectional=True):
    """Build zero-initialized (h0, c0) LSTM states.

    NOTE(review): unconditionally moves the states to CUDA; this module
    assumes a GPU is available.
    """
    total_layers = num_layers * 2 if bidirectional else num_layers
    state_shape = (total_layers, batch_size, hidden_dim)
    h0 = c0 = Variable(torch.zeros(*state_shape), requires_grad=False)
    return h0.cuda(), c0.cuda()
def batched_index_select(input, dim, index):
    """Gather entries of `input` along `dim` using per-batch indices.

    `index` is reshaped so that its values line up with axis `dim` of
    `input` (singleton on every other non-batch axis), then expanded to
    match `input`'s shape before calling torch.gather.
    """
    view_shape = [input.shape[0]]
    for axis in range(1, input.dim()):
        view_shape.append(-1 if axis == dim else 1)
    expand_shape = list(input.shape)
    expand_shape[0] = -1
    expand_shape[dim] = -1
    expanded_index = index.view(view_shape).expand(expand_shape)
    return torch.gather(input, dim, expanded_index)
def attention(query, key, mask=None, dropout=None):
    """Compute scaled dot-product attention weights (no value projection).

    Returns the softmax-normalized score matrix; positions where `mask`
    equals 0 are suppressed with a large negative fill before the softmax.
    An optional dropout module is applied to the weights.
    """
    scale = math.sqrt(query.size(-1))
    scores = torch.matmul(query, key.transpose(-2, -1)) / scale
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return weights
def clones(module, N):
    """Return an nn.ModuleList of N independent deep copies of *module*."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
class MultiHeadAttention(nn.Module):
    """Multi-head attention that returns only the attention weight tensor.

    Only query/key projections are kept (no value/output projection); the
    forward pass returns the per-head attention distribution of shape
    (batch, h, len_q, len_k).
    """

    def __init__(self, h, d_model, dropout=0.1):
        super(MultiHeadAttention, self).__init__()
        assert d_model % h == 0
        self.d_k = d_model // h
        self.h = h
        # Two projections: one for queries, one for keys.
        self.linears = clones(nn.Linear(d_model, d_model), 2)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, mask=None):
        if mask is not None:
            # Broadcast the mask over the head dimension.
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)
        projected = []
        for proj, tensor in zip(self.linears, (query, key)):
            heads = proj(tensor).view(nbatches, -1, self.h, self.d_k)
            projected.append(heads.transpose(1, 2))
        q, k = projected
        return attention(q, k, mask=mask, dropout=self.dropout)
| StarcoderdataPython |
180163 | <filename>{{cookiecutter.project_slug}}/{{cookiecutter.app_name}}/forms.py
from django import forms
# https://docs.djangoproject.com/en/1.10/topics/forms/
# https://docs.djangoproject.com/en/1.10/topics/forms/modelforms/ | StarcoderdataPython |
from pathlib import Path

# Output directories are rooted under <cwd>/Results.
# BUGFIX: "ROJECT_ROOT_DIR" in the original looks like a typo for
# "PROJECT_ROOT_DIR".  The corrected name is introduced here and the old
# misspelled name is kept as an alias so existing importers keep working.
PROJECT_ROOT_DIR = Path.cwd().joinpath("Results")
ROJECT_ROOT_DIR = PROJECT_ROOT_DIR  # deprecated alias, kept for backward compatibility

CHECKPOINTS_DIR = PROJECT_ROOT_DIR.joinpath("Checkpoints")
FIGURE_DIR = PROJECT_ROOT_DIR.joinpath("FigureFiles")
DATA_DIR = Path.cwd().joinpath("DataFiles")
TERRAIN_PATH = DATA_DIR.joinpath("SRTM_data_Norway_2.tif")
CONFIGS_PATH = PROJECT_ROOT_DIR.joinpath("Configs")

SEED = 2018       # global RNG seed
SAVE_FIG = False  # whether figures are written to FIGURE_DIR
125938 |
import collections
from .helpers import makeInverse, makeInverseVal
class EdgeFeatures(object):
    """Empty container class.

    NOTE(review): presumably used as a namespace onto which EdgeFeature
    instances are attached as attributes by code outside this chunk —
    confirm against the caller.
    """
    pass
class EdgeFeature(object):
    """Holds the data of a single edge feature and exposes traversal helpers.

    `data` maps source nodes to target nodes; when `doValues` is True each
    target also carries a value (node -> {target: value}).  The inverse
    mapping is either supplied directly (data as a (forward, inverse) tuple)
    or computed via the makeInverse/makeInverseVal helpers.  Results are
    sorted in the canonical node order given by api.C.rank.data.
    """

    def __init__(self, api, metaData, data, doValues):
        self.api = api
        self.meta = metaData        # feature metadata
        self.doValues = doValues    # True when edges carry values
        if type(data) is tuple:
            # Pre-computed (forward, inverse) pair.
            self.data = data[0]
            self.dataInv = data[1]
        else:
            self.data = data
            self.dataInv = (
                makeInverseVal(self.data) if doValues else makeInverse(self.data)
            )

    def items(self):
        """Iterate over (node, targets) pairs of the forward mapping."""
        return self.data.items()

    def f(self, n):
        """Edges *from* node n, canonically sorted.

        Returns a tuple of targets, or of (target, value) pairs when
        doValues is True; empty tuple when n has no outgoing edges.
        """
        if n not in self.data:
            return ()
        Crank = self.api.C.rank.data
        if self.doValues:
            return tuple(sorted(self.data[n].items(), key=lambda mv: Crank[mv[0] - 1]))
        else:
            return tuple(sorted(self.data[n], key=lambda m: Crank[m - 1]))

    def t(self, n):
        """Edges *to* node n (inverse direction), canonically sorted."""
        if n not in self.dataInv:
            return ()
        Crank = self.api.C.rank.data
        if self.doValues:
            return tuple(
                sorted(self.dataInv[n].items(), key=lambda mv: Crank[mv[0] - 1])
            )
        else:
            return tuple(sorted(self.dataInv[n], key=lambda m: Crank[m - 1]))

    def b(self, n):
        """Edges from n in *both* directions (union), canonically sorted.

        For valued edges the forward value wins when a neighbor occurs in
        both directions (forward entries are merged last).
        """
        if n not in self.data and n not in self.dataInv:
            return ()
        Crank = self.api.C.rank.data
        if self.doValues:
            result = {}
            if n in self.dataInv:
                result.update(self.dataInv[n].items())
            if n in self.data:
                result.update(self.data[n].items())
            return tuple(sorted(result.items(), key=lambda mv: Crank[mv[0] - 1]))
        else:
            result = set()
            if n in self.dataInv:
                result |= self.dataInv[n]
            if n in self.data:
                result |= self.data[n]
            return tuple(sorted(result, key=lambda m: Crank[m - 1]))

    def freqList(self, nodeTypesFrom=None, nodeTypesTo=None):
        """Frequency summary of this feature, optionally filtered by the
        node types of the edge's source and/or target.

        Valued features: tuple of (value, count) pairs sorted by descending
        count, then value.  Unvalued features: total edge count (int).
        """
        if nodeTypesFrom is None and nodeTypesTo is None:
            if self.doValues:
                fql = collections.Counter()
                for (n, vals) in self.data.items():
                    for val in vals.values():
                        fql[val] += 1
                return tuple(sorted(fql.items(), key=lambda x: (-x[1], x[0])))
            else:
                fql = 0
                for (n, ms) in self.data.items():
                    fql += len(ms)
                return fql
        else:
            fOtype = self.api.F.otype.v
            if self.doValues:
                fql = collections.Counter()
                for (n, vals) in self.data.items():
                    if nodeTypesFrom is None or fOtype(n) in nodeTypesFrom:
                        for (m, val) in vals.items():
                            if nodeTypesTo is None or fOtype(m) in nodeTypesTo:
                                fql[val] += 1
                return tuple(sorted(fql.items(), key=lambda x: (-x[1], x[0])))
            else:
                fql = 0
                for (n, ms) in self.data.items():
                    if nodeTypesFrom is None or fOtype(n) in nodeTypesFrom:
                        for m in ms:
                            if nodeTypesTo is None or fOtype(m) in nodeTypesTo:
                                # BUGFIX: was `fql += len(ms)`, which counted
                                # the whole target set once per matching
                                # target, over-counting edges and disagreeing
                                # with the unfiltered branch above.  Count
                                # each matching edge exactly once.
                                fql += 1
                return fql
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.