id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1848915 | """
Classes for Repo providers
Subclass the base class, ``RepoProvider``, to support different version
control services and providers.
"""
from datetime import timedelta
import json
import os
import time
from prometheus_client import Gauge
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.httputil import url_concat
from traitlets import Dict, Unicode, default
from traitlets.config import LoggingConfigurable
GITHUB_RATE_LIMIT = Gauge('binderhub_github_rate_limit_remaining', 'GitHub rate limit remaining')
def tokenize_spec(spec):
    """Tokenize a GitHub-style spec into parts, error if spec invalid."""
    # A ref may itself contain '/', so split at most twice.
    parts = spec.split('/', 2)
    if len(parts) == 3:
        return parts
    # Not enough components: build a helpful error message.
    msg = 'Spec is not of the form "user/repo/ref", provided: "{spec}".'.format(spec=spec)
    if len(parts) == 2 and parts[-1] != 'master':
        # Likely the ref was simply omitted; suggest the default branch.
        msg += ' Did you mean "{spec}/master"?'.format(spec=spec)
    raise ValueError(msg)
def strip_suffix(text, suffix):
    """Return *text* with a trailing *suffix* removed, if present.

    BUGFIX: previously an empty suffix matched (``endswith("")`` is always
    True) and ``text[:-0]`` truncated the whole string to ``""``.  An empty
    suffix now leaves *text* unchanged.
    """
    if suffix and text.endswith(suffix):
        return text[:-len(suffix)]
    return text
class RepoProvider(LoggingConfigurable):
    """Base class for a repo provider.

    Subclasses must implement :meth:`get_resolved_ref`,
    :meth:`get_repo_url` and :meth:`get_build_slug`.
    """
    name = Unicode(
        help="""
        Descriptive human readable name of this repo provider.
        """
    )
    spec = Unicode(
        help="""
        The spec for this builder to parse
        """
    )
    # The ref exactly as given in the spec, before resolution to an
    # immutable identifier (e.g. a commit sha).
    unresolved_ref = Unicode()
    @gen.coroutine
    def get_resolved_ref(self):
        """Resolve unresolved_ref to an immutable identifier (e.g. a sha)."""
        # Fixed typo in the error message ("overriden" -> "overridden").
        raise NotImplementedError("Must be overridden in child class")
    def get_repo_url(self):
        """Return the URL used to clone/fetch this repository."""
        raise NotImplementedError("Must be overridden in the child class")
    def get_build_slug(self):
        """Return a unique slug identifying this repository for builds."""
        raise NotImplementedError("Must be overridden in the child class")
class FakeProvider(RepoProvider):
    """Fake provider for local testing of the UI
    """
    async def get_resolved_ref(self):
        # Static fake sha; no network round-trip needed for UI testing.
        return "1a2b3c4d5e6f"
    def get_repo_url(self):
        # Not a real clone URL; sufficient for rendering the UI.
        return "fake/repo"
    def get_build_slug(self):
        # Fixed fake user/repo pair -> slug "Rick-Morty".
        return '{user}-{repo}'.format(user='Rick', repo='Morty')
class GitHubRepoProvider(RepoProvider):
    """Repo provider for the GitHub service"""
    name = Unicode('GitHub')
    client_id = Unicode(config=True,
        help="""GitHub client id for authentication with the GitHub API
        For use with client_secret.
        Loaded from GITHUB_CLIENT_ID env by default.
        """
    )
    @default('client_id')
    def _client_id_default(self):
        return os.getenv('GITHUB_CLIENT_ID', '')
    client_secret = Unicode(config=True,
        help="""GitHub client secret for authentication with the GitHub API
        For use with client_id.
        Loaded from GITHUB_CLIENT_SECRET env by default.
        """
    )
    @default('client_secret')
    def _client_secret_default(self):
        return os.getenv('GITHUB_CLIENT_SECRET', '')
    access_token = Unicode(config=True,
        help="""GitHub access token for authentication with the GitHub API
        Loaded from GITHUB_ACCESS_TOKEN env by default.
        """
    )
    @default('access_token')
    def _access_token_default(self):
        return os.getenv('GITHUB_ACCESS_TOKEN', '')
    auth = Dict(
        help="""Auth parameters for the GitHub API access
        Populated from client_id, client_secret, access_token.
        """
    )
    @default('auth')
    def _default_auth(self):
        # Only include credentials that are actually configured (non-empty).
        auth = {}
        for key in ('client_id', 'client_secret', 'access_token'):
            value = getattr(self, key)
            if value:
                auth[key] = value
        return auth
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Split the spec into user/repo/ref up front; a trailing ".git" on
        # the repo name is tolerated and stripped.
        self.user, self.repo, self.unresolved_ref = tokenize_spec(self.spec)
        self.repo = strip_suffix(self.repo, ".git")
    def get_repo_url(self):
        """Return the https URL of the repository on github.com."""
        return "https://github.com/{user}/{repo}".format(user=self.user, repo=self.repo)
    @gen.coroutine
    def get_resolved_ref(self):
        """Resolve unresolved_ref to a commit sha via the GitHub API.

        Returns None when the repo/ref does not exist (HTTP 404).  The
        resolved sha is cached on ``self.resolved_ref`` so subsequent calls
        do not hit the API again.  Raises ValueError when the GitHub rate
        limit has been exhausted.
        """
        if hasattr(self, 'resolved_ref'):
            # Already resolved once; reuse the cached sha.
            return self.resolved_ref
        client = AsyncHTTPClient()
        api_url = "https://api.github.com/repos/{user}/{repo}/commits/{ref}".format(
            user=self.user, repo=self.repo, ref=self.unresolved_ref
        )
        self.log.debug("Fetching %s", api_url)
        if self.auth:
            # Add auth params. After logging!
            api_url = url_concat(api_url, self.auth)
        try:
            resp = yield client.fetch(api_url, user_agent="BinderHub")
        except HTTPError as e:
            if (
                e.code == 403
                and e.response
                and e.response.headers.get('x-ratelimit-remaining') == '0'
            ):
                # 403 with zero remaining quota means we are rate-limited.
                rate_limit = e.response.headers['x-ratelimit-limit']
                reset_timestamp = int(e.response.headers['x-ratelimit-reset'])
                reset_seconds = int(reset_timestamp - time.time())
                self.log.error(
                    "GitHub Rate limit ({limit}) exceeded. Reset in {delta}.".format(
                        limit=rate_limit,
                        delta=timedelta(seconds=reset_seconds),
                    )
                )
                # round expiry up to nearest 5 minutes
                minutes_until_reset = 5 * (1 + (reset_seconds // 60 // 5))
                raise ValueError("GitHub rate limit exceeded. Try again in %i minutes."
                    % minutes_until_reset
                )
            elif e.code == 404:
                # Repo or ref not found; callers treat None as "not found".
                return None
            else:
                raise
        # record and log github rate limit
        remaining = int(resp.headers['x-ratelimit-remaining'])
        rate_limit = int(resp.headers['x-ratelimit-limit'])
        reset_timestamp = int(resp.headers['x-ratelimit-reset'])
        # record with prometheus
        GITHUB_RATE_LIMIT.set(remaining)
        # log at different levels, depending on remaining fraction
        fraction = remaining / rate_limit
        if fraction < 0.2:
            log = self.log.warning
        elif fraction < 0.5:
            log = self.log.info
        else:
            log = self.log.debug
        # str(timedelta) looks like '00:32'
        delta = timedelta(seconds=int(reset_timestamp - time.time()))
        log("GitHub rate limit remaining {remaining}/{limit}. Reset in {delta}.".format(
            remaining=remaining, limit=rate_limit, delta=delta,
        ))
        ref_info = json.loads(resp.body.decode('utf-8'))
        if 'sha' not in ref_info:
            # TODO: Figure out if we should raise an exception instead?
            return None
        # Cache for subsequent calls (checked via hasattr above).
        self.resolved_ref = ref_info['sha']
        return self.resolved_ref
    def get_build_slug(self):
        """Return a build slug of the form ``user-repo``."""
        return '{user}-{repo}'.format(user=self.user, repo=self.repo)
| StarcoderdataPython |
1731652 | <reponame>JosephChataignon/pyclustering
"""!
@brief Templates for tests of Local Excitatory Global Inhibitory Oscillatory Network (LEGION).
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.nnet.legion import legion_network;
from pyclustering.nnet import conn_type;
from pyclustering.utils import extract_number_oscillations;
class LegionTestTemplates:
    """Reusable assertion templates for LEGION network tests."""
    @staticmethod
    def templateOscillationsWithStructures(type_conn, ccore_flag):
        """Assert every oscillator oscillates more than once for the given
        connection structure.

        BUGFIX: the ``type_conn`` argument was previously ignored and the
        network was always built with ``conn_type.LIST_BIDIR``; the requested
        structure is now honored (which is the point of this template).
        """
        net = legion_network(4, type_conn=type_conn, ccore=ccore_flag)
        dynamic = net.simulate(500, 1000, [1, 1, 1, 1])
        for i in range(len(net)):
            assert extract_number_oscillations(dynamic.output, i) > 1
    @staticmethod
    def templateSyncEnsembleAllocation(stimulus, params, type_conn, sim_steps, sim_time, expected_clusters, ccore_flag):
        """Simulate up to five times; assert the expected synchronous
        ensembles are allocated on at least one attempt (the network is
        stochastic, so a single run may fail)."""
        result_testing = False
        for _ in range(5):
            net = legion_network(len(stimulus), params, type_conn, ccore=ccore_flag)
            dynamic = net.simulate(sim_steps, sim_time, stimulus)
            ensembles = dynamic.allocate_sync_ensembles(0.1)
            if ensembles != expected_clusters:
                continue
            result_testing = True
            break
        assert result_testing
    @staticmethod
    def templateOutputDynamicInformation(stimulus, params, type_conn, sim_steps, sim_time, ccore_flag):
        """Assert that a simulation produces non-empty output, inhibitor and
        time series."""
        legion_instance = legion_network(len(stimulus), params, type_conn, ccore=ccore_flag)
        dynamic = legion_instance.simulate(sim_steps, sim_time, stimulus)
        assert len(dynamic.output) > 0
        assert len(dynamic.inhibitor) > 0
        assert len(dynamic.time) > 0
6409494 | <reponame>UACoreFacilitiesIT/UA-Clarity-LIMS-Tools
"""Tools that interact with Clarity's REST database."""
import os
import re
import argparse
from dataclasses import dataclass, field, astuple
from collections import namedtuple
import requests
from bs4 import BeautifulSoup, Tag
from jinja2 import Template
from ua_clarity_api import ua_clarity_api
# Package authorship metadata.  The <NAME>/<EMAIL> placeholders are
# artifacts of dataset anonymization, not real values.
__author__ = (
    "<NAME>, <NAME>, <NAME>,",
    "<NAME>, <NAME>")
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class ClarityExceptions:
    """Namespace holding custom Clarity exception types.

    Raised throughout this module as e.g.
    ``raise ClarityExceptions.CallError(...)``.
    """
    class TechnicianError(Exception):
        """A Clarity user technician has made a mistake."""
    class EPPError(Exception):
        """The EPP script provided is not correct."""
    class CallError(Exception):
        """This method call wasn't well-formed."""
# Lightweight record for an artifact discovered in a previous step: its REST
# uri, its output-type (e.g. "Analyte"/"ResultFile"), and its
# output-generation-type ("PerInput" or "PerAllInputs").
PreviousStepArtifact = namedtuple(
    "PreviousStepArtifact", ["uri", "art_type", "generation_type"])
@dataclass
class Sample:
    """Stores the fields of a Sample."""
    name: str = ""  # sample name from the <name> element
    uri: str = None  # REST URI of the sample resource
    date_received: str = None  # value of <date-received>
    project_uri: str = None  # None for control samples
    project_name: str = None  # resolved from project_uri; None for controls
    artifact_uri: str = None  # 0th artifact uri, ?state stripped
    udf: dict = field(default_factory=dict)  # UDF name -> value
@dataclass
class Artifact:
    """Stores the fields of an Artifact."""
    name: str = None  # artifact name from the <name> element
    uri: str = None  # REST URI, ?state stripped
    art_type: str = None  # e.g. "Analyte" or "ResultFile"
    sample_uri: str = None  # uri of the first associated sample
    container_uri: str = None  # None when the artifact has no container
    container_name: str = None  # only populated when container info requested
    container_type: str = None  # only populated when container info requested
    location: str = None  # well position within the container, e.g. "A:1"
    parent_process: str = None  # uri of the process that produced it
    reagent_label: str = None  # reagent-label (index) name, if any
    udf: dict = field(default_factory=dict)  # UDF name -> value
@dataclass
class Process:
    """Stores the fields of a Process."""
    uri: str = None  # REST URI of the process resource
    # NOTE: default is the *string* "None", not the None object.
    technician: str = "None"  # "First Last" of the technician
    udf: dict = field(default_factory=dict)  # process-level UDF name -> value
class ClarityTools():
    """Tools that interact with Clarity without a step. These tools are general
    use functions for when caller is not attached to a step and knows the
    endpoints they want to perform work on. These methods are not limited by
    the requirement to have a step uri.
    """
    def __init__(self, host, username, password):
        """Initializes a ClarityAPI object for use within method calls.
        username and password should be strings representing your creds in the
        clarity environment.
        host should be a string representing the url of your clarity api
        endpoint.
        """
        self.api = ua_clarity_api.ClarityApi(host, username, password)
    def get_samples(self, uris, prj_info=True):
        """Returns a list of Sample data classes with data populated from the
        get responses of given clarity sample URIs.
        Arguments:
            uris (list): List of Sample URIs harvested from the clarity env.
            prj_info (bool): When False, skip resolving project uri/name
                for each sample (avoids the extra batch GET of projects).
        Returns:
            samples (list): Returns a list of Sample data classes.
        """
        samples = list()
        samples_soup = BeautifulSoup(self.api.get(uris), "xml")
        project_uris = set()
        for sample_data in samples_soup.find_all("smp:sample"):
            sample = Sample()
            sample.name = sample_data.find("name").text
            sample.uri = sample_data["uri"]
            sample.date_received = sample_data.find("date-received").text
            # Find the project uri if the sample is not a control sample.
            if prj_info:
                if sample_data.find("control-type"):
                    sample.project_uri = None
                    sample.project_name = None
                else:
                    project = sample_data.find("project")
                    sample.project_uri = project["uri"]
                    project_uris.add(project["uri"])
            # Find 0th-artifact tag and extract data.
            artifact = sample_data.find("artifact")
            # Strip the ?state= query so the uri is stable across states.
            sample.artifact_uri = artifact["uri"].split('?')[0]
            # Extract all UDF names and values.
            for udf_data in sample_data.find_all("udf:field"):
                sample.udf[udf_data["name"]] = udf_data.text
            samples.append(sample)
        # Map the projects to their names.
        if prj_info:
            projects_soup = BeautifulSoup(
                self.api.get(list(project_uris)), "xml")
            project_uri_name = dict()
            for soup in projects_soup.find_all("prj:project"):
                project_uri_name[soup["uri"]] = soup.find("name").text.strip()
            # Assign project names to each sample.
            for sample in samples:
                sample.project_name = project_uri_name.get(sample.project_uri)
        return samples
    def get_arts_from_samples(self, sample_uris):
        """Map sample uris to their respective artifact uris from clarity.
        Arguments:
            sample_uris (list): A list of sample uris. All sample uris given
                must have at least one artifact uri in clarity.
        Returns:
            smp_art_uris (dict): The sample uri mapped to the artifact uri.
        """
        batch_soup = BeautifulSoup(self.api.get(sample_uris), "xml")
        smp_art_uris = dict()
        for sample_soup in batch_soup.find_all("smp:sample"):
            # Only the first <artifact> is used; ?state is stripped.
            smp_art_uris[sample_soup["uri"]] = sample_soup.find(
                "artifact")["uri"].split('?')[0]
        return smp_art_uris
    def get_udfs(self, target):
        """Find all of the udfs with attach-to-name: target attributes.
        Arguments:
            target (str): A string representation of what attach-to-name
                attributes to harvest.
        Returns:
            target_udfs (list): A list of all udf names for specified target.
        Raises:
            ClarityExceptions.CallError: If there are no target udfs found.
        """
        udfs = self.api.get(
            "configuration/udfs", parameters={"attach-to-name": target})
        udf_soup = BeautifulSoup(udfs, "xml")
        target_udfs = [tag["name"] for tag in udf_soup.find_all("udfconfig")]
        if not target_udfs:
            raise ClarityExceptions.CallError(
                f"There are no UDFs for {target}. Either that target"
                f" doesn't exist, or you forgot that this argument is"
                f" case sensitive.")
        return target_udfs
    def set_reagent_label(self, limsid_label):
        """Set reagent-label of all artifact limsid keys to their mapped value.
        Arguments:
            limsid_label (dict {str: str}): maps limsid's to
                reagent-label information. If a value is Falsey, then all
                labels will be removed.
        Side Effects:
            If successful, this method will add a reagent-label to each
                artifact.
            Overwrites the original reagent-label if it existed.
        Raises:
            RuntimeError: If there was an exception raised while POSTing.
        """
        art_uris = [f"artifacts/{key}" for key in limsid_label.keys()]
        art_soup = BeautifulSoup(self.api.get(art_uris), "xml")
        for art in art_soup.find_all("art:artifact"):
            art_limsid = art["limsid"]
            reagent_label = limsid_label.get(art_limsid)
            if reagent_label:
                # New label goes immediately after the <sample> element.
                label_tag = f'<reagent-label name="{reagent_label}"/>'
                label_tag = BeautifulSoup(label_tag, "xml")
                art.find("sample").insert_after(label_tag)
            else:
                # Falsey value: remove every existing reagent-label.
                [tag.decompose() for tag in art.find_all("reagent-label")]
        # Use Jinja to create the batch update xml.
        template_path = (os.path.join(
            os.path.split(__file__)[0], "batch_artifact_update_template.xml"))
        with open(template_path, "r") as file:
            template = Template(file.read())
        update_xml = template.render(artifacts=[
            str(tag) for tag in art_soup.find_all("art:artifact")])
        self.api.post(f"{self.api.host}artifacts/batch/update", update_xml)
    def step_router(self, wf_name, dest_stage_name, art_uris, action="assign"):
        """Assign/unassign artifacts from current step to a destination step.
        Assigning will move the artifacts to the given destination step.
        Unassigning will remove the artifact from the step/queue, but does
        not remove the artifact from the clarity environment
        Arguments:
            wf_name (string): The workflow name in which the destination
                step is.
            dest_stage_name (string): The step name that is the destination
                for the artifacts.
            art_uris (list): The list of artifact_uris to route to the
                destination step.
            action (string): Either 'assign' or 'unassign', determining which
                action to perform.
        Side Effects:
            If successful, assigns or unassigns the artifacts to the
                destination step in Clarity.
        Raises:
            ClarityExceptions.CallError: If that workflow or stage isn't found.
            RuntimeError: If there was an exception raised while POSTing.
            RuntimeError: If for some other, unknown reason the artifact was
                not routed.
        """
        # Remove the ?state information from the artifacts.
        artifact_uris = [uri.split('?')[0] for uri in art_uris]
        # Extract all of the workflow names from Clarity.
        workflows_url = f"{self.api.host}configuration/workflows"
        workflow_cnf_response = (self.api.get(
            workflows_url, parameters={"name": wf_name}))
        workflow_cnf_soup = BeautifulSoup(workflow_cnf_response, "xml")
        workflow_cnf_soup = workflow_cnf_soup.find("workflow")
        # If the workflow passed in doesn't exist or isn't active.
        if not workflow_cnf_soup:
            raise ClarityExceptions.CallError(
                f"The workflow {wf_name} doesn't exist.")
        else:
            if not workflow_cnf_soup["status"] == "ACTIVE":
                raise ClarityExceptions.CallError(
                    f"The worklow {wf_name} is not active.")
        # Find all of the stage names.
        workflow_soup = BeautifulSoup(
            self.api.get(workflow_cnf_soup["uri"]), "xml")
        wf_stages = workflow_soup.find_all("stage")
        stage_names = [stage["name"] for stage in wf_stages]
        # If that stage name isn't in that workflow, throw an error.
        if dest_stage_name not in stage_names:
            raise ClarityExceptions.CallError(
                f"There is no {dest_stage_name} stage(step) in the {wf_name}"
                f" format.")
        stage_uri = workflow_soup.find(
            "stage", attrs={"name": dest_stage_name})["uri"]
        stage_soup = BeautifulSoup(self.api.get(stage_uri), "xml")
        # Find the step uri which will provide the location of the queue.
        try:
            step_uri = stage_soup.find("step")["uri"].split('/')[-1]
            qc_step = False
        except TypeError:
            # No <step> tag means this is a QC protocol stage; those queues
            # are not reachable the same way, so skip queue verification.
            qc_step = True
        # Build and submit the routing message.
        routing_template_path = os.path.join(
            os.path.split(__file__)[0],
            "routing_template.xml")
        with open(routing_template_path, "r") as file:
            template = Template(file.read())
        routing_xml = template.render(
            stage_uri=stage_uri,
            artifact_uris=artifact_uris,
            action=action)
        try:
            self.api.post(f"{self.api.host}route/artifacts", routing_xml)
        except requests.exceptions.HTTPError:
            raise RuntimeError(f"The post for \n\n{routing_xml}\n\n failed.")
        # Check the queue for this stagename to make sure the artifacts were
        # actually added to it if the queue is easily accessible (not a qc
        # protocol step, as those all have different queues).
        if qc_step is False and action == "assign":
            queue_uri = f"{self.api.host}queues/{step_uri}"
            queue_soup = BeautifulSoup(self.api.get(queue_uri), "xml")
            queue_art_uris = [
                soup["uri"] for soup in queue_soup.find_all("artifact")]
            for uri in artifact_uris:
                # Limsids beginning "92-" appear to be file artifacts and are
                # not expected in the queue -- NOTE(review): confirm prefix.
                file_uri = uri.split('/')[-1].startswith("92-")
                if uri not in queue_art_uris and not file_uri:
                    raise RuntimeError(f"The artifact: {uri} was not queued.")
class StepTools():
    """Defines step specific methods which act upon a given step uri in
    Clarity. This class can be instantiated directly or from a Clarity EPP
    script.
    """
    def __init__(self, username=None, password=None, step_uri=None):
        """Initialize LimsTools with information to access step details.
        username and password should be strings representing your creds in the
        clarity environment.
        step_uri should be a string representing the step endpoint in your
        clarity environment that you wish to perform work on.

        BUGFIX: the password default was a corrupted ``<PASSWORD>``
        placeholder (a syntax error); it is now ``None`` like the other
        optional credentials.
        """
        if username and password and step_uri:
            UserData = namedtuple(
                "UserData", ["username", "password", "step_uri"])
            self.args = UserData(username, password, step_uri)
        else:
            # No explicit creds: fall back to EPP command-line arguments.
            self.args = self.setup_arguments()
        # The api host is everything through the "v2/" path component.
        self.host = re.sub("v2/.*", "v2/", self.args.step_uri)
        self.api = ua_clarity_api.ClarityApi(
            self.host, self.args.username, self.args.password)
        self.step_details = f"{self.args.step_uri}/details"
        self.step_soup = BeautifulSoup(self.api.get(self.step_details), "xml")
    def setup_arguments(self):
        """Incorporate EPP arguments into your StepTools object.
        Returns:
            (arguments): The object that holds all of the arguments that
                were parsed (at object.{dest}).
        """
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "-u", dest="username", required=True)
        parser.add_argument(
            "-p", dest="password", required=True)
        parser.add_argument(
            "-s", dest="step_uri", required=True)
        parser.add_argument(
            "-r", dest="input_files", nargs='+')
        parser.add_argument(
            "-o", dest="output_files", nargs='+')
        parser.add_argument(
            "--log", dest="log")
        parser.add_argument(
            "nargs", nargs=argparse.REMAINDER)
        return parser.parse_args()
    def get_artifacts(self, stream, uri_only=False, container_info=False):
        """Return the artifact information as a list of Artifact data classes.
        Arguments:
            stream (str): The source of the samples, either "input" or
                "output".
            uri_only (bool): When True, return plain uri strings instead of
                Artifact data classes.
            container_info (bool): When True, also resolve container
                name/type via a batch GET.
        Returns:
            artifacts (list): Returns a list of Artifact data classes.
        Notes:
            Does not include 'PerAllInputs' shared output files.
        """
        art_uris = list()
        # Get URI for target artifacts.
        for iomap in self.step_soup.find_all("input-output-map"):
            target = iomap.find(stream)
            # If there are no {stream}s, skip this iomap soup.
            if target is None:
                continue
            # Only add perInput output uri's.
            if stream == "output":
                if target["output-generation-type"] == "PerInput":
                    art_uris.append(target["uri"])
            # Add input uri's.
            else:
                art_uris.append(target["uri"])
        if art_uris:
            batch_artifacts = BeautifulSoup(self.api.get(art_uris), "xml")
        else:
            # Nothing to fetch; return the empty list.
            return art_uris
        # Store all artifact data.
        artifacts = list()
        con_uris = set()
        for artifact_data in batch_artifacts.find_all("artifact"):
            artifact = Artifact()
            artifact.name = artifact_data.find("name").text
            artifact.uri = artifact_data["uri"].split("?")[0]
            artifact.art_type = artifact_data.find("type").text
            artifact.sample_uri = artifact_data.find("sample")["uri"]
            reagent_label = artifact_data.find("reagent-label")
            if reagent_label:
                artifact.reagent_label = reagent_label["name"]
            # If the artifact has no location or container, set as None.
            artifact.container_uri = artifact_data.find("container")
            artifact.location = artifact_data.find("location")
            if artifact.location:
                artifact.location = artifact_data.location.find("value").text
            if artifact.container_uri:
                con_uris.add(artifact.container_uri["uri"])
                artifact.container_uri = artifact.container_uri["uri"]
            # Find Parent Process.
            parent_process = artifact_data.find("parent-process")
            if parent_process:
                parent_process = parent_process["uri"]
            # BUGFIX: the parent process uri was computed but never stored,
            # leaving Artifact.parent_process always None.
            artifact.parent_process = parent_process
            # Construct UDF Map.
            for udf_data in artifact_data.find_all("udf:field"):
                artifact.udf[udf_data["name"]] = udf_data.text
            # Add link only.
            if uri_only:
                artifacts.append(artifact.uri)
            # Store all artifact data.
            else:
                artifacts.append(artifact)
        # Setting the Artifact's con info if desired, by using a batch get.
        ConInfo = namedtuple("ConInfo", ["name", "con_type"])
        con_uri_info = dict()
        if not uri_only and container_info and con_uris:
            con_soups = BeautifulSoup(self.api.get(list(con_uris)), "xml")
            for soup in con_soups.find_all("con:container"):
                con_uri_info[soup["uri"]] = ConInfo(
                    soup.find("name").text, soup.find("type")["name"])
            for art in artifacts:
                # BUGFIX: guard artifacts without a container; previously
                # .get(None).name raised AttributeError on NoneType.
                con_info = con_uri_info.get(art.container_uri)
                if con_info:
                    art.container_name = con_info.name
                    art.container_type = con_info.con_type
        return artifacts
    def get_process_data(self):
        """Retrieves Process data for the current step, including technician,
        uri, and udfs.
        Returns:
            process: a Process dataclass representing the process of the
                current step.
        """
        step_limsid = self.args.step_uri.split("/")[-1]
        process_uri = (f"{self.api.host}processes/{step_limsid}")
        # Get Process URI to extract data.
        soup = BeautifulSoup(self.api.get(process_uri), "xml")
        # Construct Process data class.
        process = Process()
        process.uri = process_uri
        first_name = soup.find("first-name").text.strip()
        last_name = soup.find("last-name").text.strip()
        process.technician = f"{first_name} {last_name}"
        # Extract all UDF names and values.
        for udf_data in soup.find_all("udf:field"):
            process.udf[udf_data["name"]] = udf_data.text
        return process
    def get_artifact_map(self, uri_only=False, container_info=False):
        """Returns a map of input artifacts to output artifacts, either as uris
        or as Artifact dataclasses. One input artifact can be mapped to a
        list of their multiple output artifacts.
        Arguments:
            uri_only (boolean): This denotes whether to harvest this mapping as
                uris or as namedtuples.
            container_info (boolean): When True (and not uri_only), also
                resolve container name/type for each artifact.
        Returns:
            artifact_map (dict {input artifact: [output_artifact]}):
                Returns a dict of input artifact : all output artifacts.
        """
        # NOTE(review): passing uri_only=True together with
        # container_info=True falls through to the namedtuple branch, which
        # requires the dicts below -- confirm that combination is unsupported.
        if not uri_only:
            # Make a dict with input_uri: input_artifact.
            input_uri_art = {
                art.uri: art for art in self.get_artifacts("input")}
            # Make a dict with output_uri: output_artifact.
            output_uri_art = {
                art.uri: art for art in self.get_artifacts("output")}
        # The container_name and container_type fields will always be None,
        # because it is not always necessary. They exist so that
        # the data_class can be run through the 'astuple' method.
        Hashable_Artifact = namedtuple("HashableArtifact", [
            "name",
            "uri",
            "art_type",
            "sample_uri",
            "container_uri",
            "container_name",
            "container_type",
            "location",
            "parent_process",
            "reagent_label"
        ])
        artifact_map = dict()
        for io_map in self.step_soup.find_all("input-output-map"):
            output_soup = io_map.find("output")
            if output_soup["output-generation-type"] == "PerInput":
                input_uri = io_map.find("input")["uri"]
                output_uri = output_soup["uri"]
                if uri_only and not container_info:
                    artifact_map.setdefault(input_uri, list())
                    artifact_map[input_uri].append(output_uri)
                else:
                    if container_info:
                        input_con_soup = BeautifulSoup(
                            self.api.get(
                                input_uri_art[input_uri].container_uri),
                            "xml")
                        input_con_name = input_con_soup.find("name").text
                        input_con_type = input_con_soup.find("type")["name"]
                        input_uri_art[
                            input_uri].container_name = input_con_name
                        input_uri_art[
                            input_uri].container_type = input_con_type
                        output_con_soup = BeautifulSoup(
                            self.api.get(
                                output_uri_art[output_uri].container_uri),
                            "xml")
                        output_con_name = output_con_soup.find("name").text
                        output_con_type = output_con_soup.find("type")["name"]
                        output_uri_art[
                            output_uri].container_name = output_con_name
                        output_uri_art[
                            output_uri].container_type = output_con_type
                    # Convert to hashable namedtuples excluding the UDF map.
                    input_art = Hashable_Artifact(
                        *(astuple(input_uri_art[input_uri])[:-1]))
                    output_art = Hashable_Artifact(
                        *(astuple(output_uri_art[output_uri])[:-1]))
                    artifact_map.setdefault(input_art, list())
                    artifact_map[input_art].append(output_art)
        return artifact_map
    def set_artifact_udf(self, sample_values, stream):
        """Set UDF values for analytes in the current step based on given
        mapping.
        Arguments:
            sample_values (dict {str: [namedtuple]}): Maps sample limsid's to
                a list of namedtuples called 'UDF' with the fields 'name',
                'value'.
            stream (str): The source of the samples, either "input" or
                "output".
        Side Effects:
            Sets the samples' UDFs that were passed into the REST database.
            Overwrites the value that was in that UDF if it existed.
        Raises:
            RuntimeError: If there was an exception raised while POSTing.
        Requirements:
            The UDF Value's type must be in line with Clarity's
                initialization of that type.
        """
        art_uris = list()
        for iomap in self.step_soup.find_all("input-output-map"):
            art_soup = iomap.find(stream)
            art_uris.append(art_soup["uri"])
        art_soups = BeautifulSoup(self.api.get(art_uris), "xml")
        art_queue = list()
        for art in art_soups.find_all("art:artifact"):
            if art["limsid"] in sample_values:
                udfs = sample_values[art["limsid"]]
                for udf in udfs:
                    target_udf = art.find(attrs={"name": udf.name})
                    # If the UDF exists as a value, replace it.
                    if target_udf:
                        target_udf.string = str(udf.value)
                    # If it does not exist, find out the UDF type for Clarity.
                    else:
                        # bool must be checked before int (bool is an int
                        # subclass in Python).
                        if isinstance(udf.value, bool):
                            udf_type = "Boolean"
                        elif (isinstance(udf.value, int)
                                or isinstance(udf.value, float)):
                            udf_type = "Numeric"
                        else:
                            udf_type = "String"
                        # Build a new UDF tag and add it to the art:artifact.
                        udf_tag = Tag(
                            builder=art.builder,
                            name="udf:field",
                            attrs={"name": udf.name, "type": udf_type})
                        udf_tag.string = str(udf.value)
                        art.find("sample").insert_after(udf_tag)
            # Build the list that will be rendered by the Jinja template.
            art_queue.append(str(art))
        # Use Jinja to create the batch update xml.
        template_path = (os.path.join(
            os.path.split(__file__)[0], "batch_artifact_update_template.xml"))
        with open(template_path, "r") as file:
            template = Template(file.read())
        update_xml = template.render(artifacts=art_queue)
        self.api.post(f"{self.api.host}artifacts/batch/update", update_xml)
    def get_artifacts_previous_step(
            self, dest_step, stream, art_smp_uris, step_soup, results=None):
        """Return artifact uris mapped to ancestor artifacts from a target
        step.
        Arguments:
            dest_step (str): The name of the step where the ancestor
                artifacts were created.
            stream (str): The source of the samples, either "input" or
                "output" in the dest_step.
            art_smp_uris (dict {str: str}): A dict that maps smp_uris to
                passed in art_uris.
            step_soup: The step details soup for initial step.
            results (dict): The empty dict that will eventually be returned
                with the desired artifacts from the dest_step.
        Returns:
            results (dict {str: Artifact}): The dictionary that
                maps the art_uri to the artifact namedtuple. All of the
                'PerAllInputs' are stored in the results dict at
                results['shared']. If the art_uri does not have ancestors at
                that target, the art_uri will not be in the dictionary.
        Exceptions:
            RuntimeError: If that target_step is not in any of the provided
                art_uri histories.
            RuntimeError: If there are targets that ran through the step at
                two or more different times. (The dest_step process id is not
                the same for all of the passed-in samples.)
        Requirements:
            The targets to reroute must all have the same endpoint; if a sample
                went through the step separately from its fellows, this
                will not work.
        """
        results = results or dict()
        try:
            step_name = step_soup.find("configuration").text
        except AttributeError:
            # Process XML uses <type> where step XML uses <configuration>.
            step_name = step_soup.find("type").text
        if step_name != dest_step:
            # Harvest all of the input uri's of the current step.
            input_uris = [art["uri"].split(
                '?')[0]for art in step_soup.find_all("input")]
            all_input_soup = BeautifulSoup(self.api.get(input_uris), "xml")
            try:
                # Harvest all of the previous steps of the current step.
                prev_steps = {
                    tag["uri"] for tag in all_input_soup.find_all(
                        "parent-process")}
                # If there is no parent-process tag, the step isn't in at least
                # one of the initial artifact's history.
            except AttributeError:
                raise RuntimeError(
                    f"The target_step is not in one or more of your "
                    f"art_smp_uris histories. The earliest step is "
                    f"{step_name}")
            # for every prev_step, you need to recurse (where all of the
            # stored result values are in results).
            else:
                for step_uri in prev_steps:
                    step_soup = BeautifulSoup(self.api.get(step_uri), "xml")
                    return self.get_artifacts_previous_step(
                        dest_step, stream, art_smp_uris, step_soup, results)
        else:
            # Get all of the inputs or outputs as PreviousStepArtifacts.
            target_arts = list()
            for iomap in step_soup.find_all("input-output-map"):
                art_uri = iomap.find(stream)["uri"].split('?')[0]
                out_art = iomap.find("output")
                art_type = out_art["output-type"]
                art_generation_type = out_art["output-generation-type"]
                # Skip PerInput ResultFiles, because there is not a way to map
                # them to the originally passed in artifacts (they don't have
                # a sample tag to match).
                if (art_generation_type == "PerInput"
                        and art_type == "ResultFile"):
                    continue
                # Add Analytes and shared ResultFiles to be matched to its
                # originally passed in analyte.
                target_arts.append(PreviousStepArtifact(
                    art_uri, art_type, art_generation_type))
            target_art_uris = [art.uri for art in target_arts]
            all_target_soup = BeautifulSoup(
                self.api.get(target_art_uris), "xml")
            target_smp_arts = dict()
            # Map the input or output sample_uri : list of
            # Previous_Step_Analytes.
            for art in all_target_soup.find_all("art:artifact"):
                for target_art in target_arts:
                    if art["uri"].split('?')[0] == target_art.uri:
                        target_smp_arts.setdefault(
                            art.find("sample")["uri"], []).append(target_art)
            # Add as a result the original uri: list of Artifacts.
            for initial_art_uri, initial_smp_uri in art_smp_uris.items():
                try:
                    results[initial_art_uri] = target_smp_arts[initial_smp_uri]
                except KeyError:
                    raise RuntimeError(
                        f"The artifact {initial_art_uri} did not run at the"
                        f" same time as the other samples passed in.")
            # Add the PerAllInputs ResultFiles to the results with the key
            # of 'shared'.
            for art in target_arts:
                if art.art_type == "ResultFile":
                    results.setdefault("shared", []).append(art)
            return results
| StarcoderdataPython |
3373408 | """
Utility functions for gathering data from Google Datastore Query
objects.
"""
def has_filter(query, col_and_operator):
    """
    query: A Cloud Datastore Query object
    col_and_operator: tuple of column name and operator

    Return True when the query carries a filter on exactly this
    (column, operator) pair; the filter's value is ignored.
    """
    wanted = tuple(col_and_operator)
    return any(
        (col, operator) == wanted
        for col, operator, _value in query.filters)
def get_filter(query, col_and_operator):
    """
    query: A Cloud Datastore Query object
    col_and_operator: tuple of column name and operator

    Return the value of the first filter matching this (column, operator)
    pair, or None when no such filter exists.
    """
    wanted = tuple(col_and_operator)
    return next(
        (value for col, operator, value in query.filters
         if (col, operator) == wanted),
        None)
def is_keys_only(query):
    """Return True when *query* projects only the entity key."""
    keys_only_projection = ["__key__"]
    return query.projection == keys_only_projection
def compare_keys(lhs, rhs):
    """
    The App Engine API used to provide a key comparison, but for
    some reason the Cloud Datastore API doesn't :(

    Three-way compare two datastore keys: returns -1, 0 or 1.  Keys compare
    component-wise by (project, namespace, *flat_path); shorter key paths
    sort first when all shared components are equal.
    """
    def cmp(a, b):
        # None sorts before any concrete value; equal Nones compare equal.
        if a is None and b is None:
            return 0
        if a is None:
            return -1
        if b is None:
            return 1
        return (a > b) - (a < b)

    lhs_args = [lhs.project, lhs.namespace] + list(lhs.flat_path)
    if lhs.is_partial:
        # If the key is partial, then we need to add a blank placeholder
        # for the id or name so we can compare correctly.
        # BUGFIX: this previously used list.extend(""), which iterates the
        # empty string and appends *nothing*; append("") adds the intended
        # single placeholder element.
        lhs_args.append("")
    rhs_args = [rhs.project, rhs.namespace] + list(rhs.flat_path)
    if rhs.is_partial:
        rhs_args.append("")
    for lhs_component, rhs_component in zip(lhs_args, rhs_args):
        comparison = cmp(lhs_component, rhs_component)
        if comparison != 0:
            return comparison
    # All shared components equal: the shorter key path sorts first.
    return cmp(len(lhs_args), len(rhs_args))
| StarcoderdataPython |
def f(x):
    """Busy-loop for a number of iterations that scales with *x*.

    The multiplier steps up with x: x < 100 -> x*5 iterations,
    x < 200 -> x*10, x < 300 -> x*15, otherwise x*20.  The loop body does
    no useful work (CPU-burn only); returns None.
    """
    # Flattened the nested if/else pyramid into an elif chain.
    if x < 100:
        loops = x * 5
    elif x < 200:
        loops = x * 10
    elif x < 300:
        loops = x * 15
    else:
        loops = x * 20
    # BUGFIX for Python 3: xrange() no longer exists; range() is lazy in
    # Python 3 and serves the same purpose.
    for _ in range(loops):
        pass
5044522 | #!/usr/bin/env python3
"""Star Wars API HTTP response parsing"""
# pprint helps make things like dictionaries more human-readable
from pprint import pprint
# requests is used to send HTTP requests (get it?)
import requests
# SWAPI endpoint for person #1 — presumably Luke Skywalker per the
# "protagonist" print in main(); confirm against the live API.
URL= "https://swapi.dev/api/people/1"
def main():
    """Fetch SWAPI person #1 and walk through the three ways of reading
    an HTTP response body: ``.content`` (bytes), ``.text`` (str), and
    ``.json()`` (parsed Python data).

    Performs live network requests and pauses for user input between
    steps, so it must be run interactively with network access.
    """
    # SWAPI response is stored in "resp" object
    resp= requests.get(URL)
    x= """
    The .content attribute returns the content (our Star Wars data)!...
    but in bytes. Bytes are a sequence of bits/bytes that represent data,
    but is only really meant to be read by machines.
    Note the superfluous apostrophes (') and the "b" character at the beginning of each line.
    """
    print(x)
    print(type(resp.content))
    pprint(resp.content)
    # Blocking: waits for the user to press Enter before the next demo.
    input()
    y= """
    The .text attribute will return the content as a string! Much more readable!
    However, this data is useless to us in most programs...
    we can't easily parse strings!
    """
    print(y)
    print(type(resp.text))
    pprint(resp.text)
    input()
    z= """
    The .json() method is wonderful. If the page is returning JSON, the .json() method
    will convert it into the Pythonic data equivalent! We can now use this data
    INFINITELY more effectively because it has been converted to a Python dictionary!
    """
    print(z)
    print(type(resp.json()))
    pprint(resp.json())
    # now we can do some cool stuff with the data we received!
    print("\n" + resp.json()["name"] + " is the protagonist of Star Wars! He appeared in the following films:")
    # One extra GET per film URL to resolve each film's title.
    for film in resp.json()["films"]:
        print(" •", requests.get(film).json()["title"])
if __name__ == "__main__":
    main()
| StarcoderdataPython |
11391827 | # Generated by Django 3.1.1 on 2020-10-28 16:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header); avoid hand-editing once applied.
    # Must run after the previous migration in the ``ramaelectric`` chain.
    dependencies = [
        ('ramaelectric', '0023_auto_20201028_1138'),
    ]
    # 1) Make ``Items.objects.latest()`` order by ``updated_on``.
    # 2) Relax ``arrival_date`` to an optional free-text field (<=10 chars).
    operations = [
        migrations.AlterModelOptions(
            name='items',
            options={'get_latest_by': ['updated_on']},
        ),
        migrations.AlterField(
            model_name='items',
            name='arrival_date',
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
    ]
| StarcoderdataPython |
12823275 | """Convert epidemic forecasts into JSON files for online viewing."""
import datetime
import errno
import h5py
import json
import logging
import numpy as np
import os.path
import pypfilt.summary
from . import settings
# Full timestamp format used by the simulation output files.
fs_fmt = "%Y-%m-%d %H:%M:%S"
# Date-only format used for JSON keys and values.
d_fmt = "%Y-%m-%d"
def dtime(dstr):
    """Parse a full timestamp string ('%Y-%m-%d %H:%M:%S', str or bytes)
    into a ``datetime`` instance."""
    text = dstr.decode() if isinstance(dstr, bytes) else dstr
    return datetime.datetime.strptime(text, fs_fmt)
def date_str(dstr):
    """Reduce a full timestamp string (str or bytes) to a '%Y-%m-%d'
    date string."""
    text = dstr.decode() if isinstance(dstr, bytes) else dstr
    as_date = datetime.datetime.strptime(text, fs_fmt).date()
    return as_date.strftime(d_fmt)
def update_obs_model(f, hdf5_file, om_dict):
    """
    Record the observation model parameters, and check that they're consistent
    across all of the input files.
    :param f: The file object from which to read the simulation output.
    :param hdf5_file: The corresponding file name for ``f``.
    :param om_dict: A dictionary of observation model parameter names/values.
    :raises ValueError: if more than one observation model is found. Note that
        if the parameter **values** differ across the input files, a warning
        message will be printed but the files will still be processed.
    """
    logger = logging.getLogger(__name__)
    # Note: in Python 3, h5py group methods such as keys(), values(),
    # and items() return view-like objects that cannot be sliced or
    # indexed like lists, but which support iteration.
    obs_models = list(f['meta']['param']['obs'].values())
    n_obs_models = len(obs_models)
    if n_obs_models != 1:
        raise ValueError("Found {} observation models".format(n_obs_models))
    om = obs_models[0]
    if om_dict:
        # An observation model has already been recorded, check that the
        # observation model in this file is consistent with it.
        for key in om.keys():
            if key not in om_dict:
                # A new observation model parameter has appeared.
                logger.warning("New parameter {} in {}".format(
                    key, os.path.basename(hdf5_file)))
                continue
            # ``om[key][()]`` reads the scalar dataset; ``.item()`` converts
            # the NumPy scalar to a native Python value.
            ok = om_dict[key] == om[key][()].item()
            if not ok:
                # NOTE(review): differing values are only warned about; the
                # value from the first file processed is kept.
                logger.warning("Param {} differs".format(key))
                pass
    else:
        # Record the observation model parameters.
        for key in om.keys():
            om_dict[key] = om[key][()].item()
def most_recent_obs_date(f):
    """Return the time of the most recent observation as a ``datetime``.

    :param f: The file object from which to read the simulation output.
    :raises ValueError: if more than one observation model is found.
    """
    unit_names = list(f['data']['obs'].keys())
    if len(unit_names) != 1:
        raise ValueError("Found {} observation models".format(
            len(unit_names)))
    obs_table = f['data']['obs'][unit_names[0]][()]
    observation_times = [dtime(row['date']) for row in obs_table]
    return max(observation_times)
def update_forecast_cis(f, hdf5_file, fs_dict, cis, most_recent):
    """
    Record the forecast credible intervals and return the forecasting dates
    for which other simulation outputs should be calculated.
    :param f: The file object from which to read the simulation output.
    :param hdf5_file: The corresponding file name for ``f``.
    :param fs_dict: A dictionary of forecast credible intervals.
    :param cis: The credible intervals to record.
    :param most_recent: Whether to use only the most recent forecast.
    """
    logger = logging.getLogger(__name__)
    # Extract the forecast credible intervals.
    fs = f['data']['forecasts'][()]
    # Keep only the rows whose 'prob' column matches a requested interval.
    conds = tuple(fs['prob'] == p for p in cis)
    keep = np.logical_or.reduce(conds)
    fs = fs[keep]
    # Note that forecast dates are date-time strings (%Y-%m-%d %H:%M:%S).
    fs_dates = np.unique(fs['fs_date'])
    # Check that this table contains the desired credible intervals.
    ci_levels = np.unique(fs['prob'])
    if len(ci_levels) < len(cis):
        msg = "expected CIs: {}; only found: {}"
        expect = ", ".join(str(p) for p in sorted(cis))
        found = ", ".join(str(p) for p in sorted(ci_levels))
        logger.warning(msg.format(expect, found))
    # Ignore the estimation run, if present.
    # An estimation run is recognised by having a single fs_date equal to
    # the final simulation date.
    sim_end = max(dtime(dstr) for dstr in fs['date']).strftime(fs_fmt)
    if len(fs_dates) == 1 and fs_dates[0] == sim_end:
        # If the file only contains the result of an estimation run,
        # inform the user and keep these results --- they can result
        # from directly using pypfilt.run() to produce forecasts.
        last_obs = most_recent_obs_date(f)
        logger.warning('Estimation run, set fs_date = {} for {}'.format(
            last_obs.strftime(fs_fmt), os.path.basename(hdf5_file)))
        # Replace fs_date with the date of the most recent observation.
        last_obs = last_obs.strftime(fs_fmt)
        fs_dates = [last_obs]
        fs['fs_date'] = last_obs
        # Discard all rows prior to the (effective) forecasting date.
        dates = np.array([dtime(row['date']) for row in fs])
        fs = fs[dates >= last_obs]
        # Note: these files may contain duplicate rows.
        # So identify the first duplicate row (if any) and crop.
        for (n, rix) in enumerate(np.where(fs['date'] == last_obs)[0]):
            # If the nth row for the date on which the forecast begins isn't
            # the nth row of the entire table, it represents the start of the
            # duplicate data, so discard all subsequent rows.
            if n != rix:
                fs = fs[:rix]
                break
    else:
        # Drop the estimation run's date; optionally keep only the latest
        # forecast date, converted back to its string form.
        fs_dates = [d for d in fs_dates if d != sim_end]
        if most_recent:
            # Only retain the more recent forecast.
            fs_dates = [max(dtime(dstr) for dstr in fs_dates)]
            fs_dates = [d.strftime(fs_fmt) for d in fs_dates]
    # Store the forecast credible intervals.
    ci_levels = np.unique(fs['prob'])
    for fs_date in fs_dates:
        mask = fs['fs_date'] == fs_date
        # Guard against str-vs-bytes comparisons silently yielding a scalar
        # False instead of an element-wise mask.
        if not isinstance(mask, np.ndarray) or mask.shape[0] != fs.shape[0]:
            raise ValueError('Invalid fs_date comparison; {} == {}'.format(
                type(fs['fs_date'][0]), type(fs_date)))
        fs_rows = fs[mask]
        ci_dict = {}
        for ci in ci_levels:
            ci_rows = fs_rows[fs_rows['prob'] == ci]
            # NOTE(review): assumes a 7-column row layout with date at
            # index 3 and ymin/ymax at indices 5 and 6 — confirm against
            # the pypfilt.summary table definition.
            ci_dict[str(ci)] = [
                {"date": date_str(date),
                 "ymin": ymin,
                 "ymax": ymax}
                for (_, _, _, date, _, ymin, ymax) in ci_rows]
        fs_dict[date_str(fs_date)] = ci_dict
    # Return the forecast date(s) that should be considered for this file.
    return fs_dates
def update_peak_timing(f, hdf5_file, pkt_dict, cis, fs_dates):
    """
    Record the peak timing credible intervals.
    :param f: The file object from which to read the simulation output.
    :param hdf5_file: The corresponding file name for ``f``.
    :param pkt_dict: A dictionary of peak timing credible intervals.
    :param cis: The credible intervals to record.
    :param fs_dates: The forecasting dates for which the observations should
        be recorded.
    :raises ValueError: if more than one observation model is found. Note that
        if the parameter **values** differ across the input files, a warning
        message will be printed but the files will still be processed.

    NOTE(review): the :raises: description above appears copy-pasted from
    ``update_obs_model``; this function actually raises ValueError for an
    invalid ``fs_date`` comparison (see below).
    """
    logger = logging.getLogger(__name__)
    # Extract the peak timing credible intervals.
    try:
        pk = f['data']['peak_cints'][()]
    except KeyError:
        # If this table is not present, return an empty array with the
        # minimal set of required columns.
        logger.warning("No 'peak_cints' table: {}".format(
            os.path.basename(hdf5_file)))
        return
    # Keep only the rows matching a requested credible interval.
    conds = tuple(pk['prob'] == p for p in cis)
    keep = np.logical_or.reduce(conds)
    pk = pk[keep]
    ci_levels = np.unique(pk['prob'])
    if len(ci_levels) < len(cis):
        msg = "expected CIs: {}; only found: {}"
        expect = ", ".join(str(p) for p in sorted(cis))
        found = ", ".join(str(p) for p in sorted(ci_levels))
        logger.warning(msg.format(expect, found))
    for fs_date in fs_dates:
        mask = pk['fs_date'] == fs_date
        # Guard against str-vs-bytes comparisons yielding a scalar False
        # instead of an element-wise mask.
        if not isinstance(mask, np.ndarray) or mask.shape[0] != pk.shape[0]:
            raise ValueError('Invalid fs_date comparison; {} == {}'.format(
                type(pk['fs_date'][0]), type(fs_date)))
        pk_rows = pk[mask]
        ci_dict = {}
        for ci in ci_levels:
            ci_rows = pk_rows[pk_rows['prob'] == ci]
            # NOTE(review): assumes an 8-column row layout with the timing
            # bounds in the last two columns — confirm against the
            # pypfilt.summary table definition.
            ci_dict[str(ci)] = [
                {"date": date_str(fs_date),
                 "ymin": date_str(tmin),
                 "ymax": date_str(tmax)}
                for (_, _, _, _, _smin, _smax, tmin, tmax) in ci_rows]
        pkt_dict[date_str(fs_date)] = ci_dict
def update_obs(f, hdf5_file, obs_dict, fs_dates):
    """
    Record the observations provided at each of the forecasting dates.
    :param f: The file object from which to read the simulation output.
    :param hdf5_file: The corresponding file name for ``f``.
    :param obs_dict: A dictionary of observations.
    :param fs_dates: The forecasting dates for which the observations should
        be recorded.
    :raises ValueError: if more than one observation model is found. Note that
        if the parameter **values** differ across the input files, a warning
        message will be printed but the files will still be processed.
    """
    obs_units = list(f['data']['obs'].keys())
    n_obs_units = len(obs_units)
    if n_obs_units != 1:
        raise ValueError("Found {} observation models".format(n_obs_units))
    obs = f['data']['obs'][obs_units[0]][()]
    cols = obs.dtype.names
    # Identify the byte-string columns so they can be decoded below.
    bs_cols = [c for c in cols if obs.dtype[c].kind == 'S']
    for fs_date in fs_dates:
        # Build a fresh list of plain-Python dicts for each forecast date
        # (each date gets its own copy, so later mutation is safe).
        obs_list = [
            {c: obs_row[c].item() for c in cols}
            for obs_row in obs]
        for o in obs_list:
            # Convert byte string to Unicode strings.
            for c in bs_cols:
                if isinstance(o[c], bytes):
                    o[c] = o[c].decode()
            # Ensure the date is stored as 'YYYY-MM-DD'.
            o['date'] = date_str(o['date'])
        obs_dict[date_str(fs_date)] = obs_list
def convert(files, most_recent, locn_id, out_file, replace, pretty, cis=None):
    """
    Convert a set of epidemic forecasts into a JSON file for online viewing.
    :param files: A list of forecast files (HDF5).
    :param most_recent: Whether to use only the most recent forecast in each
        file.
    :param locn_id: The forecasting location identifier.
    :param out_file: The output file name.
    :param replace: Whether to replace (overwrite) an existing JSON file,
        rather than updating it with the provided forecasts.
    :param pretty: Whether the JSON output should be pretty-printed.
    :param cis: The credible intervals to record (default: ``[0, 50, 95]``).
    """
    logger = logging.getLogger(__name__)
    locn_settings = settings.local(locn_id)
    if cis is None:
        cis = [0, 50, 95]
    # If we're updating an existing file, try to load the current contents.
    json_data = None
    if (not replace) and os.path.isfile(out_file):
        # The output file already exists and we're not replacing it.
        try:
            with open(out_file, encoding='utf-8') as f:
                json_data = json.load(f)
        except json.JSONDecodeError:
            logger.warning("Could not read file '{}'".format(out_file))
    # If we're generating a new file, or the current file could not be loaded,
    # start with empty content.
    if json_data is None:
        json_data = {
            'obs': {},
            'forecasts': {},
            'timing': {},
            'obs_model': {},
            'location': locn_id,
            'location_name': locn_settings['name'],
            'obs_axis_lbl': locn_settings['obs_axis_lbl'],
            'obs_axis_prec': locn_settings['obs_axis_prec'],
            'obs_datum_lbl': locn_settings['obs_datum_lbl'],
            'obs_datum_prec': locn_settings['obs_datum_prec'],
        }
    # Note: files may be in any order, sorting yields deterministic output.
    # Each file contributes its observation model, forecast CIs, peak
    # timing, and observations to the accumulated json_data.
    for hdf5_file in sorted(files):
        with h5py.File(hdf5_file, 'r') as f:
            update_obs_model(f, hdf5_file, json_data['obs_model'])
            fs_dates = update_forecast_cis(f, hdf5_file,
                                           json_data['forecasts'],
                                           cis, most_recent)
            update_peak_timing(f, hdf5_file, json_data['timing'],
                               cis, fs_dates)
            update_obs(f, hdf5_file, json_data['obs'], fs_dates)
    if pretty:
        indent = 2
        separators = (', ', ': ')
    else:
        indent = None
        separators = (',', ':')
    # Create the output directory (and missing parents) as needed.
    # The directory will be empty ('') if out_file has no path component.
    out_dir = os.path.dirname(out_file)
    if out_dir and not os.path.isdir(out_dir):
        # Create with mode -rwxr-x---.
        try:
            logger.info('Creating {}'.format(out_dir))
            os.makedirs(out_dir, mode=0o750)
        except OSError as e:
            # Potential race condition with multiple script instances.
            if e.errno != errno.EEXIST:
                logger.warning('Could not create {}'.format(out_dir))
                raise
    logger.debug("Writing {}".format(out_file))
    with open(out_file, encoding='utf-8', mode='w') as f:
        json.dump(json_data, f, ensure_ascii=False,
                  sort_keys=True, indent=indent, separators=separators)
def parser():
    """Build and return the command-line argument parser for ``epifx-json``.

    Starts from the shared ``settings.common_parser`` and layers on the
    input, output, and required argument groups consumed by :func:`main`.
    """
    cli = settings.common_parser(locns=False)
    input_group = cli.add_argument_group('Input arguments')
    input_group.add_argument(
        '-i', '--intervals', action='store', metavar='CIs',
        help='Credible intervals (default: 0,50,95)')
    input_group.add_argument(
        '-m', '--most-recent', action='store_true',
        help='Use only the most recent forecast in each file')
    output_group = cli.add_argument_group('Output arguments')
    output_group.add_argument(
        '-o', '--output', action='store', type=str, default='output.json',
        help='The name of the JSON output file')
    output_group.add_argument(
        '-p', '--pretty', action='store_true',
        help='Pretty-print the JSON output')
    output_group.add_argument(
        '-r', '--replace', action='store_true',
        help='Replace the output file (default: update if it exists)')
    required_group = cli.add_argument_group('Required arguments')
    required_group.add_argument(
        '-l', '--location', action='store', type=str, default=None,
        help='The location to which the forecasts pertain')
    required_group.add_argument(
        'files', metavar='HDF5_FILE', type=str, nargs='*',
        help='Forecast data file(s)')
    return cli
def main(args=None):
    """The entry point for ``epifx-json``.

    Parses command-line arguments (from ``args`` if given, otherwise from
    ``sys.argv``), validates them, and hands off to :func:`convert`.
    Returns 2 when the required arguments are missing.
    """
    cli = parser()
    if args is None:
        opts = vars(cli.parse_args())
    else:
        opts = vars(cli.parse_args(args))
    # Both a location and at least one input file are mandatory.
    if opts['location'] is None or not opts['files']:
        cli.print_help()
        return 2
    if opts['intervals'] is not None:
        # Convert the comma-separated CI list to integers, aborting on
        # any value that is not a valid integer.
        parsed = []
        for val in opts['intervals'].split(","):
            try:
                parsed.append(int(val))
            except ValueError:
                cli.error("Invalid credible interval '{}'".format(val))
        opts['intervals'] = parsed
    logging.basicConfig(level=opts['loglevel'])
    convert(files=opts['files'], most_recent=opts['most_recent'],
            locn_id=opts['location'], out_file=opts['output'],
            replace=opts['replace'], pretty=opts['pretty'],
            cis=opts['intervals'])
| StarcoderdataPython |
8126516 | <gh_stars>0
from beautifuldict.baseconfig import Baseconfig
from decouple import config
from pkg_resources import resource_filename
# Static configuration consumed through ``common_cfg`` below.
params = {
    # Input/output data directories resolved inside the installed package.
    'path': {
        'input': resource_filename(__name__, 'data/input/'),
        'output': resource_filename(__name__, 'data/output/')
    },
    # Human-readable (Spanish) display labels keyed by numeric strings.
    'value_labels': {
        'month': {
            '1': 'Enero',
            '2': 'Febrero',
            '3': 'Marzo',
            '4': 'Abril',
            '5': 'Mayo',
            '6': 'Junio',
            '7': 'Julio',
            '8': 'Agosto',
            '9': 'Septiembre',
            '10': 'Octubre',
            '11': 'Noviembre',
            '12': 'Diciembre'
        },
        'quarter': {
            '1': '1T',
            '2': '2T',
            '3': '3T',
            '4': '4T'
        }
    },
    # Number of periods to consider per reporting granularity.
    # NOTE(review): the exact semantics (lookback window vs. count) are
    # defined by the consuming code — confirm before changing values.
    'periods': {
        'global_annual': 6,
        'annual': 10,
        'global_monthly': 5,
        'monthly': 61,
        'global_quarterly': 5,
        'quarterly': 15
    }
}
# Wrap the raw dict in the project's attribute-style config container.
common_cfg = Baseconfig(params)
| StarcoderdataPython |
12853923 | <filename>losses.py
import tensorflow as tf
from tensorflow.keras import backend
#DEPRECATED
# An implementation of wasserstein used for a naive implementation of WGAN
# calculate wasserstein loss
def wasserstein_loss(y_true, y_pred):
    """Wasserstein critic loss: the mean of the label/prediction product."""
    elementwise_product = y_true * y_pred
    return backend.mean(elementwise_product)
# Define the loss functions for the discriminator,
# which should be (fake_loss - real_loss).
# We will add the gradient penalty later to this loss function.
def discriminator_loss(real_img, fake_img):
    """WGAN critic loss.

    Returns (real_loss, fake_loss, fake_loss - real_loss); the gradient
    penalty is added to the difference by the caller.
    """
    mean_real = tf.reduce_mean(real_img)
    mean_fake = tf.reduce_mean(fake_img)
    critic_loss = mean_fake - mean_real
    return mean_real, mean_fake, critic_loss
# Define the loss functions for the generator.
def generator_loss(fake_img):
    """WGAN generator loss: the negated mean critic score on fake images."""
    mean_fake_score = tf.reduce_mean(fake_img)
    return -mean_fake_score
| StarcoderdataPython |
5097832 | <reponame>marteinn/wagtail-text-analysis
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.core.models import Page
from wagtail.admin.edit_handlers import FieldPanel
from wagtailtextanalysis.text_analysis import (
TextAnalysis,
KeyPhrasesField,
SentimentField,
)
class ArticlePage(Page, TextAnalysis):
    """Wagtail page whose title and body are run through key-phrase
    extraction via ``wagtailtextanalysis``."""
    wysiwyg = models.TextField(blank=True, null=True, verbose_name=_("Wysiwyg"))
    # Space-separated cache of the extracted key phrases (see below).
    key_phrases = models.TextField(blank=True)
    content_panels = Page.content_panels + [
        FieldPanel("wysiwyg"),
        FieldPanel("key_phrases"),
    ]
    # Fields submitted to the key-phrase analyser.
    text_analysis_fields = [KeyPhrasesField("title"), KeyPhrasesField("wysiwyg")]
    def update_key_phrases(self, phrases):
        # Callback invoked by TextAnalysis with the extracted phrases.
        self.key_phrases = " ".join(phrases)
class Comment(models.Model, TextAnalysis):
    """A user comment whose sentiment is scored via ``wagtailtextanalysis``."""
    title = models.CharField(max_length=255)
    content = models.TextField()
    # Sentiment score reported by the analyser (6 decimal places);
    # NOTE(review): the exact scale is set by the analysis backend.
    sentiment = models.DecimalField(max_digits=7, decimal_places=6, default=0)
    # BUG FIX: this model has no "wysiwyg" field (that belongs to
    # ArticlePage); analyse the comment's own "content" field instead.
    text_analysis_fields = [SentimentField("title"), SentimentField("content")]
    def update_sentiment(self, sentiment):
        # Callback invoked by TextAnalysis with the computed sentiment.
        self.sentiment = sentiment
| StarcoderdataPython |
6620895 | import sys
def helloWorld(args, params):
    """Print a greeting; the target defaults to 'World' unless overridden.

    :param args: ``sys.argv`` (unused here, passed by the dispatcher).
    :param params: dict with 'variables'['to'] and 'toggle'['excited'].
    """
    recipient = 'World'
    override = params['variables']['to']
    if override != '':
        recipient = override
    message = 'Hello, {}'.format(recipient)
    if params['toggle']['excited']:
        message += '!'
    print(message)
# Registry of CLI sub-commands. Each entry maps a command name to:
#   'function'  -- the handler, called as function(sys.argv, params)
#   'variables' -- values gathered positionally ('arg_offset') and/or via
#                  '--name=value' aliases ('call')
#   'toggle'    -- boolean flags, enabled by any of their 'call' aliases
# Consumed by helpParse() and by the argv dispatcher at the bottom.
commands = {
    'hello': {
        'function': helloWorld,
        'comment': 'Hello world command',
        'variables': {
            'to': {
                'arg_offset': 0,
                'name': 'To',
                'comment': 'Specify whom you\'re saying your helloes to'
            },
        },
        'toggle': {
            'excited': {
                'call': ['-e', '--excited'],
                'comment': 'When selected adds exclamation point to the end'
            }
        }
    },
}
def wrongUsage(args):
    """Report invalid command usage and terminate the script.

    :param args: ``sys.argv``; ``args[0]`` is the program name.
    """
    # Fixed typo in the user-facing message ("missused" -> "misused").
    print(f'Error: Command misused, try {args[0]} [command] -h',
          f'or {args[0]} [command] --help')
    # sys.exit() instead of the site-provided exit() builtin, which may
    # be absent when the interpreter runs without the ``site`` module.
    sys.exit()
def fetchSyntax(placeholder_name, prop_dict):
    """Resolve the display name, required-brackets, and comment for a
    command property.

    Returns [name, [open_bracket, close_bracket], comment]; optional
    properties are wrapped in '[ ... ]' when rendered.
    """
    display = placeholder_name
    if 'name' in prop_dict:
        display = prop_dict['name']
    elif 'call' in prop_dict:
        display = " | ".join(prop_dict["call"])
    brackets = ['', ''] if prop_dict.get('required') else ['[ ', ' ]']
    return [display, brackets, prop_dict.get('comment', '')]
def helpParse(com, args):
    """Print help for every command, or detailed help for ``args[1]``.

    :param com: the ``commands`` registry dictionary.
    :param args: ``sys.argv``; ``args[1]`` may name a single command.
    """
    helper = com
    # BUG FIX: the original literal was ['-h' '--help'] (missing comma),
    # which concatenates to the single string '-h--help'.
    if args[1] not in ['-h', '--help'] and args[1] in com.keys():
        # Narrow the help output to the one requested command.
        helper = {args[1]: com[args[1]]}
    else:
        print(f'''For more details about command use: \
{args[0]} <command> --help''')
    for x, y in helper.items():
        tmp_comment, prp_m = ['', {'variables': 'Values', 'toggle': 'Options'}]
        if 'comment' in y:
            tmp_comment = y['comment']
        # Map internal section keys to their display headings.
        props = {a: y[z] for z, a in prp_m.items() if z in y}
        if args[1] in com.keys():
            for gen in ['Values', 'Options']:
                if gen not in props:
                    props[gen] = {}
            # Positional variables are ordered by their 'arg_offset'.
            order = {str(a['arg_offset']): z
                    for z, a in props['Values'].items() if 'arg_offset' in a}
            order_keys, ordered, com_args = [list(order.keys()), [], args[1]]
            for expected_index in range(len(order_keys)):
                # Offsets must form a contiguous 0..n-1 sequence.
                if expected_index != int(order_keys[expected_index]):
                    raise IndexError(
                        'Missing index, check your command dictionary')
                ordered.append(
                    props['Values'][order[order_keys[expected_index]]])
            # NOTE(review): assumes every command defines a 'toggle' dict.
            ordered.extend([y['toggle'][com_tg]
                           for com_tg in props['Options'].keys()])
            # Render the usage line: '[ ... ]' marks optional properties.
            for com_data in ordered:
                call, optional, _ = fetchSyntax('', com_data)
                com_args += ' {}{}{}'.format(optional[0], call, optional[1])
            print(f'Command reference: {args[0]} {com_args}')
        print('\n{:<4}{:<32}{}'.format('', x, tmp_comment))
        for z, a in props.items():
            print('{:<8}{:<40}'.format('', z + ':'))
            for b, c in a.items():
                tmp_name, tmp_req, tmp_cmn = fetchSyntax(b, c)
                print('{:<14}{:<26}{}'
                      .format('', tmp_req[0] + tmp_name + tmp_req[1], tmp_cmn))
# Dispatch on sys.argv: show help, run a known sub-command, or bail out.
if any(r in sys.argv for r in ['--help', '-h']):
    helpParse(commands, sys.argv)
elif len(sys.argv) > 1 and sys.argv[1] in commands.keys():
    params = {}
    # Collect boolean toggles: a toggle is on if any of its aliases appears.
    if 'toggle' in commands[sys.argv[1]].keys() \
            and len(commands[sys.argv[1]]['toggle'].keys()) > 0:
        params['toggle'] = {}
        calls = {x: commands[sys.argv[1]]['toggle'][x]['call']
                 for x in commands[sys.argv[1]]['toggle']}
        for x in calls.keys():
            params['toggle'][x] = any(y in sys.argv for y in calls[x])
    # Collect variable values, positionally (arg_offset) and/or via
    # '--name=value' style aliases ('call'); the latter wins.
    if 'variables' in commands[sys.argv[1]].keys() \
            and len(commands[sys.argv[1]]['variables'].keys()) > 0:
        params['variables'], mixed_values = [{}, []]
        variables = [x for x in commands[sys.argv[1]]['variables'].keys()]
        for variable in variables:
            var, val = [commands[sys.argv[1]]['variables'][variable], '']
            # Positional value: take argv at the offset unless that slot
            # actually holds a toggle alias.
            if 'arg_offset' in var.keys() \
                    and len(sys.argv) > (2 + var['arg_offset']):
                arg_set, tgl_d = [2 + var['arg_offset'],
                                  commands[sys.argv[1]]['toggle'].items()]
                toggle_calls = [toggler for _, com_data in tgl_d
                                for toggler in com_data['call']]
                if sys.argv[arg_set] not in toggle_calls:
                    val = sys.argv[2 + var['arg_offset']]
            if 'call' in var.keys() and type(var['call']) == list:
                pre_val = [arg for call in var['call']
                           for arg in sys.argv if call in arg]
                if len(pre_val) > 0:
                    val = (pre_val[0].split('='))[1]
            # Required variables with no supplied value abort with usage help.
            if 'required' in var and var['required'] and val == '':
                wrongUsage(sys.argv)
            else:
                params['variables'][variable] = val
    commands[sys.argv[1]]['function'](sys.argv, params)
else:
    wrongUsage(sys.argv)
exit()
| StarcoderdataPython |
3366201 | from .generic_arch import *
from .lm32 import * | StarcoderdataPython |
1794922 | import pandas as pd
from sklearn.model_selection import StratifiedKFold
if __name__ == '__main__':
    # Assign each row of the training set to one of 5 stratified folds
    # (stratified on the 'target' column) and save the result.
    df = pd.read_csv('input/train.csv')
    # Creating the dummy column
    df['kfold'] = -1
    # Shuffle the data and reseting the indices
    df = df.sample(frac=1).reset_index(drop=True)
    # Creating the KFolds
    # shuffle=False here is fine because the frame was already shuffled above.
    kf = StratifiedKFold(n_splits=5, shuffle=False)
    for fold , (train_idx, val_idx) in enumerate(kf.split(X=df, y=df.target.values)):
        print(len(train_idx), len(val_idx))
        df.loc[val_idx, 'kfold'] = fold
    df.to_csv('input/train_folds.csv', index=False)
| StarcoderdataPython |
12850405 | """Modelo de pacientes"""
# Django
from django.db import models
# Utilidades
from apis.utils.models import ModelUtil
class Memberships(ModelUtil):
    """Patient membership model.

    A patient can have an associated family group. The patient who
    creates the group becomes the family holder ("titular"); members of
    the family group cannot add further relatives. Only the holder can
    update or remove members of their family group.
    """
    user = models.ForeignKey('users.User', on_delete=models.CASCADE)
    family_group = models.ForeignKey('family_group.FamilyGroup', on_delete=models.CASCADE)
    pacient = models.ForeignKey('pacient.Pacient', on_delete=models.CASCADE)
    # True for the family holder who administers the group.
    is_admin = models.BooleanField('Titular', default=False)
    # Presumably the number of members affiliated so far — confirm
    # against the affiliation business logic.
    affiliated = models.PositiveIntegerField(default=0)
    # Presumably the remaining affiliation slots — confirm likewise.
    remaining_affiliates = models.PositiveIntegerField(default=0)
    # The user who performed the affiliation (nullable).
    affiliated_by = models.ForeignKey(
        'users.User',
        null=True,
        on_delete=models.CASCADE,
        related_name='affiliated_by'
    )
    def __str__(self):
        """Return the username with a note that it belongs to a family group."""
        return f'{self.user.username} hace parte del grupo familiar'
| StarcoderdataPython |
3352850 | import re
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, exceptions
class CreateEmergencycodeSerializer(serializers.Serializer):
    """Validates the payload for creating an emergency access code.

    The encrypted fields (data, nonce) must be hex-encoded, and the
    activation delay must be non-negative.
    """
    description = serializers.CharField(required=True)
    activation_delay = serializers.IntegerField(required=True)
    emergency_authkey = serializers.CharField(required=True)
    emergency_data = serializers.CharField(required=True)
    emergency_data_nonce = serializers.CharField(max_length=64, required=True)
    emergency_sauce = serializers.CharField(max_length=64, required=True)
    def validate_emergency_data(self, value):
        # The encrypted payload must be a hex string (case-insensitive).
        value = value.strip()
        if not re.match('^[0-9a-f]*$', value, re.IGNORECASE):
            msg = _('Emergency data must be in hex representation')
            raise exceptions.ValidationError(msg)
        return value
    def validate_emergency_data_nonce(self, value):
        # The nonce must likewise be hex-encoded.
        value = value.strip()
        if not re.match('^[0-9a-f]*$', value, re.IGNORECASE):
            msg = _('Emergency data nonce must be in hex representation')
            raise exceptions.ValidationError(msg)
        return value
    def validate_activation_delay(self, value):
        # Delays are expressed as non-negative integers (seconds are
        # presumed — confirm with the consuming view).
        if value < 0:
            msg = _('Activation delay needs to be a positive integer')
            raise exceptions.ValidationError(msg)
        return value
4924822 | # _*_ coding: utf-8 _*_
__author__ = 'LelandYan'
__date__ = '2019/5/16 19:32'
import numpy as np
import cv2
import matplotlib.pyplot as plt
import copy
from pylab import mpl
import skimage
# 防止中文乱码
mpl.rcParams['font.sans-serif'] = ['SimHei']
class processing_image:
def __init__(self, filename="./raw_data/1.jpg", output="./out_data"):
self.filename = filename
self.output = output
def op_gray_to_four_type(self, kernel=(9, 9), erode_iter=5, dilate_iter=5):
img = cv2.imread(self.filename)
# gray
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# erode dilate
closed = cv2.erode(img, None, iterations=erode_iter)
img = cv2.dilate(closed, None, iterations=dilate_iter)
kernel = np.ones(kernel, np.uint8)
# open operation
img_open = cv2.morphologyEx(img, op=cv2.MORPH_OPEN, kernel=kernel)
# close operation
img_close = cv2.morphologyEx(img, op=cv2.MORPH_CLOSE, kernel=kernel)
# gradient operation
img_grad = cv2.morphologyEx(img, op=cv2.MORPH_GRADIENT, kernel=kernel)
# tophat operation
img_tophat = cv2.morphologyEx(img, op=cv2.MORPH_TOPHAT, kernel=kernel)
# blackhat operation
img_blackhat = cv2.morphologyEx(img, op=cv2.MORPH_BLACKHAT, kernel=kernel)
# Plot the images
images = [img, img_open, img_close, img_grad,
img_tophat, img_blackhat]
names = ["raw_img", "img_open", "img_close", "img_grad", "img_tophat", "img_blackhat"]
cv2.imwrite(self.output+"/gradient_image1.jpg",img_grad)
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 15))
for ind, p in enumerate(images):
ax = axs[ind // 3, ind % 3]
ax.imshow(p, cmap='gray')
ax.set_title(names[ind])
ax.axis('off')
plt.show()
def op_first_to_three_type(self, flag=False):
# 全局阈值
def threshold_demo(image):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # 把输入图像灰度化
# 直接阈值化是对输入的单通道矩阵逐像素进行阈值分割。
ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_TRIANGLE)
if flag:
cv2.imwrite(self.output + "/global_binary_first1.jpg", binary)
return binary
# 局部阈值
def local_threshold(image):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # 把输入图像灰度化
# 自适应阈值化能够根据图像不同区域亮度分布,改变阈值
binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10)
if flag:
cv2.imwrite(self.output + "/local_binary_first1.jpg", binary)
return binary
# 用户自己计算阈值
def custom_threshold(image):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # 把输入图像灰度化
h, w = gray.shape[:2]
m = np.reshape(gray, [1, w * h])
mean = m.sum() / (w * h)
ret, binary = cv2.threshold(gray, mean, 255, cv2.THRESH_BINARY)
if flag:
cv2.imwrite(self.output + "/custom_binary_first1.jpg", binary)
return binary
if flag:
src = cv2.imread("./out_data/gray_cutting_image1.jpg")
else:
src = cv2.imread(self.filename)
src = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
global_scr = threshold_demo(src)
local_scr = local_threshold(src)
custom_src = custom_threshold(src)
images = [src, global_scr, local_scr,
custom_src]
names = ["src", "global_scr", "local_scr", "custom_src"]
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
for ind, p in enumerate(images):
ax = axs[ind // 2, ind % 2]
ax.imshow(p, cmap='gray')
ax.set_title(names[ind])
ax.axis('off')
plt.show()
def op_cutting_image(self):
raw_img = cv2.imread(self.filename)
img = cv2.imread("./out_data/gradient_image1.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.bilateralFilter(gray, 7, sigmaSpace=75, sigmaColor=75)
ret, binary = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY)
closed = cv2.dilate(binary, None, iterations=130)
closed = cv2.erode(closed, None, iterations=127)
_, contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
c = sorted(contours, key=cv2.contourArea, reverse=True)[0]
# compute the rotated bounding box of the largest contour
rect = cv2.minAreaRect(c)
box = np.int0(cv2.boxPoints(rect))
# draw a bounding box arounded the detected barcode and display the image
draw_img = cv2.drawContours(raw_img.copy(), [box], -1, (0, 0, 255), 3)
h, w, _ = img.shape
Xs = [i[0] for i in box]
Ys = [i[1] for i in box]
x1 = min(Xs)
x2 = max(Xs)
y1 = min(Ys)
y2 = max(Ys)
hight = y2 - y1
width = x2 - x1
crop_img = img[0:h - hight, x1:x1 + width]
raw_img = raw_img[0:h - hight, x1:x1 + width]
cv2.imwrite(self.output + "/raw_draw_image1.jpg", draw_img)
cv2.imwrite(self.output + "/raw_cutting_image1.jpg", raw_img)
cv2.imwrite(self.output + "/gray_cutting_image1.jpg", crop_img)
def op_edge_test(self):
def gray_dege_test():
img = cv2.imread("./out_data/gradient_image1.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (9, 9), 0)
ret, binary = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY)
closed = cv2.dilate(binary, None, iterations=110)
closed = cv2.erode(closed, None, iterations=120)
_, contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
plt.imshow(img)
plt.show()
cv2.imwrite(self.output + "/gray_edge_test.jpg", img)
def fourier_edge_test():
img = cv2.imread('./out_data/gradient_image1.jpg', 0)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
rows, cols = img.shape
crow, ccol = int(rows / 2), int(cols / 2)
for i in range(crow - 30, crow + 30):
for j in range(ccol - 30, ccol + 30):
fshift[i][j] = 0.0
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift) # 进行高通滤波
# 取绝对值
img_back = np.abs(img_back)
plt.subplot(121), plt.imshow(img, cmap='gray') # 因图像格式问题,暂已灰度输出
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# 先对灰度图像进行伽马变换,以提升暗部细节
rows, cols = img_back.shape
gamma = copy.deepcopy(img_back)
rows = img.shape[0]
cols = img.shape[1]
for i in range(rows):
for j in range(cols):
gamma[i][j] = 5.0 * pow(gamma[i][j], 0.34) # 0.34这个参数是我手动调出来的,根据不同的图片,可以选择不同的数值
# 对灰度图像进行反转
for i in range(rows):
for j in range(cols):
gamma[i][j] = 255 - gamma[i][j]
plt.subplot(122), plt.imshow(gamma, cmap='gray')
plt.title('Result in HPF'), plt.xticks([]), plt.yticks([])
cv2.imwrite(self.output + "/fourier_edge_test_image1.jpg", gamma)
plt.show()
def canny_edge_test():
img = cv2.imread('./out_data/gradient_image1.jpg', 0)
edges = cv2.Canny(img, 100, 200)
plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('original'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(edges, cmap='gray')
plt.title('edge'), plt.xticks([]), plt.yticks([])
cv2.imwrite(self.output + "/canny_edge_test_image1.jpg", edges)
plt.show()
gray_dege_test()
fourier_edge_test()
canny_edge_test()
def op_trans_plot(self):
im_in = cv2.imread("./out_data/custom_binary_first1.jpg", cv2.IMREAD_GRAYSCALE)
th, im_th = cv2.threshold(im_in, 220, 255, cv2.THRESH_BINARY_INV)
# Copy the thresholded image.
im_floodfill = im_th.copy()
# Mask used to flood filling.
# Notice the size needs to be 2 pixels than the image.
h, w = im_th.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0, 0), 255)
cv2.imwrite(self.output + "/edge_processing1.jpg", im_floodfill)
def op_counter(self):
    """Estimate the number of sticks by SIFT feature matching between the
    flood-filled image and a reference icon, then display and save the
    annotated match plot.

    NOTE(review): the count ``a`` is the integer ratio of keypoint counts
    between the two images -- a heuristic that assumes the icon contributes
    a fixed number of keypoints per stick; confirm against the icon used.
    """
    ob1 = cv2.imread("./out_data/edge_processing1.jpg", cv2.IMREAD_GRAYSCALE)
    # ob1 = cv2.dilate(ob1, None, iterations=2)
    # Smooth while preserving edges, then erode/dilate to clean up noise.
    ob1 = cv2.bilateralFilter(ob1, 7, sigmaSpace=70, sigmaColor=70)
    ob1 = cv2.erode(ob1, None, iterations=2)  # 1 # 2
    ob1 = cv2.dilate(ob1, None, iterations=2)
    ob2 = cv2.imread("./raw_data/icon4.jpg", cv2.IMREAD_GRAYSCALE)
    # ob2 = cv2.bilateralFilter(ob2, 7, sigmaSpace=60, sigmaColor=60)
    ob2 = cv2.erode(ob2, None, iterations=1)
    # ob2 = cv2.dilate(ob2, None, iterations=1)
    # orb = cv2.xfeatures2d.SURF_create()
    orb = cv2.xfeatures2d.SIFT_create()
    keyp1, desp1 = orb.detectAndCompute(ob1, None)
    keyp2, desp2 = orb.detectAndCompute(ob2, None)
    # FLANN matcher with a KD-tree index; find the 2 nearest neighbours per descriptor.
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(desp1, desp2, k=2)
    matchesMask = [[0, 0] for i in range(len(matches))]
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:
            matchesMask[i] = [1, 0]
    # Lowe's ratio test: keep a match only when the nearest-neighbour distance
    # is less than 0.7 times the second-nearest distance.
    draw_params = dict(matchColor=(0, 255, 0), singlePointColor=(255, 0, 0), matchesMask=matchesMask, flags=0)
    img3 = cv2.drawMatchesKnn(ob1, keyp1, ob2, keyp2, matches, None, **draw_params)
    a = len(keyp1) // len(keyp2)
    plt.figure(figsize=(8, 8))
    plt.subplot(211)
    plt.imshow(img3)
    plt.subplot(212)
    plt.text(0.5, 0.6, "the number of sticks:" + str(a), size=30, ha="center", va="center")
    plt.axis('off')
    plt.show()
    cv2.imwrite(self.output+"/counter_sticks_image1.jpg", img3)
if __name__ == '__main__':
    # Run the full image-processing pipeline end to end.
    ob = processing_image()
    ob.op_gray_to_four_type()
    ob.op_first_to_three_type()
    # ob.op_cutting_image()
    ob.op_edge_test()
    ob.op_trans_plot()
    # Re-run the binarization stage with flag=True -- presumably to regenerate
    # the inputs op_counter() reads; confirm against op_first_to_three_type.
    ob.op_first_to_three_type(flag=True)
    ob.op_counter()
| StarcoderdataPython |
150191 | from typing import Callable
from typing import Tuple
from astrolib.base_objects import Matrix
from astrolib.base_objects import TimeSpan
def integrate(t_0: TimeSpan, X_0: Matrix, h: TimeSpan, dynamics_func: Callable[[TimeSpan, Matrix],Matrix]) -> Tuple[TimeSpan, Matrix, TimeSpan]:
    """Propagate a state vector one step with a first-order (Euler) scheme.

    Arguments:
        t_0            (TimeSpan) Epoch of the initial state.
        X_0            (Matrix)   Initial state vector, in column matrix form.
        h              (TimeSpan) Step size for the integration step to take.
        dynamics_func  (Callable[[TimeSpan, Matrix], Matrix]) State-vector
                       dynamics function, mapping an epoch and a state to the
                       state's time derivative.

    Returns:
        Tuple[TimeSpan, Matrix, TimeSpan] The propagated state vector epoch,
        the propagated state vector, and the step size actually taken.
    """
    derivative = dynamics_func(t_0, X_0)
    X_1 = X_0 + h.to_seconds() * derivative
    return t_0 + h, X_1, h
| StarcoderdataPython |
3278479 | <reponame>kurisufriend/modular-discord-bot-fw<gh_stars>0
"""
the "hello world" (if you will) of the plugin system.
there are only two required parts:
* 'hooks': a list of dispatch events to run() on
* run(event, ctx, bot): the function to run on-event, where event is the trigger,
ctx is the 'd'(data) key of the response, and 'bot' is the current bot
instance.
the rest is up to you.
this particular example listens for a message that contains the string 'dango' and
returns a response, similar to the traditional 'hello' -> 'hello, world!'
test interaction
"""
# Dispatch events this plugin subscribes to: run() fires on each MESSAGE_CREATE
# that client.dispatch() delivers.
hooks = ["MESSAGE_CREATE"]

def run(event, ctx, bot):
    """Reply with the lyric whenever a message consisting solely of "dango" arrives."""
    if ctx["content"] != "dango":
        return
    bot.send_msg(ctx["channel_id"], "to all the motherfuckers that shed a tear")
6419803 | <filename>azure/functions/__init__.py
from ._abc import HttpRequest, TimerRequest, InputStream, Context, Out # NoQA
from ._http import HttpResponse # NoQA
from ._queue import QueueMessage # NoQA
| StarcoderdataPython |
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
class User(models.Model):
    """A person who can create events."""

    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    # BUG FIX: the original passed ``range(5, 100)`` as the first positional
    # argument, which Django interprets as the field's verbose_name -- it never
    # constrained the value. Enforce the intended bounds (range(5, 100) covers
    # 5..99 inclusive) with validators instead.
    age = models.PositiveSmallIntegerField(
        validators=[MinValueValidator(5), MaxValueValidator(99)]
    )
class Event(models.Model):
    """An event created by a User."""
    event_name = models.CharField(max_length=200)
    # NOTE(review): Django appends "_id" to ForeignKey attribute names, so this
    # produces a "creator_id_id" DB column; the field would conventionally be
    # named "creator". Renaming changes the schema/API, so it is left as-is.
    creator_id = models.ForeignKey(User, on_delete=models.CASCADE)
    # NOTE(review): auto_now=True refreshes the date on *every* save; if a
    # creation timestamp was intended, auto_now_add=True is the usual choice
    # -- confirm before changing.
    event_time = models.DateField(auto_now=True)
| StarcoderdataPython |
323194 | <filename>advanced_modules/lv2_os_app_history_analyzer.py
# -*- coding: utf-8 -*-
"""module for LV2."""
import os, sys
from datetime import datetime
from advanced_modules import manager
from advanced_modules import interface
from advanced_modules import logger
from dfvfs.lib import definitions as dfvfs_definitions
class LV2OSAPPHISTORYAnalyzer(interface.AdvancedModuleAnalyzer):
    """Builds the lv2_os_app_history table by merging application-execution
    evidence already extracted into several lv1 artifact tables: UserAssist,
    Amcache file entries, Prefetch, Windows Timeline, application event logs,
    and automatic jumplists.

    NOTE(review): SQL queries interpolate ``par_id`` via f-strings; this is
    presumably a trusted internal identifier -- confirm it can never carry
    user-controlled text before reuse elsewhere.
    """

    NAME = 'lv2_os_app_history_analyzer'
    DESCRIPTION = 'Module for LV2 OS APP History'
    _plugin_classes = {}

    def __init__(self):
        super(LV2OSAPPHISTORYAnalyzer, self).__init__()

    def Analyze(self, par_id, configuration, source_path_spec, knowledge_base):
        """Collect app-execution records for partition *par_id* from the lv1
        tables and bulk-insert them into lv2_os_app_history through the
        configuration's database cursor. Returns False on schema errors."""
        this_file_path = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'schema' + os.sep
        # list of all yaml schema files
        yaml_list = [this_file_path + 'lv2_os_app_history.yaml']
        # list of all target tables
        table_list = ['lv2_os_app_history']
        # create every target table
        for count in range(0, len(yaml_list)):
            if not self.LoadSchemaFromYaml(yaml_list[count]):
                logger.error('cannot load schema from yaml: {0:s}'.format(table_list[count]))
                return False
            # if the table does not exist yet, create it
            if not configuration.cursor.check_table_exist(table_list[count]):
                ret = self.CreateTable(configuration.cursor)
                if not ret:
                    logger.error('cannot create database table name: {0:s}'.format(table_list[count]))
                    return False
        # UserAssist
        query = f"SELECT file_name, last_run_time FROM lv1_os_win_reg_user_assist WHERE par_id='{par_id}';"
        results = configuration.cursor.execute_query_mul(query)
        # execute_query_mul returns an int on error; skip empty/error results
        if type(results) == int or len(results) == 0:
            pass
        else:
            insert_data = []
            for result in results:
                insert_data.append(tuple(
                    [par_id, configuration.case_id, configuration.evidence_id, result[0][result[0].rfind('/') + 1:],
                     result[1], result[0], '', 'UserAssist']))
            query = "Insert into lv2_os_app_history values (%s, %s, %s, %s, %s, %s, %s, %s);"
            configuration.cursor.bulk_execute(query, insert_data)
        # Amcache - file_entries
        query = f"SELECT file_name, key_last_updated_time, full_path FROM lv1_os_win_reg_amcache_file WHERE par_id='{par_id}';"
        results = configuration.cursor.execute_query_mul(query)
        if type(results) == int or len(results) == 0:
            pass
        else:
            insert_data = []
            for result in results:
                insert_data.append(tuple(
                    [par_id, configuration.case_id, configuration.evidence_id, result[0], result[1], result[2], '',
                     'Amcache-file_entries']))
            query = "Insert into lv2_os_app_history values (%s, %s, %s, %s, %s, %s, %s, %s);"
            configuration.cursor.bulk_execute(query, insert_data)
        # Prefetch - reference_file to be added later
        query = f"SELECT program_name, program_path, program_run_count, file_created_time, last_run_time, " \
                f"`2nd_last_run_time`, `3rd_last_run_time`, `4th_last_run_time`, `5th_last_run_time`, " \
                f"`6th_last_run_time`, `7th_last_run_time`, `8th_last_run_time` " \
                f"FROM lv1_os_win_prefetch WHERE par_id='{par_id}';"
        results = configuration.cursor.execute_query_mul(query)
        if type(results) == int or len(results) == 0:
            pass
        else:
            insert_data = []
            for result in results:
                # One row per recorded run time; ' ' marks an unused slot.
                insert_data.append(tuple(
                    [par_id, configuration.case_id, configuration.evidence_id, result[0], result[3], result[1], '',
                     'Prefetch']))
                if result[4] != ' ':
                    insert_data.append(tuple(
                        [par_id, configuration.case_id, configuration.evidence_id, result[0], result[4], result[1], '',
                         'Prefetch']))
                if result[5] != ' ':
                    insert_data.append(tuple(
                        [par_id, configuration.case_id, configuration.evidence_id, result[0], result[5], result[1], '',
                         'Prefetch']))
                if result[6] != ' ':
                    insert_data.append(tuple(
                        [par_id, configuration.case_id, configuration.evidence_id, result[0], result[6], result[1], '',
                         'Prefetch']))
                if result[7] != ' ':
                    insert_data.append(tuple(
                        [par_id, configuration.case_id, configuration.evidence_id, result[0], result[7], result[1], '',
                         'Prefetch']))
                if result[8] != ' ':
                    insert_data.append(tuple(
                        [par_id, configuration.case_id, configuration.evidence_id, result[0], result[8], result[1], '',
                         'Prefetch']))
                if result[9] != ' ':
                    insert_data.append(tuple(
                        [par_id, configuration.case_id, configuration.evidence_id, result[0], result[9], result[1], '',
                         'Prefetch']))
                if result[10] != ' ':
                    insert_data.append(tuple(
                        [par_id, configuration.case_id, configuration.evidence_id, result[0], result[10], result[1], '',
                         'Prefetch']))
                if result[11] != ' ':
                    insert_data.append(tuple(
                        [par_id, configuration.case_id, configuration.evidence_id, result[0], result[11], result[1], '',
                         'Prefetch']))
            query = "Insert into lv2_os_app_history values (%s, %s, %s, %s, %s, %s, %s, %s);"
            configuration.cursor.bulk_execute(query, insert_data)
        # Windows Timeline
        query = f"SELECT program_name, start_time, content FROM lv1_os_win_windows_timeline WHERE par_id='{par_id}';"
        results = configuration.cursor.execute_query_mul(query)
        if type(results) == int or len(results) == 0:
            pass
        else:
            insert_data = []
            for result in results:
                insert_data.append(tuple(
                    [par_id, configuration.case_id, configuration.evidence_id, result[0][result[0].rfind('/') + 1:],
                     result[1], result[0], result[2],
                     'Windows Timeline']))
            query = "Insert into lv2_os_app_history values (%s, %s, %s, %s, %s, %s, %s, %s);"
            configuration.cursor.bulk_execute(query, insert_data)
        # Eventlog - application
        query = f"SELECT application_name, time, path FROM lv1_os_win_event_logs_applications WHERE par_id='{par_id}';"
        results = configuration.cursor.execute_query_mul(query)
        if type(results) == int or len(results) == 0:
            pass
        else:
            insert_data = []
            for result in results:
                insert_data.append(tuple(
                    [par_id, configuration.case_id, configuration.evidence_id, result[0],
                     str(result[1]), result[2], '',
                     'Eventlogs-Application']))
            query = "Insert into lv2_os_app_history values (%s, %s, %s, %s, %s, %s, %s, %s);"
            configuration.cursor.bulk_execute(query, insert_data)
        # Jumplist - automatics
        query = f"SELECT file_name, file_path, record_time, application_name " \
                f"FROM lv1_os_win_jumplist_automatics WHERE par_id='{par_id}';"
        results = configuration.cursor.execute_query_mul(query)
        if type(results) == int or len(results) == 0:
            pass
        else:
            insert_data = []
            for result in results:
                insert_data.append(tuple(
                    [par_id, configuration.case_id, configuration.evidence_id, result[3],
                     result[2], '', result[1],
                     'Jumplist-automatics']))
            query = "Insert into lv2_os_app_history values (%s, %s, %s, %s, %s, %s, %s, %s);"
            configuration.cursor.bulk_execute(query, insert_data)
manager.AdvancedModulesManager.RegisterModule(LV2OSAPPHISTORYAnalyzer) | StarcoderdataPython |
1998227 | <filename>Outdated/RNN_draft.py
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
# vectorize data
# Parallel corpora for the encoder (input) and decoder (output) sides of the
# seq2seq model; both are filled by make_dictionary() below.
input_text = []
output_text = []
# NOTE(review): these two character sets appear unused below -- the character
# inventories actually come from make_dictionary()'s return values.
input_character = set()
output_character = set()
def make_dictionary(fname):
    """Build character-level lookup tables from a text file.

    Reads *fname* line by line, collects the set of distinct characters
    (including newlines), and returns:
        (char_to_int, int_to_char, lines, sorted_chars)
    where char_to_int maps each character to its index in the sorted
    character list and int_to_char is the inverse mapping.
    """
    with open(fname, 'r') as handle:
        lines = handle.readlines()
    # Distinct characters across every line, in sorted order.
    unique_chars = sorted({ch for line in lines for ch in line})
    print('Length of unique words in dictionary: ', len(unique_chars))
    # index tables: char -> int and its inverse
    char_to_int = {ch: idx for idx, ch in enumerate(unique_chars)}
    int_to_char = {idx: ch for ch, idx in char_to_int.items()}
    print('Lookup table 1 (map chars to integer): ', char_to_int)
    print('Lookup table 2 (map integer to chars: ', int_to_char)
    return char_to_int, int_to_char, lines, unique_chars
# Build vocabularies for both sides of the parallel corpus.
input_index, input_index_rev, input_text, input_char = make_dictionary('seq_dic_nan_test.txt')
output_index, output_index_rev, output_text, output_char = make_dictionary('seq_dic_nan_test_y.txt')
encode_token = len(input_char)
decode_token = len(output_char)
max_encode_len = max([len(txt) for txt in input_text])
max_decode_len = max([len(txt) for txt in output_text])
print('Number of samples: ', len(input_text))
print('Number of unique input: ', encode_token)
print('Number of unique output: ', decode_token)
print('Max input length: ', max_encode_len)
print('Max output length: ', max_decode_len)
# 3d matrix
# input --> encode --> decode --> node --> decode
# One-hot tensors: (num_samples, padded_sequence_length, vocabulary_size).
encode_input = np.zeros(
    (len(input_text), max_encode_len, encode_token), dtype='float32')
decode_input = np.zeros(
    (len(input_text), max_decode_len, decode_token), dtype='float32')
decode_output = np.zeros(
    (len(output_text), max_decode_len, decode_token), dtype='float32')
print('Dimension(words_number * padded_word * features)')
print('encode input', encode_input.shape)
print('decode input: ', decode_input.shape)
print('decode output: ', decode_output.shape)
# One-hot encode each character; decode_output is decode_input shifted one
# timestep earlier (teacher forcing target).
for i, (input_line, output_line) in enumerate(zip(input_text, output_text)):
    for t, char in enumerate(input_line):
        encode_input[i, t, input_index[char]] = 1
    for t, char in enumerate(output_line):
        decode_input[i, t, output_index[char]] = 1
        if (t > 0):
            decode_output[i, t - 1, output_index[char]] = 1
# define parameters that can be tuned
batch_size = 64
epochs = 100
latent_dim = 256
# input --> LSTM(encoder) -->output(input) --> LSTM(decoder) --> output
# still vector to vector
# Encoder: keep only the final hidden/cell states to seed the decoder.
encoder_inputs = Input(shape=(None, encode_token))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
#set up decoder layer
decoder_inputs = Input(shape=(None, decode_token))
decoder = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder(decoder_inputs, initial_state=encoder_states)
decoder_dense = Dense(decode_token, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit([encode_input, decode_input], decode_output, batch_size=batch_size, epochs=epochs,
          validation_split=0.2)
model.save('example.h5')
3293220 | <reponame>ShengyuH/PredateOverlap
import time
class AverageMeter(object):
    """Tracks the latest value, running average, and running variance of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0.0
        self.sq_sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        """Fold in *n* observations of *val*.

        Maintains avg = sum/count and var = E[x^2] - E[x]^2.
        """
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
        self.sq_sum += n * val ** 2
        self.var = self.sq_sum / self.count - self.avg ** 2
class Timer(object):
    """A simple wall-clock timer that accumulates elapsed time over calls."""

    def __init__(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.avg = 0.

    def reset(self):
        """Zero out all accumulated timing state."""
        self.total_time = 0
        self.calls = 0
        self.start_time = 0
        self.diff = 0
        self.avg = 0

    def tic(self):
        """Start (or restart) the timer."""
        # time.time instead of time.clock: time.clock does not normalize
        # for multithreading.
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop the timer; return the average elapsed time per call, or the
        latest interval when *average* is False."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.avg = self.total_time / self.calls
        return self.avg if average else self.diff
| StarcoderdataPython |
3530102 | import sqlite3
import subprocess
# Get the restore point
# (Python 2 script: raw_input reads the backup date from the operator.)
res_pt = raw_input("Enter the restore point date\n")
# Restoring
# WARNING(review): the operator's input is interpolated straight into a shell
# command run with shell=True -- a crafted "date" can execute arbitrary
# commands. Confirm the input source is trusted, or sanitize/avoid the shell.
script = "cd backups & sqlite3 Restored_DB.db < "+ res_pt + ".bak"
restore = subprocess.Popen(script, shell = True)
restore.wait()
print("Database Restored successfully")
# NOTE(review): kill() after wait() is a no-op -- the process has already exited.
restore.kill()
1788357 | <gh_stars>10-100
import random
from context_creator import ContextCreator
# returns a random TARGET context for the wordset and parameters supplied to the constructor
class RandomContextCreator(ContextCreator):
    """Returns random TARGET contexts for the word set and parameters supplied
    to the constructor.

    Each context is a random-length list of words drawn uniformly from
    ``word_list``, with the query token substituted at one random position and
    a tag drawn uniformly from ``tagset``.
    """

    def __init__(self, word_list, num_contexts=5000, length_bounds=(6, 12), tagset=(0,)):
        """
        :param word_list: iterable of candidate context words (deduplicated).
        :param num_contexts: number of contexts get_contexts() returns.
        :param length_bounds: inclusive (min, max) context length.
        :param tagset: iterable of candidate tags.

        Note: the defaults were ``[6, 12]`` / ``set([0])``; immutable tuples
        avoid the shared-mutable-default pitfall with identical behavior.
        """
        self.word_list = set(word_list)
        self.num_contexts = num_contexts
        self.length_bounds = length_bounds
        self.tagset = set(tagset)
        # PERF: random.choice needs an indexable sequence; materialize the
        # tuples once here instead of rebuilding them on every single draw
        # (the original did tuple(set) per sampled word).
        self._word_choices = tuple(self.word_list)
        self._tag_choices = tuple(self.tagset)

    def get_contexts(self, token):
        """Return ``num_contexts`` randomly generated context objects for *token*."""
        return [self.build_context_obj(token) for _ in range(self.num_contexts)]

    def random_context(self):
        """Return a list of random words whose length lies within length_bounds."""
        rand_length = random.randint(self.length_bounds[0], self.length_bounds[1])
        return [random.choice(self._word_choices) for _ in range(rand_length)]

    def build_context_obj(self, token):
        """Build one context object for *token*.

        Shape: { 'token': <token>, 'index': <idx>, 'target': [<target toks>], 'tag': <tag> }
        (the previous comment also mentioned a 'source' key that was never produced).
        """
        rand_context = self.random_context()
        # pick the substitution index after the random length is known
        rand_idx = random.randint(0, len(rand_context) - 1)
        rand_context[rand_idx] = token
        random_tag = random.choice(self._tag_choices)
        return { 'token': token, 'index': rand_idx, 'target': rand_context, 'tag': random_tag }
| StarcoderdataPython |
8006395 | <filename>Fundamentals/Exercises/Basic_Syntax_Lab/5_patterns.py
char = '*'
num = int(input())
# Ascending half: rows of width 1 .. num.
for width in range(1, num + 1):
    print(char * width)
# Descending half: rows of width num-1 .. 1 (the peak row is not repeated).
for width in range(num - 1, 0, -1):
    print(char * width)
11322838 | <reponame>CAVED123/reinvent-randomized
import random
import math
import numpy as np
import scipy.stats as sps
import torch
import torch.utils.data as tud
import torch.nn.utils as tnnu
import models.dataset as md
import models.vocabulary as mv
import utils.chem as uc
import utils.tensorboard as utb
class Action:
    """(Abstract) base class for actions that share an optional logger."""

    def __init__(self, logger=None):
        """
        Initializes an action.
        :param logger: An optional logger instance.
        """
        self.logger = logger

    def _log(self, level, msg, *args):
        """
        Logs a message with the class logger, if one was supplied.
        :param level: Log level (name of a logger method, e.g. "info").
        :param msg: Message to log.
        :param *args: The arguments to escape.
        :return:
        """
        if not self.logger:
            return
        log_method = getattr(self.logger, level)
        log_method(msg, *args)
class TrainModelPostEpochHook(Action):
    """No-op training hook executed after every epoch.

    Subclass and override run() to save checkpoints, change the learning
    rate, stop early, etc.
    """

    def __init__(self, logger=None):
        """
        :param logger: An optional logger instance.
        """
        Action.__init__(self, logger)

    def run(self, model, training_set, epoch):  # pylint: disable=unused-argument
        """
        Performs the post-epoch hook. Notice that model should be modified in-place.
        :param model: Model instance trained up to that epoch.
        :param training_set: List of SMILES used as the training set.
        :param epoch: Epoch number (for logging purposes).
        :return: Boolean that indicates whether the training should continue.
        """
        # Base implementation does nothing and never stops training.
        return True
class TrainModel(Action):
    """Action that trains a model for a number of epochs, yielding per-batch
    losses and invoking a post-epoch hook between epochs."""

    def __init__(self, model, optimizer, training_sets, batch_size, clip_gradient,
                 epochs, post_epoch_hook=None, logger=None):
        """
        Initializes the training of an epoch.
        :param model: A model instance, not loaded in sampling mode.
        :param optimizer: The optimizer instance already initialized on the model.
        :param training_sets: An iterable of training-set SMILES lists, either
            cycled using itertools.cycle or with as many entries as epochs.
        :param batch_size: Batch size to use.
        :param clip_gradient: Clip the gradients after each backpropagation
            (values <= 0 disable clipping).
        :param epochs: Number of epochs to train.
        :param post_epoch_hook: Optional TrainModelPostEpochHook; a no-op hook
            is used when omitted.
        :param logger: An optional logger instance.
        """
        Action.__init__(self, logger)
        self.model = model
        self.optimizer = optimizer
        self.epochs = epochs
        self.clip_gradient = clip_gradient
        self.batch_size = batch_size
        self.training_sets = training_sets
        if not post_epoch_hook:
            self.post_epoch_hook = TrainModelPostEpochHook(logger=self.logger)
        else:
            self.post_epoch_hook = post_epoch_hook

    def run(self):
        """
        Performs a training epoch with the parameters used in the constructor.
        :return: An iterator of (total_batches, epoch_iterator), where the epoch iterator
                 returns the loss function at each batch in the epoch.
        """
        for epoch, training_set in zip(range(1, self.epochs + 1), self.training_sets):
            dataloader = self._initialize_dataloader(training_set)
            epoch_iterator = self._epoch_iterator(dataloader)
            # The caller must exhaust epoch_iterator before the next yield to
            # actually perform the optimization steps for this epoch.
            yield len(dataloader), epoch_iterator
            # Run the hook in eval mode; it may stop training by returning False.
            self.model.set_mode("eval")
            post_epoch_status = self.post_epoch_hook.run(self.model, training_set, epoch)
            self.model.set_mode("train")
            if not post_epoch_status:
                break

    def _epoch_iterator(self, dataloader):
        # One optimization step per batch; yields the (mean NLL) loss tensor.
        for padded_seqs, seq_lengths in dataloader:
            loss = self.model.likelihood(padded_seqs, seq_lengths).mean()
            self.optimizer.zero_grad()
            loss.backward()
            if self.clip_gradient > 0:
                tnnu.clip_grad_norm_(self.model.network.parameters(), self.clip_gradient)
            self.optimizer.step()
            yield loss

    def _initialize_dataloader(self, training_set):
        # Wrap the SMILES list in the model's vocabulary-aware Dataset.
        dataset = md.Dataset(smiles_list=training_set, vocabulary=self.model.vocabulary, tokenizer=mv.SMILESTokenizer())
        return tud.DataLoader(dataset, batch_size=self.batch_size, shuffle=True,
                              collate_fn=md.Dataset.collate_fn)
class CollectStatsFromModel(Action):
    """Collects stats from an existing RNN model and writes them to TensorBoard."""

    def __init__(self, model, epoch, training_set, validation_set, writer, sample_size,
                 with_weights=False, to_mol_func=uc.to_mol, other_values=None, logger=None):
        """
        Creates an instance of CollectStatsFromModel.
        :param model: A model instance initialized as sampling_mode.
        :param epoch: Epoch number to be sampled (informative purposes).
        :param training_set: Iterator with the training set.
        :param validation_set: Iterator with the validation set.
        :param writer: Writer object (Tensorboard writer).
        :param sample_size: Number of molecules to sample from the training / validation / sample set.
        :param with_weights: Whether to also collect weight histograms.
        :param to_mol_func: Mol function used (change for deepsmiles or other representations).
        :param other_values: Other values to save for the epoch.
        :param logger: An optional logger instance.
        """
        Action.__init__(self, logger)
        self.model = model
        self.epoch = epoch
        self.training_set = training_set
        self.validation_set = validation_set
        self.writer = writer
        self.other_values = other_values
        self.with_weights = with_weights
        self.sample_size = max(sample_size, 1)  # guard against 0/negative sizes
        self.to_mol_func = to_mol_func
        self.data = {}
        # NOTE: forward reference -- CalculateNLLsFromModel is defined later
        # in this module; fine at runtime since this runs at call time.
        self._calc_nlls_action = CalculateNLLsFromModel(self.model, 128, self.logger)

    @torch.no_grad()
    def run(self):
        """
        Collects stats for a specific model object, epoch, validation set, training set and writer object.
        :return: A dictionary with all the data saved for that given epoch.
        """
        self._log("info", "Collecting data for epoch %s", self.epoch)
        self.data = {}
        self._log("debug", "Sampling SMILES")
        sampled_smis, sampled_nlls = [np.array(a) for a in zip(*self.model.sample_smiles(num=self.sample_size))]
        self._log("debug", "Obtaining molecules from SMILES")
        # keep only SMILES that parse into valid molecules
        sampled_mols = [smi_mol for smi_mol in [(smi, self.to_mol_func(smi)) for smi in sampled_smis] if smi_mol[1]]
        self._log("debug", "Calculating NLLs for the validation and training sets")
        validation_nlls, training_nlls = self._calculate_validation_training_nlls()
        if self.with_weights:
            self._log("debug", "Calculating weight stats")
            self._weight_stats()
        self._log("debug", "Calculating nll stats")
        self._nll_stats(sampled_nlls, validation_nlls, training_nlls)
        self._log("debug", "Calculating validity stats")
        self._valid_stats(sampled_mols)
        self._log("debug", "Drawing some molecules")
        self._draw_mols(sampled_mols)
        if self.other_values:
            self._log("debug", "Adding other values")
            for name, val in self.other_values.items():
                self._add_scalar(name, val)
        return self.data

    def _calculate_validation_training_nlls(self):
        # NLLs over random same-size subsets so the distributions are comparable.
        def calc_nlls(smiles_set):
            subset = random.sample(smiles_set, self.sample_size)
            return np.array(list(self._calc_nlls_action.run(subset)))
        return (calc_nlls(self.validation_set), calc_nlls(self.training_set))

    def _valid_stats(self, mols):
        # percentage of sampled SMILES that parsed into valid molecules
        self._add_scalar("valid", 100.0*len(mols)/self.sample_size)

    def _weight_stats(self):
        for name, weights in self.model.network.named_parameters():
            self._add_histogram("weights/{}".format(name), weights.clone().cpu().data.numpy())

    def _nll_stats(self, sampled_nlls, validation_nlls, training_nlls):
        self._add_histogram("nll_plot/sampled", sampled_nlls)
        self._add_histogram("nll_plot/validation", validation_nlls)
        self._add_histogram("nll_plot/training", training_nlls)
        self._add_scalars("nll/avg", {
            "sampled": sampled_nlls.mean(),
            "validation": validation_nlls.mean(),
            "training": training_nlls.mean()
        })
        self._add_scalars("nll/var", {
            "sampled": sampled_nlls.var(),
            "validation": validation_nlls.var(),
            "training": training_nlls.var()
        })

        # Jensen-Shannon divergence across the three NLL samples, truncated to
        # the shortest sample so the arrays align element-wise.
        def jsd(dists):
            min_size = min(len(dist) for dist in dists)
            dists = [dist[:min_size] for dist in dists]
            num_dists = len(dists)
            avg_dist = np.sum(dists, axis=0) / num_dists
            return np.sum([sps.entropy(dist, avg_dist) for dist in dists]) / num_dists

        self._add_scalar("nll_plot/jsd_joined", jsd([sampled_nlls, training_nlls, validation_nlls]))

    def _draw_mols(self, mols):
        try:
            # random.sample raises ValueError when fewer than 20 mols exist;
            # in that case simply skip drawing.
            smis, mols = zip(*random.sample(mols, 20))
            utb.add_mols(self.writer, "molecules", mols, mols_per_row=4, legends=smis, global_step=self.epoch)
        except ValueError:
            pass

    def _add_scalar(self, key, val):
        # record in the returned dict and in TensorBoard
        self.data[key] = val
        self.writer.add_scalar(key, val, self.epoch)

    def _add_scalars(self, key, dict_vals):
        for k, val in dict_vals.items():
            self.data["{}.{}".format(key, k)] = val
        self.writer.add_scalars(key, dict_vals, self.epoch)

    def _add_histogram(self, key, vals):
        self.data[key] = vals
        self.writer.add_histogram(key, vals, self.epoch)
class SampleModel(Action):
    """Action that samples SMILES strings from a model in fixed-size batches."""

    def __init__(self, model, batch_size, logger=None):
        """
        Creates an instance of SampleModel.
        :param model: A model instance (better in sampling mode).
        :param batch_size: Batch size to use.
        :param logger: An optional logger instance.
        """
        Action.__init__(self, logger)
        self.model = model
        self.batch_size = batch_size

    def run(self, num):
        """
        Samples the model for the given number of SMILES.
        :param num: Number of SMILES to sample.
        :return: An iterator over the sampled (smiles, nll) pairs.
        """
        remaining = num
        while remaining > 0:
            # The last batch may be smaller than batch_size.
            current_batch_size = min(remaining, self.batch_size)
            yield from self.model.sample_smiles(current_batch_size)
            remaining -= current_batch_size
class CalculateNLLsFromModel(Action):
    """Action that computes the model's negative log-likelihood for SMILES strings."""

    def __init__(self, model, batch_size, logger=None):
        """
        Creates an instance of CalculateNLLsFromModel.
        :param model: A model instance.
        :param batch_size: Batch size to use.
        :param logger: An optional logger instance.
        """
        Action.__init__(self, logger)
        self.model = model
        self.batch_size = batch_size

    def run(self, smiles_list):
        """
        Calculates the NLL for a set of SMILES strings.
        :param smiles_list: List with SMILES.
        :return: An iterator with each NLLs in the same order as the SMILES list.
        """
        dataset = md.Dataset(smiles_list, self.model.vocabulary, self.model.tokenizer)
        # shuffle=False keeps the NLLs aligned with the input SMILES order
        dataloader = tud.DataLoader(dataset, batch_size=self.batch_size, collate_fn=md.Dataset.collate_fn,
                                    shuffle=False)
        for batch in dataloader:
            for nll in self.model.likelihood(*batch).data.cpu().numpy():
                yield nll
| StarcoderdataPython |
94310 | <reponame>mojaie/kiwiii-server<filename>kiwiii/test/parser/test_helper.py
#
# (C) 2014-2017 <NAME>
# Licensed under the MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import os
import unittest
from kiwiii.parser import helper
# Path to a sample instrument export used as a test fixture.
TEST_FILE = os.path.join(
    os.path.dirname(__file__),
    "../../../resources/raw/instruments/SpectraMaxM2.txt"
)
class TestHelper(unittest.TestCase):
    """Unit tests for kiwiii.parser.helper."""

    def test_well_index(self):
        # (well label, expected zero-based index) on a 384-well (16x24) plate.
        # Zero-padded ("A01") and lower-case ("p24") labels must also work.
        cases = [
            ("A1", 0),
            ("A24", 23),
            ("P1", 360),
            ("P24", 383),
            ("A01", 0),
            ("p24", 383),
        ]
        for label, expected in cases:
            self.assertEqual(helper.well_index(label), expected)
| StarcoderdataPython |
5094885 | <reponame>AustralianDisabilityLimited/MultiversePlatform<filename>client/Scripts/SceneQuery.py<gh_stars>10-100
#
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
from Axiom.MathLib import Vector3, Ray
import Multiverse.Base
import ClientAPI
import WorldObject
class RaySceneQueryResult:
    """One hit from a ray scene query: either a world object or a world fragment."""

    def __init__(self, distance, obj, loc):
        # Distance from the ray origin to the intersection.
        self.distance = distance
        # WorldObject that was hit, or None for a world-fragment hit.
        self.worldObject = obj
        # Intersection point for a world-fragment hit, or None otherwise.
        self.fragmentLocation = loc
class RaySceneQuery:
    """Wraps the scene manager's ray query, translating engine-level hits into
    RaySceneQueryResult objects."""
    #
    # Constructor
    #
    def __init__(self, origin, dir):
        ray = Ray(origin, dir)
        # Stored through __dict__ -- presumably to bypass a custom
        # __setattr__ somewhere in the hierarchy; confirm before simplifying.
        self.__dict__['_rayQuery'] = ClientAPI._sceneManager.CreateRayQuery(ray)
    #
    # Methods
    #
    def Execute(self):
        """Run the ray query and return a list of RaySceneQueryResult hits,
        skipping scene objects that are not Multiverse ObjectNodes."""
        results = self._rayQuery.Execute()
        rv = []
        for entry in results:
            if entry.SceneObject is not None:
                if isinstance(entry.SceneObject.UserData, Multiverse.Base.ObjectNode):
                    # wrap (or reuse) the client-side WorldObject for this node
                    existingObject = WorldObject._GetExistingWorldObject(entry.SceneObject.UserData)
                    rv.append(RaySceneQueryResult(entry.Distance, existingObject, None))
                else:
                    ClientAPI.Write("Skipping non-multiverse object: %s" % entry.SceneObject.UserData)
                    # ignore this object
                    pass
            elif entry.worldFragment is not None:
                # terrain/world geometry hit: record only the intersection point
                rv.append(RaySceneQueryResult(entry.Distance, None, entry.worldFragment.SingleIntersection))
        return rv
    # NOTE(review): this commented-out Dispose calls RemoveLight on a _light
    # attribute that this class never defines -- it looks copy-pasted from a
    # Light wrapper; a real Dispose would release _rayQuery instead.
    # def Dispose(self):
    #     ClientAPI._sceneManager.RemoveLight(self._light)
| StarcoderdataPython |
3414177 | <gh_stars>0
from functools import reduce
from typing import List
from bscscan.enums.actions_enum import ActionsEnum as actions
from bscscan.enums.fields_enum import FieldsEnum as fields
from bscscan.enums.modules_enum import ModulesEnum as modules
from bscscan.enums.tags_enum import TagsEnum as tags
class Accounts:
@staticmethod
def get_eth_balance(address: str) -> str:
url = (
f"{fields.MODULE}"
f"{modules.ACCOUNT}"
f"{fields.ACTION}"
f"{actions.BALANCE}"
f"{fields.ADDRESS}"
f"{address}"
f"{fields.TAG}"
f"{tags.LATEST}"
)
return url
# r = requests.get(url)
# return conversions.to_ticker_unit(parser.get_result(r))
@staticmethod
def get_eth_balance_multiple(addresses: List[str]) -> str:
# NOTE: Max 20 wallets at a time
address_list = reduce(lambda w1, w2: str(w1) + "," + str(w2), addresses)
url = (
f"{fields.MODULE}"
f"{modules.ACCOUNT}"
f"{fields.ACTION}"
f"{actions.BALANCE_MULTI}"
f"{fields.ADDRESS}"
f"{address_list}"
f"{fields.TAG}"
f"{tags.LATEST}"
)
return url
# r = requests.get(url)
# return [conversions.to_ticker_unit(r["balance"]) for r in parser.get_result(r)]
@staticmethod
def get_normal_txs_by_address(
address: str, startblock: int, endblock: int, sort: str,
) -> str:
# NOTE: Returns the last 10k events
url = (
f"{fields.MODULE}"
f"{modules.ACCOUNT}"
f"{fields.ACTION}"
f"{actions.TXLIST}"
f"{fields.ADDRESS}"
f"{address}"
f"{fields.START_BLOCK}"
f"{str(startblock)}"
f"{fields.END_BLOCK}"
f"{str(endblock)}"
f"{fields.SORT}"
f"{sort}"
)
return url
@staticmethod
def get_normal_txs_by_address_paginated(
address: str, page: int, offset: int, startblock: int, endblock: int, sort: str,
) -> str:
url = (
f"{fields.MODULE}"
f"{modules.ACCOUNT}"
f"{fields.ACTION}"
f"{actions.TXLIST}"
f"{fields.ADDRESS}"
f"{address}"
f"{fields.START_BLOCK}"
f"{str(startblock)}"
f"{fields.END_BLOCK}"
f"{str(endblock)}"
f"{fields.SORT}"
f"{sort}"
f"{fields.PAGE}"
f"{str(page)}"
f"{fields.OFFSET}"
f"{str(offset)}"
)
return url
@staticmethod
def get_internal_txs_by_address(
address: str, startblock: int, endblock: int, sort: str,
) -> str:
# NOTE: Returns the last 10k events
url = (
f"{fields.MODULE}"
f"{modules.ACCOUNT}"
f"{fields.ACTION}"
f"{actions.TXLIST_INTERNAL}"
f"{fields.ADDRESS}"
f"{address}"
f"{fields.START_BLOCK}"
f"{str(startblock)}"
f"{fields.END_BLOCK}"
f"{str(endblock)}"
f"{fields.SORT}"
f"{sort}"
)
return url
@staticmethod
def get_internal_txs_by_address_paginated(
    address: str, page: int, offset: int, startblock: int, endblock: int, sort: str,
) -> str:
    """Paginated query string for internal transactions of *address*."""
    url = (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.TXLIST_INTERNAL}"
        f"{fields.ADDRESS}{address}"
        f"{fields.START_BLOCK}{startblock}"
        f"{fields.END_BLOCK}{endblock}"
        f"{fields.SORT}{sort}"
        f"{fields.PAGE}{page}"
        f"{fields.OFFSET}{offset}"
    )
    return url
@staticmethod
def get_internal_txs_by_txhash(txhash: str) -> str:
    """Query string listing internal transactions of one transaction hash.

    NOTE: the API returns at most the last 10k events.
    """
    return (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.TXLIST_INTERNAL}"
        f"{fields.TXHASH}{txhash}"
    )
@staticmethod
def get_internal_txs_by_block_range_paginated(
    startblock: int, endblock: int, page: int, offset: int, sort: str,
) -> str:
    """Paginated query string for internal transactions in a block range.

    NOTE: the API returns at most the last 10k events.
    """
    url = (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.TXLIST_INTERNAL}"
        f"{fields.START_BLOCK}{startblock}"
        f"{fields.END_BLOCK}{endblock}"
        f"{fields.SORT}{sort}"
        f"{fields.PAGE}{page}"
        f"{fields.OFFSET}{offset}"
    )
    return url
@staticmethod
def get_erc20_token_transfer_events_by_address(
    address: str, startblock: int, endblock: int, sort: str,
) -> str:
    """Query string for ERC-20 transfer events involving *address*.

    NOTE: the API returns at most the last 10k events.
    """
    url = (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.TOKENTX}"
        f"{fields.ADDRESS}{address}"
        f"{fields.START_BLOCK}{startblock}"
        f"{fields.END_BLOCK}{endblock}"
        f"{fields.SORT}{sort}"
    )
    return url
@staticmethod
def get_erc20_token_transfer_events_by_contract_address_paginated(
    contract_address: str, page: int, offset: int, sort: str
) -> str:
    """Paginated query string for ERC-20 transfers of one token contract."""
    url = (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.TOKENTX}"
        f"{fields.CONTRACT_ADDRESS}{contract_address}"
        f"{fields.SORT}{sort}"
        f"{fields.PAGE}{page}"
        f"{fields.OFFSET}{offset}"
    )
    return url
@staticmethod
def get_erc20_token_transfer_events_by_address_and_contract_paginated(
    contract_address: str, address: str, page: int, offset: int, sort: str
) -> str:
    """Paginated query string for ERC-20 transfers filtered by both the
    token contract and a wallet address."""
    url = (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.TOKENTX}"
        f"{fields.CONTRACT_ADDRESS}{contract_address}"
        f"{fields.ADDRESS}{address}"
        f"{fields.SORT}{sort}"
        f"{fields.PAGE}{page}"
        f"{fields.OFFSET}{offset}"
    )
    return url
@staticmethod
def get_erc721_token_transfer_events_by_address(
    address: str, startblock: int, endblock: int, sort: str,
) -> str:
    """Query string for ERC-721 (NFT) transfer events involving *address*."""
    url = (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.TOKENNFTTX}"
        f"{fields.ADDRESS}{address}"
        f"{fields.START_BLOCK}{startblock}"
        f"{fields.END_BLOCK}{endblock}"
        f"{fields.SORT}{sort}"
    )
    return url
@staticmethod
def get_erc721_token_transfer_events_by_contract_address_paginated(
    contract_address: str, page: int, offset: int, sort: str
) -> str:
    """Paginated query string for ERC-721 transfers of one token contract."""
    url = (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.TOKENNFTTX}"
        f"{fields.CONTRACT_ADDRESS}{contract_address}"
        f"{fields.SORT}{sort}"
        f"{fields.PAGE}{page}"
        f"{fields.OFFSET}{offset}"
    )
    return url
@staticmethod
def get_erc721_token_transfer_events_by_address_and_contract_paginated(
    contract_address: str, address: str, page: int, offset: int, sort: str
) -> str:
    """Paginated query string for ERC-721 transfers filtered by both the
    token contract and a wallet address."""
    url = (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.TOKENNFTTX}"
        f"{fields.CONTRACT_ADDRESS}{contract_address}"
        f"{fields.ADDRESS}{address}"
        f"{fields.SORT}{sort}"
        f"{fields.PAGE}{page}"
        f"{fields.OFFSET}{offset}"
    )
    return url
@staticmethod
def get_mined_blocks_by_address(address: str) -> str:
    """Query string listing blocks mined by *address*."""
    # "blocks" is a constant; the previous f-string prefix was pointless.
    url = (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.GET_MINED_BLOCKS}"
        f"{fields.ADDRESS}{address}"
        f"{fields.BLOCK_TYPE}"
        "blocks"
    )
    return url
@staticmethod
def get_mined_blocks_by_address_paginated(
    address: str, page: int, offset: int
) -> str:
    """Paginated query string listing blocks mined by *address*."""
    url = (
        f"{fields.MODULE}{modules.ACCOUNT}"
        f"{fields.ACTION}{actions.GET_MINED_BLOCKS}"
        f"{fields.ADDRESS}{address}"
        f"{fields.BLOCK_TYPE}"
        "blocks"
        f"{fields.PAGE}{page}"
        f"{fields.OFFSET}{offset}"
    )
    return url
| StarcoderdataPython |
11358863 | <reponame>miguelgrinberg/circular-dependencies-webcast<filename>myapp/api.py
from flask import url_for, jsonify, request, Blueprint
from myapp import db
from myapp.models.user import User
from myapp.models.message import Message
api = Blueprint('api', __name__)
@api.route('/users', methods=['POST'])
def new_user():
    """Create a user from the JSON body; respond 201 with a Location header."""
    payload = request.get_json()
    user = User(**payload)
    db.session.add(user)
    db.session.commit()
    headers = {'Location': url_for('api.get_user', id=user.id)}
    return '', 201, headers
@api.route('/users', methods=['GET'])
def get_users():
    """Return every user as a JSON collection."""
    serialized = [user.to_dict() for user in User.query]
    return jsonify({'users': serialized})
@api.route('/users/<id>', methods=['GET'])
def get_user(id):
    """Return a single user by id, or 404 when it does not exist."""
    return jsonify(User.query.get_or_404(id).to_dict())
@api.route('/users/<id>', methods=['DELETE'])
def delete_user(id):
    """Soft-delete: flag the user as deleted rather than removing the row."""
    user = User.query.get_or_404(id)
    user.deleted = True
    db.session.commit()
    return '', 204
@api.route('/users/<id>/messages', methods=['POST'])
def new_message(id):
    """Create a message owned by user *id*; respond 201 with a Location header."""
    user = User.query.get_or_404(id)
    payload = request.get_json()
    message = Message(user_id=user.id, **payload)
    db.session.add(message)
    db.session.commit()
    return '', 201, {'Location': url_for('api.get_message', id=message.id)}
@api.route('/messages')
def get_messages():
    """Return every message as a JSON collection."""
    serialized = [message.to_dict() for message in Message.query]
    return jsonify({'messages': serialized})
@api.route('/messages/<id>')
def get_message(id):
    """Return a single message by id, or 404 when it does not exist."""
    return jsonify(Message.query.get_or_404(id).to_dict())
| StarcoderdataPython |
1849202 | import json
import networkx as nwx
from networkx.readwrite import json_graph
# Module-wide directed graph that every helper below reads and mutates.
# NOTE(review): ``global`` at module level is a no-op; kept as-is.
global G
G=nwx.DiGraph()
global dummy
def edges():
    # Print every edge as (u, v, weight); missing weights appear as -999.
    # NOTE(review): print statement + edges_iter => Python 2 / networkx 1.x only.
    print list(G.edges_iter(data='weight', default=-999))
def newGraph():
    """Discard the current graph and rebind G to a fresh empty DiGraph."""
    global G
    G=nwx.DiGraph()
def addedge(s,t,w):
    """Add (or update) the directed edge s -> t with weight w in G."""
    global G
    G.add_edge(s,t,weight=w)
def savejson():
    """Serialize G in node-link format to force.json (d3 "force" layout input)."""
    global G
    # Use a context manager so the file handle is closed even on error;
    # the previous open() call leaked the handle.
    with open('force.json', 'w') as fh:
        json.dump(json_graph.node_link_data(G), fh)
def p ():
    """Print the graph-level attribute dict and the node list (Python 2)."""
    global G
    print G.graph, G.nodes()
def eigenvector(w='weight'):
    """Eigenvector centrality of G; *w* names the edge-weight attribute."""
    return nwx.eigenvector_centrality(G,weight=w)
def betweenness(w='weight'):
    """Betweenness centrality of G; *w* names the edge-weight attribute."""
    return nwx.betweenness_centrality(G,weight=w)
def closeness(w='weight'):
    # NOTE(review): *w* is accepted for symmetry with the other centrality
    # helpers but is not forwarded to networkx — confirm this is intended.
    return nwx.closeness_centrality(G)
def highest_centrality(centrality_function):
    """Return (node, value) for the node with the largest centrality value.

    Replaces the previous sort/reverse (O(n log n), Python 2-only via
    ``iteritems``) with a single ``max`` pass. Ties on the value are broken
    by the largest node key, matching the original descending
    (value, node) sort order.
    """
    return max(centrality_function.items(), key=lambda kv: (kv[1], kv[0]))
def triads():
    """Run a triadic census on G and return {"key", "data", "nodes"}.

    Relies on the project-local ``triadic`` module; see the reference at the
    bottom of this function's original comment block.
    """
    import triadic as tr
    #census,
    global dummy
    census,node_census = tr.triadic_census(G)
    # NOTE(review): indexing ``values()[1]`` assumes Python 2 (list view) and
    # an arbitrary-but-stable dict order — confirm before porting.
    keys = node_census.values()[1].keys()
    ## Generate a table header
    print '| Node |', ' | '.join(keys)
    ## Generate table contents
    ## A little magic is required to convert ints to strings
    data = []
    for k in node_census.keys():
        #print '|', k ,'|',' | '.join([str(v) for v in node_census[k].values() ])
        data.append([k,[i for i in node_census[k].values()]])
    return {"key":keys,"data":[i[1] for i in data],"nodes":[i[0] for i in data]}
#https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.triads.triadic_census.html
def test():
    """Rebind G to the connected components of its undirected projection."""
    global G
    # NOTE(review): on networkx >= 1.10 this returns a *generator* of
    # component subgraphs, not a graph — confirm downstream usage.
    G = nwx.connected_component_subgraphs(G.to_undirected())
| StarcoderdataPython |
1638543 | # -*- coding: utf-8 -*-
import json
from fastapi import APIRouter, File, HTTPException, UploadFile
from loguru import logger
from xmltodict import parse as xml_parse
from xmltodict import unparse as xml_unparse
router = APIRouter()
@router.post("/xml-json", status_code=201)
async def convert_xml(
    myfile: UploadFile = File(...),
) -> dict:
    """
    Convert an uploaded XML document to JSON.

    Raises HTTPException(400) when the upload is not named *.xml or when
    the XML cannot be parsed.

    Returns:
        json object
    """
    # Fall back to a placeholder when the client sent no content type.
    # (Truthiness check also covers a None content_type, which len() did not.)
    if not myfile.content_type:
        file_type = "unknown"
    else:
        file_type = myfile.content_type
    logger.info(f"file_name: {myfile.filename} file_type: {file_type}")
    file_named = myfile.filename
    # Reject anything not named *.xml. The previous check passed a start
    # index to endswith(), which wrongly rejected short names like "a.xml".
    if not file_named.endswith(".xml"):
        error_exception = (
            f"API requires a XML document, but file {myfile.filename} is {file_type}"
        )
        logger.critical(error_exception)
        raise HTTPException(status_code=400, detail=error_exception)
    try:
        # async method to get data from file upload
        contents = await myfile.read()
        # xml to json conversion with xmltodict
        result = xml_parse(
            contents, encoding="utf-8", process_namespaces=True, xml_attribs=True
        )
        logger.info("file converted to JSON")
        return result
    except Exception as e:
        logger.critical(f"error: {e}")
        # Any parse failure is reported to the client as a 400 (the previous
        # condition "... or e is not None" was always true inside except).
        error_exception = f"The syntax of the object is not valid. Error: {e}"
        raise HTTPException(status_code=400, detail=error_exception)
@router.post("/json-xml", status_code=201)
async def convert_json(
    myfile: UploadFile = File(...),
) -> str:
    """
    Convert an uploaded JSON document to XML.

    Raises HTTPException(400) when the upload is not named *.json or when
    the JSON cannot be parsed/unparsed.

    Returns:
        XML object
    """
    # Fall back to a placeholder when the client sent no content type.
    if not myfile.content_type:
        file_type = "unknown"
    else:
        file_type = myfile.content_type
    logger.info(f"file_name: {myfile.filename} file_type: {file_type}")
    file_named = myfile.filename
    # Reject anything not named *.json. The previous check passed a start
    # index to endswith(), which wrongly rejected short names like "a.json".
    if not file_named.endswith(".json"):
        error_exception = (
            f"API requires a JSON document, but file {myfile.filename} is {file_type}"
        )
        logger.critical(error_exception)
        raise HTTPException(status_code=400, detail=error_exception)
    try:
        # async method to get data from file upload
        content = await myfile.read()
        # create a dictionary with decoded content
        new_dict = json.loads(content.decode("utf8"))
        # json to xml conversion with xmltodict
        result = xml_unparse(new_dict, pretty=True)
        logger.info("file converted to XML")
        return result
    except Exception as e:
        logger.critical(f"error: {e}")
        # Any failure is reported to the client as a 400 (the previous
        # condition "... or e is not None" was always true inside except).
        error_exception = f"The syntax of the object is not valid. Error: {e}"
        raise HTTPException(status_code=400, detail=error_exception)
| StarcoderdataPython |
3463020 | <filename>instagramy/core/requests.py<gh_stars>10-100
""" Wrapper for urllib.request """
import random
from typing import Any
from urllib.request import Request, urlopen
from .user_agent import user_agents
def get(url: str, sessionid=None) -> Any:
    """
    Send an HTTP request to *url* with a randomized User-Agent, optionally
    authenticating via an Instagram session id cookie, and return the
    decoded HTML content.
    """
    request = Request(
        # Header value is just the agent string; the previous value wrongly
        # repeated the header name ("user-agent: ...") inside the value.
        url=url, headers={"User-Agent": random.choice(user_agents)}
    )
    if sessionid:
        request.add_header("Cookie", f"sessionid={sessionid}")
    with urlopen(request) as response:
        html = response.read()
    return html.decode("utf-8")
| StarcoderdataPython |
132000 | # Copyright 2014 Kylincloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.template import defaultfilters as filters
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext_lazy
from horizon import tables
from horizon.utils import filters as utils_filters
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class DeleteDHCPAgent(tables.DeleteAction):
    """Table action removing selected DHCP agents from the current network."""
    @staticmethod
    def action_present(count):
        return ngettext_lazy(
            "Delete DHCP Agent",
            "Delete DHCP Agents",
            count
        )
    @staticmethod
    def action_past(count):
        return ngettext_lazy(
            "Deleted DHCP Agent",
            "Deleted DHCP Agents",
            count
        )
    policy_rules = (("network", "delete_agent"),)
    def delete(self, request, obj_id):
        # The network id comes from the table's URL kwargs; obj_id is the
        # agent being removed from that network.
        network_id = self.table.kwargs['network_id']
        api.neutron.remove_network_from_dhcp_agent(request, obj_id, network_id)
class AddDHCPAgent(tables.LinkAction):
    """Modal link action scheduling the current network onto a DHCP agent."""
    name = "add"
    verbose_name = _("Add DHCP Agent")
    url = "horizon:admin:networks:adddhcpagent"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("network", "update_agent"),)
    def get_link_url(self, datum=None):
        # Build the modal URL for the network currently shown in the table.
        network_id = self.table.kwargs['network_id']
        return reverse(self.url, args=(network_id,))
def get_agent_status(agent):
    """Map the agent's admin_state_up flag to a translated status label."""
    return _('Enabled') if agent.admin_state_up else _('Disabled')
def get_agent_state(agent):
    """Map the agent's liveness flag to a translated Up/Down label."""
    return _('Up') if agent.alive else _('Down')
class DHCPAgentsFilterAction(tables.FilterAction):
    """Client-side name filter for the DHCP agents table."""
    name = "agents"
class DHCPAgentsTable(tables.DataTable):
    """Admin table listing the DHCP agents serving one network."""
    id = tables.Column('id', verbose_name=_('ID'), hidden=True)
    host = tables.WrappingColumn('host', verbose_name=_('Host'))
    status = tables.Column(get_agent_status, verbose_name=_('Status'))
    state = tables.Column(get_agent_state, verbose_name=_('Admin State'))
    # Heartbeat timestamp rendered as a human-readable "time since" value.
    heartbeat_timestamp = tables.Column('heartbeat_timestamp',
                                        verbose_name=_('Updated At'),
                                        filters=(utils_filters.parse_isotime,
                                                 filters.timesince))
    def get_object_display(self, agent):
        # Agents are identified to the user by their host name.
        return agent.host
    class Meta(object):
        name = "agents"
        verbose_name = _("DHCP Agents")
        table_actions = (AddDHCPAgent, DeleteDHCPAgent,
                         DHCPAgentsFilterAction,)
        row_actions = (DeleteDHCPAgent,)
        hidden_title = False
387912 | from pinout import core, config
from pinout.components.layout import Group
class Swatch(Group):
    """Graphical icon for display in LegendEntry"""
    def __init__(self, width=None, height=None, **kwargs):
        super().__init__(**kwargs)
        self.update_config(config.legend["entry"]["swatch"])
        # Fall back to configured dimensions when none are supplied.
        width = width or self.config["width"]
        height = height or self.config["height"]
        # Rect aligned left hand edge, vertically centered around origin.
        shape = self.add(core.Rect(y=-height / 2, width=width, height=height))
        self.add_tag("swatch")
        shape.add_tag("swatch__body")
class LegendEntry(Group):
    """Legend entry comprised of a swatch and single line of text."""
    def __init__(
        self,
        content,
        width=None,
        height=None,
        swatch=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.update_config(config.legend["entry"])
        self.add_tag(self.config["tag"])
        width = width or self.config["width"]
        height = height or self.config["height"]
        swatch = swatch or {}
        # A dict is treated as keyword arguments for a new Swatch; a
        # pre-built Swatch instance is used as-is.
        if isinstance(swatch, dict):
            swatch = Swatch(**swatch)
        # Invisible shape reserving the entry's overall bounding box.
        self.add(
            core.SvgShape(
                width=width,
                height=height,
            ),
        )
        # Center the swatch vertically and inset it from the left edge.
        swatch.y = height / 2
        swatch.x = (height - swatch.height) / 2
        self.add(swatch)
        # Label text sits to the right of the swatch, vertically centered.
        self.add(
            core.Text(
                content,
                x=swatch.bounding_coords().x2 + swatch.x,
                y=self.height / 2,
            )
        )
class Legend(Group):
    """Auto generate a legend component"""
    def __init__(
        self,
        data,
        max_height=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.update_config(config.legend)
        self.add_tag(self.config["tag"])
        max_height = max_height or self.config["max_height"]
        entry_x = 0
        entry_y = 0
        for entry in data:
            # Each tuple is (content, tag[, attrs]); anything else is assumed
            # to already be a renderable entry and is added unchanged.
            if type(entry) is tuple:
                content, tag, *args = entry
                attrs = args[0] if len(args) > 0 else {}
                entry = LegendEntry(content, tag=tag, **attrs, scale=self.scale)
            self.add(entry)
            # Position entry in legend
            # Wrap into a new column when the current one exceeds max_height.
            if max_height and entry_y + entry.height > max_height:
                entry_x = self.width
                entry_y = 0
            entry.x = entry_x
            entry.y = entry_y
            entry_y += entry.height
| StarcoderdataPython |
1979352 | import numpy as np
def run(df, docs):
    """Derive the ``failed_last_year`` flag from the ``ano`` (grade) column.

    Rows whose ``ano`` equals the maximum grade present are flagged 1,
    all others 0. *docs* receive start/end notifications around the step.
    Mutates and returns *df*.
    """
    for doc in docs:
        doc.start("t23 - Ano", df)
    df['failed_last_year'] = 0
    max_grade = df['ano'].max()
    # .loc assignment avoids pandas chained-assignment (SettingWithCopy)
    # problems that df['col'][mask] = value can trigger.
    df.loc[df['ano'] == max_grade, 'failed_last_year'] = 1
    for doc in docs:
        doc.end(df)
    return df
| StarcoderdataPython |
9682483 | <reponame>xaviml/z2m_ikea_controller
from typing import TYPE_CHECKING, Any, Dict, Optional, Set
from appdaemon.plugins.hass.hassapi import Hass
from cx_const import DefaultActionsMapping
from cx_core.integration import EventData, Integration
if TYPE_CHECKING:
from cx_core.controller import Controller
class HomematicIntegration(Integration):
    """Controller integration translating ``homematic.keypress`` events
    into controller actions of the form "<param>_<channel>"."""
    name = "homematic"
    # Controller ids this integration listens for; events whose "name" is
    # not registered here are ignored.
    _registered_controller_ids: Set[str]
    def __init__(self, controller: "Controller", kwargs: Dict[str, Any]):
        self._registered_controller_ids = set()
        super().__init__(controller, kwargs)
    def get_default_actions_mapping(self) -> Optional[DefaultActionsMapping]:
        """Delegate to the controller's Homematic-specific action mapping."""
        return self.controller.get_homematic_actions_mapping()
    async def listen_changes(self, controller_id: str) -> None:
        """Register *controller_id* and subscribe to Homematic keypress events."""
        self._registered_controller_ids.add(controller_id)
        await Hass.listen_event(
            self.controller, self.event_callback, "homematic.keypress"
        )
    async def event_callback(
        self, event_name: str, data: EventData, kwargs: Dict[str, Any]
    ) -> None:
        """Build the "<param>_<channel>" action from the event and dispatch it."""
        if data["name"] not in self._registered_controller_ids:
            return
        param = data["param"]
        channel = data["channel"]
        action = f"{param}_{channel}"
        await self.controller.handle_action(action, extra=data)
| StarcoderdataPython |
296981 | import numpy as np
import pytest
from hashlib import md5
from pathlib import Path
from xml.etree import ElementTree as ET
from readimc import IMCMcdFile
class TestIMCMCDFile:
    """Integration tests for IMCMcdFile against two .mcd fixtures: the
    bundled IMC test data (via the ``imc_test_data_mcd_file`` fixture) and
    an optional, large Damond 2019 file that must be present locally."""
    # Optional local fixture; the *_damond tests are skipped when absent.
    damond_mcd_file_path = Path("data/Damond2019/20170814_G_SE.mcd")
    @classmethod
    def setup_class(cls):
        # Open the optional Damond file once for the whole class.
        if cls.damond_mcd_file_path.exists():
            cls.damond_mcd_file = IMCMcdFile(cls.damond_mcd_file_path)
            cls.damond_mcd_file.open()
        else:
            cls.damond_mcd_file = None
    @classmethod
    def teardown_class(cls):
        if cls.damond_mcd_file is not None:
            cls.damond_mcd_file.close()
            cls.damond_mcd_file = None
    def test_xml(self, imc_test_data_mcd_file: IMCMcdFile):
        mcd_xml = ET.tostring(
            imc_test_data_mcd_file.xml,
            encoding="us-ascii",
            method="xml",
            xml_declaration=False,
            default_namespace=imc_test_data_mcd_file.xmlns,
        )
        mcd_xml_digest = md5(mcd_xml).digest()
        assert mcd_xml_digest == b"D]\xfa\x15a\xb8\xe4\xb2z8od\x85c\xa9\xf9"
    def test_xmlns(self, imc_test_data_mcd_file: IMCMcdFile):
        mcd_xmlns = imc_test_data_mcd_file.xmlns
        assert mcd_xmlns == "http://www.fluidigm.com/IMC/MCDSchema_V2_0.xsd"
    def test_slides(self, imc_test_data_mcd_file: IMCMcdFile):
        assert len(imc_test_data_mcd_file.slides) == 1
        slide = imc_test_data_mcd_file.slides[0]
        assert slide.id == 0
        assert slide.description == "Slide"
        assert slide.width_um == 75000.0
        assert slide.height_um == 25000.0
        assert len(slide.panoramas) == 1
        assert len(slide.acquisitions) == 3
        panorama = next(p for p in slide.panoramas if p.id == 1)
        assert panorama.description == "Panorama_001"
        assert panorama.x1_um == 31020.0
        assert panorama.y1_um == 13486.0
        assert panorama.width_um == 193.0
        assert panorama.height_um == 162.0
        acquisition = next(a for a in slide.acquisitions if a.id == 1)
        assert acquisition.description == "ROI_001"
        assert acquisition.start_x_um == 31080.0
        assert acquisition.start_y_um == 13449.0
        assert acquisition.width_um == 60.501000000000204
        assert acquisition.height_um == 58.719999999999345
        assert acquisition.num_channels == 5
        assert tuple(acquisition.channel_metals) == (
            "Ag",
            "Pr",
            "Sm",
            "Eu",
            "Yb",
        )
        assert tuple(acquisition.channel_masses) == (107, 141, 147, 153, 172)
        assert tuple(acquisition.channel_labels) == (
            "107Ag",
            "Cytoker_651((3356))Pr141",
            "Laminin_681((851))Sm147",
            "YBX1_2987((3532))Eu153",
            "H3K27Ac_1977((2242))Yb172",
        )
        assert tuple(acquisition.channel_names) == (
            "Ag107",
            "Pr141",
            "Sm147",
            "Eu153",
            "Yb172",
        )
    def test_read_acquisition(self, imc_test_data_mcd_file: IMCMcdFile):
        slide = imc_test_data_mcd_file.slides[0]
        acquisition = next(a for a in slide.acquisitions if a.id == 1)
        img = imc_test_data_mcd_file.read_acquisition(acquisition=acquisition)
        assert img.dtype == np.float32
        assert img.shape == (5, 60, 60)
    def test_read_slide(self, imc_test_data_mcd_file: IMCMcdFile):
        slide = imc_test_data_mcd_file.slides[0]
        img = imc_test_data_mcd_file.read_slide(slide)
        assert img.dtype == np.uint8
        assert img.shape == (669, 2002, 4)
    def test_read_panorama(self, imc_test_data_mcd_file: IMCMcdFile):
        slide = imc_test_data_mcd_file.slides[0]
        panorama = next(p for p in slide.panoramas if p.id == 1)
        img = imc_test_data_mcd_file.read_panorama(panorama)
        assert img.dtype == np.uint8
        assert img.shape == (162, 193, 4)
    def test_read_before_ablation_image(
        self, imc_test_data_mcd_file: IMCMcdFile
    ):
        slide = imc_test_data_mcd_file.slides[0]
        acquisition = next(a for a in slide.acquisitions if a.id == 1)
        img = imc_test_data_mcd_file.read_before_ablation_image(acquisition)
        assert img is None
    def test_read_after_ablation_image(
        self, imc_test_data_mcd_file: IMCMcdFile
    ):
        slide = imc_test_data_mcd_file.slides[0]
        acquisition = next(a for a in slide.acquisitions if a.id == 1)
        img = imc_test_data_mcd_file.read_after_ablation_image(acquisition)
        assert img is None
    @pytest.mark.skipif(
        not damond_mcd_file_path.exists(), reason="data not available"
    )
    def test_xml_damond(self, imc_test_data_mcd_file: IMCMcdFile):
        # NOTE(review): this test takes the generic fixture and asserts the
        # same digest as test_xml — looks like a copy-paste; confirm whether
        # it should use self.damond_mcd_file instead.
        mcd_xml = ET.tostring(
            imc_test_data_mcd_file.xml,
            encoding="us-ascii",
            method="xml",
            xml_declaration=False,
            default_namespace=imc_test_data_mcd_file.xmlns,
        )
        mcd_xml_digest = md5(mcd_xml).digest()
        assert mcd_xml_digest == b"D]\xfa\x15a\xb8\xe4\xb2z8od\x85c\xa9\xf9"
    @pytest.mark.skipif(
        not damond_mcd_file_path.exists(), reason="data not available"
    )
    def test_xmlns_damond(self):
        mcd_xmlns = self.damond_mcd_file.xmlns
        assert mcd_xmlns == "http://www.fluidigm.com/IMC/MCDSchema.xsd"
    @pytest.mark.skipif(
        not damond_mcd_file_path.exists(), reason="data not available"
    )
    def test_slides_damond(self):
        assert len(self.damond_mcd_file.slides) == 1
        slide = self.damond_mcd_file.slides[0]
        assert slide.id == 1
        assert slide.description == "compensationslide1000"
        assert slide.width_um == 75000.0
        assert slide.height_um == 25000.0
        assert len(slide.panoramas) == 8
        assert len(slide.acquisitions) == 41
        panorama = next(p for p in slide.panoramas if p.id == 1)
        assert panorama.description == "TuningTape"
        assert panorama.x1_um == 28961.0
        assert panorama.y1_um == 6460.0
        assert panorama.width_um == 1472.9184890671672
        assert panorama.height_um == 1526.6011674842225
        acquisition = next(a for a in slide.acquisitions if a.id == 1)
        assert acquisition.description == "TT_G01"
        assert acquisition.start_x_um == 29195.563447789347
        assert acquisition.start_y_um == 6091.267354770278
        assert acquisition.width_um == 51.0
        assert acquisition.height_um == 50.0
        assert acquisition.num_channels == 3
        assert tuple(acquisition.channel_metals) == ("Eu", "Eu", "Lu")
        assert tuple(acquisition.channel_masses) == (151, 153, 175)
        assert tuple(acquisition.channel_labels) == ("151Eu", "153Eu", "175Lu")
        assert tuple(acquisition.channel_names) == ("Eu151", "Eu153", "Lu175")
    @pytest.mark.skipif(
        not damond_mcd_file_path.exists(), reason="data not available"
    )
    def test_read_acquisition_damond(self):
        slide = self.damond_mcd_file.slides[0]
        acquisition = next(a for a in slide.acquisitions if a.id == 1)
        img = self.damond_mcd_file.read_acquisition(acquisition=acquisition)
        assert img.dtype == np.float32
        assert img.shape == (3, 50, 51)
    @pytest.mark.skipif(
        not damond_mcd_file_path.exists(), reason="data not available"
    )
    def test_read_slide_damond(self):
        slide = self.damond_mcd_file.slides[0]
        img = self.damond_mcd_file.read_slide(slide)
        assert img.dtype == np.uint8
        assert img.shape == (930, 2734, 3)
    @pytest.mark.skipif(
        not damond_mcd_file_path.exists(), reason="data not available"
    )
    def test_read_panorama_damond(self):
        slide = self.damond_mcd_file.slides[0]
        panorama = next(p for p in slide.panoramas if p.id == 1)
        img = self.damond_mcd_file.read_panorama(panorama)
        assert img.dtype == np.uint8
        assert img.shape == (4096, 3951, 4)
    @pytest.mark.skipif(
        not damond_mcd_file_path.exists(), reason="data not available"
    )
    def test_read_before_ablation_image_damond(self):
        slide = self.damond_mcd_file.slides[0]
        acquisition = next(a for a in slide.acquisitions if a.id == 1)
        img = self.damond_mcd_file.read_before_ablation_image(acquisition)
        assert img is None
    @pytest.mark.skipif(
        not damond_mcd_file_path.exists(), reason="data not available"
    )
    def test_read_after_ablation_image_damond(self):
        slide = self.damond_mcd_file.slides[0]
        acquisition = next(a for a in slide.acquisitions if a.id == 1)
        img = self.damond_mcd_file.read_after_ablation_image(acquisition)
        assert img is None
| StarcoderdataPython |
4972512 | <gh_stars>0
#######################################
############### Imports ###############
#######################################
from kafka import KafkaProducer
from kafka import KafkaConsumer
#import re
#import os, errno
from time import time
from datetime import timedelta
from atexit import register
import sys
from kfk_rss_read import *
############################################
############### Inital setup ###############
############################################
## Function to read nice byte sizes:
#def size_format(x, suffix='B'):
# for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
# if abs(x) < 1024.0:
# return "%3.1f%s%s" % (x, unit, suffix)
# x /= 1024.0
# return "%.1f%s%s" % (x, 'Yi', suffix)
#
## specify the location of the feed file (text file full of rss links)
#rssfeedfile = 'rssfeeds.txt'
#
## specify the location of the global GUID file
#globalGUID = 'globalGUID.log'
#
## check before streaming
#cont = input("Start streaming %s? (y/n) " % rssfeedfile)
#if cont == 'y':
# pass
#else:
# quit('Now exiting, no files downloaded')
def Ending(kafka_consumer):
    """atexit hook: close the consumer and report run statistics."""
    kafka_consumer.close()
    elapsed = timedelta(seconds=time() - start)
    print('Time taken:', str(elapsed))
    print('Messages received:', filesread)
# start timer
start = time()
###################################
######### Start Receiving #########
###################################
# define masterlog (every received message is appended here)
masterlog ='masterlog.txt'
# define the counter variables:
filesread = 0
# create a list for all articles
all_articles = []
# start the kafka consumer
#consumer = KafkaConsumer('python-test',
#                         fetch_min_bytes=300000,
#                         fetch_max_wait=300000,
#                         auto_commit_interval_ms=1000,
#                         max_poll_records=10,
#                         bootstrap_servers=['localhost:9092'])
consumer = KafkaConsumer('python-test',
                         bootstrap_servers=['localhost:9092'])
# Ensure statistics are printed and the consumer is closed on interpreter exit.
register(Ending,consumer)
# read messages
#with open(masterlog, 'a+') as master:
#    for msg in consumer:
#        if filesread < 10:
#            filesread += 1
#            master.write(msg.value.decode('utf-8')+'\n')
#        else: break
# Blocks indefinitely: KafkaConsumer iteration only ends on interrupt/exit.
with open(masterlog, 'a+') as master:
    for msg in consumer:
        filesread += 1
        master.write(msg.value.decode('utf-8')+'\n')
| StarcoderdataPython |
270149 | <gh_stars>0
__all__ = ["decoder"]
def decoder(x: list) -> int:
    """Interpret *x* as an LSB-first (little-endian) list of bits and
    return the integer it encodes.

    Simplified: int(s, 2) does not need a "0b" prefix, and f-string/str()
    wrapping was redundant. Raises ValueError for an empty list, as before.
    """
    bits = "".join(str(bit) for bit in reversed(x))
    return int(bits, 2)
11209242 | <reponame>brutzl/pymbs
import threading
import time
class PyMbsThread(threading.Thread):
    """Background thread that repeatedly calls *function* (which advances a
    simulation one step and returns the current model time), optionally
    sleeping so that model time tracks scaled wall-clock time."""
    def __init__(self, function, realTime=False):
        threading.Thread.__init__(self)
        # Loop guard: run() spins while this stays True.
        self.execute = False
        self.function = function
        # When True, run() sleeps to keep model time aligned with real time.
        self.realTime = realTime
        # Real-time scaling factor applied to elapsed wall-clock time.
        self.scaling = 1
        # Start-Time (Model)
        self.model_offset = 0.0
        # Start-Time (Real)
        self.real_offset = 0.0
    def reinit(self):
        # Stop if running, then re-run __init__ so the thread object can be
        # started again (threading.Thread instances are single-start).
        if (self.execute):
            self.stop()
        self.__init__(self.function, self.realTime)
    def run(self):
        # Initialise Offsets
        self.real_offset = time.time()
        self.model_offset = self.function()
        t = self.model_offset
        # Debug
        # print "Starting Thread " + str(id(self))
        # Endless Loop
        self.execute = True
        while self.execute:
            # synchronise with real time
            if (self.realTime):
                # Real Elapsed Time
                real = self.scaling*(time.time() - self.real_offset)
                # Model Elapsed Time
                model = t - self.model_offset
                # Difference
                deltaT = model-real
                # Sleep only when the model is ahead of (scaled) real time.
                if (deltaT > 0):
                    time.sleep(deltaT)
            # Execute next step
            t = self.function()
        # Debug
        # print "Finished Thread " + str(id(self))
    def stop(self):
        # Request the run() loop to end after the current step completes;
        # does not join or interrupt a step in progress.
        self.execute = False
        # Debug
        # print "Stopped Thread " + str(id(self))
'''
Usage:
======
def myFunc():
print 'doing something'
time.sleep(1)
t = PymbsThread(myFunc)
t.start() # starts Thread
t.stop() # stop Thread
t.reinit() # "reset" thread
t.start() # start Thread again
t.stop()
''' | StarcoderdataPython |
9719777 | from dotenv import load_dotenv
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions
import asyncio
import random
import os
load_dotenv()
# intents = discord.Intents.default()
# intents.members = True
# client = discord.Client(intents=intents)
# Subscribe only to the gateway intents the bot needs; the privileged
# ``members`` intent must also be enabled in the Discord developer portal.
intents = discord.Intents(messages=True,members = True, guilds=True)
bot = commands.Bot(command_prefix='!', intents=intents)
# class Slapper(commands.Converter):
# async def convert(self, ctx, argument):
# to_slap = random.choice(ctx.guild.members)
#
# print(to_slap)
#
# return '{0.author} slapped {1} because *{2}*'.format(ctx, to_slap, argument)
# @bot.command()
# async def slap(ctx, *, reason: Slapper):
# await ctx.send(reason)
@bot.command()
async def command(ctx):
    """Post the bot's help text as a single fenced code block."""
    entries = [
        "```Commands for CS_GSU_BOT",
        "",
        "!ping - ...",
        "!fiveDolla - Collect 5 dollars for Chen. Must specify a reason.",
        "!shadowKeeper - List the current keeper of the Shadow Realm",
        "!shadowLeader - Display the fearless leader of the Shadow Realm",
        "!hothothotties - Display the eccentric supreme general of the Jalapeño Hotties",
        "!geriatricKeeper - Display the name(s) of the kind soul who has taken the oath of taking care of the elderly server members. Bless their heart",
        "!saints - Display the name(s) of the class suck-up(s)",
        "!tagSomeone - Randomly tag someone",
        "!dance - Randomly tag server member with a dance GIF",
        "!coinFlip - Flip a Coin",
        "!golfClap - Clap, Clap, Clap",
        "!pinterestRoyalty - Display the name(s) of those who pinterest like royalty",
        "!doughnuts - Display the name(s) of those who distribute the doughnuts",
        "!nerves",
        "```",
    ]
    await ctx.send("\n".join(entries))
@bot.command()
async def ping(ctx):
    """Health check: answer any !ping with 'pong'."""
    reply = 'pong'
    await ctx.send(reply)
@bot.command()
async def coinFlip(ctx):
    """Flip a fair coin and post "heads" or "tails"."""
    if random.randint(0, 1) == 0:
        coinFace = "heads"
    else:
        coinFace = "tails"
    await ctx.send(coinFace)
@bot.command()
async def nerves(ctx):
    """Post the fixed "nerves" GIF embed."""
    embed = discord.Embed(description="", colour=0xDEADBF)
    embed.set_author(name="<NAME>", icon_url="")
    embed.set_image(url="https://media.giphy.com/media/LRVnPYqM8DLag/giphy.gif")
    await ctx.send(embed=embed)
@bot.command()
async def golfClap(ctx):
    """Post a randomly chosen clap GIF embed."""
    # A list of dictionaries containing details about GIF's to use
    claps = [
        {"name": "",
         "avatar": "",
         "url": "https://media.giphy.com/media/XMc1Ui9rAFR1m/giphy.gif",
         "description": "It\'s not unusual, {}. It\'s just not."},
        {"name": "<NAME>",
         "avatar": "",
         "url": "https://media.giphy.com/media/gRxjhVNfFgqI0/giphy.gif",
         "description": "{}, well you asked for it."},
        {"name": "<NAME>",
         "avatar": "",
         "url": "https://media.giphy.com/media/WtBDAH97eXAmQ/giphy.gif",
         "description": "I told you {}, u can\'t touch this"}
    ]
    # Get a random number that represents an available gif
    gifToUse = random.randint(0, (len(claps) - 1))
    # Embed Object
    # NOTE(review): the per-GIF 'description' values (with {} placeholders)
    # are never used here — the embed description is empty, unlike !dance.
    # Confirm whether that is intended.
    em = discord.Embed(description="", colour=0xDEADBF)
    em.set_author(name=claps[gifToUse]['name'], icon_url=claps[gifToUse]['avatar'])
    em.set_image(url=claps[gifToUse]['url'])
    # Send the message with the EMBED
    await ctx.send(embed=em)
@bot.command()
async def tagSomeone(ctx):
    """Mention one randomly chosen member of the server.

    random.choice replaces the previous randint + manual index loop; the
    selection is uniform over the member list either way.
    """
    member = random.choice(ctx.guild.members)
    await ctx.send("{} is it.".format(member.mention))
@bot.command()
async def fiveDolla(ctx, *, reason):
    """Pick a random member to collect five dollars from, citing *reason*.

    random.choice replaces the previous randint + manual index loop.
    """
    # Echo the reason back first (preserves the original behavior).
    await ctx.send(reason)
    member = random.choice(ctx.guild.members)
    await ctx.send(
        '{0.author.mention} is collecting 5 dolla from {1} for Chen because *{2}*'
        .format(ctx, member.mention, reason)
    )
@bot.command()
async def dance(ctx):
    """Reply with a randomly chosen dance GIF embed aimed at the author."""
    # A list of dictionaries containing details about GIF's to use
    dances = [
        {"name": "Carlton",
         "avatar": "http://images.bwog.com/wp-content/uploads/2016/03/enhanced-buzz-28067-1364231406-0.jpg",
         "url": "https://media.giphy.com/media/cyyac9sTiN7ji/giphy.gif",
         "description": "It\'s not unusual, {}. It\'s just not."},
        {"name": "Lords of Riverdance",
         "avatar": "http://irishamerica.com/wp-content/uploads/2015/11/FT5S-Michael-Flatley-Dance-Irish-lord-front-smarm.jpg",
         "url": "https://media.giphy.com/media/87SVefpPJAo6s/giphy.gif",
         "description": "{}, well you asked for it."},
        {"name": "McHammer",
         "avatar": "http://www.notinhalloffame.com/media/k2/items/cache/a8a70130aed1b4387634a8604a34a91e_L.jpg",
         "url": "https://media.giphy.com/media/kgKrO1A3JbWTK/giphy.gif",
         "description": "I told you {}, u can\'t touch this"}
    ]
    # The GIF always targets the command's author.
    memberToMention = ctx.author
    # random.choice replaces randint + indexing; removed the dead
    # commented-out member-trolling loop and its unused counter.
    gif = random.choice(dances)
    em = discord.Embed(
        description=gif['description'].format(memberToMention.mention),
        colour=0xDEADBF,
    )
    em.set_author(name=gif['name'], icon_url=gif['avatar'])
    em.set_image(url=gif['url'])
    await ctx.send(embed=em)
@bot.command()
async def shadowKeeper(ctx):
    """Announce every member holding the "Keeper of the Shadow Realm" role."""
    await ctx.send("OoOoo Shadow Keeper. Fancy.")
    # Flatten the member/role nesting into a single pass over matching pairs.
    holders = [(m, r) for m in ctx.guild.members for r in m.roles
               if r.name == "Keeper of the Shadow Realm"]
    for member, role in holders:
        await ctx.send(member.mention + " is a " + role.name)
@bot.command()
async def shadowLeader(ctx):
    """Announce every member holding the Supreme Leader role."""
    leaders = [m for m in ctx.guild.members for r in m.roles
               if r.name == "Supreme Leader of the Shadow Realm"]
    for member in leaders:
        await ctx.send("All bow to the Supreme Leader of the Shadow Realm: {}".format(member.mention))
@bot.command()
async def hothothotties(ctx):
    """Announce every member holding the Jalapeño Hotties general role."""
    generals = [m for m in ctx.guild.members for r in m.roles
                if r.name == "General Supremo de los Jalapeño Hotties"]
    for member in generals:
        await ctx.send("I present to you, el General Supremo de los Jalapeño Hotties: {}".format(member.mention))
@bot.command()
async def geriatricKeeper(ctx):
    """Announce every member holding the "Keeper of the Geriatrics" role."""
    keepers = [m for m in ctx.guild.members for r in m.roles
               if r.name == "Keeper of the Geriatrics"]
    for member in keepers:
        await ctx.send("My liege, the Keeper of the Geriatrics: {}".format(member.mention))
@bot.command()
async def saints(ctx):
    """Announce every member holding the "Patron Saint of Sucking Up" role."""
    saints_list = [m for m in ctx.guild.members for r in m.roles
                   if r.name == "Patron Saint of Sucking Up in Class"]
    for member in saints_list:
        await ctx.send("The Mother Teresa of kiss-up's: {}".format(member.mention))
@bot.command()
async def pinterestRoyalty(ctx):
    """Announce every member holding the "Patron Saint of Pinterest" role."""
    royals = [m for m in ctx.guild.members for r in m.roles
              if r.name == "Patron Saint of Pinterest"]
    for member in royals:
        await ctx.send("So you are a collect snippets and fonts? eh? Hail to pinterest royalty: {}".format(member.mention))
@bot.command()
async def doughnuts(ctx):
    """Thank every member holding the "Doughnut Distributor" role."""
    for member in ctx.guild.members:
        for role in member.roles:
            if role.name == "Doughnut Distributor":
                # Fixed typo in the user-facing message ("Thanksk" -> "Thanks").
                await ctx.send("Mmmmmm. Doughnuts. Thanks for sharing: {}".format(member.mention))
# NOTE(review): has_permissions is a commands-extension check decorator; its
# effect on a plain background coroutine (not a Command) is questionable —
# confirm this is intended.
@has_permissions(manage_roles=True, ban_members=True)
async def newShadowKeeper():
    """Daily background task for guild "GSU - CS": strips the
    "Keeper of the Shadow Realm" role from its current holder and hands it to
    a random eligible member, announcing both events in #general."""
    await bot.wait_until_ready()
    # Background function only for Guild "GSU - CS" (377968199645396993)
    # List of phrases to "thank" a user for their service
    departingKeeper = [
        "{}, you did a okay job at keeping the shadow realm at bay. But, uh, next time...don't fall asleep on the job, then this won't have to be so awkward.",
        "Yea, uh, you're fired. Better luck next time {}.",
        "{} your services are no longer required. Please exit through the gift shop."
    ]
    # List of phrases to "welcome" a user to the their new role
    arrivingKeeper = [
        "With great power comes great responsibility {}. Best of luck keeping the shadow realm...We are not behind you.",
        "Wow, {}, I hope you can do a better job than the last person. Try to keep the shadow realm in check, for all our sake.",
        "GLHF {}. No turning back now...the shadow realm needs you."
    ]
    # Get a Channel Object
    channel = discord.utils.get(bot.get_all_channels(), guild__name='GSU - CS', name='general')
    while not bot.is_closed():
        # Determine which departing phrase to use
        departPhrasePosition = random.randint(0, (len(departingKeeper) - 1))
        # Determine which arraying phrase to use
        arrivingPhrasePosition = random.randint(0, (len(arrivingKeeper) - 1))
        # Set a list of Member Objects
        members = []
        for member in bot.get_all_members():
            # Check if the member is in guild 377968199645396993 (GS CSU)
            # if member.guild.id == "377968199645396993": # Only consider members in guild 377968199645396993 (GS CSU)
            if member.guild.name == "GSU - CS": # Only consider members in guild 377968199645396993 (GS CSU)
                # Check this member's roles
                for role in member.roles:
                    # Check if this member has the shadow keeper role
                    # if role.id == "413888301812940802": # shadow keeper role
                    if role.name == "Keeper of the Shadow Realm": # shadow keeper role
                        existingKeeper = member
                        shadowKeeperRole = role # todo - this should be grabbed from iterating over all server roles rather than searching for the role within a member
                        continue
                # Ensure that this member is not the BOT
                # if member.id != "413878946598486016":
                if member.name != "CS_GSU_BOT":
                    # Boolean used to determine if the member should be appended to the list of members eligible to be shadow keepers
                    appendMember = True
                    # Iterate again over the roles for a member, checking their eligibility to be Shadow Keepers
                    for role in member.roles:
                        # appendMember was already set to false, so don't check for any other role attributes
                        if appendMember == False:
                            continue
                        # if role.id == "403725126530498571": # Admin's are ineligible
                        if role.name == "admins": # Admin's are ineligible
                            appendMember = False
                            continue
                        # if role.id == "424030254093303808": # The Supreme Leader of the Shadow Realm is ineligible
                        if role.name == "Supreme Leader of the Shadow Realm": # The Supreme Leader of the Shadow Realm is ineligible
                            appendMember = False
                            continue
                    # Append to the list
                    if appendMember:
                        members.append(member)
        # NOTE(review): if no member currently holds the keeper role,
        # existingKeeper/shadowKeeperRole are never bound and the lines below
        # raise NameError — verify a keeper always exists.
        # Remove the Existing Keeper from the members list
        members.remove(existingKeeper)
        # Determine which member is going to become the new keeper
        newKeeperPosition = random.randint(0, (len(members) - 1))
        # Thank the existing keeper for their service
        await existingKeeper.remove_roles(shadowKeeperRole)
        # send a message to the channel
        await channel.send(departingKeeper[departPhrasePosition].format(existingKeeper.mention))
        # Set a new keeper
        await members[newKeeperPosition].add_roles(shadowKeeperRole)
        # Send a message to the channel
        await channel.send(arrivingKeeper[arrivingPhrasePosition].format(members[newKeeperPosition].mention))
        # Sleep for 1 day
        await asyncio.sleep(86400) # task runs every 1 day
# Schedule the daily shadow-keeper rotation as a background task.
bot.loop.create_task(newShadowKeeper())
# Start the bot; the token is read from the DISCORD_TOKEN environment variable.
bot.run(os.getenv("DISCORD_TOKEN"))
8111395 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
面包多Pay签名算法实现
Doc: https://doc.mbd.pub/api/qian-ming-suan-fa
"""
from hashlib import md5
def sign(attributes: dict, payjs_key: str) -> str:
    """Return the mianbaoduo-Pay MD5 signature for *attributes*.

    The parameters are joined as ``k=v`` pairs in ascending key order, the
    ``key=<payjs_key>`` pair is appended, and the MD5 hex digest of the
    UTF-8 encoded result is returned.
    """
    pairs = (f"{key}={attributes[key]}" for key in sorted(attributes))
    payload = "&".join(pairs) + "&key=" + payjs_key
    return md5(payload.encode(encoding="utf-8")).hexdigest()
if __name__ == "__main__":
# 用法示例
data = {"app_id": "194024590982810", "amount_total": 100,"channel": "h5","description": "312"}
# 面包多支付 app_key
key = "920ee68b4e16df01d0cd6b2ca161195d"
signed = sign(data, key)
print(signed)
# assert signed == "8544787ca7f93235a3e6c63b3c14eced"
print("ok")
| StarcoderdataPython |
3571315 | <gh_stars>1-10
class Emoji:
    """Mutable record describing a single emoji and its search metadata."""

    def __init__(self):
        # Identity / encoding fields.
        self.id = None
        self.code = None
        self.code_point = None
        self.name = None
        self.short_name = None
        # Skin-tone information.
        self.has_tone = False
        self.tone = 0
        # Ordering, search keywords and grouping.
        self.emoji_order = None
        self.keywords = set()
        self.category = None
        self.real_code = None

    def all_keywords_to_lover(self):
        """Lower-case every keyword (method name kept for compatibility)."""
        self.keywords = {keyword.lower() for keyword in self.keywords}

    def remove_blank_keywords(self):
        """Drop the empty-string and single-space keywords, if present."""
        self.keywords.discard("")
        self.keywords.discard(" ")
| StarcoderdataPython |
3275180 | from metaflow.api import step, FlowSpec
# Note that the explicit `FlowSpec` inheritance here is optional (it will be added by the `Flow` metaclass if omitted).
# Including it helps IntelliJ to analyze/syntax-highlight member accesses downstream.
#
# Some functionality, like referencing `self.input` in a join step, gets flagged by Pylint if the FlowSpec-inheritance
# isn't made explicit.
#
# TODO: get Pylint to accept self.input references in Flows w/o FlowSpec explicitly specified
class NewLinearFlow(FlowSpec):
    """Three-step linear flow exercising artifact passing plus plain
    properties/methods defined directly on the flow class."""

    @step
    def one(self):
        # Seed an artifact for the downstream steps.
        self.a = 111

    @step
    def two(self):
        # Derive a second artifact from the first.
        self.b = self.a * 2

    @step
    def three(self):
        # Artifacts persist across steps; properties/methods resolve normally.
        assert (self.a, self.b, self.foo, self.mth()) == (111, 222, '`foo`', '`mth`')
        self.checked = True

    @property
    def foo(self):
        return '`foo`'

    def mth(self):
        return '`mth`'
if __name__ == '__main__':
    # Instantiating the flow triggers Metaflow's CLI entry point.
    NewLinearFlow()
| StarcoderdataPython |
1972334 | <filename>dalib/adaptation/__init__.py
from . import proto
__all__ = ["proto"]
| StarcoderdataPython |
3211537 | <reponame>olgam4/design3
import cv2
from vision.domain.iCamera import ICamera
from vision.domain.iCameraFactory import ICameraFactory
from vision.infrastructure.fallbackCamera import FallbackCamera
from vision.infrastructure.openCvCamera import OpenCvCamera
class OpenCvCameraFactory(ICameraFactory):
    """Camera factory that probes OpenCV device indices at construction time."""

    def __init__(self, max_camera_count: int = 10) -> None:
        self._max_camera_count = max_camera_count
        self._cameras = []
        self._find_all_camera()

    def create_camera(self) -> ICamera:
        """Return a camera bound to the first detected device, or a fallback."""
        if not self._cameras:
            return FallbackCamera()
        return OpenCvCamera(self._cameras[0])

    def _find_all_camera(self) -> None:
        """Probe indices 0..max-1 and record those with an openable capture."""
        for index in range(self._max_camera_count):
            cap = cv2.VideoCapture(index)
            if cap.isOpened():
                cap.release()
                self._cameras.append(index)
| StarcoderdataPython |
1671547 | import os
from musicscore.musictree.treescoretimewise import TreeScoreTimewise
from musurgia.unittest import TestCase
from musurgia.fractaltree.fractalmusic import FractalMusic
from musurgia.fractaltree.fractalmusicsquare import Square
path = str(os.path.abspath(__file__).split('.')[0])
def set_tempo(square):
    """Assign a fixed tempo of 72 to every module of *square*."""
    for current_module in square.get_all_modules():
        current_module.tempo = 72
def add_info(module):
    """Annotate leaf chords: every leaf gets its fractal order as a lyric,
    and the very first leaf additionally gets the module's multi as words."""
    for position, leaf in enumerate(module.traverse_leaves()):
        leaf.chord.add_lyric(leaf.fractal_order)
        if not position:
            leaf.chord.add_words(module.multi)
def generate_score(modules):
    """Render *modules* into a finished partwise score, one part per module."""
    score = TreeScoreTimewise()
    for part_number, module in enumerate(modules, start=1):
        simple_format = module.get_simple_format(
            layer=module.number_of_layers
        )
        simple_format.to_stream_voice().add_to_score(score=score, part_number=part_number)
    score.finish()
    return score.to_partwise()
def forward_multi(module, multi_addition):
    """Shift the second component of *module*'s multi forward by *multi_addition*."""
    first, second = module.multi
    module.multi = (first, second + multi_addition)
class Test(TestCase):
    """Regression tests: a deep-copied square must render identically to the
    original, and multi changes must propagate the same way after copying."""

    def setUp(self) -> None:
        # A fresh square plus a deep copy of it for every test.
        self.square = Square(proportions=(1, 2, 3), tree_permutation_order=(3, 1, 2), duration=40)
        set_tempo(self.square)
        self.copied_square = self.square.__deepcopy__()

    def test_1(self):
        # The same module taken from original and copy renders the same score.
        modules = [self.square.get_module(2, 2), self.copied_square.get_module(2, 2)]
        for module in modules:
            module.add_layer()
        for module in modules:
            add_info(module)
        xml_path = path + '_test_1.xml'
        generate_score(modules).write(xml_path)
        self.assertCompareFiles(xml_path)

    def test_2(self):
        # Shifting multi on the copy only must still match the reference file.
        modules = [self.square.get_module(2, 2), self.copied_square.get_module(2, 2)]
        multi_additions = [0, 1]
        for module, multi_addition in zip(modules, multi_additions):
            forward_multi(module, multi_addition)
        for module in modules:
            module.add_layer()
        for module in modules:
            add_info(module)
        xml_path = path + '_test_2.xml'
        generate_score(modules).write(xml_path)
        self.assertCompareFiles(xml_path)

    def test_3(self):
        # As test_2, but children_fractal_values is evaluated (cached) before
        # the multi is changed.
        modules = [self.square.get_module(2, 2), self.copied_square.get_module(2, 2)]
        for module in modules:
            module.children_fractal_values
        multi_additions = [0, 1]
        for module, multi_addition in zip(modules, multi_additions):
            forward_multi(module, multi_addition)
        for module in modules:
            module.add_layer()
        for module in modules:
            add_info(module)
        xml_path = path + '_test_3.xml'
        generate_score(modules).write(xml_path)
        self.assertCompareFiles(xml_path)

    def test_4(self):
        # Assigning one instance's multi to another must reproduce all
        # derived values (fractal values/orders, midi range, generated midis).
        fm_1 = FractalMusic(multi=(1, 2), tempo=60, quarter_duration=10)
        fm_2 = FractalMusic(multi=(1, 1), tempo=60, quarter_duration=10)
        fms = [fm_1, fm_2]
        for fm in fms:
            fm.midi_generator.midi_range = [60, 72]
        fm_2.multi = fm_1.multi
        self.assertEqual(fm_2.children_fractal_values, fm_1.children_fractal_values)
        self.assertEqual(fm_2.children_fractal_orders, fm_1.children_fractal_orders)
        self.assertEqual(fm_2.midi_generator.midi_range, fm_1.midi_generator.midi_range)
        self.assertEqual(fm_2.children_generated_midis, fm_1.children_generated_midis)
| StarcoderdataPython |
4936076 | # -*- coding: utf-8 -*-
""" A visualization widget for Orange3.
This is a visualization widget for Orange3, that displays a joint distribution of two
variables from a dataset. The Widget is a two-dimensional kernel-density estimate graph
using Gaussian kernels. The Kernel density estimation (KDE) is a method to estimate the
probability density function (PDF) of a random variable in a non-parametric way.
This widget is useful in cases where there are similarities in data, that are difficult to
spot in other charts such as scatter plots. In addition, hidden clusters can be found, as
well as indicate whether the data form normal distributions.
The package used is called "SciPy". Source: "https://scipy.org/about.html".
To run the addon, just install it using 'pip install -e .' from its package folder.
Don't forget to first activate the orange environment.
__author__ = <NAME>
__date__ = April 2020
__version__ = 0.1.0
__type__ = Orange Addon
__platform__ = Windows (Orange environment)
__email__ = '<NAME>' <<EMAIL>>
__status__ = Dev
"""
import numpy as np
from AnyQt.QtCore import Qt
from AnyQt.QtWidgets import QListWidget
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.data import Table, ContinuousVariable
from Orange.widgets import widget, gui, settings
from Orange.widgets.widget import Input
import scipy.stats as st
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
""" gaussian_kde Parameters
class scipy.stats.gaussian_kde(dataset, bw_method=None)
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D array,
otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth.
This can be ‘scott’, ‘silverman’, a scalar constant or a callable.
If a scalar, this will be used directly as kde.factor.
If a callable, it should take a gaussian_kde instance as only parameter
and return a scalar. If None (default), ‘scott’ is used.
See Notes for more details.
"""
# (display label, scipy gaussian_kde bw_method name) pairs offered in the
# widget's bandwidth combo box.
BW_METHOD = [
    ("Scott", "scott"),
    ("Silverman", "silverman"),
]
class KDE2D_w(widget.OWWidget):
    """Orange widget plotting a two-dimensional Gaussian kernel-density
    estimate of two selected continuous attributes, as a filled density map
    with labelled contour lines (scipy.stats.gaussian_kde)."""
    name = 'KDE-2D'
    # Fixed: the original line ended with a stray "\" continuation that fused
    # this assignment with the `icon` line into a SyntaxError.
    description = "Visualization of two dimensional kernel-density estimate using Gaussian kernels"
    icon = 'icons/KDE2D.svg'
    priority = 30

    class Inputs:
        data = Input("Data", Table)

    # Persisted widget state: selected attribute indices and bandwidth method.
    attrs = settings.Setting([])
    bw_methode = settings.Setting(0)

    def __init__(self):
        self.data = None
        self.all_attrs = []
        # Attribute selector (exactly two selections are expected).
        gui.listBox(self.controlArea, self, 'attrs',
                    labels='all_attrs',
                    box='Dataset attribute(s)',
                    selectionMode=QListWidget.ExtendedSelection,
                    callback=self.on_changed)
        self.optionsBox = gui.widgetBox(self.controlArea, "KDE-2D Options")
        gui.comboBox(
            self.optionsBox,
            self,
            "bw_methode",
            orientation=Qt.Horizontal,
            label="Bandwidth Method: ",
            items=[d[0] for d in BW_METHOD],
            callback=self._bw_methode
        )
        self.optionsBox.setDisabled(True)
        # Matplotlib canvas embedded in the widget's main area.
        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.mainArea.layout().addWidget(self.canvas)

    @Inputs.data
    def set_data(self, data):
        """Receive a new dataset, collect its continuous attributes, redraw."""
        self.data = data = None if data is None else Table(data)
        self.all_attrs = []
        if data is None:
            # discards the old graph
            self.figure.clear()
            self.optionsBox.setDisabled(True)
            return
        self.all_attrs = [(var.name, gui.attributeIconDict[var])
                          for var in data.domain.variables
                          if (var is not data and
                              isinstance(var, ContinuousVariable))]
        self.attrs = [0]
        self.optionsBox.setDisabled(False)
        self.on_changed()

    def _bw_methode(self):
        """Redraw when the bandwidth method changes (no-op without data)."""
        if self.data is None:
            return
        self.on_changed()

    def on_changed(self):
        """Recompute the KDE and redraw; requires exactly two selected attrs."""
        if not self.attrs or not self.all_attrs:
            return
        if self.data is None:
            return
        if len(self.attrs) != 2:
            return
        # discards the old graph
        self.figure.clear()
        # Names of the two selected attributes (used for the axis labels).
        attr_name = []
        for attr in self.attrs:
            attr_name.append(self.all_attrs[attr][0])
        # Raw column data for the selected attributes.
        x = np.ravel(self.data.X[:, [self.attrs[0]]])
        y = np.ravel(self.data.X[:, [self.attrs[1]]])
        # Pad the plot range by one third of the data span on each side.
        dX = (max(x) - min(x)) / 3
        xmin = min(x) - dX
        xmax = max(x) + dX
        dY = (max(y) - min(y)) / 3
        ymin = min(y) - dY
        ymax = max(y) + dY
        # 100x100 evaluation grid over the padded range.
        X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
        positions = np.vstack([X.ravel(), Y.ravel()])
        values = np.vstack([x, y])
        kernel = st.gaussian_kde(values, bw_method=BW_METHOD[self.bw_methode][1])
        Z = np.reshape(kernel(positions).T, X.shape)
        ax = self.figure.gca()
        # Density as a background image plus labelled contour lines.
        ax.imshow(np.rot90(Z), cmap='coolwarm', aspect='auto', extent=[xmin, xmax, ymin, ymax])
        cset = ax.contour(X, Y, Z, colors='k')
        ax.clabel(cset, inline=1, fontsize=10)
        ax.set_xlabel(attr_name[0])
        ax.set_ylabel(attr_name[1])
        ax.set_xlim(xmin, xmax)
        ax.set_ylim(ymin, ymax)
        ax.set_title('Two Dimensional Gaussian Kernel Density Estimation')
        # refresh canvas
        self.canvas.draw()
if __name__ == "__main__":
WidgetPreview(KDE2D_w).run(Table("iris"))
| StarcoderdataPython |
5038498 | <gh_stars>0
#!/usr/bin/env python3
"""Build transition matrix estimators"""
import csv
import os
import random
import sys
from typing import Callable, List, Tuple
import lightgbm as lgb
import numpy as np
from scipy.special import softmax
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
class Backward:
    """Backward-correction wrapper around a probabilistic classifier.

    Training ignores the transition matrix; at prediction time the inverse of
    T can optionally be applied (followed by a softmax) to denoise the
    predicted class distribution.
    """

    def __init__(self, model):
        self._model = model

    def train(self, X: np.ndarray, y: np.ndarray, _: np.ndarray) -> None:
        """Fit the wrapped model on the (noisy) labels."""
        self._model.fit(X, y)

    def __call__(self,
                 X: np.ndarray,
                 T: np.ndarray,
                 denoise: bool = False) -> np.ndarray:
        """Return class probabilities; apply the T^-1 correction if *denoise*."""
        probs = self._model.predict_proba(X)
        if not denoise:
            return probs
        return softmax(np.linalg.inv(T) @ probs.T, axis=0).T
Model = Callable[[int, int], nn.Module]
class Forward:
    """Forward-correction wrapper: trains with T applied to the softmax output."""

    def __init__(self, build: Model):
        self._build = build

    def train(self, X: np.ndarray, y: np.ndarray, T: np.ndarray) -> None:
        """Train a fresh network whose softmax output is mixed through T."""
        T_tensor = torch.from_numpy(T.astype(np.float32))
        sm = nn.Softmax(dim=1)
        self._model = train(self._build, X, y,
                            lambda logits: sm(T_tensor @ sm(logits).T).T)

    def __call__(self,
                 X: np.ndarray,
                 T: np.ndarray,
                 denoise: bool = False) -> np.ndarray:
        """Predict probabilities; without *denoise*, push them through T."""
        with torch.no_grad():
            logits = self._model(torch.from_numpy(X.astype(np.float32))).numpy()
        probs = softmax(logits, axis=1)
        if denoise:
            return probs
        return softmax(T @ probs.T, axis=0).T
def train(build: Model, X: np.ndarray, y: np.ndarray,
          transform: Callable[[torch.Tensor], torch.Tensor]) -> nn.Module:
    """Train a freshly built network with SGD for 10 epochs and return it.

    *transform* is applied to the raw network output before the loss is
    computed (e.g. a forward noise-correction); pass the identity for plain
    training.
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Output width is inferred from the largest label value.
    model = build(X.shape[1], max(y) + 1)
    if torch.cuda.device_count() > 1:
        # NOTE(review): DistributedDataParallel normally requires an
        # initialized process group (and lives under nn.parallel) — confirm
        # this multi-GPU branch actually works as written.
        model = nn.DistributedDataParallel(model)
    model.to(device)
    # The full dataset is moved to the device once, then batched from there.
    X = torch.from_numpy(X.astype(np.float32)).to(device)
    y = torch.from_numpy(y.astype(np.int64)).to(device)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=1e-1,
                                weight_decay=1e-5,
                                momentum=0.9)
    train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(
        X, y),
                                               batch_size=256,
                                               shuffle=True)
    criterion = nn.CrossEntropyLoss()
    for epoch in range(10):
        for X, y in train_loader:
            optimizer.zero_grad()
            pred = transform(model(X))
            criterion(pred, y).backward()
            optimizer.step()
    # Switch to inference mode before handing the model back.
    model.eval()
    return model
class NeuralNet:
    """sklearn-style facade (fit / predict_proba) over a torch model factory."""

    def __init__(self, build: Model):
        self._build = build

    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        """Train a new network on (X, y) with the identity output transform."""
        self._model = train(self._build, X, y, lambda out: out)

    def predict_proba(self, X: np.ndarray) -> np.ndarray:
        """Return softmax class probabilities for X."""
        with torch.no_grad():
            logits = self._model(torch.from_numpy(X.astype(np.float32))).numpy()
        return softmax(logits, axis=1)
def evaluate(dataset: str, T: List[List[float]], model) -> Tuple[float, float]:
    """Train *model* on the noisy split of *dataset* and score it.

    Loads data/<dataset>.npz, flattens the images, holds out 20% of the noisy
    training labels for validation, trains with transition matrix *T*, and
    returns (validation accuracy on noisy labels, denoised test accuracy).
    """
    with np.load(f'data/{dataset}.npz') as data:
        Xtr = data['Xtr'].reshape((len(data['Xtr']), -1))
        Xts = data['Xts'].reshape((len(data['Xts']), -1))
        Xtr, Xtr_val, Str, Str_val = train_test_split(Xtr,
                                                      data['Str'],
                                                      test_size=0.2)
        Yts = data['Yts']
    # Convert the nested-list transition matrix once, up front.
    T = np.array(T)
    model.train(Xtr, Str, T)
    acc_val = top1_accuracy(model(Xtr_val, T), Str_val)
    acc = top1_accuracy(model(Xts, T, True), Yts)
    return acc_val, acc
def linear(in_dim: int, out_dim: int) -> nn.Module:
    """Single fully-connected layer (multinomial logistic-regression head)."""
    return nn.Linear(in_dim, out_dim)
def three_layer(in_dim: int, out_dim: int) -> nn.Module:
    """Three-layer MLP with ReLU activations between the linear layers."""
    layers = [
        nn.Linear(in_dim, out_dim),
        nn.ReLU(),
        nn.Linear(out_dim, out_dim),
        nn.ReLU(),
        nn.Linear(out_dim, out_dim),
    ]
    return nn.Sequential(*layers)
def top1_accuracy(pred: np.ndarray, y: np.ndarray) -> float:
    """Fraction of rows whose argmax prediction matches the label in *y*.

    Vectorized with ndarray.mean() instead of Python-level sum()/len(),
    and cast to a plain float for clean printing/serialization.
    """
    return float((pred.argmax(axis=1) == y).mean())
def reset_seed(seed: int = 0):
    """Seed python, numpy and torch RNGs (and cudnn flags) for reproducibility."""
    random.seed(seed)
    # NOTE(review): setting PYTHONHASHSEED after interpreter start does not
    # affect this process's str hashing — it only influences subprocesses.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # If multi-GPUs are used.
    torch.cuda.manual_seed_all(seed)
    # Trade cudnn autotuning for deterministic kernel selection.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def main() -> None:
    """Run all training and evaluation"""
    # Stream results as CSV to stdout: mean/std of accuracy over 10 repeats
    # for every (dataset, model) combination.
    w = csv.DictWriter(
        sys.stdout,
        ['dataset', 'model', 'acc_val', 'acc_val_std', 'acc', 'acc_std'])
    w.writeheader()
    for dataset, T in DATA.items():
        for name, model in MODEL.items():
            # Fixed seed per combination for reproducibility.
            reset_seed()
            acc_val, acc = [], []
            for i in range(10):
                v, a = evaluate(dataset, T, model)
                acc_val.append(v)
                acc.append(a)
            w.writerow({
                'dataset': dataset,
                'model': name,
                'acc_val': np.mean(acc_val),
                'acc_val_std': np.std(acc_val),
                'acc': np.mean(acc),
                'acc_std': np.std(acc)
            })
# Label-noise transition matrices assumed for each dataset variant.
DATA = {
    'FashionMNIST0.5': [[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]],
    'FashionMNIST0.6': [[0.4, 0.3, 0.3], [0.3, 0.4, 0.3], [0.3, 0.3, 0.4]],
}
# Estimators under comparison; instantiated once and reused across repeats.
MODEL = {
    'forward_linear': Forward(linear),
    'backward_linear': Backward(NeuralNet(linear)),
    'forward_three_layer': Forward(three_layer),
    'backward_three_layer': Backward(NeuralNet(three_layer)),
    'LGB': Backward(lgb.LGBMClassifier()),
    'logistic': Backward(LogisticRegression()),
}
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3446886 | <filename>Protheus_WebApp/Modules/SIGAGTP/GTPA115BTestCase.py
from tir import Webapp
from datetime import datetime
DataSystem = datetime.today().strftime('%d/%m/%Y')
import unittest
import time
class GTPA115B(unittest.TestCase):
    """TIR UI-automation suite for Protheus routine GTPA421 (ticket checking)."""

    @classmethod
    def setUpClass(inst):
        inst.oHelper = Webapp()
        # Initialization parameters: module, base date, group, branch, environment.
        inst.oHelper.Setup("SIGAGTP", DataSystem,"T1","D MG 01 ","88")
        # Name of the routine under test.
        inst.oHelper.Program("GTPA421")

    def test_GTPA115B_CT001(self):
        """Check tickets filtered by origin/type/status, then change the
        control document data and confirm."""
        self.oHelper.SearchBrowse("D MG AG950020200327", "Filial+agência + Número Ficha")
        self.oHelper.SetButton("Outras Ações", "Conferência de Bilhetes")
        time.sleep(6)
        self.oHelper.SetValue("Origem", "1")
        self.oHelper.SetValue("Tipo", "E")
        self.oHelper.SetValue("Status", "V")
        self.oHelper.SetButton("Pesquisar")
        self.oHelper.SetButton("Outras Ações", "Conferir Todos")
        time.sleep(5)
        self.oHelper.SetButton("Todos")
        time.sleep(3)
        self.oHelper.SetButton("Outras Ações", "Conferir Todos")
        time.sleep(3)
        self.oHelper.SetButton("Outras Ações", "Altera Contr. Docto.")
        self.oHelper.SetValue("Tipo de Documento ?", "TP9500")
        self.oHelper.SetValue("Série ?", "CDD")
        self.oHelper.SetValue("Sub Série ?", "500")
        self.oHelper.SetValue("Complemento ?", "9")
        self.oHelper.SetValue("Número Documento ?", "000011")
        self.oHelper.SetButton("OK")
        time.sleep(2)
        self.oHelper.SetKey("ENTER", grid=False)
        self.oHelper.SetKey("ENTER", grid=True)
        self.oHelper.ClickGridCell("CCF", 1)
        self.oHelper.SetKey("ENTER", grid=True)
        self.oHelper.SetButton("Confirmar")
        self.oHelper.SetKey("ENTER")
        self.oHelper.SetButton("Fechar")
        time.sleep(5)
        self.oHelper.AssertTrue()
        #self.oHelper.RestoreParameters()
    #
    def test_GTPA115B_CT002(self):
        """Check all tickets without filters and confirm the selection."""
        print("test_GTPA115B_CT002")
        self.oHelper.SearchBrowse("D MG AG950020200327", "Filial+agência + Número Ficha")
        self.oHelper.SetButton("Outras Ações", "Conferência de Bilhetes")
        time.sleep(6)
        self.oHelper.SetButton("Todos")
        time.sleep(3)
        self.oHelper.SetButton("Outras Ações", "Conferir Todos")
        time.sleep(2)
        self.oHelper.SetKey("ENTER", grid=True)
        self.oHelper.ClickGridCell("CCF", 1)
        self.oHelper.SetKey("ENTER", grid=True)
        self.oHelper.SetButton("Confirmar")
        self.oHelper.SetKey("ENTER")
        self.oHelper.SetButton("Fechar")
        time.sleep(5)
        self.oHelper.AssertTrue()
        #self.oHelper.RestoreParameters()

    @classmethod
    def tearDownClass(inst):
        # Close the browser session opened in setUpClass.
        inst.oHelper.TearDown()
if __name__ == '__main__':
    # Allow the suite to be executed directly with the standard runner.
    unittest.main()
| StarcoderdataPython |
11281366 | """
Azure Training admin
"""
from django.contrib import admin
from .models import Project, Task, Train
# Expose the training models in the Django admin with default ModelAdmin options.
admin.site.register(Project)
admin.site.register(Task)
admin.site.register(Train)
| StarcoderdataPython |
11361510 | <gh_stars>1-10
from __future__ import print_function
import six
import sys
import os
import traceback
from Technical.SaveLog import SaveLog
# Python 3 has no `unicode` builtin; alias it to `str` so py2-era code that
# references `unicode` keeps working.
if six.PY3:
    unicode = str
# Redirect stdout so everything printed is also captured by SaveLog.
sys.stdout = SaveLog()
print(os.path.basename(__file__))
__author__ = 'BS-DEV-QT-VFOURRIER'
__documentation__ = ''
| StarcoderdataPython |
6631631 | import ssa
import numpy as np
import matplotlib.pyplot as plt
def run():
    """Simulate dimerization kinetics with SSA and save a plot to PNG."""
    # Stoichiometry: reaction 1 consumes 2 A and makes 1 B (dimerization);
    # reaction 2 consumes 1 B and makes 2 A (dissociation).
    reactants = np.array([[2, 0], [0, 1]])
    products = np.array([[0, 1], [2, 0]])
    volume = 1e-15
    use_na = True
    # Deterministic rate constants converted to stochastic ones for comparison
    # against the literature values in k2.
    k_det = np.array([5e5, 0.2])
    k1 = ssa.util.k_det_to_k_stoch(
        k_det, reactants=reactants, volume=volume, use_na=use_na
    )
    k2 = np.array([1.66e-3, 0.2])
    print(k1, k2)
    # Initial molar concentration converted to molecule counts, again compared
    # against the reference value in x02.
    x0_molar_concentration = np.array([5e-7, 0])
    x01 = ssa.util.molar_concentration_to_molecule_number(
        x0_molar_concentration, volume=volume, use_na=use_na
    )
    x02 = np.array([301.1, 0])
    print(x01, x02)
    t_max = 10.0
    # The simulation itself uses the reference constants (k2, x02).
    model = ssa.Model(
        reactants=reactants, products=products, k=k2, x0=x02, t_max=t_max, n_procs=2
    )
    result = model.simulate(n_reps=5)
    ssa.plot(result, show=False)
    plt.savefig("dimerization_kinetics.png")

# Execute the simulation when the module is imported/run.
run()
| StarcoderdataPython |
5013813 | # Moduł definiujący endpointy API
from flask_restful import Resource
from flask_jwt_extended import create_access_token
from flask import jsonify, request
import json as jsonlib
from marshmallow import ValidationError
import datetime
from .validators import VUser, VUserPatch, VEmail, VJson, VUserLogin
from app import db_connector
class Login(Resource):
    """Endpoint issuing a JWT for valid e-mail/password credentials."""
    # /user/
    def post(self):
        # Validate the payload shape, then look the account up by e-mail.
        try:
            cred = VUserLogin().load(request.get_json())
            user = db_connector.get_from_email(cred['email'])
        except ValidationError as error:
            return error.messages, 422
        if not user:
            return db_connector.gen_response('bad_email') # email invalid
        if not user.check_password(cred['password']):
            return db_connector.gen_response('bad_password') # password invalid
        # 30-day token carrying the user's id as its identity.
        token = create_access_token(identity=user.id, expires_delta=datetime.timedelta(days=30))
        return {'token': token}, 200
class Register(Resource):
    """Endpoint creating a new user account (registration)."""

    def post(self):
        """Validate the JSON payload and create the user; 422 on bad input."""
        try:
            payload = VUser().load(request.get_json())
            return db_connector.add_user(payload['nick'], payload['email'], payload['password'])
        except ValidationError as error:
            return error.messages, 422
| StarcoderdataPython |
12858500 | <reponame>Odder/PyraminXolver
from collections import deque
import pickle
from . import Pyraminx, PYRAMINX_CASE_PATH
from multiprocessing import Pool, cpu_count
def setup():
    """Build the full move graph (with BFS depths) and pickle it to disk."""
    graph = create_graph()
    with open(PYRAMINX_CASE_PATH, 'wb') as f:
        pickle.dump(graph, f, pickle.HIGHEST_PROTOCOL)
def create_graph():
    """Expand all 933120 pyraminx states in parallel, then annotate depths."""
    with Pool(cpu_count()) as p:
        graph = p.map(explore_node, [x for x in range(933120)])
    graph = generate_depths(graph)
    return graph
def explore_node(node):
    """Return the adjacency row for *node*: [depth_slot, neighbour_1..8].

    Slot 0 is reserved for the BFS depth (filled later by generate_depths);
    slots 1..8 hold the state id reached by each of the eight moves.
    """
    state = Pyraminx.id_to_state(node)
    node_values = [-1, -1, -1, -1, -1, -1, -1, -1, -1]
    for i in range(1, 9):
        # Move i uses transformation i-1; its result is stored at slot i.
        transformation = Pyraminx.move_transformations[i - 1]
        new_state = Pyraminx.apply_move(state, transformation)
        new_id = Pyraminx.state_to_id(new_state)
        node_values[i] = new_id
    return node_values
def generate_depths(graph):
    """BFS from the solved state (id 0), writing each node's depth into slot 0.

    Each entry of *graph* is ``[depth, neighbour_1, ...]``; unvisited nodes
    carry a depth of -1 and are assigned ``parent_depth + 1`` when first seen.
    """
    frontier = deque()
    graph[0][0] = 0
    frontier.append(0)
    while frontier:
        node = frontier.popleft()
        next_depth = graph[node][0] + 1
        for neighbour in graph[node][1:]:
            if graph[neighbour][0] == -1:
                graph[neighbour][0] = next_depth
                frontier.append(neighbour)
    return graph
if __name__ == '__main__':
    # Building and pickling the full table takes a while; only run directly.
    setup()
| StarcoderdataPython |
4901677 | # Generated by Django 3.0.6 on 2020-06-09 10:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Apply an identical CharField definition to all nine period columns."""

    dependencies = [
        ('schedule', '0007_auto_20200609_1550'),
    ]

    # All nine period columns receive the same field spec, so the AlterField
    # operations are generated in a loop instead of spelled out one by one.
    # NOTE(review): ``null=' '`` is a truthy string where a bool is expected
    # (effectively null=True); preserved as-is to match the recorded state.
    _PERIOD_FIELDS = [
        'eigth_period',
        'fifth_period',
        'first_period',
        'fourth_period',
        'ninth_period',
        'second_period',
        'seventh_period',
        'sixth_period',
        'third_period',
    ]

    operations = [
        migrations.AlterField(
            model_name='routine',
            name=_field,
            field=models.CharField(blank=True, max_length=30, null=' '),
        )
        for _field in _PERIOD_FIELDS
    ]
| StarcoderdataPython |
1736931 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [("pollingstations", "0003_residentialaddress")]
operations = [
migrations.AlterField(
model_name="residentialaddress",
name="postcode",
field=models.CharField(
null=True, db_index=True, blank=True, max_length=100
),
)
]
| StarcoderdataPython |
5093245 | #!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id: ogr_sxf.py 26513 2013-10-02 11:59:50Z bishop $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test OGR SXF driver functionality.
# Author: <NAME> <<EMAIL>>
#
###############################################################################
# Copyright (c) 2013, NextGIS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
import gdaltest
from osgeo import ogr
import pytest
###############################################################################
# Open SXF datasource.
def test_ogr_sxf_1():
    """Open the SXF test datasource, tolerating driver warnings."""
    gdaltest.sxf_ds = None
    with gdaltest.error_handler():
        # Expect Warning 0 and Warning 6.
        gdaltest.sxf_ds = ogr.Open('data/100_test.sxf')

    # Previously written as "if ok: return; pytest.fail()" -- a direct assert
    # reports the failure with the actual condition instead of a bare fail().
    assert gdaltest.sxf_ds is not None
###############################################################################
# Run test_ogrsf
def test_ogr_sxf_2():
    """Run the generic test_ogrsf utility against the SXF datasource."""
    import test_cli_utilities
    if test_cli_utilities.get_test_ogrsf_path() is None:
        pytest.skip()

    ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' data/100_test.sxf')

    # 'in' / 'not in' replace the C-style str.find() != -1 comparisons.
    assert 'INFO' in ret and 'ERROR' not in ret
###############################################################################
#
def test_ogr_sxf_cleanup():
    """Release the datasource opened by test_ogr_sxf_1."""
    if gdaltest.sxf_ds is not None:
        gdaltest.sxf_ds = None
    else:
        pytest.skip()
| StarcoderdataPython |
1990309 | <reponame>SymmetricChaos/FiniteFields<gh_stars>1-10
# Find the shortest chain of additions, starting from 1, which give n.
# This is a simple branch and bound style algorithm which starts by trying
# the greedy method and prunes branches that are worse than what it has found
# before.
def addition_chain_recur(N, L=None, best=float('inf')):
    """Recursively search for the shortest addition chain ending at N.

    Branch-and-bound: each step extends the chain by the sum of its last
    element with some earlier element (trying the largest -- greedy doubling --
    first) and prunes any branch already as long as the best solution found.

    :param N: target value the chain must reach.
    :param L: chain built so far (internal; defaults to [1]).
    :param best: length of the best chain found so far (internal bound).
    :return: the shortest chain as a list, or None if this branch is pruned.
    """
    # A mutable default ([1]) was replaced by the None sentinel idiom; the
    # original never mutated it, but the pattern is a classic latent bug.
    if L is None:
        L = [1]
    # Found a solution: return it.
    if L[-1] == N:
        return L
    # Not better than the best known chain: prune this branch.
    if len(L) >= best:
        return None
    # Otherwise search, largest extension first.
    out = []
    for i in reversed(L):
        d = L[-1] + i
        # Ignore branches that would overshoot the target.
        if N - d >= 0:
            v = addition_chain_recur(N, L + [d], best)
            # None means the branch was pruned or failed.
            if v is None:
                continue
            # Record a new best solution and tighten the bound.
            if len(v) < best:
                best = len(v)
                out = v
    # All branches failed: report failure to the caller.
    if not out:
        return None
    return out
# Simpler wrapper function
def addition_chain(n):
"""Find the shortest chain of additions to reach n"""
return addition_chain_recur(n) | StarcoderdataPython |
11207918 | <reponame>felipesch92/cursoFiap
# Static list mixing a string and an int literal (unused below).
lista_estatica = ['teste', 30]
# Interactive list: [user name, "logged in" flag].
# NOTE(review): bool(input(...)) is True for ANY non-empty answer (even "nao")
# and False only for an empty string -- confirm this coercion is intended.
lista_dinamica = [input('Nome do usuário'), bool(input('Está logado? '))]
print(lista_dinamica)
| StarcoderdataPython |
193089 | <filename>nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..segmentation import SimilarityIndex
def test_SimilarityIndex_inputs():
    """Verify the declared input-spec trait metadata of SimilarityIndex.

    AUTO-GENERATED by tools/checkspecs.py (see file header) -- the expected
    metadata below must match the interface definition exactly; regenerate
    rather than hand-edit.
    """
    input_map = dict(
        ANNContinuousVolume=dict(
            argstr='--ANNContinuousVolume %s',
            extensions=None,
        ),
        args=dict(argstr='%s', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        inputManualVolume=dict(
            argstr='--inputManualVolume %s',
            extensions=None,
        ),
        outputCSVFilename=dict(
            argstr='--outputCSVFilename %s',
            extensions=None,
        ),
        thresholdInterval=dict(argstr='--thresholdInterval %f', ),
    )
    inputs = SimilarityIndex.input_spec()

    # Every expected metadata key/value must be present on the live trait.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_SimilarityIndex_outputs():
    """Verify the (empty) output-spec trait metadata of SimilarityIndex.

    AUTO-GENERATED by tools/checkspecs.py -- regenerate rather than hand-edit.
    """
    # SimilarityIndex declares no outputs, so the expected map is empty.
    output_map = dict()
    outputs = SimilarityIndex.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| StarcoderdataPython |
import os
import sys

# Windows/conda quirk: put the interpreter's Library\bin on the DLL search
# path so native wheels imported below (cv2, torch) can resolve their
# dependent DLLs.  os.add_dll_directory is a Windows-only API, so this module
# as written only runs on Windows.
py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
os.add_dll_directory(py_dll_path)
import numpy as np
import cv2
from PIL import Image, ImageDraw
from matplotlib import cm
from scipy import ndimage
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
class KjnFastRCNN(object):
    """Faster R-CNN (ResNet-50 FPN) wrapper for two-class detection on numpy images.

    Loads fine-tuned weights from *model_path* (when given) and exposes:
      * predict()           -> list of corner-point bbox dicts
      * detect()            -> (center-x/center-y/w/h array, confidences, class ids)
      * predict_with_draw() -> bbox dicts plus the image with boxes drawn on it
    """

    def __init__(self, model_path='object_detection_and_tracing/models/model_OD.pth'):
        super().__init__()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model_path = model_path
        self.model = self.get_model_object_detection(num_classes=2)
        if self.model_path is not None:
            self.model.load_state_dict(torch.load(self.model_path))
        # Fix: previously .to(device) and .eval() ran only when a checkpoint
        # path was supplied, leaving a pathless model on CPU in train mode.
        self.model.to(self.device)
        self.model.eval()

    def get_model_object_detection(self, num_classes):
        """Return a COCO-pretrained Faster R-CNN with its box head resized to num_classes."""
        model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
        in_features = model.roi_heads.box_predictor.cls_score.in_features
        model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
        return model

    def _filtered_detections(self, img, iou_threshold):
        """Run inference on a numpy image; return NMS-kept (box, label, score) triples.

        Fix: the original methods paired labels[idx] with boxes[i]/scores[i],
        where i counted only *kept* detections -- after the first suppressed
        index every box and score was matched with the wrong label.  Indexing
        everything by the NMS-kept index keeps the triples aligned.
        """
        image = Image.fromarray(np.uint8(img)).convert('RGB')
        tensor = torchvision.transforms.functional.to_tensor(image).to(self.device)
        with torch.no_grad():
            prediction = self.model([tensor])
        # torchvision.ops.nms returns the indices of the boxes it keeps.
        keep = torchvision.ops.nms(boxes=prediction[0]['boxes'],
                                   scores=prediction[0]['scores'],
                                   iou_threshold=iou_threshold).tolist()
        boxes = prediction[0]['boxes'].cpu().detach().numpy()
        labels = prediction[0]['labels'].cpu().detach().numpy()
        scores = prediction[0]['scores'].cpu().detach().numpy()
        # Sort to preserve the original ascending-index output order.
        return [(boxes[idx].astype(int), labels[idx], scores[idx])
                for idx in sorted(keep)]

    @staticmethod
    def _bbox_dict(box, label, score):
        """Build the corner-point result dict for one detection.

        The 'botom_*' spellings are kept as-is: they are part of the public
        result schema existing callers rely on.
        """
        return {
            'top_left': (box[0], box[1]),
            'top_right': (box[2], box[1]),
            'botom_right': (box[2], box[3]),
            'botom_left': (box[0], box[3]),
            'label': label,
            'probability': score,
        }

    def predict(self, img, iou_threshold=0.3):
        """Return a list of bbox dicts for every detection surviving NMS."""
        return [self._bbox_dict(box, label, score)
                for box, label, score in self._filtered_detections(img, iou_threshold)]

    def detect(self, img, iou_threshold=0.7):
        """Return detections as (x-center/y-center/width/height, confidences, class ids) arrays."""
        bbox_xcycwh, cls_conf, cls_ids = [], [], []
        for box, label, score in self._filtered_detections(img, iou_threshold):
            x0, y0, x1, y1 = box
            bbox_xcycwh.append([(x1 + x0) / 2, (y1 + y0) / 2, (x1 - x0), (y1 - y0)])
            cls_conf.append(score)
            cls_ids.append(label)
        return np.array(bbox_xcycwh, dtype=np.float64), np.array(cls_conf), np.array(cls_ids)

    def predict_with_draw(self, img, iou_threshold=0.5):
        """Return (bbox dicts, RGB image with a green rectangle drawn per box)."""
        canvas = Image.fromarray(np.uint8(img)).convert('RGB')
        draw = ImageDraw.Draw(canvas)
        results = []
        for box, label, score in self._filtered_detections(img, iou_threshold):
            draw.rectangle(box.tolist(), outline="green", width=10)
            results.append(self._bbox_dict(box, label, score))
        return results, np.asarray(canvas)
if __name__ == "__main__":
def load_images(path):
images = []
valid_images = ['.png', '.jpg', '.jpeg']
for f in os.listdir(path):
ext = os.path.splitext(f)[1]
if ext.lower() not in valid_images:
continue
images.append(os.path.join(path, f))
return images
import pprint
kjn = KjnFastRCNN()
for idx, image_path in enumerate(sorted(load_images('E:/kjn_biedronka/biedronka_img_dataset3/'))):
print("image_path: ", image_path)
image = cv2.imread(image_path)
bbox_xcycwh, cls_conf, cls_ids = kjn.detect(image)
print(bbox_xcycwh)
| StarcoderdataPython |
12841735 | <gh_stars>1-10
import uuid
def set_created_by_if_empty(model, user):
    """Assign *user* to model.created_by when the related object is missing.

    Django raises an exception on attribute access when a related object does
    not exist, so the only reliable probe is to attempt the access and catch
    the failure (EAFP).
    """
    try:
        # The access itself is the test; the value is unused.
        model.created_by
    except Exception:
        # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        model.created_by = user
def get_id_or_none(model):
    """Return model.id, or None when the model has no usable primary key.

    Django raises on pk access before the object is saved to the db, so the
    access is attempted and the failure caught.
    """
    try:
        return model.id
    except Exception:
        # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        return None
def get_instance_or_none(Model, prop, value):
    """Return the instance of *Model* whose *prop* equals *value*, or None.

    Wraps Model.objects.get(prop=value); any lookup failure (missing row,
    multiple rows, bad field) yields None instead of raising.
    """
    try:
        return Model.objects.get(**{prop: value})
    except Exception:
        # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        return None
def set_guid_if_empty(model):
    """Populate model.guid with a fresh GUID unless one is already set."""
    if model.guid:
        return
    model.guid = get_guid()
def get_guid():
    """Return a new random GUID as a 32-character lowercase hex string."""
    new_uuid = uuid.uuid4()
    return new_uuid.hex
| StarcoderdataPython |
366573 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import kambi.query
import kambi.stores
import multiprocessing
class Metric(object):
    """Periodically polls a Sumo Logic query and stores datapoints for statuspage.io.

    The query/span configuration is injected into *query* and wrapped in a
    kambi.query.Query; datapoints accumulate in a kambi.stores.MetricStore.
    When *backfill* is set, a background process fills in historical data.
    NOTE: __init__ mutates the caller's *query* dict (adds 'conn' and 'span').
    """

    def __init__(self, metric_id, query, sumologic_client, statuspageio_client,
                 interval=30, span=900000, prune=True, backfill=False,
                 backfill_span=86400000):
        self.metric_id = metric_id
        self.interval = interval          # polling period in seconds
        self.conn = statuspageio_client
        self.span = span                  # query window in milliseconds
        self.prune = prune
        query['conn'] = sumologic_client
        query['span'] = span
        self.query = kambi.query.Query(**query)
        self.datastore = kambi.stores.MetricStore(self)
        if backfill:
            process = multiprocessing.Process(target=self.backfill,
                                              args=(backfill_span,))
            process.start()

    def populate(self):
        """Fetch-and-store loop: run the query every *interval* seconds forever."""
        while True:
            started = int(time.time() * 1000)
            self.datastore.update(self.query.fetch())
            finished = int(time.time() * 1000)
            # Sleep only for the remainder of the interval after fetch time.
            time_left = (self.interval * 1000 - (finished - started))
            if time_left > 0:
                time.sleep(time_left / 1000)

    def backfill(self, backfill_span):
        """Backfill datapoints covering *backfill_span* ms before now.

        Fix: the previous loop condition was ``start > start - backfill_span``,
        which is always true for a positive span, so the loop never terminated
        (and each iteration re-fetched the entire backfill window).  The floor
        is now computed once and each iteration fetches one span-sized window.
        """
        print('Starting backfill')
        start = int(time.time() * 1000)
        floor = start - backfill_span
        # Suspend pruning so old datapoints are not discarded as they arrive.
        prune_lock_value = self.datastore.prune_lock
        self.datastore.prune_lock = True
        while start > floor:
            from_ = max(floor, start - self.span)
            to = start
            print('Backfilling from {from_} to {to}'.format(from_=from_,
                                                            to=to))
            self.datastore.update(self.query.fetch(from_, to))
            start -= self.span
        self.datastore.prune_lock = prune_lock_value
        print('Finished backfill')

    def update(self, timestamp, value):
        """Push one datapoint for this metric to statuspage.io."""
        self.conn.update_metric(self.metric_id, timestamp, value)
| StarcoderdataPython |
11292525 | <filename>emnist/__init__.py
"""
# EMNIST
This package is a convenience wrapper around the EMNIST data set. Documentation on this data set, as well as manually
downloadable files, can be found [here](https://www.nist.gov/itl/iad/image-group/emnist-dataset).
"""
import gzip
import html
import logging
import os
import re
import zipfile
import numpy
import requests
import tqdm
# Module-level logger for download/extraction progress messages.
LOGGER = logging.getLogger(__name__)

# These are ordered from most preferred to least preferred. The file is hosted on google drive to be polite to the
# authors and reduce impact to the original download server.
SOURCE_URLS = [
    'https://drive.google.com/uc?id=1R0blrtCsGEVLjVL3eijHMxrwahRUDK26',
    'http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip'
]

# Where the downloaded zip archive is cached ('~' expanded by get_cached_data_path()).
CACHE_FILE_PATH = '~/.cache/emnist/emnist.zip'
# Suffix for in-progress downloads so interrupted ones are always detectable.
PARTIAL_EXT = '_partial'
# Zip member path for one gzipped IDX file; dim is 3 for images, 1 for labels.
ZIP_PATH_TEMPLATE = 'gzip/emnist-{dataset}-{usage}-{matrix}-idx{dim}-ubyte.gz'
# Matches per-dataset test-image member names; group(1) is the dataset name.
DATASET_ZIP_PATH_REGEX = re.compile(r'gzip/emnist-(.*)-test-images-idx3-ubyte\.gz')
# Finds the "Download anyway" confirmation link on Google Drive's interstitial page.
GOOGLE_DRIVE_CONFIRMATION_LINK_REGEX = re.compile(rb'href="(/uc\?export=download.*?confirm=.*?)">Download anyway</a>')

# IDX header type code -> numpy dtype (see http://yann.lecun.com/exdb/mnist/).
IDX_DATA_TYPE_CODES = {
    0x08: numpy.ubyte,
    0x09: numpy.byte,
    0x0B: numpy.int16,
    0x0C: numpy.int32,
    0x0D: numpy.float32,
    0x0E: numpy.float64,
}
def download_file(url, save_path, session=None):
    """Download *url* to *save_path*.

    The payload streams into ``save_path + '_partial'`` and is renamed into
    place only after the download completes, so an incomplete or interrupted
    download can always be detected (Chrome-style).  A zero-byte placeholder
    at *save_path* reserves the final name while the transfer runs.

    :param session: optional requests session to reuse (e.g. for cookies).
    :raises FileExistsError: if *save_path* already exists.
    """
    if os.path.isfile(save_path):
        raise FileExistsError(save_path)
    LOGGER.info("Downloading %s to %s.", url, save_path)
    temp_path = save_path + PARTIAL_EXT
    try:
        with open(save_path, 'wb'), open(temp_path, 'wb') as temp_file:
            with (session or requests).get(url, stream=True) as response:
                response.raise_for_status()
                total_size = int(response.headers.get('content-length', 0))
                chunk_size = 8192
                # Fix: the progress bar's total was previously the *chunk
                # count* while update() was fed *byte* counts (and the final
                # partial chunk was over-counted), so the displayed progress
                # was wildly inflated.  Both now use bytes.
                with tqdm.tqdm(total=total_size, unit='B', unit_scale=True, unit_divisor=1024,
                               desc="Downloading %s" % os.path.basename(save_path)) as progress:
                    for chunk in response.iter_content(chunk_size=chunk_size):
                        temp_file.write(chunk)
                        progress.update(len(chunk))
    except Exception:
        # Clean up both the partial file and the placeholder on any failure.
        try:
            if os.path.isfile(temp_path):
                LOGGER.info("Removing temp file at %s due to exception during download.", temp_path)
                os.remove(temp_path)
        finally:
            if os.path.isfile(save_path):
                LOGGER.info("Removing placeholder file at %s due to exception during download.", save_path)
                os.remove(save_path)
        raise
    # Swap the finished download into place atomically.
    os.remove(save_path)
    os.rename(temp_path, save_path)
    LOGGER.info("Successfully downloaded %s to %s.", url, save_path)
def download_large_google_drive_file(url, save_path):
    """Download a Google-Drive-hosted file too large for their virus scan.

    Drive interposes a warning page for such files, so the interstitial HTML
    is fetched first, the "Download anyway" confirmation link is scraped out
    of it, and the confirmed link is downloaded with the same session (the
    confirmation relies on session cookies).
    """
    session = requests.session()
    with session.get(url) as response:
        response.raise_for_status()
        page = response.content
    confirmation = GOOGLE_DRIVE_CONFIRMATION_LINK_REGEX.search(page)
    if confirmation is None:
        raise RuntimeError("Google appears to have changed their large file download process unexpectedly. "
                           "Please download the file manually from %s and save it to ~/.cache/emnist/emnist.zip "
                           "as a manual work-around." % url)
    confirmed_link = url.split("/uc?")[0] + html.unescape(confirmation.group(1).decode())
    download_file(confirmed_link, save_path, session)
def get_cached_data_path():
    """Return the local path where the EMNIST zip is (or will be) cached, with '~' expanded."""
    cache_location = CACHE_FILE_PATH
    return os.path.expanduser(cache_location)
def clear_cached_data():
    """Delete the cached EMNIST zip and any leftover partial-download file."""
    cache_path = get_cached_data_path()
    for candidate in (cache_path, cache_path + PARTIAL_EXT):
        if os.path.isfile(candidate):
            LOGGER.info("Removing cache file %s.", candidate)
            os.remove(candidate)
    LOGGER.info("Cache is clear.")
def ensure_cached_data():
    """Check that the EMNIST data is available in the local cache, and download it if not.

    Returns the cache path.  A zero-byte cache file is treated as unusable and
    re-downloaded.  Each URL in SOURCE_URLS is tried in order; if all fail,
    the first error encountered is re-raised.
    """
    cache_path = get_cached_data_path()
    save_folder = os.path.dirname(cache_path)
    if not os.path.isdir(save_folder):
        LOGGER.info("Creating folder %s", save_folder)
        os.makedirs(save_folder)
    if os.path.isfile(cache_path):
        LOGGER.info("Cached file found at %s.", cache_path)
        if os.path.getsize(cache_path) > 0:
            return cache_path
        else:
            # A zero-byte file is a failed earlier download; discard it.
            LOGGER.info("Cached file %s is zero bytes and cannot be used.", cache_path)
            os.remove(cache_path)
    first_error = None
    for source_url in SOURCE_URLS:
        try:
            # Google Drive needs the confirmation-page dance; plain HTTP does not.
            if 'drive.google.com' in source_url:
                download_large_google_drive_file(source_url, cache_path)
            else:
                download_file(source_url, cache_path)
            break
        except Exception as e:
            # Remember only the first failure; later URLs may still succeed.
            if first_error is None:
                first_error = e
    else:
        # for/else: no URL succeeded (loop finished without break).
        assert first_error, "No source URLs listed in SOURCE_URLS!"
        raise first_error
    return cache_path
def parse_idx(data):
    """Parse binary data in IDX format, returning a numpy array of the correct shape.

    See http://yann.lecun.com/exdb/mnist/ for the format: two zero bytes, a
    dtype code, a dimension count, one big-endian uint32 per dimension, then
    the raw big-endian payload.

    :raises ValueError: if the header is not valid IDX.
    """
    data = bytes(data)
    if data[0] != 0 or data[1] != 0:
        raise ValueError("Data is not in IDX format.")
    data_type_code = data[2]
    data_type = IDX_DATA_TYPE_CODES.get(data_type_code)
    if data_type is None:
        raise ValueError("Unrecognized data type code %s. Is the data in IDX format?" % hex(data_type_code))
    dims = data[3]
    if not dims:
        raise ValueError("Header indicates zero-dimensional data. Is the data in IDX format?")
    # Fix: each dimension size was previously read via int() on a one-element
    # numpy array, which is deprecated (an error in recent numpy versions).
    # int.from_bytes reads the big-endian uint32 directly.
    shape = tuple(
        int.from_bytes(data[4 * (dim + 1):4 * (dim + 1) + 4], 'big')
        for dim in range(dims)
    )
    offset = 4 * (dims + 1)
    payload = numpy.frombuffer(data[offset:], dtype=numpy.dtype(data_type).newbyteorder('>'))
    return payload.reshape(shape)
def extract_data(dataset, usage, component):
    """Extract one image or label array from the cached EMNIST zip.

    :param dataset: one of the names returned by list_datasets(), e.g. 'mnist'.
    :param usage: 'train' or 'test'.
    :param component: 'images' (3-D array) or 'labels' (1-D array).
    """
    if usage not in ('train', 'test'):
        raise ValueError("Unrecognized value %r for usage. Expected 'train' or 'test'." % usage)
    dims_by_component = {'images': 3, 'labels': 1}
    if component not in dims_by_component:
        raise ValueError("Unrecognized value %r for component. Expected 'images' or 'labels'." % component)
    dim = dims_by_component[component]
    ensure_cached_data()
    cache_path = get_cached_data_path()
    zip_internal_path = ZIP_PATH_TEMPLATE.format(dataset=dataset, usage=usage, matrix=component, dim=dim)
    with zipfile.ZipFile(cache_path) as zf:
        compressed_data = zf.read(zip_internal_path)
    array = parse_idx(gzip.decompress(compressed_data))
    if dim == 3:
        # Why is this necessary? Was there a formatting error when the data
        # was packaged and released by NIST?
        return array.swapaxes(1, 2)
    return array
def extract_samples(dataset, usage):
    """Return the (images, labels) pair of numpy arrays for *dataset*/*usage*.

    :param dataset: a name from list_datasets(), e.g. 'digits' or 'mnist'.
    :param usage: 'train' or 'test'.
    :raises RuntimeError: if the two arrays disagree in length.
    """
    images, labels = (extract_data(dataset, usage, component)
                      for component in ('images', 'labels'))
    if len(images) != len(labels):
        raise RuntimeError("Extracted image and label arrays do not match in size. ")
    return images, labels
def extract_training_samples(dataset):
    """Return the (images, labels) training arrays for *dataset* (see list_datasets())."""
    return extract_samples(dataset, usage='train')
def extract_test_samples(dataset):
    """Return the (images, labels) test arrays for *dataset* (see list_datasets())."""
    return extract_samples(dataset, usage='test')
def list_datasets():
    """Return a list of the names of the available EMNIST datasets.

    Scans the cached zip for test-image members and extracts the dataset
    name from each matching path.
    """
    ensure_cached_data()
    cache_path = get_cached_data_path()
    with zipfile.ZipFile(cache_path) as zf:
        # Comprehension replaces the manual append loop (ruff PERF401).
        matches = (DATASET_ZIP_PATH_REGEX.fullmatch(path) for path in zf.namelist())
        return [match.group(1) for match in matches if match]
def inspect(dataset='digits', usage='test'):
    """A convenience function for visually inspecting the labeled samples to ensure they are being extracted
    correctly.

    Shows each sample in a blocking matplotlib window (close it to advance)
    with its label printed to stdout.
    """
    # NOTE: This will hang if you run it from the PyCharm python console tab, whenever you have already imported
    # matplotlib or if you have already called the function before. It's probably related to PyCharm's use of the
    # debugger to extract variable values for display in the right-hand panel of the console. (For a brief
    # explanation, see https://stackoverflow.com/a/24924921/4683578) As a simple work-around, start a fresh
    # console tab each time and run it from there.
    import matplotlib.pyplot as plt
    # Remember global pyplot state so it can be restored afterwards.
    backend = plt.get_backend()
    interactive = plt.isinteractive()
    try:
        plt.switch_backend('TkAgg')
        plt.ioff()
        images = extract_data(dataset, usage, 'images')
        labels = extract_data(dataset, usage, 'labels')
        for i in range(len(images)):
            image = images[i]
            label = labels[i]
            print("LABEL:", label)
            plt.imshow(image)
            plt.show(block=True)
    finally:
        # Restore the caller's backend and interactivity mode.
        plt.switch_backend(backend)
        if interactive:
            plt.ion()
        else:
            plt.ioff()
if __name__ == '__main__':
    # Script mode: log everything to stdout and launch the sample viewer.
    import sys
    logging.basicConfig(stream=sys.stdout)
    logging.getLogger().setLevel(0)
    inspect()
| StarcoderdataPython |
190016 | <filename>src/vigorish/cli/menu_items/change_env_var_setting.py<gh_stars>1-10
"""Menu that allows the user to view and modify environment variables."""
import subprocess
from sys import exit
from bullet import colors, Input
from getch import pause
from vigorish.cli.components import print_message, yes_no_cancel_prompt, yes_no_prompt
from vigorish.cli.menu_item import MenuItem
from vigorish.constants import EMOJIS
from vigorish.util.result import Result
# Shown when a changed variable only takes effect after an app restart.
RESTART_WARNING = "\nApplication must be restarted for these changes to take effect!"


class ChangeEnvVarSetting(MenuItem):
    """Menu item that displays one environment variable and lets the user edit it."""

    def __init__(self, app, setting_name):
        super().__init__(app)
        self.menu_item_text = setting_name
        self.menu_item_emoji = EMOJIS.get("SPIRAL")
        self.setting_name = setting_name
        # Current value read from the .env file.
        self.current_setting = self.dotenv.get_current_value(setting_name)
        # Some variables only take effect after the application restarts.
        self.restart_required = self.dotenv.restart_required_on_change(setting_name)
        self.exit_menu = False

    def launch(self):
        """Interactive edit loop: show value, prompt, confirm, persist.

        Returns a Result; calls exit(0) after persisting a value whose
        variable requires an application restart.
        """
        subprocess.run(["clear"])
        print_message(f"Variable Name: {self.setting_name}\n", fg="bright_magenta", bold=True)
        print_message(f"Current Value: {self.current_setting}\n", fg="bright_yellow", bold=True)
        if not yes_no_prompt(prompt="\nChange current setting?"):
            return Result.Ok(self.exit_menu)
        user_confirmed, new_value = False, None
        # Re-prompt until the user confirms (or cancels) a new value.
        while not user_confirmed:
            subprocess.run(["clear"])
            prompt = f"Enter a new value for {self.setting_name}:\n"
            new_value = Input(prompt, word_color=colors.foreground["default"]).launch()
            result = self.confirm_new_value(new_value)
            if result.failure:
                # User cancelled at the confirmation prompt.
                return Result.Ok(self.exit_menu)
            user_confirmed = result.value
        result = self.dotenv.change_value(self.setting_name, new_value)
        if not self.restart_required:
            return result
        print_message(RESTART_WARNING, fg="bright_magenta", bold=True)
        pause(message="Press any key to continue...")
        exit(0)

    def confirm_new_value(self, new_value):
        """Ask the user to confirm *new_value*; returns a yes/no/cancel Result."""
        prompt = (
            f"\nUpdate {self.setting_name} to the value below?"
            f"\nCurrent Value..: {self.current_setting}"
            f"\nNew Value......: {new_value}"
        )
        return yes_no_cancel_prompt(prompt, wrap=False)
| StarcoderdataPython |
5061050 | <filename>env/lib/python3.6/site-packages/nibabel/streamlines/tests/test_tractogram_file.py<gh_stars>1-10
""" Test tractogramFile base class
"""
from ..tractogram import Tractogram
from ..tractogram_file import TractogramFile
from nose.tools import assert_raises, assert_equal
def test_subclassing_tractogram_file():
    """Subclasses must implement both 'save' and 'load' to be instantiable.

    TractogramFile is abstract; each deliberate redefinition below omits one
    required method and must therefore fail to instantiate with TypeError.
    """
    # Missing 'save' method
    class DummyTractogramFile(TractogramFile):
        @classmethod
        def is_correct_format(cls, fileobj):
            return False

        @classmethod
        def load(cls, fileobj, lazy_load=True):
            return None

        @classmethod
        def create_empty_header(cls):
            return None

    assert_raises(TypeError, DummyTractogramFile, Tractogram())

    # Missing 'load' method
    class DummyTractogramFile(TractogramFile):
        @classmethod
        def is_correct_format(cls, fileobj):
            return False

        def save(self, fileobj):
            pass

        @classmethod
        def create_empty_header(cls):
            return None

    assert_raises(TypeError, DummyTractogramFile, Tractogram())

    # Now we have everything required.
    class DummyTractogramFile(TractogramFile):
        @classmethod
        def is_correct_format(cls, fileobj):
            return False

        @classmethod
        def load(cls, fileobj, lazy_load=True):
            return None

        def save(self, fileobj):
            pass

    # No error
    dtf = DummyTractogramFile(Tractogram())

    # Default create_empty_header is empty dict
    assert_equal(dtf.header, {})
def test_tractogram_file():
    """The abstract base's hooks raise NotImplementedError when not overridden."""
    assert_raises(NotImplementedError, TractogramFile.is_correct_format, "")
    assert_raises(NotImplementedError, TractogramFile.load, "")

    # Testing calling the 'save' method of `TractogramFile` object.
    class DummyTractogramFile(TractogramFile):
        @classmethod
        def is_correct_format(cls, fileobj):
            return False

        @classmethod
        def load(cls, fileobj, lazy_load=True):
            return None

        # NOTE(review): 'save' decorated as a classmethod with a 'self'
        # parameter looks accidental, but the test relies on the base-class
        # implementation being reachable via super(), so it is kept as-is.
        @classmethod
        def save(self, fileobj):
            pass

    assert_raises(NotImplementedError,
                  super(DummyTractogramFile,
                        DummyTractogramFile(Tractogram)).save, "")
| StarcoderdataPython |
177723 | from .factory import create_cli
| StarcoderdataPython |
11337935 | <filename>gffpal/scripts/trnascan2gff.py
import sys
import argparse
from typing import Tuple, List, Dict
from typing import Sequence, Mapping
from gffpal.gff import GFF
from gffpal.gff import GFF3Record
from gffpal.gff import Strand, Phase
from gffpal.attributes import GFF3Attributes
from gffpal.parsers.trnascan import TRNAScanRecord, TRNAScanSS
import logging
logger = logging.getLogger(__name__)
# Map of tRNAScan three-letter amino-acid codes (lowercased) to Sequence
# Ontology tRNA feature types; codes not listed here fall back to plain
# "tRNA" at the call site (see match_to_trna).
TYPE_MAP: Dict[str, str] = {
    "ala": "alanyl_tRNA",
    "gln": "glutaminyl_tRNA",
    "pro": "prolyl_tRNA",
    "glu": "glutamyl_tRNA",
    "met": "methionyl_tRNA",
    "asn": "asparaginyl_tRNA",
    "thr": "threonyl_tRNA",
    "gly": "glycyl_tRNA",
    "val": "valyl_tRNA",
    "tyr": "tyrosyl_tRNA",
    "cys": "cysteinyl_tRNA",
    "iso": "isoleucyl_tRNA",
    "ser": "seryl_tRNA",
    "leu": "leucyl_tRNA",
    "trp": "tryptophanyl_tRNA",
    "sec": "selenocysteinyl_tRNA",
    "pyl": "pyrrolysyl_tRNA",
    "lys": "lysyl_tRNA",
    "asp": "aspartyl_tRNA",
    "arg": "arginyl_tRNA",
    "his": "histidyl_tRNA",
    "phe": "phenylalanyl_tRNA",
}
def cli_trnascan2gff(parser):
    """Attach the trnascan2gff command-line arguments to *parser* and return it."""
    parser.add_argument(
        "txt",
        type=argparse.FileType('r'),
        help="Input trnascan result file. Use '-' for stdin.",
    )
    parser.add_argument(
        "ss",
        type=argparse.FileType('r'),
        help=(
            "Input trnascan secondary structure result file."
            "Use '-' for stdin."
        ),
    )
    parser.add_argument(
        "-o", "--outfile",
        type=argparse.FileType('w'),
        default=sys.stdout,
        help="Output gff file path. Default stdout.",
    )
    parser.add_argument(
        "-s", "--source",
        default="tRNAScan-SE",
        # Fix: was an f-string with no placeholders (flake8 F541).
        help="What to put in the source gff column.",
    )
    return parser
def fix_strand(start: int, end: int) -> Tuple[int, int, Strand]:
    """Normalise a 1-based (start, end) pair to 0-based half-open with a strand.

    A start greater than end denotes the minus strand; equal coordinates are
    unstranded.  The returned start is decremented to convert to GFF-style
    0-based indexing.
    """
    if start > end:
        start, end = end, start
        strand = Strand.MINUS
    elif start < end:
        strand = Strand.PLUS
    else:
        strand = Strand.UNSTRANDED
    return start - 1, end, strand
def match_to_gene(
    match: TRNAScanRecord,
    source: str,
    type: str
) -> GFF3Record:
    """Build the top-level gene (or pseudogene) record for one tRNAScan match."""
    start, end, strand = fix_strand(match.start, match.end)
    attributes = GFF3Attributes(id=f"{match.seqid}.{type}{match.num}")
    return GFF3Record(
        seqid=match.seqid,
        source=source,
        type=type,
        start=start,
        end=end,
        score=match.infernal_score,
        strand=strand,
        phase=Phase.NOT_CDS,
        attributes=attributes,
    )
def match_to_trna(
    match: TRNAScanRecord,
    ss: TRNAScanSS,
    source: str,
    type_map: Mapping[str, str] = TYPE_MAP,
    parents: Sequence[GFF3Record] = []
) -> GFF3Record:
    """Build the tRNA feature record for a match, attached to *parents*.

    The feature type is looked up from the amino-acid code via *type_map*,
    falling back to plain "tRNA"; the secondary structure, anticodon, and
    amino acid are stored as custom GFF3 attributes.
    """
    start, end, strand = fix_strand(match.start, match.end)
    parent_ids = [
        p.attributes.id
        for p in parents
        if p.attributes is not None and p.attributes.id is not None
    ]
    notes: List[str] = [] if match.note is None or match.note == "" else [match.note]
    return GFF3Record(
        seqid=match.seqid,
        source=source,
        type=type_map.get(match.trna_type.lower(), "tRNA"),
        start=start,
        end=end,
        score=match.infernal_score,
        strand=strand,
        phase=Phase.NOT_CDS,
        attributes=GFF3Attributes(
            id=f"{match.seqid}.tRNA{match.num}",
            parent=parent_ids,
            note=notes,
            custom={
                "secondary_structure": ss.ss,
                "anticodon": match.anticodon,
                "amino_acid": match.trna_type,
            },
        ),
        parents=parents,
    )
def match_to_introns(
    match: TRNAScanRecord,
    source: str,
    type: str = "tRNA_intron",
    parents: Sequence[GFF3Record] = [],
) -> List[GFF3Record]:
    """Build one GFF3 record per intron reported by tRNAScan for this match.

    Fix: every intron previously received the identical ID
    ``{seqid}.{type}{num}``, so a tRNA with several introns produced duplicate
    IDs, which the GFF3 specification forbids.  The first intron keeps the old
    ID (backward compatible); later introns gain an ``.<index>`` suffix.
    """
    parent_ids = [
        p.attributes.id
        for p in parents
        if p.attributes is not None and p.attributes.id is not None
    ]

    introns: List[GFF3Record] = []
    for index, (istart, iend) in enumerate(zip(match.intron_starts,
                                               match.intron_ends)):
        start, end, strand = fix_strand(istart, iend)
        intron_id = f"{match.seqid}.{type}{match.num}"
        if index > 0:
            intron_id += f".{index}"
        introns.append(GFF3Record(
            seqid=match.seqid,
            source=source,
            type=type,
            start=start,
            end=end,
            score=match.infernal_score,
            strand=strand,
            phase=Phase.NOT_CDS,
            attributes=GFF3Attributes(
                id=intron_id,
                parent=parent_ids,
            ),
            parents=parents,
        ))
    return introns
def match_to_anticodon(
    match: TRNAScanRecord,
    ss: TRNAScanSS,
    source: str,
    type: str = "anticodon",
    parents: Sequence[GFF3Record] = []
) -> GFF3Record:
    """Build the anticodon sub-feature using the secondary-structure coordinates."""
    start, end, strand = fix_strand(ss.anticodon_start, ss.anticodon_end)
    parent_ids = [
        p.attributes.id
        for p in parents
        if p.attributes is not None and p.attributes.id is not None
    ]
    return GFF3Record(
        seqid=match.seqid,
        source=source,
        type=type,
        start=start,
        end=end,
        score=match.infernal_score,
        strand=strand,
        phase=Phase.NOT_CDS,
        attributes=GFF3Attributes(
            id=f"{match.seqid}.{type}{match.num}",
            parent=parent_ids,
        ),
        parents=parents,
    )
def trnascan2gff(args: argparse.Namespace) -> None:
    """Convert tRNAScan-SE text + secondary-structure output into GFF3.

    Reads args.txt and args.ss, builds a gene/tRNA/intron/anticodon feature
    hierarchy for each match, and prints the records (parents before
    children) to args.outfile.
    """
    genes: List[GFF3Record] = []

    matches = TRNAScanRecord.from_file(args.txt)
    sses = TRNAScanSS.from_file(args.ss)
    # Index the secondary-structure records so each match can find its own.
    num_to_ss = {f"{r.seqid}.{r.num}": r for r in sses}

    for match in matches:
        ss = num_to_ss[f"{match.seqid}.{match.num}"]

        # tRNAScan flags pseudogenes in the note column.
        if match.note is not None and "pseudo" in match.note:
            type_ = "pseudogene"
        else:
            type_ = "tRNA_gene"

        gene = match_to_gene(match, args.source, type=type_)
        genes.append(gene)

        trna = match_to_trna(
            match,
            ss,
            args.source,
            type_map=TYPE_MAP,
            parents=[gene]
        )
        genes.append(trna)

        introns = match_to_introns(
            match,
            args.source,
            type="tRNA_intron",
            parents=[trna]
        )
        genes.extend(introns)

        anticodon = match_to_anticodon(
            match,
            ss,
            args.source,
            type="anticodon",
            parents=[trna]
        )
        genes.append(anticodon)

    # Emit in dependency order so parents always precede their children.
    for record in GFF(genes).traverse_children(sort=True):
        print(record, file=args.outfile)

    return
| StarcoderdataPython |
6476937 | <filename>Euler100/challenge6.py
# Sum square difference
# The squares of the first n natural numbers and the square of the sum.
def sumSquareDifference(n):
    """Return (sum of 1..n) squared minus the sum of the squares of 1..n."""
    numbers = range(1, n + 1)
    square_of_sum = sum(numbers) ** 2
    sum_of_squares = sum(k * k for k in numbers)
    return square_of_sum - sum_of_squares
| StarcoderdataPython |
5088623 | <filename>tests/test_sharknado/test_sniffing/test_single.py
# Copyright 2020 <NAME> and collaborators.
# This program is distributed under the MIT license.
from __future__ import annotations
import abc
import time
from typing import (Iterable, Union, Optional, Tuple, Any, Iterator, Type,
Sequence, Callable, Hashable, Mapping, TypeVar, Dict)
import os
import pathlib
import contextlib
import more_itertools
import pytest
from marley import sharknado
from marley.sharknado.utils import sleep_until
import marley
class SingleJob(sharknado.ThinJob):
    """Minimal ThinJob that appends 'Hello!' to a file and sniffs for it."""

    def __init__(self, path: Union[str, os.PathLike]) -> None:
        self.path: pathlib.Path = pathlib.Path(path).resolve()

    def _reduce(self) -> tuple:
        # A job's identity is its type plus the target path.
        return (type(self), self.path)

    def thin_run(self) -> None:
        with self.path.open('a') as file:
            file.write('Hello!')

    def thin_sniff(self) -> bool:
        try:
            text = self.path.read_text()
        except FileNotFoundError:
            return False
        # The job must have run exactly once for the sniff to succeed.
        assert text == 'Hello!'
        return True
@pytest.mark.parametrize('use_multiprocessing', (True, ))  # todo: Bring back false
def test_single(use_multiprocessing: bool) -> None:
    """A SingleJob runs once under one Shark and is seen as finished by a second."""
    with marley.utils.create_temp_folder() as temp_folder:
        path: pathlib.Path = temp_folder / 'single.txt'
        single_job = SingleJob(path)
        assert not path.exists()
        with sharknado.Shark(use_multiprocessing=use_multiprocessing, start=True) as shark_0:
            assert not shark_0.antilles.job_to_finished_gain[single_job]
            shark_0.add_directive_thin_jobs(single_job)
            sleep_until(lambda: shark_0.antilles.job_to_finished_gain[single_job], 150)
            assert path.read_text() == 'Hello!'
        with sharknado.Shark(use_multiprocessing=use_multiprocessing, start=True) as shark_1:
            assert not shark_1.antilles.job_to_finished_gain[single_job]
            shark_1.add_directive_thin_jobs(single_job)
            # Fix: this previously polled shark_0 (whose context had already
            # exited) instead of the shark_1 instance under test.
            sleep_until(lambda: shark_1.antilles.job_to_finished_gain[single_job], 150)
            assert path.read_text() == 'Hello!'
| StarcoderdataPython |
8180015 | """
Subsample a fasta file to generate a number of sequences.
We iterate the file twice to count the number of sequences.
"""
import os
import sys
import argparse
from roblib import stream_fasta, message
from random import shuffle
def get_sequence_ids(inputfile, verbose=False):
    """Collect and shuffle all sequence ids from a fasta file.

    :param inputfile: the fasta file to read
    :param verbose: print a summary when True
    :return: the shuffled list of sequence ids
    """
    seqids = [seqid for seqid, seq in stream_fasta(inputfile, True)]
    shuffle(seqids)
    if verbose:
        message(f"There are {len(seqids)} sequences in inputfile", "GREEN")
    return seqids
def subsample(inputfile, n, seqids, verbose=False):
    """
    Print n randomly chosen sequences from inputfile as fasta records.
    :param inputfile: fasta file
    :param n: number of sequences
    :param seqids: the (pre-shuffled) array of sequence ids
    :param verbose: more output
    """
    keep = set(seqids[:n])
    written = 0
    for seqid, seq in stream_fasta(inputfile, True):
        if seqid not in keep:
            continue
        written += 1
        print(f">{seqid}\n{seq}")
    if verbose:
        message(f"Wrote {written} sequences", "GREEN")
if __name__ == "__main__":
    # Command-line entry point: randomly subsample -n sequences from fasta -f.
    parser = argparse.ArgumentParser(description=' ')
    parser.add_argument('-f', help='input file', required=True)
    parser.add_argument('-n', help='number of sequences to write', required=True, type=int)
    parser.add_argument('-v', help='verbose output', action='store_true')
    args = parser.parse_args()
    # Two passes over the file: one to gather (and shuffle) all IDs,
    # one to emit the chosen records.
    seqids = get_sequence_ids(args.f, args.v)
    subsample(args.f, args.n, seqids, args.v)
| StarcoderdataPython |
12847856 | <reponame>schmouk/PythonOpenSourceProject<gh_stars>0
"""
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#=============================================================================
from threading import Event, Thread
from typing import Optional
from .decorators import abstract
#=============================================================================
class RepeatedTimer( Thread ):
    """The class of repeated timers.
    ===-------------------------------------------------===
    CAUTION:
    When running this code over a non RTOS (for Real-Time
    Operating System), there is NO WAY to ensure that
    periods of time will be correctly respected. It MAY
    and it WILL be that counts of milliseconds will not be
    respected by the underlying operating system. Theref-
    ore, you SHOULD NOT USE THIS CODE FOR APPLICATIONS
    DEALING WITH PEOPLE SAFETY AND FOR ANY OTHER KIND OF
    APPLICATIONS FOR WHICH REAL TIME OPERATING IS MANDATORY
    IF THIS CODE IS NOT RUN OVER A TRUE RTOS.
    Notice: MS-Windows is NOT an RTOS. Most versions of
    Linux are not also, which includes MacOS versions too.
    ===-------------------------------------------------===
    A repeated timer is a specific timer which repeats its
    processing function after a fixed period of time has
    elapsed.
    Repeated timers must be explicitly started with a call
    to their method '.start()'. They cannot be started
    twice, since they inherit from threading.Threads.
    Repeated timers can be definitively stopped by calling
    their method '.stop()'.
    Inheriting classes must implement method '.process()'.
    This method contains the whole stuff that is to be
    processed every time the watchdog is "awaken".
    Users are encouraged to add attributes to this class.
    These will then be accessible into method '.process()'
    when they might be needed for this processing.
    """
    #-------------------------------------------------------------------------
    def __init__(self, period_s: float,
                       name: Optional[str] = None,
                       *args, **kwargs) -> None:
        '''Constructor.
        Args:
            period_s: float
                The interval of time, expressed as a fract-
                ional value of seconds, to wait before the
                timer will repeat.
            name: str
                The name of this timer. May be None, in
                which case the underlying OS will give a
                default, unique one to it. Defaults to None.
            *args, **kwargs:
                Arguments to be passed to the processing
                function.
        '''
        # The event doubles as the stop flag and as the waitable period clock.
        self.stop_event = Event()
        self.set_period( period_s )
        self.args = args
        self.kwargs = kwargs
        super().__init__( name=name )
    #-------------------------------------------------------------------------
    @abstract
    def process(self) -> None:
        '''The instructions to be run when timer is repeated.
        'self.args' and 'self.kwargs' are available in
        this method.
        Raises:
            NotImplementedError: This method has not been
                implemented in inheriting class.
        '''
        ...
    #-------------------------------------------------------------------------
    def run(self) -> None:
        '''This method is automatically called by method '.start()'.
        Notice: method '.start()' is inherited from class
        'threading.Thread'.
        '''
        self.stop_event.clear() ## just to be sure that associate internal flag is set to False
        # Event.wait() returns False on timeout (period elapsed) and True when
        # .stop() sets the event, which terminates the loop.
        while not self.stop_event.wait( self.period_s ):
            self.process()
    #-------------------------------------------------------------------------
    def set_period(self, period_s: float) -> None:
        '''Modifies/sets the period of time used for repeating this timer.
        Args:
            period_s: float
                The interval of time, expressed as a fract-
                ional value of seconds, to wait before the
                timer will repeat.
        '''
        assert period_s > 0.0
        self.period_s = period_s
    #-------------------------------------------------------------------------
    def stop(self) -> None:
        '''Definitively stops this repeated timer.
        '''
        self.stop_event.set()
#===== end of Utils.repeated_timer =====#
| StarcoderdataPython |
245591 | from gtts import gTTS
import os,time
def save_sound(message):
    """Synthesize *message* to speech via gTTS and save it as 'audio.mp3'."""
    tts = gTTS(message)
    tts.save("audio.mp3")
| StarcoderdataPython |
3334880 | <filename>math2d_spline.py
# math2d_spline.py
import math
from math2d_vector import Vector
class Spline(object):
    """Abstract base class for 2D parametric curves evaluated over [0, 1]."""
    def __init__(self):
        # Control points; their exact meaning depends on the concrete subclass.
        self.point_list = []
    def Deserialize(self, json_data):
        # json_data is a list of serialized Vector payloads.
        self.point_list = [Vector().Deserialize(point_data) for point_data in json_data]
        return self
    def Serialize(self):
        return [point.Serialize() for point in self.point_list]
    def Interpolate(self, value):
        # All derivatives should provide a parametrization in [0,1].
        # Ideally, if the curve has length L, then a parameter P would yield
        # the point on the curve along it at length L*P. If this property is
        # satisfied, then we'll say the curve has a uniform parameterization.
        raise Exception('Pure virtual call.')
    def FindStepSizeForDistance(self, value, distance, step_size_delta = 0.05, eps = 0.01):
        # Numerically search for a parameter step that advances roughly
        # *distance* along the curve from parameter *value*.  Each time the
        # chord length overshoots (or undershoots) the target, the search
        # delta is halved and reversed — a bisection-like refinement.
        step_size = 0.05
        pointA = self.Interpolate(value)
        while True:
            pointB = self.Interpolate(value + step_size)
            length = (pointA - pointB).Length()
            if math.fabs(length - distance) < eps:
                break
            if (length > distance and step_size_delta > 0.0) or (length < distance and step_size_delta < 0.0):
                step_size_delta = -step_size_delta / 2.0
            step_size += step_size_delta
        return step_size
    def Length(self):
        pass
        # A default implementation here could integrate along the spline.
        # We would want to use adaptive step sizing to account for non-uniform parametrizations.
    def Render(self, step_length=0.0, step_size=0.5):
        # Draw the curve as an OpenGL line strip.  When step_length > 0 the
        # parameter step is chosen adaptively so that the sampled points are
        # roughly step_length apart in world space.
        from OpenGL.GL import glBegin, glEnd, glVertex2f, GL_LINE_STRIP
        glBegin(GL_LINE_STRIP)
        value = 0.0
        try:
            while value < 1.0:
                point = self.Interpolate(value)
                glVertex2f(point.x, point.y)
                if step_length > 0.0:
                    step_size = self.FindStepSizeForDistance(value, step_length)
                value += step_size
            # Always emit the exact endpoint so the strip reaches t = 1.
            value = 1.0
            point = self.Interpolate(value)
            glVertex2f(point.x, point.y)
        finally:
            glEnd()
class PolylineSpline(Spline):
    """A spline that walks linearly along consecutive control points."""
    def __init__(self):
        super().__init__()

    def Interpolate(self, value, length=None):
        """Return the point at fraction *value* of the polyline's arc-length.

        :param value: parameter in [0, 1].
        :param length: optional precomputed total length (avoids re-summing).
        :raises Exception: if value maps outside the polyline.
        """
        from math2d_line_segment import LineSegment
        if length is None:
            length = self.Length()
        distance = length * value
        if distance < 0.0 or distance > length:
            raise Exception('Invalid parameter value.')
        i = 0
        point = None
        while distance >= 0.0:
            point = self.point_list[i]
            line_segment = LineSegment(self.point_list[i], self.point_list[i + 1])
            # BUG FIX: was misspelled 'Lenght()', which raised AttributeError.
            segment_length = line_segment.Length()
            if segment_length < distance:
                # The target lies beyond this segment; consume it and advance.
                distance -= segment_length
                i += 1
            else:
                # BUG FIX: the fraction along the segment is
                # distance / segment_length, not the reciprocal (which is >= 1).
                # Guard against zero-length segments (coincident points).
                lerp_value = distance / segment_length if segment_length > 0.0 else 0.0
                point = line_segment.Lerp(lerp_value)
                break
        return point

    def Length(self):
        """Total arc-length: the sum of the consecutive segment lengths."""
        from math2d_line_segment import LineSegment
        if len(self.point_list) < 2:
            return 0.0
        length = 0.0
        for i in range(len(self.point_list) - 1):
            line_segment = LineSegment(self.point_list[i], self.point_list[i + 1])
            length += line_segment.Length()
        return length
class BezierSpline(Spline):
    """A Bezier curve whose control points are the spline's point list."""
    def __init__(self):
        super().__init__()

    def Interpolate(self, value):
        """Evaluate the curve at *value* via de Casteljau's algorithm:
        repeatedly lerp adjacent points until a single point remains."""
        from math2d_line_segment import LineSegment
        points = list(self.point_list)
        while len(points) > 1:
            points = [LineSegment(a, b).Lerp(value)
                      for a, b in zip(points, points[1:])]
        return points[0]
class HermiteSpline(Spline):
    """A cubic Hermite curve: endpoints plus tangents, stored in point_list
    as [start_pos, end_pos, start_tan, end_tan]."""
    def __init__(self):
        super().__init__()

    def Deserialize(self, json_data):
        # Accept either the generic list form or a keyed dict form.
        if type(json_data) is list:
            return super().Deserialize(json_data)
        elif type(json_data) is dict:
            keys = ('start_pos', 'end_pos', 'start_tan', 'end_tan')
            self.point_list = [Vector().Deserialize(json_data[key]) for key in keys]
            return self

    def Serialize(self):
        start_pos, end_pos, start_tan, end_tan = self.point_list
        return {
            'start_pos': start_pos.Serialize(),
            'end_pos': end_pos.Serialize(),
            'start_tan': start_tan.Serialize(),
            'end_tan': end_tan.Serialize()
        }

    def Interpolate(self, value):
        """Evaluate the curve at *value* using the Hermite basis polynomials."""
        t2 = value * value
        t3 = t2 * value
        start_pos = self.point_list[0]
        end_pos = self.point_list[1]
        start_tan = self.point_list[2]
        end_tan = self.point_list[3]
        # Standard Hermite basis functions h00, h10, h11, h01.
        h00 = (2.0 * t3) - (3.0 * t2) + 1.0
        h10 = t3 - (2.0 * t2) + value
        h11 = t3 - t2
        h01 = (-2.0 * t3) + (3.0 * t2)
        return (start_pos * h00) + (start_tan * h10) + (end_tan * h11) + (end_pos * h01)
1788614 | <reponame>AlexCWolff/Portfolio<gh_stars>0
from flask import render_template, redirect, url_for
from fpflask import FunctionalFlask
from effect.do import do, do_return
from effect import Effect, Func, base_dispatcher
# Module-level mutable state shared by the web handlers below.
counter = 0


def increment_counter(num):
    """Add *num* to the shared module-level counter (in place)."""
    global counter
    counter = counter + num


def get_counter():
    """Return the current value of the shared counter."""
    return counter
app = FunctionalFlask('counter')
@app.route('/')
@do
def root(request):
    # Effectful handler: yields a Func effect to read the counter, then
    # renders the page.  `do`/`do_return` come from the `effect` library's
    # generator-based coroutine support.
    counter = yield Effect(Func(get_counter))
    yield do_return(render_template('counter.html', counter=counter))
@app.route('/increment', methods=['POST'])
@do
def increment(request):
    # Reads the posted amount, performs the increment as a Func effect,
    # then redirects back to the index page (POST-redirect-GET).
    num = int(request.form['number'])
    yield Effect(Func(increment_counter, num))
    yield do_return(redirect(url_for('root')))
if __name__ == '__main__':
    # Surface handler exceptions instead of swallowing them while developing.
    app.flask.config.update(PROPAGATE_EXCEPTIONS=True)
    app.run(base_dispatcher)
| StarcoderdataPython |
11315850 | from .metrics import accuracy
| StarcoderdataPython |
25624 | <reponame>hgohel/Python-for-Everyday-Life
# -*- coding: utf-8 -*-
# !/usr/bin/env python3
if __name__ == '__main__':
    import money

    # Build a few Money amounts to play with.
    one_euro = money.Money(1.0, 'EUR')
    five_euros = money.Money(5.0, 'EUR')
    ten_euros = money.Money(10.0, 'EUR')
    one_dollar = money.Money(1.0, 'USD')

    # Money supports addition and subtraction.
    eleven_euros = one_euro + ten_euros
    sixteen_euros = one_euro + five_euros + ten_euros
    six_euros = sixteen_euros - ten_euros

    # ...and rich comparisons.
    print(f'11 EUR > 6 EUR ? {eleven_euros > six_euros}')
    print(f'11 EUR == (10 EUR + 1 EUR) ? {eleven_euros == ten_euros + one_euro}')
    print(f'11 EUR > 50 EUR ? {eleven_euros > money.Money(50.0, "EUR")}')

    # A wallet is an iterable, sized container of Money items.
    wallet = money.Wallet('My Wallet')
    wallet.add(one_euro)
    wallet.add(ten_euros)
    wallet.add(one_dollar)
    print(f'\n{wallet} has {len(wallet)} items:')
    for item in wallet:
        print(item)
| StarcoderdataPython |
11207606 | #pythran export allpairs_distances_loops(int)
#runas allpairs_distances_loops(100)
#bench allpairs_distances_loops(100)
import numpy as np
def dists(X, Y):
    """Pairwise squared Euclidean distances between the rows of X and of Y.

    :param X: array of shape (n, d)
    :param Y: array of shape (m, d)
    :return: array of shape (n, m) with result[i, j] = ||X[i] - Y[j]||^2
    """
    result = np.zeros((X.shape[0], Y.shape[0]), X.dtype)
    # BUG FIX: xrange is Python 2 only; range keeps this runnable on Python 3.
    # The explicit double loop is intentional — this module benchmarks the
    # loop formulation (see allpairs_distances_loops below).
    for i in range(X.shape[0]):
        for j in range(Y.shape[0]):
            result[i, j] = np.sum((X[i, :] - Y[j, :]) ** 2)
    return result
def allpairs_distances_loops(d):
    """Benchmark driver: pairwise distances between two fixed ones-matrices
    with *d* columns.  (Randomized variants kept for reference:)
    X = np.random.randn(1000, d); Y = np.random.randn(200, d)
    """
    X = np.ones((500, d))
    Y = np.ones((200, d))
    return dists(X, Y)
| StarcoderdataPython |
11295651 | <filename>ht/embeds.py<gh_stars>0
import discord, asyncio, random
from discord.ext import commands
from enum import Enum
from . import utils
# Fallback embed accent colour shared by several themes below.
DEFAULT = 0x444850

class Theme(Enum):
    """Embed presets: each member's value is a tuple of
    (accent colour, Wikimedia Commons icon path fragment, default heading)
    used when building a discord.Embed via :meth:`create`."""
    ERROR = (0xdd3333, "4/4e/OOjs_UI_icon_error-destructive.svg/200px-OOjs_UI_icon_error-destructive.svg.png", "An error has been encountered")
    MOD_MESSAGE = (0xff5d01, "4/4c/OOjs_UI_icon_notice-warning.svg/240px-OOjs_UI_icon_notice-warning.svg.png", "Official moderator message")
    HELP = (0x3365ca, "5/5f/OOjs_UI_icon_info-progressive.svg/240px-OOjs_UI_icon_info-progressive.svg.png", "Command help")
    SEARCH_RESULT = (0x444850, "8/8c/OOjs_UI_icon_search-ltr-invert.svg/240px-OOjs_UI_icon_search-ltr-invert.svg.png", "Search result")
    GENERIC = (DEFAULT, "5/5e/VisualEditor_icon_reference-rtl-invert.svg/240px-VisualEditor_icon_reference-invert.svg.png", "Result")
    ABOUT = (0x02af89, "4/4e/Echo_gratitude.svg/240px-Echo_gratitude.svg.png", "About Heraldtron")
    FLAG_FACT = (DEFAULT, "1/14/OOjs_UI_icon_flag-ltr-invert.svg/200px-OOjs_UI_icon_flag-ltr-invert.svg.png", "Flag fact")
    FEED = (DEFAULT, "2/21/OOjs_UI_icon_feedback-ltr-invert.svg/240px-OOjs_UI_icon_feedback-ltr-invert.svg.png", "Reddit post")
    USER_INFO = (DEFAULT, "d/d4/VisualEditor_icon_profile-invert.svg/240px-VisualEditor_icon_profile-invert.svg.png", "User")
    CHOICE = (DEFAULT, "d/df/OOjs_UI_icon_next-ltr-invert.svg/240px-OOjs_UI_icon_next-ltr-invert.svg.png", "Choice required")
    DRAW = (DEFAULT, "d/d2/OOjs_UI_icon_edit-rtl-invert.svg/240px-OOjs_UI_icon_edit-rtl-invert.svg.png", "Drawn!")
    COUNTDOWN = (DEFAULT, "b/bb/OOjs_UI_icon_clock-invert.svg/240px-OOjs_UI_icon_clock-invert.svg.png", "Countdown")

    def __init__(self, colour, icon_url, heading):
        # Enum calls __init__ with the member's value tuple unpacked.
        self.colour = colour
        # The stored fragment is only the path; prefix the Commons thumb base URL.
        self.icon_url = f"https://upload.wikimedia.org/wikipedia/commons/thumb/{icon_url}"
        self.heading = heading

    def create(self, title, desc, heading = None):
        """Build a discord.Embed styled with this theme's colour and header.
        *heading* overrides the theme's default author line when given."""
        embed = discord.Embed(title = title, description = desc)
        embed.colour = self.colour
        embed.set_author(name = heading or self.heading, icon_url = self.icon_url)
        return embed
# Re-export the Theme members at module level so callers can write
# e.g. ``embeds.ERROR`` instead of ``embeds.Theme.ERROR``.
ERROR = Theme.ERROR
MOD_MESSAGE = Theme.MOD_MESSAGE
HELP = Theme.HELP
SEARCH_RESULT = Theme.SEARCH_RESULT
GENERIC = Theme.GENERIC
ABOUT = Theme.ABOUT
FLAG_FACT = Theme.FLAG_FACT
FEED = Theme.FEED
USER_INFO = Theme.USER_INFO
CHOICE = Theme.CHOICE
DRAW = Theme.DRAW
COUNTDOWN = Theme.COUNTDOWN
5055954 | <filename>class work/JacobLedbetter_11,23,15_pdf 6-10/pong.py
#Pong
#<NAME>
#12/4/13
#--------------------Notes--------------------------------------------------------
#In computer graphics, a sprite is a two-dimensional image or animation that is
# integrated into a larger scene.
#--------Import Libraries-------------------------------------------------------
import pygame
#-------------Color Pallet------------------------
# RGB tuples used for sprites and text.
black = ( 0, 0, 0)
white = (255,255,255)
red = (255, 0, 0)
green = ( 0,255, 0)
blue = ( 0, 0,255)
#-------------Initializations-----------------------
pygame.init()
# Window dimensions in pixels.
screensize_x=700
screensize_y=500
screensize=[screensize_x,screensize_y]
screen_color=black
screen = pygame.display.set_mode(screensize)
pygame.display.set_caption("Pong")
font = pygame.font.Font(None, 36)
background = pygame.Surface(screen.get_size())
clock=pygame.time.Clock()
# Shared paddle dimensions (used by both Player instances).
paddle_width=20
paddle_height=80
#--------------Player Sprite-------------------
class Player(pygame.sprite.Sprite):
    """A paddle sprite moved each frame by its speed_x / speed_y velocity."""

    def __init__(self, x, y):
        super().__init__()
        self.width = paddle_width
        self.height = paddle_height
        self.image = pygame.Surface([self.width, self.height])
        self.image.fill(white)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.speed_x = 0
        self.speed_y = 0

    def move(self):
        """Advance the paddle by its current velocity."""
        self.rect.x += self.speed_x
        self.rect.y += self.speed_y

    def collide(self):
        """Clamp the paddle so it stays fully inside the screen bounds."""
        self.rect.y = max(0, min(self.rect.y, screensize_y - self.height))
        self.rect.x = max(0, min(self.rect.x, screensize_x - self.width))
#--------------Ball Sprite-------------------
class Ball(pygame.sprite.Sprite):
    """The pong ball: a small blue square that bounces around the screen."""

    def __init__(self):
        super().__init__()
        self.width = 10
        self.height = 10
        self.image = pygame.Surface([self.width, self.height])
        self.image.fill(blue)
        self.rect = self.image.get_rect()
        # Start centred, moving towards the left paddle.
        self.rect.x = screensize_x / 2
        self.rect.y = screensize_y / 2
        self.speed_x = -3
        self.speed_y = 3

    def move(self):
        """Advance the ball by its current velocity."""
        self.rect.x += self.speed_x
        self.rect.y += self.speed_y

    def collide(self):
        """Reflect the velocity when the ball touches a screen edge."""
        if not 0 <= self.rect.x <= screensize_x - self.width:
            self.speed_x = -self.speed_x
        if not 0 <= self.rect.y <= screensize_y - self.height:
            self.speed_y = -self.speed_y

    def gameover(self):
        """Report whether the ball left past a paddle's wall; recentre it if so."""
        if self.rect.x < 0 or self.rect.x > screensize_x - paddle_width:
            self.rect.x = screensize_x / 2
            return True
        return False
#------------Sprite initialization----------------
# `balls` is used for paddle/ball collision tests; `allsprites` for drawing.
balls = pygame.sprite.Group()
allsprites = pygame.sprite.RenderPlain()
# player2 is the left paddle (W/S keys); player1 the right one (arrow keys).
player2=Player(0,0)
player1=Player(screensize_x-paddle_width,0)
ball=Ball()
balls.add(ball)
allsprites.add(player1,player2,ball)
#-----------Game Initialization------------------
rungame=True
gameover=False
#-----------Main Program Loop---------------------
while rungame:
    screen.fill(screen_color)
    #----------Events-----------------------------
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            rungame = False
        if event.type == pygame.KEYDOWN:
            # Arrow keys drive the right paddle; W/S the left one.
            if event.key == pygame.K_UP:
                player1.speed_y = -4
            if event.key == pygame.K_DOWN:
                player1.speed_y = 4
            if event.key == pygame.K_w:
                player2.speed_y = -4
            if event.key == pygame.K_s:
                player2.speed_y = 4
            if event.key == pygame.K_SPACE:
                gameover = False
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_UP:
                player1.speed_y = 0
            if event.key == pygame.K_DOWN:
                player1.speed_y = 0
            if event.key == pygame.K_w:
                player2.speed_y = 0
            if event.key == pygame.K_s:
                player2.speed_y = 0
    #---------Game Logic-----------------------------
    if not gameover:
        player1.move()
        player2.move()
        gameover = ball.gameover()
        ball.move()
        player1.collide()
        # BUG FIX: was `player2.collide` without parentheses — a no-op
        # attribute access, so the left paddle was never clamped on screen.
        player2.collide()
        ball.collide()
    if gameover:
        text = font.render("Game Over: Press Space", True, white)
        text_position = text.get_rect(centerx=background.get_width()/2)
        text_position.top = 250
        screen.blit(text, text_position)
    # Bounce the ball off either paddle.
    if pygame.sprite.spritecollide(player1, balls, False):
        ball.speed_x = -1 * ball.speed_x
    if pygame.sprite.spritecollide(player2, balls, False):
        ball.speed_x = -1 * ball.speed_x
    #------------Update Drawings-------------------------
    allsprites.draw(screen)
    pygame.display.flip()
    clock.tick(60)
pygame.quit()
| StarcoderdataPython |
6549669 | import numpy as np
import metrics
import datasets
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import KFold
from hypothesis import TreeHypothesis
from consistency import MCConsistentTree
from hypothesis_search import MCHypothesisSearch
from algorithms import crembo
def compress_tree(n_estimators, max_tree_depth, max_forest_depth, X_train, y_train, c, weight=None, X_val=None,
                  y_val=None, score=None, delta=1):
    """Train a random forest and distill it into single decision trees.

    Returns (rf, hypotheses, f_med, f_voting, f_b): the forest, its member
    trees wrapped as hypotheses, the CREMBO/MED tree, a tree fit on the
    CREMBO-produced labels, and a plain benchmark tree.
    """
    # All but MED are trained on x_val as well
    X_train = np.concatenate([X_train, X_val], axis=0)
    y_train = np.concatenate([y_train, y_val], axis=0)
    # train benchmark tree
    b_tree = DecisionTreeClassifier(max_depth=max_tree_depth, class_weight=weight)
    b_tree.fit(X_train, y_train)
    f_b = TreeHypothesis('Tree_bench', b_tree)
    # train random forest to create a collection of hypotheses
    rf = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_forest_depth, class_weight=weight)
    rf.fit(X_train, y_train)
    # get trees from forest
    hypotheses = []
    for i in range(len(rf.estimators_)):
        name = f'Tree_{i}'
        hypotheses.append(TreeHypothesis(name, f=rf.estimators_[i]))
    # define consistency and hypothesis search algorithms
    consistency = MCConsistentTree(depth=max_tree_depth, class_weight=weight)
    a = MCHypothesisSearch(consistency, X_val=X_val, y_val=y_val, score=score)
    # CREMBO returns the selected hypothesis, its depth, and the labels it
    # assigns to the training set (y1).
    f_med, depth, y1 = crembo(s=X_train, t=hypotheses, c=c, a=a, delta=delta)
    # train a tree with all the data labels from MCMA
    tree = DecisionTreeClassifier(max_depth=max_tree_depth, class_weight=weight)
    tree.fit(X_train, y1)
    f_voting = TreeHypothesis('Tree_voting', tree)
    return rf, hypotheses, f_med, f_voting, f_b
def robustness_agreement_exp(dataset, max_tree_depth, forest_depth, num_trees, num_experiments=1,
                             score='accuracy', weight=None, n_splits=10, delta=1):
    """Repeat robustness_exp with agreement scoring and print averaged results."""
    scores = []
    means = []  # per-run mean scores; collected but only the agreement is printed
    for i in range(num_experiments):
        agreement, mean_score = robustness_exp(dataset, max_tree_depth, forest_depth, num_trees, num_experiments=1,
                                               score=score, weight=weight, n_splits=n_splits, use_agreement=True, delta=delta)
        means.append(mean_score)
        scores.append(agreement)
    print('\nFinal results:')
    # Average the per-run agreement vectors; order is RF, BM, VT, MED.
    scores = np.asarray(scores)
    scores = scores.mean(axis=0)
    print(f'Average Agreement score: RF {scores[0]}, BM {scores[1]}, VT {scores[2]}, MED {scores[3]}')
def robustness_exp(dataset, max_tree_depth, forest_depth, num_trees, num_experiments=1,
                   score='accuracy', weight=None, n_splits=10, use_agreement=False, delta=1):
    """K-fold robustness run on a fixed held-out test set.

    Returns (agreement, means) where agreement is None unless use_agreement
    is set, and means are the fold-averaged scores.
    """
    kf_scores = []
    kf = KFold(n_splits=n_splits)
    x, y, X_test, y_test = datasets.prepare_data(dataset, return_test=True)
    c = datasets.get_number_of_classes(dataset)
    score_func, score_metric = metrics.get_socre_foncs(score)
    trees = []  # (rf, benchmark, voting, med) tuples, one per fold*experiment
    for train, test in kf.split(x):
        # Only the fold's training portion is used; the fold test split is
        # discarded because evaluation happens on the fixed X_test.
        X_train, _, y_train, _ = x[train], x[test], y[train], y[test]
        X_train, X_val, y_train, y_val = datasets.prepare_val(X_train, y_train)
        k_scores = []
        for k in range(num_experiments):
            rf, _, f_med, f_all, f_m = compress_tree(num_trees, max_tree_depth, forest_depth, X_train, y_train,
                                                     c, weight=weight, X_val=X_val, y_val=y_val,
                                                     score=score_metric, delta=delta)
            k_scores.append(score_func(rf, None, f_med, f_all, f_m, X_train, y_train, X_test, y_test))
            trees.append((rf, f_m, f_all, f_med))
        kf_scores.append(metrics.average_scores(k_scores, num_experiments))
    means = metrics.mean_and_std(kf_scores, mean_only=True)
    output = metrics.agreement_score(trees, X_test) if use_agreement else None
    return output, means
def generalization_exp(dataset, max_tree_depth, forest_depth, num_trees, num_experiments=1,
                       score='accuracy', weight=None, n_splits=10, delta=1):
    """K-fold generalization experiment: evaluate on each fold's own test split
    and print per-model averaged means and win counts."""
    kf_scores = []
    kf = KFold(n_splits=n_splits)
    x, y, _, _, = datasets.prepare_data(dataset, return_test=False)
    c = datasets.get_number_of_classes(dataset)
    score_func, score_metric = metrics.get_socre_foncs(score)
    for k in range(num_experiments):
        f_scores = []
        for train, test in kf.split(x):
            X_train, X_test, y_train, y_test = x[train], x[test], y[train], y[test]
            X_train, X_val, y_train, y_val = datasets.prepare_val(X_train, y_train)
            rf, _, f_med, f_all, f_m = compress_tree(num_trees, max_tree_depth, forest_depth, X_train, y_train,
                                                     c, weight=weight, X_val=X_val, y_val=y_val,
                                                     score=score_metric, delta=delta)
            f_scores.append(score_func(rf, None, f_med, f_all, f_m, X_train, y_train, X_test, y_test))
        mean_var_win = metrics.mean_and_std(f_scores, mean_only=False)
        kf_scores.append(mean_var_win)
    print('\nFinal results:')
    print(f'Average RF mean {sum([score[0] for score in kf_scores]) / num_experiments}, var {sum([score[1] for score in kf_scores]) / num_experiments}')
    # mean_and_std lays out (rf_mean, rf_var, then per-model mean/var/wins
    # triples) — walk the triples for BM, VT, MED.
    idx = 2
    for t in ('BM', 'VT', 'MED'):
        t_mean = sum([score[idx] for score in kf_scores]) / num_experiments
        t_wins = sum([score[idx + 2] for score in kf_scores]) / num_experiments
        idx += 3
        print(f'Average {t} mean {t_mean}, wins {t_wins}')
    return
| StarcoderdataPython |
3316178 | import os
import os.path as op
import json
# import logging
import base64
import yaml
import errno
import io
import math
from PIL import Image, ImageDraw
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .box_label_loader import LabelLoader
def load_linelist_file(linelist_file):
    """Read a linelist file (one integer line-index per line).

    Returns the list of indexes, or None when linelist_file is None.
    """
    if linelist_file is None:
        return None
    with open(linelist_file, 'r') as fp:
        return [int(line.strip()) for line in fp]
def img_from_base64(imagestring):
    """Decode a base64 string into an RGB PIL image; return None on bad input."""
    try:
        raw = base64.b64decode(imagestring)
        return Image.open(io.BytesIO(raw)).convert('RGB')
    except ValueError:
        # Covers malformed base64 (binascii.Error subclasses ValueError).
        return None
def load_from_yaml_file(yaml_file):
    """Parse *yaml_file* with PyYAML's fast C loader and return the document."""
    with open(yaml_file, 'r') as handle:
        return yaml.load(handle, Loader=yaml.CLoader)
def find_file_path_in_yaml(fname, root):
    """Resolve *fname* either as given or relative to *root*.

    Returns None when fname is None; raises FileNotFoundError when the file
    exists at neither location.
    """
    if fname is None:
        return None
    if op.isfile(fname):
        return fname
    candidate = op.join(root, fname)
    if op.isfile(candidate):
        return candidate
    raise FileNotFoundError(
        errno.ENOENT, os.strerror(errno.ENOENT), candidate
    )
def create_lineidx(filein, idxout):
    """Write a .lineidx companion file: the byte offset of every line in filein.

    Written to a .tmp file first, then renamed, so a partially-built index is
    never left under the final name.
    """
    tmp_path = idxout + '.tmp'
    with open(filein, 'r') as src, open(tmp_path, 'w') as dst:
        fsize = os.fstat(src.fileno()).st_size
        pos = 0
        while pos != fsize:
            dst.write(f"{pos}\n")
            src.readline()
            pos = src.tell()
    os.rename(tmp_path, idxout)
def read_to_character(fp, c):
    """Read *fp* in 32-char chunks until character *c*; return the text before it.

    Asserts if EOF is reached before *c* is found.
    """
    pieces = []
    while True:
        chunk = fp.read(32)
        assert chunk != ''
        cut = chunk.find(c)
        if cut >= 0:
            pieces.append(chunk[:cut])
            return ''.join(pieces)
        pieces.append(chunk)
class TSVFile(object):
    """Random access to the rows of a TSV file via a companion .lineidx file
    that stores one byte offset per row."""
    def __init__(self, tsv_file, generate_lineidx=False):
        self.tsv_file = tsv_file
        self.lineidx = op.splitext(tsv_file)[0] + '.lineidx'
        self._fp = None        # lazily-opened file handle
        self._lineidx = None   # lazily-loaded list of row byte offsets
        # the process always keeps the process which opens the file.
        # If the pid is not equal to the current pid, we will re-open the file.
        self.pid = None
        # generate lineidx if not exist
        if not op.isfile(self.lineidx) and generate_lineidx:
            create_lineidx(self.tsv_file, self.lineidx)
    def __del__(self):
        if self._fp:
            self._fp.close()
    def __str__(self):
        return "TSVFile(tsv_file='{}')".format(self.tsv_file)
    def __repr__(self):
        return str(self)
    def num_rows(self):
        """Number of rows, i.e. the number of offsets in the lineidx."""
        self._ensure_lineidx_loaded()
        return len(self._lineidx)
    def seek(self, idx):
        """Return row *idx* as a list of stripped tab-separated fields."""
        self._ensure_tsv_opened()
        self._ensure_lineidx_loaded()
        try:
            pos = self._lineidx[idx]
        except:
            # Re-raise unchanged; kept broad so IndexError etc. propagate.
            raise
        self._fp.seek(pos)
        return [s.strip() for s in self._fp.readline().split('\t')]
    def seek_first_column(self, idx):
        """Return only the first column of row *idx* (cheaper than seek())."""
        self._ensure_tsv_opened()
        self._ensure_lineidx_loaded()
        pos = self._lineidx[idx]
        self._fp.seek(pos)
        return read_to_character(self._fp, '\t')
    def get_key(self, idx):
        # By convention the first column is the row's key (e.g. image id).
        return self.seek_first_column(idx)
    def __getitem__(self, index):
        return self.seek(index)
    def __len__(self):
        return self.num_rows()
    def _ensure_lineidx_loaded(self):
        if self._lineidx is None:
            with open(self.lineidx, 'r') as fp:
                self._lineidx = [int(i.strip()) for i in fp.readlines()]
    def _ensure_tsv_opened(self):
        # Open lazily, and re-open if the owning process changed (e.g. after a
        # fork by a DataLoader worker) since file handles can't be shared.
        if self._fp is None:
            self._fp = open(self.tsv_file, 'r')
            self.pid = os.getpid()
        if self.pid != os.getpid():
            self._fp = open(self.tsv_file, 'r')
            self.pid = os.getpid()
class CompositeTSVFile():
    """A read-only view over several TSVFiles, indexed through a seq file whose
    rows are '<source index>\t<row index>' pairs."""
    def __init__(self, file_list, seq_file, root='.'):
        # file_list may be given directly or as the path of a list file.
        if isinstance(file_list, str):
            self.file_list = load_list_file(file_list)
        else:
            assert isinstance(file_list, list)
            self.file_list = file_list
        self.seq_file = seq_file
        self.root = root
        self.initialized = False
        self.initialize()
    def get_key(self, index):
        # Prefix the row key with its source file name to keep keys unique
        # across the composite.
        idx_source, idx_row = self.seq[index]
        k = self.tsvs[idx_source].get_key(idx_row)
        return '_'.join([self.file_list[idx_source], k])
    def num_rows(self):
        return len(self.seq)
    def __getitem__(self, index):
        idx_source, idx_row = self.seq[index]
        return self.tsvs[idx_source].seek(idx_row)
    def __len__(self):
        return len(self.seq)
    def initialize(self):
        '''
        this function has to be called in init function if cache_policy is
        enabled. Thus, let's always call it in init funciton to make it simple.
        '''
        if self.initialized:
            return
        self.seq = []
        with open(self.seq_file, 'r') as fp:
            for line in fp:
                parts = line.strip().split('\t')
                self.seq.append([int(parts[0]), int(parts[1])])
        self.tsvs = [TSVFile(op.join(self.root, f)) for f in self.file_list]
        self.initialized = True
def load_list_file(fname):
    """Read *fname* and return its lines stripped, dropping one trailing blank."""
    with open(fname, 'r') as fp:
        result = [line.strip() for line in fp]
    if result and result[-1] == '':
        result.pop()
    return result
class TSVDataset(object):
def __init__(self, img_file, label_file=None, hw_file=None,
linelist_file=None, imageid2idx_file=None):
"""Constructor.
Args:
img_file: Image file with image key and base64 encoded image str.
label_file: An optional label file with image key and label information.
A label_file is required for training and optional for testing.
hw_file: An optional file with image key and image height/width info.
linelist_file: An optional file with a list of line indexes to load samples.
It is useful to select a subset of samples or duplicate samples.
"""
self.img_file = img_file
self.label_file = label_file
self.hw_file = hw_file
self.linelist_file = linelist_file
self.img_tsv = TSVFile(img_file)
self.label_tsv = None if label_file is None else TSVFile(label_file, generate_lineidx=True)
self.hw_tsv = None if hw_file is None else TSVFile(hw_file)
self.line_list = load_linelist_file(linelist_file)
self.imageid2idx = None
if imageid2idx_file is not None:
self.imageid2idx = json.load(open(imageid2idx_file, 'r'))
self.transforms = None
def __len__(self):
if self.line_list is None:
if self.imageid2idx is not None:
assert self.label_tsv is not None, "label_tsv is None!!!"
return self.label_tsv.num_rows()
return self.img_tsv.num_rows()
else:
return len(self.line_list)
def __getitem__(self, idx):
img = self.get_image(idx)
img_size = img.size # w, h
annotations = self.get_annotations(idx)
# print(idx, annotations)
target = self.get_target_from_annotations(annotations, img_size, idx)
img, target = self.apply_transforms(img, target)
if self.transforms is None:
return img, target, idx, 1.0
else:
new_img_size = img.shape[1:]
scale = math.sqrt(float(new_img_size[0] * new_img_size[1]) / float(img_size[0] * img_size[1]))
return img, target, idx, scale
def get_line_no(self, idx):
return idx if self.line_list is None else self.line_list[idx]
def get_image(self, idx):
line_no = self.get_line_no(idx)
if self.imageid2idx is not None:
assert self.label_tsv is not None, "label_tsv is None!!!"
row = self.label_tsv.seek(line_no)
annotations = json.loads(row[1])
imageid = annotations["img_id"]
line_no = self.imageid2idx[imageid]
row = self.img_tsv.seek(line_no)
# use -1 to support old format with multiple columns.
img = img_from_base64(row[-1])
return img
def get_annotations(self, idx):
line_no = self.get_line_no(idx)
if self.label_tsv is not None:
row = self.label_tsv.seek(line_no)
annotations = json.loads(row[1])
return annotations
else:
return []
def get_target_from_annotations(self, annotations, img_size, idx):
# This function will be overwritten by each dataset to
# decode the labels to specific formats for each task.
return annotations
def apply_transforms(self, image, target=None):
# This function will be overwritten by each dataset to
# apply transforms to image and targets.
return image, target
def get_img_info(self, idx):
if self.imageid2idx is not None:
assert self.label_tsv is not None, "label_tsv is None!!!"
line_no = self.get_line_no(idx)
row = self.label_tsv.seek(line_no)
annotations = json.loads(row[1])
return {"height": int(annotations["img_w"]), "width": int(annotations["img_w"])}
if self.hw_tsv is not None:
line_no = self.get_line_no(idx)
row = self.hw_tsv.seek(line_no)
try:
# json string format with "height" and "width" being the keys
data = json.loads(row[1])
if type(data) == list:
return data[0]
elif type(data) == dict:
return data
except ValueError:
# list of strings representing height and width in order
hw_str = row[1].split(' ')
hw_dict = {"height": int(hw_str[0]), "width": int(hw_str[1])}
return hw_dict
def get_img_key(self, idx):
    """Return the key identifying the image at dataset index *idx*.

    Sources are tried in order of row-read overhead: label TSV via the
    imageid2idx map, then the hw TSV, the label TSV key column, and
    finally the image TSV itself.
    """
    line_no = self.get_line_no(idx)
    if self.imageid2idx is not None:
        assert self.label_tsv is not None, "label_tsv is None!!!"
        row = self.label_tsv.seek(line_no)
        return json.loads(row[1])["img_id"]
    if self.hw_tsv:
        source = self.hw_tsv
    elif self.label_tsv:
        source = self.label_tsv
    else:
        source = self.img_tsv
    return source.seek(line_no)[0]
class TSVYamlDataset(TSVDataset):
    """ TSVDataset taking a Yaml file for easy function call

    The YAML config names the component TSV files (image, label, hw,
    linelist, imageid2idx); paths are resolved relative to *root*,
    defaulting to the directory containing the YAML file itself.
    """
    def __init__(self, yaml_file, root=None, replace_clean_label=False):
        print("Reading {}".format(yaml_file))
        self.cfg = load_from_yaml_file(yaml_file)
        if root:
            self.root = root
        else:
            self.root = op.dirname(yaml_file)
        # 'img' is required; the remaining config entries are optional.
        img_file = find_file_path_in_yaml(self.cfg['img'], self.root)
        label_file = find_file_path_in_yaml(self.cfg.get('label', None),
                                            self.root)
        hw_file = find_file_path_in_yaml(self.cfg.get('hw', None), self.root)
        linelist_file = find_file_path_in_yaml(self.cfg.get('linelist', None),
                                               self.root)
        imageid2idx_file = find_file_path_in_yaml(self.cfg.get('imageid2idx', None),
                                                  self.root)
        if replace_clean_label:
            # Swap the raw labels for their cleaned counterpart; the
            # configured label path must contain "raw_label" for this.
            assert ("raw_label" in label_file)
            label_file = label_file.replace("raw_label", "clean_label")
        super(TSVYamlDataset, self).__init__(
            img_file, label_file, hw_file, linelist_file, imageid2idx_file)
class ODTSVDataset(TSVYamlDataset):
    """
    Generic TSV dataset format for Object Detection.
    """
    def __init__(self, yaml_file, extra_fields=(), transforms=None,
                 is_load_label=True, **kwargs):
        if yaml_file is None:
            # Allow constructing an empty shell (e.g. for later manual init).
            return
        super(ODTSVDataset, self).__init__(yaml_file)
        self.transforms = transforms
        self.is_load_label = is_load_label
        self.attribute_on = False
        # self.attribute_on = kwargs['args'].MODEL.ATTRIBUTE_ON if "args" in kwargs else False
        if self.is_load_label:
            # construct maps
            # The class map may come from a JSON dict ("labelmap" or
            # "jsondict" config key) or from a plain TSV listing one
            # class name per line.
            jsondict_file = find_file_path_in_yaml(
                self.cfg.get("labelmap", None), self.root
            )
            if jsondict_file is None:
                jsondict_file = find_file_path_in_yaml(
                    self.cfg.get("jsondict", None), self.root
                )
            if "json" in jsondict_file:
                jsondict = json.load(open(jsondict_file, 'r'))
                if "label_to_idx" not in jsondict:
                    # Old format: the file itself IS the label->idx map.
                    jsondict = {'label_to_idx': jsondict}
            elif "tsv" in jsondict_file:
                # Class indices are assigned in file order starting at 1;
                # index 0 is reserved for the background class below.
                label_to_idx = {}
                counter = 1
                with open(jsondict_file) as f:
                    for line in f:
                        label_to_idx[line.strip()] = counter
                        counter += 1
                jsondict = {'label_to_idx': label_to_idx}
            else:
                # Unrecognized label-map file type.
                assert (0)
            self.labelmap = {}
            self.class_to_ind = jsondict['label_to_idx']
            self.class_to_ind['__background__'] = 0
            self.ind_to_class = {v: k for k, v in self.class_to_ind.items()}
            self.labelmap['class_to_ind'] = self.class_to_ind
            if self.attribute_on:
                # NOTE(review): attribute_on is hard-coded False above, so
                # this branch is currently unreachable — confirm intent.
                self.attribute_to_ind = jsondict['attribute_to_idx']
                self.attribute_to_ind['__no_attribute__'] = 0
                self.ind_to_attribute = {v: k for k, v in self.attribute_to_ind.items()}
                self.labelmap['attribute_to_ind'] = self.attribute_to_ind
            self.label_loader = LabelLoader(
                labelmap=self.labelmap,
                extra_fields=extra_fields,
            )

    def get_target_from_annotations(self, annotations, img_size, idx):
        # Old-style annotations are a bare list of objects; wrap them so
        # both formats can be handled uniformly.
        if isinstance(annotations, list):
            annotations = {"objects": annotations}
        if self.is_load_label:
            return self.label_loader(annotations['objects'], img_size)

    def apply_transforms(self, img, target=None):
        # Delegate to the transforms passed at construction time, if any.
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target
| StarcoderdataPython |
3350645 | <filename>SalishSeaTools/salishsea_tools/stormtools.py
# Copyright 2013-2021 The Salish Sea MEOPAR contributors
# and The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of tools for storm surge results from the Salish Sea Model.
"""
from __future__ import division
import csv
import datetime
from io import BytesIO
from xml.etree import cElementTree as ElementTree
import arrow
from dateutil import tz
import netCDF4 as NC
import numpy as np
import pandas as pd
import requests
from salishsea_tools import tidetools
from salishsea_tools.places import PLACES
def storm_surge_risk_level(site_name, max_ssh, ttide):
    """Classify the storm surge risk at the tide gauge *site_name*.

    Thresholds, taken from :py:data:`salishsea_tools.places.PLACES`:

    * the highest predicted tide,
    * half way between the highest predicted tide and the highest
      historical water level.

    :arg str site_name: Name of a tide gauge station at which to
                        calculate the storm surge risk.

    :arg float max_ssh: Maximum sea surface height predicted for
                        :kbd:`site_name`.

    :arg ttide: Tidal predictions from ttide.
    :type ttide: :py:class:`pandas.DataFrame`

    :returns: :py:obj:`None` for no risk (below the highest tide),
              :kbd:`moderate risk` up to the half-way threshold, and
              :kbd:`extreme risk` above it.
    """
    try:
        site_info = PLACES[site_name]
        max_tide_ssh = max(ttide.pred_all) + site_info['mean sea lvl']
        max_historic_ssh = site_info['hist max sea lvl']
    except KeyError as e:
        raise KeyError(
            'place name or info key not found in '
            'salishsea_tools.places.PLACES: {}'.format(e))
    extreme_threshold = max_tide_ssh + (max_historic_ssh - max_tide_ssh) / 2
    if max_ssh < max_tide_ssh:
        return None
    if max_ssh > extreme_threshold:
        return 'extreme risk'
    return 'moderate risk'
def convert_date_seconds(times, start):
    """Convert model output times in seconds to datetime objects.

    Note: Doug has a better version of this in nc_tools.timestamp

    :arg times: array of seconds since the start date of a simulation
                (from time_counter in model output).
    :type times: iterable of int

    :arg start: start date of the simulation in format '01-Nov-2006'
    :type start: str

    :returns: list of datetime objects representing the model output times
    """
    # Parse the start date once; it is invariant across the loop
    # (previously re-parsed on every iteration).
    arr_start = arrow.Arrow.strptime(start, '%d-%b-%Y')
    # NOTE(review): arrow >= 0.15 removed plural-unit replace(); this
    # relies on the legacy behaviour where replace(seconds=s) shifts the
    # time (equivalent to shift(seconds=s)) — confirm pinned arrow version.
    return [arr_start.replace(seconds=s).datetime for s in times]
def convert_date_hours(times, start):
    """Convert model output times in hours to datetime objects.

    :arg times: array of hours since the start date of a simulation
                (from time_counter in model output).
    :type times: iterable of int

    :arg start: start date of the simulation in format '01-Nov-2006'
    :type start: str

    :returns: list of datetime objects representing the model output times
    """
    # Parse the start date once; it is invariant across the loop
    # (previously re-parsed on every iteration).
    arr_start = arrow.Arrow.strptime(start, '%d-%b-%Y')
    # NOTE(review): arrow >= 0.15 removed plural-unit replace(); this
    # relies on the legacy behaviour where replace(hours=h) shifts the
    # time (equivalent to shift(hours=h)) — confirm pinned arrow version.
    return [arr_start.replace(hours=h).datetime for h in times]
def get_CGRF_weather(start, end, grid):
    """
    Returns the CGRF weather between the dates start and end at the
    grid point defined in grid.

    :arg start: string containing the start date of the CGRF collection in
                format '01-Nov-2006'
    :type start: str

    :arg end: string containing the end date of the CGRF collection in
              format '01-Nov-2006'
    :type end: str

    :arg grid: array of the CGRF grid coordinates for the point of interest
               eg. [244,245]
    :type grid: arr of ints

    :returns: windspeed, winddir, pressure and time array from CGRF data for
              the times indicated.
              windspeed is in m/s
              winddir direction wind is blowing to in degrees measured
              counterclockwise from East.
              pressure is in Pa.
    """
    u10 = []
    v10 = []
    pres = []
    time = []
    st_ar = arrow.Arrow.strptime(start, '%d-%b-%Y')
    end_ar = arrow.Arrow.strptime(end, '%d-%b-%Y')
    # CGRF files are stored one file per day per variable.
    CGRF_path = '/ocean/dlatorne/MEOPAR/CGRF/NEMO-atmos/'
    for r in arrow.Arrow.range('day', st_ar, end_ar):
        mstr = "{0:02d}".format(r.month)
        dstr = "{0:02d}".format(r.day)
        # u
        strU = 'u10_y' + str(r.year) + 'm' + mstr + 'd' + dstr + '.nc'
        fU = NC.Dataset(CGRF_path+strU)
        var = fU.variables['u_wind'][:, grid[0], grid[1]]
        u10.extend(var[:])
        # time
        tim = fU.variables['time_counter']
        # Offset each day's hours by whole days elapsed since the start.
        # NOTE(review): the (r.day - st_ar.day) day-of-month arithmetic
        # breaks across a month boundary — confirm callers only request
        # ranges within a single month.
        time.extend(tim[:] + (r.day-st_ar.day)*24)
        times = convert_date_hours(time, start)
        # v
        strV = 'v10_y' + str(r.year) + 'm' + mstr + 'd' + dstr + '.nc'
        fV = NC.Dataset(CGRF_path+strV)
        var = fV.variables['v_wind'][:, grid[0], grid[1]]
        v10.extend(var[:])
        # pressure
        strP = 'slp_corr_y' + str(r.year) + 'm' + mstr + 'd' + dstr + '.nc'
        fP = NC.Dataset(CGRF_path+strP)
        var = fP.variables['atmpres'][:, grid[0], grid[1]]
        pres.extend(var[:])
    u10s = np.array(u10)
    v10s = np.array(v10)
    press = np.array(pres)
    windspeed = np.sqrt(u10s**2+v10s**2)
    # Direction the wind blows TO, degrees CCW from East, in [0, 360).
    winddir = np.arctan2(v10, u10) * 180 / np.pi
    winddir = winddir + 360 * (winddir < 0)
    return windspeed, winddir, press, times
def combine_data(data_list):
    """Collect model fields from several netCDF handles into dicts.

    Used for easy handling of output from thalweg and surge stations.

    :arg data_list: dict mapping station name to an open netCDF handle,
                    e.g. {'Thalweg1': f1, 'Thalweg2': f2, ...}
                    where f1 = NC.Dataset('1h_Thalweg1.nc', 'r')
    :type data_list: dict

    :returns: dicts us, vs, lats, lons, tmps, sals, sshs keyed like
              *data_list*, holding the zonal velocity, meridional
              velocity, latitude, longitude, temperature, salinity and
              sea surface height variables for each station.
              For example, us['Thalweg1'] is the zonal velocity from
              the Thalweg 1 station.
    """
    # Variable names in the same order as the returned dicts.
    var_names = ('vozocrtx', 'vomecrty', 'nav_lat', 'nav_lon',
                 'votemper', 'vosaline', 'sossheig')
    collected = tuple({} for _ in var_names)
    for key, handle in data_list.items():
        for store, var_name in zip(collected, var_names):
            store[key] = handle.variables[var_name]
    return collected
def get_variables(fU, fV, fT, timestamp, depth):
    """Extract masked U, V, SSH, S, T fields at one time and depth.

    :arg fU: netcdf handle for Ugrid model output
    :type fU: netcdf handle

    :arg fV: netcdf handle for Vgrid model output
    :type fV: netcdf handle

    :arg fT: netcdf handle for Tgrid model output
    :type fT: netcdf handle

    :arg timestamp: the timestamp for desired model output
    :type timestamp: int

    :arg depth: the model z-level for desired output
    :type depth: int

    :returns: masked arrays U, V, E, S, T of zonal velocity, meridional
              velocity, sea surface height, salinity, and temperature
              at the specified time and z-level.
    """
    def _land_masked(field):
        # Land points are stored as exact zeros; mask them so they
        # plot as white.
        return np.ma.array(field, mask=(field == 0))

    U = _land_masked(fU.variables['vozocrtx'][timestamp, depth, :, :])
    V = _land_masked(fV.variables['vomecrty'][timestamp, depth, :, :])
    # Sea surface height lives on the T grid and has no depth axis.
    E = _land_masked(fT.variables['sossheig'][timestamp, :, :])
    S = _land_masked(fT.variables['vosaline'][timestamp, depth, :, :])
    T = _land_masked(fT.variables['votemper'][timestamp, depth, :, :])
    return U, V, E, S, T
def get_EC_observations(station, start_day, end_day):
    """Gather hourly Environment Canada (EC) weather observations for the
    station and dates indicated.

    The hourly EC data is stored in monthly files, so only a single month
    can be downloaded at a time.

    :arg station: Station name (no spaces). e.g. 'PointAtkinson'
    :type station: str

    :arg start_day: Start date in the format '01-Dec-2006'.
    :type start_day: str

    :arg end_day: End date in the format '01-Dec-2006'.
    :type end_day: str

    :returns: wind_speed, wind_dir, temperature, times, lat and lon:
              wind speed is in m/s
              wind_dir is direction wind is blowing to in degrees measured
              counterclockwise from East
              temperature is in Kelvin
              time is UTC
              Also returns latitude and longitude of the station.
    """
    # These ids have been identified as interesting locations in the SoG.
    # It is not necessarily a complete list.
    station_ids = {
        'Pam Rocks': 6817,
        'Sisters Islet': 6813,
        'Entrance Island': 29411,
        'Sand Heads': 6831,
        # NOTE: YVR station name changed in 2013. Older data use 889.
        'YVR': 51442,
        'YVR_old': 889,
        'Point Atkinson': 844,
        'Victoria': 10944,
        'Campbell River': 145,
        # NOTE: not exactly Patricia Bay. The EC name is Victoria Hartland CS
        '<NAME>': 11007,
        'Esquimalt': 52,
        'Discovery Island': 27226,
        'Race Rocks': 10943,
        'Saturna Island': 96,
        'Tsawwassen': 50228,
        'Ballenas Islands': 138,
        'Comox Airport': 155,
        'Squamish Airport': 336,
    }
    # Create aliases to recognize places.py definitions
    names = [
        'Campbell River', 'Entrance Island', 'Pam Rocks', '<NAME>',
        'Point Atkinson', 'Sand Heads', 'Sisters Islet',
    ]
    aliases = [
        'CampbellRiver', 'EntranceIsland', 'PamRocks', 'PatriciaBay',
        'PointAtkinson', 'Sandheads', 'SistersIsland',
    ]
    for alias, name in zip(aliases, names):
        station_ids[alias] = station_ids[name]
    st_ar = arrow.Arrow.strptime(start_day, '%d-%b-%Y')
    end_ar = arrow.Arrow.strptime(end_day, '%d-%b-%Y')
    # Fixed UTC-8 offset used for the EC local timestamps.
    PST = tz.tzoffset("PST", -28800)
    wind_spd, wind_dir, temp = [], [], []
    # EC bulk-data endpoint; timeframe=1 requests hourly data for the
    # month containing the start date.
    url = 'http://climate.weather.gc.ca/climate_data/bulk_data_e.html'
    query = {
        'timeframe': 1,
        'stationID': station_ids[station],
        'format': 'xml',
        'Year': st_ar.year,
        'Month': st_ar.month,
        'Day': 1,
    }
    response = requests.get(url, params=query)
    tree = ElementTree.parse(BytesIO(response.content))
    root = tree.getroot()
    # read lat and lon
    for raw_info in root.findall('stationinformation'):
        lat = float(raw_info.find('latitude').text)
        lon = float(raw_info.find('longitude').text)
    # read data
    raw_data = root.findall('stationdata')
    times = []
    for record in raw_data:
        day = int(record.get('day'))
        hour = int(record.get('hour'))
        year = int(record.get('year'))
        month = int(record.get('month'))
        t = arrow.Arrow(year, month, day, hour, tzinfo=PST)
        # Keep records from 16:00 PST the day before the start through
        # 15:00 PST on the end day (i.e. whole UTC days).
        selectors = (
            (day == st_ar.day - 1 and hour >= 16)
            or
            (day >= st_ar.day and day < end_ar.day)
            or
            (day == end_ar.day and hour < 16)
        )
        if selectors:
            try:
                wind_spd.append(float(record.find('windspd').text))
                # NOTE(review): t.to('utc') returns a new Arrow and its
                # result is discarded — this call is a no-op; the actual
                # UTC conversion happens in the loop at the end.
                t.to('utc')
                times.append(t.datetime)
            except TypeError:
                # Missing wind speed value → record NaN but keep the time.
                wind_spd.append(float('NaN'))
                t.to('utc')
                times.append(t.datetime)
            try:
                # EC reports direction in tens of degrees.
                wind_dir.append(float(record.find('winddir').text) * 10)
            except:
                wind_dir.append(float('NaN'))
            try:
                # Convert Celsius to Kelvin (approximate, 273 not 273.15).
                temp.append(float(record.find('temp').text)+273)
            except:
                temp.append(float('NaN'))
    wind_spd = np.array(wind_spd) * 1000 / 3600  # km/hr to m/s
    wind_dir = -np.array(wind_dir)+270  # met. direction to cartesian angle
    with np.errstate(invalid='ignore'):
        wind_dir = wind_dir + 360 * (wind_dir < 0)
    temp = np.array(temp)
    # Convert the PST-aware datetimes gathered above to UTC.
    for i in np.arange(len(times)):
        times[i] = times[i].astimezone(tz.tzutc())
    return wind_spd, wind_dir, temp, times, lat, lon
def get_SSH_forcing(boundary, date):
    """A function that returns the ssh forcing for the month of the date and
    boundary indicated.

    :arg str boundary: A string naming the boundary. e.g 'north' or 'west'

    :arg str date: A string indicating the date of interest. e.g. '01-Dec-2006'.
                   The day needs to be the first day of the month.

    :returns: ssh_forc, time_ssh: arrays of the ssh forcing values and
              corresponding times
    """
    date_arr = arrow.Arrow.strptime(date, '%d-%b-%Y')
    year = date_arr.year
    month = date_arr.month
    month = "%02d" % (month,)
    # The north boundary files use a different base file name.
    if boundary == 'north':
        filen = 'sshNorth'
    else:
        filen = 'ssh'
    ssh_path = '/data/nsoontie/MEOPAR/NEMO-forcing/open_boundaries/' + \
        boundary + '/ssh/' + filen + '_y' + str(year) + 'm' + str(month)\
        + '.nc'
    fS = NC.Dataset(ssh_path)
    ssh_forc = fS.variables['sossheig']
    tss = fS.variables['time_counter'][:]
    l = tss.shape[0]
    # Hourly offsets 0 .. l-1 converted to datetimes relative to *date*.
    t = np.linspace(0, l-1, l)  # time array
    time_ssh = convert_date_hours(t, date)
    return ssh_forc, time_ssh
def dateParserMeasured2(s):
    """Parse a '%d-%b-%Y %H:%M:%S ' Pacific time string to a UTC datetime.

    :arg s: date/time string, e.g. '01-Jan-2010 00:00:00 ' (note the
            trailing space required by the format).
    :type s: str

    :returns: timezone-aware :py:class:`datetime.datetime` in UTC
    """
    # Fixed UTC-8 offset (PST). The stdlib timezone replaces the previous
    # dateutil tzoffset; both are fixed offsets so the instants produced
    # are identical.
    PST = datetime.timezone(datetime.timedelta(hours=-8), "PST")
    # convert the string to a datetime object
    unaware = datetime.datetime.strptime(s, "%d-%b-%Y %H:%M:%S ")
    # add in the local time zone (Canada/Pacific)
    aware = unaware.replace(tzinfo=PST)
    # convert to UTC
    return aware.astimezone(datetime.timezone.utc)
def dateParserMeasured(s):
    """Parse a '%Y/%m/%d %H:%M' Pacific time string to a UTC datetime.

    :arg s: date/time string, e.g. '2010/01/01 00:00'
    :type s: str

    :returns: timezone-aware :py:class:`datetime.datetime` in UTC
    """
    # Fixed UTC-8 offset (PST). The stdlib timezone replaces the previous
    # dateutil tzoffset; both are fixed offsets so the instants produced
    # are identical.
    PST = datetime.timezone(datetime.timedelta(hours=-8), "PST")
    # convert the string to a datetime object
    unaware = datetime.datetime.strptime(s, "%Y/%m/%d %H:%M")
    # add in the local time zone (Canada/Pacific)
    aware = unaware.replace(tzinfo=PST)
    # convert to UTC
    return aware.astimezone(datetime.timezone.utc)
def load_tidal_predictions(filename):
    """Load tidal predictions from a file.

    :arg str filename: The path and file name of a CSV file that contains
                       ttide tidal predictions generated by
                       :kbd:`get_ttide_8.m`.

    :returns: ttide, msl: tidal predictions data frame, and the mean sea
              level (the mean component from the harmonic analysis).
    :rtype: 2-tuple of (:py:class:`pandas.DataFrame`, float)
    """
    with open(filename) as f:
        mycsv = list(csv.reader(f))
    # The mean sea level is stored on the 2nd line, 2nd column of the header.
    msl = float(mycsv[1][1])
    ttide = pd.read_csv(
        filename, skiprows=3, parse_dates=[0], date_parser=dateParserMeasured2)
    # Strip the stray whitespace the generator leaves in the column names.
    ttide = ttide.rename(
        columns={
            'time ': 'time',
            ' pred_8 ': 'pred_8',
            ' pred_all ': 'pred_all',
        })
    return ttide, msl
def load_observations(start, end, location):
    """
    Loads tidal observations from the DFO website using tidetools function

    :arg start: a string representing the starting date of the observations.
    :type start: string in format %d-%b-%Y

    :arg end: a string representing the end date of the observations.
    :type end: string in format %d-%b-%Y

    :arg location: a string representing the location for observations
    :type location: a string from the following - PointAtkinson, Victoria,
                    PatriciaBay, CampbellRiver

    :returns: wlev_meas: a dict object with the water level measurements
              referenced to Chart Datum
    """
    # DFO station ids for the supported locations.
    stations = {'PointAtkinson': 7795, 'Victoria': 7120, 'PatriciaBay': 7277,
                'CampbellRiver': 8074}
    statID_PA = stations[location]
    # get_dfo_wlev downloads the observations into this CSV file,
    # which is then read back below.
    filename = 'wlev_' + str(statID_PA) + '_' + start + '_' + end + '.csv'
    tidetools.get_dfo_wlev(statID_PA, start, end)
    wlev_meas = pd.read_csv(filename, skiprows=7, parse_dates=[0],
                            date_parser=dateParserMeasured)
    wlev_meas = wlev_meas.rename(columns={'Obs_date': 'time',
                                          'SLEV(metres)': 'slev'})
    return wlev_meas
def observed_anomaly(ttide, wlev_meas, msl):
    """
    Calculates the observed anomaly (water level obs - tidal predictions).

    :arg ttide: A struc object that contains tidal predictions from
                get_ttide_8.m
    :type ttide: struc with dimensions time, pred_all, pred_8

    :arg wlev_meas: A struc object with observations from DFO
    :type wlev_meas: struc with dimensions time, slev

    :arg msl: The mean sea level from tidal predictions
    :type msl: float

    :returns: ssanomaly: the ssh anomaly (wlev_meas.slev-(ttide.pred_all+msl))
    """
    ssanomaly = np.zeros(len(wlev_meas.time))
    for i in np.arange(0, len(wlev_meas.time)):
        # check that there is a corresponding time
        # if any(wlev_pred.time == wlev_meas.time[i]):
        # Boolean-mask the predictions to the matching observation time;
        # assumes exactly one prediction row matches each observation.
        ssanomaly[i] = (wlev_meas.slev[i] -
                        (ttide.pred_all[ttide.time == wlev_meas.time[i]] +
                         msl))
        if not(ssanomaly[i]):
            # NOTE(review): a legitimately zero-valued anomaly is falsy and
            # is also replaced by NaN here — confirm that is intended.
            ssanomaly[i] = float('Nan')
    return ssanomaly
def modelled_anomaly(sshs, location):
    """Modelled ssh anomaly: all-forcing run minus tides-only run.

    :arg sshs: ssh data keyed by run type ('all_forcing' and
               'tidesonly') and then by location
    :type sshs: dict

    :arg location: string defining the desired location
    :type location: string either "PointAtkinson", "Victoria",
                    "PatriciaBay", "CampbellRiver"

    :returns: anom: the difference between all_forcing and tidesonly
    """
    all_forcing = sshs['all_forcing'][location][:, 0, 0]
    tides_only = sshs['tidesonly'][location][:, 0, 0]
    return all_forcing - tides_only
def correct_model(ssh, ttide, sdt, edt):
    """Correct model ssh for the error of using only 8 tidal constituents.

    :arg ssh: model ssh data (output on the half hour)
    :type ssh: array of numbers

    :arg ttide: tidal predictions on the hour, with columns time,
                pred_all, pred_8
    :type ttide: struc with dimension time, pred_all, pred_8

    :arg sdt: datetime object representing start date of simulation
    :type sdt: datetime object

    :arg edt: datetime object representing end date of simulation
    :type edt: datetime object

    :returns: corr_model: the corrected model output
    """
    # Locate the prediction rows that bracket the simulation window.
    start_idx = ttide.time[ttide.time == sdt].index[0]
    end_idx = ttide.time[ttide.time == edt].index[0]
    # Correction is the gap between the full and 8-constituent predictions.
    difference = np.array(ttide.pred_all - ttide.pred_8)
    # Average consecutive hourly corrections to land on the model's
    # half-hour output times (interpolation would be an alternative).
    corr = 0.5 * (difference[start_idx:end_idx]
                  + difference[start_idx + 1:end_idx + 1])
    return ssh + corr
def surge_tide(ssh, ttide, sdt, edt):
    """Add tidal predictions to a surge-only model run.

    :arg ssh: ssh from surge only model run (output on the half hour)
    :type ssh: array of numbers

    :arg ttide: tidal predictions on the hour
    :type ttide: struc with dimension time, pred_all, pred_8

    :arg sdt: datetime object representing start date of simulation
    :type sdt: datetime object

    :arg edt: datetime object representing end date of simulation
    :type edt: datetime object

    :returns: surgetide: the surge only run with tides added
              (mean not included)
    """
    # Locate the prediction rows that bracket the simulation window.
    start_idx = ttide.time[ttide.time == sdt].index[0]
    end_idx = ttide.time[ttide.time == edt].index[0]
    tide = np.array(ttide.pred_all)
    # Average consecutive hourly predictions onto the model's half-hour
    # output times.
    tide_on_half_hour = 0.5 * (tide[start_idx:end_idx]
                               + tide[start_idx + 1:end_idx + 1])
    return ssh + tide_on_half_hour
def get_statistics(obs, model, t_obs, t_model, sdt, edt):
    """Compute comparison statistics between model output and observations
    over a given analysis period.

    :arg obs: observation data
    :type obs: array

    :arg model: model data
    :type model: array

    :arg t_obs: observations time
    :type t_obs: array

    :arg t_model: model time
    :type t_model: array

    :arg sdt: datetime object representing start date of analysis period
    :type sdt: datetime object

    :arg edt: datetime object representing end date of analysis period
    :type edt: datetime object

    :returns: max_obs, max_model, tmax_obs, tmax_model, mean_error,
              mean_abs_error, rms_error, gamma2 (see Thomson et al 2006),
              correlation matrix, willmott score, mean_obs, mean_model,
              std_obs, std_model
    """
    # Truncate the model to the analysis window; model output is on the
    # half hour, so the window bounds are shifted accordingly, and the
    # final (extra) sample is dropped.
    model_window, time_window = truncate(
        model, t_model, sdt.replace(minute=30), edt.replace(minute=30))
    model_window = model_window[:-1]
    time_window = time_window[:-1]
    # Interpolate the observations onto the (truncated) model times.
    obs_interp = interp_to_model_time(time_window, obs, t_obs)
    error = model_window - obs_interp
    # gamma^2: ratio of error variance to observation variance.
    gamma2 = np.var(error) / np.var(obs_interp)
    max_obs, tmax_obs = _find_max(obs_interp, time_window)
    max_model, tmax_model = _find_max(model_window, time_window)
    return (
        max_obs, max_model, tmax_obs, tmax_model,
        np.mean(error), np.mean(np.abs(error)),
        _rmse(error), gamma2,
        np.corrcoef(obs_interp, model_window),
        willmott_skill(obs_interp, model_window),
        np.mean(obs_interp), np.mean(model_window),
        np.std(obs_interp), np.std(model_window),
    )
def truncate(data, time, sdt, edt):
"""
Returns truncated array for the time period of interest
:arg data: data to be truncated
:type data: array
:arg time: time output associated with data
:type time: array
:arg sdt: datetime object representing start date of analysis period
:type sdt: datetime object
:arg edt: datetime object representing end date of analysis period
:type edt: datetime object
:returns: data_t, time_t, truncated data and time arrays
"""
inds = np.where(time == sdt)[0]
inde = np.where(time == edt)[0]
data_t = np.array(data[inds:inde + 1])
time_t = np.array(time[inds:inde + 1])
return data_t, time_t
def rebase_obs(data, time):
    """Shift hourly observations onto the half hour.

    Half-hour outputs are the average of the two surrounding hourly
    outputs; the final (incomplete) interval is dropped.

    :arg data: data to be rebased
    :type data: array

    :arg time: time outputs associated with data
    :type time: array

    :returns: rebase_data, rebase_time, the data and times shifted by
              half an hour
    """
    # Average neighbouring samples to approximate the half-hour values.
    rebase_data = (data[1:] + data[:-1]) / 2.0
    rebase_time = np.array([t.replace(minute=30) for t in time])[:-1]
    return rebase_data, rebase_time
def _rmse(diff):
return np.sqrt(np.mean(diff**2))
def _find_max(data, time):
max_data = np.nanmax(data)
time_max = time[np.nanargmax(data)]
return max_data, time_max
def willmott_skill(obs, model):
    """Calculate the Willmott skill score of the model. See Willmott 1982.

    :arg obs: observations data
    :type obs: array

    :arg model: model data
    :type model: array

    :returns: ws, the Willmott skill score
    """
    obs_mean = np.nanmean(obs)
    # Squared model-observation mismatch ...
    squared_error = np.sum((model - obs) ** 2)
    # ... normalized by the potential error about the observed mean.
    potential_error = np.sum(
        (np.abs(model - obs_mean) + np.abs(obs - obs_mean)) ** 2)
    return 1 - squared_error / potential_error
def get_NOAA_wlev(station_no, start_date, end_date):
    """Download water level data from NOAA site for one NOAA station
    for specified period.

    :arg station_no: Station number e.g. 9443090.
    :type station_no: int

    :arg start_date: Start date; e.g. '01-JAN-2010'.
    :type start_date: str

    :arg end_date: End date; e.g. '31-JAN-2010'
    :type end_date: str

    :returns: Saves text file with water level data in meters at one
              station. Time zone is UTC. Nothing is returned.
    """
    # Name the output file
    outfile = ('wlev_' + str(station_no) + '_' + str(start_date) +
               '_' + str(end_date) + '.csv')
    # Form the url and query string for the CO-OPS data API
    # (removed an unused `form_handler` local that was never referenced).
    st_ar = arrow.Arrow.strptime(start_date, '%d-%b-%Y')
    end_ar = arrow.Arrow.strptime(end_date, '%d-%b-%Y')
    base_url = 'http://tidesandcurrents.noaa.gov'
    data_provider = (
        '/api/datagetter?product=hourly_height&application=NOS.COOPS.TAC.WL'
        + '&begin_date=' + st_ar.format('YYYYMMDD') + '&end_date='
        + end_ar.format('YYYYMMDD')
        + '&datum=MLLW&station='+str(station_no)
        + '&time_zone=GMT&units=metric&interval=h&format=csv')
    # Go get the data from the NOAA site
    with requests.Session() as s:
        s.post(base_url)
        r = s.get(base_url + data_provider)
    # Write the data to a text file
    with open(outfile, 'w') as f:
        f.write(r.text)
def get_NOAA_predictions(station_no, start_date, end_date):
    """Download tide predictions from NOAA site for one NOAA station
    for specified period.

    :arg int station_no: Station number e.g. 9443090.

    :arg str start_date: Start date; e.g. '01-JAN-2010'.

    :arg str end_date: End date; e.g. '31-JAN-2010'

    :returns: Saves text file with predictions in meters at one station.
              Time zone is UTC. Nothing is returned.
    """
    # Name the output file
    outfile = ('predictions_' + str(station_no) + '_' + str(start_date) + '_'
               + str(end_date) + '.csv')
    # Form the url and query string for the CO-OPS data API
    # (removed an unused `form_handler` local that was never referenced).
    st_ar = arrow.Arrow.strptime(start_date, '%d-%b-%Y')
    end_ar = arrow.Arrow.strptime(end_date, '%d-%b-%Y')
    base_url = 'http://tidesandcurrents.noaa.gov'
    data_provider = (
        '/api/datagetter?product=predictions&application=NOS.COOPS.TAC.WL'
        + '&begin_date=' + st_ar.format('YYYYMMDD') + '&end_date='
        + end_ar.format('YYYYMMDD')
        + '&datum=MLLW&station='+str(station_no)
        + '&time_zone=GMT&units=metric&interval=h&format=csv')
    # Go get the data from the NOAA site
    with requests.Session() as s:
        s.post(base_url)
        r = s.get(base_url + data_provider)
    # Write the data to a text file
    with open(outfile, 'w') as f:
        f.write(r.text)
def get_operational_weather(start, end, grid):
    """
    Returns the operational weather between the dates start and end at
    the grid point defined in grid.

    :arg start: string containing the start date of the weather collection
                in format '01-Nov-2006'
    :type start: str

    :arg end: string containing the end date of the weather collection in
              format '01-Nov-2006'
    :type end: str

    :arg grid: array of the operational grid coordinates for the point of
               interest eg. [244,245]
    :type grid: arr of ints

    :returns: windspeed, winddir, pressure and time array from weather data
              for the times indicated
              wind speed is m/s
              winddir is direction wind is blowing in degrees counterclockwise
              from east
              pressure is kPa
              time is UTC
    """
    u10 = []
    v10 = []
    pres = []
    time = []
    st_ar = arrow.Arrow.strptime(start, '%d-%b-%Y')
    end_ar = arrow.Arrow.strptime(end, '%d-%b-%Y')
    # Wind lives in the operational files; corrected pressure is stored
    # separately under the GEM2.5 ops path.
    ops_path = '/ocean/sallen/allen/research/Meopar/Operational/'
    opsp_path = '/ocean/nsoontie/MEOPAR/GEM2.5/ops/'
    for r in arrow.Arrow.range('day', st_ar, end_ar):
        mstr = "{0:02d}".format(r.month)
        dstr = "{0:02d}".format(r.day)
        fstr = 'ops_y' + str(r.year) + 'm' + mstr + 'd' + dstr + '.nc'
        f = NC.Dataset(ops_path+fstr)
        # u
        var = f.variables['u_wind'][:, grid[0], grid[1]]
        u10.extend(var[:])
        # v
        var = f.variables['v_wind'][:, grid[0], grid[1]]
        v10.extend(var[:])
        # pressure
        fpstr = ('slp_corr_ops_y' + str(r.year) + 'm' + mstr + 'd' + dstr
                 + '.nc')
        fP = NC.Dataset(opsp_path+fpstr)
        var = fP.variables['atmpres'][:, grid[0], grid[1]]
        pres.extend(var[:])
        # time
        tim = f.variables['time_counter']
        time.extend(tim[:])
    # time_counter holds seconds since the Unix epoch.
    times = convert_date_seconds(time, '01-Jan-1970')
    u10s = np.array(u10)
    v10s = np.array(v10)
    press = np.array(pres)
    windspeed = np.sqrt(u10s**2+v10s**2)
    # Direction the wind blows TO, degrees CCW from East, in [0, 360).
    winddir = np.arctan2(v10, u10) * 180 / np.pi
    winddir = winddir + 360 * (winddir < 0)
    return windspeed, winddir, press, times
def interp_to_model_time(time_model, varp, tp):
    """Interpolate a variable to model output times.

    :arg time_model: model output times as datetime objects
                     (docstring fixed: the parameter is ``time_model``,
                     not ``model_time``)
    :type time_model: array of datetimes

    :arg varp: variable to be interpolated
    :type varp: array

    :arg tp: times associated with *varp*
    :type tp: array

    :returns: varp_interp, the variable interpolated to *time_model*
    """
    # Strategy: convert times to seconds past a reference value and use
    # that as the independent variable in the interpolation.
    epoc = time_model[0]
    tp_wrt_epoc = [(t - epoc).total_seconds() for t in tp]
    return [
        np.interp((t - epoc).total_seconds(), tp_wrt_epoc, varp)
        for t in time_model
    ]
| StarcoderdataPython |
3305166 | <gh_stars>0
# Find the page's links via the <a> tag.
import requests
from bs4 import BeautifulSoup
# Fetch the university home page and parse the returned HTML.
pagina = requests.get('http://www.uninove.br')
pagebs = BeautifulSoup(pagina.text, 'html.parser')
# Find all anchor (link) elements.
uninove = pagebs.find_all('a')
for i in uninove:
    print(i.prettify())
input('Tecle ENTER para sair...')
| StarcoderdataPython |
1637447 | """
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from aenum import Enum, extend_enum
from typing import Optional
class DataMeasure(Enum):
    """Data measure definitions
    Enumeration of measures available through data APIs. Measures are facts that are usually quantities and that can
    be aggregated over a defined period of time. For example: tradePrice and volume are measures
    """
    # Quoted / traded price and volume series.
    ASK_PRICE = 'askPrice'
    BID_PRICE = 'bidPrice'
    HIGH_PRICE = 'highPrice'
    MID_PRICE = 'midPrice'
    LOW_PRICE = 'lowPrice'
    OPEN_PRICE = 'openPrice'
    CLOSE_PRICE = 'closePrice'
    TRADE_PRICE = 'tradePrice'
    SPOT_PRICE = 'spot'
    VOLUME = 'volume'
    # Corporate-action adjusted counterparts of the series above.
    ADJUSTED_ASK_PRICE = 'adjustedAskPrice'
    ADJUSTED_BID_PRICE = 'adjustedBidPrice'
    ADJUSTED_HIGH_PRICE = 'adjustedHighPrice'
    ADJUSTED_LOW_PRICE = 'adjustedLowPrice'
    ADJUSTED_OPEN_PRICE = 'adjustedOpenPrice'
    ADJUSTED_CLOSE_PRICE = 'adjustedClosePrice'
    ADJUSTED_TRADE_PRICE = 'adjustedTradePrice'
    ADJUSTED_VOLUME = 'adjustedVolume'
    # Derivatives / volatility measures.
    IMPLIED_VOLATILITY = 'impliedVolatility'
    VAR_SWAP = 'varSwap'
    PRICE = 'price'
    NAV_PRICE = 'navPrice'
    SPREAD = 'spread'
    NAV_SPREAD = 'navSpread'
    IMPLIED_VOLATILITY_BY_DELTA_STRIKE = 'impliedVolatilityByDeltaStrike'
    FORWARD_POINT = 'forwardPoint'
    # Fundamental metrics (display-name values; see list_fundamentals()).
    DIVIDEND_YIELD = 'Dividend Yield'
    EARNINGS_PER_SHARE = 'Earnings per Share'
    EARNINGS_PER_SHARE_POSITIVE = 'Earnings per Share Positive'
    NET_DEBT_TO_EBITDA = 'Net Debt to EBITDA'
    PRICE_TO_BOOK = 'Price to Book'
    PRICE_TO_CASH = 'Price to Cash'
    PRICE_TO_EARNINGS = 'Price to Earnings'
    PRICE_TO_EARNINGS_POSITIVE = 'Price to Earnings Positive'
    PRICE_TO_SALES = 'Price to Sales'
    RETURN_ON_EQUITY = 'Return on Equity'
    SALES_PER_SHARE = 'Sales per Share'
    # Horizon / direction qualifiers used alongside fundamental metrics.
    ONE_YEAR = '1y'
    TWO_YEARS = '2y'
    THREE_YEARS = '3y'
    FORWARD = 'forward'
    TRAILING = 'trailing'

    def __repr__(self):
        # Show the API field name rather than the enum member name.
        return self.value

    @classmethod
    def list_fundamentals(cls):
        """Return the API values of the fundamental-metric members only."""
        return [metric.value for metric in [cls.DIVIDEND_YIELD, cls.EARNINGS_PER_SHARE, cls.EARNINGS_PER_SHARE_POSITIVE,
                                            cls.NET_DEBT_TO_EBITDA, cls.PRICE_TO_BOOK, cls.PRICE_TO_CASH,
                                            cls.PRICE_TO_EARNINGS, cls.PRICE_TO_EARNINGS_POSITIVE,
                                            cls.PRICE_TO_SALES, cls.RETURN_ON_EQUITY, cls.SALES_PER_SHARE]]
class DataDimension(Enum):
    """Data dimension definitions
    Enumeration of dimensions available through data APIs. Dimensions describe or provide context to measures, and can
    be used to select or group data. For example: ticker and exchange are dimensions
    """
    ASSET_ID = 'assetId'
    NAME = 'name'
    RIC = 'ric'
    TENOR = 'tenor'
    STRIKE_REFERENCE = 'strikeReference'
    RELATIVE_STRIKE = 'relativeStrike'
    EXPIRATION_DATE = 'expirationDate'
    UPDATE_TIME = 'updateTime'
class Fields(Enum):
    """Data field enumeration
    Enumeration of fields available through data APIs
    """
    # NOTE: members are not declared here; they are injected at import time
    # from DataMeasure and DataDimension via aenum.extend_enum (see below).

    @property
    def unit(self) -> Optional[str]:
        # TODO: Define units and look up appropriate unit for self
        return None
# Populate Fields with the union of all measures and dimensions. This relies
# on aenum.extend_enum; the stdlib Enum is closed after class creation.
for enum in DataMeasure:
    extend_enum(Fields, enum.name, enum.value)
for enum in DataDimension:
    extend_enum(Fields, enum.name, enum.value)
| StarcoderdataPython |
145256 | <gh_stars>1-10
import pytest, random
from string import ascii_lowercase
from random import seed, choice, randrange
from src.search import SearchBM, SearchKMP
@pytest.mark.parametrize("search", [SearchBM, SearchKMP])
def test_empty_pattern_0(search):
    # An empty pattern matches at index 0 of any text.
    s = search("")
    assert s.search("Hello, world!") == 0
@pytest.mark.parametrize("search", [SearchBM, SearchKMP])
def test_empty_pattern_1(search):
    # Empty pattern in empty text still matches at index 0.
    s = search("")
    assert s.search("") == 0
@pytest.mark.parametrize("search", [SearchBM, SearchKMP])
def test_pattern_0(search):
    # Single-character pattern: the first occurrence index is returned.
    s = search("a")
    assert s.search("a") == 0
    assert s.search("ba") == 1
    assert s.search("bab") == 1
@pytest.mark.parametrize("search", [SearchBM, SearchKMP])
def test_pattern_1(search):
    s = search("ABBA")
    # Convention: a miss returns len(text) ("a" is shorter than the pattern).
    assert s.search("a") == 1
    # Matching is case-sensitive, so lowercase "abba" is also a miss.
    assert s.search("abba") == 4
    assert s.search("ABBA") == 0
    assert s.search("AABBA") == 1
    assert s.search("ABABBA") == 2
@pytest.mark.parametrize("search", [SearchBM, SearchKMP])
def test_pattern_2(search):
    s = search("ABC")
    # "ABC" does not occur in "ABBC": miss returns len(text) == 4.
    assert s.search("ABBC") == 4, s.search("ABBC")
@pytest.mark.parametrize("search", [SearchBM, SearchKMP])
@pytest.mark.parametrize("n", list(range(1, 100)))
@pytest.mark.parametrize("seed", list(range(3)))
def test_random(search, n, seed):
    # Cross-check both implementations against str.find on random text.
    random.seed(seed)
    txt = "".join(choice(ascii_lowercase) for i in range(n))
    for i in range(10):
        # A random (possibly empty) slice of the text: guaranteed present.
        pat = txt[randrange(0, n) : randrange(0, n)]
        s = search(pat)
        assert s.search(txt) == txt.find(pat)
        # Appending uppercase 'A' guarantees a miss (text is lowercase
        # only), so search must return len(txt).
        pat += "A"
        s = search(pat)
        assert s.search(txt) == len(txt)
| StarcoderdataPython |
import os

# Render one PNG frame of electron number density per saved gkeyll output
# step (simulation time t = 5*i), via the external gkeplot.py script.
for i in range(101):
    cmd = "/Users/ahakim/research/gkeyll-project/gkeyllall/gkeyll/scripts/gkeplot.py -p s2-5m-2d-rt_q_%d.h5 -c 0 -t 'N_e t=%g' --dont-show --save -o s2-5m-2d-rt_numElc_%05d" % (i, 5*i, i)
    # BUG FIX: was a Python 2 print statement ("print cmd"), a SyntaxError
    # under Python 3; the function call works on both 2 and 3.
    print(cmd)
    os.system(cmd)
1888158 | <reponame>coderdojo-banbridge/astro-pi-examples<filename>sense hat/buttons/pygame_test.py
#!/usr/bin/python
from sense_hat import SenseHat
import os
import time
import pygame # See http://www.pygame.org/docs
from pygame.locals import *
print("Press Escape to quit")
time.sleep(1)
pygame.init()
# A pygame window is required to receive keyboard events.
pygame.display.set_mode((640, 480))
sense = SenseHat()
sense.clear()  # Blank the LED matrix
sense.set_rotation(270)  # Flight orientation
# LED matrix coordinates: 0, 0 = top left; 7, 7 = bottom right.
# Two-pixel "arrow" markers on each edge, plus a 2x2 centre marker.
UP_PIXELS = [[3, 0], [4, 0]]
DOWN_PIXELS = [[3, 7], [4, 7]]
LEFT_PIXELS = [[0, 3], [0, 4]]
RIGHT_PIXELS = [[7, 3], [7, 4]]
CENTRE_PIXELS = [[3, 3], [4, 3], [3, 4], [4, 4]]
def set_pixels(pixels, col):
    """Light every (x, y) coordinate in *pixels* with RGB colour *col*."""
    red, green, blue = col[0], col[1], col[2]
    for x, y in pixels:
        sense.set_pixel(x, y, red, green, blue)
# Joystick is turned 90 degrees clockwise for flight orientation, hence the
# down->left, up->right, left->up, right->down remapping below.
def handle_event(event, colour):
    """Respond to a key event by lighting pixels or showing a letter."""
    arrow_pixels = {
        pygame.K_DOWN: LEFT_PIXELS,
        pygame.K_UP: RIGHT_PIXELS,
        pygame.K_LEFT: UP_PIXELS,
        pygame.K_RIGHT: DOWN_PIXELS,
        pygame.K_RETURN: CENTRE_PIXELS,
    }
    letter_keys = {
        pygame.K_u: "u",
        pygame.K_d: "d",
        pygame.K_l: "l",
        pygame.K_r: "r",
        pygame.K_a: "a",
        pygame.K_b: "b",
    }
    if event.key in arrow_pixels:
        set_pixels(arrow_pixels[event.key], colour)
    elif event.key in letter_keys:
        sense.show_letter(letter_keys[event.key], colour)
running = True
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
# Event loop: draw in white while a key is pressed, erase (draw black) on
# release; Escape or closing the window exits.
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                running = False
            handle_event(event, WHITE)
        if event.type == KEYUP:
            handle_event(event, BLACK)
pygame.quit()
| StarcoderdataPython |
11249056 | <gh_stars>0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy(<EMAIL>)
"""
for github
"""
import requests
from .exceptions import GitException
class Git(object):
    """Minimal HTTPS client base for a git hosting provider.

    Subclasses (or instances, via keyword arguments) set ``domain``
    (e.g. ``"api.github.com"``); requests may be routed through an
    HTTP(S) proxy configured with ``proxy``.
    """

    # Hostname of the provider; must be set before building URLs.
    domain = None
    # Optional proxy URL; applied to both http and https traffic.
    proxy = None
    proxies = {}

    def __init__(self, *args, **kwargs):
        # Accept arbitrary configuration as keyword arguments.
        for k, v in kwargs.items():
            setattr(self, k, v)
        if self.proxy:
            self.proxies = {
                "http": self.proxy,
                "https": self.proxy,
            }

    @classmethod
    def _fmt_url(cls, path):
        """Return the full https URL for *path*; raise if no domain set."""
        if not cls.domain:
            raise GitException('git domain 不能为空')
        return 'https://{domain}{path}'.format(domain = cls.domain, path = path)

    @classmethod
    def _get(cls, path):
        """Issue a GET request against *path* on the configured domain."""
        url = cls._fmt_url(path)
        # BUG FIX: referenced bare ``proxies`` (a NameError at call time —
        # class attributes are not in method scope); read it via ``cls``.
        return cls._request('get', url, proxies=cls.proxies)

    @classmethod
    def _request(cls, method, url, proxies=None):
        """Send the HTTP request, forwarding *proxies* only when non-empty."""
        kw = {}
        if proxies and isinstance(proxies, dict):
            kw['proxies'] = proxies
        return requests.request(method, url, **kw)
| StarcoderdataPython |
8068645 | import pickle
import time
from sklearn.metrics import f1_score, mean_absolute_error
import lightgbm as lgb
import cv2
import numpy as np
import pandas as pd
if __name__ == '__main__':
    # Load the feature table; first row of the CSV is the header.
    csvpath = r"datasets/datasets.csv"
    csvdf = pd.read_csv(csvpath, header=0)
    # Feature columns expected by the trained model, in training order.
    alldataX = csvdf[["calctype","col","row","convert_rate","iscoincidence","sobeld","jlxd","a1","a2","a3","a4","a5","a6","a7","a8",
                      "s1","s2","s3","s4","s5","s6","s7","s8","s9","s10","s11","s12","s13","s14","s15","s16","s17","s18","s19","s20"]]
    alldataY = csvdf["Y"]
    # 模型加载 (model loading) — pickled LightGBM booster.
    # gbm = lgb.Booster(model_file=config.model_file[0])
    with open('lgb.pickle', 'rb') as f:
        gbm = pickle.load(f)
    # 模型预测 (model prediction) — time a single-row prediction and report
    # its mean absolute error against the labelled value.
    start = time.time()
    y_pred = gbm.predict(alldataX[:1].values)
    print(time.time()-start)
    score = mean_absolute_error(y_pred, alldataY[:1])
    print(score)
| StarcoderdataPython |
1677260 | <reponame>ForrestPi/AnomalyDetection
import os
import torch
from torch.nn import functional as F
from dataset import return_MVTecAD_loader
from network import VAE,loss_function
import matplotlib.pyplot as plt
def train(model, train_loader, device, optimizer, epoch):
    """Run one training epoch and return the mean per-sample loss.

    :param model: VAE exposing ``mu``/``logvar`` after a forward pass
    :param train_loader: DataLoader yielding input batches
    :param device: torch device batches are moved to
    :param optimizer: optimizer stepping the model parameters
    :param epoch: current epoch number (unused; kept for caller
        compatibility)
    :returns: summed batch losses divided by the dataset size
    """
    model.train()
    train_loss = 0
    # The batch index from the original enumerate() was never used.
    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        recon_batch = model(data)
        loss = loss_function(recon_batch, data, model.mu, model.logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
    train_loss /= len(train_loader.dataset)
    return train_loss
def eval(model, test_loader, device):
    """Reconstruct the first batch of *test_loader* and return it as a
    numpy array.

    NOTE(review): this function shadows the builtin ``eval``; renaming is
    deferred because ``main`` calls it by this name.
    """
    model.eval()
    # BUG FIX: ``iter(...).next()`` is Python 2 syntax (AttributeError on
    # Python 3); use the next() builtin.
    x_0 = next(iter(test_loader))
    with torch.no_grad():
        x_vae = model(x_0.to(device)).detach().cpu().numpy()
    # Previously the reconstruction was computed and discarded; return it so
    # callers can inspect or plot it (the old return value, None, was
    # ignored, so this is backward compatible).
    return x_vae
def EBM(model, test_loader, device):
    """Energy-based iterative refinement of one test batch.

    Starting from a batch ``x_0``, repeatedly nudges the sample toward the
    model's reconstruction with gradient steps (plus an L1 penalty keeping
    it close to ``x_0``), saving one image strip per iteration via
    ``iterative_plot``.
    """
    model.train()
    # BUG FIX: ``iter(...).next()`` is Python 2 syntax (AttributeError on
    # Python 3); use the next() builtin.
    x_0 = next(iter(test_loader))
    alpha = 0.05  # initial step size
    lamda = 1     # weight of the L1 proximity penalty
    x_0 = x_0.to(device).clone().detach().requires_grad_(True)
    recon_x = model(x_0).detach()
    loss = F.binary_cross_entropy(x_0, recon_x, reduction='sum')
    loss.backward(retain_graph=True)
    x_grad = x_0.grad.data
    x_t = x_0 - alpha * x_grad * (x_0 - recon_x) ** 2
    for i in range(15):
        recon_x = model(x_t).detach()
        loss = F.binary_cross_entropy(x_t, recon_x, reduction='sum') + lamda * torch.abs(x_t - x_0).sum()
        loss.backward(retain_graph=True)
        # NOTE(review): gradients are read from x_0 (accumulated across
        # iterations, never zeroed) rather than from x_t — this looks
        # suspicious; confirm intent before changing, as it alters the
        # refinement trajectory.
        x_grad = x_0.grad.data
        eps = 0.001
        x_t = x_t - eps * x_grad * (x_t - recon_x) ** 2
        iterative_plot(x_t.detach().cpu().numpy(), i)
# gif
def iterative_plot(x_t, j):
    """Save a 1x10 strip of grayscale images as frame *j* of the GIF."""
    plt.figure(figsize=(15, 4))
    for col in range(10):
        plt.subplot(1, 10, col + 1)
        # Hide axis ticks so only the images are visible.
        plt.xticks([])
        plt.yticks([])
        plt.imshow(x_t[col][0], cmap=plt.cm.gray)
    plt.subplots_adjust(wspace=0., hspace=0.)
    plt.savefig("./results/{}.png".format(j))
def main():
    """Train the VAE on MVTec 'grid' images, checkpoint every 10 epochs,
    then visualise reconstructions and run EBM refinement on a test batch."""
    train_loader = return_MVTecAD_loader(image_dir="./mvtec_anomaly_detection/grid/train/good/", batch_size=256, train=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    seed = 42
    out_dir = './logs'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    checkpoints_dir = "./checkpoints"
    if not os.path.exists(checkpoints_dir):
        # BUG FIX: previously re-created ``out_dir`` here (guaranteed
        # FileExistsError, and checkpoints/ was never created); make the
        # checkpoints directory instead.
        os.mkdir(checkpoints_dir)
    # Seed CPU and GPU RNGs for reproducibility.
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    model = VAE(z_dim=512).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
    num_epochs = 500
    for epoch in range(num_epochs):
        loss = train(model=model, train_loader=train_loader, device=device, optimizer=optimizer, epoch=epoch)
        print('epoch [{}/{}], train loss: {:.4f}'.format(epoch + 1, num_epochs, loss))
        if (epoch+1) % 10 == 0:
            torch.save(model.state_dict(), os.path.join(checkpoints_dir, "{}.pth".format(epoch+1)))
    test_loader = return_MVTecAD_loader(image_dir="./mvtec_anomaly_detection/grid/test/metal_contamination/", batch_size=10, train=False)
    eval(model=model, test_loader=test_loader, device=device)
    EBM(model, test_loader, device)
# Script entry point.
if __name__ == "__main__":
    main()
1728320 | <reponame>vahndi/quant-survey
from itertools import product
from pandas import DataFrame
from probability.distributions import BetaBinomial, BetaBinomialConjugate
from tqdm import tqdm
from typing import List, Tuple
from survey import Survey
from survey.attributes import SingleCategoryAttribute
from survey.custom_types import Categorical
from survey.experiments import ExperimentResult
from survey.questions import SingleChoiceQuestion
from survey.respondents import RespondentGroup
class SingleToSingleExperimentMixin(object):
    """Mixin implementing a single-categorical vs single-categorical
    Bayesian experiment over survey responses.

    Subclasses bind the dependent/independent variable types and the
    Survey accessor names via the ``*DependentMixin`` / ``*IndependentMixin``
    marker classes defined below this class.
    """
    # Bound by subclasses and by set_values():
    _survey: Survey
    _dependent: Categorical
    _independent: Categorical
    _dependent_getter: str    # Survey method name used to resolve the dependent by name
    _independent_getter: str  # Survey method name used to resolve the independent by name
    _dependent_names: str     # column-name stem for dependent values in results_data()
    _independent_names: str   # column-name stem for independent values in results_data()
    _results: List[ExperimentResult]

    def set_values(self, survey: Survey,
                   dependent: Categorical,
                   independent: Categorical):
        """
        Set dependent and independent values for the Experiment.

        *dependent* / *independent* may be passed either as objects or as
        names, which are resolved against *survey* via the getters bound by
        the subclass.
        """
        if type(dependent) is str:
            dependent = getattr(survey, self._dependent_getter)(dependent)
            if dependent is None:
                # NOTE(review): ``dependent`` is None here, so
                # ``dependent.name`` in this message would raise
                # AttributeError before the ValueError — confirm intent.
                raise ValueError(
                    f'Error - {dependent.name} is not a '
                    f'{self._dependent.__name__} in the survey.'
                )
        self._dependent = dependent
        if type(independent) is str:
            independent = getattr(survey, self._independent_getter)(independent)
            if independent is None:
                # NOTE(review): same AttributeError hazard as above.
                raise ValueError(
                    f'Error - {independent.name} is not a '
                    f'{self._independent.__name__} in the survey.'
                )
        self._independent = independent
        self._results = []

    def calculate(
            self,
            exp_ind_values: List[str],
            exp_dep_values: List[str],
            ctl_ind_values: List[str],
            ctl_dep_values: List[str]
    ) -> Tuple[BetaBinomial, BetaBinomial]:
        """
        Calculate the probability that the number of responses from the
        experimental group in `exp_answers` is significantly higher than the
        number of responses from the control group in `ctl_answers`.
        N.B. to assess the effect of respondent attributes, `exp_answers` and
        `ctl_answers` should be identical.
        :param exp_ind_values: The answers given by the experimental group to
            the independent question.
        :param exp_dep_values: The answers to the dependent question to count in
            the experimental group.
        :param ctl_ind_values: The answers given by the control group to the
            independent question.
        :param ctl_dep_values: The answers to the dependent question to count in
            the control group.
        """
        # find n (all responses) and k (matching responses) for the
        # experimental respondent and answer group
        n_exp = self._survey.count_responses(
            question=self._dependent,
            condition_category=self._independent,
            condition_values=exp_ind_values
        )
        k_exp = self._survey.count_responses(
            question=self._dependent, answers=exp_dep_values,
            condition_category=self._independent,
            condition_values=exp_ind_values
        )
        # find n and k for control respondent and answer group
        n_ctl = self._survey.count_responses(
            question=self._dependent,
            condition_category=self._independent,
            condition_values=ctl_ind_values
        )
        k_ctl = self._survey.count_responses(
            question=self._dependent, answers=ctl_dep_values,
            condition_category=self._independent,
            condition_values=ctl_ind_values
        )
        # create beta-binomial distribution for each group, with a uniform
        # Beta(1, 1) prior
        # NOTE(review): the annotation says BetaBinomial but these are
        # BetaBinomialConjugate instances — confirm which is intended.
        bb_exp = BetaBinomialConjugate(alpha=1, beta=1, n=n_exp, m=k_exp)
        bb_ctl = BetaBinomialConjugate(alpha=1, beta=1, n=n_ctl, m=k_ctl)
        # calculate probability of superiority of test group
        return bb_ctl, bb_exp

    def run(self, show_progress: bool = True):
        """
        Analyze responses to a question between respondents with different
        values of an attribute. Leads to statements of the form "respondents
        with an attribute value of X are Y% more likely to answer Z than
        respondents with an attribute value of ~X" e.g. "men are 50% more likely
        to switch plans than women".
        :param show_progress: Whether to show a tqdm progress bar of the
            calculations.
        """
        self._results = []
        # iterate over groups of respondent independent and dependent answer
        # choices
        iterator = list(product(
            self._independent.group_pairs(ordered=self._independent.ordered),
            self._dependent.group_pairs(ordered=self._dependent.ordered)
        ))
        if show_progress:
            iterator = tqdm(iterator)
        for (
                (answers_ind_exp, answers_ind_ctl),
                (answers_dep_exp, answers_dep_ctl)
        ) in iterator:
            # NOTE(review): both dep arguments receive answers_dep_exp, and
            # answers_dep_ctl is never used. Per calculate()'s docstring,
            # identical dep answers are intentional when assessing attribute
            # effects — confirm that the unused answers_dep_ctl is deliberate.
            bb_ctl, bb_exp = self.calculate(
                exp_ind_values=answers_ind_exp, exp_dep_values=answers_dep_exp,
                ctl_ind_values=answers_ind_ctl, ctl_dep_values=answers_dep_exp
            )
            # compile result data
            result = ExperimentResult(
                survey=self._survey,
                ctl_group=RespondentGroup(
                    respondent_values={self._independent.name: answers_ind_ctl},
                    response_values={self._dependent.name: answers_dep_exp}
                ),
                exp_group=RespondentGroup(
                    respondent_values={self._independent.name: answers_ind_exp},
                    response_values={self._dependent.name: answers_dep_exp}
                ),
                ctl_dist=bb_ctl,
                exp_dist=bb_exp
            )
            self._results.append(result)

    def results_data(
            self, group_values: bool, join_str: str = ' || '
    ) -> DataFrame:
        """
        Return a DataFrame of the experiment results.
        :param group_values: Whether to group values into single columns rather
            than creating a boolean column for each value.
        :param join_str: String to join grouped values with.
        """
        ind_name = self._independent.name
        results_list = []
        for result in self._results:
            ind_values_exp = result.exp_group.respondent_values[
                self._independent.name]
            ind_values_ctl = result.ctl_group.respondent_values[
                self._independent.name]
            dep_values_exp = result.exp_group.response_values[
                self._dependent.name]
            # control dep values are reported as the complement of the
            # experimental dep values over all categories (not taken from
            # the stored ctl_group, which mirrors the exp answers)
            dep_values_ctl = [val for val in self._dependent.category_names
                              if val not in dep_values_exp]
            if group_values:
                # one row per result; multi-valued groups joined by join_str
                result_dict = dict(
                    survey_name=self._survey.name,
                    survey_question=self._dependent.name,
                    attribute_name=self._independent.name
                )
                result_dict[
                    f'{self._independent_names}_exp'
                ] = join_str.join(ind_values_exp)
                result_dict[
                    f'{self._independent_names}_ctl'
                ] = join_str.join(ind_values_ctl)
                result_dict[
                    f'{self._dependent_names}_exp'
                ] = join_str.join(dep_values_exp)
                result_dict[
                    f'{self._dependent_names}_ctl'
                ] = join_str.join(dep_values_ctl)
                result_dict['p_superior'] = result.prob_ppd_superior()
                result_dict['effect_mean'] = result.effect_mean
                result_dict['exp_mean'] = result.exp_mean
                result_dict['ctl_mean'] = result.ctl_mean
            else:
                # MultiIndex columns: one boolean column per category value
                # (True = in the experimental group, False = control)
                result_dict = {
                    ('survey', 'name'): self._survey.name,
                    ('survey', 'question'): self._dependent.name,
                }
                for attr in ind_values_exp:
                    result_dict[(ind_name, attr)] = True
                for attr in ind_values_ctl:
                    result_dict[(ind_name, attr)] = False
                for answer in dep_values_exp:
                    result_dict[('answer', answer)] = True
                for answer in dep_values_ctl:
                    result_dict[('answer', answer)] = False
                result_dict[
                    ('result', 'p_superior')
                ] = result.prob_ppd_superior()
                result_dict[('result', 'effect_mean')] = result.effect_mean
                result_dict[('result', 'exp_mean')] = result.exp_mean
                result_dict[('result', 'ctl_mean')] = result.ctl_mean
            results_list.append(result_dict)
        # compile respondent category results, fixing the column order
        if group_values:
            results = DataFrame(results_list)[
                ['survey_name', 'survey_question', 'attribute_name'] +
                ['ind_answers_exp', 'ind_answers_ctl'] +
                ['dep_answers_exp', 'dep_answers_ctl'] +
                ['p_superior', 'effect_mean', 'exp_mean', 'ctl_mean']
            ]
        else:
            results = DataFrame(results_list)[
                [('survey', 'name'), ('survey', 'question')] +
                [(ind_name, ind_answer)
                 for ind_answer in self._independent.category_names] +
                [('answer', answer)
                 for answer in self._dependent.category_names] +
                [('result', 'p_superior'), ('result', 'effect_mean'),
                 ('result', 'exp_mean'), ('result', 'ctl_mean')]
            ]
        return results
class SingleChoiceQuestionDependentMixin(object):
    """Marker mixin: the dependent variable is a SingleChoiceQuestion."""
    _dependent_type = SingleChoiceQuestion
    _dependent_getter = 'single_choice_question'
    _dependent_names = 'dep_answers'
class SingleChoiceQuestionIndependentMixin(object):
    """Marker mixin: the independent variable is a SingleChoiceQuestion."""
    _independent_type = SingleChoiceQuestion
    _independent_getter = 'single_choice_question'
    _independent_names = 'ind_answers'
class SingleCategoryAttributeDependentMixin(object):
    """Marker mixin: the dependent variable is a SingleCategoryAttribute."""
    _dependent_type = SingleCategoryAttribute
    _dependent_getter = 'categorical_attribute'
    _dependent_names = 'dep_attr_vals'
class SingleCategoryAttributeIndependentMixin(object):
    """Marker mixin: the independent variable is a SingleCategoryAttribute."""
    _independent_type = SingleCategoryAttribute
    _independent_getter = 'categorical_attribute'
    _independent_names = 'ind_attr_vals'
| StarcoderdataPython |
1822817 | <gh_stars>10-100
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at <EMAIL>.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
from PySide import QtCore, QtGui
class SlidersGroup(QtGui.QGroupBox):
    """Group box bundling three synchronised value widgets — a slider, a
    scroll bar and a dial — that always display the same value."""

    # Emitted whenever the shared value changes; driven by the dial, which
    # sits at the end of the connection ring built in __init__.
    valueChanged = QtCore.Signal(int)

    def __init__(self, orientation, title, parent=None):
        super(SlidersGroup, self).__init__(title, parent)
        self.slider = QtGui.QSlider(orientation)
        self.slider.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.slider.setTickPosition(QtGui.QSlider.TicksBothSides)
        self.slider.setTickInterval(10)
        self.slider.setSingleStep(1)
        self.scrollBar = QtGui.QScrollBar(orientation)
        self.scrollBar.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.dial = QtGui.QDial()
        self.dial.setFocusPolicy(QtCore.Qt.StrongFocus)
        # Chain the widgets in a ring (slider -> scroll bar -> dial ->
        # slider) so a change in any one propagates to the others.
        self.slider.valueChanged[int].connect(self.scrollBar.setValue)
        self.scrollBar.valueChanged[int].connect(self.dial.setValue)
        self.dial.valueChanged[int].connect(self.slider.setValue)
        self.dial.valueChanged[int].connect(self.valueChanged)
        # Lay the three widgets out perpendicular to their orientation.
        if orientation == QtCore.Qt.Horizontal:
            direction = QtGui.QBoxLayout.TopToBottom
        else:
            direction = QtGui.QBoxLayout.LeftToRight
        slidersLayout = QtGui.QBoxLayout(direction)
        slidersLayout.addWidget(self.slider)
        slidersLayout.addWidget(self.scrollBar)
        slidersLayout.addWidget(self.dial)
        self.setLayout(slidersLayout)

    def setValue(self, value):
        # Setting the slider cascades through the connection ring above.
        self.slider.setValue(value)

    def setMinimum(self, value):
        self.slider.setMinimum(value)
        self.scrollBar.setMinimum(value)
        self.dial.setMinimum(value)

    def setMaximum(self, value):
        self.slider.setMaximum(value)
        self.scrollBar.setMaximum(value)
        self.dial.setMaximum(value)

    def invertAppearance(self, invert):
        self.slider.setInvertedAppearance(invert)
        self.scrollBar.setInvertedAppearance(invert)
        self.dial.setInvertedAppearance(invert)

    def invertKeyBindings(self, invert):
        self.slider.setInvertedControls(invert)
        self.scrollBar.setInvertedControls(invert)
        self.dial.setInvertedControls(invert)
class Window(QtGui.QWidget):
    """Main demo window: a controls panel next to a stacked pair of
    horizontal/vertical SlidersGroup widgets kept in sync."""

    def __init__(self):
        super(Window, self).__init__()
        self.horizontalSliders = SlidersGroup(QtCore.Qt.Horizontal,
                "Horizontal")
        self.verticalSliders = SlidersGroup(QtCore.Qt.Vertical, "Vertical")
        # Only one orientation is visible at a time; the combo box in the
        # controls group switches between them.
        self.stackedWidget = QtGui.QStackedWidget()
        self.stackedWidget.addWidget(self.horizontalSliders)
        self.stackedWidget.addWidget(self.verticalSliders)
        self.createControls("Controls")
        # Value ring: horizontal group -> vertical group -> spin box ->
        # horizontal group, so all three stay in sync.
        self.horizontalSliders.valueChanged[int].connect(self.verticalSliders.setValue)
        self.verticalSliders.valueChanged[int].connect(self.valueSpinBox.setValue)
        self.valueSpinBox.valueChanged[int].connect(self.horizontalSliders.setValue)
        layout = QtGui.QHBoxLayout()
        layout.addWidget(self.controlsGroup)
        layout.addWidget(self.stackedWidget)
        self.setLayout(layout)
        # Initial range and value.
        self.minimumSpinBox.setValue(0)
        self.maximumSpinBox.setValue(20)
        self.valueSpinBox.setValue(5)
        self.setWindowTitle("Sliders")

    def createControls(self, title):
        """Build the controls group box (ranges, inversion, orientation)."""
        self.controlsGroup = QtGui.QGroupBox(title)
        minimumLabel = QtGui.QLabel("Minimum value:")
        maximumLabel = QtGui.QLabel("Maximum value:")
        valueLabel = QtGui.QLabel("Current value:")
        invertedAppearance = QtGui.QCheckBox("Inverted appearance")
        invertedKeyBindings = QtGui.QCheckBox("Inverted key bindings")
        self.minimumSpinBox = QtGui.QSpinBox()
        self.minimumSpinBox.setRange(-100, 100)
        self.minimumSpinBox.setSingleStep(1)
        self.maximumSpinBox = QtGui.QSpinBox()
        self.maximumSpinBox.setRange(-100, 100)
        self.maximumSpinBox.setSingleStep(1)
        self.valueSpinBox = QtGui.QSpinBox()
        self.valueSpinBox.setRange(-100, 100)
        self.valueSpinBox.setSingleStep(1)
        orientationCombo = QtGui.QComboBox()
        orientationCombo.addItem("Horizontal slider-like widgets")
        orientationCombo.addItem("Vertical slider-like widgets")
        # Fan each control's signal out to both slider groups.
        orientationCombo.activated[int].connect(self.stackedWidget.setCurrentIndex)
        self.minimumSpinBox.valueChanged[int].connect(self.horizontalSliders.setMinimum)
        self.minimumSpinBox.valueChanged[int].connect(self.verticalSliders.setMinimum)
        self.maximumSpinBox.valueChanged[int].connect(self.horizontalSliders.setMaximum)
        self.maximumSpinBox.valueChanged[int].connect(self.verticalSliders.setMaximum)
        invertedAppearance.toggled.connect(self.horizontalSliders.invertAppearance)
        invertedAppearance.toggled.connect(self.verticalSliders.invertAppearance)
        invertedKeyBindings.toggled.connect(self.horizontalSliders.invertKeyBindings)
        invertedKeyBindings.toggled.connect(self.verticalSliders.invertKeyBindings)
        controlsLayout = QtGui.QGridLayout()
        controlsLayout.addWidget(minimumLabel, 0, 0)
        controlsLayout.addWidget(maximumLabel, 1, 0)
        controlsLayout.addWidget(valueLabel, 2, 0)
        controlsLayout.addWidget(self.minimumSpinBox, 0, 1)
        controlsLayout.addWidget(self.maximumSpinBox, 1, 1)
        controlsLayout.addWidget(self.valueSpinBox, 2, 1)
        controlsLayout.addWidget(invertedAppearance, 0, 2)
        controlsLayout.addWidget(invertedKeyBindings, 1, 2)
        controlsLayout.addWidget(orientationCombo, 3, 0, 1, 3)
        self.controlsGroup.setLayout(controlsLayout)
if __name__ == '__main__':
    # Standard Qt application bootstrap: create the app, show the window,
    # and hand control to the event loop.
    import sys
    app = QtGui.QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
9606222 | <gh_stars>10-100
from sqlalchemy.orm import Session
from fastapi import Depends
from app.schemas import tenant as schemas
from app.models.tenant import Tenant, TenantAccount
from app.controllers.billing import stripe
from app.controllers.account import create_account
def get_tenant(db_session: Session, id: int):
    """Fetch a single Tenant by primary key, or None if absent."""
    query = db_session.query(Tenant).filter(Tenant.id == id)
    return query.first()
def get_tenants(db_session: Session, skip: int = 0, limit: int = 100):
    """Return a page of tenants using offset/limit pagination."""
    page = db_session.query(Tenant).offset(skip).limit(limit)
    return page.all()
def add_account_to_tenant(db_session: Session, account_id, tenant_id):
    """Create relationship between tenant and account."""
    link = TenantAccount(tenant_id=tenant_id, account_id=account_id)
    db_session.add(link)
    db_session.commit()
    return link
def create_tenant_and_account(
    db_session: Session,
    name: str,
    slug: str,
    first_name: str,
    last_name: str,
    email: str,
    password: str,
    is_active: bool = False,
    is_verified: bool = False,
    do_send_emails: bool = True
):
    """Create a tenant and an account.

    Creates the Tenant row, a matching Stripe Customer, the Account, and
    links the two; returns the refreshed Tenant.
    """
    tenant_obj = Tenant(name=name, slug=slug, billing_email=email)
    db_session.add(tenant_obj)
    # Flush (not commit) so tenant_obj.id is assigned before the Stripe call.
    db_session.flush()
    # New tenant = New Customer in stripe.
    customer_resp = stripe.Customer.create(
        email=email,
        description="Customer for {}<{}>".format(name, email),
        name=name,
        metadata={"tenant_id": tenant_obj.id},
    )
    # Record the Customer ID from stripe.
    tenant_obj.stripe_customer_id = customer_resp.get("id")
    db_session.commit()
    # Create account
    # NOTE(review): if this fails, the tenant row and Stripe customer above
    # are already committed/created and are not rolled back — confirm this
    # is acceptable.
    account_obj = create_account(
        db_session,
        first_name,
        last_name,
        email,
        password,
        is_active=is_active,
        is_verified=is_verified,
        send_registration_email=do_send_emails
    )
    # Add relationship between account to tenant.
    add_account_to_tenant(db_session, account_obj.id, tenant_obj.id)
    db_session.refresh(tenant_obj)
    return tenant_obj
def get_tenant_by_name(db_session: Session, name: str):
    """Get a tenant by name."""
    matches = db_session.query(Tenant).filter(Tenant.name == name)
    return matches.first()
| StarcoderdataPython |
12822646 | <gh_stars>0
# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2016 - Nokia Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
import traceback as tb
from mistral.engine import tasks
from mistral.engine import workflow_handler as wf_handler
from mistral import exceptions as exc
from mistral.workbook import parser as spec_parser
from mistral.workflow import commands as wf_cmds
from mistral.workflow import states
"""Responsible for running tasks and handling results."""
LOG = logging.getLogger(__name__)
def run_task(wf_cmd):
    """Runs workflow task.
    :param wf_cmd: Workflow command.
    """
    task = _build_task_from_command(wf_cmd)
    try:
        task.run()
    except exc.MistralException as e:
        # On engine-level failure, mark the task errored and fail the
        # whole workflow execution.
        wf_ex = wf_cmd.wf_ex
        task_spec = wf_cmd.task_spec
        msg = (
            "Failed to run task [wf=%s, task=%s]: %s\n%s" %
            (wf_ex, task_spec.get_name(), e, tb.format_exc())
        )
        LOG.error(msg)
        task.set_state(states.ERROR, msg)
        wf_handler.fail_workflow(wf_ex, msg)
        return
    # A task may complete synchronously; if so, the workflow may now be done.
    if task.is_completed():
        wf_handler.check_workflow_completion(wf_cmd.wf_ex)
def on_action_complete(action_ex):
    """Handles action completion event.
    :param action_ex: Action execution.
    """
    task_ex = action_ex.task_execution
    # Ad-hoc action executions have no parent task; nothing to do.
    if not task_ex:
        return
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    wf_ex = task_ex.workflow_execution
    task = _create_task(
        wf_ex,
        task_spec,
        task_ex.in_context,
        task_ex
    )
    try:
        task.on_action_complete(action_ex)
    except exc.MistralException as e:
        # Failure while processing the result: error the task and fail
        # the workflow execution.
        task_ex = action_ex.task_execution
        wf_ex = task_ex.workflow_execution
        msg = ("Failed to handle action completion [wf=%s, task=%s,"
               " action=%s]: %s\n%s" %
               (wf_ex.name, task_ex.name, action_ex.name, e, tb.format_exc()))
        LOG.error(msg)
        task.set_state(states.ERROR, msg)
        wf_handler.fail_workflow(wf_ex, msg)
        return
    # The completed action may have completed the task and the workflow.
    if task.is_completed():
        wf_handler.check_workflow_completion(wf_ex)
def fail_task(task_ex, msg):
    """Move the task to ERROR and fail its workflow execution with *msg*."""
    task = _build_task_from_execution(task_ex)
    task.set_state(states.ERROR, msg)
    wf_handler.fail_workflow(task_ex.workflow_execution, msg)
def continue_task(task_ex):
    """Resume a previously persisted task execution."""
    task = _build_task_from_execution(task_ex)
    # TODO(rakhmerov): Error handling.
    task.run()
    if task.is_completed():
        wf_handler.check_workflow_completion(task_ex.workflow_execution)
def complete_task(task_ex, state, state_info):
    """Force-complete a task with the given state, then re-check the
    enclosing workflow for completion."""
    task = _build_task_from_execution(task_ex)
    # TODO(rakhmerov): Error handling.
    task.complete(state, state_info)
    if task.is_completed():
        wf_handler.check_workflow_completion(task_ex.workflow_execution)
def _build_task_from_execution(task_ex, task_spec=None):
    """Reconstruct a Task object from its persisted execution record,
    parsing the spec from the record unless one is supplied."""
    if not task_spec:
        task_spec = spec_parser.get_task_spec(task_ex.spec)
    return _create_task(
        task_ex.workflow_execution,
        task_spec,
        task_ex.in_context,
        task_ex
    )
def _build_task_from_command(cmd):
    """Build a Task object from a workflow command.

    Supports rerunning an existing task execution (optionally reset) and
    starting a brand new task (optionally deferred while waiting on joins).
    """
    if isinstance(cmd, wf_cmds.RunExistingTask):
        task = _create_task(
            cmd.wf_ex,
            spec_parser.get_task_spec(cmd.task_ex.spec),
            cmd.ctx,
            cmd.task_ex
        )
        if cmd.reset:
            task.reset()
        return task
    if isinstance(cmd, wf_cmds.RunTask):
        task = _create_task(cmd.wf_ex, cmd.task_spec, cmd.ctx)
        if cmd.is_waiting():
            task.defer()
        return task
    raise exc.MistralError('Unsupported workflow command: %s' % cmd)
def _create_task(wf_ex, task_spec, ctx, task_ex=None):
    """Instantiate the appropriate Task subclass for the given spec."""
    task_cls = (
        tasks.WithItemsTask if task_spec.get_with_items() else tasks.RegularTask
    )
    return task_cls(wf_ex, task_spec, ctx, task_ex)
| StarcoderdataPython |
292765 | # Generated by Django 2.0.2 on 2018-06-26 09:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (2018-06-26).

    Inverts the species relation: drops ``AnalysisUpload.species`` and adds
    an ``analysis_upload`` foreign key on ``AnalysisUploadSpecies`` instead
    (defaulting existing rows to pk 1).
    """

    dependencies = [
        ('linker', '0017_auto_20180625_2237'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='analysisupload',
            name='species',
        ),
        migrations.AddField(
            model_name='analysisuploadspecies',
            name='analysis_upload',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='linker.AnalysisUpload'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.