id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1 value |
|---|---|---|
1648384 | #!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple ElasticSearch-based accessor for tests and development."""
from __future__ import absolute_import
from __future__ import print_function
import collections
import datetime
import json
import logging
import os
import six
import elasticsearch
import elasticsearch_dsl
import time
from biggraphite import accessor as bg_accessor
from biggraphite import glob_utils as bg_glob
from biggraphite.drivers import _utils
from biggraphite.drivers import ttls
log = logging.getLogger(__name__)

# TODO:
# * Support metadata
# * Metrics
# * Directories
# * Add unit tests (with real ES)
# * Support data
# * Support dated indices
# * Handle timeouts, error
# * Implement repair
# * Implement clean

# Document type used for every metric document (ES 6.x single-type style).
INDEX_DOC_TYPE = "_doc"

# TODO: Make that configurable (in a file), this will be particularly important
# for the number of shards and replicas.
INDEX_SETTINGS = {
    "settings": {
        "index": {
            "number_of_shards": 3,
            "number_of_replicas": 1,
            "refresh_interval": "60s",
            "translog": {
                "sync_interval": "120s",
                "durability": "async",
            },
            "search": {
                "slowlog": {
                    "level": "info",
                    "threshold": {
                        "query": {
                            "debug": "2s",
                            "info": "5s",
                        },
                        "fetch": {
                            "debug": "200ms",
                            "info": "500ms",
                        },
                    }
                }
            }
        },
    },
    "mappings": {
        INDEX_DOC_TYPE: {
            "properties": {
                "depth": {"type": "long"},
                "created_on": {"type": "date"},
                "read_on": {"type": "date"},
                "updated_on": {"type": "date"},
                "name": {
                    "type": "keyword",
                    "ignore_above": 1024,
                },
                "uuid": {
                    "type": "keyword",
                },
                "config": {
                    "type": "object",
                    # TODO: describe existing fields with more details.
                },
            },
            # Additional properties (such as path components) or labels
            # TODO: have a specific dynamic mapping for labels using "match"
            "dynamic_templates": [
                {
                    "strings_as_keywords": {
                        "match": "p*",
                        "match_mapping_type": "string",
                        "mapping": {
                            "type": "keyword",
                            "ignore_above": 256,
                            "ignore_malformed": True,
                        }
                    }
                }
            ]
        },
    },
}

# Defaults used when the caller provides no explicit settings.
DEFAULT_INDEX = "biggraphite_metrics"
DEFAULT_INDEX_SUFFIX = "_%Y-%m-%d"  # strftime pattern appended to the prefix
DEFAULT_HOSTS = ["127.0.0.1"]
DEFAULT_PORT = 9200
DEFAULT_TIMEOUT = 10
DEFAULT_USERNAME = os.getenv("BG_ELASTICSEARCH_USERNAME")
DEFAULT_PASSWORD = os.getenv("BG_ELASTICSEARCH_PASSWORD")

# Upper bound on the number of documents returned by one search request.
MAX_QUERY_SIZE = 10000

# Maps option names to the parser applied to string configuration values.
OPTIONS = {
    "username": str,
    "password": str,
    "index": str,
    "index_suffix": str,
    "hosts": _utils.list_from_str,
    "port": int,
    "timeout": float,
}
def add_argparse_arguments(parser):
    """Add ElasticSearch arguments to an argparse parser."""
    # (flag, add_argument keyword arguments) in registration order.
    specs = (
        ("--elasticsearch_index",
         dict(metavar="NAME", help="elasticsearch index.",
              default=DEFAULT_INDEX)),
        ("--elasticsearch_index_suffix",
         dict(metavar="NAME",
              help="elasticsearch index suffix. Supports strftime format.",
              default=DEFAULT_INDEX_SUFFIX)),
        ("--elasticsearch_username",
         dict(help="elasticsearch username.", default=None)),
        ("--elasticsearch_password",
         dict(help="elasticsearch password.", default=None)),
        ("--elasticsearch_hosts",
         dict(metavar="HOST[,HOST,...]", help="Hosts used for discovery.",
              default=DEFAULT_HOSTS)),
        ("--elasticsearch_port",
         dict(metavar="PORT", type=int,
              help="The native port to connect to.", default=DEFAULT_PORT)),
        ("--elasticsearch_timeout",
         dict(metavar="TIMEOUT", type=int,
              help="elasticsearch query timeout in seconds.",
              default=DEFAULT_TIMEOUT)),
    )
    for flag, kwargs in specs:
        parser.add_argument(flag, **kwargs)
def _components_from_name(metric_name):
res = metric_name.split(".")
return list(filter(None, res))
def document_from_metric(metric):
    """Create an ElasticSearch document from a Metric.

    Args:
      metric: a bg_accessor Metric with .name, .id and .metadata.

    Returns:
      A dict ready to be indexed, with one "p<i>" field per path component.
    """
    config = metric.metadata.as_string_dict()
    components = _components_from_name(metric.name)
    name = bg_accessor.sanitize_metric_name(metric.name)

    data = {
        "depth": len(components) - 1,
        "name": name,
    }
    for i, component in enumerate(components):
        data["p%d" % i] = component

    # Fix: take a single timestamp so created_on and updated_on are equal
    # on creation (two now() calls produced slightly different values).
    now = datetime.datetime.now()
    data.update({
        "uuid": metric.id,
        "created_on": now,
        "updated_on": now,
        "read_on": None,
        "config": config,
    })
    return data
class Error(bg_accessor.Error):
    """Base class for all exceptions from this module."""


class InvalidArgumentError(Error, bg_accessor.InvalidArgumentError):
    """Callee did not follow requirements on the arguments."""
def _parse_wildcard_component(component):
    """Given a complex component, this builds a wildcard constraint."""
    value = ""
    for subcomponent in component:
        if isinstance(subcomponent, bg_glob.AnySequence):
            value += "*"  # any run of characters
        elif isinstance(subcomponent, six.string_types):
            value += subcomponent  # literal text
        elif isinstance(subcomponent, bg_glob.AnyChar):
            value += '?'  # exactly one character
        else:
            raise Error("Unhandled type '%s'" % subcomponent)
    return value
def _parse_regexp_component(component):
    """Given a complex component, this builds a regexp constraint."""
    if isinstance(component, bg_glob.Globstar):
        return ".*"
    regex = ""
    for subcomponent in component:
        if isinstance(subcomponent, bg_glob.Globstar):
            regex += ".*"
        elif isinstance(subcomponent, bg_glob.AnySequence):
            regex += "[^.]*"  # any run of characters inside one component
        elif isinstance(subcomponent, six.string_types):
            # NOTE(review): literal text is not regex-escaped here; names
            # containing regex metacharacters would mis-match — confirm.
            regex += subcomponent
        elif isinstance(subcomponent, bg_glob.CharNotIn):
            regex += '[^' + ''.join(subcomponent.values) + ']'
        elif isinstance(subcomponent, bg_glob.CharIn):
            regex += '[' + ''.join(subcomponent.values) + ']'
        elif isinstance(subcomponent, bg_glob.SequenceIn):
            if subcomponent.negated:
                # Negated alternation is approximated by "anything inside
                # the component", which over-matches — TODO confirm intent.
                regex += '[^.]*'
            else:
                regex += '(' + '|'.join(subcomponent.values) + ')'
        elif isinstance(subcomponent, bg_glob.AnyChar):
            regex += '[^.]'  # one character, but not the component separator
        else:
            raise Error("Unhandled type '%s'" % subcomponent)
    return regex
def parse_complex_component(component):
    """Given a complex component, this builds a constraint."""
    # Wildcard queries are preferred; they suffice whenever the component
    # only mixes literals, AnySequence ("*") and AnyChar ("?") parts.
    wildcard_friendly = (
        (bg_glob.AnySequence, bg_glob.AnyChar) + tuple(six.string_types)
    )
    if all(isinstance(sub_c, wildcard_friendly) for sub_c in component):
        return 'wildcard', _parse_wildcard_component(component)
    return 'regexp', _parse_regexp_component(component)
def _contains_regexp_wildcard(values):
return any("*" in value for value in values)
def parse_simple_component(component):
    """Given a component with a simple type, this builds a constraint."""
    value = component[0]
    if isinstance(value, bg_glob.AnySequence):
        return None, None  # No constrain
    elif isinstance(value, six.string_types):
        return 'term', value  # exact match on a literal component
    elif isinstance(value, bg_glob.CharNotIn):
        return 'regexp', '[^' + ''.join(value.values) + ']'
    elif isinstance(value, bg_glob.CharIn):
        return 'regexp', '[' + ''.join(value.values) + ']'
    elif isinstance(value, bg_glob.SequenceIn):
        # A plain alternation can use the cheaper "terms" filter unless one
        # of the alternatives itself contains a "*" wildcard.
        if _contains_regexp_wildcard(value.values):
            return 'regexp', '(' + '|'.join(value.values) + ')'
        else:
            return 'terms', value.values
    elif isinstance(value, bg_glob.AnyChar):
        return 'wildcard', '?'
    else:
        raise Error("Unhandled type '%s'" % value)
def _get_depth_from_components(components):
return len(components) - 1
def _raise_unsupported():
raise NotImplementedError("Elasticsearch accessor does not support data operations")
class _ElasticSearchAccessor(bg_accessor.Accessor):
    """A ElasticSearch acessor that doubles as a ElasticSearch MetadataCache."""

    # Row of aggregated data points.
    Row = collections.namedtuple(
        "Row", ["time_start_ms", "offset", "shard", "value", "count"]
    )

    # Row of stage-0 (raw) data points.
    # NOTE(review): the typename is "Row" although the attribute is Row0,
    # so repr() of both tuple types reads "Row" — presumably unintended.
    Row0 = collections.namedtuple("Row", ["time_start_ms", "offset", "value"])
def __init__(
self,
hosts=DEFAULT_HOSTS,
port=DEFAULT_PORT,
index=DEFAULT_INDEX,
index_suffix=DEFAULT_INDEX_SUFFIX,
username=DEFAULT_USERNAME,
password=<PASSWORD>,
timeout=DEFAULT_TIMEOUT,
updated_on_ttl_sec=ttls.DEFAULT_UPDATED_ON_TTL_SEC,
read_on_ttl_sec=ttls.DEFAULT_READ_ON_TTL_SEC,
):
"""Create a new ElasticSearchAccessor."""
super(_ElasticSearchAccessor, self).__init__("ElasticSearch")
self._hosts = list(hosts)
self._port = port
self._index_prefix = index
self._index_suffix = index_suffix
self._username = username
self._password = password
self._timeout = timeout
self._known_indices = {}
self.__glob_parser = bg_glob.GraphiteGlobParser()
self.__updated_on_ttl_sec = updated_on_ttl_sec
self.__read_on_ttl_sec = read_on_ttl_sec
self.client = None
log.debug(
"Created Elasticsearch accessor with index prefix: '%s' and index suffix: '%s'" %
(self._index_prefix, self._index_suffix)
)
    def connect(self, *args, **kwargs):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).connect(*args, **kwargs)
        self._connect()
        self.is_connected = True

    def _connect(self):
        """Connect to elasticsearch."""
        if self.is_connected:
            return  # already connected; avoid creating a second client

        # Optional HTTP basic auth; an empty password is tolerated.
        if self._username:
            http_auth = (self._username, self._password or "")
        else:
            http_auth = None

        kwargs = {
            'sniff_on_start': True,
            'sniff_on_connection_fail': True,
            'retry_on_timeout': True,
            'max_retries': 3,
            'timeout': self._timeout,
        }
        if self._port:
            kwargs['port'] = self._port
        if http_auth:
            kwargs['http_auth'] = http_auth

        es = elasticsearch.Elasticsearch(
            self._hosts,
            **kwargs
        )
        # info() doubles as a connectivity check.
        log.info("Connected: %s" % es.info())
        self.client = es
    def shutdown(self, *args, **kwargs):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).shutdown(*args, **kwargs)
        self._shutdown()
        self.is_connected = False

    def _shutdown(self):
        """Shutdown Elasticsearch client."""
        if self.client:
            self.client.transport.close()
            self.client = None

    def background(self):
        """Perform periodic background operations."""
        pass  # nothing to do for this accessor
    def flush(self):
        """Flush any internal buffers."""
        if self.client:
            # Flush, then refresh, all indices under our prefix so freshly
            # written documents become searchable.
            self.client.indices.flush(
                index="%s*" % self._index_prefix,
                allow_no_indices=True,
                ignore_unavailable=True,
                wait_if_ongoing=True,
            )
            self.client.indices.refresh(
                index="%s*" % self._index_prefix,
                allow_no_indices=True,
                ignore_unavailable=True,
            )

    def clear(self):
        """Clear all internal data."""
        # Forget which indices exist; they will be re-checked lazily.
        self._known_indices = {}
    def get_index(self, metric):
        """Get the index where a metric should be stored."""
        # Here the index could be sharded further by looking at the
        # metric metadata, for example, per owner.
        # The index is dated: prefix + strftime(suffix) for "today".
        index_name = self._index_prefix + datetime.datetime.now().strftime(self._index_suffix)
        if index_name not in self._known_indices:
            if not self.client.indices.exists(index=index_name):
                # ignore=409: a concurrent writer may create it first.
                self.client.indices.create(
                    index=index_name,
                    body=INDEX_SETTINGS,
                    ignore=409
                )
                self.client.indices.flush()
            self._known_indices[index_name] = True
        return index_name
    def insert_points_async(self, metric, datapoints, on_done=None):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).insert_points_async(
            metric, datapoints, on_done
        )
        # This accessor stores metadata only; data points are unsupported.
        _raise_unsupported()

    def insert_downsampled_points_async(self, metric, datapoints, on_done=None):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).insert_downsampled_points_async(
            metric, datapoints, on_done
        )
        _raise_unsupported()

    def drop_all_metrics(self, *args, **kwargs):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).drop_all_metrics(*args, **kwargs)
        # Drop indices.
        self.client.indices.delete("%s*" % self._index_prefix)
    def create_metric(self, metric):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).create_metric(metric)
        index_name = self.get_index(metric)
        # ignore=409: creating an already existing document is a no-op.
        self.client.create(
            index=index_name,
            doc_type=INDEX_DOC_TYPE,
            id=metric.id,
            body=document_from_metric(metric),
            ignore=409,
        )

    def update_metric(self, name, updated_metadata):
        """See bg_accessor.Accessor."""
        super(_ElasticSearchAccessor, self).update_metric(name, updated_metadata)
        name = bg_accessor.sanitize_metric_name(name)
        metric = self.get_metric(name)
        if metric is None:
            raise InvalidArgumentError("Unknown metric '%s'" % name)
        # Re-create the document with the new metadata; created_on and
        # read_on are carried over from the existing metric.
        updated_metric = self.make_metric(
            name,
            updated_metadata,
            created_on=metric.created_on,
            updated_on=datetime.datetime.now(),
            read_on=metric.read_on
        )
        self.create_metric(updated_metric)
    def delete_metric(self, name):
        """Delete every document whose exact name matches *name*."""
        name = bg_accessor.sanitize_metric_name(name)
        query = self._create_search_query() \
            .filter('term', name=name)
        log.debug(json.dumps(query.to_dict()))
        query.delete()

    def delete_directory(self, name):
        """Delete every metric stored under the directory *name*."""
        components = _components_from_name(name)
        depth = _get_depth_from_components(components)
        query = self._create_search_query()
        # Match each path component of the directory, then anything at the
        # same depth or deeper (i.e. all metrics below it).
        for index, component in enumerate(components):
            query = query.filter('term', **{"p%d" % index: component})
        query = query.filter('range', depth={'gte': depth})
        log.debug(json.dumps(query.to_dict()))
        query.delete()
    # TODO (t.chataigner) Add unittest.
    def _search_metrics_from_components(self, glob, components):
        """Build a Search for *components*; returns (has_globstar, search)."""
        search = self._create_search_query().source('name')

        # Handle glob with globstar(s): fall back to one regexp on the
        # whole dotted name.
        globstars = components.count(bg_glob.Globstar())
        if globstars:
            name_regexp = "\\.".join([_parse_regexp_component(c) for c in components])
            return True, search.filter('regexp', **{"name": name_regexp})

        # TODO (t.chataigner) Handle fully defined prefix (like a.b.c.*.*.*)
        # with a wildcard on name.

        # Handle fully defined glob: a single exact-name term filter.
        if self.__glob_parser.is_fully_defined(components):
            return False, search.filter(
                'term', **{"name": bg_accessor.sanitize_metric_name(glob)})

        # Handle all other use cases: one filter per path component "p<i>".
        for i, c in enumerate(components):
            if len(c) == 1:
                filter_type, value = parse_simple_component(c)
            else:
                filter_type, value = parse_complex_component(c)

            if filter_type:
                search = search.filter(filter_type, **{"p%d" % i: value})
        return False, search
    def glob_metric_names(self, glob):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).glob_metric_names(glob)
        if glob == "":
            return []

        components = self.__glob_parser.parse(glob)
        glob_depth = _get_depth_from_components(components)
        has_globstar, search = self._search_metrics_from_components(glob, components)

        if has_globstar:
            # A globstar matches any suffix, hence any depth >= the glob's.
            search = search.filter('range', depth={'gte': glob_depth})
        else:
            search = search.filter('term', depth=glob_depth)
        search = search.extra(from_=0, size=MAX_QUERY_SIZE)

        # TODO (t.chataigner) try to move the sort in the ES search and return a generator.
        log.debug(json.dumps(search.to_dict()))
        results = [h.name for h in search.execute()]
        results.sort()
        return iter(results)
    def glob_directory_names(self, glob):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).glob_directory_names(glob)
        if glob == "":
            return []

        components = self.__glob_parser.parse(glob)
        # There are no "directory" documents, only "metric" documents. Hence appending the
        # AnySequence after the provided glob: we search for metrics under that path.
        has_globstar, search = self._search_metrics_from_components(
            glob,
            components + [[bg_glob.AnySequence()]]
        )
        if has_globstar:
            # TODO (t.chataigner) Add a log or raise exception.
            return []

        glob_depth = _get_depth_from_components(components)
        # Use (glob_depth + 1) to filter only directories and
        # exclude metrics whose depth is glob_depth.
        search = search.filter('range', depth={'gte': glob_depth + 1})
        search = search.extra(from_=0, size=0)  # Do not return metrics.

        # Aggregate the distinct values of the path component right below
        # the glob: these are the candidate directory names.
        search.aggs.bucket('distinct_dirs', 'terms', field="p%d" % glob_depth, size=MAX_QUERY_SIZE)

        log.debug(json.dumps(search.to_dict()))
        response = search.execute()

        # This may not be the same behavior as other drivers.
        # It returns the glob with the list of possible last component for a directory.
        # It doesn't return the list of fully defined directory names.
        if "distinct_dirs" not in response.aggregations:
            # This happend when there is no index to search for the query.
            return []
        buckets = response.aggregations.distinct_dirs.buckets
        if glob_depth == 0:
            results = [b.key for b in buckets]
        else:
            glob_base = glob.rsplit('.', 1)[0]
            results = ["%s.%s" % (glob_base, b.key) for b in buckets]
        results.sort()
        return iter(results)
    def has_metric(self, metric_name):
        """See bg_accessor.Accessor."""
        super(_ElasticSearchAccessor, self).has_metric(metric_name)
        return self.get_metric(metric_name) is not None

    def get_metric(self, metric_name):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).get_metric(metric_name)
        metric_name = bg_accessor.sanitize_metric_name(metric_name)
        document = self.__get_document(metric_name)
        if document is None:
            return None
        return self._document_to_metric(document)

    def _document_to_metric(self, document):
        """Build a Metric from an ElasticSearch hit."""
        metadata = bg_accessor.MetricMetadata.from_string_dict(
            document.config.to_dict()
        )
        # TODO: Have a look at dsl doc to avoid parsing strings to dates
        # https://github.com/elastic/elasticsearch-dsl-py/blob/master/docs/persistence.rst
        return self.make_metric(
            document.name,
            metadata,
            created_on=ttls.str_to_datetime(document.created_on),
            updated_on=ttls.str_to_datetime(document.updated_on),
            read_on=ttls.str_to_datetime(document.read_on)
        )
    def __get_document(self, metric_name):
        """Return the most recently updated document for a name, or None."""
        search = self._create_search_query() \
            .source(['uuid', 'name', 'config', 'created_on', 'updated_on', 'read_on']) \
            .filter('term', name=metric_name) \
            .sort({'updated_on': {'order': 'desc'}})

        log.debug(json.dumps(search.to_dict()))
        # Only the newest hit is needed.
        response = search[:1].execute()

        if response is None or response.hits.total == 0:
            return None
        return response.hits[0]
    def fetch_points(self, metric, time_start, time_end, stage, aggregated=True):
        """See the real Accessor for a description."""
        super(_ElasticSearchAccessor, self).fetch_points(
            metric, time_start, time_end, stage
        )
        # Data points are not stored here; only refresh the read_on marker
        # and return an empty series.
        self.__update_read_on_on_need(metric)
        return []
def touch_metric(self, metric):
"""See the real Accessor for a description."""
super(_ElasticSearchAccessor, self).touch_metric(metric)
metric_name = bg_accessor.sanitize_metric_name(metric.name)
document = self.__get_document(metric_name)
if not document.updated_on:
delta = self.__updated_on_ttl_sec + 1
else:
updated_on_timestamp = ttls.str_to_timestamp(document.updated_on)
delta = int(time.time()) - int(updated_on_timestamp)
if delta >= self.__updated_on_ttl_sec:
self.__touch_document(document)
    def __touch_document(self, document):
        """Refresh updated_on, re-creating the document if the index rolled."""
        metric = self._document_to_metric(document)
        new_index = self.get_index(metric)
        if new_index == document.meta.index:
            # Still in today's index: update the document in place.
            self.__update_existing_document(document)
        else:
            # The dated index rolled over: write a fresh document there.
            self.create_metric(metric)

    def __update_existing_document(self, document):
        """Set updated_on to now on the document, in its own index."""
        index = document.meta.index
        document_id = document.uuid
        updated_on = datetime.datetime.now()
        data = {
            "doc": {
                "updated_on": updated_on
            }
        }
        self.__update_document(data, index, document_id)
        # Keep the in-memory copy consistent with what was written.
        document.updated_on = ttls.datetime_to_str(updated_on)
def repair(self, *args, **kwargs):
"""See the real Accessor for a description."""
super(_ElasticSearchAccessor, self).repair(*args, **kwargs)
callback_on_progress = kwargs.pop("callback_on_progress")
def _callback(m, i, t):
callback_on_progress(i, t)
# TODO Implements the function
log.warn("%s is not implemented" % self.repair.__name__)
self.map(_callback, *args, **kwargs)
def clean(self, *args, **kwargs):
"""See bg_accessor.Accessor."""
super(_ElasticSearchAccessor, self).clean(*args, **kwargs)
callback_on_progress = kwargs.pop("callback_on_progress")
kwargs.pop("max_age", None)
def _callback(m, i, t):
callback_on_progress(i, t)
# TODO Implements the function
log.warn("%s is not implemented" % self.clean.__name__)
self.map(_callback, *args, **kwargs)
def map(
self, callback, start_key=None, end_key=None, shard=0, nshards=1, errback=None
):
"""See bg_accessor.Accessor."""
super(_ElasticSearchAccessor, self).map(
callback, start_key, end_key, shard, nshards, errback
)
# TODO: implement
log.warn("map is not implemented")
metrics = []
total = len(metrics)
for i, metric in enumerate(metrics):
callback(metric, i, total)
    def __update_read_on_on_need(self, metric):
        """Refresh read_on when it is missing or older than the read TTL."""
        if not metric.read_on:
            delta = self.__read_on_ttl_sec + 1  # force an update
        else:
            read_on_timestamp = ttls.str_to_timestamp(metric.read_on)
            delta = int(time.time()) - int(read_on_timestamp)

        if delta >= self.__read_on_ttl_sec:
            # TODO: execute asynchronously
            self.__update_read_on(metric)

    def __update_read_on(self, metric):
        """Write now() into the metric document's read_on field."""
        # TODO: state if we should move the document from its index to
        # the current (today) index
        data = {
            "doc": {
                "read_on": datetime.datetime.now()
            }
        }
        # NOTE(review): get_index currently ignores its argument, so passing
        # metric.name instead of the metric works — confirm if that changes.
        index = self.get_index(metric.name)
        self.__update_document(data, index, metric.id)

    def __update_document(self, data, index, document_id):
        """Partially update document_id in index; missing docs are ignored."""
        self.client.update(
            index=index,
            doc_type=INDEX_DOC_TYPE,
            id=document_id,
            body=data,
            ignore=404  # document may have been deleted concurrently
        )
    def _create_search_query(self):
        """Return a Search bound to the client, over all prefixed indices."""
        return elasticsearch_dsl.Search() \
            .using(self.client) \
            .index("%s*" % self._index_prefix)
def build(*args, **kwargs):
    """Return a bg_accessor.Accessor using ElasticSearch.

    Arguments are forwarded verbatim to _ElasticSearchAccessor.__init__.
    """
    return _ElasticSearchAccessor(*args, **kwargs)
| StarcoderdataPython |
4842868 | import pymongo.results
import pytest
from aiohttp.test_utils import make_mocked_coro
import virtool.db.core
import virtool.utils
@pytest.fixture
def create_test_collection(mocker, test_motor):
    """Factory fixture building a virtool Collection over the test Motor DB."""
    def func(
        name="samples", projection=None, silent=False
    ) -> virtool.db.core.Collection:
        # The processor always resolves to a fixed document so tests can
        # assert against a known value; the enqueue stub records dispatches.
        processor = make_mocked_coro(return_value={"id": "foo", "mock": True})
        return virtool.db.core.Collection(
            name, test_motor[name], mocker.stub(), processor, projection, silent
        )

    return func
class TestCollection:
    """Tests for virtool.db.core.Collection change dispatching and deletes."""

    @pytest.mark.parametrize("silent", [True, False])
    async def test_enqueue_change(self, silent, create_test_collection):
        """
        Test that `dispatch_conditionally` dispatches a message when not suppressed by the `silent`
        parameter.
        """
        collection = create_test_collection(silent=silent)
        collection.enqueue_change("update", "foo", "bar")

        if silent:
            # A silent collection must not enqueue anything.
            assert collection._enqueue_change.called is False
            return

        collection._enqueue_change.assert_called_with(
            "samples", "update", ("foo", "bar")
        )

    @pytest.mark.parametrize("attr_silent", [True, False])
    @pytest.mark.parametrize("param_silent", [True, False])
    async def test_delete_many(
        self, attr_silent, param_silent, test_motor, create_test_collection
    ):
        # Deletion events can be silenced collection-wide (attr) or per-call
        # (param); either flag alone suppresses the dispatch.
        collection = create_test_collection(silent=attr_silent)
        await test_motor.samples.insert_many(
            [
                {"_id": "foo", "tag": 1},
                {"_id": "bar", "tag": 2},
                {"_id": "baz", "tag": 1},
            ]
        )
        delete_result = await collection.delete_many({"tag": 1}, silent=param_silent)
        assert isinstance(delete_result, pymongo.results.DeleteResult)
        assert delete_result.deleted_count == 2
        if not (attr_silent or param_silent):
            # Deleted IDs are dispatched in sorted order.
            collection._enqueue_change.assert_called_with(
                "samples", "delete", ("baz", "foo")
            )
        assert await test_motor.samples.find().to_list(None) == [
            {"_id": "bar", "tag": 2}
        ]

    @pytest.mark.parametrize("attr_silent", [True, False])
    @pytest.mark.parametrize("param_silent", [True, False])
    async def test_delete_one(
        self, attr_silent, param_silent, test_motor, create_test_collection
    ):
        # delete_one removes only the first matching document ("foo").
        collection = create_test_collection(silent=attr_silent)
        await test_motor.samples.insert_many(
            [
                {"_id": "foo", "tag": 1},
                {"_id": "bar", "tag": 2},
                {"_id": "baz", "tag": 1},
            ]
        )
        delete_result = await collection.delete_one({"tag": 1}, silent=param_silent)
        assert isinstance(delete_result, pymongo.results.DeleteResult)
        assert delete_result.deleted_count == 1
        if not (attr_silent or param_silent):
            collection._enqueue_change.assert_called_with("samples", "delete", ("foo",))
        assert await test_motor.samples.find().to_list(None) == [
            {"_id": "bar", "tag": 2},
            {"_id": "baz", "tag": 1},
        ]
| StarcoderdataPython |
1642697 | import pytest
from brownie import interface, Contract
from utils.voting import create_vote
from utils.config import (lido_dao_voting_address,
lido_dao_agent_address,
balancer_deployed_manager,
lido_dao_token_manager_address,
ldo_token_address)
from utils.evm_script import encode_call_script
def test_erc_20_recover_via_voting(
    ldo_holder,
    rewards_contract,
    dao_treasury,
    helpers,
    accounts,
    dao_voting,
    ldo_token,
    stranger
):
    """End-to-end: recover LDO stuck on the rewards contract via a DAO vote."""
    agent_contract = interface.Agent(lido_dao_agent_address)

    # Fund the rewards contract with 1 LDO so there is something to recover.
    ldo_token.transfer(rewards_contract, 10**18, {"from": dao_treasury})
    balance = ldo_token.balanceOf(rewards_contract)
    assert balance > 0

    # Encode recover_erc20(token, amount, recipient) and wrap it so the DAO
    # agent forwards the call to the rewards contract.
    encoded_recover_calldata = rewards_contract.recover_erc20.encode_input(ldo_token_address, balance, stranger)
    recover_script = encode_call_script([(rewards_contract.address, encoded_recover_calldata)])
    forwrded_script = encode_call_script([(lido_dao_agent_address, agent_contract.forward.encode_input(recover_script))])

    # Create the vote, then pass and execute it.
    (vote_id, _) = create_vote(
        voting=interface.Voting(lido_dao_voting_address),
        token_manager=interface.TokenManager(lido_dao_token_manager_address),
        vote_desc='',
        evm_script=forwrded_script,
        tx_params={"from": ldo_holder})
    helpers.execute_vote(vote_id=vote_id,
                         accounts=accounts,
                         dao_voting=dao_voting)

    # Tokens must have moved from the rewards contract to the stranger.
    assert ldo_token.balanceOf(rewards_contract) == 0
    assert ldo_token.balanceOf(stranger) == balance
| StarcoderdataPython |
1613208 | <reponame>suvit/speedydeploy
from webserver import * # XXX remove this file | StarcoderdataPython |
107561 | N, K = map(int, input().split())
A = list(map(int, input().split()))

# bcs[j] = number of values in A with bit j set (bits 0..40 considered).
bcs = [0] * 41
for i in range(N):
    a = A[i]
    for j in range(41):
        if a & (1 << j) != 0:
            bcs[j] += 1

# Greedily build X <= K from the high bit down: taking bit i pays off when
# more values have it clear than set (XOR with X flips that bit in each A[i]).
X = 0
for i in range(40, -1, -1):
    if bcs[i] >= N - bcs[i]:
        continue
    t = 1 << i
    if X + t <= K:
        X += t

# Sum of A[i] XOR X over all values.
result = 0
for i in range(N):
    result += X ^ A[i]
print(result)
| StarcoderdataPython |
110151 | <filename>introduccion/time.py
import pygame
import sys
pygame.init()

# Window dimensions.
width = 500
height = 400
surface = pygame.display.set_mode((width, height))
pygame.display.set_caption('Tiempo')

white = (255, 255, 255)

# Main loop: exit cleanly on window close, print elapsed whole seconds.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    time = pygame.time.get_ticks() // 1000  # ms since init -> segundos
    print(time)
    surface.fill(white)
    pygame.display.update()
119598 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm, t
np.random.seed(1)

#%% Parameters: sample size and the true distribution N(mu, sd^2).
N = 1_0
mu = 5
sd = 2

#%% Draw one sample.
x = np.random.randn(N)*sd + mu

#%% Z-CI: 95% confidence interval using standard-normal quantiles.
mu_hat = x.mean()
sigma_hat = x.std(ddof=1)  # sample standard deviation
z_left = norm.ppf(0.0250)
z_right = norm.ppf(0.9750)
left_ci = mu_hat + z_left*sigma_hat/np.sqrt(N)
right_ci = mu_hat + z_right*sigma_hat/np.sqrt(N)
print(f"Mean: {mu_hat}")
print(f"Confidence Interval is [{left_ci}, {right_ci}]")

#%% t-CI: 95% interval using Student-t quantiles (appropriate for small N).
mu_hat = x.mean()
sigma_hat = x.std(ddof=1)
t_left = t.ppf(0.0250, df=N-1)
t_right = t.ppf(0.9750, df=N-1)
left_ci = mu_hat + t_left*sigma_hat/np.sqrt(N)
right_ci = mu_hat + t_right*sigma_hat/np.sqrt(N)
print(f"Mean: {mu_hat}")
print(f"Confidence Interval is [{left_ci}, {right_ci}]")

#%%
def experiment():
    """Draw a fresh sample; return True when the 95% t-CI covers the true mean."""
    x = np.random.randn(N) * sd + mu
    mu_hat = x.mean()
    sigma_hat = x.std(ddof=1)
    t_left = t.ppf(0.0250, df=N - 1)
    t_right = t.ppf(0.9750, df=N - 1)
    left_ci = mu_hat + t_left * sigma_hat / np.sqrt(N)
    right_ci = mu_hat + t_right * sigma_hat / np.sqrt(N)
    return mu<right_ci and mu>left_ci
def multi_experiment(n):
    """Fraction of *n* runs of `experiment` whose interval covers the mean."""
    return np.mean([experiment() for _ in range(n)])
#%% Empirical coverage of the 95% t-interval over many repetitions.
result = multi_experiment(10_000)
print(result)
| StarcoderdataPython |
3385879 | <filename>Heap/BinaryHeap.py
# coding=utf-8
"""Min Binary Heap Python implementation."""
class MinBinaryHeap:
    """Array-backed min binary heap.

    The smallest key sits at index 0; the children of node i live at
    2*i + 1 and 2*i + 2.
    """

    def __init__(self):
        # Underlying array satisfying heap[parent(i)] <= heap[i].
        self.heap = []

    def parent(self, i):
        """Get index of parent of node i."""
        return (i - 1) // 2

    def left_child(self, i):
        """Get index of left child of node i."""
        return 2 * i + 1

    def right_child(self, i):
        """Get index of right child of node i."""
        return 2 * i + 2

    def _sift_up(self, i):
        """Bubble node i up until the heap property holds again."""
        while i != 0 and self.heap[self.parent(i)] > self.heap[i]:
            p = self.parent(i)
            self.heap[i], self.heap[p] = self.heap[p], self.heap[i]
            i = p

    def insert_key(self, k):
        """Insert key k into the heap."""
        self.heap.append(k)
        self._sift_up(len(self.heap) - 1)

    def decrease_key(self, i, new_k):
        """Decrease node i to value new_k (new_k must be <= heap[i])."""
        self.heap[i] = new_k
        # BUG FIX: the original compared the parent with itself
        # (heap[parent(i)] > heap[parent(i)]), which is always False, so a
        # decreased key never bubbled up. Compare parent vs current instead.
        self._sift_up(i)

    def extract_min(self):
        """Remove and return the smallest key, or -1 if the heap is empty."""
        if not len(self.heap):
            return -1
        elif len(self.heap) == 1:
            return self.heap.pop(0)
        else:
            root = self.heap[0]
            # Move the last element to the root, then restore the property.
            self.heap[0] = self.heap.pop(-1)
            self.heapify(0)
            return root

    def heapify(self, i):
        """Heapify a subtree with root at index i (sift down)."""
        left = self.left_child(i)
        right = self.right_child(i)
        smallest = i
        if left < len(self.heap) and self.heap[left] < self.heap[i]:
            smallest = left
        if right < len(self.heap) and self.heap[right] < self.heap[smallest]:
            smallest = right
        if smallest != i:
            self.heap[i], self.heap[smallest] = self.heap[smallest], self.heap[i]
            self.heapify(smallest)

    def delete_key(self, i):
        """Delete key at index i."""
        # Decrease it below the current minimum, then pop that minimum.
        self.decrease_key(i, self.heap[0] - 1)
        self.extract_min()

    def get_min(self):
        """Get smallest element in heap."""
        return self.heap[0]
if __name__ == "__main__":
    # Small demo exercising insert/delete/extract/decrease operations.
    mbh = MinBinaryHeap()
    mbh.insert_key(3)
    mbh.insert_key(2)
    mbh.delete_key(1)
    mbh.insert_key(15)
    mbh.insert_key(5)
    mbh.insert_key(4)
    mbh.insert_key(45)
    print(mbh.extract_min())
    print(mbh.get_min())
    mbh.decrease_key(2, 1)
    print(mbh.get_min())
| StarcoderdataPython |
1713751 | <reponame>bletourmy/django-terms
# coding: utf-8
from __future__ import unicode_literals
import sys
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db.models import Model, CharField, TextField, BooleanField
from django.utils.translation import ugettext_lazy as _
from .managers import TermManager, CACHE_KEYS
def python_2_unicode_compatible(klass):
# Taken from django.utils.encoding
PY3 = sys.version_info[0] == 3
if not PY3:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
@python_2_unicode_compatible
class Term(Model):
name = CharField(
_('name'), max_length=100, unique=True, help_text=_(
'Variants of the name can be specified with a “|” separator '
'(e.g. “name|names|to name”).'))
case_sensitive = BooleanField(_('case sensitive'), default=False)
definition = TextField(_('definition'), blank=True,
help_text=_('Accepts HTML tags.'))
url = CharField(_('link'), max_length=200, blank=True,
help_text=_('Address to which the term will redirect '
'(instead of redirecting to the definition).'))
objects = TermManager()
class Meta(object):
verbose_name = _('term')
verbose_name_plural = _('terms')
ordering = ('name',)
def __str__(self):
return self.original_name
def save(self, *args, **kwargs):
cache.delete_many(CACHE_KEYS)
super(Term, self).save(*args, **kwargs)
def get_absolute_url(self):
if self.url:
return self.url
return reverse('term', kwargs={'pk': self.pk})
def name_variants(self, variant_slice=slice(0, None)):
return self.name.replace('&', '&').split('|')[variant_slice]
@property
def original_name(self):
return self.name_variants(0)
| StarcoderdataPython |
1788782 | <reponame>ForrestPi/VAEGAN
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from model.modules import ConvBNLReLU, UpsampleNearestCBLR
class Encoder(nn.Module):
    """Convolutional encoder mapping images to Gaussian latent parameters.

    Five stride-2 Conv-BN-LeakyReLU stages downsample the input; the
    flattened features feed two linear heads producing mu and logvar.
    """

    def __init__(self, nc, fmaps, latent_variable_size):
        super().__init__()
        self.nc = nc  # number of input channels
        self.fmaps = fmaps  # base feature-map count
        self.latent_variable_size = latent_variable_size

        self.layer1 = ConvBNLReLU(nc, fmaps, 4, 2, 1)
        self.layer2 = ConvBNLReLU(fmaps, fmaps * 2, 4, 2, 1)
        self.layer3 = ConvBNLReLU(fmaps * 2, fmaps * 4, 4, 2, 1)
        self.layer4 = ConvBNLReLU(fmaps * 4, fmaps * 8, 4, 2, 1)
        self.layer5 = ConvBNLReLU(fmaps * 8, fmaps * 8, 4, 2, 1)

        # The linear heads assume a 4x4 spatial map after the five stride-2
        # stages, i.e. 128x128 inputs — TODO confirm against callers.
        self.fc1 = nn.Linear(fmaps * 8 * 4 * 4, latent_variable_size)  # mu
        self.fc2 = nn.Linear(fmaps * 8 * 4 * 4, latent_variable_size)  # logvar

    def forward(self, x):
        """Return (mu, logvar) for a batch of images x."""
        h1 = self.layer1(x)
        h2 = self.layer2(h1)
        h3 = self.layer3(h2)
        h4 = self.layer4(h3)
        h5 = self.layer5(h4)
        h5 = h5.view(x.shape[0], -1)  # flatten per sample
        mu = self.fc1(h5)
        logvar = self.fc2(h5)
        return mu, logvar
class Decoder(nn.Module):
    """Decoder mirroring Encoder: latent vector -> image via 5 upsampling stages."""

    def __init__(self, latent_variable_size, fmaps, nc, add_sigmoid=False):
        super().__init__()
        self.latent_variable_size = latent_variable_size
        self.fmaps = fmaps
        self.nc = nc  # number of output channels

        # Project the latent vector to a (fmaps*8*2, 4, 4) feature map.
        self.fc = nn.Sequential(
            nn.Linear(latent_variable_size, fmaps*8*2*4*4),
            nn.ReLU()
        )

        # Each stage upsamples and halves the channel count.
        tch = fmaps * 8 * 2
        self.layer5 = UpsampleNearestCBLR(tch, tch//2, 3, 1)
        tch //= 2
        self.layer4 = UpsampleNearestCBLR(tch, tch//2, 3, 1)
        tch //= 2
        self.layer3 = UpsampleNearestCBLR(tch, tch // 2, 3, 1)
        tch //= 2
        self.layer2 = UpsampleNearestCBLR(tch, tch // 2, 3, 1)
        tch //= 2
        self.layer1 = UpsampleNearestCBLR(tch, tch // 2, 3, 1)
        tch //= 2

        # 1x1 conv to the output channel count; the sigmoid is appended only
        # when the caller needs outputs in (0, 1) (e.g. for a BCE loss).
        self.out = nn.Sequential(
            nn.Conv2d(tch, nc, 1)
        )
        if add_sigmoid:
            self.out.add_module("sigmoid", nn.Sigmoid())

    def forward(self, z):
        """Decode a batch of latent vectors z into images."""
        h5 = self.fc(z)
        h5 = h5.view(z.shape[0], -1, 4, 4)  # un-flatten to 4x4 maps
        h4 = self.layer5(h5)
        h3 = self.layer4(h4)
        h2 = self.layer3(h3)
        h1 = self.layer2(h2)
        x = self.layer1(h1)
        x = self.out(x)
        return x
class VAE(nn.Module):
    """Variational autoencoder tying together ``Encoder`` and ``Decoder``."""

    def __init__(self, args):
        super().__init__()
        latent_variable_size = args.latent_variable_size
        fmaps = args.fmaps
        nc = args.nc
        # A sigmoid output layer is only needed for a BCE reconstruction loss.
        add_sigmoid = args.rec_loss == "bce"
        self.nc = nc
        self.fmaps = fmaps
        self.latent_variable_size = latent_variable_size
        self.encode = Encoder(nc, fmaps, latent_variable_size)
        self.decode = Decoder(latent_variable_size, fmaps, nc, add_sigmoid)

    def reparametrize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) via the reparametrization trick."""
        std = logvar.mul(0.5).exp()
        noise = torch.randn_like(std)
        return noise.mul(std).add_(mu)

    def get_latent_var(self, x):
        """Encode ``x`` and return one sampled latent vector per input."""
        mu, logvar = self.encode(x)
        return self.reparametrize(mu, logvar)

    def forward(self, x):
        """Return (reconstruction, mu, logvar) for input batch ``x``."""
        mu, logvar = self.encode(x)
        x_rec = self.decode(self.reparametrize(mu, logvar))
        return x_rec, mu, logvar

    def save_model(self, model_dir="../model", model_name="vae.pkl"):
        """Serialize the state dict to ``model_dir/model_name``."""
        torch.save(self.state_dict(), os.path.join(model_dir, model_name))

    def load_model(self, model_dir="../model", model_name="vae.pkl"):
        """Load a state dict previously written by :meth:`save_model`."""
        self.load_state_dict(torch.load(os.path.join(model_dir, model_name)))
13185 | <gh_stars>1-10
from Sender import Sender
from Receiver import Receiver
import scipy
import numpy as np
import scipy.io
import scipy.io.wavfile
import matplotlib.pyplot as plt
from scipy import signal
def readFromFile(path):
    """Return the entire contents of *path* as bytes.

    Uses a context manager so the file handle is closed even if the read
    raises (the original left the handle open on error).
    """
    with open(path, "rb") as file:
        return file.read()
def readWav(file_name) -> np.ndarray:
    """Load a WAV file; int16 samples are rescaled to float32 in [-1, 1)."""
    _rate, samples = scipy.io.wavfile.read(file_name)
    if samples.dtype != np.int16:
        # Already float (or another width): hand back the raw samples.
        return samples
    # 32768 == 2**15, the magnitude of the most negative int16 value.
    return samples.astype(np.float32, order='C') / 32768.0
# --- Offline demodulation experiment --------------------------------------
# Loads a recorded transmission plus a known-bad subset, demodulates the
# signal, and compares the decoded bits against the sender's reference bits.
testData = readWav('testbitsnopilots.wav')
subset = readWav('wrongbitstest.wav')
r = Receiver()
rate = 160  # samples per symbol -- kept for reference; not used below
corr = 235292  # sample index of the correlation peak -- TODO confirm
offset = r.findOffsetToFirstChange(testData)
truncated = r.truncateToTauS(testData, offset)
# Visual sanity checks: the window around the correlation peak, the known
# bad subset, and the truncated signal.
plt.plot(testData[corr - len(subset)//2:corr + len(subset)//2])
plt.show()
plt.plot(subset)
plt.show()
plt.plot(truncated)
plt.show()
demod = r.demodulate(truncated, 1/16, 1/40)
result = []
start = 0
for i in range(20):
    # Plot each 10*36*160-sample chunk of the truncated signal.
    plt.plot(truncated[start: start + 10 * 36 * 160])
    # BUG FIX: `plt.show` was written without call parentheses, so the
    # per-chunk plots were never displayed. (Dead debug assignments
    # `a = 5` / `a = 6` removed as well.)
    plt.show()
    #part_demod = r.demodulate(truncated[start: start + 10*36 * 160], 1/16, 1/40)
    #result.append(list(r.repdecode(part_demod, 10)))
    start = start + 10*36*160
print('result', result)
print(demod)
print(len(demod[1:]))
# BUG FIX: `repdecode` is not defined at module level; the commented-out
# code above uses it as a Receiver method, so call it on `r`.
print(r.repdecode(demod[1:], 10))
sender = Sender()
demod = r.repdecode(demod, 10)
expected = sender.getTestDataAsBits()
error_sum = np.sum(np.abs(expected - demod))
print('error sum', error_sum)
print('error weight', np.sum(expected - demod))
print('error percentage', error_sum / len(expected) * 100)
1686374 | import os
from fast_align.generate_alignments import generate_word_alignments_fast_align
from mgiza.generate_alignments import generate_word_alignments_mgiza
from SimAlign.generate_alignments import generate_word_alignments_simalign
from awesome.generate_alignments import generate_word_alignments_awesome
from typing import Optional, List
from tokenization.conll2text import conll2text
from tokenization.utils import count_lines
from projection.annotation_proyection import dataset_projection
import argparse
def generate_alignments(
    source_train: Optional[str],
    source_dev: Optional[str],
    source_test: Optional[str],
    target_train: Optional[str],
    target_dev: Optional[str],
    target_test: Optional[str],
    source_augmentation: Optional[str],
    target_augmentation: Optional[str],
    output_dir: str,
    output_name: str,
    do_fastalign: bool = False,
    do_mgiza: bool = False,
    do_simalign: bool = True,
    do_awesome: bool = False,
    remove_awesome_model: bool = True,
    awesome_model_path: str = None,
) -> None:
    """
    Generate word alignments for the given datasets.
    :param str source_train: Path to the source language training dataset. A txt file, one sentence per line.
    :param str source_dev: Path to the source language development dataset. A txt file, one sentence per line.
    :param str source_test: Path to the source language test dataset. A txt file, one sentence per line.
    :param str target_train: Path to the target language training dataset. A txt file, one sentence per line.
    :param str target_dev: Path to the target language development dataset. A txt file, one sentence per line.
    :param str target_test: Path to the target language test dataset. A txt file, one sentence per line.
    :param str source_augmentation: Path to the source language augmentation dataset. A txt file, one sentence per line.
    :param str target_augmentation: Path to the target language augmentation dataset. A txt file, one sentence per line.
    :param str output_dir: Path to the output directory.
    :param str output_name: Name of the output files
    :param bool do_fastalign: Whether to generate word alignments with fastalign.
    :param bool do_mgiza: Whether to generate word alignments with mgiza.
    :param bool do_simalign: Whether to generate word alignments with simalign.
    :param bool do_awesome: Whether to generate word alignments with awesome.
    :param bool remove_awesome_model: Whether to remove the trained awesome model after the alignment generation.
    :param str awesome_model_path: Path to a pretrained awesome model.
    """
    # 1) Sanity checks: at least one split must be provided, and every
    # provided source split must be paired with the matching target split.
    assert source_train or source_dev or source_test, f"Nothing to do"
    assert target_train or target_dev or target_test, f"Nothing to do"
    assert (source_train is not None and target_train is not None) or (
        source_train is None and target_train is None
    ), f"Source train: {source_train}. Target train: {target_train}"
    assert (source_dev is not None and target_dev is not None) or (
        source_dev is None and target_dev is None
    ), f"Source dev: {source_dev}. Target dev: {target_dev}"
    assert (source_test is not None and target_test is not None) or (
        source_test is None and target_test is None
    ), f"Source test: {source_test}. Target test: {target_test}"
    assert (source_augmentation is not None and target_augmentation is not None) or (
        source_augmentation is None and target_augmentation is None
    )
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # 2) Collect the splits that are present, keeping the source and target
    # lists index-aligned (train, then dev, then test).
    source_paths: List[str] = []
    target_paths: List[str] = []
    if source_train:
        source_paths.append(source_train)
        target_paths.append(target_train)
    if source_dev:
        source_paths.append(source_dev)
        target_paths.append(target_dev)
    if source_test:
        source_paths.append(source_test)
        target_paths.append(target_test)
    # 3) Run each requested aligner; output file names encode the aligner
    # and the split so downstream code can find them.
    if do_mgiza:
        output_names = []
        if source_train:
            output_names.append(output_name + ".mgiza.train")
        if source_dev:
            output_names.append(output_name + ".mgiza.dev")
        if source_test:
            output_names.append(output_name + ".mgiza.test")
        print(
            f"Generate word alignments Mgiza.\n"
            f"Source paths: {source_paths}.\n"
            f"Target paths: {target_paths}.\n"
            f"source_parallel_corpus: {source_augmentation}.\n"
            f"target_parallel_corpus: {target_augmentation}.\n"
            f"Output names: {output_names}.\n"
            f"Output_dir: {output_dir}.\n"
        )
        generate_word_alignments_mgiza(
            source_paths=source_paths,
            target_paths=target_paths,
            source_parallel_corpus=[source_augmentation]
            if source_augmentation
            else None,
            target_parallel_corpus=[target_augmentation]
            if target_augmentation
            else None,
            output_names=output_names,
            output_dir=output_dir,
        )
    if do_fastalign:
        output_names = []
        if source_train:
            output_names.append(output_name + ".fast_align.train")
        if source_dev:
            output_names.append(output_name + ".fast_align.dev")
        if source_test:
            output_names.append(output_name + ".fast_align.test")
        print(
            f"Generate word alignments Fast Align.\n"
            f"Source paths: {source_paths}.\n"
            f"Target paths: {target_paths}.\n"
            f"source_parallel_corpus: {source_augmentation}.\n"
            f"target_parallel_corpus: {target_augmentation}.\n"
            f"Output names: {output_names}.\n"
            f"Output_dir: {output_dir}.\n"
        )
        generate_word_alignments_fast_align(
            source_paths=source_paths,
            target_paths=target_paths,
            source_parallel_corpus=[source_augmentation]
            if source_augmentation
            else None,
            target_parallel_corpus=[target_augmentation]
            if target_augmentation
            else None,
            output_names=output_names,
            output_dir=output_dir,
        )
    # SimAlign runs per split (no shared training step), so each split is
    # aligned with its own call.
    if do_simalign:
        if source_train and target_train:
            print(
                f"Generate word alignments SimAlign. "
                f"source_file: {source_train}. "
                f"target_file: {target_train}. "
                f"output: {os.path.join(output_dir, f'{output_name}.simalign.train')}"
            )
            generate_word_alignments_simalign(
                source_file=source_train,
                target_file=target_train,
                output=os.path.join(output_dir, f"{output_name}.simalign.train"),
            )
        if source_dev and target_dev:
            print(
                f"Generate word alignments SimAlign. "
                f"source_file: {source_dev}. "
                f"target_file: {target_dev}. "
                f"output: {os.path.join(output_dir, f'{output_name}.simalign.dev')}"
            )
            generate_word_alignments_simalign(
                source_file=source_dev,
                target_file=target_dev,
                output=os.path.join(output_dir, f"{output_name}.simalign.dev"),
            )
        if source_test and target_test:
            print(
                f"Generate word alignments SimAlign. "
                f"source_file: {source_test}. "
                f"target_file: {target_test}. "
                f"output: {os.path.join(output_dir, f'{output_name}.simalign.test')}"
            )
            generate_word_alignments_simalign(
                source_file=source_test,
                target_file=target_test,
                output=os.path.join(output_dir, f"{output_name}.simalign.test"),
            )
    if do_awesome:
        output_names = []
        if source_train:
            output_names.append(output_name + ".awesome.train.talp")
        if source_dev:
            output_names.append(output_name + ".awesome.dev.talp")
        if source_test:
            output_names.append(output_name + ".awesome.test.talp")
        print(
            f"Generate word alignments awesome.\n"
            f"Source paths: {source_paths}.\n"
            f"Target paths: {target_paths}.\n"
            f"source_parallel_corpus: {source_augmentation}.\n"
            f"target_parallel_corpus: {target_augmentation}.\n"
            f"Output names: {output_names}.\n"
            f"Output_dir: {output_dir}.\n"
        )
        generate_word_alignments_awesome(
            source_paths=source_paths,
            target_paths=target_paths,
            source_parallel_corpus=[source_augmentation]
            if source_augmentation
            else None,
            target_parallel_corpus=[target_augmentation]
            if target_augmentation
            else None,
            output_names=output_names,
            output_dir=output_dir,
            # When a pretrained model is supplied, never delete its directory;
            # otherwise honor the caller's remove_awesome_model choice.
            remove_tmp_dir=False if awesome_model_path else remove_awesome_model,
            tmp_dir=awesome_model_path,
        )
def run_projection(
    source_train: Optional[str],
    source_dev: Optional[str],
    source_test: Optional[str],
    target_train: Optional[str],
    target_dev: Optional[str],
    target_test: Optional[str],
    source_augmentation: Optional[str],
    target_augmentation: Optional[str],
    output_dir: str,
    output_name: str,
    do_fastalign: bool = False,
    do_mgiza: bool = False,
    do_simalign: bool = True,
    do_awesome: bool = False,
    remove_awesome_model: bool = True,
    awesome_model_path: str = None,
) -> None:
    """
    Perform annotation projection for the given datasets.
    :param str source_train: Path to the source language training dataset. A tsv file.
    :param str source_dev: Path to the source language development dataset. A tsv file.
    :param str source_test: Path to the source language test dataset. A tsv file.
    :param str target_train: Path to the target language training dataset. A txt file, one sentence per line.
    :param str target_dev: Path to the target language development dataset. A txt file, one sentence per line.
    :param str target_test: Path to the target language test dataset. A txt file, one sentence per line.
    :param str source_augmentation: Path to the source language augmentation dataset. A txt file, one sentence per line.
    :param str target_augmentation: Path to the target language augmentation dataset. A txt file, one sentence per line.
    :param str output_dir: Path to the output directory.
    :param str output_name: Name of the output files
    :param bool do_fastalign: Whether to generate word alignments with fastalign.
    :param bool do_mgiza: Whether to generate word alignments with mgiza.
    :param bool do_simalign: Whether to generate word alignments with simalign.
    :param bool do_awesome: Whether to generate word alignments with awesome.
    :param bool remove_awesome_model: Whether to remove the trained awesome model after the alignment generation.
    :param str awesome_model_path: Path to a pretrained awesome model.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Sanity checks: every provided source split must be paired with the
    # matching target split, and at least one split must be present.
    assert source_train or source_dev or source_test, f"Nothing to do"
    assert target_train or target_dev or target_test, f"Nothing to do"
    assert (source_train is not None and target_train is not None) or (
        source_train is None and target_train is None
    ), f"Source train: {source_train}. Target train: {target_train}"
    assert (source_dev is not None and target_dev is not None) or (
        source_dev is None and target_dev is None
    ), f"Source dev: {source_dev}. Target dev: {target_dev}"
    assert (source_test is not None and target_test is not None) or (
        source_test is None and target_test is None
    ), f"Source test: {source_test}. Target test: {target_test}"
    assert (source_augmentation is not None and target_augmentation is not None) or (
        source_augmentation is None and target_augmentation is None
    )

    def _conll_to_txt(conll_path: Optional[str], target_path: Optional[str]) -> Optional[str]:
        """Extract the sentences of a CoNLL file into a .txt in output_dir and
        verify it lines up 1:1 with the target-language sentence file."""
        if not conll_path:
            return None
        txt_path = os.path.join(
            output_dir, os.path.basename(os.path.splitext(conll_path)[0]) + ".txt"
        )
        conll2text(input_path=conll_path, sentences_output_path=txt_path)
        lines_source = count_lines(input_path=txt_path)
        lines_target = count_lines(input_path=target_path)
        assert lines_source == lines_target, (
            f"The number of lines in the source and target files are different.\n"
            f"Source ({txt_path}): {lines_source}\n"
            f"Target ({target_path}): {lines_target}"
        )
        return txt_path

    source_train_txt = _conll_to_txt(source_train, target_train)
    source_dev_txt = _conll_to_txt(source_dev, target_dev)
    source_test_txt = _conll_to_txt(source_test, target_test)
    if source_augmentation:
        lines_source = count_lines(input_path=source_augmentation)
        lines_target = count_lines(input_path=target_augmentation)
        assert lines_source == lines_target, (
            f"The number of lines in the source and target files are different.\n"
            f"Source ({source_augmentation}): {lines_source}\n"
            f"Target ({target_augmentation}): {lines_target}"
        )
    generate_alignments(
        source_train=source_train_txt,
        target_train=target_train,
        source_dev=source_dev_txt,
        target_dev=target_dev,
        source_test=source_test_txt,
        target_test=target_test,
        source_augmentation=source_augmentation,
        target_augmentation=target_augmentation,
        output_dir=output_dir,
        output_name=output_name,
        do_fastalign=do_fastalign,
        do_mgiza=do_mgiza,
        do_simalign=do_simalign,
        do_awesome=do_awesome,
        remove_awesome_model=remove_awesome_model,
        awesome_model_path=awesome_model_path,
    )
    alignment_list = []
    if do_mgiza:
        alignment_list.append("mgiza")
    if do_fastalign:
        # BUG FIX: this used to append "fastalign", which matched neither the
        # "fast_align" dispatch branch below (so it always raised ValueError)
        # nor the ".fast_align." file names written by generate_alignments.
        alignment_list.append("fast_align")
    if do_simalign:
        alignment_list.append("simalign")
    if do_awesome:
        alignment_list.append("awesome")
    dataset_list = []
    if source_train:
        dataset_list.append("train")
    if source_dev:
        dataset_list.append("dev")
    if source_test:
        dataset_list.append("test")
    output_files: List[str] = []
    for alignment_method in alignment_list:
        for dataset_split in dataset_list:
            # Each aligner writes its .talp files under a different suffix.
            if alignment_method == "mgiza" or alignment_method == "fast_align":
                alignments_path = os.path.join(
                    output_dir,
                    f"{output_name}.{alignment_method}.{dataset_split}.grow_diag_final-and.talp",
                )
            elif alignment_method == "simalign":
                alignments_path = os.path.join(
                    output_dir,
                    f"{output_name}.{alignment_method}.{dataset_split}.itermax.talp",
                )
            elif alignment_method == "awesome":
                alignments_path = os.path.join(
                    output_dir,
                    f"{output_name}.{alignment_method}.{dataset_split}.talp",
                )
            else:
                raise ValueError(f"{alignment_method} not supported")
            if dataset_split == "train":
                source_dataset = source_train
                target_dataset = target_train
            elif dataset_split == "dev":
                source_dataset = source_dev
                target_dataset = target_dev
            elif dataset_split == "test":
                source_dataset = source_test
                target_dataset = target_test
            else:
                raise ValueError(f"{dataset_split} dataset split not supported")
            projection_output = os.path.join(
                output_dir, f"{output_name}.{alignment_method}.{dataset_split}.tsv"
            )
            dataset_projection(
                source_dataset=source_dataset,
                target_sentences=target_dataset,
                alignments_path=alignments_path,
                batch_size=10000,
                output_path=projection_output,
            )
            output_files.append(projection_output)
    # Clean up the intermediate .txt sentence files extracted above.
    for txt_path in (source_train_txt, source_dev_txt, source_test_txt):
        if txt_path:
            os.remove(txt_path)
    print("Done!")
    print("Output files:")
    print("\n".join(output_files))
    print("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate alignments for a given dataset."
)
parser.add_argument(
"--source_train",
default=None,
type=str,
help="Path to the source training file. TSV format",
)
parser.add_argument(
"--target_train",
default=None,
type=str,
help="Path to the target training file. A txt file with one sentence per line",
)
parser.add_argument(
"--source_dev",
default=None,
type=str,
help="Path to the source development file. TSV format",
)
parser.add_argument(
"--target_dev",
default=None,
type=str,
help="Path to the target development file. A txt file with one sentence per line",
)
parser.add_argument(
"--source_test",
default=None,
type=str,
help="Path to the source test file. TSV format",
)
parser.add_argument(
"--target_test",
default=None,
type=str,
help="Path to the target test file. A txt file with one sentence per line",
)
parser.add_argument(
"--source_augmentation",
default=None,
type=str,
help="Path to the source augmentation file. A txt file with one sentence per line",
)
parser.add_argument(
"--target_augmentation",
default=None,
type=str,
help="Path to the target augmentation file. A txt file with one sentence per line",
)
parser.add_argument(
"--output_dir",
type=str,
help="Path to the output directory",
)
parser.add_argument(
"--output_name",
type=str,
help="Name of the output file",
)
parser.add_argument(
"--do_mgiza",
action="store_true",
help="Whether to generate alignments using mgiza",
)
parser.add_argument(
"--do_fastalign",
action="store_true",
help="Whether to generate alignments using fast_align",
)
parser.add_argument(
"--do_simalign",
action="store_true",
help="Whether to generate alignments using simalign",
)
parser.add_argument(
"--do_awesome",
action="store_true",
help="Whether to generate alignments using awesome",
)
parser.add_argument(
"--remove_awesome_model",
action="store_true",
help="Whether to remove the trained awesome model after the alignment is generated",
)
parser.add_argument(
"--awesome_model_path",
default=None,
type=str,
help="If provided, the path to a pretrained awesome model",
)
args = parser.parse_args()
run_projection(
source_train=args.source_train,
target_train=args.target_train,
source_dev=args.source_dev,
target_dev=args.target_dev,
source_test=args.source_test,
target_test=args.target_test,
source_augmentation=args.source_augmentation,
target_augmentation=args.target_augmentation,
output_dir=args.output_dir,
output_name=args.output_name,
do_mgiza=args.do_mgiza,
do_fastalign=args.do_fastalign,
do_simalign=args.do_simalign,
do_awesome=args.do_awesome,
remove_awesome_model=args.remove_awesome_model,
awesome_model_path=args.awesome_model_path,
)
| StarcoderdataPython |
3313616 | <filename>maintcont.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
The controller bot for maintainer.py
Exactly one instance should be running of it. To check, use /whois maintcont on irc.freenode.net
This script requires the Python IRC library http://python-irclib.sourceforge.net/
Warning: experimental software, use at your own risk
"""
__version__ = '$Id: f6562e1d518a0140f74d8a04add8d56d186fbfba $'
# Author: Balasyum
# http://hu.wikipedia.org/wiki/User:Balasyum
import externals
externals.check_setup('irclib')
from ircbot import SingleServerIRCBot
from irclib import nm_to_n
import threading
import time
import math
tasks = 'rciw|censure'  # default '|'-separated task list distributed to workers
projtasks = {}  # per-project task overrides: {group: 'task1|task2'} (used by retasker)
mainters = []  # connected workers as [nick, group] pairs
activity = {}  # nick -> timestamp of the worker's last message
class MaintcontBot(SingleServerIRCBot):
    """IRC bot that registers maintainer workers and splits the task list
    between the workers of each group.

    NOTE(review): this file is Python 2 (print statements); it will not run
    under Python 3 without porting.
    """

    def __init__(self, nickname, server, port=6667):
        SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)

    def on_nicknameinuse(self, c, e):
        # Append underscores until a free nickname is found.
        c.nick(c.get_nickname() + "_")

    def on_welcome(self, c, e):
        # Start the periodic worker-liveness reporter as a daemon thread.
        t = threading.Thread(target=self.lister)
        t.setDaemon(True)
        t.start()

    def on_privmsg(self, c, e):
        # Protocol: "workerjoin <group>" registers a worker; "active" is a
        # keep-alive ping refreshing its activity timestamp.
        nick = nm_to_n(e.source())
        c = self.connection
        cmd = e.arguments()[0]
        do = cmd.split()
        if do[0] == "workerjoin":
            c.privmsg(nick, "accepted")
            mainters.append([nick, do[1]])
            activity[nick] = time.time()
            print "worker got, name:", nick, "job:", do[1]
            # Re-split the group's tasks now that it has one more worker.
            self.retasker(do[1])
        elif do[0] == "active":
            activity[nick] = time.time()

    def on_dccmsg(self, c, e):
        pass

    def on_dccchat(self, c, e):
        pass

    def lister(self):
        # Runs forever in a daemon thread: drops workers silent for >30s and
        # prints the current roster once a minute.
        while True:
            print
            print "worker list:"
            for mainter in mainters:
                if time.time() - activity[mainter[0]] > 30:
                    print "*", mainter[0], "has been removed"
                    # NOTE(review): removing from `mainters` while iterating
                    # it can skip the next entry -- confirm/fix when porting.
                    mainters.remove(mainter)
                    del activity[mainter[0]]
                    self.retasker(mainter[1])
                    continue
                print "mainter name:", mainter[0], "job:", mainter[1]
            print "--------------------"
            print
            time.sleep(1*60)

    def retasker(self, group, optask = ''):
        """Divide the group's task list evenly among its workers and send
        each worker its share as a "tasklist a|b|c" private message."""
        ingroup = 0
        for mainter in mainters:
            if mainter[1] == group:
                ingroup += 1
        if ingroup == 0:
            return
        # Project-specific task list if configured, otherwise the default.
        if group in projtasks:
            grt = projtasks[group]
        else:
            grt = tasks
        tpc = grt.split('|')
        # Tasks per worker. NOTE(review): Python 2 integer division here --
        # confirm rounding behaviour is as intended when porting.
        tpcn = round(len(tpc) / ingroup)
        i = 0
        for mainter in mainters:
            if mainter[1] != group:
                continue
            tts = '|'.join(tpc[int(round(i * tpcn)):int(round((i + 1) * tpcn))])
            # NOTE(review): a str is never == False, so this test is always
            # true; an empty-share check was probably intended (tts != '').
            if tts != False:
                self.connection.privmsg(mainter[0], "tasklist " + tts)
            i += 1
def main():
    """Connect the controller bot to freenode and run its event loop."""
    controller = MaintcontBot("maintcont", "irc.freenode.net")
    controller.start()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3274073 | <gh_stars>1-10
import psycopg2
import os
import json
test_data = '''
{
"received": [
{
"from": {
"display": "mail.example.com",
"reverse": "Unknown",
"ip": "10.0.0.2"
},
"by": "mailsrv.example.com",
"protocol": "ESMTP",
"ssl": "(version=TLS1_2 cipher=ECDHE-ECDSA-AES128-SHA bits=128/128)",
"spf": true,
"dkim": true,
"dmarc": false
},
{
"from": {
"display": "mail.example.com",
"reverse": "Unknown",
"ip": "10.0.0.1"
},
"by": "mailsrv.example.com",
"protocol": "ESMTP",
"ssl": "(version=TLS1_2 cipher=ECDHE-ECDSA-AES128-SHA bits=128/128)",
"spf": true,
"dkim": true,
"dmarc": false
}
],
"attach": ["fuzzy hash of attachment1", "attachment2"],
"pattern": ["service name 1", "service name 2"],
"from": "",
"reply-to": "",
"subject": "hash"
}
'''
def json_string2dict(mail_json_str):
    """Parse a JSON document describing an analysed mail into a dict."""
    return json.loads(mail_json_str)
def compare_data(mail_json_dict):
    """Annotate each indicator in *mail_json_dict* with its DB hit count.

    For every known field, queries the PostgreSQL database referenced by the
    DATABASE_URL environment variable for the number of distinct mails
    sharing the same value, and appends " (ヒット数:N)" to the field's value
    in place. Returns the mutated dict.
    """
    db_url = os.environ['DATABASE_URL']
    connection = psycopg2.connect(db_url, sslmode='require')
    try:
        cur = connection.cursor()
        try:
            def _annotate(value, query):
                # Run the parameterized count query for `value` and append
                # the hit count to the displayed string.
                cur.execute(query, (value,))
                results = cur.fetchall()
                return value + " (ヒット数:" + str(results[0][0]) + ")"

            # Header fields stored in the `overview` table.
            for field, query in (
                ("from", "SELECT COUNT(DISTINCT m_id) FROM overview WHERE from_header=%s;"),
                ("reply-to", "SELECT COUNT(DISTINCT m_id) FROM overview WHERE reply_to=%s;"),
                ("subject", "SELECT COUNT(DISTINCT m_id) FROM overview WHERE subject=%s;"),
            ):
                if mail_json_dict.get(field, "") != "":
                    mail_json_dict[field] = _annotate(mail_json_dict[field], query)
            for pat in range(len(mail_json_dict["pattern"])):
                mail_json_dict["pattern"][pat] = _annotate(
                    mail_json_dict["pattern"][pat],
                    "SELECT COUNT(DISTINCT m_id) FROM pattern WHERE pattern=%s;",
                )
            for att in range(len(mail_json_dict["attach"])):
                mail_json_dict["attach"][att] = _annotate(
                    mail_json_dict["attach"][att],
                    "SELECT COUNT(DISTINCT m_id) FROM attach WHERE attach=%s;",
                )
            # NOTE(review): like the original, this raises TypeError if the
            # "received" key is absent -- confirm whether that can happen.
            for receive in mail_json_dict.get("received"):
                if receive.get("from", {}).get("display", "") != "":
                    receive["from"]["display"] = _annotate(
                        receive["from"]["display"],
                        "SELECT COUNT(DISTINCT m_id) FROM received WHERE from_display=%s;",
                    )
                if receive.get("from", {}).get("ip", "") != "":
                    receive["from"]["ip"] = _annotate(
                        receive["from"]["ip"],
                        "SELECT COUNT(DISTINCT m_id) FROM received WHERE from_ip=%s;",
                    )
                if receive.get("by", "") != "":
                    receive["by"] = _annotate(
                        receive["by"],
                        "SELECT COUNT(DISTINCT m_id) FROM received WHERE by=%s;",
                    )
        finally:
            cur.close()
    finally:
        # BUG FIX: the original closed only the cursor and leaked the
        # database connection.
        connection.close()
    return mail_json_dict
if __name__ == "__main__":
mail_json_dict = json_string2dict(test_data)
compare_data(mail_json_dict) | StarcoderdataPython |
import numpy as np
import argparse
import config
import os
import datetime
import sys
import tensorflow.keras as keras
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Lambda, Reshape, Layer
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
import tensorflow as tf
DIR_NAME = './data/rollout/'  # directory holding rollout .npz files with an 'obs' array
SCREEN_SIZE_X = 64  # observation frame width
SCREEN_SIZE_Y = 64  # observation frame height
batch_size = 10  # NOTE(review): unused -- the training loop slices 100 frames directly
IM_DIM = 64  # network input image height/width
DEPTH = 32  # base channel count for the conv networks
LATENT_DEPTH = 512  # latent vector size
K_SIZE = 5  # convolution kernel size
def sampling(args):
    """Reparametrization trick: z = mean + exp(logsigma/2) * eps, eps ~ N(0, I)."""
    mean, logsigma = args
    noise = keras.backend.random_normal(shape=keras.backend.shape(mean))
    return mean + tf.exp(logsigma / 2) * noise
def encoder():
    """Build the VAE-GAN encoder model: image -> [latent sample, KL loss]."""
    inputs = keras.layers.Input(shape=(IM_DIM, IM_DIM, 3))
    net = inputs
    # Three stride-2 conv blocks with BatchNorm + LeakyReLU.
    for filters in (DEPTH * 2, DEPTH * 4, DEPTH * 8):
        net = keras.layers.Conv2D(filters=filters, kernel_size=K_SIZE, strides=2, padding='same')(net)
        net = keras.layers.BatchNormalization()(net)
        net = keras.layers.LeakyReLU(alpha=0.2)(net)
    net = keras.layers.Flatten()(net)
    net = keras.layers.Dense(LATENT_DEPTH)(net)
    net = keras.layers.BatchNormalization()(net)
    net = keras.layers.LeakyReLU(alpha=0.2)(net)
    # Posterior parameters, both squashed by tanh.
    mean = keras.layers.Dense(LATENT_DEPTH,activation="tanh")(net)
    logsigma = keras.layers.Dense(LATENT_DEPTH,activation="tanh")(net)
    latent = keras.layers.Lambda(sampling, output_shape=(LATENT_DEPTH,))([mean, logsigma])
    # Analytic KL divergence of N(mean, exp(logsigma)) from N(0, I).
    kl_loss = 1 + logsigma - keras.backend.square(mean) - keras.backend.exp(logsigma)
    kl_loss = keras.backend.mean(kl_loss, axis=-1)
    kl_loss *= -0.5
    return keras.models.Model(inputs, [latent, kl_loss])
def generator():
    """Build the VAE-GAN generator/decoder model: latent vector -> image."""
    latent_in = keras.layers.Input(shape=(LATENT_DEPTH,))
    net = keras.layers.Dense(8*8*DEPTH*8)(latent_in)
    net = keras.layers.BatchNormalization()(net)
    net = keras.layers.LeakyReLU(alpha=0.2)(net)
    net = keras.layers.Reshape((8, 8, DEPTH * 8))(net)
    # Three stride-2 transposed-conv blocks upsample the 8x8 feature map.
    for filters in (DEPTH * 8, DEPTH * 4, DEPTH):
        net = keras.layers.Conv2DTranspose(filters=filters, kernel_size=K_SIZE, strides=2, padding='same')(net)
        net = keras.layers.BatchNormalization()(net)
        net = keras.layers.LeakyReLU(alpha=0.2)(net)
    # Final conv to 3 channels with sigmoid pixels in [0, 1].
    net = keras.layers.Conv2D(filters=3, kernel_size=K_SIZE, padding='same')(net)
    net = keras.layers.Activation('sigmoid')(net)
    return keras.models.Model(latent_in, net)
def discriminator():
    """Build the VAE-GAN discriminator.

    Returns a model mapping an image to [real/fake logit, flattened
    intermediate features]; the features feed the feature-matching
    reconstruction loss in train_step_vaegan.
    """
    input_D = keras.layers.Input(shape=(IM_DIM, IM_DIM, 3))
    X = keras.layers.Conv2D(filters=DEPTH, kernel_size=K_SIZE, strides=2, padding='same')(input_D)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    # BUG FIX: this layer was wired to `input_D`, silently discarding the
    # first conv block above. Wire it to `X` instead. NOTE: this changes the
    # architecture, so previously saved D_training_.h5 weights will not load.
    X = keras.layers.Conv2D(filters=DEPTH*4, kernel_size=K_SIZE, strides=2, padding='same')(X)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    X = keras.layers.BatchNormalization()(X)
    X = keras.layers.Conv2D(filters=DEPTH*8, kernel_size=K_SIZE, strides=2, padding='same')(X)
    X = keras.layers.BatchNormalization()(X)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    X = keras.layers.Conv2D(filters=DEPTH*8, kernel_size=K_SIZE, padding='same')(X)
    # Feature output for the VAE's feature-matching loss (pre-BN/activation).
    inner_output = keras.layers.Flatten()(X)
    X = keras.layers.BatchNormalization()(X)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    X = keras.layers.Flatten()(X)
    X = keras.layers.Dense(DEPTH*8)(X)
    X = keras.layers.BatchNormalization()(X)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    output = keras.layers.Dense(1)(X)
    return keras.models.Model(input_D, [output, inner_output])
def import_data(N, M):
    """Load up to N rollout files of M observations each from DIR_NAME.

    Returns (data, N) where data is a float32 array of shape
    (M*N, SCREEN_SIZE_X, SCREEN_SIZE_Y, 3) and N is the number of files
    actually available.
    """
    filelist = [f for f in os.listdir(DIR_NAME) if f != '.DS_Store']
    filelist.sort()
    # Clamp to the requested episode count (or shrink N to what exists).
    if len(filelist) > N:
        filelist = filelist[:N]
    elif len(filelist) < N:
        N = len(filelist)
    data = np.zeros((M*N, SCREEN_SIZE_X, SCREEN_SIZE_Y, 3), dtype=np.float32)
    idx = 0
    file_count = 0
    for file in filelist:
        try:
            new_data = np.load(DIR_NAME + file)['obs']
            data[idx:(idx + M), :, :, :] = new_data
            idx = idx + M
            file_count += 1
            if file_count%50==0:
                print('Imported {} / {} ::: Current data size = {} observations'.format(file_count, N, idx))
        except Exception as e:
            # Best effort: report and skip corrupt or mismatched rollouts.
            print(e)
            print('Skipped {}...'.format(file))
    print('Imported {} / {} ::: Current data size = {} observations'.format(file_count, N, idx))
    return data, N
# Instantiate the three VAE-GAN networks.
E = encoder()
G = generator()
D = discriminator()
lr=0.001
#lr=0.0001
# One Adam optimizer per network, all sharing the same learning rate.
E_opt = keras.optimizers.Adam(lr=lr)
G_opt = keras.optimizers.Adam(lr=lr)
D_opt = keras.optimizers.Adam(lr=lr)
# Loss weights: feature-matching term, latent moment regularizer, KL term.
inner_loss_coef = 1
normal_coef = 0.1
kl_coef = 0.5
@tf.function
def train_step_vaegan(x):
    """Run one joint optimization step of E, G and D on image batch ``x``
    and return the individual loss terms."""
    # Latents drawn from the prior for the "random fake" discriminator term.
    # NOTE(review): the batch size 100 is hard-coded and must match the
    # batch passed in by the training loop -- confirm.
    lattent_r = tf.random.normal((100, LATENT_DEPTH))
    with tf.GradientTape(persistent=True) as tape :
        lattent,kl_loss = E(x)
        fake = G(lattent)
        dis_fake,dis_inner_fake = D(fake)
        dis_fake_r,_ = D(G(lattent_r))
        dis_true,dis_inner_true = D(x)
        # Squared feature-matching error in discriminator feature space.
        vae_inner = dis_inner_fake-dis_inner_true
        vae_inner = vae_inner*vae_inner
        # Moment-matching regularizer pushing the latent batch towards N(0, I).
        mean,var = tf.nn.moments(E(x)[0], axes=0)
        var_to_one = var - 1
        normal_loss = tf.reduce_mean(mean*mean) + tf.reduce_mean(var_to_one*var_to_one)
        kl_loss = tf.reduce_mean(kl_loss)
        vae_diff_loss = tf.reduce_mean(vae_inner)
        # GAN terms: both kinds of fakes target label 0, real images label 1.
        f_dis_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(tf.zeros_like(dis_fake), dis_fake))
        r_dis_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(tf.zeros_like(dis_fake_r), dis_fake_r))
        t_dis_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(tf.ones_like(dis_true), dis_true))
        gan_loss = (0.5*t_dis_loss + 0.25*f_dis_loss + 0.25*r_dis_loss)
        # Pixel-space L1 error; reported only -- it feeds no gradient below.
        vae_loss = tf.reduce_mean(tf.abs(x-fake))
        # Per-network objectives; G is rewarded for fooling D (-gan_loss).
        E_loss = vae_diff_loss + kl_coef*kl_loss + normal_coef*normal_loss
        G_loss = inner_loss_coef*vae_diff_loss - gan_loss
        D_loss = gan_loss
    # The persistent tape is reused for all three networks, then released.
    E_grad = tape.gradient(E_loss,E.trainable_variables)
    G_grad = tape.gradient(G_loss,G.trainable_variables)
    D_grad = tape.gradient(D_loss,D.trainable_variables)
    del tape
    E_opt.apply_gradients(zip(E_grad, E.trainable_variables))
    G_opt.apply_gradients(zip(G_grad, G.trainable_variables))
    D_opt.apply_gradients(zip(D_grad, D.trainable_variables))
    return [gan_loss, vae_loss, f_dis_loss, r_dis_loss, t_dis_loss, vae_diff_loss, E_loss, D_loss, kl_loss, normal_loss]
def main(args):
    """Train the VAE-GAN: load data, optionally restore weights, run the loop.

    :param args: argparse namespace with N, time_steps, epochs, new_model.
    """
    new_model = args.new_model
    N = int(args.N)
    M = int(args.time_steps)
    # NOTE(review): parsed but never used below -- confirm whether an epoch
    # loop was intended around the training loop.
    epochs = int(args.epochs)
    try:
        data, N = import_data(N, M)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C is not swallowed; the
        # original error is still re-raised after the diagnostic message.
        print('NO DATA FOUND')
        raise
    if not new_model:
        try:
            D.load_weights("./saved-models/D_training_.h5")
            E.load_weights("./saved-models/E_training_.h5")
            G.load_weights("./saved-models/G_training_.h5")
        except Exception:
            print("Either set --new_model or ensure ./vae/weights.h5 exists")
            raise
    print('DATA SHAPE = {}'.format(data.shape))
    step = 0
    log_freq = 1
    metrics_names = ["gan_loss", "vae_loss", "fake_dis_loss", "r_dis_loss", "t_dis_loss", "vae_inner_loss", "E_loss", "D_loss", "kl_loss", "normal_loss"]
    # Bug fix: every Mean metric was previously created with the literal name
    # 'm'; use the matching entry of metrics_names so each metric is
    # individually identifiable.
    metrics = [tf.keras.metrics.Mean(name, dtype=tf.float32) for name in metrics_names]

    def save_model():
        """Persist the current weights of all three networks."""
        D.save('saved-models/D_training_' + '.h5')
        G.save('saved-models/G_training_' + '.h5')
        E.save('saved-models/E_training_' + '.h5')

    def print_metrics():
        """Print the running mean of every metric on one line, then reset."""
        s = ""
        for name, metric in zip(metrics_names, metrics):
            s += " " + name + " " + str(np.around(metric.result().numpy(), 3))
        print("\rStep : " + str(step) + " " + s, end="", flush=True)
        for metric in metrics:
            metric.reset_states()

    # Train on consecutive 100-sample windows of the data.
    for i in range(2000, 5001, 100):
        step += 1
        if not i % log_freq:
            print_metrics()
        results = train_step_vaegan(data[i-100:i])
        for metric, result in zip(metrics, results):
            metric(result)
    save_model()
if __name__ == "__main__":
    # CLI entry point; argparse is presumably imported near the top of this
    # file (not visible in this chunk) -- verify.
    parser = argparse.ArgumentParser(description=('Train VAE'))
    parser.add_argument('--N',default = 10000, help='number of episodes to use to train')
    parser.add_argument('--new_model', action='store_true', help='start a new model from scratch?')
    parser.add_argument('--time_steps', type=int, default=300,
                        help='how many timesteps at start of episode?')
    parser.add_argument('--epochs', default = 10, help='number of epochs to train for')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
1724857 | <gh_stars>1-10
from __future__ import division
import numpy as np
from scipy.sparse import csr_matrix, coo_matrix
from scipy.linalg import blas
from pyscf.nao.m_sparsetools import csr_matvec, csc_matvec, csc_matvecs
import math
def chi0_mv(self, dvin, comega=1j*0.0, dnout=None):
    """
    Apply the non-interacting response function to a vector
    Input Parameters:
    -----------------
    self : tddft_iter or tddft_tem class
    sp2v : vector describing the effective perturbation [spin*product] --> value
    comega: complex frequency

    Returns dnout, the induced density change per spin channel, reshaped from
    (nspin, nprod).  Real and imaginary parts are propagated separately
    because the sparse matvec helpers work on real arrays.
    """
    if dnout is None: dnout = np.zeros_like(dvin, dtype=self.dtypeComplex)
    sp2v = dvin.reshape((self.nspin,self.nprod))
    sp2dn = dnout.reshape((self.nspin,self.nprod))
    for s in range(self.nspin):
        # Product basis -> orbital-pair representation, then project onto
        # occupied x virtual states via two GEMMs.
        vdp = csr_matvec(self.cc_da, sp2v[s].real) # real part
        sab = (vdp*self.v_dab).reshape((self.norbs,self.norbs))
        nb2v = self.gemm(1.0, self.xocc[s], sab)
        nm2v_re = self.gemm(1.0, nb2v, self.xvrt[s].T)
        vdp = csr_matvec(self.cc_da, sp2v[s].imag) # imaginary
        sab = (vdp*self.v_dab).reshape((self.norbs, self.norbs))
        nb2v = self.gemm(1.0, self.xocc[s], sab)
        nm2v_im = self.gemm(1.0, nb2v, self.xvrt[s].T)
        vs,nf = self.vstart[s],self.nfermi[s]
        if self.use_numba:
            # Numba-accelerated equivalent of the double loop below.
            self.div_numba(self.ksn2e[0,s], self.ksn2f[0,s], nf, vs, comega, nm2v_re, nm2v_im)
        else:
            # Divide each occupied(n) x virtual(m) amplitude by the energy
            # denominators, weighted by the occupation difference (fn - fm).
            for n,(en,fn) in enumerate(zip(self.ksn2e[0,s,:nf], self.ksn2f[0,s,:nf])):
                for m,(em,fm) in enumerate(zip(self.ksn2e[0,s,vs:],self.ksn2f[0,s,vs:])):
                    nm2v = nm2v_re[n, m] + 1.0j*nm2v_im[n, m]
                    nm2v = nm2v * (fn - fm) * \
                        ( 1.0 / (comega - (em - en)) - 1.0 / (comega + (em - en)) )
                    nm2v_re[n, m] = nm2v.real
                    nm2v_im[n, m] = nm2v.imag
            for n in range(vs+1,nf): #padding m<n i.e. negative occupations' difference
                for m in range(n-vs): nm2v_re[n,m],nm2v_im[n,m] = 0.0,0.0
        # Back-transform: orbital pairs -> product basis, real and imaginary
        # parts separately, then assemble the complex result.
        nb2v = self.gemm(1.0, nm2v_re, self.xvrt[s]) # real part
        ab2v = self.gemm(1.0, self.xocc[s].T, nb2v).reshape(self.norbs*self.norbs)
        vdp = csr_matvec(self.v_dab, ab2v)
        chi0_re = vdp*self.cc_da
        nb2v = self.gemm(1.0, nm2v_im, self.xvrt[s]) # imag part
        ab2v = self.gemm(1.0, self.xocc[s].T, nb2v).reshape(self.norbs*self.norbs)
        vdp = csr_matvec(self.v_dab, ab2v)
        chi0_im = vdp*self.cc_da
        sp2dn[s] = chi0_re + 1.0j*chi0_im
    return dnout
#
#
#
def chi0_mv_gpu(self, v, comega=1j*0.0):
    # tddft_iter_gpu, v, cc_da, v_dab, no,
    # comega=1j*0.0, dtype=np.float32, cdtype=np.complex64):
    # check with nspin=2
    """
    Apply the non-interacting response function to a vector using gpu for
    matrix-matrix multiplication

    GPU work (via self.td_GPU) is interleaved with the CPU sparse matvecs so
    the two overlap.  Only the spin-restricted, single-precision case is
    supported.
    """
    assert self.nspin==1
    if self.dtype != np.float32:
        print(self.dtype)
        raise ValueError("GPU version only with single precision")
    # Pack real and imaginary parts side by side (Fortran order for the GPU).
    vext = np.zeros((v.shape[0], 2), dtype = self.dtype, order="F")
    vext[:, 0] = v.real
    vext[:, 1] = v.imag
    # real part
    vdp = csr_matvec(self.cc_da, vext[:, 0])
    sab = (vdp*self.v_dab).reshape([self.norbs, self.norbs])
    self.td_GPU.cpy_sab_to_device(sab, Async = 1)
    self.td_GPU.calc_nb2v_from_sab(reim=0)
    # nm2v_real
    self.td_GPU.calc_nm2v_real()
    # start imaginary part
    vdp = csr_matvec(self.cc_da, vext[:, 1])
    sab = (vdp*self.v_dab).reshape([self.norbs, self.norbs])
    self.td_GPU.cpy_sab_to_device(sab, Async = 2)
    self.td_GPU.calc_nb2v_from_sab(reim=1)
    # nm2v_imag
    self.td_GPU.calc_nm2v_imag()
    # Apply the energy denominators on the device.
    self.td_GPU.div_eigenenergy_gpu(comega)
    # real part
    self.td_GPU.calc_nb2v_from_nm2v_real()
    self.td_GPU.calc_sab(reim=0)
    self.td_GPU.cpy_sab_to_host(sab, Async = 1)
    # start calc_ imag to overlap with cpu calculations
    self.td_GPU.calc_nb2v_from_nm2v_imag()
    vdp = csr_matvec(self.v_dab, sab)
    self.td_GPU.calc_sab(reim=1)
    # finish real part
    chi0_re = vdp*self.cc_da
    # imag part
    self.td_GPU.cpy_sab_to_host(sab)
    vdp = csr_matvec(self.v_dab, sab)
    chi0_im = vdp*self.cc_da
    # ssum_re = np.sum(abs(chi0_re))
    # ssum_im = np.sum(abs(chi0_im))
    # if math.isnan(ssum_re) or math.isnan(ssum_im):
    #     print(__name__)
    #     print('comega ', comega)
    #     print(v.shape, v.dtype)
    #     print("chi0 = ", ssum_re, ssum_im)
    #     print("sab = ", np.sum(abs(sab)))
    #     raise RuntimeError('ssum == np.nan')
    return chi0_re + 1.0j*chi0_im
| StarcoderdataPython |
3364218 | """
Neuron __init__.
"""
try:
    from spikey.snn.neuron.neuron import Neuron
    from spikey.snn.neuron.rand_potential import RandPotential
except ImportError as e:
    # Chain with `from e` so the original failure is reported as the direct
    # cause, preserving the full traceback of the underlying import error.
    raise ImportError(f"neuron/__init__.py failed: {e}") from e
| StarcoderdataPython |
1792897 | from enum import Enum
from .site_models import News
from selenium import webdriver
from datetime import datetime
from typing import List
from . import dong_fang
from . import can_kao_xiao_xi
# from . import renmin
# from . import sina
# from . import zhong_guo_xin_wen
class SupportedSites(Enum):
    '''
    All supported news websites.
    '''
    DONG_FANG = 0
    CAN_KAO_XIAO_XI = 1
# Human-readable (Chinese) display name for each supported site.
website_names = {
    SupportedSites.DONG_FANG: '东方网',
    SupportedSites.CAN_KAO_XIAO_XI: '参考消息网',
}
# Dispatch table: site -> that site module's get_news_since scraper.
table = {
    SupportedSites.DONG_FANG: dong_fang.get_news_since,
    SupportedSites.CAN_KAO_XIAO_XI: can_kao_xiao_xi.get_news_since,
}
def get_news_since(site: SupportedSites, driver: webdriver.Firefox, time: datetime) -> List[News]:
    '''
    Use the browser *driver* to extract news published after *time* from *site*.
    '''
    return table[site](driver, time)
| StarcoderdataPython |
1763055 | <reponame>ZimCodes/Zyod<gh_stars>1-10
import os
from lib.driver import browser
from selenium.webdriver.chromium.options import ChromiumOptions
from selenium.webdriver.chromium.webdriver import ChromiumDriver
class Chromium(browser.Browser):
    """Chromium WebDriver wrapper that applies download/headless preferences."""

    def __init__(self, opts, driver_opts=None):
        """Initializes Chromium WebDriver

        :param Opts opts: Opts class
        :param Options driver_opts: WebDriver Options; a fresh ChromiumOptions
            is created when omitted.
        """
        # Bug fix: the previous `driver_opts=ChromiumOptions()` default was a
        # single shared instance, mutated by every Chromium object; create a
        # fresh options object per call instead.
        if driver_opts is None:
            driver_opts = ChromiumOptions()
        driver_opts.headless = opts.headless
        self._prefs = {}
        super().__init__(driver_opts, opts)
        self.driver_name = opts.driver_type

    def _set_preferences(self, opts) -> None:
        """Configure Settings/Preferences of WebDriver

        :param Opts opts: Opts class
        :return:
        """
        if opts.download_dir is not None:
            # Bug fix: don't crash when the download directory already exists.
            os.makedirs(opts.download_dir, exist_ok=True)
            self._prefs['download.default_directory'] = opts.download_dir
        if opts.do_download:
            # Download files silently: no prompts, no popups, PDFs saved
            # instead of opened in the viewer.
            self._prefs['download.prompt_for_download'] = False
            self._prefs['profile.default_content_setting_values.popups'] = 0
            self._prefs["download.directory_upgrade"] = True
            self._prefs['plugins.always_open_pdf_externally'] = True
            self._prefs["browser.helperApps.alwaysAsk.force"] = False
        self._driver_opts.add_experimental_option('prefs', self._prefs)

    def get_driver(self):
        """Gets configured Chromium WebDriver"""
        return ChromiumDriver(self.driver_name, 'webkit', desired_capabilities=self._capabilities)
| StarcoderdataPython |
1648107 | """
Scripts to query BPL Solr to find expired/removed Overdrive records
"""
import json
import os
import time
from bookops_bpl_solr import SolrSession
from utils import save2csv
def get_creds(fh):
    """Load Solr client credentials from the JSON file at path *fh*."""
    with open(fh, "rb") as jsonfile:
        return json.load(jsonfile)
def find_total_hits(response):
    """Return the total hit count reported in a Solr *response*."""
    payload = response.json()
    return payload["response"]["numFound"]
def calc_number_of_requests_needed(rows, total_hits):
    """Return how many paged requests of size *rows* cover *total_hits* results.

    Uses integer ceiling division.  The previous implementation computed
    ``int(total_hits / rows)`` with float division, which can lose precision
    for very large hit counts; the pure-integer form cannot.
    """
    return -(-total_hits // rows)
def extract_data(response, out):
    """Append one CSV row per Solr doc in *response* to the file at *out*."""
    data = response.json()
    docs = data["response"]["docs"]
    for d in docs:
        # Sierra-style bib number: Solr id wrapped in a 'b' prefix and an 'a'
        # check-digit suffix.
        row = [
            f"b{d['id']}a",
            d["material_type"],
            d["econtrolnumber"],
            d["eurl"],
            d["digital_avail_type"],
            d["digital_copies_owned"],
        ]
        save2csv(out, row)
def make_page_request(session, start):
    """Fetch one 50-row result page beginning at offset *start* and return it."""
    resp = session.find_expired_econtent(rows=50, result_page=start)
    print(f"Result page #: {start} = {resp.status_code}, url={resp.url}")
    return resp
def find_expired_bibs(creds):
    """Page through all expired e-content records and dump them to a CSV report."""
    out = "./reports/BPL/expired_solr.csv"
    # Write the CSV header first.
    save2csv(
        out,
        ["bib #", "format", "reserve #", "url", "availability type", "owned copies"],
    )
    with SolrSession(
        authorization=creds["client_key"], endpoint=creds["endpoint"]
    ) as session:
        response = session.find_expired_econtent(rows=50)
        print(f"Initial request response = {response.status_code}")
        result_page = 0  # NOTE(review): assigned but never used -- confirm intent
        total_hits = find_total_hits(response)
        print(f"Total hits: {total_hits}")
        req_loops = calc_number_of_requests_needed(50, total_hits)
        for l in range(req_loops):
            response = make_page_request(session, start=50 * l)
            extract_data(response, out)
            # Throttle requests to be gentle on the Solr endpoint.
            time.sleep(1)
def find_bib(creds):
    """Look up a single hard-coded bib number and print the raw Solr response."""
    with SolrSession(
        authorization=creds["client_key"], endpoint=creds["endpoint"]
    ) as session:
        response = session.search_bibNo(
            keyword="11241108", default_response_fields=False
        )
        print(response.json())
def find_isbn(creds):
    """Look up a single hard-coded ISBN and print the raw Solr response."""
    with SolrSession(
        authorization=creds["client_key"], endpoint=creds["endpoint"]
    ) as session:
        response = session.search_isbns(
            keywords=["9781984850010"], default_response_fields=False
        )
        print(response.json())
def find_reserveNo(creds):
    """Look up a hard-coded Overdrive reserve id and print the raw response."""
    with SolrSession(
        authorization=creds["client_key"], endpoint=creds["endpoint"]
    ) as session:
        response = session.search_reserveId(
            # The reserve id is stored lowercased in the index.
            keyword="3B9FC49F-E3AD-41A1-9A34-611A4E6D5954".lower(),
            default_response_fields=False,
        )
        print(response.json())
if __name__ == "__main__":
    # Windows-only path: USERPROFILE and a backslash-separated credential file.
    cred_fh = os.path.join(os.environ["USERPROFILE"], ".bpl-solr\\bpl-solr-prod.json")
    creds = get_creds(cred_fh)
    # Uncomment the query you want to run:
    # find_expired_bibs(creds)
    find_bib(creds)
    # find_isbn(creds)
    # find_reserveNo(creds)
| StarcoderdataPython |
3347672 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the Answer model (table 'answers')
    # and removes the old Comments model it replaces.

    dependencies = [
        ('questions', '0003_question_owner'),
    ]

    operations = [
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('answer_text', models.TextField(verbose_name=b'answer')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('answers_question', models.ForeignKey(to='questions.Question')),
            ],
            options={
                'db_table': 'answers',
            },
            bases=(models.Model,),
        ),
        # Drop the FK first, then the model itself.
        migrations.RemoveField(
            model_name='comments',
            name='comments_question',
        ),
        migrations.DeleteModel(
            name='Comments',
        ),
    ]
| StarcoderdataPython |
1616947 | <gh_stars>0
import unittest
from dialogapi.requests import Requests
class RequestsTest(unittest.TestCase):
    # Integration test: requires live network access to www.nttdocomo.co.jp.

    def test_method_call(self):
        """A plain GET through the Requests wrapper should return HTTP 200."""
        url = "https://www.nttdocomo.co.jp/"
        res = Requests(verify=True).get(url)
        self.assertEqual(res.status_code, 200)
| StarcoderdataPython |
1734004 | <filename>21608.py
import sys
input = sys.stdin.readline
'''
최대 학생 수는 400명이니까
완전탐색 하면 대략 16만 회 << 가능할듯?
학생 번호가 주어졌을 때 조건에 맞는 자리 찾아주는 함수 필요
'''
# functions
def find_seat(student, like, N):
    """Place *student* into the global `classroom` grid (best empty seat).

    Seat preference (problem rules): most adjacent liked students first, then
    most adjacent empty seats, then smallest row/column (implicit in scan order).
    """
    seat_pos = (-1, -1)
    seat_adj_like = -1
    seat_adj_empty = -1
    for r in range(N):
        for c in range(N):
            if classroom[r][c] != 0:
                continue
            like_cnt, empty_cnt = look_around(r, c, N, like)
            # Rule 1: prefer more adjacent liked students.
            if like_cnt > seat_adj_like:
                seat_pos = (r, c)
                seat_adj_like = like_cnt
                seat_adj_empty = empty_cnt
            # Rule 2: tie-break on more adjacent empty seats.
            elif like_cnt == seat_adj_like \
                    and empty_cnt > seat_adj_empty:
                seat_pos = (r, c)
                seat_adj_empty = empty_cnt
    # Rule 3 (smallest row, then column) is satisfied automatically by the
    # row-major scan order above.
    r, c = seat_pos
    classroom[r][c] = student
def look_around(row, col, N, like):
    """Inspect the four orthogonal neighbors of (row, col) in `classroom`.

    Returns (count of adjacent students in *like*, count of adjacent empty seats).
    """
    directions = ((0, 1), (0, -1), (1, 0), (-1, 0))
    like_cnt = 0
    empty_cnt = 0
    for dr, dc in directions:
        r = row + dr
        c = col + dc
        # Skip neighbors outside the N x N grid.
        if not 0 <= r < N or not 0 <= c < N:
            continue
        if classroom[r][c] == 0:
            empty_cnt += 1
        elif classroom[r][c] in like:
            like_cnt += 1
    return (like_cnt, empty_cnt)
# input
N = int(input())
student_order = []
student_info = {}
for _ in range(N ** 2):
    # a = student number, (b, c, d, e) = the four students a likes.
    a, b, c, d, e = map(int, input().split())
    student_order.append(a)
    student_info[a] = (b, c, d, e)
# process
classroom = [[0 for _ in range(N)] for _ in range(N)]
# Seat students one by one in the given order.
for s in student_order:
    like = student_info[s]
    find_seat(s, like, N)
# Compute the total satisfaction score.
sol = 0
# 0/1/2/3/4 adjacent liked students -> score 0/1/10/100/1000.
score_table = [0, 1, 10, 100, 1000]
for r in range(N):
    for c in range(N):
        like = student_info[classroom[r][c]]
        sol += score_table[look_around(r, c, N, like)[0]]
# output
print(sol)
1798391 | from abc import ABC, abstractmethod
import torch
from data_utils import helper
from data_utils import pose_features
# A normalizer provides an interface to normalize and denormalize a batch of poses.
# The normalization/denormalization is always a deterministic process.
class BaseNormalizer(ABC):
    """Abstract interface plus shared rotation helpers for pose normalizers.

    A normalizer maps a batch of 3D hand poses (joints x xyz per sample) to a
    normalized frame and back; normalization must be deterministic.
    """

    @classmethod
    @abstractmethod
    def normalize_single(cls, poses):
        """Normalize *poses*; return (normalized_poses, params dict)."""
        pass

    @classmethod
    @abstractmethod
    def normalize_pair(cls, poses, labels):
        """Normalize *poses* and transform *labels* consistently."""
        pass

    @staticmethod
    @abstractmethod
    def denormalize(poses, params):
        """Invert the normalization using the *params* returned earlier."""
        pass

    @classmethod
    def _compute_z_direction(cls, shifted_poses):
        # The pose is assumed to be shifted to a meaningful center point that (approximately) lies
        # on the palm plane.
        device = shifted_poses.device
        palm_bones = shifted_poses[:, :6]
        # 15 = number of unordered pairs among the 6 palm joints.
        normal_estimates = torch.zeros(palm_bones.shape[0], 15, 3, device=device)
        idx = 0
        for i, bones_1 in enumerate(palm_bones.transpose(0, 1)[:-1]):
            for bones_2 in palm_bones.transpose(0, 1)[i + 1:]:
                cross = torch.cross(bones_1, bones_2, dim=1)
                norm = torch.norm(cross, dim=1).view(-1, 1)
                # Near-parallel pairs give a degenerate cross product; leave
                # those estimates at zero instead of dividing by ~0.
                mask = torch.isclose(norm, torch.tensor(0.0, device=device), atol=1e-5)
                mask = mask.view(shifted_poses.shape[0])
                normal_estimates[~mask, idx] = cross[~mask] / norm[~mask]
                normal_estimates[mask, idx] = torch.zeros(3, device=device)
                idx += 1
        z_direction = torch.mean(normal_estimates, dim=1)
        return z_direction

    @classmethod
    def _compute_plane_alignment_rot_mat(cls, palm_normals):
        """Rotation matrices mapping each palm normal onto (0, 0, -1)."""
        batch_size = palm_normals.shape[0]
        device = palm_normals.device
        rot_mats = torch.zeros(batch_size, 3, 3, device=device)
        # For a single pose, the target vector t is the negative z-direction (0.0, 0.0, -1.0).
        # We seek the rotation matrix R that rotates the given palm normal n onto the target, such
        # that t = R * n.
        target_vecs = torch.tensor([0.0, 0.0, -1.0], device=device).view(1, 3).repeat(batch_size, 1)
        palm_normals = palm_normals / torch.norm(palm_normals, dim=1).view(-1, 1)
        # The vectors t and n span a plane. First find the normal of that plane by computing the
        # cross product.
        plane_normals = torch.cross(palm_normals, target_vecs, dim=1)
        # In that plane the cosine of the angle between t and n can be computed as follows.
        cosines = helper.batch_dot(palm_normals, target_vecs)
        # If t and n are anti-parallel, the solution following this approach is not unique (infinite
        # possible planes). Therefore filter them out with a mask and simply multiply n by -1.
        mask = torch.isclose(cosines, torch.tensor(-1.0, device=device), atol=1e-5)
        rot_mats[mask] = -1.0 * torch.eye(3, device=device)
        # For the rest apply some more magic from https://math.stackexchange.com/questions/180418
        plane_normals = plane_normals[~mask]
        cosines = cosines[~mask]
        # Build the skew-symmetric cross-product matrices of the plane normals.
        cross_prod_mats = torch.zeros(plane_normals.shape[0], 3, 3, device=device)
        cross_prod_mats[:, [1, 0, 0], [2, 2, 1]] = plane_normals
        cross_prod_mats[:, [2, 2, 1], [1, 0, 0]] = plane_normals
        cross_prod_mats[:, [0, 1, 2], [1, 2, 0]] *= -1.0
        cross_prod_mats_sq = torch.bmm(cross_prod_mats, cross_prod_mats)
        rot_mats[~mask] = torch.eye(3, device=device) + cross_prod_mats
        rot_mats[~mask] += cross_prod_mats_sq * (1.0 / (1.0 + cosines)).view(-1, 1, 1)
        return rot_mats

    @classmethod
    def _compute_inplane_rot_mat(cls, x_directions_2d):
        # It is assumed that the z-axis of the pose coordinate frames is already correctly aligned.
        # Rotating the pose inside that plane is therefore a 2D problem.
        batch_size = x_directions_2d.shape[0]
        device = x_directions_2d.device
        target_x_directions_2d = torch.zeros_like(x_directions_2d, device=device)
        target_x_directions_2d[:, 0] = 1.0
        angles = helper.vector_angle_2d(x_directions_2d, target_x_directions_2d)
        # 2D rotation embedded in a 3x3 matrix (z row/column is identity).
        inplane_rot_mats = torch.zeros(batch_size, 3, 3, device=device)
        inplane_rot_mats[:, 2, 2] = 1.0
        inplane_rot_mats[:, [0, 1], [0, 1]] = torch.cos(angles).reshape(batch_size, 1)
        inplane_rot_mats[:, 0, 1] = -torch.sin(angles)
        inplane_rot_mats[:, 1, 0] = -inplane_rot_mats[:, 0, 1]
        return inplane_rot_mats
# A no-op placeholder.
class NoNorm(BaseNormalizer):
    """Identity normalizer: passes data through unchanged with dummy params."""

    @classmethod
    def normalize_single(cls, poses):
        # The dummy parameter keeps the return shape consistent with the
        # other normalizers.
        return poses, {'some_param': torch.zeros(poses.shape[0])}

    @classmethod
    def normalize_pair(cls, poses, labels):
        return poses, labels, {'some_param': torch.zeros(poses.shape[0])}

    @staticmethod
    def denormalize(poses, params):
        return poses
# Center all samples such that the overall variance of coordinate values is decreased.
class Shifter(BaseNormalizer):
    """Translate each pose so its center of mass lies at the origin."""

    @classmethod
    def normalize_single(cls, poses):
        # Shift poses such that the center of mass it at (0, 0, 0).
        # Shift the labels by the same amount.
        shifts = - poses.mean(dim=1).view(-1, 1, 3)
        shifted_poses = poses + shifts
        return shifted_poses, {'shift': shifts}

    @classmethod
    def normalize_pair(cls, poses, labels):
        shifted_poses, params = cls.normalize_single(poses)
        # Labels get the same per-sample translation as the poses.
        shifted_labels = labels + params['shift']
        return shifted_poses, shifted_labels, params

    @staticmethod
    def denormalize(poses, params):
        return poses - params['shift']
# Rotate poses such that they are always viewed from a similar view point. The position and scale
# of the poses is not changed.
class ViewPointNormalizer(BaseNormalizer):
    """Rotate poses to a canonical view point; position and scale unchanged."""

    @classmethod
    def normalize_single(cls, poses):
        # First compute the hand palm normals. For the computation the data already needs to be
        # centered at some point on the hand palm, which is here defined by W, IMCP, MMCP, RMCP.
        palm_centered_poses = poses - poses[:, [0, 2, 3, 4]].mean(dim=1).view(-1, 1, 3)
        z_directions = cls._compute_z_direction(palm_centered_poses)
        # Rotate the pose such that the normal of the hand palm points into negative z-direction.
        # The normal is approximated by computing the average of all pair wise cross products of the
        # vectors between the origin and the palm joints. The x-axis direction equals the average
        # vector from origin to IMCP, MMCP, RMCP and PMCP (in the previously defined plain).
        # Remember that after the above shifting operation, the origin lies in the palm plane.
        plane_alignment_rot_mats = cls._compute_plane_alignment_rot_mat(z_directions)
        shifts = - poses.mean(dim=1).view(-1, 1, 3)
        centered_poses = poses + shifts
        plain_aligned_poses_t = torch.bmm(plane_alignment_rot_mats, centered_poses.transpose(1, 2))
        # In-plane x-direction: mean over joints 2..5 (MCP joints).
        x_directions_2d = plain_aligned_poses_t[:, :2, 2:6].mean(dim=2)
        inplane_rot_mats = cls._compute_inplane_rot_mat(x_directions_2d)
        rotated_poses = torch.bmm(inplane_rot_mats, plain_aligned_poses_t).transpose(1, 2)
        # Undo the temporary centering so only the rotation remains.
        rotated_poses -= shifts
        rotations = torch.bmm(inplane_rot_mats, plane_alignment_rot_mats)
        return rotated_poses, {'rotation': rotations, 'shift': shifts}

    @classmethod
    def normalize_pair(cls, poses, labels):
        rotated_poses, params = cls.normalize_single(poses)
        # Labels are rotated around the center of the predicted pose.
        shifted_labels = labels + params['shift']
        rotated_labels = torch.bmm(shifted_labels, params['rotation'].transpose(1, 2))
        rotated_labels -= params['shift']
        return rotated_poses, rotated_labels, params

    @staticmethod
    def denormalize(poses, params):
        # Inverse: re-center, apply the transposed rotation, shift back.
        centered_poses = poses + params['shift']
        rotated_poses = torch.bmm(centered_poses, params['rotation'])
        rotated_poses -= params['shift']
        return rotated_poses
class GlobalNormalizer(BaseNormalizer):
    """Center the whole batch and scale by mean joint distance from origin.

    With ``individual_scaling`` each sample gets its own scale factor;
    otherwise one scalar is shared by the entire batch.
    """

    def __init__(self, individual_scaling=False):
        self.individual_scaling = individual_scaling

    def normalize_single(self, poses):
        # One shared translation for the whole batch.
        shift = poses.view(-1, 3).mean(dim=0).view(1, 1, 3)
        shifted_poses = poses - shift
        if self.individual_scaling:
            scaling = shifted_poses.norm(dim=2).mean(dim=1).view(-1, 1, 1)
        else:
            scaling = shifted_poses.view(-1, 3).norm(dim=1).mean().view(1)
        scaled_poses = 1.0 / scaling * shifted_poses
        return scaled_poses, {'shift': shift, 'scaling': scaling}

    def normalize_pair(self, poses, labels):
        normalized_poses, params = self.normalize_single(poses)
        # Labels get the same shift and scale derived from the poses.
        shifted_labels = labels - params['shift']
        normalized_labels = 1.0 / params['scaling'] * shifted_labels
        return normalized_poses, normalized_labels, params

    @staticmethod
    def denormalize(poses, params):
        return params['scaling'] * poses + params['shift']
# Deprecation warning: Wasn't used/updated/tested recently.
# This normalizer shifts, rotates and scales each pose (and each label) individually and independent
# from each other to remove any distracting variance. The assumption here is that a pose corrector
# that is just based on the predicted pose of some backbone model (no image evidence) cannot account
# for errors caused by global shift, rotation or scaling. Under this assumption, removing this
# "noise" should not remove any valuable information from the data.
# Be careful in practice: some datasets and/or backbone models don't fulfill this assumption,
# leading to correlations in the data between global errors and absolute positions.
class IndividualNormalizer(BaseNormalizer):
    """Per-pose shift, scale and rotation normalization (deprecated, untested)."""

    @classmethod
    def normalize_single(cls, poses):
        batch_size = poses.shape[0]
        device = poses.device
        # Shift data such that the weighted mean of all joints is at (0, 0, 0).
        # The weights are heuristically defined as follows.
        weights = torch.zeros(poses.shape[1], 1, device=device)
        weights[[0, 2, 3, 4]] = 0.2  # W, IMCP, MMCP, RMCP
        weights[[1, 5]] = 0.1  # TMCP, PMCP
        weighted_means = (weights * poses).sum(dim=1)
        shifts = - weighted_means
        shifted_poses = poses + shifts.view(batch_size, 1, -1)
        # Scale data such that the average bone length of the pose is 1.0.
        bone_lengths = pose_features.lengths_of_all_bones(poses)
        mean_bone_length = bone_lengths.view(batch_size, -1).mean(dim=1)
        scalings = 1.0 / mean_bone_length.view(batch_size, 1, 1)
        scaled_poses = shifted_poses * scalings
        # Rotate the pose such that the normal of the hand palm points into negative z-direction.
        # The normal is approximated by computing the average of all pair wise cross products of the
        # vectors between the origin and the palm joints. The x-axis direction equals the average
        # vector from origin to IMCP, MMCP, RMCP and PMCP (in the previously defined plain).
        # Remember that after the above shifting operation, the origin lies in the palm plane.
        z_directions = cls._compute_z_direction(scaled_poses)
        plane_alignment_rot_mats = cls._compute_plane_alignment_rot_mat(z_directions)
        rotated_poses_t = torch.bmm(plane_alignment_rot_mats, scaled_poses.transpose(1, 2))
        # NOTE(review): joints 2:5 here vs 2:6 in ViewPointNormalizer -- confirm
        # whether PMCP is intentionally excluded.
        x_directions_2d = rotated_poses_t[:, :2, 2:5].mean(dim=2)
        inplane_rot_mats = cls._compute_inplane_rot_mat(x_directions_2d)
        rotated_poses = torch.bmm(inplane_rot_mats, rotated_poses_t).transpose(1, 2)
        rotations = torch.bmm(inplane_rot_mats, plane_alignment_rot_mats)
        return rotated_poses, {'shift': shifts, 'scaling': scalings, 'rotation': rotations}

    @classmethod
    def normalize_pair(cls, poses, labels):
        # Poses and labels are each normalized independently; only the pose
        # params are returned for later denormalization.
        normalized_poses, params = cls.normalize_single(poses)
        normalized_labels, _ = cls.normalize_single(labels)
        return normalized_poses, normalized_labels, params

    @classmethod
    def denormalize(cls, poses, params):
        # Inverse order: un-rotate, un-scale, un-shift.
        denormalized_poses = torch.bmm(poses, params['rotation'])
        denormalized_poses = denormalized_poses / params['scaling'].view(-1, 1, 1)
        denormalized_poses = denormalized_poses - params['shift'].view(-1, 1, 3)
        return denormalized_poses
| StarcoderdataPython |
40460 | <reponame>WolffunGame/experiment-agent<filename>tests/acceptance/test_acceptance/__init__.py
# __init__ is empty
| StarcoderdataPython |
3355489 | <reponame>ark-1/circleci-jetbrains-space-orb
import base64
import os
import subprocess
import json
from typing import Optional, List
def substitute_envs(s: str) -> str:
    """Expand ``${VAR}``/``$VAR`` references and ``$(cmd)`` expressions in *s*.

    Environment variables come from ``os.environ`` (an unset variable expands
    to the empty string).  ``$(cmd)`` is executed with bash when available,
    otherwise sh, and its JSON-escaped stdout replaces the expression.
    """
    # Prefer bash; `exit 0` keeps check_output from raising when bash is
    # absent.  Strip the trailing newline on BOTH paths -- the original left
    # it on the sh fallback, which broke the exec below.  An empty string is
    # falsy, so `or` selects the fallback exactly when bash was not found.
    bash_path = subprocess.check_output(
        "command -v bash; exit 0", shell=True, universal_newlines=True
    ).strip()
    shell = bash_path or subprocess.check_output(
        "command -v sh", shell=True, universal_newlines=True
    ).strip()

    def replacement(var_name1: Optional[str], var_name2: Optional[str], expr: Optional[str]) -> str:
        # (Leftover debug print removed -- it leaked every substitution to stdout.)
        var_name = var_name1 or var_name2
        if var_name is not None:
            # re.sub requires the replacement to be a string; an unset
            # variable therefore expands to "" instead of raising TypeError.
            return os.environ.get(var_name, "")
        # Run the shell expression and JSON-escape its output so it can be
        # embedded safely inside a JSON message template.
        value = subprocess \
            .check_output([shell, '-c', expr], universal_newlines=True, env=os.environ) \
            .rstrip('\n')
        return json.dumps(value)[1:-1]

    import re
    return re.sub('\\${([\\w]+)\\b}|\\$([\\w]+)|\\$\\(([^)]+)\\)',
                  lambda match: replacement(match.group(1), match.group(2), match.group(3)), s)
def build_message_body(custom: Optional[str], template: Optional[str]) -> str:
    """Resolve the message template, apply env substitutions, print and return it.

    *custom* wins over *template*; *template* names an env var holding one of
    the pre-included templates.  Raises ValueError when neither is usable.
    """
    if custom is None and template is None:
        raise ValueError("Error: No message template selected. "
                         "Select either a custom template "
                         "or one of the pre-included ones via the 'custom' or 'template' parameters.")
    if custom is not None:
        body = substitute_envs(custom)
    else:
        template_value = os.getenv(template)
        if not template_value:
            raise ValueError("No such template:", template)
        body = substitute_envs(template_value)
    print('Message body:')
    print(body)
    return body
def post_to_jb_space(msg: str, channels: List[str], profiles: List[str], client_id: bytes, client_secret: bytes,
                     space_url: str):
    """Authenticate against JetBrains Space and send *msg* to every recipient.

    NOTE(review): the curl commands are assembled as shell strings; token and
    body values are interpolated into them -- confirm inputs are trusted.
    """
    if len(channels) == 0 and len(profiles) == 0:
        print('No channel was provided. Enter value for $JB_SPACE_DEFAULT_CHANNEL env var, '
              '$JB_SPACE_DEFAULT_RECIPIENT_PROFILE env var, channel parameter, or recipient_profile parameter')
        return
    # Client-credentials OAuth flow: Basic auth with id:secret, ** scope.
    auth_response = subprocess.check_output(
        'curl -s -f -X POST -H "Authorization: Basic ' +
        base64.b64encode(client_id + b':' + client_secret).decode('UTF-8') +
        '" -d "grant_type=client_credentials&scope=**" ' + space_url + '/oauth/token',
        shell=True, universal_newlines=True
    )
    token: Optional[str] = json.loads(auth_response)['access_token']
    if token is None:
        print("Cannot authenticate into JetBrains Space: ")
        return
    msg_loaded = json.loads(msg)

    def send_msg(recipient):
        # Single-quote-escape the JSON body for the shell, then POST it.
        body = json.dumps({'recipient': recipient, 'content': msg_loaded, 'unfurlLinks': False})
        response = json.loads(subprocess.check_output(
            'curl -X POST -H "Authorization: Bearer ' + token + '" -d \'' + body.replace("'", "'\\''") +
            "' " + space_url + '/api/http/chats/messages/send-message',
            shell=True, universal_newlines=True
        ))
        if 'error' in response:
            print(response)
            raise ValueError(response['error_description'])

    for channel in channels:
        print('Sending to channel:', channel)
        send_msg({
            'className': 'MessageRecipient.Channel',
            'channel': {
                'className': 'ChatChannel.FromName',
                'name': channel
            }
        })
    for profile in profiles:
        print('Sending to profile:', profile)
        send_msg({
            'className': 'MessageRecipient.Member',
            'member': 'username:' + profile
        })
def notify(custom: Optional[str], template: Optional[str], channels: List[str], profiles: List[str],
           client_id: bytes, client_secret: bytes, space_url: str, event: str,
           current_branch: str, branch_patterns: List[str]):
    """Send the notification when the job status matches *event* and the branch filter."""
    # The CircleCI job status is written to this file by an earlier step.
    with open('/tmp/JB_SPACE_JOB_STATUS', 'rt') as f:
        status = f.readline().strip()
    if status == event or event == "always":
        if not branch_filter(current_branch, branch_patterns):
            print("NO JB SPACE ALERT")
            print('Current branch does not match any item from the "branch_pattern" parameter')
            print('Current branch:', current_branch)
        else:
            print('Sending notification')
            post_to_jb_space(build_message_body(custom, template), channels, profiles,
                             client_id, client_secret, space_url)
    else:
        print("NO JB SPACE ALERT")
        print()
        print("This command is set to send an alert on:", event)
        print("Current status:", status)
def branch_filter(current_branch: str, branch_patterns: List[str]) -> bool:
    """Return True when *current_branch* fully matches any (stripped) pattern."""
    import re
    return any(
        re.fullmatch(pattern.strip(), current_branch)
        for pattern in branch_patterns
    )
def if_not_empty(s: Optional[str]) -> Optional[str]:
    """Return *s* unchanged, or None when it is None or the empty string."""
    return s if s else None
def remove_prefixes(s: str, prefixes: List[str]) -> str:
    """Strip each prefix in *prefixes* from *s* in order, once per prefix."""
    result = s
    for prefix in prefixes:
        result = result.removeprefix(prefix)
    return result
def main():
    """Read orb parameters from the environment and dispatch the notification."""
    channels = os.getenv("JB_SPACE_PARAM_CHANNEL_NAME") or ""
    profiles = os.getenv("JB_SPACE_PARAM_RECIPIENT_PROFILE") or ""
    # When exactly one parameter is still the unexpanded default placeholder,
    # blank it so only the explicitly provided recipient kind is used.
    if channels == "$JB_SPACE_DEFAULT_CHANNEL" and profiles != "$JB_SPACE_DEFAULT_RECIPIENT_PROFILE":
        channels = ""
    elif channels != "$JB_SPACE_DEFAULT_CHANNEL" and profiles == "$JB_SPACE_DEFAULT_RECIPIENT_PROFILE":
        profiles = ""
    # Placeholders that survived are resolved from the default env vars.
    if channels == "$JB_SPACE_DEFAULT_CHANNEL":
        channels = os.getenv("JB_SPACE_DEFAULT_CHANNEL") or ""
    if profiles == "$JB_SPACE_DEFAULT_RECIPIENT_PROFILE":
        profiles = os.getenv("JB_SPACE_DEFAULT_RECIPIENT_PROFILE") or ""
    notify(
        custom=if_not_empty(os.getenv("JB_SPACE_PARAM_CUSTOM")),
        template=if_not_empty(os.getenv("JB_SPACE_PARAM_TEMPLATE")),
        # Comma-separated lists; entries are whitespace-trimmed.
        channels=[] if channels == '' else [i.strip() for i in channels.split(',')],
        profiles=[] if profiles == '' else [i.strip() for i in profiles.split(',')],
        client_id=bytes(os.getenv("JB_SPACE_CLIENT_ID"), 'UTF-8'),
        client_secret=bytes(os.getenv("JB_SPACE_CLIENT_SECRET"), 'UTF-8'),
        # Normalize the URL to exactly one https:// prefix.
        space_url='https://' + remove_prefixes(os.getenv("JB_SPACE_URL"), prefixes=['https://', 'http://']),
        event=os.getenv("JB_SPACE_PARAM_EVENT"),
        current_branch=os.getenv("CIRCLE_BRANCH"),
        branch_patterns=os.getenv("JB_SPACE_PARAM_BRANCH_PATTERN").split(',')
    )
if __name__ == '__main__':
    # Entry point when executed as a script by the CircleCI orb step.
    main()
| StarcoderdataPython |
4823496 |
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
from .base import AgentSchedulingComponent
# ------------------------------------------------------------------------------
#
# This is a scheduler which does not schedule, at all. It leaves all placement
# to executors such as srun, jsrun, aprun etc.
#
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
#
class Noop(AgentSchedulingComponent):
    '''
    The Noop scheduler does not perform any placement.
    '''

    # --------------------------------------------------------------------------
    #
    def __init__(self, cfg, session):
        # All setup is delegated to the base scheduling component.
        AgentSchedulingComponent.__init__(self, cfg, session)

    # --------------------------------------------------------------------------
    #
    def schedule_task(self, task):
        # Intentionally a no-op: placement is left to the executor
        # (srun, jsrun, aprun, ...), per the module header.
        return

    # --------------------------------------------------------------------------
    #
    def unschedule_task(self, task):
        # Nothing was allocated, so there is nothing to release.
        return

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # No scheduler-specific configuration is needed.
        pass

    # --------------------------------------------------------------------------
    #
    def _try_allocation(self, task):
        # signal success
        return True
# ------------------------------------------------------------------------------
| StarcoderdataPython |
1759040 | """Module provider for Infomaniak"""
import json
import logging
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
ENDPOINT = "https://api.infomaniak.com"
NAMESERVER_DOMAINS = ["infomaniak.com"]
def provider_parser(subparser):
    """Configure the argparse *subparser* with Infomaniak-specific options."""
    subparser.description = """
    Infomaniak Provider requires a token with domain scope.
    It can be generated for your Infomaniak account on the following URL:
    https://manager.infomaniak.com/v3/infomaniak-api"""
    token_help = "specify the token"
    subparser.add_argument("--auth-token", help=token_help)
class Provider(BaseProvider):
    """Provider class for Infomaniak"""

    def __init__(self, config):
        super(Provider, self).__init__(config)
        # Handling missing required parameters
        if not self._get_provider_option("auth_token"):
            raise Exception("Error, token is not defined")
        # Construct DNS Infomaniak environment
        self.domain_id = None  # numeric id, resolved by _authenticate()
        self.endpoint_api = ENDPOINT
        self.session = None

    def _authenticate(self):
        """Resolve the configured domain to its Infomaniak product id.

        Raises AuthenticationError when the domain is not attached to the
        account behind the token.
        """
        domains = self._get(
            "/1/product", {"service_name": "domain", "customer_name": self.domain}
        )
        LOGGER.debug("found domains %s", domains)
        for domain in domains["data"]:
            if domain["customer_name"] == self.domain:
                self.domain_id = domain["id"]
                break
        else:
            # for/else: only reached when no product matched the domain
            raise AuthenticationError(f"Domain {self.domain} not found")

    def _create_record(self, rtype, name, content):
        """Create a DNS record; a pre-existing identical record is a no-op."""
        ttl = self._get_lexicon_option("ttl")
        # Skip creation when an identical record already exists.
        records = list(
            filter(
                lambda x: x["content"] == content
                and x["type"] == rtype
                and self._relative_name(x["name"]) == self._relative_name(name),
                self._list_records(rtype, name, content),
            )
        )
        if len(records) > 0:
            LOGGER.debug(
                "create_record (ignored, duplicate): %s %s %s", rtype, name, content
            )
            return True
        data = {
            "type": rtype,
            "source": self._relative_name(name),
            "target": content,
        }
        if ttl:
            data["ttl"] = ttl
        result = self._post(f"/1/domain/{self.domain_id}/dns/record", data)
        LOGGER.debug("create_record: %s", result["data"])
        return True

    def _list_records(self, rtype=None, name=None, content=None):
        """Return all records of the domain, filtered by any of rtype/name/content."""
        records = []
        record_data = self._get(f"/1/domain/{self.domain_id}/dns/record")
        for record in record_data["data"]:
            records.append(
                {
                    "type": record["type"],
                    "name": record["source_idn"],
                    "ttl": record["ttl"],
                    "content": record["target_idn"],
                    "id": record["id"],
                }
            )
        if rtype:
            records = [record for record in records if record["type"] == rtype]
        if name:
            records = [
                record
                for record in records
                if record["name"].lower() == self._full_name(name.lower())
            ]
        if content:
            records = [
                record
                for record in records
                if record["content"].lower() == content.lower()
            ]
        LOGGER.debug("list_records: %s", records)
        return records

    def _get_record(self, identifier):
        """Fetch a single record by its Infomaniak id."""
        record_data = self._get(f"/1/domain/{self.domain_id}/dns/record/{identifier}")
        record = {
            "type": record_data["data"]["type"],
            "name": record_data["data"]["source_idn"],
            "ttl": record_data["data"]["ttl"],
            "content": record_data["data"]["target_idn"],
            "id": record_data["data"]["id"],
        }
        LOGGER.debug("get_record: %s", record)
        return record

    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update a record; without an identifier it must match exactly one record."""
        records = self._list_records(rtype, name)
        if not identifier:
            if len(records) == 1:
                identifier = records[0]["id"]
                record = records[0]
            elif len(records) > 1:
                raise Exception("Several record identifiers match the request")
            else:
                raise Exception("Record identifier could not be found")
        else:
            record = self._get_record(identifier)
        # Keep the existing TTL; only overwrite the fields that were supplied.
        data = {"ttl": record["ttl"]}
        if name:
            data["source"] = self._relative_name(name)
        if content:
            data["target"] = content
        self._put(f"/1/domain/{self.domain_id}/dns/record/{identifier}", data)
        LOGGER.debug("update_record: %s", identifier)
        return True

    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete a record by id, or every record matching rtype/name/content."""
        delete_record_id = []
        if not identifier:
            records = self._list_records(rtype, name, content)
            delete_record_id = [record["id"] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug("delete_records: %s", delete_record_id)
        for record_id in delete_record_id:
            self._delete(f"/1/domain/{self.domain_id}/dns/record/{record_id}")
        LOGGER.debug("delete_record: %s", True)
        return True

    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Perform an authenticated HTTP call against the Infomaniak API.

        Raises on HTTP errors and on API-level non-success responses.
        """
        headers = {}
        target = self.endpoint_api + url
        body = ""
        if data is not None:
            headers["Content-type"] = "application/json"
            body = json.dumps(data)
        headers["Authorization"] = f"Bearer {self._get_provider_option('auth_token')}"
        result = requests.request(
            method=action, url=target, params=query_params, data=body, headers=headers
        )
        result.raise_for_status()
        json_result = result.json()
        if json_result["result"] != "success":
            raise Exception("API didn't return success status")
        return json_result
| StarcoderdataPython |
3224314 | <reponame>DanTGL/AdventOfCode2020<gh_stars>0
import math


def travel(instructions):
    """Return the Manhattan distance travelled (AoC 2020 day 12, part 1).

    instructions: iterable of (action, value) pairs. N/S/E/W translate the
    ship, L/R rotate the heading, F moves forward along the heading.
    """
    # Unit vectors for the absolute translations.
    dirs = {
        "E": [1, 0],
        "N": [0, 1],
        "W": [-1, 0],
        "S": [0, -1],  # BUG FIX: was [-1, 0], a duplicate of "W"
    }
    heading = 0.0  # radians; 0 == east, counter-clockwise positive
    pos = [0.0, 0.0]
    for action, value in instructions:
        if action == "L":
            heading += math.radians(value)
        elif action == "R":
            heading -= math.radians(value)
        elif action == "F":
            pos[0] += math.cos(heading) * value
            pos[1] += math.sin(heading) * value
        else:
            dx, dy = dirs[action]
            pos[0] += dx * value
            pos[1] += dy * value
    return abs(pos[0]) + abs(pos[1])


if __name__ == "__main__":
    inputs = [(line[0], int(line[1:])) for line in open("day12/input").read().splitlines()]
    print(travel(inputs))
3293867 | <reponame>stefanmerb/dash_webapp<filename>simple_webapp.py
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
# Bootstrap-themed Dash app; `server` is exposed for WSGI deployment.
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server

# Static page layout: three labelled numeric inputs (Hoehe/Breite/Laenge),
# a compute button, and a read-only output row. Empty H1 rows act as spacers.
app.layout = html.Div(
    [
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row(dbc.Col(html.H1("Schnellrechner"), style={"color" : "blue"})),
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row(
            [
                dbc.Col(html.Div("Höhe"),
                        #style={"border" : "blue solid", "align-items" : "center"},
                        width = {"size" : 1, "offset" : 1}),
                dbc.Col(dbc.Input(id="input1", placeholder="...", type="number",
                                  style={"width" : "10%"})),
            ]
        ),
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row(
            [
                dbc.Col(html.Div("Breite"),
                        width = {"size" : 1, "offset" : 1}),
                dbc.Col(dbc.Input(id="input2", placeholder="...", type="number",
                                  style={"width" : "10%"})),
            ]
        ),
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row(
            [
                dbc.Col(html.Div("Länge"),
                        width={"size": 1, "offset": 1}),
                dbc.Col(dbc.Input(id="input3", placeholder="...", type="number",
                                  style={"width": "10%"})),
            ]
        ),
        dbc.Row(dbc.Col(html.H1(""))),
        # Button row; the empty leading column aligns the button with the inputs.
        dbc.Row([
            dbc.Col(html.Div(""),
                    #style={"border": "blue solid"},
                    width={"size": 1, "offset": 1}),
            dbc.Button("Berechnen", id = "button1", color="primary", className="mr-1", n_clicks=0,
                       style={"width" : "10%"}),
        ]
        ),
        dbc.Row(dbc.Col(html.H1(""))),
        dbc.Row(
            [
                dbc.Col(html.Div("Kostenwert"),
                        width={"size": 1, "offset": 1}),
                dbc.Col(html.P(id="output",
                               style={"width": "10%"})),
            ]
        ),
    ]
)
# Highest click count already handled; the output is only recomputed when the
# button has actually been pressed again. (BUG FIX: the original preceded this
# with a module-level `global counter` statement, which is a no-op at module
# scope and was removed.)
counter = 0


@app.callback(Output("output", "children"),
              [
                  Input("input1", "value"),
                  Input("input2", "value"),
                  Input("input3", "value"),
                  Input("button1", "n_clicks")
              ])
def output_text(val1, val2, val3, n_click):
    """Compute the cost value (hoehe / breite + 2.3 * laenge) on button press.

    Returns an empty string until the button is clicked (again); input edits
    alone do not trigger a recomputation.
    """
    global counter
    if n_click > 0 and n_click > counter:
        # BUG FIX: dropped the dead local `n_click_new = n_click + 1`; the
        # click is acknowledged by advancing `counter` alone.
        erg = (val1 / val2) + 2.3 * val3
        counter += 1
        return erg
    return ""
# Start the Dash development server when executed as a script.
if __name__ == "__main__":
    app.run_server()
1695635 | <filename>run_queries.py
import argparse
import os

from inverted_index import InvertedIndex
from preprocessor import Preprocessor
from similarity_measures import TF_Similarity, TFIDF_Similarity, BM25_Similarity

parser = argparse.ArgumentParser(description='Run all queries on the inverted index.')
parser.add_argument('--new', default=True, help='If True then build a new index from scratch. If False then attempt to'
                                                ' reuse existing index')
# BUG FIX: the help string was missing the closing quote around BM25.
parser.add_argument('--sim', default='BM25', help='The type of similarity to use. Should be "TF" or "TFIDF" or "BM25"')
args = parser.parse_args()

index = InvertedIndex(Preprocessor())
index.index_directory(os.path.join('gov', 'documents'), use_stored_index=True)

sim_name_to_class = {'TF': TF_Similarity,
                     'TFIDF': TFIDF_Similarity,
                     'BM25': BM25_Similarity}

sim = sim_name_to_class[args.sim]
index.set_similarity(sim)
print(f'Setting similarity to {sim.__name__}')
print()
print('Index ready.')

topics_file = os.path.join('gov', 'topics', 'gov.topics')
runs_file = os.path.join('runs', 'retrieved.runs')

# BUG FIX: the original re-opened the runs file in append mode for every
# retrieved document and closed only the last handle, leaking descriptors.
# Both files are now opened once and managed by a single `with` block.
with open(topics_file, 'r') as tf, open(runs_file, 'a') as rf:
    for query in tf:
        query_stem = query.lstrip('1234567890 ')  # strip the leading topic number
        # The leading digits of the line are the query id.
        query_id = ''
        for digit in query:
            if digit.isdigit():
                query_id += digit
            else:
                break
        # Rank restarts at 0 for each query; default max_result docs is 10.
        for rank, document_id in enumerate(index.run_query(query_stem)):
            rf.write('{} {} {} {} {} {}\n'.format(query_id, 'Q0', document_id[1], rank, document_id[0], 'MY_IR_SYSTEM'))
1787296 | <filename>Sec20_Greedy/q1029.py<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
class Solution:
    """LeetCode 1029 (Two City Scheduling), greedy solution."""

    # BUG FIX: the original annotation used the bare name `List`, which was
    # never imported and raises NameError at class-creation time outside the
    # LeetCode judge; builtin generics (PEP 585) need no import.
    def twoCitySchedCost(self, costs: list[list[int]]) -> int:
        """Return the minimum total cost of sending exactly half of the
        people to city A and the other half to city B.

        Greedy: sort by (costA - costB); the first half — those who save
        the most by flying to A — go to A, the rest go to B.
        """
        costs.sort(key=lambda x: x[0] - x[1])
        ans = 0
        n = len(costs) // 2
        for i in range(n):
            ans += costs[i][0] + costs[i + n][1]
        return ans
| StarcoderdataPython |
3260066 | <reponame>Panopto/universal-content-library-specification
import argparse
import os.path
import hashlib
import boto
import boto3
import re
from botocore.errorfactory import ClientError
from boto.s3.connection import S3Connection
def get_file_from_s3(aws_access_key,
                     aws_secret_key,
                     aws_region,
                     local_path,
                     bucket_name,
                     key_prefix):
    """Download one S3 object to a local file.

    Falls back to the default AWS CLI profile when no explicit
    credentials are supplied.
    """
    if not (local_path and bucket_name and key_prefix):
        raise Exception(
            'local_path [{0}],'
            'bucket_name [{1}],'
            'key_prefix [{2}],'
            'are required.'.format(
                local_path,
                bucket_name,
                key_prefix
            ))

    if aws_access_key is None or aws_secret_key is None:
        client = boto3.client('s3')  # use the default AWS CLI profile
    else:
        aws_session = boto3.session.Session(region_name=aws_region)
        client = aws_session.client('s3',
                                    config=boto3.session.Config(
                                        signature_version='s3v4'),
                                    aws_access_key_id=aws_access_key,
                                    aws_secret_access_key=aws_secret_key)
    client.download_file(bucket_name, key_prefix, local_path)
def directory_exists(bucket_name, key_prefix):
    """Check whether a "directory" (key prefix) exists in an S3 bucket.

    Not implemented yet. Raising is safer than the previous ``print``
    stub, whose implicit ``None`` return (falsy) could be mistaken for a
    genuine "does not exist" answer by callers.
    """
    raise NotImplementedError("directory_exists is not yet implemented")
def file_exists(aws_access_key,
                aws_secret_key,
                aws_region,
                bucket_name,
                key_prefix):
    """
    Checks if a S3 key exists
    """
    if aws_access_key is None or aws_secret_key is None:
        client = boto3.client('s3')  # use the default AWS CLI profile
    else:
        aws_session = boto3.session.Session(region_name=aws_region)
        client = aws_session.client(
            's3',
            config=boto3.session.Config(signature_version='s3v4'),
            aws_access_key_id=aws_access_key,
            aws_secret_access_key=aws_secret_key
        )
    try:
        client.head_object(Bucket=bucket_name, Key=key_prefix)
    except ClientError:
        # head_object raises when the key is absent (or inaccessible)
        return False
    return True
| StarcoderdataPython |
1763520 | from flappy import *
def nextGeneration():
    """Create and initialize a fresh population of birds."""
    population = []
    for _ in range(POPULATION):
        newcomer = bird()
        newcomer.initialize()
        population.append(newcomer)
    return population
3298049 | import torch
import torch.nn as nn
import torch.nn.functional as F
from dataset_cl import ContrastiveData
class Encoder(nn.Module):
def __init__(self, embed_size, hidden_size, temperature):
super(Encoder, self).__init__()
self.skill_embed_size = embed_size
self.hidden_size = hidden_size
self.t = temperature
self.skill_encoder = nn.Sequential(
nn.Linear(126, self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size, self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size, self.skill_embed_size),
# nn.Softmax()
)
def forward(self, x):
b, n_item, size = x.shape
x = x.reshape(-1, size)
temp = self.skill_encoder(x)
temp = temp.reshape(b, n_item, self.skill_embed_size)
sim = F.cosine_similarity(temp[:, 0, :].unsqueeze(1), temp, dim=-1)
sim = torch.exp(sim[:, 1:] / self.t)
loss = sim[:, 0] / torch.sum(sim, dim=1)
return temp, sim, -loss.mean()
# dataset = ContrastiveData()
# tt = dataset[:2]
# en = Encoder(100, 400, 0.5)
# temp, sim, loss = en.forward(tt)
| StarcoderdataPython |
1627240 | from multiprocessing import Process, Queue
import os
import time, random
def put_proc(q, urls):
    """Producer: push every url onto the shared queue, pausing randomly."""
    pid = os.getpid()
    print("Child putting process %s started. " % (pid,))
    for item in urls:
        q.put(item)
        print('Putting %s to queue.' % (item,))
        delay = random.random() * 3
        time.sleep(delay)
def get_proc(q):
    """Consumer: block forever, printing every item taken off the queue."""
    print("Child reading process %s started." % (os.getpid(),))
    while True:
        item = q.get(True)  # blocking get
        print("Getting from queue is %s " % (item,))
if __name__ == '__main__':
    print("Parent process is started %s " % (os.getpid()))
    queue = Queue()
    # Two producers feed the same queue; a single consumer drains it.
    process_writer1 = Process(target=put_proc, args=(queue, ['url1', 'url2', 'url3', 'url4']))
    process_writer2 = Process(target=put_proc, args=(queue, ['url5', 'url6', 'url7', 'url8']))
    process_reader3 = Process(target=get_proc, args=(queue,))
    process_writer1.start()
    process_writer2.start()
    process_reader3.start()
    process_writer1.join()
    process_writer2.join()
    process_reader3.terminate()  # manually stop the reader (it loops forever)
| StarcoderdataPython |
3398962 | <reponame>farfanoide/libhdd-sched
import unittest
import json
from lib.algorithms import FCFS
from lib.simulation import Simulation, SimulationResult
from lib.parsers import parse_lot
class TestFcfs(unittest.TestCase):
    """Exercise the FCFS algorithm against the bundled proto-simulation."""

    # BUG FIX: the original used ``json.loads(file.read(open(...)))``, which
    # relies on the Python 2 ``file`` builtin (removed in Python 3) and never
    # closed the handle. ``json.load`` on a context-managed handle is the
    # equivalent, leak-free form.
    with open('./examples/protosimulation.json') as _fixture:
        simulation_dict = json.load(_fixture)
    simulation = Simulation(simulation_dict)
    expected = '*500 *400 *100 53 151 33 353 100 455 15 101 126 366 415'
    expected_reqs = parse_lot(expected).requirements
    # movs 2681 final_dir: true lot_intro p 45, 25 or 45, 80 method fcfs

    def setUp(self):
        self.results = FCFS().execute(self.simulation)

    def test_fcfs_returns_correct_object(self):
        self.assertIsInstance(self.results, SimulationResult)

    def test_fcfs_attended_requirements_order(self):
        self.assertEquals(
            self.results.attended_requirements,
            self.expected_reqs)

    def test_fcfs_default_behaviour(self):
        self.assertTrue(self.results.success)

    # def test_fcfs_order(self):
    #     self.assertEquals(sel)
| StarcoderdataPython |
3385059 | <reponame>fabric-testbed/UserInformationService<filename>server/swagger_server/test/test_preferences_controller.py
# coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
from swagger_server.models.preference_type import PreferenceType # noqa: E501
from swagger_server.models.preferences import Preferences # noqa: E501
from swagger_server.test import BaseTestCase
class TestPreferencesController(BaseTestCase):
    """PreferencesController integration test stubs"""
    # swagger-codegen generated stubs: each test drives the endpoint through
    # the Flask test client and only checks for a 200 response.
    # NOTE(review): the request paths start with a double slash ('//...'),
    # which suggests an empty base path -- confirm against the API spec.

    def test_preferences_preftype_uuid_get(self):
        """Test case for preferences_preftype_uuid_get

        get user preferences of specific type (settings, permissions or interests; open only to self)
        """
        response = self.client.open(
            '//preferences/{preftype}/{uuid}'.format(preftype=PreferenceType(), uuid='uuid_example'),
            method='GET')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_preferences_preftype_uuid_put(self):
        """Test case for preferences_preftype_uuid_put

        update user preferences by type (open only to self)
        """
        query_string = [('preferences', None)]
        response = self.client.open(
            '//preferences/{preftype}/{uuid}'.format(uuid='uuid_example', preftype=PreferenceType()),
            method='PUT',
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_preferences_uuid_get(self):
        """Test case for preferences_uuid_get

        get all user preferences as an object (open only to self)
        """
        response = self.client.open(
            '//preferences/{uuid}'.format(uuid='uuid_example'),
            method='GET')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    import unittest
    unittest.main()
| StarcoderdataPython |
1781620 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gzip
import os
import os.path as op
import sys

import wget
# Python 2/3 compatibility shims.
if sys.version_info[0] < 3:
    int_types = (int, long)
    # BUG FIX: the original assigned ``urlopen = urllib.urlopen`` without
    # ever importing ``urllib``, raising NameError on Python 2.
    from urllib import urlopen
else:
    int_types = (int,)
    basestring = str
    from urllib.request import urlopen
def download_file(src_ftp, dst_file, prt=sys.stdout, loading_bar=True):
    """Fetch src_ftp into dst_file unless the target already exists.

    A ``.gz`` source destined for an uncompressed target is downloaded
    alongside the target and then unzipped in place.
    """
    if os.path.isfile(dst_file):
        return  # already downloaded
    do_gunzip = src_ftp[-3:] == '.gz' and dst_file[-3:] != '.gz'
    dst_wget = "{DST}.gz".format(DST=dst_file) if do_gunzip else dst_file
    # Write to stderr, not stdout so this message will be seen when running nosetests
    wget_msg = "wget.download({SRC} out={DST})\n".format(SRC=src_ftp, DST=dst_wget)
    sys.stderr.write("  {WGET}".format(WGET=wget_msg))
    bar = wget.bar_adaptive if loading_bar else loading_bar
    try:
        wget.download(src_ftp, out=dst_wget, bar=bar)
        if do_gunzip:
            if prt is not None:
                prt.write("  gunzip {FILE}\n".format(FILE=dst_wget))
            gzip_open_to(dst_wget, dst_file)
    except IOError as errmsg:
        import traceback
        traceback.print_exc()
        sys.stderr.write("**FATAL cmd: {WGET}".format(WGET=wget_msg))
        sys.stderr.write("**FATAL msg: {ERR}".format(ERR=str(errmsg)))
        sys.exit(1)
def gzip_open_to(fin_gz, fout):
    """Inflate ``fin_gz`` into ``fout`` and remove the compressed original."""
    with gzip.open(fin_gz, 'rb') as zstrm, open(fout, 'wb') as ostrm:
        ostrm.write(zstrm.read())
    assert os.path.isfile(fout), "COULD NOT GUNZIP({G}) TO FILE({F})".format(G=fin_gz, F=fout)
    os.remove(fin_gz)
| StarcoderdataPython |
144129 | <reponame>RoyMachineLearning/lofo-importance
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_validate
from tqdm import tqdm_notebook
import multiprocessing
import warnings
from lofo.infer_defaults import infer_model
class LOFOImportance:
    """Leave-One-Feature-Out importance.

    A feature's importance is the drop in cross-validated score when that
    feature is removed from the feature set.
    """

    def __init__(self, df, features, target,
                 scoring, model=None, cv=4, n_jobs=None):
        # Work on a copy so model inference cannot mutate the caller's frame.
        df = df.copy()
        self.fit_params = {}
        if model is None:
            # Infer a default model (plus encoded categorical columns).
            model, df, categoricals = infer_model(df, features, target, n_jobs)
            self.fit_params["categorical_feature"] = categoricals
            # NOTE(review): parallelism appears to be delegated to the
            # inferred model here, so LOFO's own multiprocessing is disabled
            # -- confirm against infer_model's contract.
            n_jobs = 1
        self.model = model
        self.df = df
        self.features = features
        self.target = target
        self.scoring = scoring
        self.cv = cv
        self.n_jobs = n_jobs
        if self.n_jobs is not None and self.n_jobs > 1:
            warning_str = "Warning: If your model is multithreaded, please initialise the number \
            of jobs of LOFO to be equal to 1, otherwise you may experience issues."
            warnings.warn(warning_str)

    def _get_cv_score(self, X, y):
        """Cross-validate the model on (X, y) and return the test scores array."""
        fit_params = self.fit_params.copy()
        if "categorical_feature" in self.fit_params:
            # Keep only the categoricals still present in this feature subset.
            fit_params["categorical_feature"] = [cat for cat in fit_params["categorical_feature"] if cat in X.columns]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cv_results = cross_validate(self.model, X, y, cv=self.cv, scoring=self.scoring, fit_params=fit_params)
        return cv_results['test_score']

    def _get_cv_score_parallel(self, feature, feature_list, result_queue, base=False):
        """Worker for the multiprocessing path; posts (feature, scores) to the queue."""
        test_score = self._get_cv_score(self.df[feature_list], self.df[self.target])
        if not base:
            result_queue.put((feature, test_score))
        return test_score

    def get_importance(self):
        """Return a DataFrame of per-feature importance mean/std, sorted descending."""
        base_cv_score = self._get_cv_score(self.df[self.features], self.df[self.target])
        if self.n_jobs is not None and self.n_jobs > 1:
            pool = multiprocessing.Pool(self.n_jobs)
            manager = multiprocessing.Manager()
            result_queue = manager.Queue()
            # NOTE(review): the base score computed above is recomputed here;
            # looks redundant -- confirm before removing either call.
            base_cv_score = self._get_cv_score_parallel('all', self.features, result_queue, True)
            for f in self.features:
                feature_list = [feature for feature in self.features if feature != f]
                pool.apply_async(self._get_cv_score_parallel, (f, feature_list, result_queue))
            pool.close()
            pool.join()
            lofo_cv_scores = [result_queue.get() for _ in range(len(self.features))]
            lofo_cv_scores_normalized = np.array([base_cv_score - lofo_cv_score
                                                  for f, lofo_cv_score in lofo_cv_scores])
            # Queue results arrive unordered; realign self.features with them.
            self.features = [score[0] for score in lofo_cv_scores]
        else:
            lofo_cv_scores = []
            for f in tqdm_notebook(self.features):
                feature_list = [feature for feature in self.features if feature != f]
                lofo_cv_scores.append(self._get_cv_score(self.df[feature_list], self.df[self.target]))
            lofo_cv_scores_normalized = np.array([base_cv_score - lofo_cv_score for lofo_cv_score in lofo_cv_scores])
        importance_df = pd.DataFrame()
        importance_df["feature"] = self.features
        importance_df["importance_mean"] = lofo_cv_scores_normalized.mean(axis=1)
        importance_df["importance_std"] = lofo_cv_scores_normalized.std(axis=1)
        return importance_df.sort_values("importance_mean", ascending=False)
def plot_importance(importance_df, figsize=(8, 8)):
    """Horizontal bar plot of importances: green positive, red negative."""
    df = importance_df.copy()
    df["color"] = (df["importance_mean"] > 0).map({True: 'g', False: 'r'})
    df.sort_values("importance_mean", inplace=True)
    df.plot(x="feature", y="importance_mean", xerr="importance_std",
            kind='barh', color=df["color"], figsize=figsize)
| StarcoderdataPython |
3367083 | <reponame>cedeplar/crawler<filename>tests/test_HouseRequest.py
import responses
from crawler.HouseRequest import HouseRequest
def test_get_request():
    """A default HouseRequest targets 'venda' and interpolates the page number."""
    house = HouseRequest()
    assert house._venda_or_locacao == 'venda'
    expected = (
        'https://www.netimoveis.com/venda/&pagina=10&busca='
        '{"valorMinimo": null, "valorMaximo": null, "quartos": null, '
        '"suites": null, "banhos": null, "vagas": null, '
        '"idadeMinima": null, "areaMinima": null, "areaMaxima": null, '
        '"bairros": [], "ordenar": null}&quantidadeDeRegistro=50'
    )
    assert house._create_get_request(10) == expected
def test_get_req_num_houses():
    # NOTE(review): ``house.__url_num_houses`` only resolves if HouseRequest
    # exposes an attribute literally named ``__url_num_houses``; if the method
    # is declared with a double underscore inside the class it is name-mangled
    # to ``_HouseRequest__url_num_houses`` and this lookup raises
    # AttributeError -- confirm against crawler.HouseRequest.
    house = HouseRequest(False)
    assert house._venda_or_locacao == 'locacao'
    expected = 'https://www.netimoveis.com/quantidade/Resultado/' \
               'ResultadoQuantidade/?transacao=locacao&estado=minas-gerais&' \
               'cidade=belo-horizonte&tipo=apartamento&' \
               'busca={"valorMinimo": null, "valorMaximo": null, ' \
               '"quartos": null, "suites": null, "banhos": null, ' \
               '"vagas": null, "idadeMinima": null, "areaMinima": null, ' \
               '"areaMaxima": null, "bairros": [], "ordenar": null}'
    assert house.__url_num_houses('minas-gerais',
                                  'belo-horizonte',
                                  'apartamento') == expected
@responses.activate
def test_total_registros():
    # NOTE(review): same concern as above -- ``house.__url_num_houses`` will
    # raise AttributeError if the method name is mangled inside HouseRequest;
    # confirm the method's actual (possibly single-underscore) name.
    house = HouseRequest()
    url_num_houses = house.__url_num_houses('minas-gerais',
                                            'belo-horizonte',
                                            'apartamento')
    # Stub the HTTP endpoint so the count is read from the canned payload.
    responses.add(responses.GET, url_num_houses,
                  json={'erro': False, 'mensagem': None,
                        'totalDeRegistros': 2211, 'unico': None,
                        'lista': None}, status=200)
    assert house._num_houses('minas-gerais',
                             'belo-horizonte',
                             'apartamento') == 2211
| StarcoderdataPython |
1705588 | <reponame>kesia-barros/exercicios-python<gh_stars>0
# The tuple alternates product name and price.
lista = ('Lapis', 1.75,'Borracha', 2.00, "Caderno", 15.00,
         "Estojo", 25.00, "Transferidor", 4.20, "Compasso", 9.99,
         "Mochila", 120.32, "Canetas", 22.30, "livro", 34.90)
print("-="*20)
print("              LISTAGEM DE PREÇOS")
print("-="*20)
# Even positions hold names, odd positions the matching price.
for pos, item in enumerate(lista):
    if pos % 2 == 0:
        print(f"{item:.<30}", end="")
    else:
        print(f"R${item:>7.2f}")
print("-="*20)
#outra forma de fazer:
produtos = ("Lápis", 1.75, "Borracha", 2.00, "Caderno", 15.90, "Estojo", 25.00, "Transferidor", 4.20,
            "Compasso", 9.99, "Mochila", 120.32, "Canetas", 22.30, "Livro", 34.90)
print("="*50)
print("{:^50}".format("LISTAGEM DE PREÇOS"))
print("="*50)
# Walk name/price pairs directly instead of stepping indices by two.
for nome, preco in zip(produtos[::2], produtos[1::2]):
    print(f"{nome:.<40}", end="")
    print(f"R$ {preco:>7.2f}")
print("="*50)
1764203 | <reponame>nderkach/mitmproxy<filename>libmproxy/web/__init__.py
from __future__ import absolute_import, print_function
import tornado.ioloop
import tornado.httpserver
from .. import controller, flow
from . import app
class Stop(Exception):
    """Raised to request that the web master's IO loop shut down."""
    pass
class WebState(flow.State):
    """Flow state container for the web UI (currently identical to flow.State)."""
    def __init__(self):
        flow.State.__init__(self)
class Options(object):
    """Configuration bag for the web master.

    Every name listed in ``attributes`` is guaranteed to exist on an
    instance; any option not supplied to the constructor defaults to None.
    """
    attributes = [
        "app",
        "app_domain",
        "app_ip",
        "anticache",
        "anticomp",
        "client_replay",
        "eventlog",
        "keepserving",
        "kill",
        "intercept",
        "no_server",
        "refresh_server_playback",
        "rfile",
        "scripts",
        "showhost",
        "replacements",
        "rheaders",
        "setheaders",
        "server_replay",
        "stickycookie",
        "stickyauth",
        "stream_large_bodies",
        "verbosity",
        "wfile",
        "nopop",
        "wdebug",
        "wport",
        "wiface",
    ]

    def __init__(self, **kwargs):
        # Apply explicit options first, then backfill the known names.
        for name, value in kwargs.items():
            setattr(self, name, value)
        for name in self.attributes:
            if not hasattr(self, name):
                setattr(self, name, None)
class WebMaster(flow.FlowMaster):
    """Flow master that pushes flow and log events to connected web clients."""

    def __init__(self, server, options):
        self.options = options
        self.app = app.Application(self.options.wdebug)
        super(WebMaster, self).__init__(server, WebState())
        # Monotonic id assigned to log events broadcast to the browser.
        self.last_log_id = 0

    def tick(self):
        # Drain the master queue without blocking; driven by a tornado
        # PeriodicCallback from run().
        flow.FlowMaster.tick(self, self.masterq, timeout=0)

    def run(self):  # pragma: no cover
        """Start the proxy slave and the tornado IO loop (blocks until Stop)."""
        self.server.start_slave(
            controller.Slave,
            controller.Channel(self.masterq, self.should_exit)
        )
        iol = tornado.ioloop.IOLoop.instance()

        http_server = tornado.httpserver.HTTPServer(self.app)
        http_server.listen(self.options.wport)

        tornado.ioloop.PeriodicCallback(self.tick, 5).start()
        try:
            iol.start()
        except (Stop, KeyboardInterrupt):
            self.shutdown()

    def handle_request(self, f):
        # Broadcast the new flow to all web clients before normal processing.
        app.ClientConnection.broadcast("add_flow", f.get_state(True))
        flow.FlowMaster.handle_request(self, f)
        if f:
            f.reply()
        return f

    def handle_response(self, f):
        app.ClientConnection.broadcast("update_flow", f.get_state(True))
        flow.FlowMaster.handle_response(self, f)
        if f:
            f.reply()
        return f

    def handle_error(self, f):
        app.ClientConnection.broadcast("update_flow", f.get_state(True))
        flow.FlowMaster.handle_error(self, f)
        return f

    def handle_log(self, l):
        # Assign a fresh id, push the event to clients, and record it locally.
        self.last_log_id += 1
        app.ClientConnection.broadcast(
            "add_event", {
                "id": self.last_log_id,
                "message": l.msg,
                "level": l.level
            }
        )
        self.add_event(l.msg, l.level)
        l.reply()
| StarcoderdataPython |
3367058 | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pcf.util.aws.tag_specifications import TagSpecifications
TS_LOL_CATS = TagSpecifications('sad', lol='cats')
TS_CATS_LOL = TagSpecifications('sad', cats='lol')
TS_GG_CATS_LOL_CATS = TagSpecifications('sad', gg='cats', lol='cats')
TS_LOL_CATS_GG_CATS = TagSpecifications('sad', lol='cats', gg='cats')
TS_EMPTY = TagSpecifications('sad')
class TestTagSpecification:
def test_init(self):
tag_specification = TagSpecifications("gg", rofl="lol")
assert tag_specification.tags == {"rofl":"lol"}
assert tag_specification.resource_type == "gg"
############## Test Separator ###############
tag_specification = TagSpecifications("gg", rofl="lol", lol="cats")
assert tag_specification.tags == {"rofl":"lol", "lol": "cats"}
assert tag_specification.resource_type == "gg"
############## Test Separator ###############
tag_specification = TagSpecifications("gg")
assert tag_specification.tags == {}
assert tag_specification.resource_type == "gg"
def test_render(self):
tag_specification = TagSpecifications("gg", rofl="lol")
rendered_tags = tag_specification.render()
assert len(rendered_tags) == 1
assert rendered_tags[0]["ResourceType"] == "gg"
assert len(rendered_tags[0]["Tags"]) == 1
assert rendered_tags[0]["Tags"][0] == {"Key": "rofl", "Value": "lol"}
############## Test Separator ###############
tag_specification = TagSpecifications("gg", rofl="lol", lol="cats")
rendered_tags = tag_specification.render()
allowed_tag_specs = [{"Key": "lol", "Value": "cats"}, {"Key": "rofl", "Value": "lol"}]
assert len(rendered_tags) == 1
assert rendered_tags[0]["ResourceType"] == "gg"
assert len(rendered_tags[0]["Tags"]) == 2
assert rendered_tags[0]["Tags"][0] in allowed_tag_specs
assert rendered_tags[0]["Tags"][1] in allowed_tag_specs
############## Test Separator ###############
tag_specification = TagSpecifications("gg")
rendered_tags = tag_specification.render()
assert len(rendered_tags) == 1
assert rendered_tags[0]["ResourceType"] == "gg"
assert len(rendered_tags[0]["Tags"]) == 0
def test_add_tag(self):
tag_specification = TagSpecifications("gg", rofl="lol")
tag_specification.add_tag("lol", "cats")
assert tag_specification.tags == {"rofl":"lol", "lol": "cats"}
tag_specification.add_tag("lol", "wp")
assert tag_specification.tags == {"rofl":"lol", "lol": "wp"}
def test_add_tags(self):
tag_specification = TagSpecifications("gg", rofl="lol")
tag_specification.add_tags(lol="cats", gg="wp")
assert tag_specification.tags == {"rofl":"lol", "lol": "cats", "gg":"wp"}
tag_specification.add_tags()
assert tag_specification.tags == {"rofl":"lol", "lol": "cats", "gg":"wp"}
tag_specification.add_tags(lol="meow", rofl="copter")
assert tag_specification.tags == {"rofl":"copter", "lol": "meow", "gg":"wp"}
tag_specification.add_tags(**{"gg":"wp", "gl":"hf"})
assert tag_specification.tags == {"rofl":"copter", "lol": "meow", "gg":"wp", "gl":"hf"}
def test_delete_tag(self):
tag_specification = TagSpecifications("gg", rofl="lol")
tag_specification.delete_tag("gl")
assert tag_specification.tags == {"rofl":"lol"}
tag_specification.delete_tag(None)
assert tag_specification.tags == {"rofl":"lol"}
tag_specification.delete_tag("rofl")
assert tag_specification.tags == {}
tag_specification.delete_tag("rofl")
assert tag_specification.tags == {}
def test_delete_tags(self):
tag_specification = TagSpecifications("gg", rofl="lol", lol="cats")
tag_specification.delete_tags()
assert tag_specification.tags == {"rofl":"lol", "lol":"cats"}
tag_specification.delete_tags("gg", "gl")
assert tag_specification.tags == {"rofl":"lol", "lol":"cats"}
tag_specification.delete_tags("rofl", "lol")
assert tag_specification.tags == {}
def test_create_from_aws_response(self):
aws_response_full = {
'TagSpecifications': [
{
'ResourceType': 'instance',
'Tags': [
{
'Key': 'gl',
'Value': 'hf'
},
]
},
]
}
tag_specification = TagSpecifications.create_from_aws_response(aws_response_full)
assert tag_specification.resource_type == 'instance'
assert tag_specification.tags == {'gl': 'hf'}
############## Test Separator ###############
aws_response_partial = [
{
'ResourceType': 'instance',
'Tags': [
{
'Key': 'gg',
'Value': 'wp'
},
]
},
]
tag_specification = TagSpecifications.create_from_aws_response(aws_response_partial)
assert tag_specification.resource_type == 'instance'
assert tag_specification.tags == {'gg': 'wp'}
def test_subtract_empty():
assert (TS_CATS_LOL - TS_EMPTY).tags == { 'cats': 'lol' }
assert (TS_GG_CATS_LOL_CATS - TS_EMPTY).tags == { 'gg': 'cats', 'lol': 'cats'}
assert (TS_EMPTY - TS_CATS_LOL).tags == {}
assert (TS_EMPTY - TS_GG_CATS_LOL_CATS).tags == {}
def test_subtract_unordered():
assert (TS_GG_CATS_LOL_CATS - TS_LOL_CATS_GG_CATS).tags == {}
assert (TS_LOL_CATS_GG_CATS - TS_GG_CATS_LOL_CATS).tags == {}
def test_subract_one_tag():
assert (TS_GG_CATS_LOL_CATS - TS_LOL_CATS).tags == { 'gg': 'cats' }
assert (TS_LOL_CATS - TS_GG_CATS_LOL_CATS).tags == {}
def test_subtract_disjoint():
assert (TS_GG_CATS_LOL_CATS - TS_CATS_LOL).tags == { 'gg': 'cats', 'lol': 'cats' }
assert (TS_CATS_LOL - TS_GG_CATS_LOL_CATS).tags == { 'cats': 'lol' }
| StarcoderdataPython |
5721 | <filename>ics/mergeGatingSets.py
#!/usr/bin/env python
"""
Usage examples:
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv
sbatch -n 1 -t 3-0 -c 4 -o functions_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv"
sbatch -n 1 -t 3-0 -c 4 -o functions_markers_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4 --out functions_markers_extract.csv"
sbatch -n 1 -t 3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv"
sbatch -n 1 -t 3-0 -c 4 -o cell_functions_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv"
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3 --out cell_functions_extract.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3 --out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6 --out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
To delete all tmp files use:
find . -name \merged_tmp*.feather -type f -delete
"""
def mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus, testsamples, testbatch, outFile, metaCols=None, filters=None, useFeather=False):
    """Apply an extraction function to every batch folder and merge the results.

    Parameters
    ----------
    dataFolder : str
        Folder containing one sub-folder per batch.
    extractionFunc : callable
        Per-sample feature extraction function (passed through to mergeSamples).
    extractionKwargs : dict
        Keyword arguments forwarded to extractionFunc.
    ncpus : int
        Worker processes; parallel only when > 1 and parmap is available.
    testsamples : bool
        When True, mergeSamples only processes a couple of samples per batch.
    testbatch : bool
        When True, only the first batch folder is processed.
    outFile : str
        Destination path for the merged output.
    metaCols : list of str, optional
        Metadata columns to carry through the merge.
    filters : dict, optional
        Column -> allowed-values filters applied during the merge.
    useFeather : bool, optional
        When truthy, keep feather output instead of writing CSV.

    Returns
    -------
    str
        Filename reported by mergeFeathers (equals outFile on success)."""
    batchList = [opj(dataFolder, bf) for bf in os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))]
    if testbatch:
        batchList = batchList[:1]

    matchStr = 'gs_*.feather'
    if ncpus > 1 and _PARMAP:
        # Fan the batches out over a process pool.
        res = parmap.map(mergeSamples,
                         batchList,
                         extractionFunc,
                         extractionKwargs,
                         matchStr,
                         testsamples,
                         metaCols,
                         filters,
                         pool=Pool(processes=ncpus))
    else:
        if _PARMAP:
            # parmap available but serial requested / single CPU.
            res = parmap.map(mergeSamples,
                             batchList,
                             extractionFunc,
                             extractionKwargs,
                             matchStr,
                             testsamples,
                             metaCols,
                             filters,
                             parallel=False)
        else:
            # No parmap at all: plain serial map with bound kwargs.
            func = partial(mergeSamples,
                           extractionFunc=extractionFunc,
                           extractionKwargs=extractionKwargs,
                           matchStr=matchStr,
                           test=testsamples,
                           metaCols=metaCols,
                           filters=filters)
            res = list(map(func, batchList))
    # writeCSV is the complement of useFeather (1 -> CSV, 0 -> feather).
    outFilename = mergeFeathers(res, outFile, writeCSV=1 - int(useFeather))
    return outFilename
def testMatching(dataFolder):
    """Run sample matching on every batch folder and stack the results.

    Returns a DataFrame of matched samples annotated with the batch name
    and the batch folder path."""
    frames = []
    for entry in os.listdir(dataFolder):
        batchFolder = opj(dataFolder, entry)
        if not os.path.isdir(batchFolder):
            continue
        featherLU = matchSamples(batchFolder, test=False)
        frame = pd.Series(featherLU).to_frame()
        frame.loc[:, 'batch'] = entry
        frame.loc[:, 'batch_folder'] = batchFolder
        frames.append(frame)
    return pd.concat(frames, axis=0)
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Extract features and merge batches into one CSV.')
    parser.add_argument('--folder', type=str,
                        help='Data folder containing all batch folders.',
                        default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata')
    parser.add_argument('--function', type=str,
                        help='Name of extraction to apply ("functions")',
                        default='functions')
    parser.add_argument('--subsets', type=str,
                        help='Filename listing subsets for analysis.',
                        default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv')
    parser.add_argument('--out', type=str,
                        help='Output filename for CSV.',
                        default='merged_out.csv')
    parser.add_argument('--ncpus', type=int,
                        help='Number of CPUs/cores to use for parallelization.',
                        default=1)
    # Help-string typos fixed: "twp" -> "two", "oposed" -> "opposed".
    parser.add_argument('--testsamples', action='store_true', help='Only process two samples from each batch.')
    parser.add_argument('--testbatch', action='store_true', help='Only process two samples from one batch.')
    parser.add_argument('--matchingonly', action='store_true', help='Only perform sample matching, to validate metadata.')
    parser.add_argument('--feather', action='store_true', help='Store as feather as opposed to CSV')
    parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo from public github.com')
    args = parser.parse_args()

    # Parallelization is optional: fall back to a serial map when parmap
    # cannot be imported (narrowed from a bare except).
    try:
        import parmap
        from multiprocessing import Pool
        _PARMAP = True
    except ImportError:
        _PARMAP = False
        print('Could not find package "parmap", parallelization not enabled.')

    import itertools
    import pandas as pd
    import numpy as np
    from os.path import join as opj
    import os
    from functools import partial
    import time
    import sys
    import feather

    # Make sure the utils are on path before importing.
    sys.path.append(args.utils)
    # from ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples
    from ics import *

    if args.matchingonly:
        # Validation-only mode: match samples to metadata and write the table.
        metaDf = testMatching(args.folder)
        metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out))
        print('Wrote matching metadata to %s.' % opj(args.folder, 'metamatch_' + args.out))
    else:
        subsets, markers, functions, exclude = parseSubsets(args.subsets)
        # Map each --function name to (extraction function, kwargs) pair.
        features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets,
                                                                  functions=functions,
                                                                  mincells=5)),
                    'bool_functions':(extractFunctionsGBY, dict(subsets=subsets,
                                                                functions=functions,
                                                                mincells=0)),
                    'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets,
                                                                          functions=functions,
                                                                          markers=markers,
                                                                          compressions=[('ALL', 2),
                                                                                        (['IFNg','IL2', 'TNFa'], 2)])),
                    'functions':(extractFunctionsGBY, dict(subsets=subsets,
                                                           functions=functions,
                                                           compressions=[('ALL', 1),
                                                                         ('ALL', 2),
                                                                         (['IFNg','IL2', 'TNFa'], 1),
                                                                         (['IFNg','IL2', 'TNFa'], 2),
                                                                         (['IFNg','IL2'], 1)])),
                    'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))}

        extractionFunc, extractionKwargs = features[args.function]
        if args.testbatch:
            print('Test: processing samples from one batch')
        if args.testsamples:
            print('Test: processing two samples per batch')
        outFile = opj(args.folder, args.out)
        if args.feather:
            outFile = outFile.replace('.csv', '.feather')
        wrote = mergeBatches(args.folder,
                             extractionFunc=extractionFunc,
                             extractionKwargs=extractionKwargs,
                             testsamples=args.testsamples,
                             testbatch=args.testbatch,
                             outFile=outFile,
                             metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'],
                             filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6, 7, 10, 11, 12]},
                             useFeather=int(args.feather),
                             ncpus=args.ncpus)
        if wrote == outFile:
            print('Wrote extracted data to %s.' % outFile)
        else:
            print('Error writing file to disk: %s' % wrote)
3386860 | <reponame>Sunfacing/sc-projects
"""
stanCode Breakout Project
Adapted from <NAME>'s Breakout by
<NAME>, <NAME>, <NAME>,
and <NAME>
This class provides attributes of Ball / Bricks / Paddle
and necessary methods that help run the game
"""
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
import random
# CONSTANT for creating Bricks / Paddle / Balls / Ball's Speed
BRICK_SPACING = 5 # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40 # Width of a brick (in pixels).
BRICK_HEIGHT = 15 # Height of a brick (in pixels).
BRICK_ROWS = 10 # Number of rows of bricks.
BRICK_COLS = 10 # Number of columns of bricks.
BRICK_OFFSET = 50 # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10 # Radius of the ball (in pixels).
PADDLE_WIDTH = 75 # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15 # Height of the paddle (in pixels).
PADDLE_OFFSET = 50 # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 3 # Initial vertical speed for the ball.
MAX_X_SPEED = 5 # Maximum initial horizontal speed for the ball.
# global variables for creating bricks
# ball_color lists the brick row colors (two consecutive rows per color).
ball_color = ['red', 'red', 'orange', 'orange', 'yellow', 'yellow', 'green', 'green', 'blue', 'blue']
ball_color_index = 0 # The index of ball_color
brick_x = 0  # scratch x-coordinate used while laying out the brick wall
brick_y = 0  # scratch y-coordinate used while laying out the brick wall
class BreakoutGraphics:
    """All graphical state for a Breakout game: the window, the paddle,
    the ball, the brick wall, and the mouse handlers that drive them."""

    def __init__(self, ball_radius=BALL_RADIUS, paddle_width=PADDLE_WIDTH,
                 paddle_height=PADDLE_HEIGHT, paddle_offset=PADDLE_OFFSET,
                 brick_rows=BRICK_ROWS, brick_cols=BRICK_COLS,
                 brick_width=BRICK_WIDTH, brick_height=BRICK_HEIGHT,
                 brick_offset=BRICK_OFFSET, brick_spacing=BRICK_SPACING,
                 title='Breakout'):
        """Create a window with bricks, a paddle, a ball and its speed."""
        # Window sized from the brick grid, with extra vertical play space.
        window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
        window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
        self.window = GWindow(width=window_width, height=window_height, title=title)

        # Paddle, horizontally centered, paddle_offset px above the bottom.
        self.paddle = GRect(paddle_width, paddle_height,
                            x=(window_width - paddle_width) / 2,
                            y=window_height - paddle_offset)
        self.paddle.filled = True
        self.paddle.fill_color = 'black'
        self.paddle.color = 'black'
        self.window.add(self.paddle)

        # Ball, truly centered: the ball is a square with side 2*ball_radius,
        # so its top-left corner is one radius left/up of the window center.
        # (The old code used (size - radius) / 2, off-center by radius / 2.)
        self._ball_radius = ball_radius
        self.ball = GOval(ball_radius * 2, ball_radius * 2,
                          x=window_width / 2 - ball_radius,
                          y=window_height / 2 - ball_radius)
        self.ball.filled = True
        self.ball.fill_color = 'black'
        self.ball.color = 'black'
        self.window.add(self.ball)

        # Default initial velocity; horizontal direction is randomized.
        self.__dx = random.randint(1, MAX_X_SPEED)
        if random.random() > 0.5:
            self.__dx = -self.__dx
        self.__dy = INITIAL_Y_SPEED

        # Initialize our mouse listeners.
        self.game_on = False
        onmouseclicked(self.game_initiated)
        onmousemoved(self.paddle_position)

        # Draw the brick wall.  Layout coordinates are locals instead of the
        # old module-level globals, so a second BreakoutGraphics instance no
        # longer starts from stale positions.  The top row honors
        # brick_offset (the old code always started at y=0).
        brick_y = brick_offset
        for row in range(brick_rows):
            # One color per row.  The old per-brick color index walked past
            # the end of the 10-entry palette (IndexError) as soon as
            # rows * cols exceeded 10; indexing by row modulo the palette
            # length cannot overrun.
            color = ball_color[row % len(ball_color)]
            brick_x = 0
            for col in range(brick_cols):
                self.brick = GRect(brick_width, brick_height)
                self.brick.filled = True
                self.brick.fill_color = color
                self.brick.color = color
                self.window.add(self.brick, x=brick_x, y=brick_y)
                brick_x += brick_width + brick_spacing
            brick_y += brick_height + brick_spacing

        # Number of bricks the player must remove to win.
        self.brick_number = brick_rows * brick_cols

    def paddle_position(self, mouse):
        """Mouse handler: track the mouse horizontally, keeping the paddle
        entirely inside the window."""
        if self.paddle.width / 2 <= mouse.x <= self.window.width - self.paddle.width / 2:
            self.paddle.x = mouse.x - self.paddle.width / 2
            # Pin the paddle at its designed height.  PADDLE_OFFSET is the
            # semantically correct constant here (the old code used
            # BRICK_OFFSET, which only worked because both happen to be 50).
            self.paddle.y = self.window.height - PADDLE_OFFSET

    def game_initiated(self, mouse):
        """Mouse handler: record that the player clicked to start the game."""
        self.game_on = True

    def dx_getter(self):
        """Get the ball's horizontal speed."""
        return self.__dx

    def dy_getter(self):
        """Get the ball's vertical speed."""
        return self.__dy

    def reset_ball(self):
        """Re-center the ball and re-roll its velocity after a lost life."""
        self.ball.x = self.window.width / 2 - self._ball_radius
        self.ball.y = self.window.height / 2 - self._ball_radius
        self.__dx = random.randint(1, MAX_X_SPEED)
        if random.random() > 0.5:
            self.__dx = -self.__dx
        self.__dy = INITIAL_Y_SPEED

    def brick_detect(self):
        """Probe the four corners of the ball's bounding box for an object.

        Returns 0 when the paddle is touched, True when a brick is touched
        (the brick is removed from the window), and None when nothing is
        hit.  The distinct 0 / True / None returns are kept from the
        original interface because callers may distinguish paddle hits
        from brick hits."""
        size = self._ball_radius * 2
        corners = ((self.ball.x, self.ball.y),
                   (self.ball.x + size, self.ball.y),
                   (self.ball.x, self.ball.y + size),
                   (self.ball.x + size, self.ball.y + size))
        for corner_x, corner_y in corners:
            obj = self.window.get_object_at(corner_x, corner_y)
            if obj is None:
                continue
            if obj is self.paddle:
                return 0
            self.window.remove(obj)
            return True

    def game_winning_screen(self, width=BRICK_COLS * (BRICK_WIDTH + BRICK_SPACING) - BRICK_SPACING, height=BRICK_OFFSET + 3 * (BRICK_ROWS * (BRICK_HEIGHT + BRICK_SPACING) - BRICK_SPACING)):
        """Show "You Win!!" after successfully removing all bricks."""
        backdrop = GRect(width, height)
        backdrop.filled = True
        backdrop.fill_color = 'white'
        backdrop.color = 'white'
        self.window.add(backdrop)
        message = GLabel('You Win!!')
        message.font = 'Helvetica' + '-' + str(BRICK_COLS * 5)
        self.window.add(message, (width - message.width) / 2, height / 2)

    def game_over_screen(self, width=BRICK_COLS * (BRICK_WIDTH + BRICK_SPACING) - BRICK_SPACING, height=BRICK_OFFSET + 3 * (BRICK_ROWS * (BRICK_HEIGHT + BRICK_SPACING) - BRICK_SPACING)):
        """Show "Game Over" if the player runs out of lives."""
        backdrop = GRect(width, height)
        backdrop.filled = True
        backdrop.fill_color = 'white'
        backdrop.color = 'white'
        self.window.add(backdrop)
        message = GLabel('Game Over')
        message.font = 'Helvetica' + '-' + str(BRICK_COLS * 5)
        self.window.add(message, (width - message.width) / 2, height / 2)
4824709 | <reponame>sjg20/ec
#!/usr/bin/python3.6
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import os
import subprocess
import tempfile
# SDK install path
SDK_INSTALL_PATH = '/opt/zephyr-sdk'
# SDK version
SDK_VERSION = '0.11.4'
# SDK installer URL (release asset named after SDK_VERSION)
SDK_INSTALLER_URL = (
    'https://github.com/zephyrproject-rtos/sdk-ng/releases' +
    '/download/v{version}/zephyr-sdk-{version}-setup.run'
).format(version=SDK_VERSION)
# SDK installer expected MD5 checksum
# NOTE: must be updated in lockstep with SDK_VERSION.
SDK_INSTALLER_MD5 = 'ca6cc42573f6548cf936b2a60df9a125'
def verify_zephyr_sdk():
    """Verify that the Zephyr SDK is installed.

    Returns:
        True if the Zephyr SDK matching the version specified in
        SDK_VERSION is believed to be installed.
    """
    try:
        with open('%s/sdk_version' % SDK_INSTALL_PATH) as sdk_version:
            current_version = sdk_version.read().replace('\n', '')
        return current_version == SDK_VERSION
    except OSError:
        # Missing install directory or unreadable version file means the
        # SDK is not (correctly) installed.
        return False
def install_zephyr_sdk(installer_file_fd, installer_file_name):
    """Install the Zephyr SDK using the provided installer file.

    Args:
        installer_file_fd: File descriptor for the installer file.
        installer_file_name: File name for the installer file.
    """
    # Fetch the installer into the provided temp file.
    print('Downloading installer from: %s' % SDK_INSTALLER_URL)
    wget_cmd = ['wget', '-nv', '--show-progress', '-O', installer_file_name,
                SDK_INSTALLER_URL]
    subprocess.run(wget_cmd)
    os.close(installer_file_fd)

    # Check the download against the expected MD5 before executing it.
    print('Validating installer...', end='')
    with open(installer_file_name, 'rb') as installer_file:
        md5_checksum = hashlib.md5(installer_file.read()).hexdigest()
    if md5_checksum != SDK_INSTALLER_MD5:
        print('\nFailed to verify installer with MD5: %s' % md5_checksum)
        exit(1)
    print('SUCCESS')

    # Make the installer executable and run it unattended into the
    # configured install path.
    os.chmod(installer_file_name, 0o744)
    subprocess.run([installer_file_name, '--', '-y', '-d', SDK_INSTALL_PATH])
def main():
    """Install the Zephyr SDK into SDK_INSTALL_PATH unless a matching
    version is already present; exits non-zero on failure."""
    # Only run this if the Zephyr SDK isn't already installed or if the version
    # doesn't match.
    if verify_zephyr_sdk():
        print('Zephyr SDK already found in %s' % SDK_INSTALL_PATH)
        exit(0)
    # Create the install directory
    os.makedirs(SDK_INSTALL_PATH, exist_ok=True)
    # Create a temporary file to hold the installer
    installer_file_fd, installer_file_name = tempfile.mkstemp(
        suffix='.run', prefix='zephyr-sdk-setup-', text=False)
    try:
        install_zephyr_sdk(installer_file_fd, installer_file_name)
    finally:
        # Always remove the temp installer, even if installation failed.
        os.remove(installer_file_name)
    # Exit with 1 and print error if verify_zephyr_sdk returns False
    if not verify_zephyr_sdk():
        print("Failed to verify SDK installation")
        exit(1)

if __name__ == "__main__":
    main()
| StarcoderdataPython |
3342109 | from flask_login import UserMixin
from datetime import datetime
from app import db, login
from werkzeug.security import generate_password_hash, check_password_hash
import uuid
# Association table: which users liked which posts (many-to-many User <-> Post).
like = db.Table('like',
                db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
                db.Column('post_id', db.Integer, db.ForeignKey('post.id'))
                )
# Association table: which users shared which posts, with a share timestamp.
share = db.Table('share',
                 db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
                 db.Column('post_id', db.Integer, db.ForeignKey('post.id')),
                 db.Column('created', db.DateTime, default=datetime.utcnow)
                 )
# Self-referential association table: follower -> followed user pairs.
followers = db.Table('followers',
                     db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
                     db.Column('followed_id', db.Integer, db.ForeignKey('user.id')),
                     db.Column('timestamp', db.DateTime, default=datetime.utcnow)
                     )
def generate_uuid():
    """Return a fresh random UUID4 rendered as a string (column default)."""
    return '{}'.format(uuid.uuid4())
class User(UserMixin, db.Model):
    """Application user: credentials, profile fields, and social
    relationships (posts, files, followers, likes, shares, messages)."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    # Public identifier exposed instead of the integer primary key.
    uuid = db.Column(db.String, index=True, unique=True, default=generate_uuid)
    username = db.Column(db.String(64), index=True, unique=True) # used for the login manager
    display_name = db.Column(db.String(64), index=True, unique=True) # used for displaying the username on pages
    first_name = db.Column(db.String(64), nullable=False)
    middle_name = db.Column(db.String(64), nullable=True)
    last_name = db.Column(db.String(64), nullable=False)
    email = db.Column(db.String(128), index=True, unique=True)
    # Stores the werkzeug password hash, never the plaintext.
    password = db.Column(db.String(164), nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    about_me = db.Column(db.String(280), nullable=True)
    last_seen = db.Column(db.DateTime)
    avatar = db.Column(db.String(64), nullable=True)
    background_image = db.Column(db.String(64), nullable=True)
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    files = db.relationship('File', backref='user', lazy='dynamic')
    # Self-referential many-to-many: users this user follows; the backref
    # 'followers' gives the reverse direction.
    followed = db.relationship(
        'User', secondary=followers,
        primaryjoin=(followers.c.follower_id == id),
        secondaryjoin=(followers.c.followed_id == id),
        backref=db.backref('followers', lazy='dynamic'), lazy='dynamic')
    shared_posts = db.relationship('Post', secondary='share', lazy='dynamic',
                                   backref=db.backref('shares', lazy='dynamic'))
    sent_messages = db.relationship('Message', foreign_keys='Message.sender_id', backref='sender', lazy='dynamic')
    received_messages = db.relationship('Message', foreign_keys='Message.recipient_id', backref='recipient', lazy='dynamic')
    # Watermark used by new_messages() to count unread messages.
    last_message_read_time = db.Column(db.DateTime)
    def __repr__(self):
        return '<User {}>'.format(self.username)
    def set_password(self, password):
        """Hash the given plaintext password and store the hash."""
        self.password = generate_password_hash(password)
    def check_password(self, password):
        """Return True when the plaintext password matches the stored hash."""
        return check_password_hash(self.password, password)
    def get_full_name(self):
        """Return 'first_name last_name' (middle name is not included)."""
        return '{} {}'.format(self.first_name, self.last_name)
    def follow(self, user):
        """Start following *user*; no-op when already following."""
        if not self.is_following(user):
            self.followed.append(user)
    def unfollow(self, user):
        """Stop following *user*; no-op when not following."""
        if self.is_following(user):
            self.followed.remove(user)
    def is_following(self, user):
        """Return True when this user follows *user*."""
        return self.followed.filter(followers.c.followed_id == user.id).count() > 0
    def followed_posts(self):
        """Posts by followed users plus this user's own, newest first."""
        followed = Post.query.join(followers, (followers.c.followed_id == Post.user_id)).filter(
            followers.c.follower_id == self.id)
        own = Post.query.filter_by(user_id=self.id)
        return followed.union(own).order_by(Post.created_at.desc())
    def new_messages(self):
        """Count messages received since the inbox was last read."""
        # Fall back to a date predating the app so every message counts.
        last_read_time = self.last_message_read_time or datetime(1900, 1, 1)
        return Message.query.filter_by(recipient=self).filter(Message.created_at > last_read_time).count()
    def share_post(self, post):
        """Share *post*; no-op when already shared."""
        if not self.is_sharing(post):
            self.shared_posts.append(post)
    def unshare_post(self, post):
        """Remove a share of *post*; no-op when not shared."""
        if self.is_sharing(post):
            self.shared_posts.remove(post)
    def is_sharing(self, post):
        """Return True when this user has shared *post*."""
        return self.shared_posts.filter(share.c.post_id == post.id).count() > 0
@login.user_loader
def load_user(id):
    """Flask-Login hook: load a user by primary key from the session."""
    return User.query.get(int(id))
class Post(db.Model):
    """A user-authored post; likeable and shareable by other users."""
    __tablename__ = 'post'
    id = db.Column(db.Integer, primary_key=True)
    # Public identifier exposed instead of the integer primary key.
    uuid = db.Column(db.String, index=True, unique=True, default=generate_uuid)
    title = db.Column(db.String(64), nullable=False)
    body = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    likes = db.relationship('User', secondary=like, lazy='dynamic',
                            backref=db.backref('likes', lazy='dynamic'))
    def __repr__(self):
        return '<Post {}>'.format(self.title)
    def is_liked(self, user):
        """Return True when *user* has liked this post."""
        return self.likes.filter(like.c.user_id == user.id).count() > 0
class File(db.Model):
    """A file uploaded by (or associated with) a user, with optional
    attribution fields for the original author."""
    __tablename__ = 'file'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    name = db.Column(db.String(120), nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    url = db.Column(db.String(120), nullable=True)
    # Attribution for externally sourced files (e.g. stock images).
    file_author = db.Column(db.String(120), nullable=True)
    file_author_url = db.Column(db.String(120), nullable=True)
    def __repr__(self):
        return '<File {}>'.format(self.name)
class Message(db.Model):
    """A private message from one user (sender) to another (recipient)."""
    __tablename__ = 'message'
    id = db.Column(db.Integer, primary_key=True)
    sender_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    recipient_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    body = db.Column(db.Text, nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    def __repr__(self):
        return '<Message {}>'.format(self.body)
1779119 | <reponame>ch1huizong/learning
#! /usr/bin/env python3
# -*-coding:UTF-8 -*-
# @Time : 2019/01/05 11:31:20
# @Author : che
# @Email : <EMAIL>
import time
class Timer(object):
    """Accumulating stopwatch, also usable as a context manager.

    Repeated start/stop cycles add to ``elapsed`` until ``reset`` is
    called.  The clock source is injectable for testing.
    """

    def __init__(self, func=time.perf_counter):
        self.elapsed = 0.0   # total seconds accumulated across runs
        self._func = func    # clock source returning a monotonic float
        self._start = None   # timestamp while running, else None

    def start(self):
        """Begin timing; raises RuntimeError if already running."""
        if self._start is not None:
            raise RuntimeError('Already started')
        self._start = self._func()

    def stop(self):
        """Stop timing and add the interval; raises RuntimeError if idle."""
        if self._start is None:
            raise RuntimeError('Not started')
        self.elapsed += self._func() - self._start
        self._start = None

    def reset(self):
        """Zero the accumulated elapsed time."""
        self.elapsed = 0.0

    @property
    def running(self):
        """True while the timer is started but not yet stopped."""
        return self._start is not None

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *exc_info):
        self.stop()
def countdown(n):
    """Decrement a counter from n to zero, doing no other work.

    Exists purely to burn CPU time for the timing demo below.
    """
    remaining = n
    while remaining > 0:
        remaining -= 1
if __name__ == '__main__':
    # Demo: time a CPU-bound busy loop via the context-manager interface.
    with Timer() as t2:
        countdown(100000000)
    print(t2.elapsed)
| StarcoderdataPython |
140329 | from RappCloud.Objects import (
File,
Payload)
from Cloud import (
CloudMsg,
CloudRequest,
CloudResponse)
class SpeechRecognitionGoogle(CloudMsg):
    """ Speech Recognition Google Cloud Message object """

    class Request(CloudRequest):
        """ Speech Recognition Google Cloud Request object.
        SpeechRecognitionGoogle.Request
        """
        def __init__(self, **kwargs):
            """!
            Constructor
            @param **kwargs - Keyword arguments. Apply values to the request attributes.
                - @ref audio_source
                - @ref audiofile
                - @ref language
            """
            ## Language to use for recognition
            self.language = ''
            ## Audio source data format. e.g "nao_wav_1_ch".
            self.audio_source = ''
            ## Path to the audio file.
            self.audiofile = ''
            # Let the base class apply any kwargs onto the attributes above.
            super(SpeechRecognitionGoogle.Request, self).__init__(**kwargs)

        def make_payload(self):
            """ Create and return the Payload of the Request. """
            return Payload(
                language=self.language,
                audio_source = self.audio_source)

        def make_files(self):
            """ Create and return Array of File objects of the Request. """
            # The audio file is uploaded under the 'file' POST field.
            return [File(self.audiofile, postfield='file')]

    class Response(CloudResponse):
        """ Speech Recognition Google Cloud Response object.
        SpeechRecognitionGoogle.Response
        """
        def __init__(self, **kwargs):
            """!
            Constructor
            @param **kwargs - Keyword arguments. Apply values to the request attributes.
                - @ref error
                - @ref words
                - @ref alternatives
            """
            ## An array that contains the "words-found" with highest confidence.
            self.words = []
            ## Alternative sentences.
            # e.g. [['send', 'mail'], ['send', 'email'], ['set', 'mail']...]
            self.alternatives = []
            ## Error message.
            self.error = ''
            super(SpeechRecognitionGoogle.Response, self).__init__(**kwargs)

    def __init__(self, **kwargs):
        """!
        Constructor
        @param **kwargs - Keyword arguments. Apply values to the request attributes.
            - @ref Request.audio_source
            - @ref Request.audiofile
            - @ref Request.language
        """
        # Create and hold the Request object for this CloudMsg
        self.req = SpeechRecognitionGoogle.Request()
        # Create and hold the Response object for this CloudMsg
        self.resp = SpeechRecognitionGoogle.Response()
        # svcname identifies the remote service handling this message.
        super(SpeechRecognitionGoogle, self).__init__(
            svcname='speech_detection_google', **kwargs)
| StarcoderdataPython |
# Minimal example: bind a name and echo its value to stdout.
a = 5
print(a)
| StarcoderdataPython |
13771 | <filename>src/data_module.py
# Created by xieenning at 2020/10/19
from argparse import ArgumentParser, Namespace
from typing import Optional, Union, List
from pytorch_lightning import LightningDataModule
from transformers import BertTokenizer
from transformers import ElectraTokenizer
from transformers.utils import logging
import torch
from torch.utils.data import DataLoader, TensorDataset
from src.data_processor import SemanticMatchingProcessor, convert_examples_to_features
logger = logging.get_logger(__name__)
class SemanticMatchingDataModule(LightningDataModule):
    """LightningDataModule for a sentence-pair semantic matching task.

    Wraps SemanticMatchingProcessor plus an ELECTRA tokenizer to produce
    train/validation TensorDatasets of
    (input_ids, attention_mask, token_type_ids, label).
    """
    def __init__(self, hparams: Namespace):
        super().__init__()
        # Copy the hyper-parameters this module needs for data loading.
        self.data_path = hparams.data_path
        self.model_name_or_path = hparams.model_name_or_path
        self.max_length = hparams.max_length
        self.train_batch_size = hparams.train_batch_size
        self.val_batch_size = hparams.val_batch_size
        self.loader_workers = hparams.loader_workers
        # NOTE(review): an ElectraTokenizer is loaded even though the demo
        # default path below points at a BERT checkpoint -- confirm intended.
        self.tokenizer = ElectraTokenizer.from_pretrained(hparams.model_name_or_path)
        self.processor = SemanticMatchingProcessor()
        # Populated later: features in prepare_data(), datasets in setup().
        self.train_features = None
        self.val_features = None
        self.train_dataset = None
        self.val_dataset = None
    def prepare_data(self, *args, **kwargs):
        """Read train/dev examples from data_path and tokenize them into features."""
        train_examples = self.processor.get_train_examples(self.data_path)
        self.train_features = convert_examples_to_features(train_examples,
                                                           self.tokenizer,
                                                           label_list=self.processor.get_labels(),
                                                           max_length=self.max_length)
        val_examples = self.processor.get_dev_examples(self.data_path)
        self.val_features = convert_examples_to_features(val_examples,
                                                         self.tokenizer,
                                                         label_list=self.processor.get_labels(),
                                                         max_length=self.max_length)
        logger.info("`prepare_data` finished!")
    @staticmethod
    def generate_dataset(features):
        """Pack a list of features into a TensorDataset of long tensors."""
        return TensorDataset(
            torch.tensor([f.input_ids for f in features], dtype=torch.long),
            torch.tensor([f.attention_mask for f in features], dtype=torch.long),
            torch.tensor([f.token_type_ids for f in features], dtype=torch.long),
            torch.tensor([f.label for f in features], dtype=torch.long)
        )
    def setup(self, stage: Optional[str] = None):
        """Build TensorDatasets from the features created in prepare_data()."""
        self.train_dataset = self.generate_dataset(self.train_features)
        self.val_dataset = self.generate_dataset(self.val_features)
        logger.info("`setup` finished!")
    def train_dataloader(self, *args, **kwargs) -> DataLoader:
        """Shuffled training loader."""
        return DataLoader(self.train_dataset, shuffle=True, batch_size=self.train_batch_size,
                          num_workers=self.loader_workers)
    def val_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
        """Validation loader (no shuffling)."""
        return DataLoader(self.val_dataset, batch_size=self.val_batch_size, num_workers=self.loader_workers)
    def test_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
        """Test loader; reuses the validation set (no separate test split)."""
        return DataLoader(self.val_dataset, batch_size=self.val_batch_size, num_workers=self.loader_workers)
    @classmethod
    def add_data_specific_args(
            cls, parser: ArgumentParser
    ) -> ArgumentParser:
        """ Parser for Estimator specific arguments/hyperparameters.
        :param parser: argparse.ArgumentParser

        Returns:
            - updated parser
        """
        parser.add_argument(
            "--data_path",
            default="/Data/enningxie/Codes/lightning-semantic-matching/data",
            type=str
        )
        parser.add_argument(
            "--max_length",
            default=64,
            type=int
        )
        parser.add_argument(
            "--train_batch_size",
            default=64,
            type=int
        )
        parser.add_argument(
            "--val_batch_size",
            default=64,
            type=int
        )
        parser.add_argument(
            "--loader_workers",
            default=64,
            type=int,
            help="How many subprocesses to use for data loading. 0 means that \
                the data will be loaded in the main process.",
        )
        return parser
if __name__ == '__main__':
    # Smoke test: build the data module with CLI defaults and iterate the
    # validation loader once, printing each batch.
    tmp_parser = ArgumentParser()
    tmp_parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="/Data/public/pretrained_models/pytorch/chinese-bert-wwm-ext"
    )
    tmp_parser = SemanticMatchingDataModule.add_data_specific_args(tmp_parser)
    hparams = tmp_parser.parse_args()
    tmp_data_module = SemanticMatchingDataModule(hparams)
    tmp_data_module.prepare_data()
    tmp_data_module.setup()
    # NOTE(review): despite the name, this is the *validation* dataloader.
    train_dataloader = tmp_data_module.val_dataloader()
    for batch in train_dataloader:
        print(type(batch))
        print(batch)
        print('break point.')
    print('break point.')
1784563 | # NEON AI (TM) SOFTWARE, Software Development Kit & Application Development System
# All trademark and other rights reserved by their respective owners
# Copyright 2008-2021 Neongecko.com Inc.
# BSD-3
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
from time import sleep
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from neon_api_proxy.cached_api import CachedAPI
class TestCachedAPI(unittest.TestCase):
    """Exercises CachedAPI's HTTP caching behaviour.

    NOTE(review): these tests hit live external URLs, so they require
    network access and depend on those sites being reachable.
    """
    @classmethod
    def setUpClass(cls) -> None:
        # One shared CachedAPI instance (and cache) for the whole class.
        cls.api = CachedAPI("test")

    def test_cached_request(self):
        """A repeated GET is served from cache with identical content."""
        url = "https://neon.ai"
        res = self.api.session.get(url, timeout=10)
        cached = self.api.session.get(url, timeout=10)
        self.assertTrue(cached.from_cache)
        self.assertEqual(res.content, cached.content)

    def test_request_no_cache(self):
        """cache_disabled() bypasses the cache but returns the same content."""
        url = "https://neon.ai"
        res = self.api.session.get(url, timeout=10)
        with self.api.session.cache_disabled():
            cached = self.api.session.get(url, timeout=10)
        self.assertFalse(cached.from_cache)
        self.assertEqual(res.content, cached.content)

    def test_get_with_cache_timeout(self):
        """A cached entry expires after the requested timeout elapses."""
        url = "https://chatbotsforum.org"
        res = self.api.get_with_cache_timeout(url, 5)
        self.assertFalse(res.from_cache)
        cached = self.api.get_with_cache_timeout(url, 15)
        self.assertTrue(cached.from_cache)
        self.assertEqual(res.content, cached.content)
        # Wait past the 5 s expiry so the next request misses the cache.
        sleep(5)
        expired = self.api.get_with_cache_timeout(url)
        self.assertFalse(expired.from_cache)

    def test_get_bypass_cache(self):
        """get_bypass_cache ignores an existing cached entry."""
        url = "https://klat.com"
        res = self.api.get_with_cache_timeout(url)
        self.assertFalse(res.from_cache)
        cached = self.api.get_with_cache_timeout(url)
        self.assertTrue(cached.from_cache)
        no_cache = self.api.get_bypass_cache(url)
        self.assertFalse(no_cache.from_cache)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| StarcoderdataPython |
196275 | import operator
import re
from koapy.backend.kiwoom_open_api_plus.core.KiwoomOpenApiPlusError import (
KiwoomOpenApiPlusError,
KiwoomOpenApiPlusNegativeReturnCodeError,
)
from koapy.backend.kiwoom_open_api_plus.core.KiwoomOpenApiPlusTrInfo import (
KiwoomOpenApiPlusTrInfo,
)
from koapy.backend.kiwoom_open_api_plus.grpc import KiwoomOpenApiPlusService_pb2
from koapy.backend.kiwoom_open_api_plus.grpc.event.KiwoomOpenApiPlusEventHandlerForGrpc import (
KiwoomOpenApiPlusEventHandlerForGrpc,
)
from koapy.utils.logging.Logging import Logging
class KiwoomOpenApiPlusTrEventHandler(KiwoomOpenApiPlusEventHandlerForGrpc, Logging):
    def __init__(self, control, request, context, screen_manager):
        """Prepare a TR (transaction) request handler.

        Resolves TR field metadata, captures the request inputs, and builds
        the stop-condition predicate used to end continued (paged) requests.

        Args:
            control: OpenAPI control object used to issue TR calls.
            request: gRPC TransactionRequest message.
            context: gRPC servicer context.
            screen_manager: allocator/recycler for Kiwoom screen numbers.
        """
        super().__init__(control, context)
        self._request = request
        self._screen_manager = screen_manager
        # Unpack request fields used throughout the handler.
        self._rqname = request.request_name
        self._trcode = request.transaction_code
        self._scrnno = request.screen_no
        self._inputs = request.inputs
        # Static TR metadata (output field names) looked up from the catalog.
        self._trinfo = KiwoomOpenApiPlusTrInfo.get_trinfo_by_code(self._trcode)
        if self._trinfo is None:
            self.logger.error("Cannot find names for trcode %s", self._trinfo)
        # Input named "종목코드" (stock code), when supplied by the caller.
        self._input_code = self._inputs.get("종목코드")
        self._single_names = self._trinfo.get_single_output_names()
        self._multi_names = self._trinfo.get_multi_output_names()
        # A stop condition is usable only when it names a known output field.
        stop_condition = request.stop_condition
        stop_condition_is_valid = (
            stop_condition is not None
            and stop_condition.name is not None
            and len(stop_condition.name) > 0
            and (
                stop_condition.name in self._multi_names
                or stop_condition.name in self._single_names
            )
        )
        if stop_condition_is_valid:
            # Map the protobuf comparator enum to the matching operator;
            # defaults to <= when the enum value is unrecognized.
            comparator = {
                KiwoomOpenApiPlusService_pb2.TransactionStopConditionCompartor.LESS_THAN_OR_EQUAL_TO: operator.le,
                KiwoomOpenApiPlusService_pb2.TransactionStopConditionCompartor.LESS_THAN: operator.lt,
                KiwoomOpenApiPlusService_pb2.TransactionStopConditionCompartor.GREATER_THAN_OR_EQUAL_TO: operator.ge,
                KiwoomOpenApiPlusService_pb2.TransactionStopConditionCompartor.GREATER_THAN: operator.gt,
                KiwoomOpenApiPlusService_pb2.TransactionStopConditionCompartor.EQUAL_TO: operator.eq,
                KiwoomOpenApiPlusService_pb2.TransactionStopConditionCompartor.NOT_EQUAL_TO: operator.ne,
            }.get(stop_condition.comparator, operator.le)
            if stop_condition.name in self._multi_names:
                column_index_to_check = self._multi_names.index(stop_condition.name)
            else:
                # if it does not have multi_names, it may use single_names instead.
                column_index_to_check = self._single_names.index(stop_condition.name)

            def is_stop_condition(row):
                # row is a list of output values; compare the chosen column
                # against the configured threshold value.
                return comparator(row[column_index_to_check], stop_condition.value)

        else:

            def is_stop_condition(_):
                # No valid stop condition: never stop early.
                return False

        self._is_stop_condition = is_stop_condition
def on_enter(self):
self._scrnno = self._screen_manager.borrow_screen(self._scrnno)
self.add_callback(self._screen_manager.return_screen, self._scrnno)
self.add_callback(self.control.DisconnectRealData, self._scrnno)
KiwoomOpenApiPlusError.try_or_raise(
self.control.RateLimitedCommRqData.async_call(
self._rqname, self._trcode, 0, self._scrnno, self._inputs
),
except_callback=self.observer.on_error,
)
def OnReceiveTrData(
self,
scrnno,
rqname,
trcode,
recordname,
prevnext,
datalength,
errorcode,
message,
splmmsg,
):
if (rqname, trcode, scrnno) == (self._rqname, self._trcode, self._scrnno):
response = KiwoomOpenApiPlusService_pb2.ListenResponse()
response.name = "OnReceiveTrData" # pylint: disable=no-member
response.arguments.add().string_value = scrnno # pylint: disable=no-member
response.arguments.add().string_value = rqname # pylint: disable=no-member
response.arguments.add().string_value = trcode # pylint: disable=no-member
response.arguments.add().string_value = (
recordname # pylint: disable=no-member
)
response.arguments.add().string_value = (
prevnext # pylint: disable=no-member
)
should_stop = prevnext in ["", "0"]
repeat_cnt = self.control.GetRepeatCnt(trcode, recordname)
if repeat_cnt > 0:
if len(self._multi_names) == 0:
self.logger.warning(
"Repeat count greater than 0, but no multi data names available, fallback to sigle data names"
)
self._single_names, self._multi_names = (
self._multi_names,
self._single_names,
)
if len(self._multi_names) > 0:
rows = [
[
self.control.GetCommData(
trcode, recordname, i, name
).strip()
for name in self._multi_names
]
for i in range(repeat_cnt)
]
response.multi_data.names.extend(
self._multi_names
) # pylint: disable=no-member
for row in rows:
if self._is_stop_condition(row):
should_stop = True
break
response.multi_data.values.add().values.extend(
row
) # pylint: disable=no-member
if len(self._single_names) > 0:
values = [
self.control.GetCommData(trcode, recordname, 0, name).strip()
for name in self._single_names
]
response.single_data.names.extend(
self._single_names
) # pylint: disable=no-member
response.single_data.values.extend(values) # pylint: disable=no-member
self.observer.on_next(response)
if should_stop:
self.observer.on_completed()
else:
KiwoomOpenApiPlusError.try_or_raise(
self.control.RateLimitedCommRqData.async_call(
rqname, trcode, int(prevnext), scrnno, self._inputs
),
except_callback=self.observer.on_error,
)
def OnEventConnect(self, errcode):
if errcode < 0:
error = KiwoomOpenApiPlusNegativeReturnCodeError(errcode)
self.observer.on_error(error)
return
def OnReceiveMsg(self, scrnno, rqname, trcode, msg):
if (rqname, trcode, scrnno) == (self._rqname, self._trcode, self._scrnno):
msg_pattern = r"^[^(]+\((-?[0-9]+)\)$"
match = re.match(msg_pattern, msg)
if match:
errcode = match.group(1)
errcode = int(errcode)
error = KiwoomOpenApiPlusNegativeReturnCodeError(errcode, msg)
self.observer.on_error(error)
return
| StarcoderdataPython |
3377036 | <filename>tools/find_missing.py
# -*- coding: utf-8 -*-
# find missing glyphs needed to render given txt's
import json
def find_missing(pths, dict_path="./dist/min-trad-compiled.json"):
    """Count CJK Unified Ideographs in the given texts that are missing glyphs.

    :param pths: iterable of text-file paths to scan
    :param dict_path: path of the compiled glyph dictionary (JSON object whose
        keys are the available glyph characters); defaults to the original
        hard-coded location for backward compatibility
    :return: dict mapping each missing character to its occurrence count
    """
    # Load the glyph dictionary ONCE instead of re-reading it per input file,
    # and close every file deterministically via context managers.
    with open(dict_path, 'r', encoding='utf-8') as jf:
        js = json.loads(jf.read())
    missing = {}
    for p in pths:
        with open(p, 'r', encoding='utf-8') as tf:
            txt = tf.read()
        for c in txt:
            # Only consider characters inside the CJK Unified Ideographs block.
            if c not in js and 0x4e00 <= ord(c) <= 0x9fef:
                missing[c] = missing.get(c, 0) + 1
    # Report the frequent offenders (more than 10 occurrences), most common first.
    sb = sorted([(k, missing[k]) for k in missing if missing[k] > 10], key=lambda k: -k[1])
    print(sb)
    total_missing = sum(missing.values())
    if total_missing:
        print(len(sb), len(missing), float(sum(s[1] for s in sb)) / total_missing)
    else:
        # Guard against ZeroDivisionError when nothing is missing.
        print(0, 0, 0.0)
    return missing
# Scan a fixed corpus of classic Chinese texts for characters that the
# compiled glyph dictionary cannot render.
find_missing([
    u"../txt/彷徨朝花夕拾故事新编.txt",
    u"../txt/唐诗宋词三百首.txt",
    u"../txt/史记.txt",
    u"../txt/古文观止.txt",
    u"../txt/红楼梦.txt",
    u"../txt/雅舍小品.txt",
    u"../txt/子不语.txt",
    u"../txt/闲情偶寄.txt",
    u"../txt/六十种曲/還魂記.txt",
])
1667159 | <gh_stars>0
"""Interactively collect numbers and report count, sum, lowest, highest and mean."""

# initialise accumulators
numbers = []
count = 0
total = 0
lowest = None
highest = None
TEMPLATE = 'count = {0} sum = {1} lowest = {2} highest = {3} mean = {4}'

# Keep prompting until the user presses Enter on an empty line.
while True:
    new_input = input('enter a number or Enter to finish: ')
    if new_input == '':
        break
    # Attempt to convert the input to a number; warn and re-prompt on bad input.
    try:
        new_number = float(new_input)
    except ValueError:
        print('invalid input!')
        # BUG FIX: previously fell through after the warning, which raised
        # NameError on the first invalid entry and double-counted the previous
        # number on later ones.  Skip straight to the next prompt instead.
        continue
    numbers.append(new_number)
    count += 1
    total += new_number
    if lowest is None:
        # First number seeds both extremes.
        lowest = new_number
        highest = new_number
    elif new_number < lowest:
        lowest = new_number
    elif new_number > highest:
        highest = new_number

# calculate the mean of the numbers and print results
try:
    mean = total / count
    print('numbers: ', numbers)
    print(TEMPLATE.format(count, total, lowest, highest, mean))
except ZeroDivisionError:
    # count == 0 means the user entered nothing.
    print('no numbers were entered')
| StarcoderdataPython |
3268139 | <filename>hackerrank/BreakingTheRecords.py
import os
def breakingRecords(scores):
    """Count how often the running best and worst game scores are broken.

    Returns a tuple (times_best_broken, times_worst_broken).
    """
    best_breaks = 0
    worst_breaks = 0
    best = scores[0]
    worst = scores[0]
    for score in scores[1:]:
        if score > best:
            best = score
            best_breaks += 1
        if score < worst:
            worst = score
            worst_breaks += 1
    return best_breaks, worst_breaks
if __name__ == "__main__":
    # HackerRank harness: first line is the score count (unused beyond parsing),
    # second line the space-separated scores; result goes to OUTPUT_PATH.
    fptr = open(os.environ["OUTPUT_PATH"], "w")
    n = int(input())
    scores = list(map(int, input().rstrip().split()))
    result = breakingRecords(scores)
    fptr.write(" ".join(map(str, result)))
    fptr.write("\n")
    fptr.close()
| StarcoderdataPython |
3382142 | <reponame>CCSS-Utrecht/ninolearn
#from ninolearn.IO import read_raw
import xarray as xr
import numpy as np
import iris
import iris.analysis
from iris.coords import DimCoord
from iris.cube import Cube
def to2_5x2_5(data):
    """
    Regrids data the 2.5x2.5 from the NCEP reanalysis data set.
    :param data: An xarray DataArray.
    """
    source_cube = data.to_iris()
    # Target grid: 73 latitudes from -90 to 90 and 144 longitudes from 0 to
    # 357.5, both on a 2.5 degree spacing (matching the NCEP reanalysis grid).
    lat_coord = DimCoord(np.arange(-90, 90.01, 2.5),
                         standard_name='latitude',
                         units='degrees')
    lon_coord = DimCoord(np.arange(0, 359.99, 2.5),
                         standard_name='longitude',
                         units='degrees')
    target_grid = Cube(np.zeros((73, 144), np.float32),
                       dim_coords_and_dims=[(lat_coord, 0),
                                            (lon_coord, 1)])
    regrid_scheme = iris.analysis.Linear(extrapolation_mode='extrapolate')
    regridded_cube = source_cube.regrid(target_grid, regrid_scheme)
    return xr.DataArray.from_iris(regridded_cube)
189030 |
import torch
from .regularizers import overlapping_on_depths
from ..networks.primitive_parameters import PrimitiveParameters
from ..primitives import get_implicit_surface, _compute_accuracy_and_recall
from ..utils.stats_logger import StatsLogger
from ..utils.value_registry import ValueRegistry
from ..utils.metrics import compute_iou
def cluster_coverage_with_reconstruction(prim_params, y_target, options):
    """Training loss for hierarchical superquadric primitive fitting.

    Combines four terms:
      * coverage: clusters target points to partition centroids level by level,
      * fit: cross-entropy between the implicit surface and point labels,
      * proximity: ties fitted primitive centers to their partition centroids,
      * qos: regresses each primitive's predicted quality onto its actual IoU.

    Arguments:
        prim_params: prediction with `space_partition` and `fit` levels
        y_target: tuple (points, labels, weights) sampled from the target shape
        options: unused here; kept for a common loss-function interface
    Returns:
        Scalar tensor: sum of the four loss terms.
    """
    def _coverage_inner(p, pparent, X, labels):
        # Assign each point to the closer of the two sibling centroids and
        # return the summed squared distance as the coverage loss.
        M = p.n_primitives # number of primitives
        B, N, _ = X.shape
        translations = p.translations_r
        splits = 2 if M > 1 else 1
        assert labels.shape == (B, N, M//splits)
        # First assign points from the labels to each of the siblings
        dists = ((X.unsqueeze(2) - translations.unsqueeze(1))**2).sum(-1)
        assert dists.shape == (B, N, M)
        if M > 1:
            assign_left = (dists[:, :, ::2] < dists[:, :, 1::2]).float()
            assign_right = 1-assign_left
            assignments = torch.stack([
                assign_left * labels,
                assign_right * labels
            ], dim=-1).view(B, N, M)
        else:
            assignments = labels
        assert assignments.shape == (B, N, M)
        assert assignments.sum(-1).max().item() == 1
        # Now compute the sum of squared distances as the loss
        loss = (dists * assignments).sum(-1).mean()
        return loss, assignments
    def _fit_shape_inner(pr, X, X_labels, X_weights):
        # Weighted binary cross-entropy between the max implicit-surface value
        # over all primitives and the inside/outside labels of the points.
        M = pr.n_primitives # number of primitives
        B, N, _ = X.shape
        assert X_labels.shape == (B, N, 1)
        assert X_weights.shape == (B, N, 1)
        translations = pr.translations_r
        rotations = pr.rotations_r
        alphas = pr.sizes_r
        epsilons = pr.shapes_r
        sharpness = pr.sharpness_r
        # Compute the implicit surface function for each primitive
        F, _ = get_implicit_surface(
            X, translations, rotations, alphas, epsilons, sharpness
        )
        assert F.shape == (B, N, M)
        f = torch.max(F, dim=-1, keepdim=True)[0]
        # sm clamps log arguments away from zero for numerical stability.
        sm = F.new_tensor(1e-6)
        t1 = torch.log(torch.max(f, sm))
        t2 = torch.log(torch.max(1.0 - f, sm))
        cross_entropy_loss = - X_labels * t1 - (1.0 - X_labels) * t2
        loss = X_weights * cross_entropy_loss
        return loss.mean(), F
    def _fit_parent_inner(p, X, labels, X_weights, F):
        # Per-primitive cross-entropy against the cluster assignments, plus an
        # MSE loss regressing the predicted qos onto the achieved IoU.
        M = p.n_primitives # number of primitives
        B, N, _ = X.shape
        assert labels.shape == (B, N, M)
        translations = p.translations_r
        rotations = p.rotations_r
        alphas = p.sizes_r
        epsilons = p.shapes_r
        sharpness = p.sharpness_r
        sm = F.new_tensor(1e-6)
        t1 = labels * torch.log(torch.max(F, sm))
        t2 = (1-labels) * torch.log(torch.max(1-F, sm))
        ce = - t1 - t2
        # 5 is a very important number that is necessary for the code to work!!!
        # Do not change!! (This is there to avoid having empty primitives :-))
        loss_mask = (labels.sum(1, keepdim=True) > 5).float()
        loss = (loss_mask*ce*X_weights).mean()
        # Compute the quality of the current SQ
        target_iou = compute_iou(
            F.transpose(2, 1).reshape(-1, N),
            labels.transpose(2, 1).reshape(-1, N),
            average=False
        ).view(B, M).detach()
        mse_qos_loss = ((p.qos - target_iou)**2).mean()
        return loss, mse_qos_loss, F
    # Extract the arguments to local variables
    gt_points, gt_labels, gt_weights = y_target
    _, P = prim_params.space_partition
    sharpness = prim_params.sharpness_r
    # Compute the coverage loss given the partition
    labels = [gt_labels]
    coverage_loss = 0
    for i in range(len(P)):
        pcurrent = P[i]
        if i == 0:
            # Root level has no parent: use an identity precision matrix.
            precision_m = gt_points.new_zeros(3, 3).fill_diagonal_(1).reshape(
                1, 3, 3).repeat((gt_points.shape[0], 1, 1, 1)
            )
            pparent = PrimitiveParameters.from_existing(
                PrimitiveParameters.empty(),
                precision_matrix=precision_m
            )
        else:
            pparent = P[i-1]
        loss, next_labels = _coverage_inner(
            pcurrent, pparent, gt_points, labels[-1]
        )
        labels.append(next_labels)
        coverage_loss = coverage_loss + loss
    F_intermediate = []
    fit_loss = 0
    pr_loss = 0
    for pr, ps in zip(prim_params.fit, P):
        floss, F = _fit_shape_inner(pr, gt_points, gt_labels, gt_weights)
        fit_loss = fit_loss + 1e-1 * floss
        F_intermediate.append(F)
        # Compute the proximity loss between the centroids and the centers of the
        # primitives
        s_tr = ps.translations_r.detach()
        r_tr = pr.translations_r
        pr_loss = pr_loss + ((s_tr - r_tr)**2).sum(-1).mean()
    # Compute the disjoint loss between the siblings
    intermediates = ValueRegistry.get_instance("loss_intermediate_values")
    intermediates["F_intermediate"] = F_intermediate
    intermediates["labels"] = labels
    intermediates["gt_points"] = gt_points
    # Compute the quality of the reconstruction
    qos_loss = 0
    for i, pr in enumerate(prim_params.fit):
        floss, qloss, F = _fit_parent_inner(
            pr, gt_points, labels[i+1], gt_weights, F_intermediate[i]
        )
        fit_loss = fit_loss + 1e-2 * floss
        qos_loss = qos_loss + 1e-3 * qloss
    # Compute some metrics to report during training
    F_leaves = F_intermediate[-1]
    iou = compute_iou(
        gt_labels.squeeze(-1),
        torch.max(F_leaves, dim=-1)[0]
    )
    accuracy, positive_accuracy = _compute_accuracy_and_recall(
        F_leaves,
        F_leaves.new_ones(F_leaves.shape[0], F_leaves.shape[-1]),
        gt_labels,
        gt_weights
    )
    stats = StatsLogger.instance()
    stats["losses.coverage"] = coverage_loss.item()
    stats["losses.fit"] = fit_loss.item()
    stats["losses.prox"] = pr_loss.item()
    stats["losses.qos"] = qos_loss.item()
    stats["metrics.iou"] = iou
    stats["metrics.accuracy"] = accuracy.item()
    stats["metrics.positive_accuracy"] = positive_accuracy.item()
    return coverage_loss + pr_loss + fit_loss + qos_loss
| StarcoderdataPython |
3390831 | <filename>makeBismarkMethylationExtractorPlusPlusReadPosScript.py
def makeBismarkMethylationExtractorPlusPlusReadPosScript(bismarkFileNameListFileName, outputDir, ignoreR2Val, maxLen, maxLenR2, scriptFileName, codePath):
    """Write a shell script that extracts methylation status from Bismark output.

    One `bismark_methylation_extractor_plusplus_readPos.pl` invocation is
    emitted per line of the input list file.

    :param bismarkFileNameListFileName: text file, one Bismark output path per line
    :param outputDir: directory passed to the extractor's -o option
    :param ignoreR2Val: value for --ignore_r2
    :param maxLen: value for --max_len
    :param maxLenR2: value for --max_len_r2
    :param scriptFileName: path of the script file to (over)write
    :param codePath: directory containing the perl script; should not end with /
    """
    # Context managers close both files even if writing fails; 'w' replaces
    # the previous 'w+' since the script file is only written, never read.
    with open(bismarkFileNameListFileName) as bismarkFileNameListFile, \
            open(scriptFileName, 'w') as scriptFile:
        for line in bismarkFileNameListFile:
            # One extractor command per listed Bismark output file.
            scriptFile.write(
                "perl " + codePath + "/bismark_methylation_extractor_plusplus_readPos.pl -p --no_overlap -o "
                + outputDir + " --gzip --ignore_r2 " + str(ignoreR2Val)
                + " --max_len " + str(maxLen) + " --max_len_r2 " + str(maxLenR2)
                + " " + line.strip() + "\n")
if __name__=="__main__":
    # CLI wrapper: positional argv order matches the function's parameters.
    import sys
    bismarkFileNameListFileName = sys.argv[1]
    outputDir = sys.argv[2]
    ignoreR2Val = int(sys.argv[3])
    maxLen = int(sys.argv[4])
    maxLenR2 = int(sys.argv[5])
    scriptFileName = sys.argv[6]
    codePath = sys.argv[7] # Should not end with /
    makeBismarkMethylationExtractorPlusPlusReadPosScript(bismarkFileNameListFileName, outputDir, ignoreR2Val, maxLen, maxLenR2, scriptFileName, codePath)
| StarcoderdataPython |
1631908 | import base64
import pickle
from django_redis import get_redis_connection
def merge_cookie_to_redis(request,user,response):
    """
    Merge the anonymous shopping-cart data stored in the 'cart' cookie into
    the authenticated user's Redis cart, then delete the cookie.

    :param request: HTTP request carrying the (optional) 'cart' cookie
    :param user: authenticated user whose Redis cart receives the data
    :param response: HTTP response the cookie deletion is applied to
    :return: the response (unchanged when there was no cart cookie)
    """
    cart_cookie = request.COOKIES.get('cart')
    if cart_cookie is not None:
        # NOTE(review): unpickling cookie content is unsafe unless the cookie
        # is integrity-protected upstream -- confirm it is signed/trusted.
        cart_dic = pickle.loads(base64.b64decode(cart_cookie))
        # Load the cart currently stored in Redis for this user.
        redis_conn = get_redis_connection('cart')
        cart_sku_count = redis_conn.hgetall('cart_%s'%user.id)
        cart = {}
        # Redis returns bytes; normalise sku ids and counts to ints.
        for sku_id,count in cart_sku_count.items():
            cart[int(sku_id)] = int(count)
        sku_id_selected_ls = []
        # Walk the cookie cart entries.
        for sku_id,cookie_count_selected in cart_dic.items():
            # Merge: the cookie count overwrites an existing Redis entry,
            # otherwise a new entry is added.
            cart[sku_id] = cookie_count_selected['count']
            # Remember which cookie sku_ids were marked as selected.
            if cookie_count_selected['selected']:
                sku_id_selected_ls.append(sku_id)
        # Persist the merged cart and the selected sku ids back to Redis.
        redis_conn.hmset('cart_%s'%user.id,cart)
        if len(sku_id_selected_ls)>0:
            redis_conn.sadd('cart_selected_%s'%user.id,*sku_id_selected_ls)
        # The cookie is no longer needed once it has been merged.
        response.delete_cookie('cart')
        return response
    else:
        return response
| StarcoderdataPython |
3304664 | import unittest
from kbmod import *
class test_search(unittest.TestCase):
    """End-to-end test of the GPU stack search on a synthetic moving object."""

    def setUp(self):
        # test pass thresholds
        self.pixel_error = 0
        self.velocity_error = 0.05
        self.flux_error = 0.15
        # image properties
        self.imCount = 20
        self.dim_x = 80
        self.dim_y = 60
        self.noise_level = 8.0
        self.variance = self.noise_level**2
        self.p = psf(1.0)
        # object properties
        self.object_flux = 250.0
        self.start_x = 17
        self.start_y = 12
        self.x_vel = 21.0
        self.y_vel = 16.0
        # search parameters
        self.angle_steps = 150
        self.velocity_steps = 150
        self.min_angle = 0.0
        self.max_angle = 1.5
        self.min_vel = 5.0
        self.max_vel = 40.0
        # create image set with single moving object
        self.imlist = []
        for i in range(self.imCount):
            # Times are evenly spaced over [0, 1); the object moves linearly.
            time = i/self.imCount
            im = layered_image(str(i), self.dim_x, self.dim_y,
                self.noise_level, self.variance, time)
            im.add_object( self.start_x + time*self.x_vel+0.5,
                self.start_y + time*self.y_vel+0.5,
                self.object_flux, self.p)
            self.imlist.append(im)
        self.stack = image_stack(self.imlist)
        self.search = stack_search(self.stack, self.p)
        #self.search.save_psi_phi("temp/")
        # Run the GPU search over the configured angle/velocity grid; the last
        # argument is the minimum number of observations for a candidate.
        self.search.gpu( self.angle_steps, self.velocity_steps,
            self.min_angle, self.max_angle, self.min_vel,
            self.max_vel, int(self.imCount/2))

    def test_results(self):
        """The top-ranked trajectory should recover the injected object."""
        #self.search.save_results("./test.txt", 1)
        #self.p.print_psf()
        #self.stack.save_images("temp/")
        results = self.search.get_results(0,10)
        best = results[0]
        #for r in results:
        #    print(r)
        self.assertAlmostEqual(best.x, self.start_x, delta=self.pixel_error)
        self.assertAlmostEqual(best.y, self.start_y, delta=self.pixel_error)
        self.assertAlmostEqual(best.x_v/self.x_vel, 1, delta=self.velocity_error)
        self.assertAlmostEqual(best.y_v/self.y_vel, 1, delta=self.velocity_error)
        self.assertAlmostEqual(best.flux/self.object_flux, 1, delta=self.flux_error)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
def corrente(texto, indice):
    """Return the alphanumeric word of *texto* covering position *indice*.

    Returns None when *indice* is out of range or does not sit on an
    alphanumeric character.
    """
    tamanhoP = len(texto)
    resultado = None
    # BUG FIX: accept index 0 (was `indice > 0`), and let the backwards scan
    # reach the first character (was `iStart - 1 > 0`), so words starting at
    # position 0 are returned in full.
    if 0 <= indice < tamanhoP and texto[indice].isalnum():
        iStart = iEnd = indice
        # Walk left to the start of the word.
        while iStart - 1 >= 0 and texto[iStart - 1].isalnum():
            iStart -= 1
        # Walk right past the end of the word.
        while iEnd < tamanhoP and texto[iEnd].isalnum():
            iEnd += 1
        resultado = texto[iStart:iEnd]
    return resultado
def main():
    # Smoke-test corrente on a single-character string at index 0.
    texto = "a"
    palavra = corrente(texto, 0)
    print(palavra)

if __name__ == '__main__':
    main()
| StarcoderdataPython |
85332 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.time import Time
def open_avro(fname):
    """Return the first alert packet from the Avro file *fname*, or None if empty."""
    # BUG FIX: fastavro is used here but was never imported at module level,
    # which made every call raise NameError; import it locally so the function
    # is self-contained.
    import fastavro
    with open(fname, 'rb') as f:
        for packet in fastavro.reader(f):
            # ZTF alert files contain a single record; return the first one.
            return packet
    return None
def make_dataframe(packet):
    """Build a single light-curve DataFrame from an alert packet.

    The latest candidate row is stacked on top of the previous-candidate
    history; the alert identifiers are attached as plain attributes.
    """
    latest = pd.DataFrame(packet['candidate'], index=[0])
    history = pd.DataFrame(packet['prv_candidates'])
    dflc = pd.concat([latest, history], ignore_index=True, sort=True)
    dflc.objectId = packet['objectId']
    dflc.candid = packet['candid']
    return dflc
def dcmag(dflc, match_radius_arcsec=1.5, star_galaxy_threshold = 0.4,band=2):
    """Add DC (direct-current) magnitude columns to a ZTF alert light curve.

    Combines the reference-image magnitude (magnr) with the difference-image
    magnitude (magpsf) to recover the total source brightness.  Skips objects
    that are not matched to a point source (i.e. not variable-star-like).

    :param dflc: light-curve DataFrame (needs distpsnr1, sgscore1, fid, field,
        rcid, magnr, sigmagnr, isdiffpos, magpsf, sigmapsf, diffmaglim)
    :param match_radius_arcsec: max PS1 match distance to treat as a star
    :param star_galaxy_threshold: min sgscore to treat as a star
    :param band: unused; kept for interface compatibility
    :return: the DataFrame with dc_mag, dc_sigmag, dc_mag_ulim, dc_mag_llim
    """
    if (dflc.loc[0,'distpsnr1'] > match_radius_arcsec) & (dflc.loc[0,'sgscore1'] < star_galaxy_threshold):
        print('Object is not a variable star.')
        return dflc
    else:
        dflc=dflc.fillna(np.nan)
        def robust_median(x):
            # Median over finite values only; NaN for empty groups.
            if len(x) == 0:
                return np.nan
            else:
                return np.median(x[np.isfinite(x)])
        # Impute missing reference magnitudes per (filter, field, readout) group.
        grp = dflc.groupby(['fid','field','rcid'])
        impute_magnr = grp['magnr'].agg(robust_median)
        impute_sigmagnr = grp['sigmagnr'].agg(robust_median)
        for idx, grpi in grp:
            w = np.isnan(grpi['magnr'])
            w2 = grpi[w].index
            dflc.loc[w2,'magnr'] = impute_magnr[idx]
            dflc.loc[w2,'sigmagnr'] = impute_sigmagnr[idx]
        # +1 for positive difference detections, -1 for negative ones.
        dflc['sign'] = 2* (dflc['isdiffpos'] == 't') - 1
        dflc['dc_mag'] = -2.5 * np.log10(10**(-0.4*dflc['magnr']) + dflc['sign'] * 10**(-0.4*dflc['magpsf']))
        # BUG FIX: the denominator must be the TOTAL flux
        # (ref_flux + sign*diff_flux); previously operator precedence divided
        # only by the reference flux and then added the difference flux,
        # yielding a nonsensical uncertainty.
        dflc['dc_sigmag'] = np.sqrt(
            (10**(-0.4*dflc['magnr']) * dflc['sigmagnr'])**2. +
            (10**(-0.4*dflc['magpsf']) * dflc['sigmapsf'])**2.
        ) / (10**(-0.4*dflc['magnr']) + dflc['sign'] * 10**(-0.4*dflc['magpsf']))
        # Upper/lower DC limits derived from the difference-image limiting mag.
        dflc['dc_mag_ulim'] = -2.5 * np.log10(10**(-0.4*dflc['magnr']) + 10**(-0.4*dflc['diffmaglim']))
        dflc['dc_mag_llim'] = -2.5 * np.log10(10**(-0.4*dflc['magnr']) - 10**(-0.4*dflc['diffmaglim']))
        return dflc
def band_amplitude(dflc, band=2):
    """Return the max-min magnitude spread in *band*, preferring DC magnitudes.

    Falls back to the raw PSF magnitude column when DC magnitudes have not
    been computed yet.  Prints the extremes and whether the amplitude reaches
    one magnitude.
    """
    mag_key = 'dc_mag' if 'dc_mag' in dflc.columns.values else 'magpsf'
    in_band = dflc[dflc.fid == band]
    ampli = in_band[mag_key].max() - in_band[mag_key].min()
    print('Max:', in_band[mag_key].max())
    print('Min:', in_band[mag_key].min())
    print('Amplitude:', ampli)
    print('Is amplitude > 1.0 mag?', ampli >= 1)
    return ampli
def plot_dc_lightcurve(dflc, days_ago=True):
    """Plot the DC-magnitude light curve, one color per ZTF filter.

    Detections are drawn with error bars; non-detections as upper (v) and
    lower (^) limit markers.  Returns the matplotlib figure.

    :param dflc: light curve with jd, fid, magpsf, dc_mag, dc_sigmag,
        dc_mag_ulim and dc_mag_llim columns
    :param days_ago: if True, plot time relative to now instead of raw JD
    """
    plt.rcParams["figure.figsize"] = (10,7)
    # ZTF filter ids: 1=g, 2=r, 3=i.
    filter_color = {1:'green', 2:'red', 3:'pink'}
    if days_ago:
        now = Time.now().jd
        t = dflc.jd - now
        xlabel = 'Days Ago'
    else:
        t = dflc.jd
        xlabel = 'Time (JD)'
    fig=plt.figure()
    for fid, color in filter_color.items():
        # plot detections in this filter:
        w = (dflc.fid == fid) & ~dflc.magpsf.isnull()
        if np.sum(w):
            plt.errorbar(t[w],dflc.loc[w,'dc_mag'], dflc.loc[w,'dc_sigmag'],fmt='.',color=color)
        # Rows without a magpsf are non-detections; show their limits.
        wnodet = (dflc.fid == fid) & dflc.magpsf.isnull()
        if np.sum(wnodet):
            plt.scatter(t[wnodet],dflc.loc[wnodet,'dc_mag_ulim'], marker='v',color=color,alpha=0.25)
            plt.scatter(t[wnodet],dflc.loc[wnodet,'dc_mag_llim'], marker='^',color=color,alpha=0.25)
    # Magnitudes: brighter is smaller, so invert the y axis.
    plt.gca().invert_yaxis()
    plt.xlabel(xlabel)
    plt.ylabel('Magnitude')
    return fig
def get_dc_mag(dflc, band=2, days_ago=True):
    """Compute DC magnitudes (when missing), the band amplitude and the plot.

    Returns a tuple (dataframe, amplitude, figure).
    """
    if 'dc_mag' not in dflc.columns.values:
        dflc = dcmag(dflc)
    amplitude = band_amplitude(dflc, band=band)
    fig = plot_dc_lightcurve(dflc, days_ago=days_ago)
    return dflc, amplitude, fig
| StarcoderdataPython |
4806960 | <gh_stars>1-10
#! /usr/bin/python3
from sys import exit
import gi; gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
if __name__ == "__main__":
    #dialog = Gtk.ColorChooserDialog()
    dialog = Gtk.ColorSelectionDialog()
    if dialog.run() == Gtk.ResponseType.OK:
        # RGBA channels come back as floats in [0, 1]; scale to 0-255
        # and print the selection as a #rrggbb hex string.
        color = dialog.get_color_selection().get_current_rgba()
        red = int(color.red * 255)
        green = int(color.green * 255)
        blue = int(color.blue * 255)
        print("#{:02x}{:02x}{:02x}".format(red, green, blue))
    else:
        # Non-OK response (cancel/close): signal failure to the caller.
        exit(1)
| StarcoderdataPython |
94993 | from wsgiref.simple_server import make_server
from fs.osfs import OSFS
from wsgi import serve_fs
# Serve the user's home directory over HTTP via a WSGI filesystem adapter.
# NOTE(review): this is Python 2 code (statement-form print below).
osfs = OSFS('~/')
application = serve_fs(osfs)
httpd = make_server('', 8000, application)
print "Serving on http://127.0.0.1:8000"
httpd.serve_forever()
| StarcoderdataPython |
1695456 | <reponame>scottstickells/AWS-Scripts<gh_stars>0
#Built with Python 3.3.2
#This script allows for the interactive input from a user to create an EBS volume snapshot and select from what date and time snapshots should be retained and anything older than the specified date and time will be deleted
#The script will prompt for access & secret keys as well as region information
#The script will then prompt to enter the volume to snapshot and then the script will request a description for the snapshot, finally the user will be asked to input the date and time in UTC from which older snapshots will be deleted
import boto.ec2
import datetime
import time
from boto.ec2.connection import EC2Connection
from boto.ec2.regioninfo import RegionInfo
from boto.ec2.snapshot import Snapshot
# Gather credentials and region details interactively.
aws_access_key = str(input("AWS Access Key: "))
aws_secret_key = str(input("AWS Secret Key: "))
regionname = str(input("AWS Region Name: "))
regionendpoint = str(input("AWS Region Endpoint: "))
region = RegionInfo(name=regionname, endpoint=regionendpoint)
conn = EC2Connection(aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, region=region)
# If this prints None the connection details are wrong and later calls fail.
print(conn)
# List all volumes so the user can pick one to snapshot.
volumes = conn.get_all_volumes()
print("%s" % repr(volumes))
vol_id = str(input("Enter Volume ID to snapshot: "))
# BUG FIX: previously `volume = volumes[0]` always snapshotted the first
# volume, silently ignoring the ID the user just entered.
matching = [v for v in volumes if v.id == vol_id]
if not matching:
    raise SystemExit("No volume with ID %s found in region %s" % (vol_id, regionname))
volume = matching[0]
description = str(input("Enter volume snapshot description: "))
if volume.create_snapshot(description):
    print('Snapshot created with description: %s' % description)
snapshots = volume.snapshots()
# Snapshots whose start time precedes this UTC cutoff will be deleted.
user_time = str(input("Enter date and time in UTC from when you want to delete snapshots, enter in the format Year-Month-Date Hours:Minutes:Seconds, eg 2015-3-4 14:00:00 : "))
real_user_time = datetime.datetime.strptime(user_time, '%Y-%m-%d %H:%M:%S')
for snap in snapshots:
    # start_time looks like 2015-03-04T14:00:00.000Z; strip the ".000Z" tail.
    start_time = datetime.datetime.strptime(snap.start_time[:-5], '%Y-%m-%dT%H:%M:%S')
    if start_time < real_user_time:
        snap.delete()
        # BUG FIX: previously printed the bound method object (snap.delete);
        # report which snapshot was removed instead.
        print('Deleted snapshot %s (started %s)' % (snap.id, snap.start_time))
| StarcoderdataPython |
def chat_import(filepath):
    """Open the chat log at *filepath* for reading and writing.

    Returns the open file object; closing it is the caller's responsibility.
    """
    return open(filepath, "r+")
1747740 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 4 15:23:44 2021
@author: py
"""
from adafruit_servokit import ServoKit
import rospy
import sensor_msgs.msg
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Float32MultiArray
import message_filters
import time
kit = ServoKit(channels = 16)
def callback(msg1, msg2):
    """Drive the steering servo and throttle from camera and lidar input.

    :param msg1: Float32MultiArray from the cam_values node; data layout is
        [check_road, check_centr, angle_1, angle_2, stop_sign, right_sign]
    :param msg2: sensor_msgs/LaserScan; ranges are 360 distance readings
    """
    check_road = msg1.data[0]
    check_centr = msg1.data[1]
    angle_1 = msg1.data[2]  # currently unused
    angle_2 = msg1.data[3]  # currently unused
    stop_sign = msg1.data[4]
    right_sign = msg1.data[5]  # currently unused
    # Minimum distances over angular sectors of the lidar scan.
    left_min = min(msg2.ranges[45:90])
    top_centr_min = min(min(msg2.ranges[0:45]), min(msg2.ranges[315:359]))
    right_min = min(msg2.ranges[270:315])
    centr_2 = min(min(msg2.ranges[0:15]), min(msg2.ranges[335:359]))
    left = min(msg2.ranges[45:75])
    right = min(msg2.ranges[295:315])
    # Emergency stop when anything is dangerously close on any side.
    if left_min < 0.15 or right_min < 0.15 or top_centr_min < 0.25:
        kit.continuous_servo[11].throttle = 0
    # Stop-sign handling.
    # BUG FIX: was `stop_sign == 1.0 & centr_2 < 2.0`; `&` binds tighter than
    # the comparisons, so `1.0 & centr_2` raised TypeError on floats.  Use the
    # boolean `and` as intended.
    if stop_sign == 1.0 and centr_2 < 2.0:
        kit.continuous_servo[11].throttle = 0
        kit.servo[0].angle = 90
    else:
        # Default: drive forward with the wheels straight.
        kit.continuous_servo[11].throttle = 0.3
        kit.servo[0].angle = 90
    # Nudge away from nearby walls.
    if left_min < 0.3:
        kit.servo[0].angle = 100
    if right_min < 0.3:
        kit.servo[0].angle = 80
    if centr_2 < 2.0:
        # Turn left when the right side is blocked or the left is open.
        if right_min < 0.25 or left > 0.6:
            kit.servo[0].angle = 45
        # Turn right when the left side is blocked or the right is open.
        if left_min < 0.25 or right > 0.6:
            kit.servo[0].angle = 135
        # Stop when boxed in on both sides.
        if right_min < 0.3 and left_min < 0.3:
            kit.continuous_servo[11].throttle = 0
    elif check_road == 1.0:
        # Lane following: steer toward the detected lane centre.
        if check_centr == -1.0:
            kit.servo[0].angle = 80
        elif check_centr == 1.0:
            kit.servo[0].angle = 100
        else:
            kit.servo[0].angle = 90
def listener():
    """Initialise the ROS node and wire the synchronized sensor callback."""
    # Initialise the listener node.
    rospy.init_node('listener', anonymous = True)
    cam_sub = message_filters.Subscriber('cam_values', Float32MultiArray)
    lidar_sub = message_filters.Subscriber('/scan', LaserScan)
    # Approximately synchronize camera and lidar messages (0.1 s tolerance);
    # allow_headerless because Float32MultiArray carries no header timestamp.
    ts = message_filters.ApproximateTimeSynchronizer([cam_sub, lidar_sub], queue_size = 10, slop = 0.1, allow_headerless = True)
    ts.registerCallback(callback)
    rospy.spin()

if __name__ == '__main__':
    listener()
| StarcoderdataPython |
1641598 | #!/usr/bin/env python3.7
"""A hashlife style solution for day 12.
TODO: document this!
"""
from __future__ import annotations
import sys
from collections import defaultdict, deque
from typing import Dict, List, Union, Tuple, Optional, Any
from dataclasses import dataclass
@dataclass(eq=False, frozen=True)
class Node:
    """Hashconsed binary-tree node covering 2**level cells of the pot row.

    eq=False keeps identity-based equality/hashing, which is what makes the
    hashconsing tables fast: equal contents are always the same object.
    """
    level: int
    left: Node
    right: Node
    # Cached pattern string; only populated for nodes of size <= 8.
    plants: str
    # Number of live ('#') cells in this subtree.
    live: int
    # The score if the left hand of the node was at 0
    score: int

    def centered_score(self) -> int:
        """Score with the node's midpoint treated as position 0."""
        return self.score - self.live * (1 << (self.level - 1))
class EmptyLeafNode(Node):
    """Sentinel child of level-1 nodes (which hold their cells in `plants`).

    Overrides the frozen dataclass __init__ so it can be built with no fields.
    """
    def __init__(self) -> None:
        pass
    def __repr__(self) -> str:
        return '<leaf>'
# Hashconsing table: canonical Node per pattern string or (left, right) pair.
hashconser: Dict[Union[str, Tuple[Node, Node]], Node] = {}
# Maps a 5-cell neighbourhood pattern to the next state of its centre cell.
update_rule: Dict[str, str] = {}
# Memo for step(): (node, steps_power) -> advanced node one level down.
step_cache: Dict[Tuple[Node, int], Node] = {}
# Shared sentinel used as the children of all level-1 nodes.
LEAF = EmptyLeafNode()
def new_plants(plants: str) -> Node:
    """Return the canonical Node for the pattern string *plants*.

    Recursively splits the string in half until reaching 2-cell leaves.
    """
    if plants not in hashconser:
        # NB must be a power of 2
        if len(plants) > 2:
            i = len(plants) // 2
            hashconser[plants] = new(new_plants(plants[:i]), new_plants(plants[i:]))
        else:
            # Level-1 leaf: live count and score come from the '#' positions.
            real = [i for i, v in enumerate(plants) if v == '#']
            hashconser[plants] = Node(1, LEAF, LEAF, plants, len(real), sum(real))
    return hashconser[plants]
def new(left: Node, right: Node) -> Node:
    """Return the canonical parent Node combining *left* and *right*."""
    key = (left, right)
    if key not in hashconser:
        assert left.level == right.level
        plants = ""
        # cache plants in nodes up to size 8
        if left.level <= 2:
            plants = left.plants + right.plants
        # Shift the right child's contribution by the left child's width.
        score = left.score + right.score + (1 << left.level) * right.live
        hashconser[key] = Node(
            left.level + 1, left, right, plants, left.live + right.live, score
        )
    return hashconser[key]
def step_leaf(leaf: Node) -> Node:
    """Evaluate a size 8 node, producing a size 4 node of the middle"""
    assert leaf.level == 3
    assert len(leaf.plants) == 8
    # Apply the 5-cell rule to each of the 4 middle positions (indices 2..5).
    # NOTE(review): the local name 'str' shadows the builtin; harmless here.
    str = "".join(update_rule.get(leaf.plants[i-2:i+3], '.') for i in range(2, 6))
    assert len(str) == 4
    return new_plants(str)
def step_interior(node: Node, steps_power: int) -> Node:
    """Step an interior node of level n, producing a node of level n-1.

    Evaluate for up to 2**steps_power steps.
    """
    assert node.level > 3
    if (node, steps_power) in step_cache:
        return step_cache[node, steps_power]
    # Advance the three overlapping half-width windows one level down.
    left = step(node.left, steps_power)
    right = step(node.right, steps_power)
    mid = step(new(node.left.right, node.right.left), steps_power)
    if node.level - 2 > steps_power:
        # Not enough requested steps to advance again: just reassemble the
        # centre from the quarter pieces.
        val = new(
            new(left.right, mid.left),
            new(mid.right, right.left),
        )
    else:
        # Classic hashlife double-step: advance the recombined halves again.
        val = new(
            step(new(left, mid), node.level),
            step(new(mid, right), node.level),
        )
    step_cache[node, steps_power] = val
    return val
def step(node: Node, to_skip: int) -> Node:
    """Evaluate a node of level n, producing a node of level n-1.

    Level-3 nodes are evaluated directly by the rule table; larger nodes are
    dispatched to the recursive hashlife routine.
    """
    if node.level == 3:
        return step_leaf(node)
    return step_interior(node, to_skip)
def empty(level: int) -> Node:
    """Return the canonical all-dead node of the given level."""
    if level != 1:
        # An empty node's children are two identical (hashconsed) empties.
        child = empty(level - 1)
        return new(child, child)
    return new_plants('..')
def expand(node: Node) -> Node:
    """Expand a node to be one level higher but have the same contents."""
    # Pad with half-width empties on the far left and far right.
    pad = empty(node.level - 1)
    left_half = new(pad, node.left)
    right_half = new(node.right, pad)
    return new(left_half, right_half)
def try_shrink(node: Node) -> Node:
    """If possible, shrink a node to be one level lower but have the same contents."""
    # Shrinking is only safe when the outer quarters contain no live cells.
    if node.left.left.live != 0 or node.right.right.live != 0:
        return node
    return new(node.left.right, node.right.left)
def next_power_2(x: int) -> int:
    """Return the smallest power of 2 greater than or equal to x"""
    bits_needed = (x - 1).bit_length()
    return 2 ** bits_needed
def last_power_2(n: int) -> int:
    """Return the largest power of 2 less than or equal to n."""
    top_bit = n.bit_length() - 1
    return 1 << top_bit
def parse_input(data: List[str]) -> Tuple[Dict[str, str], str]:
    """Parse the puzzle input lines.

    data[0] is "initial state: <pattern>"; data[2:] are rule lines of the form
    "LLCRR => N".  Returns (rules, initial state) where the initial state is
    right-padded with '.' to a power-of-two length of at least 8.
    """
    # Dict comprehension instead of dict([...]) -- same unpacking, clearer.
    update_rules = {pattern: result
                    for pattern, _, result in (line.split(' ') for line in data[2:])}
    initial = data[0].split(" ")[2]
    size = max(next_power_2(len(initial)), 8)
    initial += '.' * (size - len(initial))
    return update_rules, initial
def run(state: Node, target: int) -> Node:
    """Advance *state* by *target* generations using power-of-two jumps."""
    # Expand the node so that it can be immediately stepped as far as we need.
    while (1 << (state.level - 3)) < target:
        state = expand(state)
    # Evaluate until we hit it!
    steps = 0
    while steps < target:
        # step can only evaluate a power-of-two number of steps, so
        # find the largest power of two less than our distance to go.
        amount = last_power_2(target - steps)
        print("stepping", amount)
        steps += amount
        # expand() first so step() returns a node of the original level.
        state = step(expand(state), amount.bit_length())
        print(
            'steps={}, score={}, live={}, table size={}'.format(
                steps, state.centered_score(), state.live, len(hashconser)
            )
        )
    return state
def main(args) -> None:
    """Read the puzzle from stdin and print the final centered score.

    args[1], if present, overrides the default 50-billion step count.
    """
    target = 50_000_000_000
    if args[1:]:
        target = int(args[1])
    rules, initial = parse_input([s.strip() for s in sys.stdin])
    update_rule.update(rules)
    # The provided initial state starts at zero, but our node wants to
    # be centered on zero, so put an empty node to the left.
    state_0 = new_plants(initial)
    state = new(empty(state_0.level), state_0)
    state = run(state, target)
    print(state.centered_score())
if __name__ == '__main__':
    # Entry point: puzzle on stdin, optional step count as argv[1].
    main(sys.argv)
| StarcoderdataPython |
1766190 | """
Testing the CLI
"""
import os
from click.testing import CliRunner
from asaplib.cli.cmd_asap import asap
def test_cmd_gen_soap():
    """Test the command for generating soap descriptors"""
    here = os.path.split(__file__)[0]
    xyz_file = os.path.abspath(os.path.join(here, 'small_molecules-1000.xyz'))
    cli_runner = CliRunner()
    # Run inside an isolated filesystem so generated descriptor files do not
    # pollute the test directory.
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(asap, ['gen_desc', '--fxyz', xyz_file, 'soap'])
        assert outcome.exit_code == 0
        assert 'Using SOAP' in outcome.stdout
| StarcoderdataPython |
27062 | #/bin/python3
## Step1 scan recursively over all files
import os
import re
import pdb
import datetime
# Source tree of notes to scan and destination folder for processed posts.
path = "./notes"
dest = "_posts"
# Only files whose name starts with this prefix are published.
magic_prefix = "Active-"
def extractModifiedDate(string):
    """Extract an ISO-8601 UTC timestamp (e.g. 2020-01-02T03:04:05.678Z).

    Returns a datetime when exactly one timestamp is found in *string*,
    otherwise None.
    """
    # BUG FIX: escape the dot before the fractional seconds; an unescaped '.'
    # matched any character, which strptime would then reject.
    regexp = r"\d+-\d+-\d+T\d+:\d+:\d+\.\d+Z"
    date_strings_all = re.findall(regexp, string)
    date = None
    if len(date_strings_all) == 1:
        date = datetime.datetime.strptime(date_strings_all[0], "%Y-%m-%dT%H:%M:%S.%fZ")
    return date
def insert_str(string, str_to_insert, index):
    """Return *string* with *str_to_insert* spliced in before position *index*."""
    head = string[:index]
    tail = string[index:]
    return head + str_to_insert + tail
def processFile(src, dest):
    """Convert a note at *src* into a Jekyll post at *dest*.

    Strips the leading YAML front matter (delimited by '---' lines), doubles
    every single '$' into '$$' in the post body (MathJax display math), and
    appends a "*Last update:...*" footer when the front matter carried a
    'modified' timestamp.
    """
    # Parser states: before the header, inside it, after it, and in the body.
    state_none = 0
    state_hdr_start = 1
    state_hdr_stop = 2
    state_post_start = 3
    modified_date = None
    print("Process file ", src, " -> ", dest)
    state = state_none
    skiplines = 0  # number of leading front-matter lines to drop
    with open(src, "r") as f_in, open(dest, "w+") as f_out:
        src_lines = f_in.readlines()
        # First pass: measure the front matter and pull out the modified date.
        for line in src_lines:
            #pdb.set_trace()
            if ("---" in line):
                state = state + 1
                if (state == state_post_start):
                    break
                skiplines = skiplines + 1
            if state == state_hdr_start:
                if ("modified" in line):
                    modified_date = extractModifiedDate(line)
        dest_lines = src_lines[skiplines:]
        # Second pass: double lone '$' characters; runs of '$$' are skipped.
        for i in range(0, len(dest_lines)):
            if state == state_post_start:
                line = dest_lines[i]
                # find lines with single $
                start_pos = 0
                pos = line.find('$', start_pos)
                while (pos != -1):
                    if pos + 1 < len(line):
                        if (line[pos + 1] != '$'):
                            line = insert_str(line, '$' ,pos)
                            pos = pos + 1
                        else:
                            # NOTE(review): this inner scan does not re-check
                            # the bound, so a line *ending* in '$$' would index
                            # past the end — confirm inputs never do that.
                            while(line[pos + 1] == '$'):
                                pos = pos + 1
                    else:
                        # '$' is the last character of the line.
                        line = insert_str(line, '$' ,pos)
                        pos = pos + 1
                    start_pos = pos + 1
                    pos = line.find('$', start_pos)
                dest_lines[i] = line
        for i in dest_lines:
            f_out.write(i)
        if (modified_date is not None):
            f_out.write(os.linesep)
            f_out.write("*Last update:" + modified_date.strftime("%d %B %Y") + "*" + os.linesep)
    # Redundant: the with-statement above already closed both files.
    f_in.close()
    f_out.close()
# Walk the notes tree and publish every "Active-" prefixed file (the category
# is derived from the grandparent folder name).
for root,d_names,f_names in os.walk(path):
    if ("notes" in root):
        category = os.path.split(os.path.split(root)[0])[1]
        for post_fn in f_names:
            ## Find all with name Active...dd
            #print(root, post_fn, f_names)
            # Skip backup files; only publish files carrying the magic prefix.
            if ((post_fn.startswith(magic_prefix)) and (".bak" not in post_fn)):
                #print (root, post_fn)
                # Destination name drops the "Active-" prefix.
                new_filename = post_fn[len(magic_prefix):]
                src = os.path.join(root, post_fn)
                dest_filename = os.path.join(dest, new_filename)
                print (root, category, src, "->", dest_filename)
                processFile(src, dest_filename)
                ## Copy file with new name without Active prefix
                ## extract tag, remove first line
| StarcoderdataPython |
3205073 | from typing import Optional, List
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import RDF, RDFS, OWL, DCTERMS, XSD
from client.model._TERN import TERN
from client.model.klass import Klass
from client.model.agent import Agent
from client.model.concept import Concept
import re
class RDFDataset(Klass):
    """A tern:RDFDataset with Dublin Core metadata.

    Carries a title, description, subjects, creator/publisher/contributor
    agents and created/modified/issued dates, and serialises itself to RDF.
    """
    def __init__(
        self,
        iri: Optional[str] = None,
        title: Optional[str] = None,
        description: Optional[str] = None,
        subjects: Optional[List[Concept]] = None,
        creator: Optional[Agent] = None,
        publisher: Optional[Agent] = None,
        contributors: Optional[List[Agent]] = None,
        modified: Optional[str] = None,
        created: Optional[str] = None,
        issued: Optional[str] = None,
    ):
        # BUGFIX: the original guards were written as
        # `isinstance(x.__class__, T.__class__)`, which asks whether x's class
        # is an instance of `type` — true for *every* object — so none of the
        # validations could ever fail. They now check the value itself.
        if title is not None:
            assert isinstance(title, str), \
                "If you provide a value for the title parameter, it must be of type string"
        if description is not None:
            assert isinstance(description, str), \
                "If you provide a value for the description parameter, it must be of type string"
        if subjects is not None:
            for subject in subjects:
                assert isinstance(subject, Concept), \
                    "If supplied, each subject must be of type Concept"
        if creator is not None:
            assert isinstance(creator, Agent), "If supplied a creator must be of type Agent"
        if contributors is not None:
            for contributor in contributors:
                assert isinstance(contributor, Agent), \
                    "If supplied, each contributor must be of type Agent"
        if publisher is not None:
            assert isinstance(publisher, Agent), "If supplied a publisher must be of type Agent"
        # Dates must at least look like YYYY-MM-DD.
        date_pattern = re.compile("[0-9]{4}-[0-9]{2}-[0-9]{2}")
        if created is not None:
            assert isinstance(created, str), \
                "If you provide a value for the created parameter, it must be of type string"
            assert date_pattern.match(created), "The value for created you provided is not in the YYYY-MM-DD format"
        if modified is not None:
            assert isinstance(modified, str), \
                "If you provide a value for the modified parameter, it must be of type string"
            assert date_pattern.match(modified), "The value for modified you provided is not in the YYYY-MM-DD format"
        if issued is not None:
            assert isinstance(issued, str), \
                "If you provide a value for the issued parameter, it must be of type string"
            assert date_pattern.match(issued), "The value for issued you provided is not in the YYYY-MM-DD format"
        # Receive and use, or mint, an IRI.
        if iri is None:
            self.id = self.make_uuid()
            iri = URIRef(f"http://example.com/rdfdataset/{self.id}")
        self.iri = URIRef(iri)
        super().__init__(iri)
        if title is not None:
            self.title = title
            self.label = title
        else:
            # Fall back to a label derived from the minted id or the IRI tail.
            self.title = f"RDF Dataset with ID {self.id if hasattr(self, 'id') else self.iri.split('/')[-1]}"
            self.label = self.title
        # Only set attributes that were supplied; to_graph() keys off hasattr.
        if description is not None:
            self.description = description
        if subjects is not None:
            self.subjects = subjects
        if creator is not None:
            self.creator = creator
        if contributors is not None:
            self.contributors = contributors
        if publisher is not None:
            self.publisher = publisher
        if created is not None:
            self.created = created
        if modified is not None:
            self.modified = modified
        if issued is not None:
            self.issued = issued
    def to_graph(self) -> Graph:
        """Serialise this dataset (and any referenced agents/concepts not yet
        present) into an rdflib Graph and return it."""
        g = super().to_graph()
        # Re-type the node from the generic owl:Class to tern:RDFDataset.
        g.remove((self.iri, RDF.type, OWL.Class))
        g.add((self.iri, RDF.type, TERN.RDFDataset))
        g.remove((self.iri, RDFS.label, None))
        g.add((self.iri, RDFS.label, Literal(self.label)))
        if hasattr(self, "title"):
            g.add((self.iri, DCTERMS.title, Literal(self.title)))
        if hasattr(self, "description"):
            g.add((self.iri, DCTERMS.description, Literal(self.description)))
        if hasattr(self, "subjects"):
            # BUGFIX: subjects were emitted with dcterms:creator (copy-paste);
            # the correct predicate is dcterms:subject.
            for subject in self.subjects:
                g.add((self.iri, DCTERMS.subject, subject.iri))
                if (subject.iri, RDF.type, None) not in g:
                    g += subject.to_graph()
        if hasattr(self, "creator"):
            g.add((self.iri, DCTERMS.creator, self.creator.iri))
            if (self.creator.iri, RDF.type, None) not in g:
                g += self.creator.to_graph()
        if hasattr(self, "contributors"):
            # BUGFIX: contributors were emitted with dcterms:creator; the
            # correct predicate is dcterms:contributor.
            for contributor in self.contributors:
                g.add((self.iri, DCTERMS.contributor, contributor.iri))
                if (contributor.iri, RDF.type, None) not in g:
                    g += contributor.to_graph()
        if hasattr(self, "publisher"):
            g.add((self.iri, DCTERMS.publisher, self.publisher.iri))
            if (self.publisher.iri, RDF.type, None) not in g:
                g += self.publisher.to_graph()
        if hasattr(self, "created"):
            g.add((self.iri, DCTERMS.created, Literal(self.created, datatype=XSD.date)))
        if hasattr(self, "modified"):
            g.add((self.iri, DCTERMS.modified, Literal(self.modified, datatype=XSD.date)))
        if hasattr(self, "issued"):
            g.add((self.iri, DCTERMS.issued, Literal(self.issued, datatype=XSD.date)))
        return g
| StarcoderdataPython |
3219291 | from typing import Dict, List, Tuple
import torch
import torch.multiprocessing as mp
from leafdp.utils import model_utils
from leafdp.flower import flower_helpers
import argparse
from datetime import datetime
import numpy as np
import flwr as fl
import os
# Needs this if we want to launch grpc client
if os.environ.get("https_proxy"):
del os.environ["https_proxy"]
if os.environ.get("http_proxy"):
del os.environ["http_proxy"]
BASEPATH = os.environ["BASEPATH"]
DEVICE: str = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class LeafClient(fl.client.NumPyClient):
    """Flower client for Leaf using PyTorch.

    Training and evaluation are run in short-lived subprocesses
    (multiprocessing) so GPU/Opacus state is torn down between rounds; the
    subprocess communicates results back through a Manager dict.
    """
    def __init__(
        self,
        dataset: str,
        cross_silo: bool,
        client_id: str,
        batch_size: int = 256,
        virtual_batch_size: int = 256,
        lr: float = 0.0001,
        diff_privacy: bool = False,
        target_epsilon: float = 0.0,
        nm: float = 0.9555,
        mgn: float = 0.8583,
        centralized: bool = True,
        strategy: str = "hybrid",
    ) -> None:
        """Flower client implementation.
        Args
        ----
        dataset
            Which dataset the client should load.
        cross_silo
            Whether we are in a cross silo setting or not.
        client_id
            Client id to get data.
        diff_privacy
            Whether Differential Privacy is applied or not.
        target_epsilon
            Determine the limit for the privacy budget.
        centralized
            Whether the model is evaluated on the server side or client's.
        strategy
            Noise strategy: 'hybrid', 'fix' or 'adaptive'.
        """
        self.dataset = dataset
        self.cross_silo = cross_silo
        self.client_id = client_id
        self.batch_size = batch_size
        self.virtual_batch_size = virtual_batch_size
        self.lr = lr
        self.diff_privacy = diff_privacy
        self.target_epsilon = target_epsilon
        self.nm = nm
        self.mgn = mgn
        self.centralized = centralized
        self.results = []
        self.parameters = None
        # Variable keeping track of how many times a client was sampled
        self.times_sampled = 0
        self.strategy = strategy
        self.adaptive = strategy == "adaptive"
        if not self.centralized:
            # Client-side evaluation: run an initial test pass to establish
            # the baseline loss/accuracy used by save_model's "better model"
            # check in evaluate().
            # Prepare multiprocess
            manager = mp.Manager()
            return_dict = manager.dict()
            # Create the multiprocess
            p = mp.Process(
                target=flower_helpers.test_fl,
                args=(
                    self.parameters,
                    self.dataset,
                    self.client_id,
                    return_dict,
                    False,
                    self.batch_size,
                    DEVICE,
                ),
            )
            # Start the process
            p.start()
            # Wait for it to end
            p.join()
            try:
                p.close()
            except ValueError as e:
                print(f"Couldn't close the testing process: {e}.")
            # Get the return values
            test_results = return_dict["test_results"]
            # del everything related to multiprocess to free memory
            del (manager, return_dict, p)
            self.loss = test_results[0]
            self.accuracy = test_results[1]
        else:
            self.loss = 0.0
            self.accuracy = 0.0
    def get_parameters(self) -> List[np.ndarray]:
        # Return model parameters as a list of NumPy ndarrays
        return self.parameters
    def set_parameters(self, parameters: List[np.ndarray]) -> None:
        # Set model parameters from a list of NumPy ndarrays
        self.parameters = parameters
    def fit(
        self, parameters: List[np.ndarray], config: Dict[str, str]
    ) -> Tuple[List[np.ndarray], int]:
        """Set model parameters, train model, return updated model parameters.
        Parameters
        ----------
        parameters
            List of parameters to update the model.
        config
            Configuration for fitting.
        Returns
        -------
        List object
            Newly trained parameters.
        Int object
            Size of client's training sample.
        Dict object
            Optional results dict, for example including metrics.
        """
        # Pass client state down to the training subprocess via the config.
        config["cross_silo"] = self.cross_silo
        config["times_sampled"] = self.times_sampled
        config["strategy"] = self.strategy
        config["adaptive"] = self.adaptive
        self.set_parameters(parameters)
        # Prepare multiprocess
        manager = mp.Manager()
        return_dict = manager.dict()
        # Create the multiprocess
        p = mp.Process(
            target=flower_helpers.train_fl,
            args=(
                self.parameters,
                return_dict,
                config,
                self.client_id,
                self.dataset,
                self.batch_size,
                self.virtual_batch_size,
                self.lr,
                self.diff_privacy,
                self.nm,
                self.mgn,
                1e-6,
                self.target_epsilon,
                DEVICE,
            ),
        )
        # Start the process
        p.start()
        # Wait for it to end
        p.join()
        try:
            p.close()
        except ValueError as e:
            print(f"Couldn't close the training process: {e}.")
        # Get the return values
        new_parameters = return_dict["parameters"]
        data_size = return_dict["data_size"]
        train_results = return_dict["train_results"]
        # Init metrics
        metrics = {}
        if self.diff_privacy:
            epsilon = return_dict["privacy_results"][0]
            # Hybrid approach: on the first sampling, let the subprocess
            # decide whether to switch to adaptive noise.
            if not bool(self.times_sampled) and self.strategy == "hybrid":
                self.adaptive = return_dict["adaptive"]
            if self.adaptive:
                metrics["adaptive"] = 1
            accept = True
            if epsilon > self.target_epsilon + 0.3: # leaving some room
                # Privacy budget exhausted: reject the update and keep the
                # parameters we received from the server.
                accept = False
                print(
                    f"Epsilon over target value ({self.target_epsilon}), disconnecting client."
                )
                # Overrides the new parameters with the ones received
                new_parameters = parameters
            metrics.update(
                {
                    "epsilon": epsilon,
                    "alpha": return_dict["privacy_results"][1],
                    "accept": accept,
                }
            )
            privacy_results = [metrics["epsilon"], metrics["alpha"]]
        else:
            privacy_results = []
        # del everything related to multiprocess to free memory
        del (manager, return_dict, p)
        self.times_sampled += 1
        self.set_parameters(new_parameters)
        self.results.append([train_results, privacy_results])
        return (
            new_parameters,
            data_size,
            metrics,
        )
    def evaluate(
        self, parameters: List[np.ndarray], config: Dict[str, str]
    ) -> Tuple[int, float, float]:
        """Set model parameters, evaluate model on local test dataset, return result.
        Parameters
        ----------
        parameters
            List of parameters to evaluate
        config
            Optional configuration
        Returns
        -------
        List object
            Newly trained parameters.
        Int object
            Size of client's training sample.
        Dict object
            Optional results dict, for example including metrics.
        """
        # # Skip final client evaluation if only evaluating on server side
        # if self.centralized:
        #     return 1.0, 1, {"accuracy": 1.0}
        self.set_parameters(parameters)
        # Prepare multiprocess
        manager = mp.Manager()
        return_dict = manager.dict()
        # Create the multiprocess
        p = mp.Process(
            target=flower_helpers.test_fl,
            args=(
                self.parameters,
                self.dataset,
                self.client_id,
                return_dict,
                False,
                self.batch_size,
                DEVICE,
            ),
        )
        # Start the process
        p.start()
        # Wait for it to end
        p.join()
        try:
            p.close()
        except ValueError as e:
            print(f"Couldn't close the testing process: {e}.")
        # Get the return values
        data_size = return_dict["data_size"]
        test_results = return_dict["test_results"]
        # del everything related to multiprocess to free memory
        del (manager, return_dict, p)
        # save model localy if it's better than the current model
        if test_results[0] < self.loss and test_results[1] > self.accuracy:
            self.loss = test_results[0]
            self.accuracy = test_results[1]
            self.save_model()
        return (
            float(test_results[0]),
            data_size,
            {"accuracy": float(test_results[1])},
        )
    def save_model(self):
        """Persist the current model under a timestamped per-client path."""
        # Get the date and format it
        now = datetime.now()
        dt_string = now.strftime("%Y%m%d-%H%M%S")
        save_dir = f"{BASEPATH}leafdp/femnist/models/clients/{self.client_id}/"
        if not os.path.exists(save_dir):
            try:
                os.mkdir(save_dir)
            except OSError:
                print("Creation of the directory failed")
            else:
                print("Successfully created the directory")
        save_path = f"{save_dir}{self.strategy}_dp-{self.diff_privacy}_{dt_string}.pth"
        # NOTE(review): `self.model` is never assigned anywhere in this class
        # (only `self.parameters` is tracked), so this call will raise
        # AttributeError — confirm which attribute should be saved.
        model_utils.save_model(self.model, save_path)
if __name__ == "__main__":
    # CLI for launching a single Flower client process.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", type=int, default=0, help="Client number for determining id."
    )
    parser.add_argument(
        "-s", type=int, default=42, help="Seed for generating random ids."
    )
    parser.add_argument(
        "-nbc",
        type=int,
        default=2,
        help="Number of clients to generate ids",
    )
    parser.add_argument(
        "-d",
        type=str,
        default="femnist",
        help="The dataset we want to train on.",
    )
    parser.add_argument("-cs", type=int, default=1, help="Cross silo dataset or not.")
    parser.add_argument("-b", type=int, default=256, help="Batch size")
    parser.add_argument("-vb", type=int, default=256, help="Virtual batch size")
    parser.add_argument(
        "-lr", type=float, default=0.0001, help="Learning rate for the optimizer"
    )
    parser.add_argument(
        "-dp", type=int, default=0, help="Use Differential Privacy or not"
    )
    parser.add_argument(
        "-nm", type=float, default=1.2, help="Noise multiplier for Private Engine."
    )
    parser.add_argument(
        "-mgn", type=float, default=1.0, help="Max grad norm for Private Engine."
    )
    parser.add_argument(
        "--centralized",
        type=int,
        default=1,
        help="Whether evaluation is made by server or the client",
    )
    parser.add_argument(
        "--tepsilon",
        type=float,
        default=0.0,
        help="Target epsilon for the privacy budget.",
    )
    parser.add_argument(
        "--strategy",
        type=str,
        default="hybrid",
        help="Strategy to get the desired epsilon. Either 'hybrid', 'fix' or 'adaptive'",
    )
    args = parser.parse_args()
    # Normalise parsed values to their expected types.
    client_share = int(args.c)
    seed = int(args.s)
    nbc = int(args.nbc)
    dataset = str(args.d)
    cross_silo = bool(args.cs)
    batch_size = int(args.b)
    virtual_batch_size = int(args.vb)
    lr = float(args.lr)
    dp = bool(args.dp)
    nm = float(args.nm)
    mgn = float(args.mgn)
    centralized = bool(args.centralized)
    target_epsilon = float(args.tepsilon)
    strat = str(args.strategy)
    # Generate ids
    if cross_silo:
        indices = model_utils.generate_indices(nbc, total=201, seed=seed)
        client_id = indices[client_share]
    else:
        indices = model_utils.generate_indices(nbc, total=3551, seed=seed)
        # Get list of all clients
        client_ids = model_utils.get_clients_ids(dataset)
        # Get id
        client_id = client_ids[indices[client_share]]
    # Set explicitely the spawn method for Python under 3.8 compatibility
    mp.set_start_method("spawn")
    # Start client
    client = LeafClient(
        dataset,
        cross_silo,
        client_id,
        batch_size=batch_size,
        virtual_batch_size=virtual_batch_size,
        lr=lr,
        diff_privacy=dp,
        target_epsilon=target_epsilon,
        nm=nm,
        mgn=mgn,
        # NOTE(review): the parsed --centralized flag (`centralized`) is
        # ignored here — the value is hard-coded to True; confirm intent.
        centralized=True,
        strategy=strat,
    )
    print(f"Indices for clients: {indices}")
    fl.client.start_numpy_client("[::]:8080", client=client)
| StarcoderdataPython |
115326 | <reponame>Paul3MK/NewsBlur
import datetime
from django.contrib.auth.models import User
from django.shortcuts import render
from django.views import View
from apps.profile.models import Profile, RNewUserQueue
class Users(View):
    """Render NewsBlur user counts in Prometheus text exposition format."""
    def get(self, request):
        # Activity windows for the monthly/daily counters.
        last_month = datetime.datetime.utcnow() - datetime.timedelta(days=30)
        last_day = datetime.datetime.utcnow() - datetime.timedelta(minutes=60*24)
        data = {
            'all': User.objects.count(),
            'monthly': Profile.objects.filter(last_seen_on__gte=last_month).count(),
            'daily': Profile.objects.filter(last_seen_on__gte=last_day).count(),
            'premium': Profile.objects.filter(is_premium=True).count(),
            'queued': RNewUserQueue.user_count(),
        }
        chart_name = "users"
        chart_type = "counter"
        # One Prometheus sample line per category.
        formatted_data = {
            category: f'{chart_name}{{category="{category}"}} {count}'
            for category, count in data.items()
        }
        context = {
            "data": formatted_data,
            "chart_name": chart_name,
            "chart_type": chart_type,
        }
        return render(request, 'monitor/prometheus_data.html', context, content_type="text/plain")
| StarcoderdataPython |
1675385 | """
Train our RNN on extracted features or images.
"""
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger
from models import ResearchModels
from data import DataSet
import time
import os.path
def train(data_type, seq_length, model, saved_model=None,
          class_limit=None, image_shape=None,
          load_to_memory=False, batch_size=32, nb_epoch=100):
    """Train *model* on either extracted features or raw image sequences.

    data_type is 'images' or 'features'; image_shape is None for feature
    models. When load_to_memory is True the whole dataset is loaded up front,
    otherwise Keras generators stream batches from disk.
    """
    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join('data', 'checkpoints', model + '-' + data_type + '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        verbose=1,
        save_best_only=True)
    # Helper: TensorBoard
    tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))
    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=50)
    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' + str(timestamp) + '.log'))
    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )
    # Get samples per epoch.
    # NOTE(review): an earlier comment said "multiply by 0.7" to estimate the
    # train share, but the code uses * 2 / 16 — presumably dataset-specific;
    # confirm the intended factor.
    steps_per_epoch = (len(data.data['0_train']) * 2 / 16) // batch_size
    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)
    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)
    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(
            X,
            y,
            batch_size=batch_size,
            validation_data=(X_test, y_test),
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger],
            epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)
def main():
    """These are the main training settings. Set each before running
    this file."""
    # model can be one of lstm, lrcn, mlp, conv_3d, c3d
    model = '3d_in_c'
    saved_model = None  # None or weights file
    class_limit = None  # int, can be 1-101 or None
    seq_length = 16
    load_to_memory = False  # pre-load the sequences into memory
    batch_size = 32
    nb_epoch = 1000
    # Map each supported model to its (data_type, image_shape) pair.
    shape_by_model = {
        'conv_3d': ('images', (80, 80, 3)),
        'c3d': ('images', (80, 80, 3)),
        'lrcn': ('images', (80, 80, 3)),
        'lstm': ('features', None),
        'mlp': ('features', None),
        '3d_in_c': ('images', (32, 32, seq_length)),
    }
    if model not in shape_by_model:
        raise ValueError("Invalid model. See train.py for options.")
    data_type, image_shape = shape_by_model[model]
    train(data_type, seq_length, model, saved_model=saved_model,
          class_limit=class_limit, image_shape=image_shape,
          load_to_memory=load_to_memory, batch_size=batch_size, nb_epoch=nb_epoch)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3323107 | <filename>perception/scripts/transform_service.py
#! /usr/bin/env python
import rospy
from transform_helper import Transformer
from perception.srv import *
def get_transform_point_cb(req):
    """Service handler: express req.point in req.to_frame."""
    transformed_point = transformer.transform_point(req.point, req.from_frame, req.to_frame)
    response = GetTransformPointResponse()
    response.point = transformed_point
    return response
def get_transform_pose_cb(req):
    """Service handler: express req.pose in req.to_frame."""
    transformed_pose = transformer.transform_pose(req.pose, req.from_frame, req.to_frame)
    response = GetTransformPoseResponse()
    response.pose = transformed_pose
    return response
if __name__ == '__main__':
    rospy.init_node('transformer_service')
    # Shared TF helper used by both service callbacks above.
    transformer = Transformer()
    # Advertise the point and pose transform services.
    rospy.Service('get_transform_point', GetTransformPoint, get_transform_point_cb)
    rospy.Service('get_transform_pose', GetTransformPose, get_transform_pose_cb)
    rospy.loginfo('\033[94m Started Transformer service \033[0m')
rospy.spin() | StarcoderdataPython |
187289 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Advent of Code 2020, day five."""
INPUT_FILE = 'data/day_05.txt'
def main() -> None:
    """Identify missing ticket."""
    # F/L halve downward (bit 0), B/R halve upward (bit 1): each boarding
    # pass is just a 10-bit binary seat id.
    seat_bits = str.maketrans('FBLR', '0101')
    with open(INPUT_FILE, encoding='utf-8') as input_file:
        tkt = sorted(int(line.strip().translate(seat_bits), 2) for line in input_file)
    print(f'Part One: Highest Seat Id: {tkt[-1]}')
    # Finding the missing ticket: the sum of the full contiguous range
    # tkt[0]..tkt[-1] (triangular-number identity) minus the observed sum.
    sum_all = (tkt[-1] * (tkt[-1] + 1) - (tkt[0] - 1) * tkt[0]) // 2
    missing_ticket = sum_all - sum(tkt)
    print(f'Part Two: Missing Seat Id: {missing_ticket}')
main()
| StarcoderdataPython |
3212745 | # Copyright 2015/2016 by <NAME> (RabbitStack)
# All Rights Reserved.
# http://rabbitstack.github.io
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest.mock import patch
from fibratus.common import IO
class TestIO(object):
    """Unit tests for fibratus.common.IO console output."""
    @patch('fibratus.common.write_console_unicode')
    @patch('fibratus.common.byref', return_value=8)
    def test_write_console_new_line(self, byref_mock, write_console_unicode_mock):
        # Default call appends a newline to the payload.
        IO._stdout_handle = 10
        IO.write_console('Fibratus')
        write_console_unicode_mock.assert_called_with(10, 'Fibratus\n', 9, 8, None)
    @patch('fibratus.common.write_console_unicode')
    @patch('fibratus.common.byref', return_value=8)
    def test_write_console_same_line(self, byref_mock, write_console_unicode_mock):
        # Passing False appends a carriage return so the line is overwritten.
        IO.write_console('Fibratus', False)
        write_console_unicode_mock.assert_called_with(10, 'Fibratus\r', 9, 8, None)
    def test_write_console_unicode(self):
        # Smoke test: accented/unicode text must not raise.
        IO.write_console('aaaàçççñññ skräms inför på fédéra')
| StarcoderdataPython |
3318516 | <reponame>Gabriel-15/tytus
from flask import Flask, jsonify, request
from flask_cors import CORS
from user import users
'''# archivos de parser team16
import interprete as Inter
import Ast2 as ast
from Instruccion import *
import Gramatica as g
import ts as TS
import jsonMode as JSON_INGE
import jsonMode as json
import Instruccion as INST'''
import sys
sys.path.append('../../../../parser/team26/G26/')
sys.path.append('../../../../parser/team26/G26/Utils')
sys.path.append('../../../../parser/team26/G26/Expresiones')
sys.path.append('../../../../parser/team26/G26/Instrucciones')
sys.path.append('../../../../storage/storageManager')
# Parser imports
import Instrucciones.DML.select as select
from Error import *
import jsonMode as storage
import gramatica as g
import Utils.Lista as l
# Data list
storage.dropAll()  # start from a clean storage state on every boot
datos = l.Lista({}, '')  # shared in-memory environment passed to every statement
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the web client
# Establish a DB "connection": validate the submitted credentials.
@app.route('/conexionBD', methods=['POST'])
def conectar():
    """Check the posted user/password against the in-memory user list."""
    user_name = request.json['user']
    user_pass = request.json['password']
    credentials_ok = any(
        u['name'] == user_name and u['password'] == user_pass for u in users
    )
    if credentials_ok:
        return jsonify({"msj": "Conexion establecida"})
    return jsonify({"msj": "Usuario o contraseña invalidos"})
# Receives SQL queries from the client, executes them and returns the output.
@app.route('/query',methods=['POST'])
def transaccionar():
    """Parse the incoming SQL text, execute each AST node against the shared
    environment and return the accumulated output (one line per statement)."""
    query = request.json['query']
    instrucciones = g.parse(query)
    print(instrucciones)
    # Collect each statement's output; joined once at the end instead of the
    # original quadratic string concatenation. (Also dropped the unused
    # `text` variable and a stray semicolon.)
    partes = []
    for instr in instrucciones['ast']:
        if instr is None:
            continue  # parse errors can leave None placeholders in the AST
        result = instr.execute(datos)
        if isinstance(result, Error):
            partes.append(str(result.desc))
        elif isinstance(instr, (select.Select, select.QuerysSelect)):
            # SELECTs render their result set as a printable table.
            partes.append(str(instr.ImprimirTabla(result)))
        else:
            partes.append(str(result))
    mensaje = "".join(p + "\n" for p in partes)
    print(mensaje)
    return jsonify({"msj": mensaje})
@app.route('/newUser', methods=['POST'])
def addUser():
    """Register a new user unless the name is already taken."""
    user_name = request.json['user']
    user_pass = request.json['password']
    userEncontrado = [user for user in users if user['name'] == user_name]
    if (len(userEncontrado) > 0):
        return jsonify({"msj": "El usuario ya existe"})
    # BUGFIX: the password value was the literal placeholder `<PASSWORD>`
    # (a syntax error left by anonymization); store the submitted password.
    # NOTE(review): passwords are kept in plaintext in memory — consider
    # hashing before storing.
    newUser = {
        "name": user_name,
        "password": user_pass,
    }
    users.append(newUser)
    print(users)
    return jsonify({"msj": "Usuario creado"})
if __name__ == "__main__":
app.run(port=10000, host='0.0.0.0') | StarcoderdataPython |
4804218 | <reponame>zooed/meanfield<gh_stars>0
"""Self Play
"""
import os
import magent
import argparse
import numpy as np
import tensorflow as tf
import tools
from four_model import spawn_ai
from senario_battle import play
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
    # CLI options (help strings are in Chinese: algorithm choice, number of
    # training rounds, max steps per episode, map size, Q-learning update
    # interval, self-play save interval, and whether to render).
    parser = argparse.ArgumentParser()
    parser.add_argument('--algo', type=str, default='ac' ,choices={'ac', 'mfac', 'mfq', 'il'}, help='选择算法')
    parser.add_argument('--n_round', type=int, default=10, help='设置训练局数')
    parser.add_argument('--max_steps', type=int, default=5, help='设置最大步数')
    parser.add_argument('--map_size', type=int, default=40, help='设置地图的大小')
    parser.add_argument('--update_every', type=int, default=5, help='设置Q学习更新间隔, optional')
    parser.add_argument('--save_every', type=int, default=10, help='设置self-play更新间隔')
    parser.add_argument('--render',default=True, help='渲染与否(if true, will render every save)')
    args = parser.parse_args()
    # Initialise the battle environment.
    env = magent.GridWorld('battle', map_size=args.map_size)
    env.set_render_dir(os.path.join(BASE_DIR, 'battle_model', 'build/render'))  # render output directory
    handles = env.get_handles()  # list of c_int control handles for the two armies: [c_int(0), c_int(1)]
    tf_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    tf_config.gpu_options.allow_growth = True
    # NOTE(review): 'data/tmp'.format(args.algo) has no placeholder, so the
    # algo name is silently dropped from the log path — was 'data/tmp/{}'
    # intended?
    log_dir = os.path.join(BASE_DIR,'data/tmp'.format(args.algo))
    model_dir = os.path.join(BASE_DIR, 'data/models/{}'.format(args.algo))
    if args.algo in ['mfq', 'mfac']:  # whether to use the mean-field variant
        use_mf = True
    else:
        use_mf = False
    start_from = 0
    sess = tf.Session(config=tf_config)
    # Instantiate the two models (Q-network / actor-critic), one per side.
    models = [spawn_ai(args.algo, sess, env, handles[0], args.algo + '-me', args.max_steps),
              spawn_ai(args.algo, sess, env, handles[1], args.algo + '-opponent', args.max_steps)]
    sess.run(tf.global_variables_initializer())
    # ============ network parameter visualisation (TensorBoard) ============
    mergerd = tf.summary.merge_all()
    writer = tf.summary.FileWriter("/home/future/mfrl/Battle_model/logs",sess.graph)
    # Initialise the runner with sess, models, env, handles and the play fn.
    runner = tools.Runner(sess,
                          env,
                          handles,
                          args.map_size,
                          args.max_steps,
                          models,
                          play,
                          render_every=args.save_every if args.render else 0,
                          save_every=args.save_every,
                          tau=0.01,
                          log_name=args.algo,
                          log_dir=log_dir,
                          model_dir=model_dir,
                          train=True)
    # Train round by round.
    for k in range(start_from, start_from + args.n_round):
        # Exploration eps decays piecewise from 1 toward 0.05 as k reaches
        # 1400, then stays flat. (The original comment claimed a floor of
        # 0.005, but the decay values here end at 0.05.)
        eps = magent.utility.piecewise_decay(k, [0, 700, 1400], [1, 0.2, 0.05])
        runner.run(eps, k, use_mf)
"""
#============= 结构分析 ==============
总体main:
env = magent.GridWorld('battle', map_size=args.map_size)
handles = env.get_handles()
sess = tf.Session(config=tf_config)
model = model = AC(human_name,sess,env, handle) 算法模型类的实例化对象
runner = tools.Runner(sees, env, handles, models, play) play是进行游戏的函数的引用
for k in range(episode):
runner.run(eps,k,use_mf) 在其中调用 paly()方法
=================================
模型算法:
值函数Critic
策略函数Actor
class AC(object):
self.view_space = env.get_view_space(handle)
self.feature_space = env.get_feature_space(handle)
self.num_actions = env.get_action_space(handle)[0]
self._create_network(view_space, feature_space)
def act():
sess.run(policy)
策略函数 获取动作
def _create_network():
inputs =
policy =
value =
def train():
执行训练网络的操作 输入view feature action reward
_, pg_loss, vf_loss, ent_loss, state_value = self.sess.run(
[self.train_op, self.pg_loss, self.vf_loss, self.reg_loss, self.value], feed_dict={
self.input_view: view,
self.input_feature: feature,
self.action: action,
self.reward: reward,
})
def save(): 保存与加载模型
def laod():
智能体与环境进行一步一个动作的交互
智能体执行动作 环境进行反馈更新
def play(env, n_round, map_size, max_steps, handles, models):
env.reset()
generate_map()
while not done:
act = model[i].act() 获取智能体的动作
done = env.setp()环境进行反馈
model[i].train()模型训练更新 梯度更新
进行一局的每一步
结束后进行AC算法的梯度更新
class Runner():
def run():
max_nums, nums, agent_r_records, total_rewards = self.play(env=self.env,
n_round=iteration,
map_size=self.map_size,
max_steps=self.max_steps,
handles=self.handles,
models=self.models,
print_every=50,
eps=variant_eps,
render=(iteration + 1) % self.render_every if self.render_every > 0 else False,
use_mean=use_mean,
train=self.train)
每一局调用一次play
每一局结束之后 反馈最后的奖励结果
更新模型
""" | StarcoderdataPython |
106453 | <gh_stars>0
import logging
import threading
import time
import ipcqueue.posixmq
import prometheus_client.registry
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.core.management.base import BaseCommand
from ...backends.prometheus import PrometheusMultiprocessMonitoring
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Drain metric updates from the monitoring IPC queue (consumer) while a
    background thread periodically renders and publishes the registry."""

    help = "Process Prometheus metrics"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        monitoring = apps.get_app_config(
            "django_prometheus_monitoring"
        ).monitoring
        if not isinstance(monitoring, PrometheusMultiprocessMonitoring):
            raise ImproperlyConfigured(
                "Monitoring backend is not instance of "
                "PrometheusMultiprocessMonitoring"
            )
        self.monitoring = monitoring
        # Serialises access to self.registry between the consumer (handle)
        # and the collector thread (count_stats).
        self.metrics_lock = threading.Lock()
        self.stop_event = threading.Event()
        self.registry = prometheus_client.registry.CollectorRegistry(
            auto_describe=True
        )

    def handle(self, *args, **options):
        """Entry point: start the collector thread, then consume metrics."""
        metrics_collector = threading.Thread(target=self.count_stats)
        metrics_collector.start()
        try:
            self.consume_metrics()
        finally:
            # Signal the collector thread so it can terminate cleanly.
            self.stop_event.set()

    def consume_metrics(self):
        """Blockingly pop metric updates off the queue and apply them."""
        while 1:
            try:
                (
                    metric_cls,
                    name,
                    documentation,
                    labelnames,
                    method,
                    value,
                    labelvalues,
                    labelkwargs,
                ) = self.monitoring.queue.get(block=True)
                if name not in self.monitoring.metrics:
                    # NOTE(review): the freshly built metric is not stored
                    # back into self.monitoring.metrics here — confirm that
                    # metric_cls registers itself, otherwise it is rebuilt
                    # on every update.
                    metric = metric_cls(name, documentation, labelnames)
                else:
                    metric = self.monitoring.metrics[name]
                prometheus_metric = metric.get_prometheus_inst(self.registry)
                with self.metrics_lock:
                    if labelvalues or labelkwargs:
                        prometheus_metric = prometheus_metric.labels(
                            *labelvalues, **labelkwargs
                        )
                    getattr(prometheus_metric, method)(value)
            except ipcqueue.posixmq.QueueError as exc:
                logger.error("Queue error: %d %s", exc.errno, exc.msg)
            except Exception as exc:
                logger.exception("Metrics consumer error: %s", exc)

    def count_stats(self):
        """Render the registry roughly every 5 seconds and publish it.

        BUGFIX: this previously looped `while 1` and therefore never exited
        after handle() set stop_event, leaving this non-daemon thread alive
        and hanging the process on shutdown.
        """
        while not self.stop_event.is_set():
            try:
                with self.metrics_lock:
                    stats = prometheus_client.generate_latest(self.registry)
                self.monitoring.set_stats(stats)
                wait_for_event(self.stop_event, 5.0)
            except Exception as exc:
                logger.exception("Metrics collector error: %s", exc)
def wait_for_event(event, seconds, step=0.1):
    """Poll ``event`` for up to ``seconds``, checking every ``step`` seconds.

    Returns as soon as the event is set, or after the full timeout elapses.

    Bug fix: the loop previously slept a hard-coded ``0.1`` seconds per
    iteration regardless of ``step``, so a caller passing a larger ``step``
    waited far less than ``seconds`` in total.
    """
    for _ in range(int(seconds / step)):
        if event.is_set():
            return
        time.sleep(step)
def run_metrics_consumer():
    """Invoke this module's management command (named after the module)."""
    command_name = __name__.rsplit(".", 1)[-1]
    call_command(command_name)
# portfolio/blog/urls.py
from django.conf.urls import url
from portfolio.blog.views import BlogFormView, BlogView
# Routes for the blog app.
urlpatterns = [
    # Form for creating a new post.
    url(r'^add/$', BlogFormView.as_view(), name='blog_add'),
    # Blog index / listing.
    url(r'^$', BlogView.as_view(), name='blog_index'),
]
from flask import Flask
from flask_socketio import SocketIO
from .boxoffice import *
# Flask application wired to Socket.IO; file logging is only enabled
# when the app is not in debug mode.
app = Flask(__name__)
if not app.debug:
    import os
    base_dir = os.path.split(os.path.realpath(__file__))[0]
    import logging
    from logging.handlers import RotatingFileHandler
    # Rotate the log at 10 MiB, keeping at most 5 backup files.
    file_handler = RotatingFileHandler(base_dir+'/box-office.log', maxBytes=10*1024*1024,backupCount=5)
    file_handler.setLevel(logging.WARNING)
    app.logger.addHandler(file_handler)
socketio = SocketIO(app)
# Register the box-office event handlers on their namespace.
socketio.on_namespace(BoxOffice('/boxoffice'))
from django.test import TestCase
from survey.forms.question_set import BatchForm
from survey.models.locations import *
from survey.models import EnumerationArea
from survey.models import Interviewer
from survey.models.access_channels import *
from survey.models.batch import Batch
from survey.models.surveys import Survey
class BatchFormTest(TestCase):
    """Exercise BatchForm validation paths."""

    def test_valid(self):
        # Fixtures: a location hierarchy, an enumeration area and an
        # interviewer with an ODK access channel.
        self.country = LocationType.objects.create(
            name='Country', slug='country')
        self.africa = Location.objects.create(name='Africa', type=self.country)
        self.city_ea = EnumerationArea.objects.create(name="CITY EA")
        self.city_ea.locations.add(self.africa)
        self.investigator_1 = Interviewer.objects.create(
            name="Investigator", ea=self.city_ea, gender='1',
            level_of_education='Primary', language='Eglish', weights=0)
        # NOTE: 'reponse_timeout' is the model field's actual spelling.
        access = ODKAccess.objects.create(
            interviewer=self.investigator_1, user_identifier='Test',
            is_active=True, reponse_timeout=1000, duration='H',
            odk_token='Test')
        form = BatchForm({'name': 'Batch 1',
                          'description': 'description goes here'})
        self.assertFalse(form.is_valid())

    def test_invalid(self):
        form = BatchForm({'name': 'test',
                          'description': 'description goes here'})
        self.assertFalse(form.is_valid())

    def test_field_required(self):
        # An empty name must trigger the stock "required" error.
        form = BatchForm({'name': '', 'description': ''})
        self.assertFalse(form.is_valid())
        self.assertEqual(['This field is required.'],
                         form.errors['name'])

    def test_form_should_be_invalid_if_name_already_exists_on_the_same_survey(self):
        survey = Survey.objects.create(name="very fast")
        Batch.objects.create(survey=survey, name='Batch A',
                             description='description')
        form = BatchForm(data={'name': 'Batch A',
                               'description': 'description goes here'},
                         instance=Batch(survey=survey))
        self.assertFalse(form.is_valid())
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation of data model for SVC monitor
"""
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common.vnc_db import DBBase
from cfgm_common import svc_info
class DBBaseSM(DBBase):
    """Base class for all service-monitor config-DB cache objects."""
    # NOTE: this stores the *module* name; concrete subclasses override
    # obj_type with their own resource-type string.
    obj_type = __name__

    def evaluate(self):
        # Implement in the derived class
        pass
class LoadbalancerSM(DBBaseSM):
    """Cached view of a 'loadbalancer' config object.

    Tracks its VMI, owning service instance and listeners, and pushes
    configuration to the loadbalancer agent.
    """
    _dict = {}
    obj_type = 'loadbalancer'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interface = None
        self.service_instance = None
        self.loadbalancer_listeners = set()
        # Last config sent to the driver; used by the agent for diffing.
        self.last_sent = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields and refs from obj (re-read if None)."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.display_name = obj.get('display_name', None)
        self.parent_uuid = obj['parent_uuid']
        self.id_perms = obj.get('id_perms', None)
        self.params = obj.get('loadbalancer_properties', None)
        self.provider = obj.get('loadbalancer_provider', None)
        self.update_single_ref('virtual_machine_interface', obj)
        self.update_single_ref('service_instance', obj)
        self.update_multiple_refs('loadbalancer_listener', obj)
    # end update

    def add(self):
        self.last_sent = \
            self._manager.loadbalancer_agent.loadbalancer_add(self)
    # end add

    def evaluate(self):
        self.add()
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        """Tell the agent, clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.loadbalancer_agent.delete_loadbalancer(obj)
        obj.update_single_ref('virtual_machine_interface', {})
        obj.update_single_ref('service_instance', {})
        obj.update_multiple_refs('loadbalancer_listener', {})
        del cls._dict[uuid]
    # end delete
# end class LoadbalancerSM
class LoadbalancerListenerSM(DBBaseSM):
    """Cached view of a 'loadbalancer_listener' config object."""
    _dict = {}
    obj_type = 'loadbalancer_listener'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.loadbalancer = None
        self.loadbalancer_pool = None
        # Last config pushed to the loadbalancer agent.
        self.last_sent = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields and refs from obj (re-read if None)."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.display_name = obj.get('display_name', None)
        self.parent_uuid = obj['parent_uuid']
        self.id_perms = obj.get('id_perms', None)
        self.params = obj.get('loadbalancer_listener_properties', None)
        self.update_single_ref('loadbalancer', obj)
        self.update_single_ref('loadbalancer_pool', obj)
    # end update

    def add(self):
        self.last_sent = \
            self._manager.loadbalancer_agent.listener_add(self)
    # end add

    def evaluate(self):
        self.add()
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        """Tell the agent, clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.loadbalancer_agent.delete_listener(obj)
        obj.update_single_ref('loadbalancer', {})
        obj.update_single_ref('loadbalancer_pool', {})
        del cls._dict[uuid]
    # end delete
# end class LoadbalancerListenerSM
class LoadbalancerPoolSM(DBBaseSM):
    """Cached view of a 'loadbalancer_pool' config object.

    add() cascades to the agent for the pool itself, then for each member
    and for the associated virtual IP.
    """
    _dict = {}
    obj_type = 'loadbalancer_pool'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.members = set()
        self.loadbalancer_healthmonitors = set()
        self.service_instance = None
        self.virtual_machine_interface = None
        self.virtual_ip = None
        self.loadbalancer_listener = None
        self.loadbalancer_id = None
        self.last_sent = None
        self.custom_attributes = []
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh properties, custom attributes, members and refs."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.params = obj.get('loadbalancer_pool_properties', None)
        self.provider = obj.get('loadbalancer_pool_provider', None)
        kvpairs = obj.get('loadbalancer_pool_custom_attributes', None)
        if kvpairs:
            self.custom_attributes = kvpairs.get('key_value_pair', [])
        self.members = set([lm['uuid']
                            for lm in obj.get('loadbalancer_members', [])])
        self.id_perms = obj.get('id_perms', None)
        self.parent_uuid = obj['parent_uuid']
        self.display_name = obj.get('display_name', None)
        self.update_single_ref('service_instance', obj)
        self.update_single_ref('virtual_ip', obj)
        self.update_single_ref('loadbalancer_listener', obj)
        self.update_single_ref('virtual_machine_interface', obj)
        self.update_multiple_refs('loadbalancer_healthmonitor', obj)
    # end update

    def add(self):
        """Push the pool, its members and its VIP to the agent."""
        if self.loadbalancer_listener:
            ll_obj = LoadbalancerListenerSM.get(self.loadbalancer_listener)
            self.loadbalancer_id = ll_obj.loadbalancer
        self.last_sent = \
            self._manager.loadbalancer_agent.loadbalancer_pool_add(self)
        if len(self.members):
            for member in self.members:
                member_obj = LoadbalancerMemberSM.get(member)
                if member_obj:
                    member_obj.last_sent = \
                        self._manager.loadbalancer_agent.loadbalancer_member_add(
                            member_obj)
        if self.virtual_ip:
            vip_obj = VirtualIpSM.get(self.virtual_ip)
            if vip_obj:
                vip_obj.last_sent = \
                    self._manager.loadbalancer_agent.virtual_ip_add(vip_obj)
    # end add

    def evaluate(self):
        self.add()
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        """Tell the agent, clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.loadbalancer_agent.delete_loadbalancer_pool(obj)
        obj.update_single_ref('service_instance', {})
        obj.update_single_ref('virtual_ip', {})
        obj.update_single_ref('loadbalancer_listener', {})
        obj.update_single_ref('virtual_machine_interface', {})
        obj.update_multiple_refs('loadbalancer_healthmonitor', {})
        del cls._dict[uuid]
    # end delete
# end class LoadbalancerPoolSM
class LoadbalancerMemberSM(DBBaseSM):
    """Cached view of a 'loadbalancer_member' config object (child of a
    loadbalancer pool)."""
    _dict = {}
    obj_type = 'loadbalancer_member'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.loadbalancer_pool = {}
        self.last_sent = None
        self.update(obj_dict)
        if self.loadbalancer_pool:
            parent = LoadbalancerPoolSM.get(self.loadbalancer_pool)
            # The pool may not be cached yet (out-of-order notifications);
            # guard against None like delete() does instead of raising
            # AttributeError on parent.members.
            if parent:
                parent.members.add(self.uuid)
    # end __init__

    def update(self, obj=None):
        """Refresh member properties and the owning pool uuid."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.params = obj.get('loadbalancer_member_properties', None)
        self.loadbalancer_pool = self.get_parent_uuid(obj)
        self.id_perms = obj.get('id_perms', None)
    # end update

    def evaluate(self):
        pass
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        """Tell the agent, detach from the pool and drop the cache entry."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.loadbalancer_agent.delete_loadbalancer_member(obj)
        if obj.loadbalancer_pool:
            parent = LoadbalancerPoolSM.get(obj.loadbalancer_pool)
            if parent:
                parent.members.discard(obj.uuid)
        del cls._dict[uuid]
    # end delete
# end class LoadbalancerMemberSM
class VirtualIpSM(DBBaseSM):
    """Cached view of a 'virtual_ip' config object."""
    _dict = {}
    obj_type = 'virtual_ip'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interface = None
        self.loadbalancer_pool = None
        # Last config pushed to the loadbalancer agent (set by the pool).
        self.last_sent = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields and refs from obj (re-read if None)."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.params = obj.get('virtual_ip_properties', None)
        self.update_single_ref('virtual_machine_interface', obj)
        self.update_single_ref('loadbalancer_pool', obj)
        self.id_perms = obj.get('id_perms', None)
        self.parent_uuid = obj['parent_uuid']
        self.display_name = obj.get('display_name', None)
    # end update

    def evaluate(self):
        pass
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        """Tell the agent, clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.loadbalancer_agent.delete_virtual_ip(obj)
        obj.update_single_ref('virtual_machine_interface', {})
        obj.update_single_ref('loadbalancer_pool', {})
        del cls._dict[uuid]
    # end delete
# end class VirtualIpSM
class HealthMonitorSM(DBBaseSM):
    """Cached view of a 'loadbalancer_healthmonitor' config object.

    Unlike most siblings, the agent push (update_hm) happens directly in
    update() rather than in add()/evaluate().
    """
    _dict = {}
    obj_type = 'loadbalancer_healthmonitor'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.loadbalancer_pools = set()
        self.last_sent = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields/refs and push the change to the agent."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.params = obj.get('loadbalancer_healthmonitor_properties', None)
        self.update_multiple_refs('loadbalancer_pool', obj)
        self.id_perms = obj.get('id_perms', None)
        self.parent_uuid = obj['parent_uuid']
        self.display_name = obj.get('display_name', None)
        self.last_sent = self._manager.loadbalancer_agent.update_hm(self)
    # end update

    def evaluate(self):
        pass
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        """Clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('loadbalancer_pool', {})
        del cls._dict[uuid]
    # end delete
# end class HealthMonitorSM
class VirtualMachineSM(DBBaseSM):
    """Cached view of a 'virtual_machine' config object."""
    _dict = {}
    obj_type = 'virtual_machine'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.service_instance = None
        # Remembers the owning SI uuid even after the ref disappears, so
        # evaluate() can detect orphaned service VMs.
        self.service_id = None
        self.virtual_router = None
        self.virtual_machine_interfaces = set()
        self.virtualization_type = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh refs and decode fields packed into display_name."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.update_single_ref('service_instance', obj)
        self.update_single_ref('virtual_router', obj)
        self.update_multiple_refs('virtual_machine_interface', obj)
        if self.service_instance:
            self.service_id = self.service_instance
        self.display_name = obj.get('display_name', None)
        if self.display_name is None:
            return
        # display_name for a service VM appears to be 5 '__'-separated
        # tokens: <domain>__<project>__<name>__<1-based index>__<virt type>
        # -- TODO confirm against the instance manager that writes it.
        display_list = self.display_name.split('__')
        if self.service_instance:
            if len(display_list) == 5:
                self.virtualization_type = display_list[-1]
                self.proj_fq_name = display_list[0:2]
                self.index = int(display_list[-2]) - 1
            else:
                self.index = -1
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_single_ref('service_instance', {})
        obj.update_single_ref('virtual_router', {})
        obj.update_multiple_refs('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        # A VM that used to belong to an SI but no longer does is stale.
        if self.service_id and not self.service_instance:
            self._manager.delete_service_instance(self)
# end VirtualMachineSM
class VirtualRouterSM(DBBaseSM):
    """Cached view of a 'virtual_router' config object, with vrouter-agent
    liveness bookkeeping."""
    _dict = {}
    obj_type = 'virtual_router'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.agent_state = False
        self.agent_down_count = 0
        self.virtual_machines = set()
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields and VM refs from obj (re-read if None)."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.update_multiple_refs('virtual_machine', obj)
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('virtual_machine', {})
        del cls._dict[uuid]
    # end delete

    def set_agent_state(self, up):
        # Mark the agent down only every 3rd consecutive missed check,
        # debouncing transient misses.
        if up:
            self.agent_down_count = 0
            self.agent_state = True
        else:
            self.agent_down_count += 1
            if not (self.agent_down_count % 3):
                self.agent_state = False

    def set_netns_version(self, netns_version):
        # NOTE: netns_version is only defined after this setter is called.
        self.netns_version = netns_version
# end VirtualRouterSM
class VirtualMachineInterfaceSM(DBBaseSM):
    """Cached view of a 'virtual_machine_interface' config object and its
    many references (VIP, LB, networks, IPs, route tables, etc.)."""
    _dict = {}
    obj_type = 'virtual_machine_interface'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.params = None
        self.if_type = None
        self.virtual_ip = None
        self.loadbalancer = None
        self.virtual_network = None
        self.virtual_machine = None
        self.loadbalancer_pool = None
        self.logical_interface = None
        self.instance_ips = set()
        self.floating_ips = set()
        self.interface_route_tables = set()
        self.service_health_checks = set()
        self.security_groups = set()
        self.service_instance = None
        self.instance_id = None
        self.physical_interface = None
        self.port_tuple = None
        self.fat_flow_ports = set()
        self.aaps = None
        obj_dict = self.update(obj_dict)
        self.add_to_parent(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh properties (AAPs, fat-flow ports) and all refs."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        if obj.get('virtual_machine_interface_properties', None):
            self.params = obj['virtual_machine_interface_properties']
            self.if_type = self.params.get('service_interface_type', None)
        self.aaps = obj.get('virtual_machine_interface_allowed_address_pairs', None)
        if self.aaps:
            self.aaps = self.aaps.get('allowed_address_pair', None)
        self.fat_flow_ports.clear()
        ffps = obj.get('virtual_machine_interface_fat_flow_protocols', None)
        if ffps:
            for ffp in ffps.get('fat_flow_protocol', []):
                if ffp['port']:
                    self.fat_flow_ports.add(ffp['port'])
        self.update_single_ref('virtual_ip', obj)
        self.update_single_ref('loadbalancer', obj)
        self.update_single_ref('loadbalancer_pool', obj)
        self.update_multiple_refs('instance_ip', obj)
        self.update_multiple_refs('floating_ip', obj)
        self.update_single_ref('virtual_network', obj)
        self.update_single_ref('virtual_machine', obj)
        self.update_single_ref('logical_interface', obj)
        self.update_multiple_refs('interface_route_table', obj)
        self.update_multiple_refs('service_health_check', obj)
        self.update_single_ref('physical_interface', obj)
        self.update_multiple_refs('security_group', obj)
        self.update_single_ref('port_tuple', obj)
        if self.virtual_machine:
            vm = VirtualMachineSM.get(self.virtual_machine)
            if vm:
                self.service_instance = vm.service_instance
        return obj
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clear every ref set in update() and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_single_ref('virtual_ip', {})
        obj.update_single_ref('loadbalancer', {})
        obj.update_single_ref('loadbalancer_pool', {})
        obj.update_multiple_refs('instance_ip', {})
        obj.update_multiple_refs('floating_ip', {})
        obj.update_single_ref('virtual_network', {})
        obj.update_single_ref('virtual_machine', {})
        obj.update_single_ref('logical_interface', {})
        obj.update_multiple_refs('interface_route_table', {})
        obj.update_multiple_refs('service_health_check', {})
        # BUG FIX: physical_interface is set in update() but was never
        # cleared here, leaving a stale back-reference on the PI object.
        obj.update_single_ref('physical_interface', {})
        obj.update_multiple_refs('security_group', {})
        obj.update_single_ref('port_tuple', {})
        obj.remove_from_parent()
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        vm = VirtualMachineSM.get(self.virtual_machine)
        if vm:
            self._manager.port_delete_or_si_link(vm, self)
        self._manager.port_tuple_agent.update_port_tuple(self)
# end VirtualMachineInterfaceSM
class ServiceInstanceSM(DBBaseSM):
    """Cached view of a 'service_instance' config object plus the launch
    state the instance manager needs (VMs, port tuples, HA/scale-out)."""
    _dict = {}
    obj_type = 'service_instance'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.service_template = None
        self.loadbalancer = None
        self.loadbalancer_pool = None
        self.interface_route_tables = {}
        self.service_health_checks = {}
        self.instance_ips = set()
        self.virtual_machines = set()
        self.logical_router = None
        self.params = None
        self.bindings = None
        self.kvps = None
        # Launch-state bookkeeping used by the instance manager.
        self.state = 'init'
        self.launch_count = 0
        self.back_off = -1
        self.image = None
        self.flavor = None
        self.max_instances = 0
        self.availability_zone = None
        self.ha_mode = None
        self.vr_id = None
        self.vn_changed = False
        self.local_preference = [None, None]
        self.vn_info = []
        self.port_tuples = set()
        obj_dict = self.update(obj_dict)
        self.set_children('port_tuple', obj_dict)
        self.add_to_parent(obj_dict)
        if self.ha_mode == 'active-standby':
            # Active-standby always runs exactly two instances with
            # active/standby local preferences.
            self.max_instances = 2
            self.local_preference = [svc_info.get_active_preference(),
                                     svc_info.get_standby_preference()]
    # end __init__

    def update(self, obj=None):
        """Refresh properties, refs and scale-out parameters; returns obj."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.proj_name = obj['fq_name'][-2]
        # Must run before self.params is overwritten below.
        self.check_vn_changes(obj)
        self.params = obj.get('service_instance_properties', None)
        self.bindings = obj.get('service_instance_bindings', None)
        if self.bindings:
            self.kvps = self.bindings.get('key_value_pair', None)
        self.update_single_ref('service_template', obj)
        self.update_single_ref('loadbalancer', obj)
        self.update_single_ref('loadbalancer_pool', obj)
        self.update_single_ref('logical_router', obj)
        self.update_multiple_refs_with_attr('interface_route_table', obj)
        self.update_multiple_refs_with_attr('service_health_check', obj)
        self.update_multiple_refs('instance_ip', obj)
        self.update_multiple_refs('virtual_machine', obj)
        self.id_perms = obj.get('id_perms', None)
        if not self.params:
            return obj
        self.vr_id = self.params.get('virtual_router_id', None)
        self.ha_mode = self.params.get('ha_mode', None)
        if self.ha_mode != 'active-standby':
            scale_out = self.params.get('scale_out', None)
            if scale_out:
                self.max_instances = scale_out.get('max_instances', 1)
        return obj
    # end update

    def check_vn_changes(self, obj):
        """Flag vn_changed when an interface's VN differs from the cached
        params (compares positionally; extra/missing entries are skipped)."""
        self.vn_changed = False
        if not self.params or not obj.get('service_instance_properties'):
            return
        old_ifs = self.params.get('interface_list', [])
        new_ifs = obj['service_instance_properties'].get('interface_list', [])
        for index in range(0, len(old_ifs)):
            try:
                old_if = old_ifs[index]
                new_if = new_ifs[index]
            except IndexError:
                continue
            if not old_if['virtual_network'] or not new_if['virtual_network']:
                continue
            if old_if['virtual_network'] != new_if['virtual_network']:
                self.vn_changed = True
                return
    # end check_vn_changes

    @classmethod
    def delete(cls, uuid):
        """Clear all refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_single_ref('service_template', {})
        obj.update_single_ref('loadbalancer', {})
        obj.update_single_ref('loadbalancer_pool', {})
        obj.update_single_ref('logical_router', {})
        obj.update_multiple_refs_with_attr('interface_route_table', {})
        obj.update_multiple_refs_with_attr('service_health_check', {})
        obj.update_multiple_refs('instance_ip', {})
        obj.update_multiple_refs('virtual_machine', {})
        obj.remove_from_parent()
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        """(Re)launch the SI and refresh all of its port tuples."""
        self.state = 'launch'
        self._manager.create_service_instance(self)
        for pt_id in self.port_tuples:
            self._manager.port_tuple_agent.update_port_tuple(pt_id=pt_id)
# end class ServiceInstanceSM
class ServiceTemplateSM(DBBaseSM):
    """Cached view of a 'service_template' config object."""
    _dict = {}
    obj_type = 'service_template'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.service_instances = set()
        # Default when the template does not specify one.
        self.virtualization_type = 'virtual-machine'
        self.service_appliance_set = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh template properties and refs from obj (re-read if None)."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.params = obj.get('service_template_properties')
        if self.params:
            self.virtualization_type = self.params.get(
                'service_virtualization_type') or 'virtual-machine'
        self.update_multiple_refs('service_instance', obj)
        self.update_single_ref('service_appliance_set', obj)
        self.id_perms = obj.get('id_perms', None)
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('service_instance', {})
        obj.update_single_ref('service_appliance_set', {})
        del cls._dict[uuid]
    # end delete
# end class ServiceTemplateSM
class VirtualNetworkSM(DBBaseSM):
    """Cached view of a 'virtual_network' config object."""
    _dict = {}
    obj_type = 'virtual_network'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interfaces = set()
        obj_dict = self.update(obj_dict)
        self.add_to_parent(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields and VMI refs; returns obj for __init__."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.update_multiple_refs('virtual_machine_interface', obj)
        return obj
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('virtual_machine_interface', {})
        obj.remove_from_parent()
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        """Re-launch any SI whose interface list references this VN."""
        # NOTE(review): iterating the class itself presumably yields the
        # cached uuids via the DBBase metaclass -- confirm in
        # cfgm_common.vnc_db.
        for si_id in ServiceInstanceSM:
            si = ServiceInstanceSM.get(si_id)
            intf_list = []
            if si.params:
                intf_list = si.params.get('interface_list', [])
            for intf in intf_list:
                if (':').join(self.fq_name) in intf.values():
                    self._manager.create_service_instance(si)
# end class VirtualNetworkSM
class FloatingIpSM(DBBaseSM):
    """Cached view of a 'floating_ip' config object."""
    _dict = {}
    obj_type = 'floating_ip'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.address = None
        self.virtual_machine_interfaces = set()
        self.virtual_ip = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh the FIP address and VMI refs from obj (re-read if None)."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.address = obj['floating_ip_address']
        self.update_multiple_refs('virtual_machine_interface', obj)
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        self._manager.netns_manager.add_fip_to_vip_vmi(self)
# end class FloatingIpSM
class InstanceIpSM(DBBaseSM):
    """Cached view of an 'instance_ip' config object."""
    _dict = {}
    obj_type = 'instance_ip'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.address = None
        self.family = None
        self.service_instance = None
        self.service_instance_ip = None
        self.instance_ip_secondary = None
        self.secondary_tracking_ip = None
        self.service_health_check_ip = None
        self.virtual_machine_interfaces = set()
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh IP attributes and refs from obj (re-read if None)."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.instance_ip_mode = obj.get('instance_ip_mode', None)
        self.service_instance_ip = obj.get('service_instance_ip', False)
        self.instance_ip_secondary = obj.get('instance_ip_secondary', False)
        self.secondary_tracking_ip = obj.get('secondary_ip_tracking_ip', None)
        self.service_health_check_ip = obj.get('service_health_check_ip', None)
        # Defaults to IPv4 when the family is unspecified.
        self.family = obj.get('instance_ip_family', 'v4')
        self.address = obj.get('instance_ip_address', None)
        self.update_multiple_refs('virtual_machine_interface', obj)
        self.update_single_ref('service_instance', obj)
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_single_ref('service_instance', {})
        obj.update_multiple_refs('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        self._manager.port_tuple_agent.delete_shared_iip(self)
# end class InstanceIpSM
class LogicalInterfaceSM(DBBaseSM):
    """Cached view of a 'logical_interface' config object.

    The parent is either a physical interface or a physical router; the
    logical interface registers itself in the parent's set.
    """
    _dict = {}
    obj_type = 'logical_interface'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interface = None
        self.logical_interface_vlan_tag = 0
        self.update(obj_dict)
        # Initialize parent so it is never referenced unbound when neither
        # parent attribute resolves to a cached object.
        parent = None
        if self.physical_interface:
            parent = PhysicalInterfaceSM.get(self.physical_interface)
        elif self.physical_router:
            parent = PhysicalRouterSM.get(self.physical_router)
        if parent:
            parent.logical_interfaces.add(self.uuid)
    # end __init__

    def update(self, obj=None):
        """Refresh parent linkage, VMI ref and VLAN tag."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        # Exactly one of physical_router / physical_interface is set,
        # depending on the parent type.
        if obj['parent_type'] == 'physical-router':
            self.physical_router = self.get_parent_uuid(obj)
            self.physical_interface = None
        else:
            self.physical_interface = self.get_parent_uuid(obj)
            self.physical_router = None
        self.update_single_ref('virtual_machine_interface', obj)
        self.name = obj['fq_name'][-1]
        self.logical_interface_vlan_tag = obj.get(
            'logical_interface_vlan_tag', 0)
    # end update

    @classmethod
    def delete(cls, uuid):
        """Detach from the parent, clear refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        parent = None
        if obj.physical_interface:
            parent = PhysicalInterfaceSM.get(obj.physical_interface)
        elif obj.physical_router:
            # BUG FIX: the router was previously looked up in
            # PhysicalInterfaceSM (copy-paste error), so the back-reference
            # was never removed from the router.
            parent = PhysicalRouterSM.get(obj.physical_router)
        if parent:
            parent.logical_interfaces.discard(obj.uuid)
        obj.update_single_ref('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete
# end LogicalInterfaceSM
class PhysicalInterfaceSM(DBBaseSM):
    """Cached view of a 'physical_interface' config object (child of a
    physical router)."""
    _dict = {}
    obj_type = 'physical_interface'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        # BUG FIX: fq_name and virtual_machine_interfaces used to be class
        # attributes, so the mutable set was shared by every instance.
        # They are now per-instance state, initialized before update().
        self.fq_name = None
        self.virtual_machine_interfaces = set()
        self.update(obj_dict)
        pr = PhysicalRouterSM.get(self.physical_router)
        if pr:
            pr.physical_interfaces.add(self.uuid)
    # end __init__

    def update(self, obj=None):
        """Refresh parent router, child LIs and VMI refs."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.fq_name = obj['fq_name']
        self.physical_router = self.get_parent_uuid(obj)
        self.logical_interfaces = set([li['uuid'] for li in
                                       obj.get('logical_interfaces', [])])
        self.update_multiple_refs('virtual_machine_interface', obj)
    # end update

    @classmethod
    def delete(cls, uuid):
        """Detach from the router, clear refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        pr = PhysicalRouterSM.get(obj.physical_router)
        if pr:
            pr.physical_interfaces.discard(obj.uuid)
        obj.update_multiple_refs('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete
# end PhysicalInterfaceSM
class PhysicalRouterSM(DBBaseSM):
    """Cached view of a 'physical_router' config object."""
    _dict = {}
    obj_type = 'physical_router'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh management IP, vendor and child-interface uuid sets."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.management_ip = obj.get('physical_router_management_ip')
        self.vendor = obj.get('physical_router_vendor_name')
        self.physical_interfaces = {
            pi['uuid'] for pi in obj.get('physical_interfaces', [])}
        self.logical_interfaces = {
            li['uuid'] for li in obj.get('logical_interfaces', [])}
    # end update

    @classmethod
    def delete(cls, uuid):
        """Drop the cached router, if present."""
        cls._dict.pop(uuid, None)
    # end delete
# end PhysicalRouterSM
class ProjectSM(DBBaseSM):
    """Cached view of a 'project' config object."""
    _dict = {}
    obj_type = 'project'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.service_instances = set()
        self.virtual_networks = set()
        obj_dict = self.update(obj_dict)
        # Link any already-cached child virtual networks to this project.
        self.set_children('virtual_network', obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields and SI refs; returns obj for __init__."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.update_multiple_refs('service_instance', obj)
        return obj
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('service_instance', {})
        del cls._dict[uuid]
    # end delete
# end ProjectSM
class DomainSM(DBBaseSM):
    """Cached view of a 'domain' config object (fq_name only)."""
    _dict = {}
    obj_type = 'domain'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Cache the domain's fully-qualified name."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.fq_name = obj['fq_name']
    # end update

    @classmethod
    def delete(cls, uuid):
        """Forget the cached domain, if present."""
        cls._dict.pop(uuid, None)
    # end delete
# end DomainSM
class SecurityGroupSM(DBBaseSM):
    """Cached view of a 'security_group' config object."""
    _dict = {}
    obj_type = 'security_group'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interfaces = set()
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields and VMI refs from obj (re-read if None)."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.update_multiple_refs('virtual_machine_interface', obj)
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        self._manager.netns_manager.add_sg_to_vip_vmi(self)
# end SecurityGroupSM
class InterfaceRouteTableSM(DBBaseSM):
    """Cached view of an 'interface_route_table' config object."""
    _dict = {}
    obj_type = 'interface_route_table'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interfaces = set()
        self.service_instances = {}
        self.si_uuid = None
        self.if_type = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh refs and decode the SI-generated name, if any."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.update_multiple_refs('virtual_machine_interface', obj)
        self.update_multiple_refs_with_attr('service_instance', obj)
        # SI-generated tables are named "<si-uuid> <interface-type>".
        name_split = self.name.split(' ')
        if len(name_split) == 2:
            self.si_uuid = name_split[0]
            self.if_type = name_split[1]
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clear back-refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('virtual_machine_interface', {})
        obj.update_multiple_refs_with_attr('service_instance', {})
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        # An SI-generated table with no VMIs left is orphaned; remove it.
        if self.si_uuid and not len(self.virtual_machine_interfaces):
            self._manager.delete_interface_route_table(self.uuid)
# end InterfaceRouteTableSM
class ServiceApplianceSM(DBBaseSM):
    """Cached view of a 'service_appliance' config object (child of a
    service appliance set)."""
    _dict = {}
    obj_type = 'service_appliance'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.service_appliance_set = None
        self.physical_interfaces = {}
        self.kvpairs = []
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh properties, interface map and parent linkage."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        kvpairs = obj.get('service_appliance_properties', None)
        if kvpairs:
            self.kvpairs = kvpairs.get('key_value_pair', [])
        self.user_credential = obj.get(
            'service_appliance_user_credentials', None)
        self.ip_address = obj.get('service_appliance_ip_address', None)
        self.service_appliance_set = self.get_parent_uuid(obj)
        # Map interface-type ('left'/'right'/...) -> PI uuid.
        self.physical_interfaces = {}
        ref_objs = obj.get("physical_interface_refs",[])
        for ref in ref_objs:
            self.physical_interfaces[ref[
                'attr'].get('interface_type')] = ref['uuid']
        if self.service_appliance_set:
            parent = ServiceApplianceSetSM.get(self.service_appliance_set)
            parent.service_appliances.add(self.uuid)
    # end update

    @classmethod
    def delete(cls, uuid):
        """Detach from the parent set and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        if obj.service_appliance_set:
            parent = ServiceApplianceSetSM.get(obj.service_appliance_set)
            if parent:
                parent.service_appliances.discard(obj.uuid)
        del cls._dict[uuid]
    # end delete
# end ServiceApplianceSM
class ServiceApplianceSetSM(DBBaseSM):
    """Cached view of a 'service_appliance_set' config object; also
    loads/unloads the matching loadbalancer driver."""
    _dict = {}
    obj_type = 'service_appliance_set'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.service_appliances = set()
        self.service_template = None
        self.kvpairs = []
        # Default HA mode when the object does not specify one.
        self.ha_mode = "standalone"
        self.update(obj_dict)
    # end __init__

    def add(self):
        self._manager.loadbalancer_agent.load_driver(self)
    # end add

    def evaluate(self):
        self.add()

    def update(self, obj=None):
        """Refresh driver name, properties and child appliances."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.driver = obj.get('service_appliance_driver', None)
        self.update_single_ref("service_template", obj)
        kvpairs = obj.get('service_appliance_set_properties', None)
        if kvpairs:
            self.kvpairs = kvpairs.get('key_value_pair', [])
        self.service_appliances = set(
            [sa['uuid'] for sa in obj.get('service_appliances', [])])
        if 'service_appliance_ha_mode' in obj:
            self.ha_mode = obj['service_appliance_ha_mode']
    # end update

    @classmethod
    def delete(cls, uuid):
        """Unload the driver, clear refs and drop the cached object."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.loadbalancer_agent.unload_driver(obj)
        obj.update_single_ref("service_template",{})
        del cls._dict[uuid]
    # end delete
# end ServiceApplianceSetSM
class LogicalRouterSM(DBBaseSM):
    """Cached state for a logical-router config object."""
    _dict = {}
    obj_type = 'logical_router'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.service_instance = None
        self.virtual_network = None
        self.virtual_machine_interfaces = set()
        self.last_virtual_machine_interfaces = set()
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        # Refresh cached refs and naming from *obj*; reads from the API
        # backend when no dict is supplied.
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.fq_name = obj['fq_name']
        self.parent_uuid = obj['parent_uuid']
        self.update_single_ref('service_instance', obj)
        self.update_multiple_refs('virtual_machine_interface', obj)
        self.update_single_ref('virtual_network', obj)
        self.name = obj['fq_name'][-1]
    # end update

    @classmethod
    def delete(cls, uuid):
        # Tear down the SNAT instance first, then clear all refs and drop
        # the object from the local cache. No-op when uuid is unknown.
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.snat_agent.delete_snat_instance(obj)
        obj.update_single_ref('service_instance', {})
        obj.update_single_ref('virtual_network', {})
        obj.update_multiple_refs('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        # Re-evaluate (create/update) the SNAT service instance for this
        # router via the SNAT agent.
        self._manager.snat_agent.update_snat_instance(self)
# end LogicalRouterSM
class PortTupleSM(DBBaseSM):
    """Cached state for a port-tuple config object."""
    _dict = {}
    obj_type = 'port_tuple'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interfaces = set()
        # update() returns the API dict it used, so the parent link can be
        # registered without a second backend read.
        obj_dict = self.update(obj_dict)
        self.add_to_parent(obj_dict)
    # end __init__

    def update(self, obj=None):
        # Refresh cached fields and return the API dict that was used
        # (fetched from the backend when *obj* is None).
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.parent_uuid = self.get_parent_uuid(obj)
        self.update_multiple_refs('virtual_machine_interface', obj)
        self.name = obj['fq_name'][-1]
        return obj
    # end update

    @classmethod
    def delete(cls, uuid):
        # Clear refs, unlink from the parent, and drop from the cache.
        # No-op when uuid is unknown.
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('virtual_machine_interface', {})
        obj.remove_from_parent()
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        # Delegate re-evaluation to the port-tuple agent.
        self._manager.port_tuple_agent.update_port_tuple(pt_id=self.uuid)
# end PortTupleSM
class ServiceHealthCheckSM(DBBaseSM):
    """Cached state for a service-health-check config object."""
    _dict = {}
    obj_type = 'service_health_check'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interfaces = set()
        # Populated by update_multiple_refs_with_attr('service_instance', ...)
        self.service_instances = {}
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        # Refresh cached fields from *obj*; reads from the API when None.
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.parent_uuid = obj['parent_uuid']
        self.name = obj['fq_name'][-1]
        self.params = obj.get('service_health_check_properties', None)
        self.update_multiple_refs('virtual_machine_interface', obj)
        self.update_multiple_refs_with_attr('service_instance', obj)
    # end update

    @classmethod
    def delete(cls, uuid):
        # Clear all refs and drop from the cache. No-op when unknown.
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs_with_attr('service_instance', {})
        obj.update_multiple_refs('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete
# end ServiceHealthCheckSM
| StarcoderdataPython |
3272607 | <reponame>scjs/buckeye<gh_stars>10-100
"""Container for a chunk of speech bounded by long pauses.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .containers import Pause
class Utterance(object):
    """Iterable of Word and Pause instances comprising one chunk of speech.

    Parameters
    ----------
    words : list of Word and Pause, optional
        List of Word and Pause instances that comprise one speech chunk.
        Default is None.

    Attributes
    ----------
    beg
    end
    dur
    words
    """

    def __init__(self, words=None):
        if words is None:
            self._words = []
            return
        words = sorted(words, key=lambda word: float(word.beg))
        for word in words:
            if float(word.beg) > float(word.end):
                raise ValueError('Reversed items in utterance')
        for left, right in zip(words, words[1:]):
            if float(left.end) > float(right.beg):
                raise ValueError('Overlapping items in utterance')
        self._words = words

    def __repr__(self):
        return 'Utterance({})'.format(repr(self.words))

    def __str__(self):
        utt = []
        for word in self._words:
            if hasattr(word, 'orthography'):
                utt.append(word.orthography)
            elif hasattr(word, 'entry'):
                utt.append(word.entry)
            else:
                utt.append(str(word))
        return '<Utterance "{}">'.format(' '.join(utt))

    @property
    def beg(self):
        """Timestamp where the first item in the utterance begins."""
        try:
            return self._words[0].beg
        except IndexError:
            raise AttributeError('Utterance is empty')

    @property
    def end(self):
        """Timestamp where the last item in the utterance ends."""
        try:
            return self._words[-1].end
        except IndexError:
            raise AttributeError('Utterance is empty')

    @property
    def dur(self):
        """Duration of the utterance."""
        try:
            return self._words[-1].end - self._words[0].beg
        except IndexError:
            raise AttributeError('Utterance is empty')

    @property
    def words(self):
        """Chronological list of Word and Pause instances in this utterance."""
        return self._words

    def append(self, item):
        """Append an instance to this utterance.

        Parameters
        ----------
        item : Word or Pause instance
            Instance with `beg` and `end` attributes to be appended to this
            utterance. It must not overlap any existing item; items that
            merely touch (one's `end` equals another's `beg`) are allowed,
            matching the check done in `__init__`.

        Returns
        -------
        None
        """
        beg = float(item.beg)
        end = float(item.end)
        if beg > end:
            raise ValueError('Item beg timestamp: {0} is after item end '
                             'timestamp: {1}'.format(str(item.beg), str(item.end)))
        for word in self._words:
            # Two intervals overlap iff each starts strictly before the
            # other ends. The previous check only caught existing items
            # that *start* inside the new item, so an item nested inside
            # an existing word slipped through, and an item that merely
            # touched a following word (end == word.beg) was wrongly
            # rejected.
            if float(word.beg) < end and beg < float(word.end):
                raise ValueError('Item overlaps with existing items in utterance')
        self._words.append(item)
        self._words = sorted(self._words, key=lambda word: float(word.beg))

    def __iter__(self):
        return iter(self._words)

    def __getitem__(self, i):
        return self._words[i]

    def __len__(self):
        return len(self._words)

    def speech_rate(self, use_phonetic=True, ignore_missing_syllables=False):
        """Return the number of syllables per second in this utterance.

        Parameters
        ----------
        use_phonetic: bool, optional
            If True, this method counts syllables in the close phonetic
            transcriptions of the items in this utterance (see
            `Word.syllables`). If False, use the phonemic transcription to
            count syllables instead. Default is True.

        ignore_missing_syllables : bool, optional
            If True, then items in the utterance without a `syllables`
            property will be counted as having zero syllables when this
            method is called. If False, a ValueError will be raised if
            the utterance includes any items without a `syllables`
            property. Default is False.

        Returns
        -------
        rate : float
            The number of syllabic segments per second over the items in
            this utterance.
        """
        if not self._words:
            raise ValueError('Utterance is empty')
        syllable_count = 0
        for word in self._words:
            if hasattr(word, 'syllables'):
                syllable_count += word.syllables(use_phonetic)
            elif not ignore_missing_syllables:
                raise ValueError('All objects in Utterance must have a '
                                 'syllables property to calculate speech '
                                 'rate')
        return float(syllable_count) / float(self.dur)
def words_to_utterances(words, sep=0.5, strip_pauses=True):
    """Yield Utterance instances from iterable of Word and Pause instances.

    Generator that takes an iterable of Word and Pause instances, such as
    process_words(), and packs them into Utterance instances.

    A new Utterance is created at the start of the iterable passed to
    words_to_utterances(), and then whenever there is a sequence of Pause
    instances that add up to `sep` seconds or more of duration.

    Parameters
    ----------
    words : iterable object of Word and Pause instances

    sep : float, optional
        If more than `sep` seconds of Pause instances occur consecutively,
        yield the current Utterance instance and initialize a new one with
        no items. Default is 0.5.

    strip_pauses : bool, optional
        If True, then Pause instances are removed from the beginning and end of
        each Utterance before it is yielded. Default is True.

    Yields
    ------
    utt : Utterance
        An Utterance for each sequence of word entries delimited by
        >= `sep` seconds (default 0.5) of Pause instances.
    """
    utt = Utterance()
    # Running total of the *consecutive* pause duration seen so far, and
    # how many trailing items of `utt` are pauses (used for stripping).
    pause_duration = 0.0
    pause_count = 0
    for word in words:
        # if this item is a pause token...
        if isinstance(word, Pause):
            # optionally skip it if there are no words in the utterance yet
            if strip_pauses and len(utt) == 0:
                continue
            # if this item doesn't follow another pause, restart the
            # pause duration
            if not pause_count:
                pause_duration = word.dur
            # otherwise, add it to the cumulative pause duration
            else:
                pause_duration += word.dur
            pause_count += 1
        else:
            pause_count = 0
        utt.append(word)
        # if the total pause duration has reached `sep` seconds, return this
        # utterance and start a new one
        if pause_duration >= sep:
            # optionally remove any pauses at the end
            # (intra-module access to the private `_words` list)
            if strip_pauses and pause_count:
                utt._words = utt._words[:-pause_count]
            if len(utt) > 0:
                yield utt
            utt = Utterance()
            pause_duration = 0.0
            pause_count = 0
    # return the last utterance if there is one
    if strip_pauses and pause_count:
        utt._words = utt._words[:-pause_count]
    if len(utt) > 0:
        yield utt
| StarcoderdataPython |
4806432 | from setuptools import setup
def readme():
    """Return the project's README.md contents as a UTF-8 string."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
# Package metadata for the `smooth` distribution (cubic smoothing spline).
setup(
    name="smooth",
    version="0.1.2",
    description="Data approximation using a cubic smoothing spline",
    # The README (Markdown) is used as the long description shown on PyPI.
    long_description=readme(),
    long_description_content_type='text/markdown',
    license="MIT License",
    url="https://github.com/kevinmmendez/smooth",
    packages=["smooth"],
    python_requires='>=3.5',
    # Third-party runtime dependencies.
    install_requires=[
        "numpy",
        "pandas",
        "scipy"],
    author='<NAME>, <NAME>',
    author_email='<EMAIL>, <EMAIL>',
)
| StarcoderdataPython |
131538 | <filename>sdk/python/pulumi_azure/servicebus/get_namespace_authorization_rule.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetNamespaceAuthorizationRuleResult',
'AwaitableGetNamespaceAuthorizationRuleResult',
'get_namespace_authorization_rule',
'get_namespace_authorization_rule_output',
]
@pulumi.output_type
class GetNamespaceAuthorizationRuleResult:
    """
    A collection of values returned by getNamespaceAuthorizationRule.
    """
    # NOTE: generated by the Pulumi Terraform bridge (see file header);
    # keep the structure in sync with the generator rather than editing
    # it structurally by hand.
    def __init__(__self__, id=None, name=None, namespace_name=None, primary_connection_string=None, primary_connection_string_alias=None, primary_key=None, resource_group_name=None, secondary_connection_string=None, secondary_connection_string_alias=None, secondary_key=None):
        # Defensive checks: every value handed over by the Pulumi runtime
        # is expected to be a plain string; `pulumi.set` stores it on the
        # output-typed instance.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if namespace_name and not isinstance(namespace_name, str):
            raise TypeError("Expected argument 'namespace_name' to be a str")
        pulumi.set(__self__, "namespace_name", namespace_name)
        if primary_connection_string and not isinstance(primary_connection_string, str):
            raise TypeError("Expected argument 'primary_connection_string' to be a str")
        pulumi.set(__self__, "primary_connection_string", primary_connection_string)
        if primary_connection_string_alias and not isinstance(primary_connection_string_alias, str):
            raise TypeError("Expected argument 'primary_connection_string_alias' to be a str")
        pulumi.set(__self__, "primary_connection_string_alias", primary_connection_string_alias)
        if primary_key and not isinstance(primary_key, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", primary_key)
        if resource_group_name and not isinstance(resource_group_name, str):
            raise TypeError("Expected argument 'resource_group_name' to be a str")
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if secondary_connection_string and not isinstance(secondary_connection_string, str):
            raise TypeError("Expected argument 'secondary_connection_string' to be a str")
        pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
        if secondary_connection_string_alias and not isinstance(secondary_connection_string_alias, str):
            raise TypeError("Expected argument 'secondary_connection_string_alias' to be a str")
        pulumi.set(__self__, "secondary_connection_string_alias", secondary_connection_string_alias)
        if secondary_key and not isinstance(secondary_key, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", secondary_key)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="namespaceName")
    def namespace_name(self) -> str:
        return pulumi.get(self, "namespace_name")

    @property
    @pulumi.getter(name="primaryConnectionString")
    def primary_connection_string(self) -> str:
        """
        The primary connection string for the authorization rule.
        """
        return pulumi.get(self, "primary_connection_string")

    @property
    @pulumi.getter(name="primaryConnectionStringAlias")
    def primary_connection_string_alias(self) -> str:
        """
        The alias Primary Connection String for the ServiceBus Namespace, if the namespace is Geo DR paired.
        """
        return pulumi.get(self, "primary_connection_string_alias")

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> str:
        """
        The primary access key for the authorization rule.
        """
        return pulumi.get(self, "primary_key")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> str:
        return pulumi.get(self, "resource_group_name")

    @property
    @pulumi.getter(name="secondaryConnectionString")
    def secondary_connection_string(self) -> str:
        """
        The secondary connection string for the authorization rule.
        """
        return pulumi.get(self, "secondary_connection_string")

    @property
    @pulumi.getter(name="secondaryConnectionStringAlias")
    def secondary_connection_string_alias(self) -> str:
        """
        The alias Secondary Connection String for the ServiceBus Namespace
        """
        return pulumi.get(self, "secondary_connection_string_alias")

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> str:
        """
        The secondary access key for the authorization rule.
        """
        return pulumi.get(self, "secondary_key")
class AwaitableGetNamespaceAuthorizationRuleResult(GetNamespaceAuthorizationRuleResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Generated shim: the unreachable `yield` turns this into a
        # generator so the result can be used both with and without
        # `await`; awaiting immediately returns a plain result copy.
        if False:
            yield self
        return GetNamespaceAuthorizationRuleResult(
            id=self.id,
            name=self.name,
            namespace_name=self.namespace_name,
            primary_connection_string=self.primary_connection_string,
            primary_connection_string_alias=self.primary_connection_string_alias,
            primary_key=self.primary_key,
            resource_group_name=self.resource_group_name,
            secondary_connection_string=self.secondary_connection_string,
            secondary_connection_string_alias=self.secondary_connection_string_alias,
            secondary_key=self.secondary_key)
def get_namespace_authorization_rule(name: Optional[str] = None,
                                     namespace_name: Optional[str] = None,
                                     resource_group_name: Optional[str] = None,
                                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceAuthorizationRuleResult:
    """
    Use this data source to access information about an existing ServiceBus Namespace Authorization Rule.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_azure as azure

    example = azure.servicebus.get_namespace_authorization_rule(name="examplerule",
        namespace_name="examplenamespace",
        resource_group_name="example-resources")
    pulumi.export("ruleId", example.id)
    ```

    :param str name: Specifies the name of the ServiceBus Namespace Authorization Rule.
    :param str namespace_name: Specifies the name of the ServiceBus Namespace.
    :param str resource_group_name: Specifies the name of the Resource Group where the ServiceBus Namespace exists.
    """
    # Marshal the arguments into the provider-call payload.
    __args__ = dict()
    __args__['name'] = name
    __args__['namespaceName'] = namespace_name
    __args__['resourceGroupName'] = resource_group_name
    # Default invoke options and pin the plugin version (generated pattern).
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure:servicebus/getNamespaceAuthorizationRule:getNamespaceAuthorizationRule', __args__, opts=opts, typ=GetNamespaceAuthorizationRuleResult).value

    return AwaitableGetNamespaceAuthorizationRuleResult(
        id=__ret__.id,
        name=__ret__.name,
        namespace_name=__ret__.namespace_name,
        primary_connection_string=__ret__.primary_connection_string,
        primary_connection_string_alias=__ret__.primary_connection_string_alias,
        primary_key=__ret__.primary_key,
        resource_group_name=__ret__.resource_group_name,
        secondary_connection_string=__ret__.secondary_connection_string,
        secondary_connection_string_alias=__ret__.secondary_connection_string_alias,
        secondary_key=__ret__.secondary_key)
# The decorator supplies the implementation: it "lifts" the plain
# get_namespace_authorization_rule function so it accepts Output-wrapped
# arguments and returns an Output result; the `...` body is intentional.
@_utilities.lift_output_func(get_namespace_authorization_rule)
def get_namespace_authorization_rule_output(name: Optional[pulumi.Input[str]] = None,
                                            namespace_name: Optional[pulumi.Input[str]] = None,
                                            resource_group_name: Optional[pulumi.Input[str]] = None,
                                            opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNamespaceAuthorizationRuleResult]:
    """
    Use this data source to access information about an existing ServiceBus Namespace Authorization Rule.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_azure as azure

    example = azure.servicebus.get_namespace_authorization_rule(name="examplerule",
        namespace_name="examplenamespace",
        resource_group_name="example-resources")
    pulumi.export("ruleId", example.id)
    ```

    :param str name: Specifies the name of the ServiceBus Namespace Authorization Rule.
    :param str namespace_name: Specifies the name of the ServiceBus Namespace.
    :param str resource_group_name: Specifies the name of the Resource Group where the ServiceBus Namespace exists.
    """
    ...
| StarcoderdataPython |
3315589 | <filename>src/test/tests/plots/pseudocolor.py
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: pseudocolor.py
#
# Tests: meshes - 2D rectilinear
# plots - pseudocolor
#
# Defect ID: '1016, '987
#
# Programmer: <NAME>
# Date: March 19, 2015
#
# Modifications:
#
# <NAME>, Thu Mar 19 12:00:23 PDT 2015
# Add test for drawing pseudocolor plot using a log scaling
# and very small min value.
#
# ----------------------------------------------------------------------------
# Open the 2D rectilinear test database and plot variable "d".
OpenDatabase(silo_data_path("rect2d.silo"))
AddPlot("Pseudocolor", "d")
pc = PseudocolorAttributes()
# Force an explicit [1e-5, 1] range with log scaling and nodal centering;
# the very small minimum exercises the log-scale handling mentioned in the
# header (defect IDs '1016, '987).
pc.minFlag = 1
pc.min = 1e-5
pc.maxFlag = 1
pc.max = 1
pc.centering = pc.Nodal
pc.scaling = pc.Log
SetPlotOptions(pc)
DrawPlots()
# Capture the baseline image and exit the test harness.
Test("pseudocolor_01")
Exit()
| StarcoderdataPython |
143170 | <filename>cowait/tasks/definition.py<gh_stars>0
from __future__ import annotations
from datetime import datetime, timezone
from marshmallow import Schema, fields, post_load
from ..utils import uuid
def generate_task_id(name: str) -> str:
    """Create a unique task id of the form ``<slug>-<uuid>`` from *name*.

    Only the final dotted component of *name* is kept; dots and
    underscores are normalized to dashes and the slug is lower-cased.
    """
    slug = name.rsplit('.', 1)[-1]
    slug = slug.replace('.', '-').replace('_', '-')
    return '{}-{}'.format(slug.lower(), uuid())
class TaskDefinition(object):
    """
    Defines a Task :)

    Attributes:
        name (str): Task import name.
        image (str): Task image.
        id (str): Task id. If None, an id will be autogenerated.
        upstream (str): Upstream connection string. Defaults to None.
        parent (str): Parent task id. Defaults to None.
        inputs (dict): Input values
        meta (dict): Freeform metadata
        env (dict): Environment variables (keys/values coerced to str)
        ports (dict): Port forwards
        routes (dict): HTTP Ingresses
        volumes (dict): Volume definitions
        storage (dict): Storage configuration
        cpu (str): CPU request
        cpu_limit (str): CPU limit
        memory (str): Memory request
        memory_limit (str): Memory limit
        owner (str): Owner name
        created_at (datetime): Creation date
    """

    def __init__(
        self,
        name: str,
        image: str,
        id: str = None,
        upstream: str = None,
        parent: str = None,
        inputs: dict = None,
        meta: dict = None,
        env: dict = None,
        ports: dict = None,
        routes: dict = None,
        volumes: dict = None,
        storage: dict = None,
        cpu: str = None,
        cpu_limit: str = None,
        memory: str = None,
        memory_limit: str = None,
        owner: str = '',
        created_at: datetime = None,
    ):
        """
        Arguments:
            name (str): Task import name.
            image (str): Task image.
            id (str): Task id. If None, an id will be autogenerated.
            upstream (str): Upstream connection string. Defaults to None.
            parent (str): Parent task id. Defaults to None.
            inputs (dict): Input values
            meta (dict): Freeform metadata
            env (dict): Environment variables
            ports (dict): Port forwards
            routes (dict): HTTP Ingresses
            volumes (dict): Volume definitions
            storage (dict): Storage configuration
            cpu (str): CPU request
            cpu_limit (str): CPU limit
            memory (str): Memory request
            memory_limit (str): Memory limit
            owner (str): Owner name
            created_at (datetime or ISO-8601 str): Creation date
        """
        # Dict parameters default to None (not `{}`): a literal `{}` default
        # is a single shared object, so mutating e.g. `taskdef.inputs` on one
        # instance would leak into every other instance created without an
        # explicit value. Each instance now gets its own fresh dict.
        self.id = generate_task_id(name) if id is None else id
        self.name = name
        self.image = image
        self.parent = parent
        self.upstream = upstream
        self.inputs = {} if inputs is None else inputs
        self.meta = {} if meta is None else meta
        # Environment variables are always stringified.
        self.env = {str(k): str(v) for k, v in (env or {}).items()}
        self.ports = {} if ports is None else ports
        self.routes = {} if routes is None else routes
        self.cpu = cpu
        self.cpu_limit = cpu_limit
        self.memory = memory
        self.memory_limit = memory_limit
        self.owner = owner
        self.volumes = {} if volumes is None else volumes
        self.storage = {} if storage is None else storage
        if created_at is None:
            self.created_at = datetime.now(timezone.utc)
        elif isinstance(created_at, datetime):
            self.created_at = created_at
        elif isinstance(created_at, str):
            # Tolerate ISO-8601 strings, e.g. round-tripped through JSON.
            self.created_at = datetime.fromisoformat(created_at)
        else:
            # Error message now lists *all* accepted types (str was
            # accepted above but missing from the previous message).
            raise TypeError(
                f'Expected created_at to be None, datetime or ISO-8601 str, '
                f'got {created_at}')

    def serialize(self) -> dict:
        """ Serialize task definition to a dict """
        return TaskDefinitionSchema().dump(self)

    @staticmethod
    def deserialize(taskdef: dict) -> TaskDefinition:
        """ Deserialize task definition from a dict """
        return TaskDefinitionSchema().load(taskdef)
class TaskDefinitionSchema(Schema):
    """ TaskDefinition serialization schema. """
    id = fields.Str(required=True)
    name = fields.Str(required=True)
    image = fields.Str(required=True)
    upstream = fields.Str(allow_none=True)
    parent = fields.Str(allow_none=True)
    inputs = fields.Dict(missing={})
    meta = fields.Dict(missing={})
    env = fields.Dict(missing={})
    ports = fields.Dict(missing={})
    routes = fields.Dict(missing={})
    cpu = fields.Str(allow_none=True)
    cpu_limit = fields.Str(allow_none=True)
    memory = fields.Str(allow_none=True)
    memory_limit = fields.Str(allow_none=True)
    owner = fields.Str(missing='')
    # Serialized as ISO-8601; defaults to "now" in UTC when absent.
    created_at = fields.DateTime('iso', default=lambda: datetime.now(timezone.utc))
    storage = fields.Dict(missing={})
    # Mapping of volume name -> volume definition mapping.
    volumes = fields.Mapping(
        keys=fields.Str(),
        values=fields.Mapping(),
        missing={}
    )

    @post_load
    def make_class(self, data: dict, **kwargs):
        # marshmallow hook: turn the validated dict into a TaskDefinition.
        return self.make_instance(data)

    def make_instance(self, data: dict) -> TaskDefinition:
        return TaskDefinition(**data)
| StarcoderdataPython |
1663849 | <filename>tests/dataset_tests/parsers_tests/test_sdf_parser.py
import os
import pytest
import numpy as np
from rdkit.Chem import rdDistGeom, rdmolfiles, rdmolops
from profit.dataset.parsers.sdf_parser import SDFFileParser
from profit.dataset.preprocessors.egcn_preprocessor import EGCNPreprocessor
from profit.utils.io_utils import maybe_create_dir
@pytest.fixture
def test_mols():
    """Four small RDKit molecules with random 'Fitness' properties."""
    mols = []
    all_smiles = ['CN=C=O', 'Cc1ccccc1', 'CC1=CC2CC(CC1)O2', 'CCCCCCCCCCCCCCCC']
    for smiles in all_smiles:
        mol = rdmolfiles.MolFromSmiles(smiles)
        # Hydrogens must be present for 3D embedding, then removed again;
        # the AddHs -> EmbedMolecule -> RemoveHs order matters.
        mol = rdmolops.AddHs(mol, addCoords=True)
        rdDistGeom.EmbedMolecule(mol, rdDistGeom.ETKDG())
        mol = rdmolops.RemoveHs(mol)
        # Property values are stored as strings on RDKit molecules.
        mol.SetProp('Fitness', str(np.random.rand(1)[0]))
        mols.append(mol)
    return mols
@pytest.fixture()
def sdf_file(test_mols):
    """Write the test molecules to an SDF file and return its path."""
    # Create directory for test file(s)
    tmp_dir = maybe_create_dir('data/tmp/')
    fname = os.path.join(tmp_dir, 'test.sdf')

    # Store molecules; the writer must be closed to flush the file before
    # the path is handed to the tests.
    writer = rdmolfiles.SDWriter(fname)
    for mol in test_mols:
        writer.write(mol)
    writer.close()
    return fname
def test_sdf_file_parser_not_return_smiles(sdf_file, test_mols):
    """Parsing with return_smiles=False yields features and no SMILES."""
    preprocessor = EGCNPreprocessor(max_atoms=49, out_size=49)
    parser = SDFFileParser(preprocessor)

    result = parser.parse(sdf_file, return_smiles=False)
    dataset = result['dataset']
    smiles = result['smiles']
    assert len(dataset) == 3
    assert smiles is None

    # Every stored feature must match one computed fresh from the molecule.
    for i, feature in enumerate(dataset):
        for j, mol in enumerate(test_mols):
            expect = preprocessor.get_input_feats(mol)
            np.testing.assert_array_almost_equal(feature[j], expect[i], decimal=3)
def test_sdf_file_parser_return_smiles(sdf_file, test_mols):
    """Parsing with return_smiles=True yields features plus a SMILES array."""
    preprocessor = EGCNPreprocessor(max_atoms=49, out_size=49)
    parser = SDFFileParser(preprocessor)

    result = parser.parse(sdf_file, return_smiles=True)
    dataset = result['dataset']
    smiles = result['smiles']
    assert len(dataset) == 3

    # Check if computed features are saved correctly
    for i, feature in enumerate(dataset):
        for j, mol in enumerate(test_mols):
            expect = preprocessor.get_input_feats(mol)
            np.testing.assert_array_almost_equal(feature[j], expect[i], decimal=3)

    # Check smiles array (isinstance, not `type(x) ==`, per PEP 8).
    assert isinstance(smiles, np.ndarray)
    assert smiles.ndim == 1
    assert len(smiles) == dataset[0].shape[0]
    expected_smiles = np.array([rdmolfiles.MolToSmiles(mol) for mol in test_mols])
    np.testing.assert_array_equal(smiles, expected_smiles)
def test_sdf_file_parser_target_index(sdf_file, test_mols):
    """Parsing a subset of molecules (plus labels) via target_index."""
    idxs = [0, 2]
    preprocessor = EGCNPreprocessor(max_atoms=49, out_size=49)
    parser = SDFFileParser(preprocessor, labels='Fitness')

    result = parser.parse(sdf_file, return_smiles=True, target_index=idxs)
    dataset = result['dataset']
    smiles = result['smiles']
    # 3 feature arrays + 1 trailing labels array.
    assert len(dataset) == 4

    # Check if computed features are saved correctly (all but the labels).
    for i in range(len(dataset) - 1):
        for data_idx, j in enumerate(idxs):
            expect = preprocessor.get_input_feats(test_mols[j])
            np.testing.assert_array_almost_equal(dataset[i][data_idx], expect[i], decimal=3)

    # Check if labels are parsed correctly
    labels = dataset[3]
    expected_labels = np.array(
        [preprocessor.get_labels(test_mols[idx], 'Fitness') for idx in idxs])
    np.testing.assert_array_almost_equal(labels, expected_labels, decimal=3)

    # Check smiles array (isinstance, not `type(x) ==`, per PEP 8).
    assert isinstance(smiles, np.ndarray)
    assert smiles.ndim == 1
    assert len(smiles) == dataset[0].shape[0]
    expected_smiles = np.array([rdmolfiles.MolToSmiles(test_mols[idx]) for idx in idxs])
    np.testing.assert_array_equal(smiles, expected_smiles)
| StarcoderdataPython |
1730241 | <gh_stars>1-10
import json

from pyramid.view import (
    view_config,
    forbidden_view_config
)
from pyramid.response import Response
from pyramid.httpexceptions import (
    HTTPNotImplemented,
    HTTPUnauthorized,
    HTTPForbidden
)
from ns_portal.core.resources.metarootresource import (
    CustomErrorParsingArgs,
    MyNotImplementedError
)
from marshmallow import (
    ValidationError
)
@view_config(context=ValidationError)
def validationError_marsh(exception, request):
    """Render marshmallow validation failures as a 400 JSON response."""
    response = request.response
    response.status_code = 400
    response.json_body = exception.messages
    response.content_type = 'application/json'
    return response
@view_config(context=CustomErrorParsingArgs)
def failed_sqlalchemy(exception, request):
    """
    Catch any CustomErrorParsingArgs raised and return it as a 400.

    The response declares ``application/json``, so the exception text is
    JSON-encoded; previously the raw message was sent verbatim, which is
    not valid JSON for arbitrary text.
    """
    return Response(
        status=400,
        content_type='application/json',
        charset='utf-8',
        body=json.dumps(str(exception))
    )
@view_config(context=MyNotImplementedError)
def myNotImplementedView(exception, request):
    """
    Catch any MyNotImplementedError raised and answer 501.

    Logs a debug hint describing the unimplemented call. The previous
    multi-argument print() inserted a stray space at the start of every
    line after the first; a single formatted string avoids that.
    """
    print(
        'DEBUG HINT\n'
        'API called with request\n'
        'METHOD : {0.method}\n'
        'URL : {0.path_url}\n'
        'QUERY STRING: {0.query_string}\n'
        'this method is not yet implemented\n'.format(exception)
    )
    # NOTE(review): the headers dict uses 'content_type'/'charset' as raw
    # header names, which is unusual for HTTP headers -- confirm intent.
    return HTTPNotImplemented(
        headers={
            "content_type": 'application/json',
            "charset": 'utf-8',
        },
        # JSON-encode so the body matches the declared content type.
        body=json.dumps(str(exception))
    )
@forbidden_view_config()
def forbidden(request):
    """Translate pyramid's HTTPForbidden into a 401 or a 403.

    Pyramid raises HTTPForbidden both when the request carries no auth
    cookie and when the cookie's effective principals don't satisfy the
    view permission; this hook distinguishes the two cases. (It is
    invoked only when HTTPForbidden is *raised*, not returned.)
    """
    # No cookie at all -> 401 Unauthorized.
    if request.authenticated_userid is None:
        return HTTPUnauthorized('No cookie')
    # Authenticated, but principals didn't match -> 403 Forbidden.
    return HTTPForbidden()
| StarcoderdataPython |
1725899 | <reponame>szcyd-chian/soliwordsapi<filename>extras/sandbox.py<gh_stars>0
class Test:
    """Tiny sandbox class for experimenting with properties and methods."""

    def __init__(self):
        # Seed value exposed read-only through `test_attr`.
        self.value = 5

    @property
    def test_attr(self):
        """Read-only view of `value`."""
        return self.value

    def test_self(self, value):
        """Return *value* raised to the power of two."""
        return value ** 2
20788 | import os
import sys
import jinja2
import yaml
# Render a Jinja2 template in place, filling in repository metadata from
# .information.yml.  Usage: python <script> <template-file>; the template
# file is overwritten with its rendered content.
with open(".information.yml", encoding="utf-8") as fp:
    information = yaml.safe_load(fp)

loader = jinja2.FileSystemLoader(searchpath="")
environment = jinja2.Environment(loader=loader, keep_trailing_newline=True)
template = environment.get_template(sys.argv[1])

result = template.render({
    "docker_image_name": information.get("docker_image_name", "NONE"),
    "readme_note": information.get("readme_note", None),
    "versions": information.get("versions", ["latest"])
})

# "w" instead of "w+": the file is only written, never read back; an
# explicit encoding keeps the output byte-stable across platforms.
with open(sys.argv[1], "w", encoding="utf-8") as fp:
    fp.write(result)
| StarcoderdataPython |
102026 | """
Async Yadacoin node poc
"""
import sys
import importlib
import pkgutil
import json
import logging
import os
import ssl
import ntpath
import binascii
import socket
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from datetime import datetime
from traceback import format_exc
from asyncio import sleep as async_sleep
from hashlib import sha256
from logging.handlers import RotatingFileHandler
from os import path
from sys import exit, stdout
from time import time
import webbrowser
import pyrx
from Crypto.PublicKey.ECC import EccKey
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
import tornado.ioloop
import tornado.locks
import tornado.log
from tornado.iostream import StreamClosedError
from tornado.options import define, options
from tornado.web import Application, StaticFileHandler
from concurrent.futures import ThreadPoolExecutor
from bson.objectid import ObjectId
import yadacoin.core.blockchainutils
import yadacoin.core.transactionutils
import yadacoin.core.config
from yadacoin.core.crypt import Crypt
from yadacoin.core.consensus import Consensus
from yadacoin.core.chain import CHAIN
from yadacoin.core.graphutils import GraphUtils
from yadacoin.core.mongo import Mongo
from yadacoin.core.miningpoolpayout import PoolPayer
from yadacoin.core.miningpool import MiningPool
from yadacoin.core.latestblock import LatestBlock
from yadacoin.core.peer import (
Peer, Seed, SeedGateway, ServiceProvider, User, Miner, Peers, Group
)
from yadacoin.core.identity import Identity
from yadacoin.core.health import Health
from yadacoin.core.smtp import Email
from yadacoin.http.web import WEB_HANDLERS
from yadacoin.http.explorer import EXPLORER_HANDLERS
from yadacoin.http.graph import GRAPH_HANDLERS
from yadacoin.http.node import NODE_HANDLERS
from yadacoin.http.pool import POOL_HANDLERS
from yadacoin.http.wallet import WALLET_HANDLERS
from yadacoin.websocket.base import WEBSOCKET_HANDLERS
from yadacoin.tcpsocket.node import (
NodeSocketServer, NodeSocketClient, NodeRPC
)
from yadacoin.websocket.base import RCPWebSocketServer
from yadacoin.tcpsocket.pool import StratumServer
from yadacoin import version
from plugins.yadacoinpool import handlers
# Command-line options understood by the node; parsed in
# NodeApplication.__init__ via options.parse_command_line().
define("debug", default=False, help="debug mode", type=bool)
define("verbose", default=False, help="verbose mode", type=bool)
define("network", default='', help="Force mainnet, testnet or regnet", type=str)
define("reset", default=False, help="If blockchain is invalid, truncate at error block", type=bool)
define("config", default='config/config.json', help="Config file location, default is 'config/config.json'", type=str)
define("verify", default=False, help="Verify chain, default False", type=bool)
define("server", default=False, help="Is server for testing", type=bool)
define("client", default=False, help="Is client for testing", type=bool)
class NodeApplication(Application):
    def __init__(self):
        """Parse CLI options, build config/logging, then bring up every
        enabled subsystem ('node' / 'pool' / 'web' modes) and finally start
        the blocking tornado IO loop (init_ioloop)."""
        options.parse_command_line(final=False)
        self.init_config(options)
        self.configure_logging()
        self.init_config_properties()
        if 'node' in self.config.modes:
            # Static peer lists and this node's own Peer identity.
            self.init_seeds()
            self.init_seed_gateways()
            self.init_service_providers()
            self.init_groups()
            self.init_peer()
            self.config.app_log.info("Node: {}:{}".format(self.config.peer_host, self.config.peer_port))
        if 'pool' in self.config.modes:
            self.init_pool()
        if 'web' in self.config.modes:
            # Locate the wallet SPA assets; fall back to the path used when
            # running from a frozen/binary distribution.
            if os.path.exists(path.join(path.join(path.dirname(__file__), '..', 'static'), 'app')):
                static_path = path.join(path.join(path.dirname(__file__), '..', 'static'), 'app')
            else:
                static_path = path.join(path.join(path.dirname(__file__), 'static'), 'app')  # probably running from binary
            self.default_handlers = [
                (r"/app/(.*)", StaticFileHandler, {"path": static_path}),
            ]
            self.default_handlers.extend(handlers.HANDLERS)
            self.init_websocket()
            self.init_webui()
            self.init_plugins()
            self.init_http()
        self.init_whitelist()
        self.init_jwt()
        # Blocks here: registers background coroutines and starts the loop.
        self.init_ioloop()
async def background_consensus(self):
while True:
try:
if self.config.consensus.block_queue.queue:
await tornado.gen.sleep(3)
continue
await self.config.consensus.sync_bottom_up()
await tornado.gen.sleep(3)
except Exception as e:
self.config.app_log.error(format_exc())
async def background_peers(self):
"""Peers management coroutine. responsible for peers testing and outgoing connections"""
while True:
try:
await self.config.peer.ensure_peers_connected()
self.config.health.peer.last_activity = int(time())
except:
self.config.app_log.error(format_exc())
await tornado.gen.sleep(3)
    async def background_status(self):
        """This background co-routine is responsible for status collection and display"""
        while True:
            try:
                status = await self.config.get_status()
                await self.config.health.check_health()
                status['health'] = self.config.health.to_dict()
                # Healthy node -> info level; unhealthy -> warning level.
                if status['health']['status']:
                    self.config.app_log.info(json.dumps(status, indent=4))
                else:
                    self.config.app_log.warning(json.dumps(status, indent=4))
                self.config.status_busy = False
            except Exception as e:
                self.config.app_log.error(format_exc())
            await tornado.gen.sleep(30)
async def background_block_checker(self):
"""Responsible for miner updates"""
"""
New blocks will directly trigger the correct event.
This co-routine checks if new transactions have been received, or if special_min is triggered,
So we can update the miners.
"""
while True:
try:
last_block_height = 0
if LatestBlock.block:
last_block_height = LatestBlock.block.index
await LatestBlock.block_checker()
if last_block_height != LatestBlock.block.index:
self.config.app_log.info('Latest block height: %s | time: %s' % (
self.config.LatestBlock.block.index,
datetime.fromtimestamp(
int(
self.config.LatestBlock.block.time
)
).strftime("%Y-%m-%d %H:%M:%S")
))
self.config.health.block_checker.last_activity = int(time())
except Exception as e:
self.config.app_log.error(format_exc())
await tornado.gen.sleep(1)
async def background_message_sender(self):
retry_attempts = {}
while True:
try:
for x in list(self.config.nodeServer.retry_messages):
message = self.config.nodeServer.retry_messages.get(x)
if not message:
continue
if x not in retry_attempts:
retry_attempts[x] = 0
retry_attempts[x] += 1
for peer_cls in self.config.nodeServer.inbound_streams.keys():
if x[0] in self.config.nodeServer.inbound_streams[peer_cls]:
if retry_attempts[x] > 10:
del self.config.nodeServer.retry_messages[x]
await self.remove_peer(self.config.nodeServer.inbound_streams[peer_cls][x[0]])
continue
if len(x) > 3:
await self.config.nodeShared.write_result(self.config.nodeServer.inbound_streams[peer_cls][x[0]], x[1], message, x[3])
else:
await self.config.nodeShared.write_params(self.config.nodeServer.inbound_streams[peer_cls][x[0]], x[1], message)
for x in list(self.config.nodeClient.retry_messages):
message = self.config.nodeClient.retry_messages.get(x)
if not message:
continue
if x not in retry_attempts:
retry_attempts[x] = 0
retry_attempts[x] += 1
for peer_cls in self.config.nodeClient.outbound_streams.keys():
if x[0] in self.config.nodeClient.outbound_streams[peer_cls]:
if retry_attempts[x] > 10:
del self.config.nodeClient.retry_messages[x]
await self.remove_peer(self.config.nodeClient.outbound_streams[peer_cls][x[0]])
continue
if len(x) > 3:
await self.config.nodeShared.write_result(self.config.nodeClient.outbound_streams[peer_cls][x[0]], x[1], message, x[3])
else:
await self.config.nodeShared.write_params(self.config.nodeClient.outbound_streams[peer_cls][x[0]], x[1], message)
self.config.health.message_sender.last_activity = int(time())
await tornado.gen.sleep(10)
except Exception as e:
self.config.app_log.error(format_exc())
async def remove_peer(self, stream):
stream.close()
if not hasattr(stream, 'peer'):
return
id_attr = getattr(stream.peer, stream.peer.id_attribute)
if id_attr in self.config.nodeServer.inbound_streams[stream.peer.__class__.__name__]:
del self.config.nodeServer.inbound_streams[stream.peer.__class__.__name__][id_attr]
if id_attr in self.config.nodeServer.inbound_pending[stream.peer.__class__.__name__]:
del self.config.nodeServer.inbound_pending[stream.peer.__class__.__name__][id_attr]
if id_attr in self.config.nodeClient.outbound_streams[stream.peer.__class__.__name__]:
del self.config.nodeClient.outbound_streams[stream.peer.__class__.__name__][id_attr]
if id_attr in self.config.nodeClient.outbound_pending[stream.peer.__class__.__name__]:
del self.config.nodeClient.outbound_pending[stream.peer.__class__.__name__][id_attr]
async def background_block_inserter(self):
while True:
try:
await self.config.consensus.process_block_queue()
self.config.health.block_inserter.last_activity = int(time())
except:
self.config.app_log.error(format_exc())
await tornado.gen.sleep(1)
async def background_pool_payer(self):
"""Responsible for paying miners"""
"""
New blocks will directly trigger the correct event.
This co-routine checks if new transactions have been received, or if special_min is triggered,
So we can update the miners.
"""
while True:
try:
if self.config.pp:
await self.config.pp.do_payout()
self.config.health.pool_payer.last_activity = int(time())
except Exception as e:
self.config.app_log.error(format_exc())
await tornado.gen.sleep(120)
async def background_cache_validator(self):
"""Responsible for validating the cache and clearing it when necessary"""
while True:
if not hasattr(self.config, 'cache_inited'):
self.cache_collections = [x for x in await self.config.mongo.async_db.list_collection_names({}) if x.endswith('_cache')]
self.cache_last_times = {}
try:
async for x in self.config.mongo.async_db.blocks.find({'updated_at': {'$exists': False}}):
self.config.mongo.async_db.blocks.update_one({'index': x['index']}, {'$set': {'updated_at': time()}})
for cache_collection in self.cache_collections:
self.cache_last_times[cache_collection] = 0
await self.config.mongo.async_db[cache_collection].delete_many({'cache_time': {'$exists': False}})
self.config.cache_inited = True
except Exception as e:
self.config.app_log.error(format_exc())
"""
We check for cache items that are not currently in the blockchain
If not, we delete the cached item.
"""
try:
for cache_collection in self.cache_collections:
if not self.cache_last_times.get(cache_collection):
self.cache_last_times[cache_collection] = 0
latest = await self.config.mongo.async_db[cache_collection].find_one({
'cache_time': {'$gt': self.cache_last_times[cache_collection]}
}, sort=[('height', -1)])
if latest:
self.cache_last_times[cache_collection] = latest['cache_time']
else:
self.cache_last_times[cache_collection] = 0
async for txn in self.config.mongo.async_db[cache_collection].find({
'cache_time': {'$gt': self.cache_last_times[cache_collection]}
}).sort([('height', -1)]):
if not await self.config.mongo.async_db.blocks.find_one({
'index': txn.get('height'),
'hash': txn.get('block_hash')
}) and not await self.config.mongo.async_db.miner_transactions.find_one({
'id': txn.get('id'),
}):
await self.config.mongo.async_db[cache_collection].delete_many({
'height': txn.get('height')
})
break
else:
if txn['cache_time'] > self.cache_last_times[cache_collection]:
self.cache_last_times[cache_collection] = txn['cache_time']
self.config.health.cache_validator.last_activity = int(time())
except Exception as e:
self.config.app_log.error("error in background_cache_validator")
self.config.app_log.error(format_exc())
await tornado.gen.sleep(30)
    def configure_logging(self):
        """Set up rotating-file logging for the application and access logs."""
        # tornado.log.enable_pretty_logging()
        self.config.app_log = logging.getLogger("tornado.application")
        tornado.log.enable_pretty_logging(logger=self.config.app_log)
        logfile = path.abspath("yada_app.log")
        # Rotate log after reaching 512K, keep 5 old copies.
        rotateHandler = RotatingFileHandler(logfile, "a", 512 * 1024, 5)
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        rotateHandler.setFormatter(formatter)
        self.config.app_log.addHandler(rotateHandler)
        self.config.app_log.setLevel(logging.INFO)
        if self.config.debug:
            self.config.app_log.setLevel(logging.DEBUG)
        # Separate access log with the same rotation policy.
        self.access_log = logging.getLogger("tornado.access")
        tornado.log.enable_pretty_logging()
        logfile2 = path.abspath("yada_access.log")
        rotateHandler2 = RotatingFileHandler(logfile2, "a", 512 * 1024, 5)
        formatter2 = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        rotateHandler2.setFormatter(formatter2)
        self.access_log.addHandler(rotateHandler2)
        # Keep records from propagating to the root logger (avoids doubles).
        self.config.app_log.propagate = False
        self.access_log.propagate = False
        # This logger config is quite a mess, but works well enough for the time being.
        logging.getLogger("engineio").propagate = False
def init_config(self, options):
if not path.isfile(options.config):
self.config = yadacoin.core.config.Config.generate()
try:
os.makedirs(os.path.dirname(options.config))
except:
pass
with open(options.config, 'w') as f:
f.write(self.config.to_json())
with open(options.config) as f:
self.config = yadacoin.core.config.Config(json.loads(f.read()))
# Sets the global var for all objects
yadacoin.core.config.CONFIG = self.config
self.config.debug = options.debug
# force network, command line one takes precedence
if options.network != '':
self.config.network = options.network
self.config.reset = options.reset
self.config.pyrx = pyrx.PyRX()
self.config.pyrx.get_rx_hash('header', binascii.unhexlify('4181a493b397a733b083639334bc32b407915b9a82b7917ac361816f0a1f5d4d'), 4)
def init_consensus(self):
tornado.ioloop.IOLoop.current().run_sync(self.config.consensus.async_init)
if self.options.verify:
self.config.app_log.info("Verifying existing blockchain")
tornado.ioloop.IOLoop.current().run_sync(self.config.consensus.verify_existing_blockchain)
else:
self.config.app_log.info("Verification of existing blockchain skipped by config")
def init_whitelist(self):
api_whitelist = 'api_whitelist.json'
api_whitelist_filename = options.config.replace(ntpath.basename(options.config), api_whitelist)
if path.isfile(api_whitelist_filename):
with open(api_whitelist_filename) as f:
self.config.api_whitelist = [x['host'] for x in json.loads(f.read())]
    def init_ioloop(self):
        """Register all background coroutines and start the blocking IO loop."""
        tornado.ioloop.IOLoop.current().set_default_executor(ThreadPoolExecutor(max_workers=1))
        # regnet runs in isolation: no consensus sync or peer maintenance.
        if self.config.network != 'regnet' and 'node' in self.config.modes:
            tornado.ioloop.IOLoop.current().spawn_callback(self.background_consensus)
            tornado.ioloop.IOLoop.current().spawn_callback(self.background_peers)
        tornado.ioloop.IOLoop.current().spawn_callback(self.background_status)
        tornado.ioloop.IOLoop.current().spawn_callback(self.background_block_checker)
        tornado.ioloop.IOLoop.current().spawn_callback(self.background_cache_validator)
        tornado.ioloop.IOLoop.current().spawn_callback(self.background_message_sender)
        tornado.ioloop.IOLoop.current().spawn_callback(self.background_block_inserter)
        if self.config.pool_payout:
            self.config.app_log.info("PoolPayout activated")
            self.config.pp = PoolPayer()
            tornado.ioloop.IOLoop.current().spawn_callback(self.background_pool_payer)
        # start() blocks until the loop stops; the while restarts it if so.
        while True:
            tornado.ioloop.IOLoop.current().start()
def init_jwt(self):
jwt_key = EccKey(curve='p256', d=int(self.config.private_key, 16))
self.config.jwt_secret_key = jwt_key.export_key(format='PEM')
self.config.jwt_public_key = self.config.jwt_public_key or jwt_key.public_key().export_key(format='PEM')
self.config.jwt_options = {
'verify_signature': True,
'verify_exp': True,
'verify_nbf': False,
'verify_iat': True,
'verify_aud': False
}
def init_seeds(self):
if self.config.network == 'mainnet':
self.config.seeds = Peers.get_seeds()
elif self.config.network == 'regnet':
self.config.seeds = Peers.get_seeds()
def init_seed_gateways(self):
if self.config.network == 'mainnet':
self.config.seed_gateways = Peers.get_seed_gateways()
elif self.config.network == 'regnet':
self.config.seed_gateways = Peers.get_seed_gateways()
def init_service_providers(self):
if self.config.network == 'mainnet':
self.config.service_providers = Peers.get_service_providers()
elif self.config.network == 'regnet':
self.config.service_providers = Peers.get_service_providers()
def init_groups(self):
if self.config.network == 'mainnet':
self.config.groups = Peers.get_groups()
elif self.config.network == 'regnet':
self.config.groups = Peers.get_groups()
def init_websocket(self):
self.default_handlers.extend(WEBSOCKET_HANDLERS)
def init_webui(self):
self.default_handlers.extend(NODE_HANDLERS)
self.default_handlers.extend(GRAPH_HANDLERS)
self.default_handlers.extend(EXPLORER_HANDLERS)
self.default_handlers.extend(WALLET_HANDLERS)
self.default_handlers.extend(WEB_HANDLERS)
self.default_handlers.extend(POOL_HANDLERS)
def init_plugins(self):
for finder, name, ispkg in pkgutil.iter_modules([path.join(path.dirname(__file__), '..', 'plugins')]):
handlers = importlib.import_module('plugins.' + name + '.handlers')
if name == self.config.root_app:
[self.default_handlers.insert(0, handler) for handler in handlers.HANDLERS]
else:
self.default_handlers.extend(handlers.HANDLERS)
    def init_http(self):
        """Assemble tornado Application settings and handlers, then start the
        HTTP server (and HTTPS server when SSL is configured)."""
        self.config.app_log.info("API: http://{}:{}".format(self.config.serve_host, self.config.serve_port))
        if 'web' in self.config.modes:
            self.config.app_log.info("Wallet: http://{}:{}/app".format(self.config.serve_host, self.config.serve_port))
        # Template dir differs between a source checkout and a binary bundle.
        if os.path.exists(path.join(path.dirname(__file__), '..', 'templates')):
            template_path = path.join(path.dirname(__file__), '..', 'templates')
        else:
            template_path = path.join(path.dirname(__file__), 'templates')
        settings = dict(
            app_title=u"Yadacoin Node",
            template_path=template_path,
            xsrf_cookies=False,  # TODO: sort out, depending on python client version (< 3.6) does not work with xsrf activated
            cookie_secret=sha256(self.config.private_key.encode('utf-8')).hexdigest(),
            compress_response=True,
            debug=options.debug,  # Also activates auto reload
            autoreload=False,
            serve_traceback=options.debug,
            yadacoin_vars={'node_version': version},
            yadacoin_config=self.config,
            mp=None,
            BU=yadacoin.core.blockchainutils.GLOBAL_BU,
            TU=yadacoin.core.transactionutils.TU
        )
        handlers = self.default_handlers.copy()
        # NodeApplication subclasses tornado.web.Application; this completes
        # base-class construction with the assembled handlers.
        super().__init__(handlers, **settings)
        self.config.application = self
        self.config.http_server = tornado.httpserver.HTTPServer(self)
        self.config.http_server.listen(self.config.serve_port, self.config.serve_host)
        if self.config.ssl:
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=self.config.ssl.ca_file)
            ssl_ctx.load_cert_chain(self.config.ssl.cert_file, keyfile=self.config.ssl.key_file)
            self.config.https_server = tornado.httpserver.HTTPServer(self, ssl_options=ssl_ctx)
            self.config.https_server.listen(self.config.ssl.port)
        if hasattr(self.config, 'email'):
            self.config.emailer = Email()
def init_pool(self):
self.config.app_log.info("Pool: {}:{}".format(self.config.peer_host, self.config.stratum_pool_port))
StratumServer.inbound_streams[Miner.__name__] = {}
self.config.pool_server = StratumServer()
self.config.pool_server.listen(self.config.stratum_pool_port)
def init_peer(self):
Peer.create_upnp_mapping(self.config)
my_peer = {
'host': self.config.peer_host,
'port': self.config.peer_port,
'identity': {
"username": self.config.username,
"username_signature": self.config.username_signature,
"public_key": self.config.public_key
},
'peer_type': self.config.peer_type,
'http_host': self.config.ssl.common_name if isinstance(self.config.ssl, dict) else self.config.peer_host,
'http_port': self.config.ssl.port if isinstance(self.config.ssl, dict) else self.config.serve_port,
'secure': isinstance(self.config.ssl, dict),
'protocol_version': 3
}
if my_peer.get('peer_type') == 'seed':
if not self.config.username_signature in self.config.seeds:
raise Exception('You are not a valid SeedGateway. Could not find you in the list of SeedGateways')
my_peer['seed_gateway'] = self.config.seeds[self.config.username_signature].seed_gateway
self.config.peer = Seed.from_dict(my_peer, is_me=True)
elif my_peer.get('peer_type') == 'seed_gateway':
if not self.config.username_signature in self.config.seed_gateways:
raise Exception('You are not a valid SeedGateway. Could not find you in the list of SeedGateways')
my_peer['seed'] = self.config.seed_gateways[self.config.username_signature].seed
self.config.peer = SeedGateway.from_dict(my_peer, is_me=True)
elif my_peer.get('peer_type') == 'service_provider':
self.config.peer = ServiceProvider.from_dict(my_peer, is_me=True)
elif my_peer.get('peer_type') == 'user' or True: # default if not specified
self.config.peer = User.from_dict(my_peer, is_me=True)
def init_config_properties(self):
self.config.health = Health()
self.config.mongo = Mongo()
self.config.http_client = AsyncHTTPClient()
self.config.BU = yadacoin.core.blockchainutils.BlockChainUtils()
self.config.TU = yadacoin.core.transactionutils.TU
yadacoin.core.blockchainutils.set_BU(self.config.BU) # To be removed
self.config.GU = GraphUtils()
self.config.LatestBlock = LatestBlock
tornado.ioloop.IOLoop.current().run_sync(self.config.LatestBlock.block_checker)
self.config.consensus = tornado.ioloop.IOLoop.current().run_sync(Consensus.init_async)
self.config.cipher = Crypt(self.config.wif)
if 'node' in self.config.modes:
self.config.nodeServer = NodeSocketServer
self.config.nodeShared = NodeRPC()
self.config.nodeClient = NodeSocketClient()
for x in [Seed, SeedGateway, ServiceProvider, User, Miner]:
if x.__name__ not in self.config.nodeClient.outbound_streams:
self.config.nodeClient.outbound_ignore[x.__name__] = {}
if x.__name__ not in self.config.nodeClient.outbound_streams:
self.config.nodeClient.outbound_pending[x.__name__] = {}
if x.__name__ not in self.config.nodeClient.outbound_streams:
self.config.nodeClient.outbound_streams[x.__name__] = {}
for x in [Seed, SeedGateway, ServiceProvider, User, Miner]:
if x.__name__ not in self.config.nodeServer.inbound_pending:
self.config.nodeServer.inbound_pending[x.__name__] = {}
if x.__name__ not in self.config.nodeServer.inbound_streams:
self.config.nodeServer.inbound_streams[x.__name__] = {}
self.config.node_server_instance = self.config.nodeServer()
self.config.node_server_instance.bind(self.config.peer_port, family=socket.AF_INET)
self.config.node_server_instance.start(1)
self.config.websocketServer = RCPWebSocketServer
self.config.app_log = logging.getLogger('tornado.application')
if 'web' in self.config.modes:
for x in [User, Group]:
if x.__name__ not in self.config.websocketServer.inbound_streams:
self.config.websocketServer.inbound_streams[x.__name__] = {}
if 'test' in self.config.modes:
return
if __name__ == "__main__":
    # Constructing the application starts the blocking IO loop (init_ioloop).
    NodeApplication()
| StarcoderdataPython |
4830464 | <reponame>kuzxnia/typer<filename>typer/util/statistic.py
from __future__ import division
from typer.util.keystroke import score_for_words
def cpm(correct_words: list, duration: float):
    """Characters per minute: keystroke score floored over elapsed minutes."""
    minutes = duration / 60.0
    return score_for_words(correct_words) // minutes
def wpm(correct_words: list, duration: float):
    """Words per minute, using the standard 5-characters-per-word convention."""
    return cpm(correct_words, duration) / 5.0
def accuracy(correct_words: list, incorrect_words: list):
    """Percentage of the total keystroke score that was typed correctly;
    0 when nothing correct was typed."""
    correct = score_for_words(correct_words)
    wrong = score_for_words(incorrect_words)
    return 0 if correct == 0 else correct / (correct + wrong) * 100
| StarcoderdataPython |
1624407 | #from server.djangoapp.models import DealerReview
from django.contrib import auth
from django.http.response import JsonResponse
from djangoapp.models import DealerReview, CarDealer
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, render, redirect
# from .models import related models
from .restapis import get_dealers_from_cf, get_dealer_reviews_from_cf,post_request
from django.contrib.auth import login, logout, authenticate
from django.contrib import messages
from datetime import datetime
import logging
import urllib
import requests
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your views here.
# Create an `about` view to render a static about page
def get_about(request):
    """Render the static About page (GET only)."""
    if request.method == "GET":
        return render(request, 'djangoapp/about.html', {})
# Create a `contact` view to return a static contact page
def get_contact(request):
    """Render the static Contact page (GET only)."""
    if request.method == "GET":
        return render(request, 'djangoapp/contact.html', {})
# Create a `login_request` view to handle sign in request
def login_request(request):
    """Authenticate a user; redirect to the index on success, otherwise
    re-render the About page (with an error message on failed POST)."""
    context = {}
    if request.method == "POST":
        user = authenticate(
            username=request.POST['username'],
            password=request.POST['<PASSWORD>'],
        )
        if user is not None:
            login(request, user)
            return redirect('djangoapp:index')
        context['message'] = "Invalid username or password."
    return render(request, 'djangoapp/about.html', context)
# Create a `logout_request` view to handle sign out request
def logout_request(request):
    """End the user's session and return to the index page."""
    logout(request)
    return redirect('djangoapp:index')
# Create a `registration_request` view to handle sign up request
def registration_request(request):
    """Render the sign-up form (GET) or create the account and log in (POST)."""
    context = {}
    if request.method == 'GET':
        return render(request, 'djangoapp/registration.html', context)
    elif request.method == 'POST':
        username = request.POST['username']
        firstname = request.POST['firstname']
        lastname = request.POST['lastname']
        password = request.POST['<PASSWORD>']
        user_exist = False
        try:
            # Check if user already exists
            User.objects.get(username=username)
            user_exist = True
        except User.DoesNotExist:
            # Narrowed from a bare except; other DB errors should surface.
            logger.debug("{} is new user".format(username))
        if not user_exist:
            # BUG FIX: first/last name were collected but never stored.
            user = User.objects.create_user(
                username=username,
                password=password,
                first_name=firstname,
                last_name=lastname,
            )
            login(request, user)
            # NOTE(review): 'djangoapp:dealerdetails' normally takes a
            # dealer_id argument — confirm this reverse actually resolves.
            return redirect("djangoapp:dealerdetails")
        else:
            context['message'] = "User already exists."
            return render(request, 'djangoapp/registration.html', context)
# Update the `get_dealerships` view to render the index page with a list of dealerships
def get_dealerships(request):
    """List all dealerships fetched from the cloud-function endpoint."""
    url = "https://1065db83.eu-gb.apigw.appdomain.cloud/api/dealerships/dealer-get"
    dealerships = get_dealers_from_cf(url)
    if request.method == "GET":
        return render(request, 'djangoapp/index.html', {'dealerships': dealerships})
# Create a `get_dealer_details` view to render the reviews of a dealer
def get_dealer_details(request, dealer_id):
    """Show one dealer together with its reviews."""
    # NOTE(review): the double slash in 'api//reviews' is preserved from the
    # original endpoint — confirm it is intentional.
    reviews_url = "https://1065db83.eu-gb.apigw.appdomain.cloud/api//reviews?id=" + str(dealer_id)
    reviews = get_dealer_reviews_from_cf(reviews_url, dealer_id=dealer_id)
    dealers_url = "https://1065db83.eu-gb.apigw.appdomain.cloud/api/dealerships/dealer-get?id=" + str(dealer_id)
    dealers = get_dealers_from_cf(dealers_url, dealer_id=dealer_id)
    if request.method == "GET":
        context = {'reviews': reviews, "dealers": dealers}
        return render(request, 'djangoapp/dealer_details.html', context)
# Create a `add_review` view to submit a review
# def add_review(request, dealer_id):
# ...
def add_review(request, dealer_id):
    """GET: render the add-review form; POST: submit a review for the dealer
    (authenticated users only)."""
    if request.method == "GET":
        # BUG FIX: a second, byte-identical GET branch followed this one in
        # the original; it was unreachable dead code and has been removed.
        url = "https://1065db83.eu-gb.apigw.appdomain.cloud/api/reviews?id=" + str(dealer_id)
        reviews = get_dealer_reviews_from_cf(url, dealer_id=dealer_id)
        url_for_dealers = "https://1065db83.eu-gb.apigw.appdomain.cloud/api/dealerships/dealer-get?id=" + str(dealer_id)
        dealer_by_id = get_dealers_from_cf(url_for_dealers, dealer_id=dealer_id)
        context = {'reviews': reviews,
                   "dealers": dealer_by_id}
        return render(request, 'djangoapp/add_review.html', context)
    if request.method == "POST":
        if request.user.is_authenticated:
            url_for_reviews = "https://1065db83.eu-gb.apigw.appdomain.cloud/api/reviews?id=" + str(dealer_id)
            reviews = get_dealer_reviews_from_cf(url_for_reviews, dealer_id=dealer_id)
            # Clone the most recent review's metadata, replacing its text
            # with the submitted content (preserved from the original;
            # confirm this is the intended payload shape).
            json_payload = {
                "time": datetime.utcnow().isoformat(),
                "name": reviews[0].name,
                "dealership": reviews[0].dealership,
                "review": request.POST.get('content'),
                "purchase": reviews[0].purchase,
                "purchase_date": reviews[0].purchase_date,
                "car_make": reviews[0].car_make,
                "car_model": reviews[0].car_model,
                "car_year": reviews[0].car_year,
                "id": reviews[0].id
            }
            url = "https://1065db83.eu-gb.apigw.appdomain.cloud/api/reviews?id=" + str(dealer_id)
            post_request(url, json_payload, dealer_id=dealer_id)
            # Removed an unused dealer fetch and context dict that the
            # original built and then discarded before redirecting.
            return redirect('djangoapp:dealerdetails', dealer_id=dealer_id)
        else:
            messages.error(request, 'Login Failed! Invalid username and password.')
            return redirect('djangoapp:index')
| StarcoderdataPython |
3296588 | from unittest import mock
from os import environ
from unittest import TestCase
import sewer
from . import test_utils
class TestClouDNS(TestCase):
    """
    Tests the ClouDNS DNS provider class.
    """
    def setUp(self):
        # Fixture values; sewer's cloudns_api backend reads the credentials
        # from these environment variables at call time.
        self.domain_name = "example.com"
        self.domain_dns_value = "mock-domain_dns_value"
        self.cloudns_auth_id = "mock-api-id"
        self.cloudns_auth_password = "<PASSWORD>"
        self.dns_class = sewer.ClouDNSDns()
        environ["CLOUDNS_API_AUTH_ID"] = self.cloudns_auth_id
        environ["CLOUDNS_API_AUTH_PASSWORD"] = self.cloudns_auth_password
    def test_cloudns_is_called_by_create_dns_record(self):
        """create_dns_record should POST a TXT _acme-challenge record with
        the expected query parameters."""
        with mock.patch(
            "cloudns_api.api.CLOUDNS_API_AUTH_ID", new=self.cloudns_auth_id
        ) as _, mock.patch(
            "cloudns_api.api.CLOUDNS_API_AUTH_PASSWORD", new=self.cloudns_auth_password
        ) as __, mock.patch(
            "requests.post"
        ) as mock_requests_post:
            mock_requests_post.return_value = test_utils.MockResponse()
            self.dns_class.create_dns_record(
                domain_name=self.domain_name, domain_dns_value=self.domain_dns_value
            )
            expected = {
                "auth-id": "mock-api-id",
                "auth-password": "<PASSWORD>",
                "domain-name": "example.com",
                "host": "_acme-challenge",
                "record": "mock-domain_dns_value",
                "record-type": "TXT",
                "ttl": 60,
            }
            # call_args[1] is the kwargs dict of the last requests.post call.
            self.assertDictEqual(expected, mock_requests_post.call_args[1]["params"])
    def test_cloudns_is_called_by_delete_dns_record(self):
        """delete_dns_record should look up the record id via GET and then
        POST a delete for that id."""
        with mock.patch(
            "cloudns_api.api.CLOUDNS_API_AUTH_ID", new=self.cloudns_auth_id
        ) as _, mock.patch(
            "cloudns_api.api.CLOUDNS_API_AUTH_PASSWORD", new=self.cloudns_auth_password
        ) as __, mock.patch(
            "requests.get"
        ) as mock_requests_get, mock.patch(
            "requests.post"
        ) as mock_requests_post:
            # The GET returns the existing records keyed by record id.
            mock_requests_get.return_value = test_utils.MockResponse(
                content={"1234567": {"record": "mock-domain_dns_value"}}
            )
            mock_requests_post.return_value = test_utils.MockResponse()
            self.dns_class.delete_dns_record(
                domain_name=self.domain_name, domain_dns_value=self.domain_dns_value
            )
            expected = {
                "auth-id": "mock-api-id",
                "auth-password": "<PASSWORD>",
                "domain-name": "example.com",
                "record-id": "1234567",
            }
            self.assertDictEqual(expected, mock_requests_post.call_args[1]["params"])
105130 | <reponame>zmoon/monetio
""" Obs Utilities """
import datetime
import sys
import numpy as np
def find_near(df, latlon, distance=100, sid="site_num", drange=None):
    """find all values in the df dataframe column sid which are within distance
    (km) of lat lon point. output dictionary with key as value in column sid
    and value tuple (latitude, longitude)

    Parameters
    ----------
    latlon : tuple or list
        (longitude, latitude)
    distance : float
        kilometers
    sid: string
        name of column
    drange: tuple or list with two datetimes
        consider rows with dates between these two dates.

    Returns
    --------
    lhash: dictionary
        key is the value in column sid and value is (latitude, longitude)
        position.
    """
    degree2km = 111
    if drange:
        df = timefilter(df.copy(), drange)
    lhash = get_lhash(df, sid)
    # BUG FIX: the original iterated ``lhash.keys`` (the bound method object,
    # a TypeError at runtime) and also popped entries from the dict while
    # iterating it. Iterating a snapshot fixes both problems.
    for key in list(lhash):
        # NOTE(review): lhash values are (lat, lon) while latlon is documented
        # as (lon, lat); the index usage below is preserved verbatim from the
        # original — confirm the intended coordinate ordering.
        xd = (lhash[key][1] - latlon[1]) * degree2km * np.cos(latlon[1] * np.pi / 180.0)
        yd = (lhash[key][0] - latlon[0]) * degree2km
        dd = (xd**2 + yd**2) ** 0.5
        if dd > distance:
            lhash.pop(key, None)
    return lhash
def write_datem(df, obscolumn="obs", dname="datemfile.txt", sitename="1", info=None, drange=None):
    """returns string in datem format (See NOAA ARL).
    datem format has the following columns:
    Year, Month, Day, Hour, Duration, lat, lon, Concentration (units), site
    id, height

    Parameters
    ----------
    obscolumn : string
        name of column with values to write in the Concentration column.
    dname : string
        name of the output file.
    sitename : string.
        If it is the name of a column in the dataframe then
        that column will be used to generate the site name column in the
        datem file. If is not the name of a column, then the string will
        be used as the site name.
    info : string
        will be written to the second line of the header.
    drange : list of two time stamp objects.

    Returns
    --------
    runstring: string
        string in datem format.
    """
    if drange:
        df = timefilter(df, drange)
    units = df["units"].tolist()
    units = list(set(units))
    # NOTE(review): the header "Beginning date" is hard-coded to 2010-01-01
    # rather than derived from the data — confirm this is intentional.
    sdate = datetime.datetime(2010, 1, 1, 0)
    if len(units) > 1:
        print("WARNING, more than one type of unit ", units)
    ustr = ""
    for uuu in units:
        ustr += uuu + " "
    runstring = "Beginning date " + sdate.strftime("%Y %m %d %H:%M") + " UTC ---"
    runstring += "Information "
    if info:
        runstring += info + "\n"
    else:
        runstring += "\n"
    runstring += (
        "Year, Month, Day, Hour:Minute (UTC), Dur(hhmm) , LAT, LON, Concentration ("
        + ustr
        + "), sid, height\n"
    )
    lat = df["latitude"]
    lon = df["longitude"]
    cval = df[obscolumn]
    # print t2
    t1 = df["time"]
    # Duration and measurement height are fixed: 1 hour, 20 (units per the
    # datem convention — presumably meters; confirm).
    duration = " 0100 "
    height = "20"
    if sitename in df.columns.values:
        sval = df[sitename]
    else:
        sval = [sitename] * len(cval)
    for val in zip(t1, lat, lon, cval, sval):
        runstring += val[0].strftime("%Y %m %d %H%M") + duration
        try:
            runstring += str(val[1]) + " " + str(val[2]) + " "
        except RuntimeError:
            print("WARNING1", val[1])
            print(val[2])
            print(type(val[1]))
            print(type(val[2]))
            sys.exit()
        # Site ids may be strings or integers; format accordingly.
        if isinstance(val[4], str):
            runstring += f"{val[3]:.3f}" + " " + val[4] + " " + height + "\n"
        else:
            runstring += f"{val[3]:.3f}" + " " + f"{val[4]:d}" + " " + height + "\n"
    with open(dname, "w") as fid:
        fid.write(runstring)
    return runstring
def dropna(df, inplace=True):
    """Drop columns containing NaN values and return the resulting frame.

    BUG FIX: ``DataFrame.dropna(..., inplace=True)`` returns None, so the
    original returned None whenever inplace was requested (the default).
    The frame itself is now returned in both modes.

    NOTE(review): the old comment said "columns which have all Nans", but
    ``how`` is left at its default ('any'); drop behavior is preserved —
    confirm which was intended.
    """
    if inplace:
        df.dropna(axis=1, inplace=True)
        return df
    return df.dropna(axis=1)
def get_lhash(df, idn):
    """returns a dictionary with the key as the input column value and the
    value a tuple of (lat, lon) Useful for getting lat lon locations of
    different sites in a dataframe.

    Returns None when the frame has no 'latitude' column.
    """
    if "latitude" in list(df.columns.values):
        dftemp = df.copy()
        pairs = zip(dftemp[idn], zip(dftemp["latitude"], dftemp["longitude"]))
        # De-duplicate (site, (lat, lon)) pairs before building the mapping.
        pairs = list(set(pairs))
        lhash = dict(pairs)  # key is facility id and value is name.
        # BUG FIX: removed a stray debug ``print(lhash)`` left in library code.
        return lhash
def summarize(df, verbose=False):
    """prints list of columns. if verbose prints list of unique values in each
    column"""
    column_names = list(df.columns.values)
    if verbose:
        for name in column_names:
            print(name)
            print(df[name].unique())
            print("-------------------------------")
    for name in column_names:
        print(name)
def latlonfilter(df, llcrnr, urcrnr):
    """
    Return the rows of df whose latitude/longitude fall strictly inside the
    box described by llcrnr (lower left corner) and urcrnr (upper right
    corner).

    Parameters
    ----------
    llcrnr : tuple
        lower left corner. (latitude, longitude)
    urcrnr : tuple
        upper right corner (latitude, longitude)
    """
    lat_min, lon_min = llcrnr[0], llcrnr[1]
    lat_max, lon_max = urcrnr[0], urcrnr[1]
    inside = (
        (df["latitude"] > lat_min)
        & (df["latitude"] < lat_max)
        & (df["longitude"] > lon_min)
        & (df["longitude"] < lon_max)
    )
    return df[inside]
def timefilter(df, daterange, inplace=True):
    """Return the rows of df whose 'time' falls strictly between
    daterange[0] and daterange[1].

    Parameters
    ----------
    daterange: tuple
        (datetime, datetime)
    inplace: boolean
        accepted for API compatibility but not used (the filtered frame is
        always returned), matching the original behavior.
    """
    start, end = daterange[0], daterange[1]
    keep = (df["time"] > start) & (df["time"] < end)
    return df[keep]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.