id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9755246 | <gh_stars>10-100
# @author <NAME>
#
# Copyright (C) 2010 by <NAME>
# Copyright (C) 2011 by <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
from pypeflow.common import *
from pypeflow.task import PypeThreadTaskBase, PypeTaskBase
from pypeflow.task import PypeTask, PypeShellTask, PypeSGETask, PypeDistributibleTask
from pypeflow.controller import PypeWorkflow, PypeThreadWorkflow, PypeMPWorkflow
from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn
import logging
import time
# Configure the root logger to stream all DEBUG output to stderr so the
# workflow's progress is visible while this test script runs.
logger = logging.getLogger()
#logger.setLevel(logging.INFO)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Input/output files for the single task below; the input file is created
# up-front so the workflow sees its dependency as already satisfied.
inputs = {"input": makePypeLocalFile("/tmp/test1_input")}
outputs = {"output": makePypeLocalFile("/tmp/test1_output")}
os.system("touch /tmp/test1_input")
@PypeTask(inputs = inputs, outputs = outputs, TaskType = PypeThreadTaskBase)
def f(self):
    """Sleep in 0.1s increments for ~1.2 seconds (or until a shutdown is
    signalled), then touch the output file so the workflow can mark this
    task's target as satisfied.

    The output file is only created when the task was NOT interrupted by
    the shutdown event, so an aborted run leaves the target unsatisfied.
    """
    i = 0
    while True:
        time.sleep(0.1)
        # Stop early if the workflow has asked all tasks to shut down.
        if self.shutdown_event is not None and self.shutdown_event.is_set():
            break
        if i > 10:
            break
        i += 1
    if self.shutdown_event is None or not self.shutdown_event.is_set():
        os.system("touch %s" % fn(self.output))
# Build a threaded workflow around the single task and drive it to
# completion (refreshTargets executes tasks until outputs are up to date).
wf = PypeThreadWorkflow()
wf.addTasks([f])
wf.refreshTargets()
| StarcoderdataPython |
3591898 | from __future__ import annotations
import pytest
import torch
from torch.utils.data import TensorDataset
from ranzen.torch import prop_random_split
@pytest.fixture(scope="module")
def dummy_ds() -> TensorDataset:  # type: ignore[no-any-unimported]
    """Module-scoped fixture: a 100-sample dataset of random scalars."""
    return TensorDataset(torch.randn(100))
@pytest.mark.parametrize("props", [0.5, [-0.2, 0.5], [0.1, 0.3, 0.4], [0.5, 0.6]])
def test_prop_random_split(dummy_ds: TensorDataset, props: float | list[float]) -> None:  # type: ignore[no-any-unimported]
    """prop_random_split must raise ValueError for invalid proportions
    (sum > 1 or any proportion outside [0, 1]); otherwise it must produce
    len(props) + 1 splits that partition the dataset, with the remainder
    in the final split.
    """
    sum_ = props if isinstance(props, float) else sum(props)
    props_ls = [props] if isinstance(props, float) else props
    if sum_ > 1 or any((not (0 <= prop <= 1)) for prop in props_ls):
        # Invalid proportions must be rejected.
        with pytest.raises(ValueError):
            splits = prop_random_split(dataset=dummy_ds, props=props)
    else:
        splits = prop_random_split(dataset=dummy_ds, props=props)
        sizes = [len(split) for split in splits]
        sum_sizes = sum(sizes)
        # One extra split holds whatever remains after the requested fractions.
        assert len(splits) == (len(props_ls) + 1)
        assert sum_sizes == len(dummy_ds)
        assert sizes[-1] == (len(dummy_ds) - (round(sum_ * len(dummy_ds))))
| StarcoderdataPython |
11279068 | <reponame>extroot/yandexLyceumPlus<gh_stars>1-10
from catalog.forms import StarForm
from catalog.models import Category, Item
from django.db.models import Avg, Count
from django.shortcuts import get_object_or_404, redirect, render
from django.views import View
from rating.models import Rating
class ItemListView(View):
    """Catalog front page: renders every published category."""

    template_name = 'catalog/item_list.html'

    def get(self, request):
        """Return the category-list page."""
        context = {'categories': Category.objects.published_category()}
        return render(request, self.template_name, context)
class ItemDetailView(View):
    """Product detail page.

    GET shows the item with its aggregate star rating and the current
    user's own rating; POST records or updates the user's star vote.
    """

    template_name = 'catalog/item_detail.html'

    def get(self, request, id_product):
        item = Item.objects.get_item(id_product)
        # Aggregate over real votes only -- a star of 0 means "not rated".
        stars = item.ratings.exclude(star=0).aggregate(
            Avg('star'), Count('star')
        )
        star_user = 0
        if request.user.is_authenticated:
            star_user = Rating.objects.get_user_star(item, request.user)
        context = {
            'item': item,
            'stars': stars,
            'star_user': star_user,
            'form': StarForm()
        }
        return render(request, self.template_name, context)

    def post(self, request, id_product):
        # get_object_or_404 is used deliberately because the object's related
        # data is not needed here.  (Comment translated from Russian.)
        item = get_object_or_404(Item, pk=id_product, is_published=True)
        form = StarForm(request.POST)
        # Invalid forms / anonymous users are silently ignored: we just
        # redirect back to the detail page either way.
        if form.is_valid() and request.user.is_authenticated:
            Rating.objects.update_or_create(
                item=item,
                user=request.user,
                defaults={'star': form.cleaned_data['star']}
            )
        return redirect('item_detail', id_product)
| StarcoderdataPython |
1821957 | <reponame>KnowledgeLinks/rdfframework<filename>rdfframework/datamanager/defmanager.py
import os
import logging
import requests
import urllib
import datetime
import pdb
from dateutil.parser import parse as date_parse
from rdfframework.connections import ConnManager
from rdfframework.datatypes import RdfNsManager
from rdfframework.configuration import RdfConfigManager
from rdfframework.utilities import make_list
from .datamanager import DataFileManager
# Module-level singletons shared across the rdfframework.
__CONNS__ = ConnManager()
__CFG__ = RdfConfigManager()
__NSM__ = RdfNsManager()
class DefManagerMeta(type):
    """ Metaclass ensures that there is only one instance of the RdfConnManager
    """
    # Singleton registry: maps each class using this metaclass to its
    # single living instance.
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(DefManagerMeta,
                                        cls).__call__(*args, **kwargs)
        else:
            # Subsequent calls reconfigure the existing instance rather than
            # creating a new one.
            values = None
            if kwargs.get("conn"):
                cls._instances[cls].conn = kwargs['conn']
            if args:
                values = args[0]
            elif 'rdf_defs' in kwargs:
                # NOTE(review): the check is for 'rdf_defs' but the value read
                # is kwargs['vocabularies'] -- this raises KeyError unless both
                # keywords are passed; confirm which key is intended.
                values = kwargs['vocabularies']
            if values:
                cls._instances[cls].load(values, **kwargs)
        return cls._instances[cls]

    def __init__(self, *args, **kwargs):
        pass

    def clear(cls):
        # Drop all cached instances, forcing fresh construction on next call.
        cls._instances = {}
class DefinitionManager(DataFileManager, metaclass=DefManagerMeta):
    """
    RDF Vocabulary Manager. This class manages all of the RDF vocabulary
    for the rdfframework
    """
    log_level = logging.INFO
    is_initialized = False
    # Package-local directory holding bundled vocabulary files.
    vocab_dir = os.path.join(os.path.split(os.path.realpath(__file__))[0],
                             "vocabularies")
    # Known vocabularies: prefix -> cache filename, download URL(s) and
    # namespace URI.
    vocab_map = {
        "rdf": {
            "filename": "rdf.ttl",
            "download": "https://www.w3.org/1999/02/22-rdf-syntax-ns#",
            "namespace": "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
        },
        "owl": {
            "filename": "owl.ttl",
            "download": "http://www.w3.org/2002/07/owl#",
            "namespace": "http://www.w3.org/2002/07/owl#"
        },
        "schema": {
            "filename": "schema.nt",
            "download": "http://schema.org/version/latest/schema.nt",
            "namespace": "http://schema.org/"
        },
        "rdfs": {
            "filename": "rdfs.ttl",
            "download": "https://www.w3.org/2000/01/rdf-schema#",
            "namespace": "http://www.w3.org/2000/01/rdf-schema#"
        },
        "skos": {
            "filename": "skos.rdf",
            "namespace": "http://www.w3.org/2004/02/skos/core#",
            "download": "https://www.w3.org/2009/08/skos-reference/skos.rdf"
        },
        "dc": {
            "filename": "dc.ttl",
            "namespace": "http://purl.org/dc/elements/1.1/",
            "download": ["http://purl.org/dc/elements/1.1/",
                         "http://dublincore.org/2012/06/14/dcelements"]
        },
        "dcterm": {
            "filename": "dcterm.ttl",
            "download": ["http://purl.org/dc/terms/",
                         "http://dublincore.org/2012/06/14/dcterms"],
            "namespace": "http://purl.org/dc/terms/"
        },
        "void": {
            "filename": "void.ttl",
            "namespace": "http://rdfs.org/ns/void#",
            "download": "http://vocab.deri.ie/void.ttl"
        },
        "adms": {
            "filename": "adms.ttl",
            "namespace": "https://www.w3.org/ns/adms#",
            "download": "https://www.w3.org/ns/adms#"
        },
        "vcard": {
            "filename": "vcard.ttl",
            "namespace": "http://www.w3.org/2006/vcard/ns#",
            "download": "https://www.w3.org/2006/vcard/ns#"
        },
        "foaf": {
            "filename": "foaf.rdf",
            "namespace": "http://xmlns.com/foaf/0.1/",
            "download": "http://xmlns.com/foaf/spec/20140114.rdf"
        },
        "bf": {
            "filename": "bf.rdf",
            "namespace": "http://id.loc.gov/ontologies/bibframe/",
            "download": "http://id.loc.gov/ontologies/bibframe.rdf"
        }
    }

    def __init__(self, file_locations=[], conn=None, **kwargs):
        """Bind all known vocabulary namespaces and, when a connection is
        available, initialize and load the supplied file locations.

        NOTE(review): ``file_locations=[]`` is a mutable default argument,
        shared across calls; safe only if never mutated -- confirm.
        """
        # add all namespaces to the RdfNsManager to ensure that there are no
        # conflicts with the config file
        [__NSM__.bind(prefix, val['namespace'], override=False, calc=False)
         for prefix, val in self.vocab_map.items()]
        self.conn = None
        if not conn:
            conn = kwargs.get("conn", __CONNS__.active_defs)
        if conn:
            super(DefinitionManager, self).__init__(file_locations,
                                                    conn,
                                                    **kwargs)
            if self.__file_locations__:
                self.load(self.__file_locations__, **kwargs)
        else:
            # No connection yet: just record the locations for a later load.
            self.add_file_locations(file_locations)

    def __get_conn__(self, **kwargs):
        """Return the connection to use: ``kwargs['conn']`` when given,
        otherwise the cached/active definitions connection."""
        if not self.conn:
            self.conn = kwargs.get("conn", __CONNS__.active_defs)
        return kwargs.get("conn", self.conn)

    def load(self, file_locations=[], **kwargs):
        """ Loads the file_locations into the triplestores

        args:
            file_locations: list of tuples to load
                [('vocabularies', [list of vocabs to load])
                 ('directory', '/directory/path')
                 ('filepath', '/path/to/a/file')
                 ('package_all', 'name.of.a.package.with.defs')
                 ('package_file', 'name.of.package', 'filename')]
            custom: list of custom definitions to load
        """
        self.__set_cache_dir__(**kwargs)
        conn = self.__get_conn__(**kwargs)
        self.set_load_state(**kwargs)
        super(DefinitionManager, self).load(file_locations, **kwargs)
        if not file_locations:
            file_locations = self.__file_locations__
        if file_locations:
            # NOTE(review): 'log' is not defined in the code visible here;
            # presumably a module-level logger -- confirm.
            log.info("loading vocabs into conn '%s'", conn)
            for item in file_locations:
                if item[0] == 'vocabularies':
                    vocabs = item[1]
                    # "all" is shorthand for every entry in vocab_map.
                    if item[1] == "all":
                        vocabs = self.vocab_map
                    for vocab in vocabs:
                        self.load_vocab(vocab)
        self.loaded_files(reset=True)
        self.loaded_times = self.load_times(**kwargs)

    def __set_cache_dir__(self, cache_dirs=[], **kwargs):
        """ sets the cache directory by test write permissions for various
        locations

        args:
            directories: list of directories to test. First one with read-write
                    permissions is selected.
        """
        # add a path for a subfolder 'vocabularies'
        test_dirs = [self.vocab_dir] + cache_dirs
        try:
            test_dirs += [os.path.join(__CFG__.CACHE_DATA_PATH,
                                       "vocabularies")]
        except (RuntimeWarning, TypeError):
            # CACHE_DATA_PATH may be unset in the configuration; skip it.
            pass
        super(DefinitionManager, self).__set_cache_dir__(test_dirs, **kwargs)

    def load_vocab(self, vocab_name, **kwargs):
        """ loads a vocabulary into the defintion triplestore

        args:
            vocab_name: the prefix, uri or filename of a vocabulary
        """
        log.setLevel(kwargs.get("log_level", self.log_level))
        vocab = self.get_vocab(vocab_name, **kwargs)
        if vocab['filename'] in self.loaded:
            # Reload only when the file on disk is newer than what was
            # previously loaded; otherwise there is nothing to do.
            if self.loaded_times.get(vocab['filename'],
                                     datetime.datetime(2001, 1, 1)).timestamp() \
                    < vocab['modified']:
                self.drop_file(vocab['filename'], **kwargs)
            else:
                return
        conn = kwargs.get("conn", self.conn)
        conn.load_data(graph=getattr(__NSM__.kdr, vocab['filename']).clean_uri,
                       data=vocab['data'],
                       datatype=vocab['filename'].split(".")[-1],
                       log_level=logging.WARNING)
        self.__update_time__(vocab['filename'], **kwargs)
        log.warning("\n\tvocab: '%s' loaded \n\tconn: '%s'",
                    vocab['filename'],
                    conn)
        self.loaded.append(vocab['filename'])

    def __get_vocab_dict__(self, vocab_name, **kwargs):
        """ dictionary for the specified vocabulary

        args:
            vocab_name: the name or uri of the vocab to return
        """
        try:
            vocab_dict = self.vocab_map[vocab_name].copy()
        except KeyError:
            # Fall back to matching by any of the entry's values
            # (namespace URI, download URL or filename).
            vocab_dict = {key: value for key, value in self.vocab_map.items()
                          if vocab_name in value.values()}
            vocab_name = list(vocab_dict)[0]
            vocab_dict = vocab_dict.pop(vocab_name)
        return vocab_dict

    def get_vocab(self, vocab_name, **kwargs):
        """ Returns data stream of an rdf vocabulary

        args:
            vocab_name: the name or uri of the vocab to return
        """
        vocab_dict = self.__get_vocab_dict__(vocab_name, **kwargs)
        filepaths = list(set([os.path.join(self.cache_dir,
                                           vocab_dict['filename']),
                              os.path.join(self.vocab_dir,
                                           vocab_dict['filename'])]))
        # Prefer a copy already on disk (cache dir or bundled vocab dir).
        for path in filepaths:
            if os.path.exists(path):
                with open(path, 'rb') as f_obj:
                    vocab_dict.update({"name": vocab_name,
                                       "data": f_obj.read(),
                                       "modified": os.path.getmtime(path)})
                return vocab_dict
        # Otherwise download from the first listed location and cache it.
        download_locs = make_list(vocab_dict.get('download', []))
        for loc in download_locs:
            loc_web = urllib.request.urlopen(loc)
            # loc_file_date = date_parse(loc_web.info()['Last-Modified'])
            urllib.request.urlretrieve(loc, filepaths[0])
            with open(filepaths[0], 'rb') as f_obj:
                vocab_dict.update({"name": vocab_name,
                                   "data": f_obj.read(),
                                   "modified": os.path.getmtime(filepaths[0])})
            return vocab_dict

    def drop_vocab(self, vocab_name, **kwargs):
        """ Removes the vocab from the definiton triplestore

        args:
            vocab_name: the name or uri of the vocab to return
        """
        vocab_dict = self.__get_vocab_dict__(vocab_name, **kwargs)
        return self.drop_file(vocab_dict['filename'], **kwargs)
| StarcoderdataPython |
1600981 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 claviering <<EMAIL>>
#
# Distributed under terms of the WTFPL license.
def reduceNum(n):
    """Print the prime factorization of *n*, e.g. ``90 = 2 * 3 * 3 * 5``.

    Invalid input (non-int or <= 0) prints ``input a number`` and returns
    instead of terminating the interpreter (the original Python 2 version
    called ``exit(0)``).  ``1`` is printed as ``1 = 1``.
    """
    if not isinstance(n, int) or n <= 0:
        print('input a number')
        return
    factors = []
    remainder = n
    if remainder == 1:
        factors.append(1)
    while remainder > 1:
        # Find the smallest divisor; it is necessarily prime.
        for divisor in range(2, remainder + 1):
            if remainder % divisor == 0:
                factors.append(divisor)
                remainder //= divisor
                break
    print('{} = {}'.format(n, ' * '.join(str(f) for f in factors)))


reduceNum(90)
| StarcoderdataPython |
import socket

# Bind address and maximum datagram size accepted per recvfrom call.
localIP = "127.0.0.1"
localPort = 20001
bufferSize = 1024

# Fixed reply payload sent back to every client.
msgFromServer = "Hello UDP Client"
bytesToSend = msgFromServer.encode()

# Create a datagram socket
UDPServerSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)

# Bind to address and ip
UDPServerSocket.bind((localIP, localPort))
print("UDP server up and listening")

# Listen for incoming datagrams (runs forever; terminate with Ctrl-C)
while(True):
    # recvfrom returns (payload bytes, (host, port)) for each datagram.
    bytesAddressPair = UDPServerSocket.recvfrom(bufferSize)
    message = bytesAddressPair[0]
    address = bytesAddressPair[1]
    clientMsg = "Message from Client:{}".format(message)
    clientIP = "Client IP Address:{}".format(address)
    print(clientMsg)
    print(clientIP)
    # Sending a reply to client
    UDPServerSocket.sendto(bytesToSend, address)
| StarcoderdataPython |
11329538 | <filename>rackio_AI/_temporal.py
from easy_deco.del_temp_attr import del_temp_attr, set_to_methods
@set_to_methods(del_temp_attr)
class TemporalMeta:
    """
    The Singleton class can be implemented in different ways in Python. Some
    possible methods include: base class, decorator, metaclass. We will use the
    metaclass because it is best suited for this purpose.
    """
    # Registry of every instance ever created.  NOTE(review): presumably
    # consumed by del_temp_attr (applied to all methods by the class
    # decorator) to clear temporary attributes -- confirm against easy_deco.
    _instances = list()

    def __new__(cls):
        inst = super(TemporalMeta, cls).__new__(cls)
        # Track the new instance in the class-level registry.
        cls._instances.append(inst)
        return inst
| StarcoderdataPython |
11321966 | # testing/assertsql.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
import collections
import contextlib
import re
from .. import event
from ..engine import url
from ..engine.default import DefaultDialect
from ..schema import _DDLCompiles
class AssertRule:
    """Base class for SQL assertion rules.

    A rule inspects observed executions one at a time; it marks itself
    ``is_consumed`` once satisfied, or records an ``errormessage``.
    """

    is_consumed = False
    errormessage = None
    consume_statement = True

    def process_statement(self, execute_observed):
        """Inspect one observed execution; the base rule accepts anything."""

    def no_more_statements(self):
        """Fail: executions ran out while this rule was still pending."""
        assert False, (
            "All statements are complete, but pending "
            "assertion rules remain"
        )
class SQLMatchRule(AssertRule):
    """Marker base class for rules that match SQL statement text."""
    pass
class CursorSQL(SQLMatchRule):
    """Match one cursor-level statement string (and optionally its
    parameters) exactly, with no dialect re-compilation."""

    def __init__(self, statement, params=None, consume_statement=True):
        self.statement = statement
        self.params = params
        self.consume_statement = consume_statement

    def process_statement(self, execute_observed):
        # Compare against the first not-yet-consumed cursor statement of
        # this observed execution.
        stmt = execute_observed.statements[0]
        if self.statement != stmt.statement or (
            self.params is not None and self.params != stmt.parameters
        ):
            self.errormessage = (
                "Testing for exact SQL %s parameters %s received %s %s"
                % (
                    self.statement,
                    self.params,
                    stmt.statement,
                    stmt.parameters,
                )
            )
        else:
            execute_observed.statements.pop(0)
            self.is_consumed = True
            if not execute_observed.statements:
                # Nothing left in this execution; let the harness advance.
                self.consume_statement = True
class CompiledSQL(SQLMatchRule):
    """Match a Core statement by re-compiling the observed clause element
    against a target dialect (DefaultDialect unless specified) and
    comparing SQL text and bound parameters."""

    def __init__(self, statement, params=None, dialect="default"):
        self.statement = statement
        self.params = params
        self.dialect = dialect

    def _compare_sql(self, execute_observed, received_statement):
        # Strip newlines/tabs from the expected SQL before exact comparison.
        stmt = re.sub(r"[\n\t]", "", self.statement)
        return received_statement == stmt

    def _compile_dialect(self, execute_observed):
        """Return the dialect used to re-compile the observed statement."""
        if self.dialect == "default":
            dialect = DefaultDialect()
            # this is currently what tests are expecting
            # dialect.supports_default_values = True
            dialect.supports_default_metavalue = True
            return dialect
        else:
            # ugh
            if self.dialect == "postgresql":
                params = {"implicit_returning": True}
            else:
                params = {}
            return url.URL.create(self.dialect).get_dialect()(**params)

    def _received_statement(self, execute_observed):
        """reconstruct the statement and params in terms
        of a target dialect, which for CompiledSQL is just DefaultDialect."""

        context = execute_observed.context
        compare_dialect = self._compile_dialect(execute_observed)

        # received_statement runs a full compile(). we should not need to
        # consider extracted_parameters; if we do this indicates some state
        # is being sent from a previous cached query, which some misbehaviors
        # in the ORM can cause, see #6881
        cache_key = None  # execute_observed.context.compiled.cache_key
        extracted_parameters = (
            None  # execute_observed.context.extracted_parameters
        )

        if "schema_translate_map" in context.execution_options:
            map_ = context.execution_options["schema_translate_map"]
        else:
            map_ = None

        # DDL constructs compile without the statement-level keyword
        # arguments that DML/queries require.
        if isinstance(execute_observed.clauseelement, _DDLCompiles):
            compiled = execute_observed.clauseelement.compile(
                dialect=compare_dialect,
                schema_translate_map=map_,
            )
        else:
            compiled = execute_observed.clauseelement.compile(
                cache_key=cache_key,
                dialect=compare_dialect,
                column_keys=context.compiled.column_keys,
                for_executemany=context.compiled.for_executemany,
                schema_translate_map=map_,
            )
        _received_statement = re.sub(r"[\n\t]", "", str(compiled))
        parameters = execute_observed.parameters

        # Construct the effective parameter dicts: either the compiled
        # defaults, or one dict per supplied parameter set.
        if not parameters:
            _received_parameters = [
                compiled.construct_params(
                    extracted_parameters=extracted_parameters
                )
            ]
        else:
            _received_parameters = [
                compiled.construct_params(
                    m, extracted_parameters=extracted_parameters
                )
                for m in parameters
            ]

        return _received_statement, _received_parameters

    def process_statement(self, execute_observed):
        context = execute_observed.context

        _received_statement, _received_parameters = self._received_statement(
            execute_observed
        )
        params = self._all_params(context)

        equivalent = self._compare_sql(execute_observed, _received_statement)

        if equivalent:
            if params is not None:
                # Each expected param dict must positively match one of the
                # received dicts (subset match); matched entries are removed
                # so each received dict can satisfy only one expectation.
                all_params = list(params)
                all_received = list(_received_parameters)
                while all_params and all_received:
                    param = dict(all_params.pop(0))

                    for idx, received in enumerate(list(all_received)):
                        # do a positive compare only
                        for param_key in param:
                            # a key in param did not match current
                            # 'received'
                            if (
                                param_key not in received
                                or received[param_key] != param[param_key]
                            ):
                                break
                        else:
                            # all keys in param matched 'received';
                            # onto next param
                            del all_received[idx]
                            break
                    else:
                        # param did not match any entry
                        # in all_received
                        equivalent = False
                        break

                if all_params or all_received:
                    equivalent = False

        if equivalent:
            self.is_consumed = True
            self.errormessage = None
        else:
            self.errormessage = self._failure_message(
                execute_observed, params
            ) % {
                "received_statement": _received_statement,
                "received_parameters": _received_parameters,
            }

    def _all_params(self, context):
        """Normalise ``self.params`` (possibly a callable or a single dict)
        into a list of dicts, or ``None`` when no params were specified."""
        if self.params:
            if callable(self.params):
                params = self.params(context)
            else:
                params = self.params
            if not isinstance(params, list):
                params = [params]
            return params
        else:
            return None

    def _failure_message(self, execute_observed, expected_params):
        # '%%(...)r' placeholders survive this interpolation and are filled
        # in later by process_statement.
        return (
            "Testing for compiled statement\n%r partial params %s, "
            "received\n%%(received_statement)r with params "
            "%%(received_parameters)r"
            % (
                self.statement.replace("%", "%%"),
                repr(expected_params).replace("%", "%%"),
            )
        )
class RegexSQL(CompiledSQL):
    """Like CompiledSQL but matches the compiled SQL with a regex."""

    def __init__(self, regex, params=None, dialect="default"):
        SQLMatchRule.__init__(self)
        self.regex = re.compile(regex)
        # Original pattern text kept for failure messages.
        self.orig_regex = regex
        self.params = params
        self.dialect = dialect

    def _failure_message(self, execute_observed, expected_params):
        return (
            "Testing for compiled statement ~%r partial params %s, "
            "received %%(received_statement)r with params "
            "%%(received_parameters)r"
            % (
                self.orig_regex.replace("%", "%%"),
                repr(expected_params).replace("%", "%%"),
            )
        )

    def _compare_sql(self, execute_observed, received_statement):
        # Anchored at the start only (re.match, not fullmatch).
        return bool(self.regex.match(received_statement))
class DialectSQL(CompiledSQL):
    """Like CompiledSQL but compares against the *actual* dialect in use,
    rewriting ``:name`` placeholders to match its paramstyle."""

    def _compile_dialect(self, execute_observed):
        return execute_observed.context.dialect

    def _compare_no_space(self, real_stmt, received_stmt):
        # Compare with newlines/tabs stripped from the real statement.
        stmt = re.sub(r"[\n\t]", "", real_stmt)
        return received_stmt == stmt

    def _received_statement(self, execute_observed):
        received_stmt, received_params = super(
            DialectSQL, self
        )._received_statement(execute_observed)

        # TODO: why do we need this part?
        for real_stmt in execute_observed.statements:
            if self._compare_no_space(real_stmt.statement, received_stmt):
                break
        else:
            raise AssertionError(
                "Can't locate compiled statement %r in list of "
                "statements actually invoked" % received_stmt
            )

        return received_stmt, execute_observed.context.compiled_parameters

    def _dialect_adjusted_statement(self, paramstyle):
        """Rewrite the expected SQL's ``:name`` placeholders into the
        dialect's paramstyle (pyformat, qmark, format, numeric)."""
        stmt = re.sub(r"[\n\t]", "", self.statement)

        # temporarily escape out PG double colons
        stmt = stmt.replace("::", "!!")

        if paramstyle == "pyformat":
            stmt = re.sub(r":([\w_]+)", r"%(\1)s", stmt)
        else:
            # positional params
            repl = None
            if paramstyle == "qmark":
                repl = "?"
            elif paramstyle == "format":
                repl = r"%s"
            elif paramstyle == "numeric":
                # NOTE(review): repl stays None here, and re.sub with a None
                # replacement raises TypeError -- confirm whether 'numeric'
                # is intentionally unsupported.
                repl = None
            stmt = re.sub(r":([\w_]+)", repl, stmt)

        # put them back
        stmt = stmt.replace("!!", "::")
        return stmt

    def _compare_sql(self, execute_observed, received_statement):
        paramstyle = execute_observed.context.dialect.paramstyle
        stmt = self._dialect_adjusted_statement(paramstyle)
        return received_statement == stmt

    def _failure_message(self, execute_observed, expected_params):
        paramstyle = execute_observed.context.dialect.paramstyle
        return (
            "Testing for compiled statement\n%r partial params %s, "
            "received\n%%(received_statement)r with params "
            "%%(received_parameters)r"
            % (
                self._dialect_adjusted_statement(paramstyle).replace(
                    "%", "%%"
                ),
                repr(expected_params).replace("%", "%%"),
            )
        )
class CountStatements(AssertRule):
    """Assert that exactly ``count`` executions were observed in total."""

    def __init__(self, count):
        self.count = count
        self._statement_count = 0

    def process_statement(self, execute_observed):
        # Every observed execution bumps the tally; nothing is matched here.
        self._statement_count += 1

    def no_more_statements(self):
        # Verified only at the end, once all executions have been seen.
        if self._statement_count != self.count:
            assert False, "desired statement count %d does not match %d" % (
                self.count,
                self._statement_count,
            )
class AllOf(AssertRule):
    """Consumed once every child rule has been satisfied, in any order."""

    def __init__(self, *rules):
        self.rules = set(rules)

    def process_statement(self, execute_observed):
        for rule in list(self.rules):
            rule.errormessage = None
            rule.process_statement(execute_observed)
            if rule.is_consumed:
                self.rules.discard(rule)
                if not self.rules:
                    self.is_consumed = True
                break
            elif not rule.errormessage:
                # rule is not done yet
                self.errormessage = None
                break
        else:
            # No child rule accepted this execution; surface one failure.
            self.errormessage = list(self.rules)[0].errormessage
class EachOf(AssertRule):
    """Child rules must be satisfied strictly in order."""

    def __init__(self, *rules):
        self.rules = list(rules)

    def process_statement(self, execute_observed):
        while self.rules:
            rule = self.rules[0]
            rule.process_statement(execute_observed)
            if rule.is_consumed:
                self.rules.pop(0)
            elif rule.errormessage:
                self.errormessage = rule.errormessage
            # A rule that does not consume the statement lets the next
            # rule inspect the same execution.
            if rule.consume_statement:
                break
        if not self.rules:
            self.is_consumed = True

    def no_more_statements(self):
        # Delegate to the first pending child so its message is reported.
        if self.rules and not self.rules[0].is_consumed:
            self.rules[0].no_more_statements()
        elif self.rules:
            super(EachOf, self).no_more_statements()
class Conditional(EachOf):
    """EachOf over ``rules`` when ``condition`` is true, else ``else_rules``."""

    def __init__(self, condition, rules, else_rules):
        chosen = rules if condition else else_rules
        super(Conditional, self).__init__(*chosen)
class Or(AllOf):
    """Consumed as soon as ANY child rule is satisfied."""

    def process_statement(self, execute_observed):
        for rule in self.rules:
            rule.process_statement(execute_observed)
            if rule.is_consumed:
                self.is_consumed = True
                break
        else:
            # No alternative matched; report one child's failure message.
            self.errormessage = list(self.rules)[0].errormessage
class SQLExecuteObserved:
    """One observed Core/ORM execution plus its cursor-level statements."""

    def __init__(self, context, clauseelement, multiparams, params):
        self.context = context
        self.clauseelement = clauseelement
        # Normalise the two possible parameter styles into a single list:
        # executemany-style multiparams win, a single dict is wrapped,
        # and no parameters becomes an empty list.
        if multiparams:
            normalized = multiparams
        elif params:
            normalized = [params]
        else:
            normalized = []
        self.parameters = normalized
        # Filled in afterwards with SQLCursorExecuteObserved records.
        self.statements = []

    def __repr__(self):
        return str(self.statements)
class SQLCursorExecuteObserved(
    collections.namedtuple(
        "SQLCursorExecuteObserved",
        ["statement", "parameters", "context", "executemany"],
    )
):
    """Immutable record of one DBAPI-level cursor execution."""
    pass
class SQLAsserter:
    """Accumulates observed executions and verifies them against rules."""

    def __init__(self):
        self.accumulated = []

    def _close(self):
        # Freeze the recording; further accumulation would be a bug.
        self._final = self.accumulated
        del self.accumulated

    def assert_(self, *rules):
        """Check the recorded executions against *rules*, in order."""
        rule = EachOf(*rules)
        observed = list(self._final)
        while observed:
            rule.process_statement(observed.pop(0))
            if rule.is_consumed:
                break
            elif rule.errormessage:
                assert False, rule.errormessage
        if observed:
            assert False, "Additional SQL statements remain:\n%s" % observed
        elif not rule.is_consumed:
            rule.no_more_statements()
@contextlib.contextmanager
def assert_engine(engine):
    """Context manager that records every execution on *engine* and yields
    a SQLAsserter for verifying them afterwards.  Listeners are always
    removed and the recording frozen on exit."""
    asserter = SQLAsserter()

    orig = []

    @event.listens_for(engine, "before_execute")
    def connection_execute(
        conn, clauseelement, multiparams, params, execution_options
    ):
        # grab the original statement + params before any cursor
        # execution
        orig[:] = clauseelement, multiparams, params

    @event.listens_for(engine, "after_cursor_execute")
    def cursor_execute(
        conn, cursor, statement, parameters, context, executemany
    ):
        if not context:
            return

        # then grab real cursor statements and associate them all
        # around a single context
        if (
            asserter.accumulated
            and asserter.accumulated[-1].context is context
        ):
            obs = asserter.accumulated[-1]
        else:
            obs = SQLExecuteObserved(context, orig[0], orig[1], orig[2])
            asserter.accumulated.append(obs)

        obs.statements.append(
            SQLCursorExecuteObserved(
                statement, parameters, context, executemany
            )
        )

    try:
        yield asserter
    finally:
        # Detach listeners and freeze the recording even on error.
        event.remove(engine, "after_cursor_execute", cursor_execute)
        event.remove(engine, "before_execute", connection_execute)
        asserter._close()
| StarcoderdataPython |
6540565 | <reponame>Natsurii/nicabot-monkee
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Nekozilla is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nekozilla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nekozilla. If not, see <https://www.gnu.org/licenses/>.
"""
Discord service status.
Ported from Nekozilla V1
"""
import asyncio
import datetime
import re
import typing
import aiohttp
import neko3.cog
from neko3 import embeds
from neko3 import neko_commands
from neko3 import pagination
# Discord status-page API root and version segment used by get_endpoint.
endpoint_base = "https://status.discordapp.com/api"
api_version = "v2"
# Max fields per page on short pages
max_fields = 4
class ListMix(list):
    """List subclass whose ``+=`` appends its operand as ONE element.

    Lets call sites write ``msg += line`` instead of ``msg.append(line)``.
    Knowingly unidiomatic (``+=`` on a plain list extends).
    """

    def __iadd__(self, other):
        self.append(other)
        return self
def get_endpoint(page_name):
    """Produces the endpoint URL.

    :param page_name: JSON page name, e.g. ``"summary.json"``.
    :return: full URL under the versioned status-API root.
    """
    return f"{endpoint_base}/{api_version}/{page_name}"
def get_impact_color(impact, is_global=False):
    """Map a status-page impact string (case-insensitive) to an embed colour.

    ``"none"`` is green when *is_global* else black; unknown impacts are black.
    """
    colors = {
        "none": 0x00FF00 if is_global else 0x0,
        "minor": 0xFF0000,
        "major": 0xFFA500,
        "critical": 0xFF0000,
    }
    return colors.get(impact.lower(), 0x0)
def find_highest_impact(entries):
    """Return the most severe impact present in *entries*, title-cased.

    :param entries: iterable of status-page dicts each carrying an
        ``"impact"`` key (compared case-insensitively).
    :return: e.g. ``"Critical"``; falls back to the literal ``"none"``
        when no entry matches any known severity.
    """
    # Severities in descending order of severity; first match wins.
    # (A stray debug print(entries) was removed here.)
    for state in ("critical", "major", "minor", "none"):
        for entry in entries:
            if entry["impact"].lower() == state:
                return state.title()
    return "none"
def make_incident_update_body(recent_update):
    """Format a single incident update dict as a markdown fragment.

    Shows the title-cased status, the update/creation time when present,
    and the update body text.
    """
    updated_at = recent_update.get("updated at")
    created_at = recent_update.get("created at")
    text = "\n_**" + recent_update.get("status").title() + "**_"
    # Prefer the update timestamp; fall back to the creation timestamp.
    if updated_at:
        text += f" - Last updated at {friendly_date(updated_at)}"
    elif created_at:
        text += f" - Created at {friendly_date(created_at)}"
    text += f"\n{recent_update.get('body')}\n"
    return "\n".join(text.split("\n"))
def make_incident_body(incident):
    """Build the markdown body describing one incident dict from the
    status-page API: name/link, affected components, status, timestamps
    and any attached updates."""
    created = friendly_date(incident["created at"])
    updated = incident.get("updated at")
    updated = friendly_date(updated) if updated else "N/A"
    monitoring = incident.get("monitoring at")
    monitoring = friendly_date(monitoring) if monitoring else "N/A"
    url = incident.get("shortlink", "https://status.discordapp.com")
    # Comma-separated affected component names, or "nothing".
    affects = ", ".join(component["name"] for component in incident.get("components")) or "nothing"

    recent_updates = incident.get("updates")
    ru_message = ""
    if recent_updates:
        ru_message = f"Updates:\n"
        for recent_update in recent_updates:
            ru_message += make_incident_update_body(recent_update)

    return (
        f"_**[{incident['name']}]({url})**_\n\n"
        f"Affects: `{affects}`\n"
        f"Status: `{incident['status']}`\n"
        f"Created: `{created}`\n"
        f"Updated: `{updated}`\n"
        f"Monitoring: `{monitoring}`\n"
        f"{ru_message if recent_updates else 'No updates yet.'}"
    )
def parse_timestamp(timestamp):
    """Parse Discord's status-page timestamp into an aware UTC datetime.

    Accepts ``YYYY-mm-ddTHH:MM:SS.sss(sss)?[+-]hh:mm`` (a format strptime
    cannot digest directly) and returns the equivalent datetime converted
    to UTC.  ``None`` passes through as ``None``.
    """
    if timestamp is None:
        return None

    # Drop the separators strptime can't cope with: periods, T and colons.
    flat = re.sub(r"[:.T]", "", timestamp, flags=re.I)

    # Split off the numeric timezone suffix, keeping its sign.
    if "+" in flat:
        stamp, offset = flat.rsplit("+", maxsplit=1)
        offset = "+" + offset
    else:
        stamp, offset = flat.rsplit("-", maxsplit=1)
        offset = "-" + offset

    # The date hyphens were preserved above (so the timezone split stayed
    # unambiguous); remove them now.
    stamp = stamp.replace("-", "")

    # Zero-pad the fractional seconds out to microsecond precision.
    full_len = len("YYYYmmddHHMMSSssssss")
    stamp = stamp + ("0" * (full_len - len(stamp)))

    parsed = datetime.datetime.strptime(stamp + offset, "%Y%m%d%H%M%S%f%z")
    return parsed.astimezone(datetime.timezone.utc)
def friendly_date(value: datetime.datetime):
    """Format a datetime as e.g. ``02 January 2019 at 14:30 UTC``.

    ``None`` renders as ``"N/A"`` so callers can pass optional fields
    straight through.
    """
    return "N/A" if value is None else value.strftime("%d %B %Y at %H:%M %Z")
class DiscordServiceStatusCog(neko3.cog.CogBase):
    """
    Holds the service status command.

    Pulls the statuspage.io JSON feeds for discordapp.com (summary,
    components, incidents, scheduled maintenances) and renders them as
    a paginated embed.
    """
    @neko_commands.command(name="discord", aliases=["discordstatus"], brief="Check if Discord is down (again)")
    async def discord_status_command(self, ctx):
        """
        Gets a list of all Discord systems, and their service
        status.
        """
        async with ctx.message.channel.typing():
            # Hit all four status endpoints concurrently.
            stat_res, comp_res, inc_res, sms_res = await asyncio.gather(
                self._get(get_endpoint("summary.json")),
                self._get(get_endpoint("components.json")),
                self._get(get_endpoint("incidents.json")),
                self._get(get_endpoint("scheduled-maintenances.json")),
            )
            # Parse the raw payloads, again concurrently.
            # NOTE(review): `sms` is gathered but never rendered below.
            status, components, incidents, sms = await asyncio.gather(
                self.get_status(stat_res),
                self.get_components(comp_res),
                self.get_incidents(inc_res),
                self.get_scheduled_maintenances(sms_res),
            )
            footer_text = status["indicator"]
            # Embed factory: colour reflects the worst unresolved incident,
            # falling back to the overall status colour when none are open.
            @pagination.embed_generator(max_chars=1100)
            def factory(_, page, __):
                if not incidents["unresolved"]:
                    color = status["color"]
                else:
                    color = get_impact_color(find_highest_impact(incidents["unresolved"]))
                e = embeds.Embed(
                    colour=color, title="API Status for discordapp.com", description=page, url=status["url"]
                )
                if footer_text != "None":
                    e.set_footer(text=footer_text[:2000])
                return e
            nav = pagination.EmbedNavigatorFactory(factory=factory, max_lines=25)
            # Make the front page, if needed.
            headline = status["indicator"]
            if str(headline) != "None":
                nav.add_block(f"**{headline}**\n")
            nav.add_block(f'{status["description"]}\n\n' f'Last updated: {friendly_date(status["updated_at"])}.')
            nav.add_page_break()
            # Page 2: headline card for the most recent unresolved incident.
            if incidents["unresolved"]:
                first = incidents["unresolved"][0]
                name = first["name"]
                body = make_incident_body(first)
                nav.add_block(f"\n**{name}**\n{body}\n")
                nav.add_page_break()
            """
            PAGE 3
            ======
            Incidents.
            """
            if incidents["unresolved"]:
                nav.add_block("**__UNRESOLVED INCIDENTS__**\n")
                incident = incidents["unresolved"][0]
                name = f'**{incident["name"]}**'
                desc = make_incident_body(incident)
                nav.add_block(name + "\n" + desc.strip())
                # At most two further unresolved incidents after the first.
                for incident in incidents["unresolved"][1:3]:
                    body = make_incident_body(incident)
                    name = incident["name"]
                    body = name + "\n" + body
                    nav.add_block(body.strip())
                nav.add_page_break()
            nav.add_block("**__RESOLVED INCIDENTS__**\n")
            # Add six most recent.
            for incident in incidents["resolved"][:6]:
                body = make_incident_body(incident)
                nav.add_block(body)
                nav.add_line()
            nav.add_page_break()
            nav.add_block("**__PRIMARY COMPONENTS__**\n")
            # NOTE(review): max_fields is not defined in this view of the
            # file; presumably a module-level constant — confirm.
            for i, component in enumerate(components["showcase"], start=1):
                if i and not (i % max_fields):
                    nav.add_page_break()
                title = component.pop("name")
                desc = []
                for k, v in component.items():
                    line = f"**{k}**: "
                    if isinstance(v, datetime.datetime):
                        line += friendly_date(v)
                    else:
                        line += str(v)
                    desc.append(line)
                desc = "\n".join(desc)
                nav.add_block(f"**{title}**\n{desc}\n")
            nav.add_page_break()
            """
            PAGE 5
            ======
            Non showcase components
            """
            nav.add_block("**__OTHER COMPONENTS__**\n")
            for i, component in enumerate(components["rest"], start=1):
                if i and not (i % max_fields):
                    nav.add_page_break()
                title = component.pop("name")
                desc = []
                for k, v in component.items():
                    if k == "components":
                        continue
                    line = f"{k}: "
                    if isinstance(v, datetime.datetime):
                        line += friendly_date(v)
                    else:
                        line += str(v)
                    desc.append(line)
                nav.add_block(f"\n**{title}**\n" + "\n".join(desc))
            # NOTE(review): not awaited — confirm EmbedNavigatorFactory.start
            # schedules its own task.
            nav.start(ctx)
    @classmethod
    async def _get(cls, *args, **kwargs):
        """GET a JSON document, raising for non-2xx responses.

        A fresh ClientSession per call keeps this self-contained at the
        cost of connection reuse.
        """
        async with aiohttp.ClientSession() as session:
            async with session.get(*args, **kwargs) as resp:
                resp.raise_for_status()
                return await resp.json()
    @staticmethod
    async def get_status(res) -> typing.Dict[str, typing.Any]:
        """
        Gets the short overall status of Discord.
        :param res: the http response.
        :return: a map of:
            description - str, None
            color - int
            indicator - str
            updated_at - datetime
            url - str
        """
        updated_at = res["page"]["updated_at"]
        updated_at = parse_timestamp(updated_at)
        return {
            "description": res["status"]["description"],
            "color": get_impact_color(res["status"]["indicator"], True),
            "indicator": res["status"]["indicator"].title(),
            "updated_at": updated_at,
            "url": res["page"]["url"],
        }
    @staticmethod
    async def get_components(res, hide_un_degraded=True) -> typing.Dict[str, typing.List]:
        """
        Gets the status of individual components of Discord.
        :param res: the http response.
        :param hide_un_degraded: defaults to true. If true, we respect the
            API's intent to hide any component marked true under
            "only_show_if_degraded" unless the component is actually
            degraded.
        :return: a dict containing two lists: 'showcase' and 'rest'.
            Both lists contain components, with fields:
            status - str
            name - str
            created_at - datetime
            updated_at - datetime
            description - str, None
        """
        # Anything that is not set to "showcase" belongs in the
        # rest list instead.
        showcase_result = []
        rest_result = []
        components: list = res["components"]
        for component in components:
            comp_dict = {}
            for k, v in component.items():
                # Skip these keys.
                if k in ("id", "page_id", "position", "group", "only_show_if_degraded", "showcase", "group_id"):
                    continue
                elif v is None:
                    continue
                friendly_key = k.replace("_", " ")
                # If a date/time
                if k in ("created_at", "updated_at"):
                    comp_dict[friendly_key] = parse_timestamp(v)
                elif k == "status":
                    # This is always formatted with underscores (enum really)
                    comp_dict[friendly_key] = v.replace("_", " ")
                else:
                    comp_dict[friendly_key] = v
            # Determine whether to skip the only-show-if-degraded element
            # if it is flagged as such.
            show_always = not component["only_show_if_degraded"]
            if not show_always:
                is_degraded = component["status"] != "operational"
                should_hide = not show_always and is_degraded
                # NOTE(review): this hides the component when it *is*
                # degraded, which looks inverted relative to the intent of
                # "only_show_if_degraded" described above — confirm.
                if hide_un_degraded and should_hide:
                    continue
            if component["showcase"]:
                showcase_result.append(comp_dict)
            else:
                rest_result.append(comp_dict)
        return {"showcase": showcase_result, "rest": rest_result}
    @classmethod
    async def get_incidents(cls, res) -> typing.Dict[str, typing.List]:
        """
        Gets a dict containing two keys: 'resolved' and 'unresolved'.
        These contain incidents and incident updates.
        Due to the quantity of information this returns, we only get the
        first 5, resolved. All unresolved are returned.
        :param res: the http response.
        """
        max_resolved = 5
        res = res["incidents"]
        unresolved = []
        resolved = []
        for inc in res:
            # Anything still being worked on counts as unresolved.
            if inc["status"] in ("investigating", "identified", "monitoring"):
                target = unresolved
            elif len(resolved) < max_resolved:
                target = resolved
            else:
                continue
            incident = {}
            for k, v in inc.items():
                if k in ("id", "page_id") or v is None:
                    continue
                friendly_key = k.replace("_", " ")
                if k in ("updated_at", "created_at", "monitoring_at"):
                    incident[friendly_key] = parse_timestamp(v)
                elif k == "incident_updates":
                    incident["updates"] = cls.__parse_incident_updates(v)
                elif k in ("impact", "status"):
                    incident[friendly_key] = v.replace("_", " ")
                else:
                    incident[friendly_key] = v
            target.append(incident)
        return {"resolved": resolved, "unresolved": unresolved}
    @staticmethod
    def __parse_incident_updates(v):
        # Parse incident updates.
        # Normalises each update dict: drops ids/opaque fields, parses
        # timestamps, and de-underscores the status enum.
        updates = []
        if v is None:
            return updates
        for up in v:
            update = {}
            for up_k, up_v in up.items():
                up_f_k = up_k.replace("_", " ")
                # Ignore custom_tweet and affected_components,
                # as we do not have any info on how these are
                # formatted...
                if (
                    up_k
                    in (
                        "id",
                        "incident_id",
                        "display_at",
                        "custom_tweet",
                        "affected_components",
                        "deliver_notifications",
                    )
                    or up_v is None
                ):
                    continue
                elif up_k in ("created_at", "updated_at"):
                    # The inner None check is redundant (None was skipped
                    # above) but kept for safety.
                    if up_v is None:
                        continue
                    else:
                        update[up_f_k] = parse_timestamp(up_v)
                elif up_k == "status":
                    update[up_f_k] = up_v.replace("_", " ")
                else:
                    update[up_f_k] = up_v
            updates.append(update)
        return updates
    @staticmethod
    async def __get_active_and_scheduled_maintenances(res):
        """
        We do not care about maintenances that are done with, but this contains
        a lot of irrelevant information, so I guess we should skip what we
        don't need now.
        :param res: the response to use.
        """
        res = res["scheduled_maintenances"]
        return [r for r in res if r.get("status", None) != "completed"]
        # test: return res
    @classmethod
    async def get_scheduled_maintenances(cls, res) -> typing.List[typing.Dict]:
        """
        Gets a list of active and scheduled maintenance events.
        :param res: the response to use.
        """
        in_events = await cls.__get_active_and_scheduled_maintenances(res)
        out_events = []
        for event in in_events:
            event_obj = {}
            for k, v in event.items():
                if k in ("id", "page_id", "shortlink") or v is None:
                    continue
                friendly_key = k.replace("_", " ")
                if k in ("created_at", "monitoring_at", "scheduled_for", "scheduled_until", "updated_at"):
                    event_obj[friendly_key] = parse_timestamp(v)
                elif k == "incident_updates":
                    event_obj["updates"] = cls.__parse_incident_updates(v)
                elif k in ("status", "impact"):
                    event_obj[friendly_key] = v.replace("_", " ")
                else:
                    event_obj[friendly_key] = v
            out_events.append(event_obj)
        return out_events
def setup(bot):
    """Extension entry point: register the service-status cog with the bot."""
    cog = DiscordServiceStatusCog(bot)
    bot.add_cog(cog)
| StarcoderdataPython |
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error #均方误差
from sklearn.metrics import mean_absolute_error #平方绝对误差
from sklearn.metrics import r2_score#R square
# Root directory holding the pre-built training features and labels.
IDIR = 'D://data//data//'
# NOTE(review): DataFrame.to_sparse() was removed in pandas 1.0 — this
# assumes pandas < 1.0; confirm, or migrate to astype(pd.SparseDtype(...)).
df_train = pd.read_csv(IDIR + 'train_feat.csv').fillna(0.).to_sparse()
print(df_train)
labels = np.load(IDIR + 'labels.npy')
print(labels)
# Hold out 20% for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(df_train, labels, test_size=0.2, random_state=2019)
# Free the full copies once split to reduce memory pressure.
del df_train
del labels
# [buy: 100, not-buy: 10]  (class-imbalance note)
# dt = DecisionTreeClassifier()
rfr = RandomForestRegressor(n_estimators=10, # number of trees
                            criterion="mse", # (y-y_hat)^2 — NOTE(review): renamed "squared_error" in sklearn >= 1.0
                            max_depth=10, # tree depth
                            min_samples_split=200,
                            min_samples_leaf=100,
                            min_weight_fraction_leaf=0.,
                            max_features="auto",
                            max_leaf_nodes=None,
                            bootstrap=True,
                            oob_score=False,
                            n_jobs=3,
                            random_state=None,
                            verbose=0,
                            warm_start=False)
rfr.fit(X_train, y_train)
# Per-feature importances learned by the fitted forest.
print(rfr.feature_importances_)
y_pred = rfr.predict(X_test)
# Manual MSE/RMSE first, then the sklearn equivalents as a cross-check.
mse_test = np.sum((y_pred - y_test) ** 2) / len(y_test)
print('mse : ', mse_test)
rmse_test = mse_test ** 0.5
print('Rmse : ', rmse_test)
print('mean_absolute_error: ', mean_absolute_error(y_test, y_pred))
print('mean_squared_error: ', mean_squared_error(y_test, y_pred))
print('r2_score: ', r2_score(y_test, y_pred))
| StarcoderdataPython |
3232072 | from __future__ import print_function
import unittest
from test.test_functions import Example
from typing import Any, List, Optional
from ddt import data, ddt, unpack
from aft import fuzzer
@ddt
class TestFuzzer(unittest.TestCase):
    """Data-driven tests for aft.fuzzer against test.test_functions fixtures."""
    @data(
        ("add_one", [1], 2, "Test getting a single function"),
        (
            "Example.add_some_stuff",
            [Example(1, 2, "sum is:"), 1, 2],
            "sum is: 6",
            "Test getting a method from a class",
        ),
    )
    @unpack
    def test_get_function(
        self,
        function_name, # type: str
        args, # type: List[Any]
        expected, # type: Any
        test_description, # type: str
    ):
        # type: (...) -> None
        """Tests that the correct functions are obtained programatically."""
        file_name = "test.test_functions"
        func = fuzzer.get_function(file_name, function_name)
        result = func(*args)
        self.assertTrue(result == expected, test_description)
    @data(
        (
            "Example.add_some_stuff",
            Example(1, 2.0, "sum is:"),
            [1, 2],
            "sum is: 6.0",
            "Test applying arguments to a nested function",
        ),
        (
            "Example.add_one_only_int_no_deps",
            Example(1, 2.0, "3"),
            [1],
            2,
            "Test applying arguments to a nested function no dependencies",
        ),
    )
    @unpack
    def test_class_func_app(
        self,
        function_name, # type: str
        class_instance, # type: Any
        args, # type: List[Any]
        expected, # type: Any
        test_description, # type: str
    ):
        # type: (...) -> None
        # Applies args to a bound method via fuzzer.class_func_app and
        # checks the result.
        file_name = "test.test_functions"
        func = fuzzer.get_function(file_name, function_name)
        result = fuzzer.class_func_app(class_instance, func, args)
        self.assertTrue(result == expected, test_description)
    @data(
        (
            "add_one_only_int",
            None,
            ["'int'"],
            "Test simple single input function with single type",
        ),
        (
            "add_two_only_int",
            None,
            ["'int', 'int'"],
            "Test multi input function with single types",
        ),
        (
            "add_one_multi_type",
            None,
            ["'int'", "'float'"],
            "Test single input function with multi types",
        ),
        (
            "add_two_multi_type",
            None,
            [
                "'int', 'int'",
                "'int', 'float'",
                "'float', 'int'",
                "'float', 'float'",
                "'str', 'str'",
            ],
            "Test multi input function with multi types",
        ),
        (
            "Example.add_one_only_int_no_deps",
            Example(1, 2.0, "3"),
            ["'int'"],
            "Test method in class with no dependencies and single type",
        ),
        (
            "Example.add_two_multi_type",
            Example(1, 2.0, 3),
            ["'int', 'int'", "'int', 'float'", "'float', 'int'", "'float', 'float'"],
            "Test method in class with multi types and dependencies",
        ),
        (
            "Example.add_two_multi_type",
            Example(1, 2.0, "3"),
            [],
            "Test method in class with dependencies no feasible types",
        ),
        ("add_one_only_int_default", None, ["'int'"], "test default parameters"),
    )
    @unpack
    def test_fuzz_example_success(
        self,
        function_name, # type: str
        class_instance, # type: Optional[Any]
        expected, # type: List[str]
        test_description, # type: str
    ):
        # type: (...) -> None
        # Fuzzes the target and compares the set of argument-type
        # combinations that succeeded (order-insensitive via sorting).
        output = fuzzer.fuzz_example(
            "test.test_functions", function_name, class_instance=class_instance
        )
        success_type_list = list(output["results"]["successes"].keys())
        self.assertListEqual(
            sorted(success_type_list), sorted(expected), test_description
        )
    @data(
        (
            "add_one",
            ["x"],
            ["int"],
            ["def add_one(x: int) -> Any"],
            "Test simple single argument case",
        ),
        (
            "add_two",
            ["x", "y"],
            ["int, str"],
            ["def add_two(x: int, y: str) -> Any"],
            "Test simple multi argument case",
        ),
        (
            "add_two",
            ["x", "y"],
            ["int, str", "str, int"],
            [
                "def add_two(x: int, y: str) -> Any",
                "def add_two(x: str, y: int) -> Any",
            ],
            "Test multi argument case multi string",
        ),
    )
    @unpack
    def test_generate_mypy_stub_string(
        self,
        function_name, # type: str
        arg_names, # type: List[str]
        arg_types, # type: List[str]
        expected, # type: str
        test_description, # type: str
    ):
        # type: (...) -> None
        # Builds the minimal result-JSON shape that fuzzer expects and
        # checks the generated mypy stub strings.
        function_json = {
            "function_to_type": function_name,
            "arg_names": arg_names,
            "results": {"successes": {k: [1] for k in arg_types}},
        }
        function_string = fuzzer.generate_mypy_stub_strings(function_json)
        self.assertEqual(function_string, expected, test_description)
# Allow running this test module directly (e.g. `python test_fuzzer.py`).
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3392011 | import torch
import torch.nn as nn
import numpy as np
import os
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
from model.model import EncoderRNN, Transformation
from utils.preprocess import MyDataset, get_sliding_data, get_loaders
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
from torch.utils.data import DataLoader
from scipy import stats
from scipy.spatial import distance
import seaborn as sns
from sklearn.metrics import precision_score, recall_score, f1_score
from datetime import datetime
from tsmoothie.smoother import *
# Module-level mean-squared-error criterion shared by the helpers below.
mse = nn.MSELoss(reduction='mean')
def similarity(x1, x2):
    """Exponentiated cosine similarity along the last dim (range 1..e)."""
    cos = nn.CosineSimilarity(dim=-1)(x1, x2)
    # The "* 1 / 1" is a temperature placeholder kept from the original.
    return torch.exp(cos * 1 / 1)  # 1 ~ e
def cosine_distance(x1, x2):
    """Cosine distance along the last dim, rescaled into [0, 1]."""
    raw = nn.CosineSimilarity(dim=-1)(x1, x2)  # raw cosine in [-1, 1]
    scaled = (raw + 1) / 2  # shifted into [0, 1]
    # 1.0 for opposite vectors, 0.0 for identical ones.
    return 1.0 - scaled * 1 / 1
def mahalanobis(x=None, mean=None, iv=None):
    """Mahalanobis distance of row vector *x* from *mean*, given the
    inverse covariance matrix *iv* (returns a length-1 array for a
    (1, n) input, matching the original behaviour)."""
    delta = x - mean
    quad_form = np.dot(np.dot(delta, iv), delta.T)
    return np.sqrt(quad_form)[0]
def adjust_predicts(score, label):
    """
    Point-adjust anomaly predictions against ground-truth segments.

    A point is predicted anomalous when its score exceeds 0.1.  Whenever
    a predicted point falls inside a ground-truth anomaly segment, the
    whole segment (back to its first point) is marked detected — the
    standard "point-adjust" protocol for time-series anomaly detection.

    Args:
        score (np.ndarray): The anomaly score
        label (np.ndarray): The ground-truth label
    Returns:
        np.ndarray: predict labels (float32 0/1)
    Raises:
        ValueError: if score and label differ in length.
    """
    if len(score) != len(label):
        raise ValueError("score and label must have the same length")
    score = np.asarray(score)
    label = np.asarray(label)
    predict = score > 0.1
    actual = label > 0.1
    anomaly_state = False
    for i in range(len(score)):
        if actual[i] and predict[i] and not anomaly_state:
            anomaly_state = True
            # Back-fill the current ground-truth segment. Fixed to include
            # index 0: the original range(i, 0, -1) stopped at 1, so a
            # segment starting at position 0 was never fully adjusted.
            for j in range(i, -1, -1):
                if not actual[j]:
                    break
                predict[j] = True
        elif not actual[i]:
            anomaly_state = False
        if anomaly_state:
            predict[i] = True
    return predict.astype(np.float32)
def evaluate_reconstruction(test_data, encoder, decoder, gt_labels, path, device):
    """
    Score the test set by per-window reconstruction MSE, threshold at a
    fixed percentile, and report raw and point-adjusted precision /
    recall / F1, saving plots and arrays via save_result.
    """
    start = datetime.now()
    print('start:', start)
    test_loader = DataLoader(test_data, batch_size=1, shuffle=False, drop_last=False)
    pbar = tqdm(test_loader)
    scores = []
    criterion = nn.MSELoss()
    # Anomaly score per window = encode/decode reconstruction error.
    for w_t, y_t, t in pbar:
        w_t = w_t.float().to(device)
        z_t = encoder(w_t)
        w_t_hat = decoder(z_t)
        dist = criterion(w_t_hat, w_t).detach().cpu().numpy()
        scores.append(dist)
        pbar.set_description(f'score: {dist.item():.8f}')
    # scaler = MinMaxScaler()
    # scaler.fit(scores)
    # scores = scaler.transform(scores)
    # 87.66 is a dataset-specific percentile — presumably tuned to the
    # anomaly rate of the benchmark; TODO confirm.
    threshold = np.percentile(scores, 87.66)
    print(f'threshold={threshold}')
    pred_labels = np.zeros(len(scores))
    pred_labels[scores > threshold] = 1
    # Truncate ground truth to the number of scored windows.
    T = len(pred_labels)
    gt_labels = np.array(gt_labels[:T])
    for i in range(len(pred_labels)):
        pred_labels[i] = int(pred_labels[i])
        gt_labels[i] = int(gt_labels[i])
    adj_pred_labels = adjust_predicts(pred_labels, gt_labels)
    precision = precision_score(gt_labels, pred_labels)
    recall = recall_score(gt_labels, pred_labels)
    f1 = f1_score(gt_labels, pred_labels)
    print(f'precision={precision:.4f}, recall={recall:.4f}, f1-score={f1:.4f}')
    acc = [precision, recall, f1]
    adj_precision = precision_score(gt_labels, adj_pred_labels)
    adj_recall = recall_score(gt_labels, adj_pred_labels)
    adj_f1 = f1_score(gt_labels, adj_pred_labels)
    print(f'adj_precision={adj_precision:.4f}, adj_recall={adj_recall:.4f}, adj_f1-score={adj_f1:.4f}')
    acc.append(adj_precision)
    acc.append(adj_recall)
    acc.append(adj_f1)
    # NOTE(review): ensure save_result's 'machine' parameter has a
    # default — this call omits it.
    save_result(path, acc, gt_labels, pred_labels, scores, threshold)
    end = datetime.now()
    print('end:', end)
def evaluate_coreset(train_data, test_data, model, gt_labels, path, device='cuda:0', machine='machine-1-1'):
    """
    Score the test set by distance of each window embedding to k-means
    centres fitted on training embeddings ("coreset"), threshold at the
    95th percentile, and report raw and point-adjusted metrics.
    """
    start = datetime.now()
    print(f'{machine} start:{start}')
    train_loader = DataLoader(train_data, batch_size=120, shuffle=True, drop_last=True)
    train_z_t = []
    pbar = tqdm(train_loader)
    i = 0
    T = len(train_loader)
    # Collect training embeddings (model returns (embedding, forecast)).
    with torch.no_grad():
        for w_t, _, _, _ in pbar:
            i += 1
            z_t, _ = model(w_t.float().to(device))
            train_z_t.append(z_t.detach().cpu().numpy())
            pbar.set_description(f'progress: {i}/{T}')
    train_z_t = np.array(train_z_t)
    feature = train_z_t.shape[2]
    train_z_t = train_z_t.reshape(-1, feature)
    # 10 cluster centres act as the memory of normal behaviour.
    cluster_centers = torch.tensor(KMeans(n_clusters=10, init='k-means++', max_iter=300).fit(train_z_t).cluster_centers_).to(device)
    test_loader = DataLoader(test_data, batch_size=1, shuffle=False, drop_last=False)
    pbar = tqdm(test_loader)
    scores = []
    dist = []
    forecast = []
    i = 0
    with torch.no_grad():
        for w_t, y_t, t, nxt in pbar:
            z_t, pred = model(w_t.float().to(device)) #[:, :, -1]
            # z_t = z_t.to(device)
            # pred = pred.to(device)
            # nxt = nxt.to(device)
            # forecast.append(mse(pred, nxt).detach().cpu().numpy())
            # Distance of the embedding to its closest centre, scaled by
            # the embedding norm (despite the name, np.min is used).
            max_dist = np.min([(cosine_distance(z_t, center) * torch.norm(z_t)).detach().cpu().numpy() for center in cluster_centers])
            # max_dist = np.max([cosine_distance(z_t, center) for center in cluster_centers]).detach().cpu().numpy()
            dist.append(max_dist)
            # if max_dist < 0.5:
            # pbar.set_description(f'{i}-th max distance:{dist[i]:.8f}, forecast error:{forecast[i]:.4f}')
            pbar.set_description(f'{i}-th max distance:{dist[i]:.8f}')
            # pbar.set_description(f'forecast error:{forecast[i]:.4f}')
            i += 1
    # Min-max normalise the distances before thresholding.
    scaler = MinMaxScaler()
    dist = scaler.fit_transform(np.asarray(dist).reshape(-1, 1)).squeeze(1)
    # smoother = ExponentialSmoother(window_len=100, alpha=0.3)
    # smoother.smooth(dist)
    # dist[:-100] = smoother.data[0]
    # np.save(os.path.join("./result/%s" % path, "tri_score.npy"), dist)
    # forecast = scaler.fit_transform(np.asarray(forecast).reshape(-1, 1)).squeeze(1)
    # forecast[-1] = 0
    # np.save(os.path.join("./result/%s" % path, f'{machine}_score.npy'), forecast)
    # gamma = 0.2
    # scores = (gamma * dist + forecast) / (1 + gamma)
    scores = dist
    # scores = forecast
    threshold = np.percentile(scores, 95)
    print(f'threshold={threshold}')
    pred_labels = np.zeros(len(scores))
    pred_labels[scores > threshold] = 1
    T = len(pred_labels)
    gt_labels = np.array(gt_labels[:T])
    for i in range(len(pred_labels)):
        pred_labels[i] = int(pred_labels[i])
        gt_labels[i] = int(gt_labels[i])
    adj_pred_labels = adjust_predicts(pred_labels, gt_labels)
    np.set_printoptions(suppress=True)
    precision = precision_score(gt_labels, pred_labels)
    recall = recall_score(gt_labels, pred_labels)
    f1 = f1_score(gt_labels, pred_labels)
    print(f'precision={precision:.4f}, recall={recall:.4f}, f1-score={f1:.4f}')
    acc = [precision, recall, f1]
    adj_precision = precision_score(gt_labels, adj_pred_labels)
    adj_recall = recall_score(gt_labels, adj_pred_labels)
    adj_f1 = f1_score(gt_labels, adj_pred_labels)
    print(f'adj_precision={adj_precision:.4f}, adj_recall={adj_recall:.4f}, adj_f1-score={adj_f1:.4f}')
    acc.append(adj_precision)
    acc.append(adj_recall)
    acc.append(adj_f1)
    save_result(path, acc, gt_labels, pred_labels, scores, threshold, machine)
    end = datetime.now()
    print('end:', end)
def evaluate_recon_coreset(train_data, test_data, encoder, decoder, tri_encoder, gt_labels, path, device='cuda:0'):
    """
    Combine two anomaly scores — distance to k-means centres of training
    embeddings, and reconstruction error — by summing their min-max
    normalised values, then threshold and report metrics.

    NOTE(review): tri_encoder is accepted but never used here.
    """
    start = datetime.now()
    print('start:', start)
    train_loader = DataLoader(train_data, batch_size=200, shuffle=True, drop_last=True)
    train_z_t = []
    pbar = tqdm(train_loader)
    i = 0
    T = len(train_loader)
    for w_t, _, _ in pbar:
        i += 1
        z_t = encoder(w_t).detach().cpu().numpy()
        train_z_t.append(z_t)
        pbar.set_description(f'progress: {i}/{T}')
    train_z_t = np.array(train_z_t)
    feature = train_z_t.shape[2]
    train_z_t = train_z_t.reshape(-1, feature)
    cluster_centers = torch.tensor(KMeans(n_clusters=10, init='k-means++', max_iter=300).fit(train_z_t).cluster_centers_).to(device)
    test_loader = DataLoader(test_data, batch_size=1, shuffle=False, drop_last=False)
    pbar = tqdm(test_loader)
    scores, recon_score, final_scores = [], [], []
    i = 0
    with torch.no_grad():
        for w_t, y_t, t in pbar:
            z_t = encoder(w_t).to(device)
            # Norm-scaled distance to the nearest centre.
            min_dist = np.min([cosine_distance(z_t, center) * torch.norm(z_t) for center in cluster_centers]).detach().cpu().numpy()
            scores.append(min_dist[0])
            w_t = w_t.to(device)
            w_t_hat = decoder(z_t).to(device)
            recon = mse(w_t, w_t_hat).float().detach().cpu().numpy().max()
            recon_score.append(recon)
            # final_scores.append(max_dist[0] * recon)
            pbar.set_description(f'{i}-th max distance:{scores[i]:.8f}, recon score:{recon_score[i]:.5f}')#, final score:{final_scores[i]:.4f}')
            i += 1
    # Normalise each score stream independently, then sum them.
    scaler = MinMaxScaler()
    recon_scores = scaler.fit_transform(np.asarray(recon_score).reshape(-1, 1))
    tri_scores = scaler.fit_transform(np.asarray(scores).reshape(-1, 1))
    final_scores = np.add(recon_scores, tri_scores).reshape(-1)
    threshold = np.percentile(final_scores, 87.66)
    print(f'threshold={threshold}')
    pred_labels = np.zeros(len(final_scores))
    pred_labels[final_scores > threshold] = 1
    T = len(pred_labels)
    gt_labels = np.array(gt_labels[:T])
    for i in range(len(pred_labels)):
        pred_labels[i] = int(pred_labels[i])
        gt_labels[i] = int(gt_labels[i])
    adj_pred_labels = adjust_predicts(pred_labels, gt_labels)
    np.set_printoptions(suppress=True)
    precision = precision_score(gt_labels, pred_labels)
    recall = recall_score(gt_labels, pred_labels)
    f1 = f1_score(gt_labels, pred_labels)
    print(f'precision={precision:.4f}, recall={recall:.4f}, f1-score={f1:.4f}')
    acc = [precision, recall, f1]
    adj_precision = precision_score(gt_labels, adj_pred_labels)
    adj_recall = recall_score(gt_labels, adj_pred_labels)
    adj_f1 = f1_score(gt_labels, adj_pred_labels)
    print(f'adj_precision={adj_precision:.4f}, adj_recall={adj_recall:.4f}, adj_f1-score={adj_f1:.4f}')
    acc.append(adj_precision)
    acc.append(adj_recall)
    acc.append(adj_f1)
    # NOTE(review): ensure save_result's 'machine' parameter has a
    # default — this call omits it.
    save_result(path, acc, gt_labels, pred_labels, final_scores, threshold)
    end = datetime.now()
    print('end:', end)
def save_result(path, acc, gt_labels, pred_labels, scores, threshold, machine='machine-1-1'):
    """
    Persist evaluation artefacts: a 3-panel plot (ground truth,
    predictions, anomaly score vs. threshold), the accuracy metrics as
    text, and the raw scores/predictions as .npy files.

    :param path: sub-directory name under ./plots and ./result.
    :param acc: metric values written to <machine>_accuracy.txt.
    :param gt_labels: ground-truth 0/1 labels.
    :param pred_labels: predicted 0/1 labels.
    :param scores: per-timestep anomaly scores.
    :param threshold: scalar decision threshold (drawn across the plot).
    :param machine: entity name used as the output-file prefix. Defaults
        to 'machine-1-1' (matching evaluate_coreset) so the callers in
        this module that pass only six arguments no longer raise
        TypeError.
    """
    # Save performance plots
    if not os.path.exists('./plots/%s' % path):
        os.mkdir('./plots/%s' % path)
    # NOTE(review): ./result/%s is written below but never created here —
    # confirm it is guaranteed to exist before this is called.
    T = len(pred_labels)
    labels = [gt for gt in gt_labels]
    preds = [pred for pred in pred_labels]
    fig = plt.figure()
    ax1 = fig.add_subplot(3, 1, 1)
    ax2 = fig.add_subplot(3, 1, 2)
    ax3 = fig.add_subplot(3, 1, 3)
    ax1.set_xticks(np.arange(0, T, 5000))
    ax2.set_xticks(np.arange(0, T, 5000))
    ax3.set_xticks(np.arange(0, T, 5000))
    ax1.plot(np.arange(T), labels, label="ground truth")
    ax2.plot(np.arange(T), preds, label="prediction")
    # Broadcast the scalar threshold into a horizontal line.
    threshold = np.repeat(threshold, T)
    ax3.plot(np.arange(T), scores, label="anomaly score")
    ax3.plot(np.arange(T), threshold, label="threshold")
    ax1.legend(loc='upper right')
    ax2.legend(loc='upper right')
    ax3.legend(loc='upper right')
    plt.savefig(os.path.join("./plots/%s" % path, f'{machine}_result.pdf'))
    np.savetxt(os.path.join("./plots/%s" % path, f'{machine}_accuracy.txt'), acc, fmt='%.4f')
    np.save(os.path.join("./result/%s" % path, f'{machine}_score.npy'), scores)
    np.save(os.path.join("./result/%s" % path, f'{machine}_preds.npy'), preds)
def evaluate_euclidean(train_data, test_data, encoder, path='simulated', feature=25, device='cuda:0', positive_sample=10, step=10):
    """
    Plot the mean Euclidean distance of each test embedding to all
    training embeddings (exploratory; shows a plot, saves nothing).

    NOTE(review): path, feature, device, positive_sample and step are
    accepted but never used in this function.
    """
    train_loader = DataLoader(train_data, batch_size=40, shuffle=True, drop_last=True)
    train_z_t = []
    for w_t, _, _ in train_loader:
        z_t = encoder(w_t).detach().cpu().numpy()
        train_z_t.append(z_t)
    train_z_t = np.array(train_z_t)
    test_loader = DataLoader(test_data, batch_size=1, shuffle=False, drop_last=False)
    result = []
    i = 0
    with torch.no_grad():
        pbar = tqdm(test_loader)
        for w_t, y_t, t in pbar:
            z_t = encoder(w_t).detach().cpu().numpy()
            # 128 is a hard-coded latent dimension — assumes it matches
            # the encoder's output size; TODO confirm.
            dist = distance.cdist(z_t.reshape(1, -1), train_z_t.reshape(-1, 128), 'euclidean').mean()
            # sim = 0
            # for z in train_z_t:
            # sim += torch.log(similarity(z_t.unsqueeze(), z.unsqueeze()))
            result.append(dist)
            pbar.set_description(f'score: {dist.item():.8f}')
            i += 1
            # if dist > 1.3:
            # print(f'{i}-th Euclidean distance:{dist}')
    plt.plot(result)
    plt.show()
    print('end')
12818390 | # Copyright (c) 2014, 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from botocore.exceptions import ClientError
import kappa.awsclient
import kappa.log
# Module-level logger for the API Gateway resource handler.
LOG = logging.getLogger(__name__)
class RestApi(object):
    """
    Manages an AWS API Gateway REST API fronting a kappa-managed Lambda
    function: the API itself, its resource path, methods with Lambda
    integrations, invoke permissions, and stage deployments.
    """

    def __init__(self, context, config):
        self._context = context
        self._config = config
        self._apigateway_client = kappa.awsclient.create_client(
            'apigateway', context.session)
        # Lazily-populated caches; see _get_api() and _get_resources().
        self._api = None
        self._resources = None
        self._resource = None

    @property
    def arn(self):
        """execute-api ARN matching any stage/method on this resource."""
        _, _, _, region, account, _ = self._context.function.arn.split(':', 5)
        arn = 'arn:aws:execute-api:{}:{}:{}/*/*/{}'.format(
            region, account, self.api_id, self.resource_name)
        return arn

    @property
    def api_name(self):
        """Configured name of the REST API."""
        return self._config['name']

    @property
    def description(self):
        """Configured description of the REST API."""
        return self._config['description']

    @property
    def resource_name(self):
        """Leaf path part of the configured resource."""
        return self._config['resource']['name']

    @property
    def parent_resource(self):
        """Parent path under which the resource lives."""
        return self._config['resource']['parent']

    @property
    def full_path(self):
        """Full resource path: the parent path plus the leaf name."""
        parts = self.parent_resource.split('/')
        parts.append(self.resource_name)
        return '/'.join(parts)

    @property
    def api_id(self):
        api = self._get_api()
        return api.get('id')

    @property
    def resource_id(self):
        resources = self._get_resources()
        return resources.get(self.full_path).get('id')

    def _get_api(self):
        """Find and cache this API's metadata by name.

        NOTE: does not paginate get_rest_apis and keeps the *last* match;
        assumes API names are unique and fit in one response page.
        """
        if self._api is None:
            try:
                response = self._apigateway_client.call(
                    'get_rest_apis')
                LOG.debug(response)
                for item in response['items']:
                    if item['name'] == self.api_name:
                        self._api = item
            except Exception:
                LOG.exception('Error finding restapi')
        return self._api

    def _get_resources(self):
        """Fetch and cache the API's resources, keyed by path."""
        if self._resources is None:
            try:
                response = self._apigateway_client.call(
                    'get_resources',
                    restApiId=self.api_id)
                LOG.debug(response)
                self._resources = {}
                for item in response['items']:
                    self._resources[item['path']] = item
            except Exception:
                LOG.exception('Unable to find resources for: %s',
                              self.api_name)
        return self._resources

    def create_restapi(self):
        """Create the REST API if it does not already exist."""
        if not self.api_exists():
            LOG.info('creating restapi %s', self.api_name)
            try:
                response = self._apigateway_client.call(
                    'create_rest_api',
                    name=self.api_name,
                    description=self.description)
                LOG.debug(response)
            except Exception:
                LOG.exception('Unable to create new restapi')

    def create_resource_path(self):
        """Create each missing segment of the resource path in turn."""
        path = self.full_path
        parts = path.split('/')
        resources = self._get_resources()
        parent = None
        build_path = []
        for part in parts:
            LOG.debug('part=%s', part)
            build_path.append(part)
            LOG.debug('build_path=%s', build_path)
            full_path = '/'.join(build_path)
            LOG.debug('full_path=%s', full_path)
            # Fixed: equality test rather than identity ('is') — string
            # identity with a literal is an interning accident, not a
            # guarantee, and raises SyntaxWarning on modern CPython.
            if full_path == '':
                parent = resources['/']
            else:
                if full_path not in resources and parent:
                    try:
                        response = self._apigateway_client.call(
                            'create_resource',
                            restApiId=self.api_id,
                            parentId=parent['id'],
                            pathPart=part)
                        LOG.debug(response)
                        resources[full_path] = response
                    except Exception:
                        LOG.exception('Unable to create new resource')
                parent = resources[full_path]
        self._item = resources[path]

    def create_method(self, method, config):
        """Create *method* on the resource plus its Lambda integration.

        :param method: HTTP method name, e.g. 'GET'.
        :param config: per-method config dict (authorization_type,
            apikey_required).
        """
        LOG.info('creating method: %s', method)
        try:
            response = self._apigateway_client.call(
                'put_method',
                restApiId=self.api_id,
                resourceId=self.resource_id,
                httpMethod=method,
                authorizationType=config.get('authorization_type'),
                apiKeyRequired=config.get('apikey_required', False)
            )
            LOG.debug(response)
            LOG.debug('now create integration')
            # Build the Lambda invocation URI; the stage variable selects
            # the environment-specific function alias at request time.
            uri = 'arn:aws:apigateway:{}:'.format(
                self._apigateway_client.region_name)
            uri += 'lambda:path/2015-03-31/functions/'
            uri += self._context.function.arn
            uri += ':${stageVariables.environment}/invocations'
            LOG.debug(uri)
            # NOTE(review): Lambda integrations normally require
            # integrationHttpMethod='POST' regardless of the client-facing
            # method — confirm this works for non-POST methods.
            response = self._apigateway_client.call(
                'put_integration',
                restApiId=self.api_id,
                resourceId=self.resource_id,
                httpMethod=method,
                integrationHttpMethod=method,
                type='AWS',
                uri=uri
            )
        except Exception:
            LOG.exception('Unable to create integration: %s', method)

    def create_deployment(self):
        """Deploy the API to the stage named after the kappa environment."""
        LOG.info('creating a deployment for %s to stage: %s',
                 self.api_name, self._context.environment)
        try:
            response = self._apigateway_client.call(
                'create_deployment',
                restApiId=self.api_id,
                stageName=self._context.environment
            )
            LOG.debug(response)
            # NOTE(review): self.deployment_uri is not defined on this
            # class, so this line raises AttributeError which is swallowed
            # below and mis-logged as a deployment failure — confirm.
            LOG.info('Now deployed to: %s', self.deployment_uri)
        except Exception:
            LOG.exception('Unable to create a deployment')

    def create_methods(self):
        """Create every configured method that does not already exist."""
        resource_config = self._config['resource']
        for method in resource_config.get('methods', dict()):
            if not self.method_exists(method):
                method_config = resource_config['methods'][method]
                self.create_method(method, method_config)

    def api_exists(self):
        """Return the API metadata if it exists, else None (falsy)."""
        return self._get_api()

    def resource_exists(self):
        """Return the resource record for the full path, else None."""
        resources = self._get_resources()
        return resources.get(self.full_path)

    def method_exists(self, method):
        """True if *method* is already defined on the resource."""
        exists = False
        resource = self.resource_exists()
        if resource:
            methods = resource.get('resourceMethods')
            if methods:
                for method_name in methods:
                    if method_name == method:
                        exists = True
        return exists

    def find_parent_resource_id(self):
        """Return the resource id of the configured parent path, or None.

        Fixed: iterate the cached resource records (dict values) rather
        than the dict's path keys, and compare against parent_resource —
        the original read ``self.parent``, which does not exist, so this
        method always raised.
        """
        parent_id = None
        resources = self._get_resources()
        for item in resources.values():
            if item['path'] == self.parent_resource:
                parent_id = item['id']
        return parent_id

    def api_update(self):
        """Placeholder: update an existing REST API (currently a no-op)."""
        LOG.info('updating restapi %s', self.api_name)

    def resource_update(self):
        """Placeholder: update an existing resource (currently a no-op)."""
        LOG.info('updating resource %s', self.full_path)

    def add_permission(self):
        """Grant API Gateway permission to invoke the Lambda function."""
        LOG.info('Adding permission for APIGateway to call function')
        self._context.function.add_permission(
            action='lambda:InvokeFunction',
            principal='apigateway.amazonaws.com',
            source_arn=self.arn)

    def deploy(self):
        """Create or update the API, resource path and methods, then
        grant invoke permission."""
        if self.api_exists():
            self.api_update()
        else:
            self.create_restapi()
        if self.resource_exists():
            self.resource_update()
        else:
            self.create_resource_path()
        self.create_methods()
        self.add_permission()

    def delete(self):
        """Delete the resource; return the API response, or None on error."""
        LOG.info('deleting resource %s', self.resource_name)
        # Fixed: pre-initialise so the return below cannot raise
        # NameError when the delete call fails.
        response = None
        try:
            response = self._apigateway_client.call(
                'delete_resource',
                restApiId=self.api_id,
                resourceId=self.resource_id)
            LOG.debug(response)
        except ClientError:
            LOG.exception('Unable to delete resource %s', self.resource_name)
        return response

    def status(self):
        """Report status.

        NOTE(review): 'delete_' is not a valid apigateway operation name
        and ``self.name`` is not defined on this class — this method looks
        copy-pasted from the Lambda handler and broken; confirm intended
        behaviour before relying on it.
        """
        try:
            response = self._apigateway_client.call(
                'delete_',
                FunctionName=self.name)
            LOG.debug(response)
        except ClientError:
            LOG.exception('function %s not found', self.name)
            response = None
        return response
392118 | # SPDX-License-Identifier: Apache-2.0
"""
An example console application that uses the subarulink package.
For more details about this api, please refer to the documentation at
https://github.com/G-Two/subarulink
"""
| StarcoderdataPython |
6413877 | # ===================================================================
#
# Copyright (c) 2021, Legrandin <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
from . import cSHAKE256
from .TupleHash128 import TupleHash
def new(**kwargs):
    """Create a new TupleHash256 object.

    Args:
      digest_bytes (integer):
        Optional. The size of the digest, in bytes.
        Default is 64. Minimum is 8.
      digest_bits (integer):
        Optional and alternative to ``digest_bytes``.
        The size of the digest, in bits (and in steps of 8).
        Default is 512. Minimum is 64.
      custom (bytes):
        Optional.
        A customization bytestring (``S`` in SP 800-185).

    Raises:
      TypeError: if both size parameters are given, or unknown keyword
        arguments remain.
      ValueError: if the requested digest size is below the minimum or not
        a whole number of bytes.

    :Return: A :class:`TupleHash` object
    """

    digest_bytes = kwargs.pop("digest_bytes", None)
    digest_bits = kwargs.pop("digest_bits", None)

    # The two size parameters are mutually exclusive.
    if None not in (digest_bytes, digest_bits):
        raise TypeError("Only one digest parameter must be provided")
    if (None, None) == (digest_bytes, digest_bits):
        digest_bytes = 64  # default: 64 bytes (512 bits)

    if digest_bytes is not None:
        if digest_bytes < 8:
            raise ValueError("'digest_bytes' must be at least 8")
    else:
        if digest_bits < 64 or digest_bits % 8:
            # Bug fix: this branch validates digest_bits, but the original
            # message blamed 'digest_bytes'.
            raise ValueError("'digest_bits' must be at least 64 "
                             "in steps of 8")
        digest_bytes = digest_bits // 8

    custom = kwargs.pop("custom", b'')

    if kwargs:
        raise TypeError("Unknown parameters: " + str(kwargs))

    return TupleHash(custom, cSHAKE256, digest_bytes)
| StarcoderdataPython |
8044902 | """
First pass at data centered class.
Ideally a single class instance can handle loading of the entire dataset.
TODO
- Add cache mechanism for quick storage/loading rather than going to synap
- Think of a way to get current feature cols and drug cols from
a subset data, add functionality
"""
import pandas as pd
from pybeataml.data import ExperimentalData
from pybeataml.load_data_from_synpase import load_table, load_file
# current synapse ids, check with Camilo to see if these are the final (
# I know there are some other corrected/v2/uncorrected in the R code)
global_id = 'syn25808020'
phospho_id = 'syn26477193' # syn25808662
rnaseq_id = 'syn26545877'
drug_response_id = 'syn25830473'
meta_file_id = 'syn26534982'
wes_id = 'syn26428827'
def prep_rnaseq():
    """Load the RNA-seq table from Synapse and normalize it to the shared
    long-format schema (gene_symbol / exp_value / sample_id / source / label)."""
    rename_map = {
        'display_label': 'gene_symbol',
        'RNA counts': 'exp_value',
        'labId': 'sample_id',
    }
    table = load_table(rnaseq_id)
    # Keep only the columns of interest; rename() returns a fresh frame.
    frame = table.loc[:, ['display_label', 'labId', 'RNA counts']].rename(
        columns=rename_map)
    frame['source'] = 'rna_seq'
    frame['label'] = frame.gene_symbol + '_rna'
    return frame
def prep_phosph():
    """Load the phospho-proteomics table and normalize it to the shared
    long-format schema; site IDs become the feature label."""
    rename_map = {
        'Gene': 'gene_symbol',
        'SiteID': 'label',
        'LogRatio': 'exp_value',
        'SampleID.full': 'sample_id_full',
        'Barcode.ID': 'sample_id',
    }
    table = load_table(phospho_id)
    # Select exactly the mapped columns (insertion order of the dict),
    # then rename into the shared schema.
    frame = table.loc[:, list(rename_map)].rename(columns=rename_map)
    frame['source'] = 'phospho'
    return frame
def prep_proteomics():
    """Load the global proteomics table and normalize it to the shared
    long-format schema, dropping rows with no gene annotation."""
    rename_map = {
        'Gene': 'gene_symbol',
        'SiteID': 'label',
        'LogRatio': 'exp_value',
        'SampleID.full': 'sample_id_full',
        'Barcode.ID': 'sample_id',
    }
    table = load_table(global_id)
    # Remove entries with an empty gene column before any renaming.
    frame = table.loc[table.Gene.notna(), :].copy()
    frame.rename(columns=rename_map, inplace=True)
    frame = frame.loc[:, ['gene_symbol', 'exp_value',
                          'sample_id_full', 'sample_id']]
    # Source and label columns are consumed downstream by MAGINE.
    frame['label'] = frame.gene_symbol + '_prot'
    frame['source'] = 'proteomics'
    return frame
def load_drug_response():
    """Load the drug-response table and return sample_id / inhibitor / auc
    with AUC coerced to float."""
    table = load_table(drug_response_id)
    auc = table[['lab_id', 'inhibitor', 'auc']].copy()
    auc = auc.rename(columns={'lab_id': 'sample_id'})
    auc.auc = auc.auc.astype(float)
    return auc
def load_meta_data():
    """Load the sample metadata file from Synapse.

    Bug fix: the original assigned the loaded table to a local and fell off
    the end, so every call returned None.
    """
    return load_file(meta_file_id)
def load_mutations():
    """
    Loads WES data.

    Processes mutational status into two levels. First one is at the gene
    level, second one gene with amino acid level.

    Returns
    -------
    pandas.DataFrame
        Long-format frame with sample_id, gene_symbol, label, exp_value
        (0/1 mutation indicator) and source ('wes' for gene level,
        'wes_protein_level' for amino-acid level).
    """
    df = load_table(wes_id)
    mapper = {
        'symbol': 'gene_symbol',
        'labId': 'sample_id',
    }
    df.rename(mapper, axis=1, inplace=True)
    # Every listed mutation counts as "present" (1).
    df['exp_value'] = 1
    # Pivot to sample x gene (fill_value=0 marks absence), then melt back
    # to long format so absent mutations become explicit 0 rows.
    wes_gene_level = pd.pivot_table(
        df,
        index='sample_id',
        values='exp_value',
        columns='gene_symbol',
        fill_value=0
    )
    wes_gene_level = wes_gene_level.melt(ignore_index=False).reset_index()
    wes_gene_level['label'] = wes_gene_level.gene_symbol + '_mut'
    wes_gene_level['exp_value'] = wes_gene_level['value']
    wes_gene_level['source'] = 'wes'

    # Amino-acid level: label is "<gene>_<aa change>" parsed from hgvsp.
    wes_aa_level = df.copy()
    wes_aa_level['label'] = wes_aa_level['hgvsp'].str.split(':p.').str.get(1)
    wes_aa_level['label'] = wes_aa_level['gene_symbol'] + '_' + wes_aa_level['label']
    wes_aa_level = pd.pivot_table(
        wes_aa_level,
        index='sample_id',
        values='exp_value',
        columns='label',
        fill_value=0
    )
    wes_aa_level = wes_aa_level.melt(ignore_index=False).reset_index()
    wes_aa_level['gene_symbol'] = wes_aa_level.label.str.split('_').str.get(0)
    wes_aa_level['source'] = 'wes_protein_level'
    wes_aa_level['exp_value'] = wes_aa_level['value']
    return pd.concat([wes_gene_level, wes_aa_level])
class AMLData(object):
    """Aggregates BeatAML omics tables into a single dataset.

    On construction, loads proteomics, phospho-proteomics, RNA-seq, drug
    response and WES tables from Synapse, concatenates the molecular data
    into ``flat_data`` (long format) and pivots it into ``all_data``
    (sample-by-feature matrix).
    """

    def __init__(self):
        # Lazily-built caches backing the drug_names / auc_table properties.
        self._drug_names = None
        self._auc_table = None
        self.proteomics = prep_proteomics()
        self.phospho = prep_phosph()
        self.rna = prep_rnaseq()
        self.functional = load_drug_response()
        self.wes = load_mutations()
        # Long-format table of every molecular measurement across platforms.
        self.flat_data = pd.concat(
            [self.phospho, self.proteomics, self.rna, self.wes]
        )
        # Until i find the tables, commenting this
        meta_info_cols = ['sample_id', 'InitialAMLDiagnosis',
                          'PostChemotherapy', 'FLT3.ITD']
        # meta = self.flat_data[meta_info_cols].drop_duplicates()
        # meta.set_index('sample_id', inplace=True)
        # self.meta = meta * 1
        self.all_data = self.convert_to_matrix(self.flat_data)
        self.feature_names = list(self.all_data.columns.values)
        self.feature_names.remove('sample_id')
        # format for magine.ExperimentalData class
        d = self.flat_data.rename({'gene_symbol': 'identifier'}, axis=1)
        d['species_type'] = 'gene'
        self.exp_data = ExperimentalData(d)

    @property
    def drug_names(self):
        """Unique inhibitor names present in the drug-response table."""
        if self._drug_names is None:
            self._drug_names = list(set(self.functional['inhibitor'].unique()))
        return self._drug_names

    @property
    def auc_table(self):
        """Wide AUC matrix: rows are samples, columns are inhibitors."""
        if self._auc_table is None:
            self._auc_table = pd.pivot_table(
                self.functional,
                index='sample_id', columns='inhibitor', values='auc'
            )
        return self._auc_table

    def subset(self, source):
        """Return a sample-by-feature matrix restricted to the given
        source(s), e.g. 'rna_seq' or ['phospho', 'proteomics']."""
        if isinstance(source, str):
            source = [source]
        subset = self.flat_data.loc[self.flat_data.source.isin(source)]
        return self.convert_to_matrix(subset)

    def subset_flat(self, source):
        """Return the long-format rows restricted to the given source(s)."""
        if isinstance(source, str):
            source = [source]
        return self.flat_data.loc[self.flat_data.source.isin(source)]

    def convert_to_matrix(self, flat_dataframe):
        """Pivot a long-format frame into a sample-by-feature matrix."""
        df = pd.pivot_table(
            flat_dataframe,
            index='sample_id',
            values='exp_value',
            columns='label'
        ).reset_index()
        return df
        # until meta info is added to tables, commenting this out
        # return df.join(self.meta, on='sample_id').reset_index()

    def get_trainable_data(self, source, drug_name):
        """Build a model-ready frame: features from `source` plus the AUC
        column for `drug_name`, filtered for missingness."""
        # Filter experimental platform
        mol_data = self.subset(source)
        feature_names = list(mol_data.columns.values)
        if 'sample_id' in feature_names:
            feature_names.remove('sample_id')
        # merge with auc table to get row=patient, col=genes + drug_auc
        joined = mol_data.join(
            self.auc_table[drug_name],
            on='sample_id'
        ).set_index('sample_id')
        # df_subset = joined.loc[:, feature_names + [drug_name]]
        # remove rows without a AUC measurement
        df_subset = joined[~joined[drug_name].isna()].copy()
        # require 50% of the data be present for any given column
        df_subset.dropna(
            axis=0,
            how='any',
            thresh=df_subset.shape[1] * .50,
            inplace=True
        )
        # filter down if missing any measured cols
        # TODO Ask Camilo about the data filling
        n_features_remaining = df_subset.shape[0]
        df_subset.dropna(
            axis=1,
            how='any',
            thresh=n_features_remaining,
            inplace=True
        )
        return df_subset
if __name__ == '__main__':
    # Smoke test: pull all tables from Synapse and build the merged dataset.
    data = AMLData()
| StarcoderdataPython |
9769294 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-20 13:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``event_date`` field from the Event model."""

    dependencies = [
        ('entertainment_tonight', '0017_auto_20170420_1037'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='event',
            name='event_date',
        ),
    ]
| StarcoderdataPython |
9788625 | <reponame>d--j/salt<gh_stars>1-10
# -*- coding: utf-8 -*-
'''
tests.integration.shell.syndic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:codeauthor: :email:`<NAME> (<EMAIL>)`
:copyright: © 2012-2013 by the SaltStack Team, see AUTHORS for more details
:license: Apache 2.0, see LICENSE for more details.
'''
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
class SyndicTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
    """Runs the shared shell-case tests against the salt-syndic binary."""

    # Consumed by ShellCaseCommonTestsMixIn to locate the CLI under test.
    _call_binary_ = 'salt-syndic'

if __name__ == '__main__':
    from integration import run_tests
    run_tests(SyndicTest)
| StarcoderdataPython |
from flask import Flask

app = Flask(__name__)


@app.route('/')
def home_page():
    # Landing page linking to the todo list.
    return 'Hello World! <a href="/todo/">Todos</a>'


@app.route('/todo/')
def list_todos():
    # Static listing page linking to the first todo.
    return 'Todo List <a href="/todo/1">First Todo</a>'


@app.route('/todo/<todo_id>')
def show_todo(todo_id):
    # `todo_id` arrives as a string path segment.
    return 'ToDo {todo_id}'.format(todo_id=todo_id)


if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
| StarcoderdataPython |
5174793 | <gh_stars>0
# Signed 32-bit overflow bound (valid range is [-l, l - 1]).
l = 2 ** 31


class Solution:
    """LeetCode 7, "Reverse Integer"."""

    def reverse(s, x):
        """Return ``x`` with its decimal digits reversed, preserving sign.

        Returns 0 when the reversed value would overflow a signed 32-bit
        integer, as the problem requires.  (``s`` is ``self``.)

        Fixes the golfed original: ``l-1else`` relies on deprecated
        number/keyword juxtaposition (SyntaxWarning since Python 3.8) and
        the one-line body was unreadable.
        """
        # Reverse the digits of |x| via string slicing, then reapply sign.
        r = (-1) ** (x < 0) * int(str(abs(x))[::-1])
        # Clamp to the 32-bit range; out-of-range results collapse to 0.
        return r if -l < r < l - 1 else 0
| StarcoderdataPython |
292175 | <gh_stars>10-100
"""Miscellaneous functions"""
import numpy as np
from numba import jit
from warnings import warn
@jit
def softmax(arr):
    """Scale-robust softmax choice rule.

    Subtracting the maximum before exponentiating avoids overflow without
    changing the resulting probabilities.
    """
    shifted = np.exp(arr - np.max(arr))
    total = shifted.sum()
    return shifted / total
@jit
def pessimism(arr, w):
    """Pessimistic learning rule: a w-weighted mix of the best and worst
    values in `arr` (w=1 is fully optimistic, w=0 fully pessimistic)."""
    best = np.max(arr)
    worst = np.min(arr)
    return w * best + (1 - w) * worst
def categorical(arr):
    """Categorical distribution rng: draw one sample with probabilities
    `arr` and return the index of the chosen category."""
    draw = np.random.multinomial(1, arr)
    return np.argmax(draw)
def check_params(beta=None, eta=None, gamma=None, w=None, epsilon=None):
    """Internal convenience function for sanity checking parameter values.

    Warns on an extreme `beta`; raises ValueError when any of the
    unit-interval parameters falls outside [0, 1]. Unset (None) parameters
    are skipped.
    """
    if beta is not None and abs(beta) > 50:
        warn('Parameter "beta" set very large.')
    # All remaining parameters share the same [0, 1] constraint.
    for name, value in (('eta', eta), ('gamma', gamma),
                        ('w', w), ('epsilon', epsilon)):
        if value is not None and (value < 0 or value > 1):
            raise ValueError('Parameter "%s" must be in range [0,1].' % name)
3285802 | <filename>contentcuration/contentcuration/tests/test_authentication.py<gh_stars>0
from __future__ import absolute_import
from .base import BaseTestCase
from contentcuration.utils.policies import check_policies
class AuthenticationTestCase(BaseTestCase):
    """Integration tests for channel access control and policy redirects."""

    def setUp(self):
        super(AuthenticationTestCase, self).setUp()
        # Short channel URL plus its /view and /edit variants.
        self.base_url = '/channels/{}'.format(self.channel.pk)
        self.view_url = '{}/view'.format(self.base_url)
        self.edit_url = '{}/edit'.format(self.base_url)

    def test_authenticate_policy_update(self):
        """
        Test that authenticated new users are shown the policies page regardless of what page was requested
        if they have policies they have not yet agreed to.
        """
        base_url = '/channels/{}'.format(self.channel.pk)
        self.channel.viewers.add(self.user)
        self.client.force_login(self.user)
        assert len(check_policies(self.user)) > 0
        # ensure that a new user is redirected to policy update after authenticating the first time.
        response = self.client.get(base_url, follow=True)
        assert "/policies/update" == response.redirect_chain[-1][0]

    def test_staging_channel_access(self):
        """Staging page requires edit rights; view rights are insufficient."""
        staging_url = '{}/staging'.format(self.base_url)

        # test that when we are redirected we get taken to the login page since we're not signed in,
        # and that after sign in we'll get sent to the right place.
        response = self.get(staging_url, follow=True)
        assert "/accounts/login/?next={}".format(staging_url) == response.redirect_chain[-1][0]
        assert response.status_code == 200

        self.channel.editors.remove(self.user)
        self.sign_in()
        response = self.get(staging_url)
        assert response.status_code == 403

        # View rights alone still do not grant staging access.
        self.channel.viewers.add(self.user)
        response = self.get(staging_url)
        assert response.status_code == 403

        self.channel.editors.add(self.user)
        # finally!
        response = self.get(staging_url)
        assert response.status_code == 200

    def test_channel_admin_access(self):
        """Administration page requires an authenticated admin user."""
        admin_url = '/administration/'
        response = self.get(admin_url, follow=True)
        assert "/accounts/login/?next={}".format(admin_url) == response.redirect_chain[-1][0]
        assert response.status_code == 200

        self.sign_in()
        response = self.get(admin_url)
        assert response.status_code == 403

        self.user.is_admin = True
        self.user.save()
        response = self.get(admin_url)
        assert response.status_code == 200

    def test_unathenticated_channel_access(self):
        """
        Ensures that short URLs without /view or /edit redirect based on the user's permissions,
        that unauthenticated users are sent to the login page with the page they requested set as next,
        and that direct visits to /edit or /view present unauthorized messages when the user doesn't have permissions.
        """
        response = self.client.get(self.base_url)
        # test that it redirects
        assert response.status_code == 302

        # now test that when we are redirected we get taken to the login page since we're not signed in,
        # and that after sign in we'll get sent to the right place.
        response = self.get(self.base_url, follow=True)
        assert "/accounts/login/?next={}".format(self.view_url) == response.redirect_chain[-1][0]
        assert response.status_code == 200

    def test_no_rights_channel_access(self):
        """A signed-in user with no rights gets 403 on every channel page."""
        self.channel.editors.remove(self.user)
        self.sign_in()
        response = self.get(self.base_url, follow=True)
        assert self.view_url == response.redirect_chain[-1][0]
        assert response.status_code == 403

        response = self.get(self.view_url)
        assert response.status_code == 403

        # /edit URL first switches to /view in case the user has view access, so make sure we track the redirect
        response = self.get(self.edit_url, follow=True)
        assert response.status_code == 403

    def test_view_only_channel_access(self):
        """A viewer can reach /view but is redirected away from /edit."""
        self.channel.editors.remove(self.user)
        self.sign_in()
        self.channel.viewers.add(self.user)
        response = self.get(self.base_url, follow=True)
        assert self.view_url == response.redirect_chain[-1][0]
        assert response.status_code == 200

        response = self.get(self.view_url)
        assert response.status_code == 200

        # make sure that a view-only user gets redirected if requesting edit page
        response = self.get(self.edit_url, follow=True)
        assert self.view_url == response.redirect_chain[-1][0]
        assert response.status_code == 200

    def test_edit_channel_access(self):
        """An editor can reach the base, /view and /edit pages."""
        self.sign_in()

        # we can edit!
        response = self.get(self.base_url, follow=True)
        assert self.edit_url == response.redirect_chain[-1][0]
        assert response.status_code == 200

        response = self.get(self.view_url)
        assert response.status_code == 200

        response = self.get(self.edit_url)
        assert response.status_code == 200
| StarcoderdataPython |
3403759 | <filename>fastai/vision/learner.py<gh_stars>0
"`Learner` support for computer vision"
from ..torch_core import *
from ..basic_train import *
from ..data import *
from ..layers import *
__all__ = ['ConvLearner', 'create_body', 'create_head', 'num_features']
def create_body(model:Model, cut:Optional[int]=None, body_fn:Callable[[Model],Model]=None):
    "Cut off the body of a typically pretrained model at `cut` or as specified by `body_fn`"
    # NOTE(review): `if cut` is a truthiness test, so cut=0 falls through to
    # body_fn / the unmodified model -- confirm cut=0 is meant as "no cut".
    return (nn.Sequential(*list(model.children())[:cut]) if cut
            else body_fn(model) if body_fn else model)
def num_features(m:Model)->int:
    "Return the number of output features for a model"
    # Walk the flattened layer list from the end: the last layer exposing a
    # `num_features` attribute determines the model's output width.
    for l in reversed(flatten_model(m)):
        if hasattr(l, 'num_features'): return l.num_features
    # NOTE(review): implicitly returns None when no layer exposes
    # `num_features`, despite the `-> int` annotation -- confirm callers
    # tolerate that.
def create_head(nf:int, nc:int, lin_ftrs:Optional[Collection[int]]=None, ps:Floats=0.5):
    """Model head that takes `nf` features, runs through `lin_ftrs`, and about `nc` classes.

    :param ps: dropout, can be a single float or a list for each layer"""
    # Default hidden layout is nf -> 512 -> nc.
    lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
    ps = listify(ps)
    # A single dropout value: halve it for the hidden layers, keep the
    # original value for the last one.
    if len(ps)==1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
    actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
    # Concat-pool (avg + max) then flatten, followed by BN/dropout/linear
    # stacks built by bn_drop_lin.
    layers = [AdaptiveConcatPool2d(), Flatten()]
    for ni,no,p,actn in zip(lin_ftrs[:-1],lin_ftrs[1:],ps,actns):
        layers += bn_drop_lin(ni,no,True,p,actn)
    return nn.Sequential(*layers)
def _default_split(m:Model):
    "By default split models between first and second layer"
    return split_model(m, m[1])
def _resnet_split(m:Model):
    "Split a resnet style model"
    # Splits at the body's 7th child and at the head, giving three
    # layer groups for discriminative learning rates.
    return split_model(m, (m[0][6],m[1]))
# Per-architecture defaults: where to cut the pretrained model ('cut') and
# how to split it into layer groups ('split').
_default_meta = {'cut':-1, 'split':_default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }

# Lookup table mapping torchvision constructors to their metadata.
model_meta = {
    tvm.resnet18 :{**_resnet_meta}, tvm.resnet34: {**_resnet_meta},
    tvm.resnet50 :{**_resnet_meta}, tvm.resnet101:{**_resnet_meta},
    tvm.resnet152:{**_resnet_meta}}
class ConvLearner(Learner):
    "Builds convnet style learners"
    def __init__(self, data:DataBunch, arch:Callable, cut=None, pretrained:bool=True,
                 lin_ftrs:Optional[Collection[int]]=None, ps:Floats=0.5,
                 custom_head:Optional[nn.Module]=None, split_on:Optional[SplitFuncOrIdxList]=None, **kwargs:Any)->None:
        # Architecture-specific cut point and layer-group split function.
        meta = model_meta.get(arch, _default_meta)
        torch.backends.cudnn.benchmark = True
        body = create_body(arch(pretrained), ifnone(cut,meta['cut']))
        # AdaptiveConcatPool2d in the head doubles the feature count.
        nf = num_features(body) * 2
        head = custom_head or create_head(nf, data.c, lin_ftrs, ps)
        model = nn.Sequential(body, head)
        super().__init__(data, model, **kwargs)
        self.split(ifnone(split_on,meta['split']))
        # Freeze the pretrained body so only the head trains initially.
        if pretrained: self.freeze()
        apply_init(model[1], nn.init.kaiming_normal_)
| StarcoderdataPython |
4876374 | ###############################################################################
##
## Copyright (C) 2018-2020, New York University.
## All rights reserved.
## Contact: <EMAIL>
##
## This file is part of MLDebugger.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import copy
import json
import logging
import os
from mldebugger.combinatorial_design import generate_tuples
from vistrails.core.modules.module_registry import get_module_registry
# Possible pass/fail outcomes for a pipeline run.
goodbad = [True, False]
# Default number of test configurations to generate.
numtests = 30
def evaluate(x, formula):
    """Evaluate `formula` (a Python expression string) for experiment `x`.

    SECURITY NOTE(review): `formula` is executed with `eval`; it must come
    from a trusted source and never from untrusted input.
    """
    # `local` is intentionally bound: formulas may reference it,
    # e.g. "local[0] == '1'".
    local = x
    logging.debug("local is: " + str(local))
    logging.debug("formula is: " + formula)
    ret = eval(formula)
    logging.debug("ret is " + str(ret))
    return ret
def compute_score(experiment, input_parameters, pv_goodness, moralflag):
    """Score an experiment by summing, for each parameter-value pair, the
    fraction of previous runs counted under `moralflag` ('good' or 'bad')."""
    total = 0
    for position, key in enumerate(input_parameters):
        value = experiment[position]
        stats = pv_goodness[key][value]
        total += float(stats[moralflag]) / float(stats['good'] + stats['bad'])
    return total
def loadtests(filename):
    """Parse a test-definition file.

    File layout: line 1 the workflow ("null" or "module,function"), line 2
    the pass/fail formula, line 3 the cost, line 4 comma-separated column
    names, and each following line one experiment (comma-separated values).

    Returns
    -------
    list
        ``[workflow, allexperiments, allresults, formula, cost, cols]``
        where each entry of ``allresults`` is an experiment with its
        evaluated formula result appended.
    """
    # Context manager guarantees the handle is closed even on a parse error
    # (the original leaked it in that case).
    with open(filename, "r") as handle:
        text = handle.readlines()
    workflow = text[0]
    if workflow != "null\n":
        # "module,function" -> resolve the callable from the named module.
        script, func = (workflow[:-1]).split(",")
        workflow = getattr(__import__(script), func)
    else:
        workflow = None
    formula = text[1]
    cost = text[2]
    cols = text[3][:-1].split(",")
    allexperiments = [line[:-1].split(",") for line in text[4:]]
    allresults = []  # experiments paired with their evaluated result
    for experiment in allexperiments:
        row = copy.deepcopy(experiment)
        row.append(evaluate(experiment, formula))
        allresults.append(row)
    # Bug fix: the original returned the undefined name `worinfilekflow`
    # (a typo for `workflow`), raising NameError on every call.
    return [workflow, allexperiments, allresults, formula, cost, cols]
def load_runs(filename, input_keys, lims=None):
    """Load recorded pipeline runs from a JSON-lines (.adb) file.

    Parameters
    ----------
    filename : str
        Path to the log; created empty if it does not exist.
    input_keys : list
        Parameter names to extract from each record, in order.
    lims : list, optional
        Two-element ``[start, stop]`` line slice; defaults to all lines.

    Returns
    -------
    list
        ``[allexperiments, allresults, pv_goodness]`` where results carry
        the evaluated outcome and ``pv_goodness`` counts good/bad runs per
        parameter-value pair.

    NOTE(review): Python 2 only (uses the `unicode` built-in).
    """
    if os.path.isfile(filename):
        fileicareabout = open(filename, "r")
    else:
        # Create the file so a missing log behaves like an empty one.
        fileicareabout = open(filename, "w+")
    alllines = fileicareabout.readlines()
    fileicareabout.close()
    allexperiments = []
    allresults = []  # experiments and their results
    pv_goodness = {}  # number of good and bad instances by parameter-value
    if lims is None:
        lims = [0, len(alllines)]
    for e in alllines[lims[0]:lims[1]]:
        try:
            exp = []
            exp_dict = json.loads(e[:-1])
            # Normalize unicode values to utf-8 byte strings (Python 2).
            if type(exp_dict['result']) == unicode:
                result_value = exp_dict['result'].encode("utf-8")
            else:
                result_value = exp_dict['result']
            for key in input_keys:
                if key not in pv_goodness:
                    pv_goodness[key] = {}
                if type(exp_dict[key]) == unicode:
                    v = exp_dict[key].encode("utf-8")
                else:
                    v = exp_dict[key]
                exp.append(v)
                if v not in pv_goodness[key]:
                    pv_goodness[key][v] = {'good': 0, 'bad': 0}
                # Tally this value under good/bad by the run's outcome.
                if eval(result_value):
                    pv_goodness[key][v]['good'] += 1
                else:
                    pv_goodness[key][v]['bad'] += 1
            exp.append(result_value)
            allexperiments.append(exp)
        except:
            # NOTE(review): bare except silently drops malformed lines --
            # best-effort parsing, but it can also mask real bugs.
            pass
    for e in allexperiments:
        x = copy.deepcopy(e)
        # Replace the stored result string with its evaluated value.
        x[-1] = eval(x[-1])
        allresults.append(x)
    return [allexperiments, allresults, pv_goodness]
def load_dataxray(filename, input_keys, lims=None):
    """Build a DataXRay-style feature-vector string from a JSON-lines log.

    The header encodes the failure rate and a placeholder cost, followed by
    one '%'-delimited record per run.

    NOTE(review): Python 2 only (uses the `unicode` built-in).
    """
    if os.path.isfile(filename):
        fileicareabout = open(filename, "r")
    else:
        # Create the file so a missing log behaves like an empty one.
        fileicareabout = open(filename, "w+")
    alllines = fileicareabout.readlines()
    fileicareabout.close()
    # One 'a:' placeholder per input parameter.
    feature_vector = ""
    for i in range(len(input_keys)):
        feature_vector += 'a:'
    feature_vector += "\t" + feature_vector.replace('a',
                                                    '1') + ';rate;cost;false;' + feature_vector + ';' + feature_vector.replace(
        'a', '0') + ';' + str(len(alllines)) + ';0;'
    count = 0
    count_error = 0
    if lims is None:
        lims = [0, len(alllines)]
    print('limits', str(lims))
    for e in alllines[lims[0]:lims[1]]:
        try:
            exp_dict = json.loads(e[:-1])
            result = exp_dict['result']
            feature_vector += str(count) + '%' + str(result) + '%'
            if not result:
                count_error += 1
            for key in input_keys:
                if type(exp_dict[key]) == unicode:
                    v = exp_dict[key].encode("utf-8")
                else:
                    v = exp_dict[key]
                feature_vector += 'a_' + key + '#' + str(v) + ':'
            count += 1
            feature_vector += '='
        except:
            # Malformed lines are silently skipped (best effort).
            pass
    # TODO learn how to compute cost
    # Substitute the observed failure rate and a dummy cost into the header.
    return feature_vector.replace('rate', str(0 if count == 0 else count_error / float(count))).replace('cost', '99.99')
def load_combinatorial(input_dict):
    """Generate covering-array test tuples for the given parameter dict."""
    return generate_tuples(input_dict)
def _iterate_over_keys(permutations, current_permutation, input_dict):
    """Recursively extend `current_permutation` with each value of the
    current key, appending every completed assignment to `permutations`.

    NOTE(review): relies on Python 2 semantics -- `dict.keys()[-1]` indexes
    a list; on Python 3, `dict_keys` is not subscriptable.
    """
    key = current_permutation.keys()[-1]
    if key == input_dict.keys()[-1]:
        # Last key: each value completes one permutation.
        for value in input_dict[key]:
            current_permutation[key] = value
            permutation = copy.deepcopy(current_permutation)
            permutations.append(permutation)
    else:
        # Seed the next key, then recurse for each value of the current one.
        current_permutation[input_dict.keys()[len(current_permutation.keys())]] = None
        for value in input_dict[key]:
            current_permutation[key] = value
            permutation = copy.deepcopy(current_permutation)
            _iterate_over_keys(permutations, permutation, input_dict)
def load_permutations(input_dict):
    """Return every combination (cartesian product) of the values in
    `input_dict`, one dict per combination.

    NOTE(review): Python 2 only -- see `_iterate_over_keys`.
    """
    permutations = []
    current_permutation = {input_dict.keys()[0]: None}
    _iterate_over_keys(permutations, current_permutation, input_dict)
    return permutations
def record_run(moduleInfo, result):
    """Append a VisTrails pipeline run (parameters + result) as one JSON
    line to the vistrail's ``.adb`` log.

    NOTE(review): Python 2 / VisTrails API (`iteritems`, `xrange`).
    """
    paramDict = {}
    vistrail_name = moduleInfo['locator'].name
    file_name = vistrail_name.replace('.vt', '.adb')
    f = open(file_name, "a")
    reg = get_module_registry()
    pipeline = moduleInfo['pipeline']
    # Sort modules by name so records are stable across runs.
    sortedModules = sorted(pipeline.modules.iteritems(),
                           key=lambda item: item[1].name)
    for mId, module in sortedModules:
        if len(module.functions) > 0:
            for fId in xrange(len(module.functions)):
                function = module.functions[fId]
                # Skip OutputPort modules and PythonSource 'source' bodies;
                # neither is a tunable run parameter.
                desc = reg.get_descriptor_by_name('org.vistrails.vistrails.basic', 'OutputPort')
                if module.module_descriptor is desc: continue
                desc = reg.get_descriptor_by_name('org.vistrails.vistrails.basic', 'PythonSource')
                if (module.module_descriptor is desc) and (function.name == 'source'): continue
                if len(function.params) == 0: continue
                # Only the first parameter value of each function is kept.
                v = [p.value() for p in function.params][0]
                paramDict[function.name] = v
    paramDict['result'] = str(result)
    f.write(json.dumps(paramDict) + '\n')
    f.close()
def record_python_run(paramDict, vistrail_name, origin=None):
    """Append one run record (as a JSON line) to the vistrail's ``.adb`` log.

    Parameters
    ----------
    paramDict : dict
        Parameter name -> value mapping for the run. Left unmodified.
    vistrail_name : str
        Vistrail file name; the log file is the same name with ``.adb``.
    origin : optional
        If truthy, recorded under the ``"origin"`` key.
    """
    # Bug fix: work on a copy instead of mutating the caller's dict.
    record = dict(paramDict)
    if origin:
        record["origin"] = origin
    file_name = vistrail_name.replace('.vt', '.adb')
    # Context manager guarantees the handle is closed even if the write
    # fails (the original leaked it in that case).
    with open(file_name, "a") as f:
        f.write(json.dumps(record) + '\n')
| StarcoderdataPython |
4836775 | <gh_stars>1-10
import pytest
import vtk
from pytestvtk.assert_vtk import assert_vtk
@pytest.fixture
def vtk_string_array1():
    """A small named vtkStringArray with two populated values."""
    result = vtk.vtkStringArray()
    result.SetNumberOfTuples(1)
    result.SetNumberOfValues(2)
    result.SetName('testing_string')
    result.SetValue(0, 'Value 0')
    result.SetValue(1, 'Value 1')
    return result
@pytest.fixture
def vtk_string_array2():
    """A differently-shaped, renamed vtkStringArray that must compare
    unequal to ``vtk_string_array1``."""
    result = vtk.vtkStringArray()
    result.SetNumberOfTuples(2)
    result.SetNumberOfValues(3)
    result.SetName('testing_string_modified')
    result.SetValue(0, 'Value Modified 0')
    result.SetValue(1, 'Value Modified 1')
    # Bug fix: the original called SetValue(1, ...) twice, overwriting
    # index 1 and leaving index 2 unset despite declaring three values.
    result.SetValue(2, 'Value Modified 2')
    return result
def test_compare_vtkStringArray(vtk_string_array1, vtk_string_array2):
    """assert_vtk must accept identical arrays and fail differing ones."""
    # Comparing an array with itself must pass...
    assert_vtk(vtk_string_array1, vtk_string_array1)
    # ...while differing arrays must raise a pytest failure.
    with pytest.raises(pytest.fail.Exception) as excinfo:
        assert_vtk(vtk_string_array1, vtk_string_array2)
| StarcoderdataPython |
6563918 | <reponame>jekel/gino
from .api import Gino # NOQA
from .engine import GinoEngine, GinoConnection # NOQA
from .exceptions import * # NOQA
from .strategies import GinoStrategy # NOQA
def create_engine(*args, **kwargs):
    """Create an SQLAlchemy engine, defaulting to the 'gino' strategy."""
    from sqlalchemy import create_engine as sa_create_engine

    # Only set the strategy if the caller did not choose one explicitly.
    kwargs.setdefault('strategy', 'gino')
    return sa_create_engine(*args, **kwargs)
__version__ = '0.7.5'
| StarcoderdataPython |
4816461 | <reponame>memolp/U3M8DLoader
# -*- coding:utf-8 -*-
import u3m8
# cctv6
# Parse the remote HLS playlist; from_url returns a falsy value on failure.
u3m8_data = u3m8.from_url("http://ivi.bupt.edu.cn/hls/cctv6hd.m3u8")
if u3m8_data:
    # Download each TS segment relative to the stream root and combine
    # them into a single output file (comb=True).
    u3m8.download(u3m8_data.get_ts_urls(), root="http://ivi.bupt.edu.cn/hls/", comb=True)
| StarcoderdataPython |
11267109 | <gh_stars>10-100
'''
Tests for central dogma submodule of reaction module.
'''
from nose.tools import assert_equal, assert_raises
from coral import reaction, DNA, Peptide, RNA
def test_transcription():
    """Transcription maps DNA to RNA; coding_sequence trims to the ORF."""
    # 5' coding fragment; lowercase tail exercises case handling.
    test_dna = DNA('ATGATGGGCAGTGTCGAATTAAATCTGCGTGAGACAGAATTGTGTT' +
                   'TGGGACTACCAGGCGGTGATACAGTTGCACCAGTAACAGGAAACAA' +
                   'AAGAGGATTCTCTGAAACAGTAGATTTGAAACTTAATTTGAACAAT' +
                   'GAGCCAGCCAACAAGGAAGGTTCCACCACTCATGACGTCGTCACAT' +
                   'TTGATAGTAAAGAAAAGAGTGCGTGTCCAAAAGATCCAGCTAAGCC' +
                   'ACCTGCCAAGGCTCAAGTCGTCGGATGGCCACCTGTGAGATCTTAT' +
                   'AGAAAGAACGTAATGGTTTCTTGTCAGAAGTCCAGTGGTGGTCCTG' +
                   'AAGCAGCGGCTtgaaaa')
    reference_rna = RNA('AUGAUGGGCAGUGUCGAAUUAAAUCUGCGUGAGACAGAAUU' +
                        'GUGUUUGGGACUACCAGGCGGUGAUACAGUUGCACCAGUAA' +
                        'CAGGAAACAAAAGAGGAUUCUCUGAAACAGUAGAUUUGAAA' +
                        'CUUAAUUUGAACAAUGAGCCAGCCAACAAGGAAGGUUCCAC' +
                        'CACUCAUGACGUCGUCACAUUUGAUAGUAAAGAAAAGAGUG' +
                        'CGUGUCCAAAAGAUCCAGCUAAGCCACCUGCCAAGGCUCAA' +
                        'GUCGUCGGAUGGCCACCUGUGAGAUCUUAUAGAAAGAACGU' +
                        'AAUGGUUUCUUGUCAGAAGUCCAGUGGUGGUCCUGAAGCAG' +
                        'CGGCUugaaaa')
    # Basic transcription should work
    transcription_output = reaction.transcribe(test_dna)
    assert_equal(transcription_output, reference_rna)
    # Coding RNA should exclude anything after a stop codon
    coding_rna_output = reaction.coding_sequence(transcription_output)
    assert_equal(coding_rna_output, reference_rna[:-3])
    # Should fail if the sequence lacks a start codon or a stop codon
    assert_raises(ValueError, reaction.coding_sequence,
                  reaction.transcribe(DNA('aaatag')))
    assert_raises(ValueError, reaction.coding_sequence,
                  reaction.transcribe(DNA('atgaaa')))
def test_translation():
    """Translation maps RNA codons to the expected peptide."""
    test_rna = RNA('AUGAUGGGCAGUGUCGAAUUAAAUCUGCGUGAGACAGAAUU' +
                   'GUGUUUGGGACUACCAGGCGGUGAUACAGUUGCACCAGUAA' +
                   'CAGGAAACAAAAGAGGAUUCUCUGAAACAGUAGAUUUGAAA' +
                   'CUUAAUUUGAACAAUGAGCCAGCCAACAAGGAAGGUUCCAC' +
                   'CACUCAUGACGUCGUCACAUUUGAUAGUAAAGAAAAGAGUG' +
                   'CGUGUCCAAAAGAUCCAGCUAAGCCACCUGCCAAGGCUCAA' +
                   'GUCGUCGGAUGGCCACCUGUGAGAUCUUAUAGAAAGAACGU' +
                   'AAUGGUUUCUUGUCAGAAGUCCAGUGGUGGUCCUGAAGCAG' +
                   'CGGCUugaaaa')
    reference_peptide = Peptide('MMGSVELNLRETELCLGLPGGDTVAPVTGNK' +
                                'RGFSETVDLKLNLNNEPANKEGSTTHDVVTF' +
                                'DSKEKSACPKDPAKPPAKAQVVGWPPVRSYR' +
                                'KNVMVSCQKSSGGPEAAA')
    # Basic translation should work
    translation_output = reaction.translate(test_rna)
    assert_equal(translation_output, reference_peptide)
    # Coding peptide should exclude anything after a stop codon
    coding_rna = reaction.coding_sequence(test_rna)
    coding_peptide = reaction.translate(coding_rna)
    assert_equal(coding_peptide, reference_peptide)
def test_reverse_transcription():
    """Reverse transcription maps RNA back to the original DNA."""
    test_rna = RNA('AUGAUGGGCAGUGUCGAAUUAAAUCUGCGUGAGACAGAAUU' +
                   'GUGUUUGGGACUACCAGGCGGUGAUACAGUUGCACCAGUAA' +
                   'CAGGAAACAAAAGAGGAUUCUCUGAAACAGUAGAUUUGAAA' +
                   'CUUAAUUUGAACAAUGAGCCAGCCAACAAGGAAGGUUCCAC' +
                   'CACUCAUGACGUCGUCACAUUUGAUAGUAAAGAAAAGAGUG' +
                   'CGUGUCCAAAAGAUCCAGCUAAGCCACCUGCCAAGGCUCAA' +
                   'GUCGUCGGAUGGCCACCUGUGAGAUCUUAUAGAAAGAACGU' +
                   'AAUGGUUUCUUGUCAGAAGUCCAGUGGUGGUCCUGAAGCAG' +
                   'CGGCUugaaaa')
    ref_dna = DNA('ATGATGGGCAGTGTCGAATTAAATCTGCGTGAGACAGAATTGTGTT' +
                  'TGGGACTACCAGGCGGTGATACAGTTGCACCAGTAACAGGAAACAA' +
                  'AAGAGGATTCTCTGAAACAGTAGATTTGAAACTTAATTTGAACAAT' +
                  'GAGCCAGCCAACAAGGAAGGTTCCACCACTCATGACGTCGTCACAT' +
                  'TTGATAGTAAAGAAAAGAGTGCGTGTCCAAAAGATCCAGCTAAGCC' +
                  'ACCTGCCAAGGCTCAAGTCGTCGGATGGCCACCTGTGAGATCTTAT' +
                  'AGAAAGAACGTAATGGTTTCTTGTCAGAAGTCCAGTGGTGGTCCTG' +
                  'AAGCAGCGGCTtgaaaa')
    # Basic reverse transcription should work
    r_transcription = reaction.reverse_transcribe(test_rna)
    assert_equal(r_transcription, ref_dna)
| StarcoderdataPython |
6487680 | <reponame>iamovrhere/lpthw<filename>py2/ex1.py
# Inline comment.
"""
Block comment. Albeit also used as documentation blocks, I believe.
"""
# NOTE: Python 2 print statements (no parentheses) -- this file is from
# the book's Python 2 track and will not run under Python 3.
print "Hello World!"
print "Hello Again"
print "I like typing this."
print "Well, maybe. I'm probably going to go off script."
print 'Yay! Printing.'
print "I'd much rather you 'not'"
print 'I\'m guessing this the "quotes" section of book?'
| StarcoderdataPython |
5150388 | #!/usr/bin/env python
from pgmpy.utils.mathext import powerset
from pgmpy.base import UndirectedGraph
from pgmpy.models import BayesianModel
from pgmpy.estimators import StructureEstimator, HillClimbSearch, BDeuScore
from pgmpy.independencies import Independencies, IndependenceAssertion
from pgmpy.estimators.CITests import chi_square
class MmhcEstimator(StructureEstimator):
    def __init__(self, data, **kwargs):
        """
        Implements the MMHC hybrid structure estimation procedure for
        learning BayesianModels from discrete data.

        Parameters
        ----------
        data: pandas DataFrame object
            datafame object where each column represents one variable.
            (If some values in the data are missing the data cells should be set to `numpy.NaN`.
            Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)

        state_names: dict (optional)
            A dict indicating, for each variable, the discrete set of states (or values)
            that the variable can take. If unspecified, the observed values in the data set
            are taken to be the only possible states.

        complete_samples_only: bool (optional, default `True`)
            Specifies how to deal with missing data, if present. If set to `True` all rows
            that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
            every row where neither the variable nor its parents are `np.NaN` is used.
            This sets the behavior of the `state_count`-method.

        Reference
        ---------
        Tsamardinos et al., The max-min hill-climbing Bayesian network structure learning algorithm (2005)
        http://www.dsl-lab.org/supplements/mmhc_paper/paper_online.pdf
        """
        # All data handling (state names, missing-data policy) is delegated
        # to the StructureEstimator base class.
        super(MmhcEstimator, self).__init__(data, **kwargs)
def estimate(self, scoring_method=None, tabu_length=10, significance_level=0.01):
"""
Estimates a BayesianModel for the data set, using MMHC. First estimates a
graph skeleton using MMPC and then orients the edges using score-based local
search (hill climbing).
Parameters
----------
significance_level: float, default: 0.01
The significance level to use for conditional independence tests in the data set. See `mmpc`-method.
scoring_method: instance of a Scoring method (default: BDeuScore)
The method to use for scoring during Hill Climb Search. Can be an instance of any of the
scoring methods implemented in pgmpy.
tabu_length: int
If provided, the last `tabu_length` graph modifications cannot be reversed
during the search procedure. This serves to enforce a wider exploration
of the search space. Default value: 100.
Returns
-------
model: BayesianModel()-instance, not yet parametrized.
Reference
---------
Tsamardinos et al., The max-min hill-climbing Bayesian network structure learning algorithm (2005),
Algorithm 3
http://www.dsl-lab.org/supplements/mmhc_paper/paper_online.pdf
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import PC
>>> data = pd.DataFrame(np.random.randint(0, 2, size=(2500, 4)), columns=list('XYZW'))
>>> data['sum'] = data.sum(axis=1)
>>> est = MmhcEstimator(data)
>>> model = est.estimate()
>>> print(model.edges())
[('Z', 'sum'), ('X', 'sum'), ('W', 'sum'), ('Y', 'sum')]
"""
if scoring_method is None:
scoring_method = BDeuScore(self.data, equivalent_sample_size=10)
skel = self.mmpc(significance_level)
hc = HillClimbSearch(self.data, scoring_method=scoring_method)
model = hc.estimate(
white_list=skel.to_directed().edges(), tabu_length=tabu_length
)
return model
def mmpc(self, significance_level=0.01):
"""Estimates a graph skeleton (UndirectedGraph) for the data set, using then
MMPC (max-min parents-and-children) algorithm.
Parameters
----------
significance_level: float, default=0.01
The significance level to use for conditional independence tests in the data set.
`significance_level` is the desired Type 1 error probability of
falsely rejecting the null hypothesis that variables are independent,
given that they are. The lower `significance_level`, the less likely
we are to accept dependencies, resulting in a sparser graph.
Returns
-------
skeleton: UndirectedGraph
An estimate for the undirected graph skeleton of the BN underlying the data.
seperating_sets: dict
A dict containing for each pair of not directly connected nodes a
seperating set ("witnessing set") of variables that makes then
conditionally independent. (needed for edge orientation)
References
----------
Tsamardinos et al., The max-min hill-climbing Bayesian network structure
learning algorithm (2005), Algorithm 1 & 2
http://www.dsl-lab.org/supplements/mmhc_paper/paper_online.pdf
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import PC
>>> data = pd.DataFrame(np.random.randint(0, 2, size=(5000, 5)), columns=list('ABCDE'))
>>> data['F'] = data['A'] + data['B'] + data ['C']
>>> est = PC(data)
>>> skel, sep_sets = est.estimate_skeleton()
>>> skel.edges()
[('A', 'F'), ('B', 'F'), ('C', 'F')]
>>> # all independencies are unconditional:
>>> sep_sets
{('D', 'A'): (), ('C', 'A'): (), ('C', 'E'): (), ('E', 'F'): (), ('B', 'D'): (),
('B', 'E'): (), ('D', 'F'): (), ('D', 'E'): (), ('A', 'E'): (), ('B', 'A'): (),
('B', 'C'): (), ('C', 'D'): ()}
>>> data = pd.DataFrame(np.random.randint(0, 2, size=(5000, 3)), columns=list('XYZ'))
>>> data['X'] += data['Z']
>>> data['Y'] += data['Z']
>>> est = PC(data)
>>> skel, sep_sets = est.estimate_skeleton()
>>> skel.edges()
[('X', 'Z'), ('Y', 'Z')]
>>> # X, Y dependent, but conditionally independent given Z:
>>> sep_sets
{('X', 'Y'): ('Z',)}
"""
nodes = self.state_names.keys()
def assoc(X, Y, Zs):
"""Measure for (conditional) association between variables. Use negative
p-value of independence test.
"""
return 1 - chi_square(X, Y, Zs, self.data, boolean=False)[1]
def min_assoc(X, Y, Zs):
"Minimal association of X, Y given any subset of Zs."
return min(assoc(X, Y, Zs_subset) for Zs_subset in powerset(Zs))
def max_min_heuristic(X, Zs):
"Finds variable that maximizes min_assoc with `node` relative to `neighbors`."
max_min_assoc = 0
best_Y = None
for Y in set(nodes) - set(Zs + [X]):
min_assoc_val = min_assoc(X, Y, Zs)
if min_assoc_val >= max_min_assoc:
best_Y = Y
max_min_assoc = min_assoc_val
return (best_Y, max_min_assoc)
# Find parents and children for each node
neighbors = dict()
for node in nodes:
neighbors[node] = []
# Forward Phase
while True:
new_neighbor, new_neighbor_min_assoc = max_min_heuristic(
node, neighbors[node]
)
if new_neighbor_min_assoc > 0:
neighbors[node].append(new_neighbor)
else:
break
# Backward Phase
for neigh in neighbors[node]:
other_neighbors = [n for n in neighbors[node] if n != neigh]
for sep_set in powerset(other_neighbors):
if chi_square(
X=node,
Y=neigh,
Z=sep_set,
data=self.data,
significance_level=significance_level,
):
neighbors[node].remove(neigh)
break
# correct for false positives
for node in nodes:
for neigh in neighbors[node]:
if node not in neighbors[neigh]:
neighbors[node].remove(neigh)
skel = UndirectedGraph()
skel.add_nodes_from(nodes)
for node in nodes:
skel.add_edges_from([(node, neigh) for neigh in neighbors[node]])
return skel
| StarcoderdataPython |
214008 | <filename>zstats/statstests.py
import unittest
from zstats.stats import *
# Fixtures: data0 has distinct values; data2 contains a repeated value (2.0).
data0 = [1.0, 2.0, 3.0, 4.0, 5.0]
data2 = [1.0, 2.0, 2.0, 4.0, 5.0]


class TestStats(unittest.TestCase):
    """Unit tests for the zstats.stats helper functions."""

    def test_zcount(self):
        self.assertEqual(zcount(data2), 5)

    def test_mean(self):
        self.assertEqual(zmean(data0), 3)

    def test_mode(self):
        self.assertEqual(zmode(data2), 2.0)

    # Bug fix: both median tests were named `test_median`, so the second
    # definition silently replaced the first and the data2 case never ran.
    def test_median_data2(self):
        self.assertEqual(zmedian(data2), 2.0)

    def test_median_data0(self):
        self.assertEqual(zmedian(data0), 3.0)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1868952 | """
Created on 25 Aug, 2014
@author: <NAME>
"""
class ContentType(object):
    """Integer codes for the kinds of content the system handles,
    with bidirectional value<->name lookup tables.
    """
    WEB_PAGE = 1
    TEXT = 2
    IMAGE = 3
    LINK = 4
    HTML = 5
    JSON = 6
    TEMPLATE = 9
    # Explicit tables (the original scraped them out of locals()).
    VALUE_TO_NAME = {
        1: 'WEB_PAGE',
        2: 'TEXT',
        3: 'IMAGE',
        4: 'LINK',
        5: 'HTML',
        6: 'JSON',
        9: 'TEMPLATE',
    }
    NAME_TO_VALUE = {name: value for value, name in VALUE_TO_NAME.items()}
class VisibleType(object):
    """Visibility flag values with value<->name lookup tables."""
    INVISIBLE = 0
    VISIBLE = 1
    # Explicit tables (the original scraped them out of locals()).
    VALUE_TO_NAME = {0: 'INVISIBLE', 1: 'VISIBLE'}
    NAME_TO_VALUE = {name: value for value, name in VALUE_TO_NAME.items()}
class JobType(object):
    """Job type codes with value<->name lookup tables."""
    FUTURE_UPDATE = 0
    # Explicit tables (the original scraped them out of locals()).
    VALUE_TO_NAME = {0: 'FUTURE_UPDATE'}
    NAME_TO_VALUE = {name: value for value, name in VALUE_TO_NAME.items()}
class JobStatus(object):
    """Job status codes with value<->name lookup tables."""
    WAITING = 0
    FINISHED = 1
    # Explicit tables (the original scraped them out of locals()).
    VALUE_TO_NAME = {0: 'WAITING', 1: 'FINISHED'}
    NAME_TO_VALUE = {name: value for value, name in VALUE_TO_NAME.items()}
| StarcoderdataPython |
11300292 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import pandas as pd
#import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from datetime import datetime
from sklearn.externals import joblib
# Hand-run ticket-dispatch prediction script.  Loads a previously trained
# SVM and its training corpus, vectorizes new tickets and writes the
# predicted assignment group (with confidence) to a CSV file.
# NOTE(review): all paths are hard coded Windows paths — verify they exist.
inputfile = pd.read_csv(r"D:\svnTicketDispatcher\Inex Ticket Dispatcher\data\test_data Anapalara\excel\Inex_test_data_1.csv", encoding='ISO-8859-1')
# Currently unused; kept for parity with the training pipeline.
eng_stopwords = set(stopwords.words('english'))
# Renamed: the original bound the instance to the name `CountVectorizer`,
# shadowing the imported class.
vectorizer = CountVectorizer(encoding='ISO-8859-1', stop_words='english')
feature_train = joblib.load(r'D:\svnTicketDispatcher\feature_train.sav')
# Fit the vocabulary on the training corpus so new tickets are mapped to
# the same feature space the SVM was trained on.
xtrainCounts = vectorizer.fit_transform(feature_train)
print(xtrainCounts)
print(datetime.now())
print("Starting TFIDF...")
tfidf = TfidfTransformer()
# Learn the IDF statistics from the training counts.
xtrainTfidf = tfidf.fit_transform(xtrainCounts)
# ## SVM
print(datetime.now())
print("Starting Training...")
clfSVM = joblib.load(r'D:\svnTicketDispatcher\svm_mode.sav')
print(datetime.now())
print("Starting Prediction...")
ft = inputfile
ft = ft.replace(np.nan, "", regex=True)
ft1 = ft['TICKET_PROBLEM_SUMMARY']
ft2 = ft['Assigned Group*+']
ft3 = ft['Incident ID*+']
xNewCounts = vectorizer.transform(ft1)
# Bug fix: use transform() here — the original called fit_transform(),
# which re-fitted the IDF on the prediction data instead of reusing the
# statistics learned from the training counts above.
yNewCounts = tfidf.transform(xNewCounts)
predictedSVM = clfSVM.predict(yNewCounts)
class_probabilities = clfSVM.predict_proba(yNewCounts) * 100
print(datetime.now())
print("Finished Prediction...")
outputfileSVM = r"D:\svnTicketDispatcher\Output_SVM.csv"
out_df = pd.DataFrame({"Ticket":ft3, "TICKET_PROBLEM_SUMMARY":ft1, "Assigned Group*+":ft2, "Assignment Group ML":predictedSVM, "Confidence Probability%":(class_probabilities.max(axis=1))})
column_order = ["Ticket","TICKET_PROBLEM_SUMMARY","Assigned Group*+","Assignment Group ML","Confidence Probability%"]
out_df[column_order].to_csv(outputfileSVM, index=False)
| StarcoderdataPython |
1852574 | # Copyright (c) 2015 <NAME> <<EMAIL>>
from setuptools import setup, find_packages
# Packaging metadata for the mogul.locale namespace package.
# Sources live under src/; the BCP 47 language-subtag registry snapshot is
# shipped as package data.
setup(name='mogul.locale',
      version="0.1",
      description="""mogul""",
      # long_description=open('README.txt').read(),
      author='<NAME>',
      author_email='<EMAIL>',
      url="http://www.sffjunkie.co.uk/python-mogul.html",
      license='Apache-2.0',
      # 'mogul' is a namespace package shared with sibling distributions.
      package_dir={'': 'src'},
      packages=['mogul.locale'],
      namespace_packages=['mogul',],
      package_data = {'mogul.locale': ['bcp47_registry.utf8']},
      )
| StarcoderdataPython |
3561841 | <filename>architecture_tool_django/nodes/urls.py
from django.urls import path
from architecture_tool_django.nodes import views
app_name = "nodes"

# CRUD routes for nodes.  Because of ``app_name`` the route names are
# reversed under the "nodes" namespace, e.g. reverse("nodes:node.list").
urlpatterns = [
    path("nodes/", views.NodeListView.as_view(), name="node.list"),
    path("nodes/new/", views.newnode, name="node.new"),
    path("nodes/<str:pk>/", views.NodeDetailView.as_view(), name="node.detail"),
    path("nodes/<str:pk>/update/", views.update_node, name="node.update"),
    path("nodes/<str:pk>/edit/", views.edit_node, name="node.edit"),
    path("nodes/<str:pk>/delete/", views.NodeDeleteView.as_view(), name="node.delete"),
]
| StarcoderdataPython |
class UnshortenerOld():
    """Resolve shortened URLs to their final destination.

    Results are cached in an on-disk ``SerializableDict``.  Resolution is
    done either with plain HTTP requests or, optionally, through a pool of
    Selenium PhantomJS browsers (``useSelenium=True``), falling back to
    HTTP when Selenium fails.
    """
    def __init__(self,
                 logger=None,
                 verbose=True,
                 maxItemCount=100000000,
                 maxDataSizeMo=10000,
                 dataDir=None,
                 seleniumBrowserCount=20,
                 resetBrowsersRate=0.1,
                 useSelenium=False,
                 seleniumBrowsersIsNice=True,
                 storeAll=False,
                 readOnly=False,
                 shortenersDomainsFilePath=None):
        self.readOnly = readOnly
        self.seleniumBrowsersIsNice = seleniumBrowsersIsNice
        self.resetBrowsersRate = resetBrowsersRate
        self.seleniumBrowserCount = seleniumBrowserCount
        self.shortenersDomainsFilePath = shortenersDomainsFilePath
        if self.shortenersDomainsFilePath is None:
            self.shortenersDomainsFilePath = getDataDir() + "/Misc/crawling/shorteners.txt"
        self.useSelenium = useSelenium
        self.storeAll = storeAll
        self.maxDataSizeMo = maxDataSizeMo
        self.maxItemCount = maxItemCount
        self.dataDir = dataDir
        if self.dataDir is None:
            self.dataDir = getDataDir() + "/Misc/crawling/"
        self.fileName = "unshortener-database"
        self.urlParser = URLParser()
        self.requestCounter = 0
        self.verbose = verbose
        self.logger = logger
        # On-disk cache of url -> response summary:
        self.data = SerializableDict \
        (
            self.dataDir, self.fileName,
            cleanMaxSizeMoReadModifiedOlder=self.maxDataSizeMo,
            limit=self.maxItemCount,
            serializeEachNAction=20,
            verbose=True
        )
        self.shortenersDomains = None
        self.initShortenersDomains()
        self.browsers = None
    def initShortenersDomains(self):
        """Load the known shortener domains, lowercased and reduced to their
        registrable domain; entries without a dot are dropped."""
        if self.shortenersDomains is None:
            shorteners = fileToStrList(self.shortenersDomainsFilePath, removeDuplicates=True)
            newShorteners = []
            for current in shorteners:
                current = current.lower()
                newShorteners.append(current)
            shorteners = newShorteners
            self.shortenersDomains = set()
            for current in shorteners:
                newCurrent = self.urlParser.getDomain(current)
                self.shortenersDomains.add(newCurrent)
            self.shortenersDomains = list(self.shortenersDomains)
            # We filter all by presence of a point:
            newShortenersDomains = []
            for current in self.shortenersDomains:
                if "." in current:
                    newShortenersDomains.append(current)
            self.shortenersDomains = newShortenersDomains
    def getUnshortenersDomains(self):
        """Return the list of known shortener domains."""
        return self.shortenersDomains
    def close(self):
        """Flush and close the on-disk cache."""
        self.data.close()
    def isShortener(self, url):
        """Return True when url's registrable domain is a known shortener."""
        smartDomain = self.urlParser.getDomain(url)
        return smartDomain in self.shortenersDomains
    def isStatusCodeOk(self, statusCode):
        """Return True for HTTP 200; accepts either a bare status code or a
        response dict carrying it under "statusCode"."""
        # Bug fix: the original passed the builtin `dict` to dictContains
        # instead of the statusCode value, so the unwrapping never happened.
        if isinstance(statusCode, dict) and dictContains(statusCode, "statusCode"):
            statusCode = statusCode["statusCode"]
        return statusCode == 200
    def generateSeleniumBrowsers(self):
        """(Re)create the PhantomJS browser pool in parallel threads."""
        # We have to reset browsers sometimes because it can take a lot of RAM:
        if self.browsers is None or getRandomFloat() < self.resetBrowsersRate:
            if self.browsers is not None:
                for browser in self.browsers:
                    browser.quit()
            self.browsers = []
            def generateRandomBrowser(proxy):
                self.browsers.append(Browser(driverType=DRIVER_TYPE.phantomjs, proxy=proxy))
            allThreads = []
            for i in range(self.seleniumBrowserCount):
                theThread = Thread(target=generateRandomBrowser, args=(getRandomProxy(),))
                theThread.start()
                allThreads.append(theThread)
            for theThread in allThreads:
                theThread.join()
    def getRandomSeleniumBrowser(self):
        """Pick one browser from the pool at random."""
        return random.choice(self.browsers)
    def unshort(self, url, force=False, useProxy=True, timeout=20, retryIf407=True):
        """
        force as False will check if the given url have to match with a known shorter service
        force as True will give the last url for the request...
        """
        url = self.urlParser.normalize(url)
        smartDomain = self.urlParser.getDomain(url)
        if not force and smartDomain not in self.shortenersDomains:
            result = \
            {
                "force": force,
                "url": url,
                "message": "The domain is not a shortener service!",
                "status": -1,
            }
            return result
        if self.data.hasKey(url):
            log(url + " was in the Unshortener database!", self)
            return self.data.get(url)
        elif self.readOnly:
            result = \
            {
                "force": force,
                "url": url,
                "message": "The url is not in the database and the unshortener was set as read only.",
                "status": -2,
            }
            return result
        else:
            log("Trying to unshort " + url, self)
            proxy = None
            if useProxy:
                proxy = getRandomProxy()
            seleniumFailed = False
            if self.useSelenium:
                self.generateSeleniumBrowsers()
                browser = self.getRandomSeleniumBrowser()
                result = browser.html(url)
                if result["status"] == REQUEST_STATUS.refused or \
                   result["status"] == REQUEST_STATUS.timeout:
                    seleniumFailed = True
                    logError("Selenium failed to get " + url + "\nTrying with a HTTPBrowser...", self)
                else:
                    result = convertBrowserResponse(result, browser, nice=self.seleniumBrowsersIsNice)
            if not self.useSelenium or seleniumFailed:
                httpBrowser = HTTPBrowser(proxy=proxy, logger=self.logger, verbose=self.verbose)
                result = httpBrowser.get(url)
            result["force"] = force
            if self.storeAll or result["status"] == 200 or result["status"] == 404:
                self.data.add(url, result)
            # Drop the (possibly large) html payload from the returned
            # summary; tolerate its absence (the original used `del`, which
            # raised KeyError when no html was captured).
            result.pop("html", None)
            log("Unshort of " + result["url"] + " : " + str(result["status"]), self)
            return result
| StarcoderdataPython |
8122012 | <reponame>linuxbender/ai-programming-python
# imports
import argparse
import numpy as np
from PIL import Image
import torch
import json
import torch.nn.functional as F
from torchvision import transforms
from appHelper import gpuSupport, getModelClassifier, getModelSizeFromArch
# app logo
appLogo = """\
_ _ _
| (_) | |
__ _ _ __ _ __ _ __ _ __ ___ __| |_ ___| |_
/ _` | '_ \| '_ \ | '_ \| '__/ _ \/ _` | |/ __| __|
| (_| | |_) | |_) | | |_) | | | __/ (_| | | (__| |_
\__,_| .__/| .__/ | .__/|_| \___|\__,_|_|\___|\__|
| | | | | |
|_| |_| |_|
"""
def readInputArgs(argv=None):
    """Parse the prediction script's command line options.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse.  Defaults to ``sys.argv[1:]`` when None,
        so the original zero-argument call sites keep working.

    Returns
    -------
    argparse.Namespace with imagePath, loadCategoryFile, trainedModelName,
    topK and gpu attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--imagePath', dest='imagePath', default='./flowers/test/74/image_01254.jpg')
    parser.add_argument('--loadCategoryFile', dest='loadCategoryFile', default='cat_to_name.json')
    parser.add_argument('--trainedModelName', action='store', default='appCheckpoint.pth')
    parser.add_argument('--topK', dest='topK', type=int, default=5)
    # Bug fix: `type=bool` is an argparse trap — bool('False') is True, so
    # every non-empty value enabled the GPU.  Parse the text explicitly;
    # the default (True) is unchanged.
    parser.add_argument('--gpu', dest='gpu',
                        type=lambda s: str(s).lower() in ('1', 'true', 'yes'),
                        default=True)
    return parser.parse_args(argv)
# load category file
def loadCategoryFile(fileName):
    """Load the category-index -> flower-name mapping from a JSON file."""
    with open(fileName) as handle:
        return json.load(handle)
# process image input
def process_image(image):
    """Open an image file and preprocess it into a normalized tensor
    (resize, center crop to 224x224, to-tensor, ImageNet normalization)."""
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    return pipeline(Image.open(image))
# load model from checkPoint
def loadCheckpointModel(checkpointName):
    """Rebuild a trained model (with its classifier, class index mapping
    and weights) from a checkpoint file."""
    print('load checkpoint file :' + str(checkpointName))
    checkpoint = torch.load(checkpointName)
    restored = checkpoint['model']
    restored.classifier = checkpoint['classifier']
    restored.class_to_idx = checkpoint['class_to_idx']
    restored.load_state_dict(checkpoint['state_dict'])
    return restored
# convert tensor to array
def convertTensorToStringArray(tensor):
    """Convert a numeric tensor into a flat array of decimal strings.

    Bug fix: the original re-bound its accumulator on every loop iteration,
    so only the *last* row (or element) survived.  For the (1, k) tensors
    produced by ``topk`` the result is unchanged; other shapes now convert
    every element instead of dropping all but the last.
    """
    values = tensor.cpu().numpy().ravel()
    return np.char.mod('%d', values)
# predict from trained model
def predict(imagePath, model, topk, gpuActiv):
    """Predict the top-k classes for an image.

    Parameters
    ----------
    imagePath : path to the image file
    model : the trained torch model
    topk : number of top probabilities/classes to return
    gpuActiv : run on CUDA when True, on CPU otherwise

    Returns
    -------
    (probs, classes): numpy array of top-k probabilities and the matching
    class indices as decimal strings.
    """
    # Bug fix: keep the model and the input on the same device.  The
    # original always called `.cuda()` on the input, which crashed whenever
    # gpuActiv was False.
    device = 'cuda' if gpuActiv else 'cpu'
    model.to(device)
    img_torch = process_image(imagePath)
    img_torch = img_torch.unsqueeze_(0)
    img_torch = img_torch.float()
    with torch.no_grad():
        output = model.forward(img_torch.to(device))
        probability = F.softmax(output.data, dim=1)
        probs, classes = probability.topk(topk)
    return probs.cpu().numpy()[0], convertTensorToStringArray(classes)
# print result from the model
def printResult(probs, classes, catToName):
    """Pretty-print the prediction results and announce the best guess."""
    divider = '-----------------------------'
    print('\nResults from the model are:')
    print(divider)
    print(probs)
    print(classes)
    print([catToName[c] for c in classes])
    print(divider)
    top_index = np.argmax(probs)
    top_label = classes[top_index]
    print('Maybe your image is a: ' + catToName[top_label] + '\n')
    print('\nthx and bye bye...\n')
# main process
def main():
    """Entry point: parse options, restore the model, predict and report."""
    print(appLogo)
    args = readInputArgs()
    trained_model = loadCheckpointModel(args.trainedModelName)
    categories = loadCategoryFile(args.loadCategoryFile)
    use_gpu = gpuSupport(args.gpu)
    probs, classes = predict(args.imagePath, trained_model, args.topK, use_gpu)
    printResult(probs, classes, categories)


if __name__ == "__main__":
    main()
1903426 | <reponame>thinkAmi-sandbox/python_zeep-sample<filename>wsdl_formatter/format_wsdl.py
import pathlib
# Split the comma-separated WSDL dump into one trimmed entry per line.
source_path = pathlib.Path('./wsdl.txt')
with source_path.open(mode='r') as handle:
    raw_text = handle.read()
entries = raw_text.split(',')
target_path = pathlib.Path('./formatted.txt')
with target_path.open(mode='w') as out:
    out.writelines(f'{entry.strip()}\n' for entry in entries)
| StarcoderdataPython |
22337 | import torch
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import DataLoader
import torchvision
from torchvision import transforms
from torchvision.models import resnet101
import pytorch_lightning as pl
from model.AEINet import ADDGenerator, MultilevelAttributesEncoder
from model.MultiScaleDiscriminator import MultiscaleDiscriminator
from model.loss import GANLoss, AEI_Loss
from dataset import *
class AEINet(pl.LightningModule):
    """LightningModule wrapping the AEI-Net face-swapping pipeline:
    attribute encoder (E), ADD generator (G), multiscale discriminator (D)
    and a frozen-in-practice identity encoder (Z).
    """
    def __init__(self, hp):
        super(AEINet, self).__init__()
        self.hp = hp
        self.G = ADDGenerator(hp.arcface.vector_size)
        self.E = MultilevelAttributesEncoder()
        self.D = MultiscaleDiscriminator(3)
        # Identity encoder; weights come from a pretrained checkpoint and
        # its embeddings are detached in forward(), so it is never trained.
        self.Z = resnet101(num_classes=256)
        self.Z.load_state_dict(torch.load(hp.arcface.chkpt_path, map_location='cpu'))
        self.Loss_GAN = GANLoss()
        self.Loss_E_G = AEI_Loss()
    def forward(self, target_img, source_img):
        """Generate target_img re-rendered with source_img's identity.

        Returns (output, z_id, output_z_id, feature_map, output_feature_map)
        — everything the AEI loss needs.
        """
        z_id = self.Z(F.interpolate(source_img, size=112, mode='bilinear'))
        z_id = F.normalize(z_id)
        z_id = z_id.detach()  # do not backprop into the identity encoder
        feature_map = self.E(target_img)
        output = self.G(z_id, feature_map)
        output_z_id = self.Z(F.interpolate(output, size=112, mode='bilinear'))
        output_z_id = F.normalize(output_z_id)
        output_feature_map = self.E(output)
        return output, z_id, output_z_id, feature_map, output_feature_map
    def training_step(self, batch, batch_idx, optimizer_idx):
        """Alternate generator/encoder (optimizer_idx 0) and discriminator
        (optimizer_idx 1) updates."""
        target_img, source_img, same = batch
        if optimizer_idx == 0:
            output, z_id, output_z_id, feature_map, output_feature_map = self(target_img, source_img)
            # Cached for the discriminator step of the same batch.
            self.generated_img = output
            output_multi_scale_val = self.D(output)
            loss_GAN = self.Loss_GAN(output_multi_scale_val, True, for_discriminator=False)
            loss_E_G, loss_att, loss_id, loss_rec = self.Loss_E_G(target_img, output, feature_map, output_feature_map, z_id,
                                                                  output_z_id, same)
            loss_G = loss_E_G + loss_GAN
            self.logger.experiment.add_scalar("Loss G", loss_G.item(), self.global_step)
            self.logger.experiment.add_scalar("Attribute Loss", loss_att.item(), self.global_step)
            self.logger.experiment.add_scalar("ID Loss", loss_id.item(), self.global_step)
            self.logger.experiment.add_scalar("Reconstruction Loss", loss_rec.item(), self.global_step)
            self.logger.experiment.add_scalar("GAN Loss", loss_GAN.item(), self.global_step)
            return loss_G
        else:
            # Bug fix (naming only): the original called the real-image term
            # `loss_D_fake` and the fake-image term `loss_D_real`.  The
            # computed values (real->True, fake->False) were already correct.
            real_scores = self.D(target_img)
            fake_scores = self.D(self.generated_img.detach())
            loss_D_real = self.Loss_GAN(real_scores, True)
            loss_D_fake = self.Loss_GAN(fake_scores, False)
            loss_D = loss_D_real + loss_D_fake
            self.logger.experiment.add_scalar("Loss D", loss_D.item(), self.global_step)
            return loss_D
    def validation_step(self, batch, batch_idx):
        """Compute the full generator loss on one validation batch and keep
        the first (target, source, output) triple for visualization."""
        target_img, source_img, same = batch
        output, z_id, output_z_id, feature_map, output_feature_map = self(target_img, source_img)
        self.generated_img = output
        output_multi_scale_val = self.D(output)
        loss_GAN = self.Loss_GAN(output_multi_scale_val, True, for_discriminator=False)
        loss_E_G, loss_att, loss_id, loss_rec = self.Loss_E_G(target_img, output, feature_map, output_feature_map,
                                                              z_id, output_z_id, same)
        loss_G = loss_E_G + loss_GAN
        return {"loss": loss_G, 'target': target_img[0].cpu(), 'source': source_img[0].cpu(), "output": output[0].cpu(), }
    def validation_end(self, outputs):
        # NOTE(review): `validation_end` is a legacy Lightning hook name;
        # newer versions expect `validation_epoch_end` — confirm against the
        # pinned pytorch_lightning version before renaming.
        loss = torch.stack([x["loss"] for x in outputs]).mean()
        validation_image = []
        for x in outputs:
            validation_image = validation_image + [x['target'], x['source'], x["output"]]
        validation_image = torchvision.utils.make_grid(validation_image, nrow=3)
        self.logger.experiment.add_scalar("Validation Loss", loss.item(), self.global_step)
        self.logger.experiment.add_image("Validation Image", validation_image, self.global_step)
        return {"loss": loss, "image": validation_image, }
    def configure_optimizers(self):
        """One Adam optimizer for G+E, one for D (matching optimizer_idx)."""
        lr_g = self.hp.model.learning_rate_E_G
        lr_d = self.hp.model.learning_rate_D
        b1 = self.hp.model.beta1
        b2 = self.hp.model.beta2
        opt_g = torch.optim.Adam(list(self.G.parameters()) + list(self.E.parameters()), lr=lr_g, betas=(b1, b2))
        opt_d = torch.optim.Adam(self.D.parameters(), lr=lr_d, betas=(b1, b2))
        return [opt_g, opt_d], []
    def train_dataloader(self):
        transform = transforms.Compose([
            transforms.ToTensor(),
        ])
        dataset = AEI_Dataset(self.hp.data.dataset_dir, transform=transform)
        return DataLoader(dataset, batch_size=self.hp.model.batch_size, num_workers=self.hp.model.num_workers, shuffle=True, drop_last=True)
    def val_dataloader(self):
        transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.CenterCrop((256, 256)),
            transforms.ToTensor(),
        ])
        dataset = AEI_Val_Dataset(self.hp.data.valset_dir, transform=transform)
        return DataLoader(dataset, batch_size=1, shuffle=False)
| StarcoderdataPython |
6635221 | <filename>mvlearn/embed/cca.py
"""Canonical Correlation Analysis"""
# Authors: <NAME>, <NAME>
# License: MIT
import numpy as np
import numbers
from scipy.stats import f, chi2
from sklearn.utils.validation import check_is_fitted
from .mcca import MCCA, _i_mcca, _mcca_gevp
from ..utils import check_Xs, param_as_list
class CCA(MCCA):
    """Canonical Correlation Analysis (CCA)
    CCA inherits from MultiCCA (MCCA) but is restricted to 2 views which
    allows for certain statistics to be computed about the results.
    Parameters
    ----------
    n_components : int (default 1)
        Number of canonical components to compute and return.
    regs : float | 'lw' | 'oas' | None, or list, optional (default None)
        CCA regularization for each data view, which can be important
        for high dimensional data. A list will specify for each view
        separately. If float, must be between 0 and 1 (inclusive).
        - 0 or None : corresponds to SUMCORR-AVGVAR MCCA.
        - 1 : partial least squares SVD (generalizes to more than 2 views)
        - 'lw' : Default ``sklearn.covariance.ledoit_wolf`` regularization
        - 'oas' : Default ``sklearn.covariance.oas`` regularization
    signal_ranks : int, None or list, optional (default None)
        The initial signal rank to compute. If None, will compute the full SVD.
        A list will specify for each view separately.
    center : bool, or list (default True)
        Whether or not to initially mean center the data. A list will specify
        for each view separately.
    i_mcca_method : 'auto' | 'svd' | 'gevp' (default 'auto')
        Whether or not to use the SVD based method (only works with no
        regularization) or the gevp based method for informative MCCA.
    multiview_output : bool, optional (default True)
        If True, the ``.transform`` method returns one dataset per view.
        Otherwise, it returns one dataset, of shape (n_samples, n_components)
    Attributes
    ----------
    means_ : list of numpy.ndarray
        The means of each view, each of shape (n_features,)
    loadings_ : list of numpy.ndarray
        The loadings for each view used to project new data,
        each of shape (n_features_b, n_components).
    common_score_norms_ : numpy.ndarray, shape (n_components,)
        Column norms of the sum of the fitted view scores.
        Used for projecting new data
    evals_ : numpy.ndarray, shape (n_components,)
        The generalized eigenvalue problem eigenvalues.
    n_views_ : int
        The number of views
    n_features_ : list
        The number of features in each fitted view
    n_components_ : int
        The number of components in each transformed view
    See also
    --------
    MCCA, KMCCA
    References
    ----------
    .. [#1cca] <NAME>., "Canonical Analysis of Several Sets of
               Variables." Biometrika, 58:433-451, (1971)
    .. [#2cca] <NAME>., et al. "Regularized generalized canonical
               correlation analysis." Psychometrika, 76:257–284, 2011
    Examples
    --------
    >>> from mvlearn.embed import CCA
    >>> X1 = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
    >>> X2 = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> cca = CCA()
    >>> cca.fit([X1, X2])
    CCA()
    >>> Xs_scores = cca.transform([X1, X2])
    """
    def _fit(self, Xs):
        """Helper function for the `.fit` function"""
        Xs, self.n_views_, _, self.n_features_ = check_Xs(
            Xs, return_dimensions=True
        )
        if self.n_views_ != 2:
            # (message fixed: a space was missing after the view count)
            raise ValueError(
                f"CCA accepts exactly 2 views but {self.n_views_} "
                "were provided. Consider using MCCA for more than 2 views")
        if not (isinstance(self.n_components, numbers.Integral) and
                1 <= self.n_components <= min(self.n_features_)):
            # (message fixed: a space was missing before the bracket)
            raise ValueError(
                "n_components must be an integer in the range "
                f"[1, {min(self.n_features_)}]")
        centers = param_as_list(self.center, self.n_views_)
        self.means_ = [np.mean(X, axis=0) if c else None
                       for X, c in zip(Xs, centers)]
        Xs = [X - m if m is not None else X for X, m in zip(Xs, self.means_)]
        if self.signal_ranks is not None:
            # Informative MCCA: initial dimension reduction before the CCA.
            self.loadings_, scores, common_scores_normed, \
                self.common_score_norms_, self.evals_ = _i_mcca(
                    Xs,
                    signal_ranks=self.signal_ranks,
                    n_components=self.n_components,
                    regs=self.regs,
                    method=self.i_mcca_method,
                )
        else:
            # Solve the MCCA generalized eigenvector problem directly.
            self.loadings_, scores, common_scores_normed, \
                self.common_score_norms_, self.evals_ = _mcca_gevp(
                    Xs,
                    n_components=self.n_components,
                    regs=self.regs
                )
        return scores, common_scores_normed
    def stats(self, scores, stat=None):
        r"""
        Compute relevant statistics from the fitted CCA.
        Parameters
        ----------
        scores: array-like, shape (2, n_samples, n_components)
            The CCA scores.
        stat : str, optional (default None)
            The statistic to return. If None, returns a dictionary of all
            statistics. Otherwise, specifies one of the following statistics
            - 'r' : numpy.ndarray of shape (n_components,)
                Canonical correlations of each component.
            - 'Wilks' : numpy.ndarray of shape (n_components,)
                Wilks' Lambda likelihood ratio statistic.
            - 'df1' : numpy.ndarray of shape (n_components,)
                Degrees of freedom for the chi-squared statistic, and
                the numerator degrees of freedom for the F statistic.
            - 'df2' : numpy.ndarray of shape (n_components,)
                Denominator degrees of freedom for the F statistic.
            - 'F' : numpy.ndarray of shape (n_components,)
                Rao's approximate F statistic for H_0(k).
            - 'pF' : numpy.ndarray of shape (n_components,)
                Right-tail pvalue for stats['F'].
            - 'chisq' : numpy.ndarray of shape (n_components,)
                Bartlett's approximate chi-squared statistic for H_0(k)
                with Lawley's modification.
            - 'pChisq' : numpy.ndarray of shape (n_components,)
                Right-tail pvalue for stats['chisq'].
        Returns
        -------
        stats : dict or numpy.ndarray
            Dict containing the statistics with keys specified above or
            one of the statistics if specified by the `stat` parameter.
        """
        check_is_fitted(self)
        scores = check_Xs(scores, enforce_views=2)
        S1, S2 = scores
        assert S1.shape[1] == S2.shape[1], \
            "Scores from each view must have the same number of components."
        n_components = S1.shape[1]
        stats = {}
        # pearson correlation coefficient
        r = self.canon_corrs(scores)
        stats['r'] = r
        r = r.squeeze()
        # Wilks' Lambda test statistic
        d = min([n_components, min(self.n_features_)])
        k = np.arange(d)
        rank1_k = self.n_features_[0] - k
        rank2_k = self.n_features_[1] - k
        # Degenerate components (r == 1 up to floating error) would make
        # log(1 - r^2) blow up; exclude them from the log-lambda sums.
        if r.size > 1:
            nondegen = np.argwhere(r < 1 - 2 * np.finfo(float).eps).squeeze()
        elif r < 1 - 2 * np.finfo(float).eps:
            nondegen = np.array(0, dtype=int)
        else:
            nondegen = np.array([], dtype=int)
        # Fixed: np.NINF was removed in NumPy 2.0; -np.inf is the portable
        # spelling with identical value.
        log_lambda = -np.inf * np.ones(n_components,)
        if nondegen.size > 0:
            if r.size > 1:
                log_lambda[nondegen] = np.cumsum(
                    (np.log(1 - r[nondegen]**2))[::-1])
                log_lambda[nondegen] = log_lambda[nondegen][::-1]
            else:
                log_lambda[nondegen] = np.cumsum(
                    (np.log(1 - r**2)))
        stats['Wilks'] = np.exp(log_lambda)
        # Rao's approximation to F distribution.
        # default value for cases where the exponent formula fails
        s = np.ones(d,)
        # cases where (d1k,d2k) not one of (1,2), (2,1), or (2,2)
        okCases = np.argwhere(rank1_k*rank2_k > 2).squeeze()
        snumer = rank1_k*rank1_k*rank2_k*rank2_k - 4
        sdenom = rank1_k*rank1_k + rank2_k*rank2_k - 5
        s[okCases] = np.sqrt(np.divide(snumer[okCases], sdenom[okCases]))
        # Degrees of freedom for null hypothesis H_0k
        stats['df1'] = rank1_k * rank2_k
        stats['df2'] = (
            S1.shape[0] - .5 * (self.n_features_[0] + self.n_features_[1] + 3)
        ) * s - (.5 * rank1_k * rank2_k) + 1
        # Rao's F statistic
        pow_lambda = stats['Wilks']**(1 / s)
        ratio = np.inf * np.ones(d,)
        ratio[nondegen] = ((1 - pow_lambda[nondegen]) / pow_lambda[nondegen])
        stats['F'] = ratio * stats['df2'] / stats['df1']
        # Right-tailed pvalue for Rao's F
        stats['pF'] = 1 - f.cdf(stats['F'], stats['df1'], stats['df2'])
        # Lawley's modification to Bartlett's chi-squared statistic
        if r.size == 1:
            r = np.array([r])
        stats['chisq'] = -log_lambda * (
            S1.shape[0] - k -
            0.5 * (self.n_features_[0] + self.n_features_[1] + 3) +
            np.cumsum(np.hstack((np.zeros(1,), 1 / r[: d-1]))**2))
        # Right-tailed pvalue for the Lawley modification to Barlett
        stats['pChisq'] = 1 - chi2.cdf(stats['chisq'], stats['df1'])
        if stat is None:
            return stats
        else:
            try:
                return stats[stat]
            except KeyError:
                raise KeyError(f"Provided statistic {stat} must be one of"
                               " the statistics listed in the Parameters.")
| StarcoderdataPython |
9695300 | <gh_stars>0
def convert_headers_to_environ(headers):
    """
    Converts HTTP headers into WSGI environ variables.
    """
    environ = {}
    for name, value in headers.items():
        wsgi_key = 'HTTP_' + name.replace('-', '_').upper()
        environ[wsgi_key] = value.strip()
    return environ
| StarcoderdataPython |
259672 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
This is a plugin for the Sublime Text Editor
https://www.sublimetext.com/
Replace all occurences of the currently selected text in the document with an incrementing number.
Some options are provided:
* Start with an offset
* Use fixed number of digits (fill up with leading 0s)
* Define a preceding text in front of the iterator
'''
import sublime, sublime_plugin
import re
# Name of the plugin's settings file and the help text shown the first
# time expert mode is used (the text is displayed verbatim in a dialog).
SETTINGS_FILE = "SimpleIncrementor.sublime-settings"
EXPHELP = '''Use key:value pairs separated by a blank character to pass options.
Valid Keys:
digits, offset, prectext, step
Example:
digits:5 offset:10
To re-show this dialogue, enable show_help in the Plugin Settings.
'''
def settings():
    """Return the plugin's loaded Sublime settings object."""
    loaded = sublime.load_settings(SETTINGS_FILE)
    return loaded
class SimpleIncrementExpertParseCommand(sublime_plugin.TextCommand):
    """Parse the expert-mode argument string into an options dict and
    forward it to the main ``simple_increment`` command.
    """

    def run(self, edit, cmd):
        # Each whitespace-separated token of the form key:value becomes
        # one dictionary entry.
        pairs = re.findall(r'(\S+):(\S+)', cmd)
        options = dict(pairs)
        sublime.active_window().run_command('simple_increment', options)
class SimpleIncrementExpertCommand(sublime_plugin.TextCommand):
    """Prompt for an expert-mode option string and hand it to the parser.

    On first use (while ``show_help`` is enabled in the settings) a help
    dialog is shown once and the flag is persisted as disabled.
    """

    def run(self, edit):
        show_help = settings().get("show_help") if settings().has("show_help") else False
        if show_help:
            sublime.message_dialog(EXPHELP)
            settings().set("show_help", False)
            sublime.save_settings(SETTINGS_FILE)

        def on_done(user_input):
            sublime.active_window().run_command(
                'simple_increment_expert_parse', {'cmd': user_input})

        self.view.window().show_input_panel(
            'Simple Incrementor - Expert Mode:', '', on_done, None, None)
class SimpleIncrementCommand(sublime_plugin.TextCommand):
    """Replace every selected occurrence with an incrementing number.

    Options (all optional): ``offset`` start value, ``step`` increment,
    ``digits`` zero-padding width, ``prectext`` text placed before each
    number.
    """

    def run(self, edit, **kwargs):
        offset = int(kwargs.get('offset', 0))
        digits = int(kwargs.get('digits', 0))
        step = int(kwargs.get('step', 1))
        prectext = kwargs.get('prectext', '')
        # Extend the selection to every occurrence of the selected text.
        sublime.active_window().run_command('find_all_under')
        replaced = 0
        for index, region in enumerate(self.view.sel()):
            value = offset + index * step
            self.view.replace(edit, region, prectext + str(value).zfill(digits))
            replaced += 1
        self.view.window().status_message('Replaced {} occurances'.format(replaced))
class SimpleIncrementDigitsCommand(sublime_plugin.TextCommand):
    """Ask for a total digit count and zero-pad the numbers accordingly."""

    prectext = ''

    def run(self, edit, prectext=''):
        self.prectext = prectext

        def on_done(answer):
            sublime.active_window().run_command('simple_increment', {
                'digits': answer,
                'prectext': self.prectext,
            })

        self.view.window().show_input_panel(
            'Simple Incrementor: How many total digits?', '', on_done, None, None)
class SimpleIncrementPrectextCommand(sublime_plugin.TextCommand):
    """Ask the user for text to place in front of each number."""

    def run(self, edit):
        def on_done(answer):
            sublime.active_window().run_command('simple_increment',
                                                {'prectext': answer})

        self.view.window().show_input_panel(
            'Simple Incrementor: Preceding Text?', '', on_done, None, None)
class SimpleIncrementPrectextDigitsCommand(sublime_plugin.TextCommand):
    """Chain the preceding-text prompt into the digit-count prompt."""

    def run(self, edit):
        def on_done(answer):
            sublime.active_window().run_command('simple_increment_digits',
                                                {'prectext': answer})

        self.view.window().show_input_panel(
            'Simple Incrementor: Preceding Text?', '', on_done, None, None)
class SimpleIncrementOffsetCommand(sublime_plugin.TextCommand):
    """Ask for a starting offset for the incrementing numbers."""

    def run(self, edit):
        def on_done(answer):
            sublime.active_window().run_command('simple_increment',
                                                {'offset': answer})

        self.view.window().show_input_panel(
            'Simple Incrementor: Offset?', '', on_done, None, None)
| StarcoderdataPython |
316882 | <reponame>hashnfv/hashnfv-daisy<filename>tests/unit/test_libvirt_utils.py
##############################################################################
# Copyright (c) 2017 ZTE Corp and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import os
import sys
import pytest
import xml.etree.ElementTree as ET
from deploy.utils import WORKSPACE
import mock
sys.modules['libvirt'] = mock.Mock()
from deploy import libvirt_utils # noqa: ignore=E402
from deploy.libvirt_utils import (
get_nets_name,
modify_vm_boot_order,
create_virtual_disk,
modify_vm_name,
modify_vm_disk_file,
modify_vm_bridge,
create_vm,
reboot_vm,
get_disk_file,
# delete_vm_and_disk,
create_virtual_network,
# delete_virtual_network,
# get_vm_mac_addresses
) # noqa: ignore=E402
@pytest.mark.parametrize('template_name, exp', [
    ('templates/physical_environment/vms/daisy.xml', []),
    ('templates/virtual_environment/vms/daisy.xml', ['daisy1'])])
def test_get_nets_name(template_name, exp):
    # Parse the fixture template and check the discovered network names.
    template_path = os.path.join(WORKSPACE, template_name)
    root = ET.ElementTree(file=template_path).getroot()
    assert get_nets_name(root) == exp
def test_modify_vm_boot_order():
    # Rewrite the <boot> devices and verify order and count survive.
    template = os.path.join(WORKSPACE, 'templates/virtual_environment/vms/daisy.xml')
    root = ET.ElementTree(file=template).getroot()
    boot_devs = ['hd_test1', 'hd_test2']
    modify_vm_boot_order(root, boot_devs)
    boots = root.find('os').findall('boot')
    assert len(boots) == len(boot_devs)
    for boot_elem, dev in zip(boots, boot_devs):
        assert boot_elem.attrib['dev'] == dev
def test_modify_vm_name():
    # Rename the VM in the template and verify the <name> element changed.
    template = os.path.join(WORKSPACE, 'templates/physical_environment/vms/daisy.xml')
    root = ET.ElementTree(file=template).getroot()
    new_name = 'test_vm'
    modify_vm_name(root, new_name)
    assert root.find('./name').text == new_name
def test_modify_vm_disk_file():
    # Point the VM disks at new qcow2 files and verify every disk element.
    template = os.path.join(WORKSPACE, 'templates/physical_environment/vms/daisy.xml')
    root = ET.ElementTree(file=template).getroot()
    disks_path = [os.path.join('/home/qemu/vms', name)
                  for name in ('daisy_test1.qcow2', 'daisy_test2.qcow2')]
    modify_vm_disk_file(root, disks_path)
    devices = root.find('./devices')
    disks = [d for d in devices.findall('disk') if d.attrib['device'] == 'disk']
    assert len(disks) == len(disks_path)
    for disk, path in zip(disks, disks_path):
        assert disk.attrib['type'] == 'file'
        driver = disk.find('driver')
        assert driver.attrib['name'] == 'qemu' and driver.attrib['type'] == 'qcow2'
        assert disk.find('target').attrib['bus'] == 'ide'
        assert disk.find('source').attrib['file'] == path
def test_modify_vm_bridge():
    # Switch the VM's bridge interface and verify at least one interface matches.
    template = os.path.join(WORKSPACE, 'templates/virtual_environment/vms/daisy.xml')
    root = ET.ElementTree(file=template).getroot()
    bridge = 'daisy_test'
    modify_vm_bridge(root, bridge)
    devices = root.find('./devices')

    def matches(interface):
        source = interface.find('source')
        return (interface.attrib.get('type', None) == 'bridge'
                and source is not None
                and source.attrib.get('bridge', None) == bridge)

    assert any(matches(iface) for iface in devices.findall('interface'))
@pytest.mark.parametrize('status', [
    (0),
    (1)])
@mock.patch('deploy.libvirt_utils.commands.getstatusoutput')
@mock.patch('deploy.libvirt_utils.err_exit')
def test_create_virtual_disk(mock_err_exit, mock_getstatusoutput, status):
    # A failing disk-creation command (non-zero status) must trigger err_exit.
    mock_getstatusoutput.return_value = (status, 'command_output')
    create_virtual_disk('/tmp/vms/daisy.qcow2', 110)
    mock_getstatusoutput.assert_called_once()
    if status:
        mock_err_exit.assert_called_once()
    else:
        mock_err_exit.assert_not_called()
@pytest.mark.parametrize('name, disk_name, physical_bridge', [
    ('dasiy_test_vm', 'daisy_test.qcow2', 'daisy_test_br'),
    (None, None, None)])
def test_create_vm(name, disk_name, physical_bridge):
    # create_vm should return a domain handle with or without extra options.
    template = os.path.join(WORKSPACE, 'templates/physical_environment/vms/daisy.xml')
    disks_path = ([os.path.join('/home/qemu/vms', 'daisy_test.qcow2')]
                  if disk_name else None)
    ret = create_vm(template, name=name, disks=disks_path,
                    physical_bridge=physical_bridge)
    assert ret is not None
@pytest.mark.parametrize('vm_name, boot_devs', [
    ('dasiy_test_vm', None)])
def test_reboot_vm(vm_name, boot_devs):
    # Smoke test: rebooting with default boot devices must not raise.
    reboot_vm(vm_name, boot_devs=boot_devs)
def test_get_disk_file():
    # The physical template fixture ships exactly one known disk file path.
    template = os.path.join(WORKSPACE, 'templates/physical_environment/vms/daisy.xml')
    root = ET.ElementTree(file=template).getroot()
    assert get_disk_file(root) == ['/tmp/workdir/daisy/centos7.qcow2']
def test_create_virtual_network():
    # Creating the network from the fixture template returns a handle.
    template = os.path.join(WORKSPACE, 'templates/physical_environment/networks/daisy.xml')
    assert create_virtual_network(template) is not None
| StarcoderdataPython |
11369475 | <filename>tests/test_population.py
import unittest
# import unittest2 as unittest
import os
from .local_mongo import has_local_mongo
from pychemia.population import LJCluster, RelaxStructures, OrbitalDFTU, NonCollinearMagMoms, RealFunction
def funx2(x):
    """Return the square of *x* (objective function for RealFunction tests)."""
    return x * x
class PopulationTest(unittest.TestCase):
    """Smoke tests for the pychemia population classes.

    Each test is a no-op unless a local MongoDB server is reachable,
    because populations persist their members in a database.
    """

    def test_ljcluster(self):
        """Test (pychemia.population.LJCluster): add random members."""
        if not has_local_mongo():
            return
        popu = LJCluster('test', 'Ne4')
        popu.add_random()
        popu.add_random()
        popu.pcdb.clean()

    def test_structure(self):
        """Test (pychemia.population.RelaxStructures): add random members."""
        if not has_local_mongo():
            return
        popu = RelaxStructures('test', 'NaCl')
        popu.add_random()
        popu.add_random()
        popu.pcdb.clean()

    def test_noncoll(self):
        """Test (pychemia.population.NonCollinearMagMoms): add random members."""
        if not has_local_mongo():
            return
        popu = NonCollinearMagMoms('test', source_dir='tests/data/vasp_02')
        popu.add_random()
        popu.add_random()
        popu.pcdb.clean()

    def test_dftu(self):
        """Test (pychemia.population.OrbitalDFTU): add random members."""
        if not has_local_mongo():
            return
        print(os.getcwd())
        popu = OrbitalDFTU('test', 'tests/data/abinit_05/abinit.in')
        popu.add_random()
        popu.add_random()
        popu.pcdb.clean()

    def test_euclidean(self):
        """Test (pychemia.population.RealFunction): add random members."""
        if not has_local_mongo():
            return
        # RealFunction takes the objective, dimensionality and bounds.
        # NOTE(review): unlike the other tests, no pcdb.clean() here —
        # confirm whether RealFunction needs database cleanup.
        popu = RealFunction(funx2, 2, [-1, 1])
        popu.add_random()
        popu.add_random()
# Allow running this test module directly: `python test_population.py`.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
11331163 | <filename>IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/words/topfiles/setup.py
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# Import Twisted's packaging helpers; all subprojects require the core
# Twisted package to be installed first, so fail with a clear message.
try:
    from twisted.python import dist
except ImportError:
    raise SystemExit("twisted.python.dist module not found.  Make sure you "
                     "have installed the Twisted core package before "
                     "attempting to install any other Twisted projects.")
if __name__ == '__main__':
    # Trove classifiers describing the Twisted Words subproject.
    extraMeta = dict(
        classifiers=[
            "Development Status :: 5 - Production/Stable",
            "Environment :: No Input/Output (Daemon)",
            "Intended Audience :: Developers",
            "License :: OSI Approved :: MIT License",
            "Programming Language :: Python",
            "Topic :: Communications :: Chat",
            "Topic :: Communications :: Chat :: AOL Instant Messenger",
            "Topic :: Communications :: Chat :: ICQ",
            "Topic :: Communications :: Chat :: Internet Relay Chat",
            "Topic :: Internet",
            "Topic :: Software Development :: Libraries :: Python Modules",
        ])
    # Delegate to Twisted's subproject-aware setup wrapper.
    dist.setup(
        twisted_subproject="words",
        scripts=dist.getScripts("words"),
        # metadata
        name="Twisted Words",
        description="Twisted Words contains Instant Messaging implementations.",
        author="Twisted Matrix Laboratories",
        author_email="<EMAIL>",
        maintainer="<NAME>",
        url="http://twistedmatrix.com/trac/wiki/TwistedWords",
        license="MIT",
        long_description="""\
Twisted Words contains implementations of many Instant Messaging protocols,
including IRC, Jabber, OSCAR (AIM & ICQ), and some functionality for creating
bots, inter-protocol gateways, and a client application for many of the
protocols.
In support of Jabber, Twisted Words also contains X-ish, a library for
processing XML with Twisted and Python, with support for a Pythonic DOM and
an XPath-like toolkit.
""",
        **extraMeta)
| StarcoderdataPython |
9659303 | """Miscellaneous routines."""
from __future__ import annotations
import json
import logging
from typing import Any, Callable, Iterable, Mapping, Set, TypeVar, cast
import requests
from ghaudit.auth import AuthDriver
GITHUB_GRAPHQL_DEFAULT_ENDPOINT = "https://api.github.com/graphql"
# pylint: disable=too-few-public-methods
class LazyJsonFmt:
    """Defer JSON serialisation of a logging argument.

    ``json.dumps`` only runs when the log record is actually formatted,
    so log calls at disabled levels pay no serialisation cost.
    """

    def __init__(self, argument: Any) -> None:
        self._value = argument

    def __str__(self) -> str:
        return json.dumps(self._value)
def github_graphql_call(
    call_str: str,
    auth_driver: AuthDriver,
    variables: Iterable[str],
    session: requests.Session | None,
    endpoint: str = GITHUB_GRAPHQL_DEFAULT_ENDPOINT,
) -> Mapping[str, Any]:
    """Make a GraphQL github API call.

    Sends *call_str* with *variables* to *endpoint*, authenticating with
    the headers produced by *auth_driver*.  Returns the decoded JSON
    response.  Raises ``Exception`` on non-200 responses and
    ``RuntimeError`` on GraphQL-level errors in a 200 response.
    """
    logging.debug(
        'Github GraphQL query: "%s"',
        # LazyJsonFmt defers json.dumps until the record is formatted.
        LazyJsonFmt({"query": call_str, "variables": json.dumps(variables)}),
    )
    if not session:
        session = requests.session()
    # NOTE(review): variables are JSON-encoded inside an already-JSON
    # payload; the GitHub API accepts a JSON string here — confirm intended.
    result_raw = session.post(
        endpoint,
        json={"query": call_str, "variables": json.dumps(variables)},
        headers=auth_driver(),
    )
    if result_raw.status_code != 200:
        error_fmt = (
            "Call failed to run by returning code of {}."
            "Error message: {}."
            "Query: {}"
        )
        # Only the first 200 characters of the query are echoed back.
        raise Exception(
            error_fmt.format(
                result_raw.status_code, result_raw.text, call_str[:200]
            )
        )
    result = result_raw.json()
    # A 200 response can still carry GraphQL-level errors in the body.
    if "errors" in result:
        raise RuntimeError(
            "github returned an error: {}".format(result["errors"])
        )
    return cast(Mapping[str, Any], result)
# pylint: disable=invalid-name
T = TypeVar("T")


def find_duplicates(
    sequence: Iterable[T],
    hash_func: Callable[[T], str] | None = None,
) -> Set[str]:
    """Return the items of *sequence* that occur more than once.

    When *hash_func* is given, items are first mapped through it and the
    duplicated mapped values are returned instead of the items themselves.
    """
    items = map(hash_func, sequence) if hash_func else sequence
    seen: Set[str] = set()
    duplicates: Set[str] = set()
    for item in cast(Iterable[str], items):
        if item in seen:
            duplicates.add(item)
        else:
            seen.add(item)
    return duplicates
| StarcoderdataPython |
3232163 | import numpy as np
import torch
from .data import torch2np
from PIL import Image
def colorize(mask: torch.Tensor, palette: list):
    """Render an integer label mask as an RGB array using *palette*.

    The mask is converted to a paletted ("P" mode) PIL image, the palette
    is attached, and the result is expanded back to an RGB numpy array.
    """
    labels = torch2np(mask, squeeze=True)
    image = Image.fromarray(labels.astype(np.uint8)).convert('P')
    image.putpalette(palette)
    return np.array(image.convert('RGB'))
def get_palette(dataset: str):
    """Look up the colour palette for a dataset name (case-insensitive)."""
    dataset = dataset.lower()
    palettes = {'brats': BRATS, 'binary': BRATS_BINARY}
    if dataset not in palettes:
        raise ValueError(f"unknown palette {dataset}")
    return palettes[dataset]
"""
Palette for datasets
currently using:
1. Brats
2. n/a
"""
BRATS = [
45, 0, 55, # 0: Background
20, 90, 139, # 1: Tumor core (BLUE)
22, 159, 91, # 2: Invaded Tissue (GREEN)
255, 232, 9 # 3: Enhancing Tumor (YELLOW)
]
BRATS_BINARY = [
45, 0, 55, # 0: Background
255, 232, 9 # 3: Whole Tumor (YELLOW)
]
| StarcoderdataPython |
1904156 | <filename>tests/test_clean.py
import pytest
from pyspark.sql import types as T
from delta_utils.clean import fix_invalid_column_names, flatten
def test_fix_invalid_col_names(spark):
    """Invalid characters in column names are replaced by their ordinals."""
    # Case sensitivity must be on so near-duplicate names stay distinct.
    spark.conf.set("spark.sql.caseSensitive", "true")
    schema = T.StructType(
        [
            T.StructField("id", T.StringType(), True),
            T.StructField("dupli,cate", T.StringType(), True),
            T.StructField("dupli;cate", T.StringType(), True),
            T.StructField("ge n(de-r", T.StringType(), True),
            T.StructField("sa;lar)y", T.IntegerType(), True),
        ]
    )
    data = [
        (
            "1",
            "asd",
            "asd2",
            "Unknown",
            4,
        ),
        (
            "2",
            "asd",
            "asd2",
            "Man",
            3,
        ),
    ]
    df = spark.createDataFrame(data, schema)
    columns = fix_invalid_column_names(df).columns
    # Each forbidden character is replaced by its decimal code point,
    # e.g. ',' -> 44, ';' -> 59, ' ' -> 32, '(' -> 40, '-' -> 45, ')' -> 41.
    assert columns == ["id", "dupli44cate", "dupli59cate", "ge32n40de45r", "sa59lar41y"]
def test_invalid_col_names_raise_error(spark):
    """Renaming must fail when a sanitised name collides with an existing one."""
    spark.conf.set("spark.sql.caseSensitive", "true")
    # "dupli;cate" sanitises to "dupli59cate", which is already a column.
    schema = T.StructType(
        [
            T.StructField("id", T.StringType(), True),
            T.StructField("dupli59cate", T.StringType(), True),
            T.StructField("dupli;cate", T.StringType(), True),
            T.StructField("ge n(de-r", T.StringType(), True),
            T.StructField("sa;lar)y", T.IntegerType(), True),
        ]
    )
    data = [
        (
            "1",
            "asd",
            "asd2",
            "Unknown",
            4,
        ),
        (
            "2",
            "asd",
            "asd2",
            "Man",
            3,
        ),
    ]
    df = spark.createDataFrame(data, schema)
    with pytest.raises(
        ValueError,
        match="Found duplicates columns when renaming invalid columns: dupli59cate",
    ):
        fix_invalid_column_names(df)
def test_flatten_table(spark):
    """Nested struct columns are flattened with '_'-joined names.

    Arrays are left as-is; dots inside field names are also replaced
    with underscores.
    """
    spark.conf.set("spark.sql.caseSensitive", "true")
    schema = T.StructType(
        [
            T.StructField(
                "name",
                T.StructType(
                    [
                        T.StructField("first name", T.StringType(), True),
                        T.StructField("id", T.StringType(), True),
                        T.StructField("@ID", T.StringType(), True),
                        T.StructField("last,name", T.StringType(), True),
                        T.StructField("lastname.test", T.StringType(), True),
                        T.StructField(
                            "nested",
                            T.StructType(
                                [T.StructField("test sfd", T.StringType(), True)]
                            ),
                            True,
                        ),
                    ]
                ),
            ),
            T.StructField(
                "items",
                T.ArrayType(
                    T.StructType([T.StructField("swo:rd", T.BooleanType(), True)])
                ),
            ),
            T.StructField("id", T.StringType(), True),
            T.StructField("dupli,cate", T.StringType(), True),
            T.StructField("dupli;cate", T.StringType(), True),
            T.StructField("ge n(de-r", T.StringType(), True),
            T.StructField("sa;lar)y", T.IntegerType(), True),
        ]
    )
    data = [
        (
            ("Linus", "123", "456", "Wallin", "W2", ("asd",)),
            [(True,)],
            "1",
            "asd",
            "asd2",
            "Unknown",
            4,
        ),
        (
            ("Niels", "123", "768", "Lemmens", "L2", ("asd",)),
            [(True,)],
            "2",
            "asd",
            "asd2",
            "Man",
            3,
        ),
    ]
    df = spark.createDataFrame(data, schema)
    columns = flatten(df).columns
    # Struct members get a "<parent>_" prefix; the "items" array column
    # and flat columns keep their (still-unsanitised) names.
    assert columns == [
        "name_first name",
        "name_id",
        "name_@ID",
        "name_last,name",
        "name_lastname_test",
        "name_nested_test sfd",
        "items",
        "id",
        "dupli,cate",
        "dupli;cate",
        "ge n(de-r",
        "sa;lar)y",
    ]
def test_flatten_table_raise_error(spark):
    """Flattening must fail when a flattened name collides with an existing column."""
    spark.conf.set("spark.sql.caseSensitive", "true")
    # `name`.`id` flattens to "name_id", which already exists at top level.
    schema = T.StructType(
        [
            T.StructField(
                "name",
                T.StructType(
                    [
                        T.StructField("id", T.StringType(), True),
                    ]
                ),
            ),
            T.StructField("name_id", T.StringType(), True),
        ]
    )
    data = [
        (
            ("Linus",),
            "1",
        ),
        (
            ("Linus",),
            "1",
        ),
    ]
    df = spark.createDataFrame(data, schema)
    with pytest.raises(
        ValueError,
        match="Could not rename column `name`.`id` to name_id, because name_id already exists",
    ):
        flatten(df)
| StarcoderdataPython |
6532931 | <gh_stars>0
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A, B):
    """Count the maximum number of non-overlapping segments.

    ``A[i]``/``B[i]`` are the inclusive start/end of segment *i*; the
    segments are given sorted by their end points, so a greedy sweep that
    keeps every segment starting after the last accepted end is optimal.
    """
    count = 0
    last_end = -1
    for start, end in zip(A, B):
        if start > last_end:
            count += 1
            last_end = end
    return count
| StarcoderdataPython |
324820 | <filename>various_responses/various_response_app.py<gh_stars>0
import pathlib
import json
from io import StringIO
import csv
from urllib.parse import quote
from werkzeug.exceptions import HTTPException
from werkzeug.routing import Map, Rule
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import SharedDataMiddleware
from werkzeug.datastructures import Headers
class Application:
    """A small WSGI demo application showing various response types."""

    def __init__(self):
        # Route table: URL rule -> endpoint name; the endpoint is turned
        # into a handler-method name in dispatch_request().
        self.url_map = Map([
            Rule('/get-only', endpoint='get_only', methods=['GET']),
            Rule('/post-only', endpoint='post_only', methods=['POST']),
            Rule('/json', endpoint='json'),
            Rule('/upload', endpoint='upload'),
            Rule('/download', endpoint='download'),
            Rule('/extension.html', endpoint='extension'),
        ])

    def dispatch_request(self, request):
        """Resolve the URL against the route map and call `<endpoint>_handler`."""
        adapter = self.url_map.bind_to_environ(request.environ)
        try:
            endpoint, values = adapter.match()
            return getattr(self, f'{endpoint}_handler')(request, **values)
        except HTTPException as e:
            # werkzeug HTTP exceptions are themselves valid WSGI responses.
            return e

    def get_only_handler(self, request):
        # $ curl --include localhost:5000/get-only
        # HTTP/1.0 200 OK
        # Content-Type: text/plain; charset=utf-8
        # ...
        # GET Only!
        #
        # $ curl -w '\n' --include -X POST 'localhost:5000/get-only' --data 'foo=1'
        # HTTP/1.0 405 METHOD NOT ALLOWED
        # Content-Type: text/html
        # Allow: HEAD, GET
        return Response('GET Only!\n')

    def post_only_handler(self, request):
        # $ curl -w '\n' --include -X POST 'localhost:5000/post-only' --data 'foo=1'
        # HTTP/1.0 200 OK
        # Content-Type: text/plain; charset=utf-8
        # ...
        # POST Only: 1
        #
        # $ curl --include 'localhost:5000/post-only'
        # HTTP/1.0 405 METHOD NOT ALLOWED
        # Content-Type: text/html
        # Allow: POST
        return Response(f'POST Only: {request.form.get("foo")}\n')

    def json_handler(self, request):
        """Echo the `input` form field inside a fixed JSON document."""
        input_data = request.form.get('input')
        result = {
            'foo': 'abc',
            'bar': ['ham', 'spam', 'egg'],
            'result': input_data,
        }
        return Response(json.dumps(result), content_type='application/json')

    def upload_handler(self, request):
        """Save the uploaded file under ./uploads/ and acknowledge."""
        f = request.files.get('upload_file')
        f.save(str(pathlib.Path(f'./uploads/{f.filename}')))
        return Response('hello upload')

    def download_handler(self, request):
        """Build a small CSV in memory and return it as a file download."""
        field_names = ['No', 'Name']
        contents = [
            {'No': 1, 'Name': 'Apple'},
            {'No': 2, 'Name': 'Mandarin'},
            {'No': 3, 'Name': 'Grape'},
        ]
        stream = StringIO()
        writer = csv.DictWriter(stream, fieldnames=field_names)
        # Write the CSV header row.
        writer.writeheader()
        # Write the CSV data rows.
        writer.writerows(contents)
        # Take the accumulated text out of the stream for the response body.
        data = stream.getvalue()
        headers = Headers()
        # The plain `filename=` form cannot carry non-ASCII (Japanese)
        # filenames (except in IE):
        # headers.add('Content-Disposition', 'attachment', filename='foo.csv')
        # URL-encode the non-ASCII filename instead (the RFC 5987 method).
        # Even so, on Mac the tilde ends up converted to an underscore:
        # an unsuitable character for a filename.
        encoded_filename = quote(request.form['filename'], safe='~')
        headers.add('Content-Disposition', f"attachment; filename*=UTF-8''{encoded_filename}")
        return Response(
            data,
            headers=headers,
        )

    def extension_handler(self, request):
        """Demonstrate routing a URL that carries a file extension."""
        return Response('extension request')

    def wsgi_app(self, environ, start_response):
        """The actual WSGI callable; kept separate so middleware can wrap it."""
        request = Request(environ)
        response = self.dispatch_request(request)
        return response(environ, start_response)

    def __call__(self, environ, start_response):
        return self.wsgi_app(environ, start_response)
def create_app(with_static=True):
    """Build the WSGI application, optionally serving ./static files."""
    app = Application()
    if with_static:
        static_root = str(pathlib.Path('./static'))
        app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
            '/static': static_root,
        })
    return app
# Run a local development server when executed directly.
if __name__ == '__main__':
    from werkzeug.serving import run_simple
    app = create_app()
    run_simple('0.0.0.0', 5000, app, use_debugger=True, use_reloader=True)
| StarcoderdataPython |
112157 | <filename>tests/test_formparsers.py
import os
from starlette.formparsers import UploadFile
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.testclient import TestClient
class ForceMultipartDict(dict):
    """A dict that is truthy even when empty, forcing multipart encoding."""

    def __bool__(self):
        return True


# FORCE_MULTIPART is an empty dict that boolean-evaluates as `True`.
FORCE_MULTIPART = ForceMultipartDict()
def app(scope):
    """Minimal ASGI app: parse the submitted form and echo it back as JSON.

    Uploaded files are returned as {"filename": ..., "content": ...};
    plain form fields are echoed verbatim.
    """
    async def asgi(receive, send):
        request = Request(scope, receive)
        data = await request.form()
        output = {}
        for key, value in data.items():
            if isinstance(value, UploadFile):
                # Read the upload into memory and decode it for the JSON body.
                content = await value.read()
                output[key] = {"filename": value.filename, "content": content.decode()}
            else:
                output[key] = value
        await request.close()
        response = JSONResponse(output)
        await response(receive, send)
    return asgi
def test_multipart_request_data(tmpdir):
    # FORCE_MULTIPART makes the client send multipart even with no files.
    client = TestClient(app)
    response = client.post("/", data={"some": "data"}, files=FORCE_MULTIPART)
    assert response.json() == {"some": "data"}
def test_multipart_request_files(tmpdir):
    # A real file upload is echoed back as its filename plus decoded content.
    path = os.path.join(tmpdir, "test.txt")
    with open(path, "wb") as file:
        file.write(b"<file content>")
    client = TestClient(app)
    response = client.post("/", files={"test": open(path, "rb")})
    assert response.json() == {
        "test": {"filename": "test.txt", "content": "<file content>"}
    }
def test_urlencoded_request_data(tmpdir):
    # Plain form-encoded POST bodies are parsed and echoed back.
    response = TestClient(app).post("/", data={"some": "data"})
    assert response.json() == {"some": "data"}
def test_no_request_data(tmpdir):
    # A POST with no body yields an empty form dict.
    response = TestClient(app).post("/")
    assert response.json() == {}
| StarcoderdataPython |
4859808 | """A system wide device which can be defined in the main config."""
import abc
import asyncio
from mpf.core.device import Device
class SystemWideDevice(Device, metaclass=abc.ABCMeta):

    """A system wide device which can be defined in the main config."""

    # No instance attributes of its own; keeps subclasses slots-compatible.
    __slots__ = []

    @asyncio.coroutine
    def device_added_system_wide(self):
        """Add the device system wide.

        Delegates to the subclass's asynchronous ``_initialize()`` hook.
        NOTE(review): generator-based coroutines (@asyncio.coroutine /
        yield from) were removed in Python 3.11 — confirm the supported
        Python range before migrating this to async/await, since callers
        may also use `yield from` on it.
        """
        yield from self._initialize()
| StarcoderdataPython |
1953983 | from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
from ckeditor_uploader.fields import RichTextUploadingField
class Staff(models.Model):
    """A staff member, optionally linked to a Django auth user."""
    username = models.CharField(max_length=50, null=True)
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    profile_picture = models.ImageField(blank=True)
    email = models.EmailField(blank=True)
    phone_number = models.IntegerField(blank=True, null=True)
    name = models.CharField(max_length=50, null=True, blank=True)

    def __str__(self):
        return self.get_name

    @property
    def get_name(self):
        # Prefer the display name when it is set and non-empty;
        # otherwise fall back to the username.
        if self.name is not None and len(self.name) > 0:
            return str(self.name)
        return self.username
class Tag(models.Model):
    """A free-form tag that can be attached to posts."""
    tag = models.CharField(max_length=30)

    def __str__(self):
        return self.tag
class Post(models.Model):
    """A blog post written by a Staff author."""
    author = models.ForeignKey(Staff, on_delete=models.CASCADE, null=True)
    slug = models.SlugField(blank=True, null=True)
    body = RichTextUploadingField()  # rich-text post content
    thumbnail = models.ImageField(upload_to='posts')
    key_words = models.TextField(blank=True)
    tags = models.ManyToManyField(Tag, blank=True)
    posted = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    trending = models.BooleanField(default=False)  # the first three posts on the home screen
    title = models.CharField(max_length=50)
    visitors = models.IntegerField(default=0)
    top_post = models.BooleanField(default=False)  # appears near the bottom
    catogory = models.CharField(max_length=40, null=True)
    sub_title = models.CharField(max_length=100, null=True)
    active = models.BooleanField(default=False)  # if True, then don't show the post
    show_thumbnail = models.BooleanField(default=False)

    def save(self, *args, **kwargs):
        """Populate search keywords and a unique slug, then persist.

        Accepts and forwards the standard ``Model.save`` arguments
        (``force_insert``, ``update_fields``, ...), which the previous
        zero-argument override rejected with a TypeError.
        """
        # NOTE(review): this prepends title/sub_title on EVERY save, so
        # key_words grows a little with each update — confirm intended.
        self.key_words = self.title + " " + self.sub_title + ' ' + self.key_words
        # Use the explicit slug when provided, otherwise derive it from the title.
        slug = slugify(str(self.title)) if self.slug is None else slugify(self.slug)
        if self.posted is None:
            # Only on creation: `posted` is populated by auto_now_add during
            # the first successful save, so it is still None here.
            # Append an increasing counter to the *base* slug until unique.
            # (The old code sliced characters off the original slug before
            # the first counter was ever appended, mangling colliding slugs.)
            base_slug = slug
            count = 0
            while Post.objects.filter(slug=slug).exists():
                slug = slugify(base_slug + '-' + str(count))
                count += 1
        self.slug = slug
        super().save(*args, **kwargs)

    def __str__(self):
        return str(self.title + ' by ' + self.author.get_name)

    @property
    def has_tags(self):
        # exists() avoids materialising the whole tag list just to count it.
        return self.tags.exists()

    @property
    def get_tags(self):
        return self.tags.all()
# All the models that we want to register in the admin page, so we can edit them there.
to_be_added_in_admin_page = [Post,Staff,Tag]
3449515 | <reponame>PacktPublishing/Python-Machine-Learning-By-Example-Second-Edition
'''
Source codes for Python Machine Learning By Example 2nd Edition (Packt Publishing)
Chapter 3: Mining the 20 Newsgroups Dataset with Clustering and Topic Modeling Algorithms
Author: Yuxi (<NAME>
'''
from sklearn import datasets
# Use only petal length and petal width (feature columns 2 and 3) of iris.
iris = datasets.load_iris()
X = iris.data[:, 2:4]
y = iris.target
import numpy as np
from matplotlib import pyplot as plt
# Scatter-plot each of the three iris species separately (ground truth).
y_0 = np.where(y==0)
plt.scatter(X[y_0, 0], X[y_0, 1])
y_1 = np.where(y==1)
plt.scatter(X[y_1, 0], X[y_1, 1])
y_2 = np.where(y==2)
plt.scatter(X[y_2, 0], X[y_2, 1])
plt.show()
# Initialise the k centroids by sampling k points from the data set.
k = 3
random_index = np.random.choice(range(len(X)), k)
centroids = X[random_index]
def visualize_centroids(X, centroids):
    """Scatter-plot the data points with the current centroids overlaid."""
    plt.scatter(X[:, 0], X[:, 1])
    # Centroids are drawn as large dark stars on top of the points.
    plt.scatter(centroids[:, 0], centroids[:, 1],
                marker='*', s=200, c='#050505')
    plt.show()
def dist(a, b):
    """Row-wise Euclidean distance between *a* and *b* (broadcasts)."""
    difference = a - b
    return np.linalg.norm(difference, axis=1)
def assign_cluster(x, centroids):
    """Return the index of the centroid nearest to the point *x*."""
    distances_to_centroids = dist(x, centroids)
    nearest = np.argmin(distances_to_centroids)
    return nearest
def update_centroids(X, centroids, clusters):
    """Move each centroid to the mean of the points assigned to it.

    Mutates *centroids* in place.  The number of clusters is derived
    from the centroid array itself instead of the module-level global
    ``k`` the old code silently depended on, so the function now works
    for any number of clusters.
    """
    for cluster_index in range(len(centroids)):
        members = np.where(clusters == cluster_index)
        centroids[cluster_index] = np.mean(X[members], axis=0)
# Standard k-means loop: assign every point to its nearest centroid, then
# move each centroid to the mean of its members, until the centroids stop
# moving (within tol) or max_iter iterations are reached.
clusters = np.zeros(len(X))
tol = 0.0001
max_iter = 100
iter = 0
centroids_diff = 100000
from copy import deepcopy
while iter < max_iter and centroids_diff > tol:
    for i in range(len(X)):
        clusters[i] = assign_cluster(X[i], centroids)
    # Keep a copy so the centroid displacement can be measured afterwards.
    centroids_prev = deepcopy(centroids)
    update_centroids(X, centroids, clusters)
    iter += 1
    # Frobenius norm of the overall centroid movement, used as stop test.
    centroids_diff = np.linalg.norm(centroids - centroids_prev)
    print('Iteration:', str(iter))
    print('Centroids:\n', centroids)
    print('Centroids move: {:5.4f}'.format(centroids_diff))
# NOTE(review): indentation was lost in this extract — this call may
# originally have been inside the loop (per-iteration visualisation);
# it is placed after convergence here.
visualize_centroids(X, centroids)
# Final visualisation: points coloured per cluster plus the centroids.
for i in range(k):
    cluster_i = np.where(clusters == i)
    plt.scatter(X[cluster_i, 0], X[cluster_i, 1])
plt.scatter(centroids[:, 0], centroids[:, 1], marker='*', s=200, c='#050505')
plt.show()
| StarcoderdataPython |
134693 | #!/usr/bin/env python
import sctp
import binascii
import sys, socket
import time, IPy
from itertools import repeat
#for local testing: sudo ncat --sctp -l -p 36422
#interface 3GPP S1-MME
#verify that the following modules are loaded
#sudo insmod /lib/modules/3.6.11-4.fc16.i686/kernel/lib/libcrc32c.ko
#sudo insmod /lib/modules/3.6.11-4.fc16.i686/kernel/net/sctp/sctp.ko
#echo 1 > /proc/sys/net/sctp/max_init_retransmits
def usage():
    # Print invocation help and terminate.
    # NOTE(review): exits with status 0 even on misuse — confirm intended.
    print "usage : python x2_pci_collision_server.py <dst ip > <local_ip>"
    exit(0)
# Require both the destination IP and the local IP on the command line.
if len(sys.argv) < 3 :
    usage()
    exit(0)  # unreachable: usage() already exits
#pci set to 159,157,161
payload = ("0006008ac9000004001500080022f2100000011000140089cd0540009f0022f210f"
"4241015926022f210004c2c05dc55002e0022f210f4242010000105dc0022f210f4"
"242020000005dc0022f210f4242030000205dc0022f210dbedd02000f505dc0022f"
"210dbedd01000f305dc0022f210dbe7f010006005dc0022f210dbe7f030006105dc"
"0022f210dbcd702000ac05dc0022f210dbea8040017b05dc0022f210dbd30020011"
"105dc0022f210dbd30030011305dc0022f210dbf91010002905dc0022f210dbd810"
"20007705dc0022f210dbcb5040003605dc0022f210dbf7f03000e105dc0022f210d"
"be7f060006105dc0022f210dbc23040010705dc0022f210dbc23050010505dc0022"
"f210dbf7f02000e305dc0022f210dbcb901001e605dc0022f210dbe7f040006005d"
"c0022f210dbcac01001b705dc0022f210dbe7f050006205dc0022f210f424204000"
"9d0c670022f210f424205000030c670022f210f424206000040c670022f210dbd54"
"0100113189c0022f210dbd540200112189c0022f210dbd540300111189c0022f210"
"dbc230100149189c0022f210dbc230200147189c0022f210dbcd50200193189c002"
"2f210dbc4602001bf189c0022f210dbea8010018e189c0022f210dbeb4030009218"
"9c0022f210dbdaf01001cc189c0022f210dbd55010010c189c0022f210dbd550200"
"<KEY>"
"<KEY>"
"0dbe66020005a189c0022f210dbcd50300194189c0022f210dbedb0300150189c40"
"00a10022f210f4241025926022f210004c2c05dc55001f0022f210f424202000000"
"5dc0022f210f4242010000105dc0022f210f4242030000205dc0022f210dbd30030"
"011305dc0022f210dbcac01001b705dc0022f210dbea8040017b05dc0022f210dbf"
"7f03000e105dc0022f210dbc23050010505dc0022f210dbea8050017a05dc0022f2"
"10f424204000050c670022f210f424205000030c670022f210f424206000040c670"
"022f210dbd540100113189c0022f210dbd540200112189c0022f210dbd540300111"
"189c0022f210dbc230200147189c0022f210dbeb40100090189c0022f210dbeb402"
"00091189c0022f210dbeb40300092189c0022f210dbeec01000e0189c0022f210db"
"daf01001cc189c0022f210dbdaf02001cb189c0022f210dbdaf03001cd189c0022f"
"210dbd55010010c189c0022f210dbd55020010b189c0022f210dbd55030010d189c"
"0022f210dbdac0200172189c0022f210dbea8020018d189c0022f210dbea8010018"
"e189c0022f210dbcd50300194189c0022f210dbe1302001bc189c40009f0022f210"
"f4241035926022f210004c2c05dc5500290022f210f4242010000105dc0022f210f"
"4242030000205dc0022f210f4242020000005dc0022f210dbedd02000f505dc0022"
"f210dbedd01000f305dc0022f210dbcd702000ac05dc0022f210dbd30030011305d"
"c0022f210dbd4202000b105dc0022f210dbf7f03000e105dc0022f210dbca301001"
"0b05dc0022f210dbca3030010d05dc0022f210dbea8040017b05dc0022f210f4242"
"04000050c670022f210f424205000030c670022f210f424206000040c670022f210"
"dbd540100113189c0022f210dbd540200112189c0022f210dbd540300111189c002"
"2f210dbe9003001e7189c0022f210dbc4602001bf189c0022f210dbc4603001c018"
"9c0022f210dbcb90400196189c0022f210dbe1303001bd189c0022f210dbea80300"
"18c189c0022f210dbe3201001d4189c0022f210dbedc010009a189c0022f210dbed"
"<KEY>"
"0dbd55010010c189c0022f210dbe5a010013a189c0022f210dbe66010005b189c00"
"22f210dbdaf01001cc189c0022f210dbea8010018e189c0022f210dbe66030005c1"
"89c0022f210dbd55020010b189c0022f210dbe5a0300139189c0022f210dbcd7040"
"00ed189c0022f210dbdaf03001cd189c0022f210dbe66020005a189c0022f210dbe"
"5a0200138189c4000090022f210f4241045926022f2100052b70c6744001a0022f2"
"10f424204000050c670022f210f424205000030c670022f210f424206000040c670"
"022f210f4242010000105dc0022f210f4242020000005dc0022f210f42420300002"
"05dc0022f210dbd30020011105dc0022f210dbcd702000ac05dc0022f210dbf7f03"
"000e105dc0022f210dbea8040017b05dc0022f210dbf91010002905dc0022f210db"
"d540100113189c0022f210dbd540200112189c0022f210dbd540300111189c0022f"
"210dbc230100149189c0022f210dbc230200147189c0022f210dbcd50200193189c"
"0022f210dbc4602001bf189c0022f210dbea8010018e189c0022f210dbeb4030009"
"2189c0022f210dbdaf01001cc189c0022f210dbd55010010c189c0022f210dbd550"
"20010b189c0022f210dbd55030010d189c0022f210dbe5a010013a189c0022f210d"
"be7f0100162189c40000b0022f210f4241055926022f2100052b70c674400180022"
"f210f424204000050c670022f210f424205000030c670022f210f424206000040c6"
"70022f210f4242010000105dc0022f210f4242030000205dc0022f210f424202000"
"0005dc0022f210dbea8040017b05dc0022f210dbf7f03000e105dc0022f210dbd54"
"0100113189c0022f210dbd540200112189c0022f210dbd540300111189c0022f210"
"dbc230200147189c0022f210dbeb40100090189c0022f210dbeb40200091189c002"
"2f210dbeb40300092189c0022f210dbeec01000e0189c0022f210dbdaf01001cc18"
"9c0022f210dbdaf02001cb189c0022f210dbdaf03001cd189c0022f210dbd550100"
"10c189c0022f210dbd55020010b189c0022f210dbd55030010d189c0022f210dbda"
"c0200172189c0022f210dbea8010018e189c40000a0022f210f4241065926022f21"
"00052b70c6744001d0022f210f424204000050c670022f210f424205000030c6700"
"22f210f424206000040c670022f210f4242010000105dc0022f210f424202000000"
"5dc0022f210f4242030000205dc0022f210dbcd702000ac05dc0022f210dbd54010"
"0113189c0022f210dbd540200112189c0022f210dbd540300111189c0022f210dbe"
"9003001e7189c0022f210dbc4602001bf189c0022f210dbc4603001c0189c0022f2"
"10dbcb90400196189c0022f210dbe1303001bd189c0022f210dbea8030018c189c0"
"022f210dbe3201001d4189c0022f210dbedb0300150189c0022f210dbe7903000ae"
"189c0022f210dbd55030010d189c0022f210dbd55010010c189c0022f210dbe5a03"
"00139189c0022f210dbd55020010b189c0022f210dbdaf03001cd189c0022f210db"
"e66010005b189c0022f210dbe5a010013a189c0022f210dbdaf01001cc189c0022f"
"210dbe66030005c189c0022f210dbe66020005a189c001800060022f210801400c8"
"4080d90b0022f210f4241010f80a0a010a000001f4100022f210f4241020f80a0a0"
"10b000001f4100022f210f4241030f80a0a010c000001f4100022f210f4241040f8"
"0a0a010d000001f4100022f210f4241050f80a0a010e000001f4100022f210f4241"
"060f80a0a010f000001f4100022f210f4241070f80a0a0110000001f4100022f210"
"f4241080f80a0a0111000001f4100022f210f4241090f80a0a0112000001f410002"
"2f210f42410a0f80a0a0113000001f4100022f210f42410b0f80a0a0114000001f4"
"100022f210f42410c0f80a0a0115000001f410")
dest_ip = sys.argv[1]
ip = IPy.IP(sys.argv[2])
# Bind an SCTP one-to-one socket on port 36422 of the given local address.
local_ip = (ip.strNormal(0), 36422)
s = sctp.sctpsocket_tcp(socket.AF_INET)
s.bind(local_ip)
s.listen(1)
# Accept connections until the expected peer (dest_ip) connects.
while 1 :
    opened_conn, client_address = s.accept()
    if dest_ip == client_address[0] :
        break
opened_conn.settimeout(5)
# Decode the hex payload and push it over the established association.
data = binascii.unhexlify(payload)
opened_conn.send(data)
time.sleep(1)
# Python 2 prompt keeps the socket open until the operator quits.
raw_input("press ESC to quit....")
s.close()
| StarcoderdataPython |
4952681 | <reponame>khilnani/badspider.py
#!/usr/bin/env python
import sys
import re
import os
import json
from distutils.dir_util import mkpath
import argparse
import urllib2
import requests
from urlparse import urlparse
from mimetypes import guess_extension
import time
"""
./spidey.py \
-d test \
-f 'www.google.com' \
-u 'https://www.google.com/' \
-hh '{"Accept" : "application/json"}' \
-n 2 \
-m 10
./spidey.py \
--dir test \
--filter 'www.google.com' \
--url 'https://www.google.com/' \
--headers '{"Accept" : "application/json"}' \
--depth 2 \
--max 10
"""
# Bookkeeping / tunables shared by download() and main().
downloaded_urls = []  # URLs already fetched (avoids re-downloading)
max_downloads = 100  # overall download budget
max_depth = 10  # recursion depth budget
sleep = 0  # pause between downloads, seconds
base_url = None  # base for resolving relative hrefs (set from --base)
# NOTE(review): "endpint" and "secondsi" below are typos in user-facing help
# text; left as-is here because this edit only adds comments.
parser = argparse.ArgumentParser(description='Terrible web spider, but useful for recursive API downloads.')
parser.add_argument('-d', '--dir', dest='dir', metavar="DIR", help="Directory to save to.")
parser.add_argument('-u', '--url', dest='url', metavar="URL", help="The url or api endpint to download.")
parser.add_argument('-b', '--base', dest='base', metavar="URL", help="The base url to use for relative links.")
parser.add_argument('-f', '--filter', dest='filter', metavar="FILTER", help="URL filter to limit recursive API calls.")
parser.add_argument('-hh', '--headers', dest='headers', metavar="HEADERS", help="HTTP Headers.")
parser.add_argument('-n', '--depth', dest='depth', type=int, metavar="DEPTH", help="Recursive depth (Default: {}).".format(max_depth))
parser.add_argument('-m', '--max', dest='max', type=int, metavar="NUM", help="Maximum number of downloads (Default: {}).".format(max_downloads))
parser.add_argument('-s', '--sleep', dest='sleep', type=int, metavar="NUM", help="Pause or sleep time between downloads in secondsi.")
args = parser.parse_args()
def get_ext(content_type):
    """Map an HTTP Content-Type header value to a file extension.

    Tries mimetypes first, then falls back to 'json'/'html' keyword
    sniffing, and finally to '.txt'.
    """
    mime = content_type.split()[0].rstrip(";")
    guessed = guess_extension(mime)
    if guessed:
        return guessed
    for keyword, fallback in (('json', '.json'), ('html', '.html')):
        if keyword in content_type:
            return fallback
    return '.txt'
def find_urls(data, filter):
    """Extract URLs from *data* that contain the substring *filter*.

    Finds absolute http(s) URLs anywhere in the text, plus <a href="...">
    link targets; relative hrefs are prefixed with the module-level
    ``base_url``.

    Parameters
    ----------
    data : str
        Raw response body to scan.
    filter : str
        Substring a URL must contain to be kept (NOTE: shadows the builtin).

    Returns
    -------
    list of str
    """
    results = []
    # Raw strings so the regex escapes are not interpreted by Python itself.
    urls = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', data)
    for url in urls:
        if filter in url:
            results.append(url)
    aurls = re.findall(r'<a\s+(?:[^>]*?\s+)?href="([^"]*)"', data)
    for aurl in aurls:
        if filter in aurl:
            if aurl.find('http') != 0:
                # Bug fix: base_url is None when --base was not given; the
                # original raised TypeError here.  Fall back to keeping the
                # relative URL unchanged instead of crashing.
                aurl = (base_url or '') + aurl
            results.append(aurl)
    return results
def sanitize(filename):
    """Turn a URL path+query into a safe flat file name (max 125 chars)."""
    # Fold URL punctuation into filename-friendly characters.
    for old, new in (('/', '_'), ('?', '..'), ('=', '.'), ('&', '.'), (';', '-')):
        filename = filename.replace(old, new)
    # Drop anything outside a conservative whitelist.
    allowed = '-_.() abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    cleaned = ''.join(ch for ch in filename if ch in allowed)
    if len(cleaned) > 1:
        # Trim a single leading/trailing underscore left over from slashes.
        if cleaned.startswith('_'):
            cleaned = cleaned[1:]
        if cleaned.endswith('_'):
            cleaned = cleaned[:-1]
    elif cleaned == '_':
        cleaned = 'index'
    return cleaned[0:125]
def wait():
    """Sleep for the configured pause between downloads, if one is set."""
    if sleep <= 0:
        return
    print('Waiting for {} seconds ...'.format(sleep))
    time.sleep(sleep)
def download( url, filter, dir, headers, count_depth=1):
    """Fetch *url*, save the body under args.dir, then recurse on found URLs.

    Recursion is bounded by the module globals max_depth / max_downloads.
    NOTE(review): the save-and-recurse logic lives in the ``finally`` block,
    so it also runs after an unexpected exception — confirm this is intended.
    """
    global max_depth, max_downloads, downloaded_urls
    print('Downloading ({}/{}, {}/{}) {} ...'.format(len(downloaded_urls)+1, max_downloads, count_depth, max_depth, url))
    # print(url, filter, dir)
    ext = '.txt'
    try:
        # req = urllib2.Request(url, headers=headers)
        # data = urllib2.urlopen(req).read()
        resp = requests.get(url, headers=headers)
        content_type = resp.headers['content-type']
        ext = get_ext(content_type)
        try:
            # Pretty-print JSON responses; anything else is saved verbatim.
            json_data = resp.json()
            if json_data:
                data = json.dumps(json_data, indent=4)
            else:
                data = resp.text
        except Exception as e:
            data = resp.text
    except urllib2.HTTPError as e:
        # NOTE(review): requests.get never raises urllib2 errors; these two
        # handlers look like leftovers from the commented-out urllib2 code.
        msg = '{} for {}'.format(str(e), url)
        print( msg )
        data = msg
    except urllib2.URLError as e:
        msg = '{} for {}'.format(str(e), url)
        print( msg )
        data = msg
    except requests.exceptions.ConnectionError as e:
        msg = '{} for {}'.format(str(e), url)
        print( msg )
        data = msg
    finally:
        # Record the URL and derive a flat file name from its path + query.
        downloaded_urls.append(url)
        url_ = urlparse(url)
        file_name = url_.path + (('?' + url_.query) if url_.query else '')
        file_name = sanitize(file_name) + ext
        file_path = os.path.join(args.dir, file_name)
        with open(file_path, 'wb') as f:
            f.write(data.encode('utf8'))
        print('Downloaded to {}'.format(file_path))
        # Stop the whole crawl once either budget is exhausted.
        if len(downloaded_urls) >= max_downloads:
            print('\nReached specified maximum downloads {}.\n'.format(max_downloads))
            sys.exit(0)
        urls = find_urls(data, filter)
        print( "{} URLs found.".format(len(urls)) )
        wait()
        count_depth = count_depth+1
        if count_depth <= max_depth:
            for u in urls:
                if u not in downloaded_urls:
                    download(u, filter, dir, headers, count_depth)
        else:
            print('\nReached specified maximum depth {}.\n'.format(max_depth))
            sys.exit(0)
def main():
    """Copy CLI options into the module globals and start the crawl."""
    global max_depth, max_downloads, sleep, base_url
    # url, filter and dir are mandatory; fall back to the help text otherwise.
    if args.url == None or args.filter == None or args.dir == None:
        parser.print_help()
    else:
        print ('')
        print('URL: {}'.format(args.url))
        if args.base != None:
            base_url = args.base
            print('Base URL: {}'.format(base_url))
        print('Filter: {}'.format(args.filter))
        print('Directory: {}'.format(args.dir))
        # create header obj
        headers = json.loads(args.headers) if args.headers else {}
        print('Headers: {}'.format(headers))
        print('')
        # depth
        if args.depth != None and args.depth != 0:
            max_depth = args.depth
        # max
        if args.max != None and args.max != 0:
            max_downloads = args.max
        # sleep
        if args.sleep != None and args.sleep != 0:
            sleep = args.sleep
        # Create dir
        mkpath(args.dir)
        try:
            download(args.url, args.filter, args.dir, headers)
        except KeyboardInterrupt as e:
            print('\nAborted by user.')
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3551272 | import numpy as np
from scipy.ndimage import gaussian_filter
import torch
import torch.nn as nn
class ListModule(nn.Module):
    """List-like container of sub-modules (predecessor of nn.ModuleList).

    Registers each positional argument as a child module and supports
    indexing (including negative indices), iteration and len().
    """
    def __init__(self, *args):
        super(ListModule, self).__init__()
        for idx, module in enumerate(args):
            self.add_module(str(idx), module)

    def __getitem__(self, idx):
        """Return the idx-th registered module.

        Raises IndexError for any out-of-range index.
        """
        # Bug fix: normalise negative indices BEFORE the bounds check.  The
        # original checked `idx >= len` first, so an out-of-range negative
        # index (e.g. m[-5] on a 2-element list) slipped past both checks
        # and silently returned the first module.
        if idx < 0:
            idx = len(self) + idx
        if not 0 <= idx < len(self._modules):
            raise IndexError('index {} is out of range'.format(idx))
        it = iter(self._modules.values())
        for _ in range(idx):
            next(it)
        return next(it)

    def __iter__(self):
        return iter(self._modules.values())

    def __len__(self):
        return len(self._modules)
class ModelAndLoss(nn.Module):
    """Wrap a model and a criterion so forward() returns (output, loss).

    The last positional argument is the target; everything before it is
    forwarded to the wrapped model.
    """
    def __init__(self, model, loss, use_mask=False):
        """
        Args:
            model: module producing the prediction.
            loss: criterion called as loss(output, target).
            use_mask: if True and a non-None ``mask`` kwarg is supplied to
                forward(), the output is multiplied by it before the loss.
        """
        super().__init__()
        self.model = model
        self.loss = loss
        self.use_mask = use_mask

    def forward(self, *args, **kwargs):
        """Run the model on args[:-1] and compute the loss against args[-1]."""
        # args[:-1] is always a tuple, so the original
        # "if not isinstance(input, (tuple, list))" branch was dead code
        # and has been removed.
        input = args[:-1]
        target = args[-1]
        output = self.model(*input, **kwargs)
        mask = kwargs.get('mask')
        if self.use_mask and mask is not None:
            loss = self.loss(output * mask, target)
        else:
            loss = self.loss(output, target)
        return output, loss
class BoxFilter(nn.Module):
    """Box (mean) filter implemented as a fixed grouped convolution.

    Reflection padding keeps the spatial size unchanged; every weight is
    the constant 1/kernel_size**2.  Uses groups=8, so both channel counts
    must be divisible by 8.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3):
        super().__init__()
        pad = nn.ReflectionPad2d(kernel_size // 2)
        conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                         stride=1, padding=0, bias=None, groups=8)
        self.seq = nn.Sequential(pad, conv)
        self.weights_init(kernel_size)

    def forward(self, x):
        """Apply reflection padding followed by the averaging convolution."""
        return self.seq(x)

    def weights_init(self, kernel_size):
        """Fill the conv weights with the constant averaging kernel."""
        mean_kernel = torch.ones((kernel_size, kernel_size)) / kernel_size ** 2
        self.seq[1].weight.data.copy_(mean_kernel)
class GaussianLayer(nn.Module):
    """Fixed Gaussian blur as a grouped convolution with reflection padding.

    The kernel is scipy's gaussian_filter response to a unit impulse and is
    copied into every conv parameter.  Uses groups=8, so both channel
    counts must be divisible by 8.
    """

    _instance = None  # lazily created shared blur, see get_instance()

    def __init__(self, in_channels, out_channels, kernel_size=21, sigma=3):
        super(GaussianLayer, self).__init__()
        pad = nn.ReflectionPad2d(kernel_size // 2)
        conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                         stride=1, padding=0, bias=None, groups=8)
        self.seq = nn.Sequential(pad, conv)
        self.weights_init(kernel_size, sigma)

    def forward(self, x):
        """Blur x while preserving its spatial size."""
        return self.seq(x)

    def weights_init(self, kernel_size, sigma):
        """Copy the discrete Gaussian kernel into every conv parameter."""
        impulse = np.zeros((kernel_size, kernel_size))
        impulse[kernel_size // 2, kernel_size // 2] = 1
        kernel = gaussian_filter(impulse, sigma=sigma)
        for _, param in self.named_parameters():
            param.data.copy_(torch.from_numpy(kernel))

    @staticmethod
    def get_instance():
        """Shared 13x13 sigma-6 blur on the GPU, created on first use."""
        if GaussianLayer._instance is None:
            GaussianLayer._instance = GaussianLayer(8, 8, kernel_size=13, sigma=6).cuda()
        return GaussianLayer._instance
class NetAndTexture(nn.Module):
    """Pair a rendering network with per-scene neural textures.

    Textures are kept on CPU in ``_textures`` and registered as sub-modules
    on demand via load_textures(); forward() samples the texture matching
    each batch item's id and feeds the multi-scale samples to the network.
    """
    def __init__(self, net, textures, supersampling=1, temporal_average=False):
        super().__init__()
        self.net = net
        self.ss = supersampling
        # Accept either a {texture_id: texture} mapping or a single texture
        # (which then becomes id 0).
        try:
            textures = dict(textures)
        except TypeError:
            textures = {0: textures}
        self._textures = {k: v.cpu() for k, v in textures.items()}
        self._loaded_textures = []
        self.last_input = None
        self.temporal_average = temporal_average
    def load_textures(self, texture_ids):
        # Register the requested textures as child modules so that
        # .cuda()/.cpu() and parameter traversal can see them.
        if torch.is_tensor(texture_ids):
            texture_ids = texture_ids.cpu().tolist()
        elif isinstance(texture_ids, int):
            texture_ids = [texture_ids]
        for tid in texture_ids:
            self._modules[str(tid)] = self._textures[tid]
        self._loaded_textures = texture_ids
    def unload_textures(self):
        # Move loaded textures back to CPU and drop them from the module tree.
        for tid in self._loaded_textures:
            self._modules[str(tid)].cpu()
            del self._modules[str(tid)]
    def reg_loss(self):
        # Sum of the regularization losses of all currently loaded textures.
        loss = 0
        for tid in self._loaded_textures:
            loss += self._modules[str(tid)].reg_loss()
        return loss
    def forward(self, inputs, **kwargs):
        """Sample each item's texture at every 'uv*' key and run the net.

        ``inputs`` is mutated (its 'id' key is removed).  Keys are consumed
        in order: each 'uv*' entry is sampled from the texture, and the
        non-uv entries following it are concatenated with that sample to
        form one scale of the network input.
        """
        out = []
        texture_ids = inputs['id']
        del inputs['id']
        if torch.is_tensor(texture_ids):
            texture_ids = texture_ids.tolist()
        elif isinstance(texture_ids, int):
            texture_ids = [texture_ids]
        for i, tid in enumerate(texture_ids): # per item in batch
            input = {k: v[i][None] for k, v in inputs.items()}
            assert 'uv' in list(input)[0], 'first input must be uv'
            texture = self._modules[str(tid)]
            j = 0
            keys = list(input)
            input_multiscale = []
            while j < len(keys): # sample texture at multiple scales
                tex_sample = None
                input_ex = []
                if 'uv' in keys[j]:
                    tex_sample = texture(input[keys[j]])
                    j += 1
                while j < len(keys) and 'uv' not in keys[j]:
                    input_ex.append(input[keys[j]])
                    j += 1
                assert tex_sample is not None
                input_cat = torch.cat(input_ex + [tex_sample], 1)
                # filter = GaussianLayer(input_cat.shape[1], input_cat.shape[1]).cuda()
                # input_cat = filter(input_cat)
                if self.ss > 1:
                    input_cat = nn.functional.interpolate(input_cat, scale_factor=1./self.ss, mode='bilinear')
                input_multiscale.append(input_cat)
            if self.temporal_average:
                if self.last_input is not None:
                    # NOTE(review): this inner "i" shadows the batch index
                    # of the enclosing loop — confirm intended.
                    for i in range(len(input_multiscale)):
                        input_multiscale[i] = (input_multiscale[i] + self.last_input[i]) / 2
                self.last_input = list(input_multiscale)
            out1 = self.net(*input_multiscale, **kwargs)
            out.append(out1)
        out = torch.cat(out, 0)
        if kwargs.get('return_input'):
            # NOTE(review): only the LAST batch item's inputs are returned.
            return out, input_multiscale
        else:
            return out
class MultiscaleNet(nn.Module):
    """Group a dict of input tensors by modality and feed them to a network.

    Every ``input_modality`` consecutive entries of the input dict are
    concatenated along the channel axis, optionally downscaled by the
    supersampling factor, and passed to ``net`` as separate positional
    arguments (one per scale).
    """

    def __init__(self, net, input_modality, supersampling=1):
        super().__init__()
        self.net = net
        self.input_modality = input_modality
        self.ss = supersampling

    def forward(self, inputs, **kwargs):
        """Run the wrapped net; ``inputs`` is mutated ('id' key removed)."""
        del inputs['id']
        modes = len(inputs)
        assert modes % self.input_modality == 0
        values = list(inputs.values())
        inputs_ms = []
        # Build one concatenated tensor per group of input_modality entries.
        for start in range(0, modes, self.input_modality):
            group = torch.cat(values[start:start + self.input_modality], 1)
            if self.ss > 1:
                group = nn.functional.interpolate(group, scale_factor=1./self.ss, mode='bilinear')
            inputs_ms.append(group)
        out = self.net(*inputs_ms, **kwargs)
        if kwargs.get('return_input'):
            return out, inputs_ms
        return out
class RGBTexture(nn.Module):
    """Sample a texture module directly from 2-D UV coordinates."""

    def __init__(self, texture, supersampling=1):
        super().__init__()
        self.texture = texture
        self.ss = supersampling  # kept for interface parity; not used here

    def forward(self, inputs, **kwargs):
        """Look the UVs up in the texture; ``inputs`` loses its 'id' key."""
        del inputs['id']
        assert list(inputs) == ['uv_2d'], 'check input format'
        uv = inputs['uv_2d']
        sampled = self.texture(uv)
        if kwargs.get('return_input'):
            return sampled, uv
        return sampled
| StarcoderdataPython |
8014566 | import torch
import numpy as np
from matplotlib import gridspec
import matplotlib.pyplot as plt
from metrics import *
def plot_metrics(histories):
    """Plot train/val loss, accuracy and F1 across every epoch of every fold.

    Args:
        histories: iterable of dicts, one per fold, each with the keys
            "train_losses", "val_losses", "train_accuracies",
            "val_accuracies", "train_f1" and "val_f1" holding one value
            per epoch.  The folds are concatenated along the x-axis.
    """
    # define some lists that will be the concatenation of every metric for all the folds
    train_losses = []
    val_losses = []
    train_accuracies = []
    val_accuracies = []
    train_f1s = []
    val_f1s = []
    # for each fold
    for history in histories:
        # append the metrics of that fold to the above lists
        train_losses.extend(history["train_losses"])
        val_losses.extend(history["val_losses"])
        train_accuracies.extend(history["train_accuracies"])
        val_accuracies.extend(history["val_accuracies"])
        train_f1s.extend(history["train_f1"])
        val_f1s.extend(history["val_f1"])
    # determine the number of total epochs: epochs per fold * folds
    epochs = len(train_losses)
    # define the figure and itds size
    fig = plt.figure(figsize=(40, 40))
    # create a gridspec object
    gs1 = gridspec.GridSpec(2, 2)
    # configure the space between the plots
    gs1.update(wspace=0.135)
    # axis for losses
    ax1 = plt.subplot(gs1[0, 0])
    # axis for accuracies
    ax2 = plt.subplot(gs1[0, 1])
    # axis for F1 scores
    ax3 = plt.subplot(gs1[1, :])
    # make sure that epoch ticks are integers
    plt.xticks(range(1, epochs))
    xtick_frequency = max(epochs // 10, 1)
    ax1.set_xticks(np.arange(0, epochs, xtick_frequency))
    ax2.set_xticks(np.arange(0, epochs, xtick_frequency))
    ax3.set_xticks(np.arange(0, epochs, xtick_frequency))
    # make tick fontsize a bit bigger than default
    ax1.tick_params(axis="both", which="major", labelsize=21)
    ax2.tick_params(axis="both", which="major", labelsize=21)
    ax3.tick_params(axis="both", which="major", labelsize=23)
    # add title for both plots
    plt.suptitle("\n\nTraining/Validation Losses/Accuracies/F1-Scores for every epoch in every fold during the Training procedure", fontsize=42)
    # plot training loss
    train_loss_label = "Training Loss"
    ax1.plot(train_losses, color="b", label=train_loss_label)
    # plot validation loss
    val_loss_label = "Validation Loss"
    ax1.plot(val_losses, color="r", label=val_loss_label)
    # add side labels and legend
    ax1.set_xlabel("epochs", fontsize=25)
    ax1.set_ylabel(r"Loss $J(w)$", fontsize=25)
    ax1.legend(prop={"size": 24})
    # plot training accuracy
    train_acc_label = "Training Accuracy"
    ax2.plot(train_accuracies, color="b", label=train_acc_label)
    # plot validation accuracy
    val_acc_label = "Validation Accuracy"
    ax2.plot(val_accuracies, color="r", label=val_acc_label)
    # add side labels and legend
    ax2.set_xlabel("epochs", fontsize=25)
    ax2.set_ylabel("Accuracy", fontsize=25)
    ax2.legend(prop={"size": 24})
    # plot the Training F1 scores
    train_f1_label = "Training F1 score"
    ax3.plot(train_f1s, color="b", label=train_f1_label)
    # plot the Validation F1 scores
    val_f1_label = "Validation F1 score"
    ax3.plot(val_f1s, color="r", label=val_f1_label)
    # add side labels and legend
    ax3.set_xlabel("epochs", fontsize=35)
    ax3.set_ylabel("F1 Score", fontsize=35)
    ax3.legend(prop={"size": 28})
    # show the plots
    plt.show()
def plot_roc_curves(models, modelnames, colors, Xs, ys, threshold_step=0.0001):
    """Plot the ROC curve of each given model on its dataset.

    Args:
        models: iterable of callables; also used as dict keys, so they
            must be hashable.
        modelnames: dict model -> display label.
        colors: dict model -> line colour, or None for the default colour.
        Xs, ys: dicts model -> model input / ground-truth labels.
        threshold_step: spacing of the decision thresholds swept over (0, 1).

    Relies on ``roc_metrics`` from the star-imported ``metrics`` module.
    """
    # define the thresholds that will be used to compute the ROC curve
    thresholds = np.arange(threshold_step, 1.0, threshold_step)
    # define the list with the values of (sensitivity and 1 - specificity)
    recalls = {model: [] for model in models}
    fall_outs = {model: [] for model in models}
    # make the prediction
    y_pred = {model: model(Xs[model]) for model in models}
    # compute the metrics for every threshold
    for threshold in thresholds:
        # for each model
        for model in models:
            # get the roc metrics
            recall, fall_out = roc_metrics(y_pred[model], ys[model], threshold=threshold)
            # append to the corresponding lists
            recalls[model].append(recall)
            fall_outs[model].append(fall_out)
    # configure the size of the ROC curve plots
    plt.rcParams["figure.figsize"] = [15, 10]
    plt.rcParams["xtick.labelsize"] = 14
    plt.rcParams["ytick.labelsize"] = 14
    # for every model
    for model in models:
        # plot its ROC curve
        color = colors[model] if colors is not None else "royalblue"
        plt.plot(fall_outs[model], recalls[model], color=color, label=modelnames[model])
    # plot y = x for comparison
    x = np.arange(0, 1.01, 0.1)
    plt.plot(x, x, color="brown", linestyle="--", label=r"$y\;=\;x$")
    # add legend, labels and title
    plt.legend()
    plt.xlabel(r"1 - Specificity", fontsize=20)
    plt.ylabel(r"Sensitivity", fontsize=20)
    plt.title("ROC curve\n", fontsize=25)
    plt.show()
1933223 | <filename>tests/components/tts/test_init.py<gh_stars>1-10
"""The tests for the TTS component."""
import pytest
import yarl
from homeassistant.components.demo.tts import DemoProvider
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
DOMAIN as DOMAIN_MP,
MEDIA_TYPE_MUSIC,
SERVICE_PLAY_MEDIA,
)
import homeassistant.components.tts as tts
from homeassistant.components.tts import _get_cache_files
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
from tests.async_mock import PropertyMock, patch
from tests.common import assert_setup_component, async_mock_service
def relative_url(url):
    """Strip the scheme and host from an absolute URL, keeping the relative part."""
    parsed = yarl.URL(url)
    return str(parsed.relative())
@pytest.fixture
def demo_provider():
    """Demo TTS provider."""
    return DemoProvider("en")
# Autouse: keep every test from touching the real cache-file listing.
@pytest.fixture(autouse=True)
def mock_get_cache_files():
    """Mock the list TTS cache function."""
    with patch(
        "homeassistant.components.tts._get_cache_files", return_value={}
    ) as mock_cache_files:
        yield mock_cache_files
# Autouse: resolve the cache dir without creating it on disk.
@pytest.fixture(autouse=True)
def mock_init_cache_dir():
    """Mock the TTS cache dir in memory."""
    with patch(
        "homeassistant.components.tts._init_tts_cache_dir",
        side_effect=lambda hass, cache_dir: hass.config.path(cache_dir),
    ) as mock_cache_dir:
        yield mock_cache_dir
# Opt-in fixture: point the TTS cache at a real (empty) temporary directory
# and dump its contents when the using test fails.
@pytest.fixture
def empty_cache_dir(tmp_path, mock_init_cache_dir, mock_get_cache_files, request):
    """Mock the TTS cache dir with empty dir."""
    mock_init_cache_dir.side_effect = None
    mock_init_cache_dir.return_value = str(tmp_path)
    # Restore original get cache files behavior, we're working with a real dir.
    mock_get_cache_files.side_effect = _get_cache_files
    yield tmp_path
    # NOTE(review): request.node.rep_call is attached by a conftest
    # hookwrapper elsewhere in the test suite — confirm it is defined there.
    if request.node.rep_call.passed:
        return
    # Print contents of dir if failed
    print("Content of dir for", request.node.nodeid)
    for fil in tmp_path.iterdir():
        print(fil.relative_to(tmp_path))
    # To show the log.
    assert False
# Autouse: skip mutagen ID3 tagging, returning the raw audio unchanged.
@pytest.fixture(autouse=True)
def mutagen_mock():
    """Mock writing tags."""
    with patch(
        "homeassistant.components.tts.SpeechManager.write_tags",
        side_effect=lambda *args: args[1],
    ):
        yield
# Autouse: fix the instance's internal URL so generated media URLs are stable.
@pytest.fixture(autouse=True)
async def internal_url_mock(hass):
    """Mock internal URL of the instance."""
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )
# Happy path: setting up the demo platform registers both TTS services.
async def test_setup_component_demo(hass):
    """Set up the demo platform with defaults."""
    config = {tts.DOMAIN: {"platform": "demo"}}
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(hass, tts.DOMAIN, config)
    assert hass.services.has_service(tts.DOMAIN, "demo_say")
    assert hass.services.has_service(tts.DOMAIN, "clear_cache")
# Failure path: an unwritable cache dir aborts setup and registers nothing.
async def test_setup_component_demo_no_access_cache_folder(hass, mock_init_cache_dir):
    """Set up the demo platform with defaults."""
    config = {tts.DOMAIN: {"platform": "demo"}}
    mock_init_cache_dir.side_effect = OSError(2, "No access")
    assert not await async_setup_component(hass, tts.DOMAIN, config)
    assert not hass.services.has_service(tts.DOMAIN, "demo_say")
    assert not hass.services.has_service(tts.DOMAIN, "clear_cache")
async def test_setup_component_and_test_service(hass, empty_cache_dir):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_config_language(
hass, empty_cache_dir
):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "language": "de"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_config_language_special(
hass, empty_cache_dir
):
"""Set up the demo platform and call service with extend language."""
import homeassistant.components.demo.tts as demo_tts
demo_tts.SUPPORT_LANGUAGES.append("en_US")
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "language": "en_US"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en-us_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en-us_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_wrong_conf_language(hass):
"""Set up the demo platform and call service with wrong config."""
config = {tts.DOMAIN: {"platform": "demo", "language": "ru"}}
with assert_setup_component(0, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
async def test_setup_component_and_test_service_with_service_language(
hass, empty_cache_dir
):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
).is_file()
async def test_setup_component_test_service_with_wrong_service_language(
hass, empty_cache_dir
):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "lang",
},
blocking=True,
)
assert len(calls) == 0
await hass.async_block_till_done()
assert not (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_lang_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_service_options(
hass, empty_cache_dir
):
"""Set up the demo platform and call service with options."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
tts.ATTR_OPTIONS: {"voice": "alex", "age": 5},
},
blocking=True,
)
opt_hash = tts._hash_options({"voice": "alex", "age": 5})
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== f"http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir
/ f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
).is_file()
async def test_setup_component_and_test_with_service_options_def(hass, empty_cache_dir):
"""Set up the demo platform and call service with default options."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN), patch(
"homeassistant.components.demo.tts.DemoProvider.default_options",
new_callable=PropertyMock(return_value={"voice": "alex"}),
):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
},
blocking=True,
)
opt_hash = tts._hash_options({"voice": "alex"})
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== f"http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir
/ f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_service_options_wrong(
    hass, empty_cache_dir
):
    """Set up the demo platform and call service with wrong options."""
    media_calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo"}}
        )
    await hass.services.async_call(
        tts.DOMAIN,
        "demo_say",
        {
            "entity_id": "media_player.something",
            tts.ATTR_MESSAGE: "There is someone at the door.",
            tts.ATTR_LANGUAGE: "de",
            tts.ATTR_OPTIONS: {"speed": 1},
        },
        blocking=True,
    )
    opt_hash = tts._hash_options({"speed": 1})
    # "speed" is not a supported demo option: no media_player call and
    # no cache file may be produced.
    assert len(media_calls) == 0
    await hass.async_block_till_done()
    cache_name = f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
    assert not (empty_cache_dir / cache_name).is_file()
async def test_setup_component_and_test_service_with_base_url_set(hass):
    """Set up the demo platform with ``base_url`` set and call service."""
    media_calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    tts_config = {tts.DOMAIN: {"platform": "demo", "base_url": "http://fnord"}}
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(hass, tts.DOMAIN, tts_config)
    await hass.services.async_call(
        tts.DOMAIN,
        "demo_say",
        {
            "entity_id": "media_player.something",
            tts.ATTR_MESSAGE: "There is someone at the door.",
        },
        blocking=True,
    )
    assert len(media_calls) == 1
    assert media_calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
    # The proxy URL must be built from the configured base_url.
    assert media_calls[0].data[ATTR_MEDIA_CONTENT_ID] == (
        "http://fnord"
        "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491"
        "_en_-_demo.mp3"
    )
async def test_setup_component_and_test_service_clear_cache(hass, empty_cache_dir):
    """Set up the demo platform and call service clear cache."""
    media_calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo"}}
        )
    await hass.services.async_call(
        tts.DOMAIN,
        "demo_say",
        {
            "entity_id": "media_player.something",
            tts.ATTR_MESSAGE: "There is someone at the door.",
        },
        blocking=True,
    )
    # Make sure the file is persisted before looking for it.
    await hass.async_block_till_done()
    assert len(media_calls) == 1
    await hass.async_block_till_done()
    cached = empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
    assert cached.is_file()
    await hass.services.async_call(
        tts.DOMAIN, tts.SERVICE_CLEAR_CACHE, {}, blocking=True
    )
    await hass.async_block_till_done()
    # clear_cache must remove the persisted file again.
    assert not cached.is_file()
async def test_setup_component_and_test_service_with_receive_voice(
    hass, demo_provider, hass_client
):
    """Set up the demo platform and call service and receive voice."""
    media_calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo"}}
        )
    http_client = await hass_client()
    await hass.services.async_call(
        tts.DOMAIN,
        "demo_say",
        {
            "entity_id": "media_player.something",
            tts.ATTR_MESSAGE: "There is someone at the door.",
        },
        blocking=True,
    )
    assert len(media_calls) == 1
    response = await http_client.get(
        relative_url(media_calls[0].data[ATTR_MEDIA_CONTENT_ID])
    )
    _, raw_audio = demo_provider.get_tts_audio("bla", "en")
    # Expected payload is the demo audio with ID3 tags written in.
    # NOTE(review): the tagged message differs from the spoken one —
    # presumably the tag text does not affect the served bytes; confirm.
    tagged_audio = tts.SpeechManager.write_tags(
        "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3",
        raw_audio,
        demo_provider,
        "AI person is in front of your door.",
        "en",
        None,
    )
    assert response.status == 200
    assert await response.read() == tagged_audio
async def test_setup_component_and_test_service_with_receive_voice_german(
    hass, demo_provider, hass_client
):
    """Set up the demo platform and call service and receive voice."""
    media_calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo", "language": "de"}}
        )
    http_client = await hass_client()
    await hass.services.async_call(
        tts.DOMAIN,
        "demo_say",
        {
            "entity_id": "media_player.something",
            tts.ATTR_MESSAGE: "There is someone at the door.",
        },
        blocking=True,
    )
    assert len(media_calls) == 1
    response = await http_client.get(
        relative_url(media_calls[0].data[ATTR_MEDIA_CONTENT_ID])
    )
    _, raw_audio = demo_provider.get_tts_audio("bla", "de")
    # Expected payload: demo audio with ID3 tags for the German message.
    tagged_audio = tts.SpeechManager.write_tags(
        "42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3",
        raw_audio,
        demo_provider,
        "There is someone at the door.",
        "de",
        None,
    )
    assert response.status == 200
    assert await response.read() == tagged_audio
async def test_setup_component_and_web_view_wrong_file(hass, hass_client):
    """Set up the demo platform and receive wrong file from web."""
    # A proxy URL that was never generated by a service call must 404.
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo"}}
        )
    http_client = await hass_client()
    response = await http_client.get(
        "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
    )
    assert response.status == HTTP_NOT_FOUND
async def test_setup_component_and_web_view_wrong_filename(hass, hass_client):
    """Set up the demo platform and receive wrong filename from web."""
    # A malformed/unknown hash in the proxy filename must 404.
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo"}}
        )
    http_client = await hass_client()
    response = await http_client.get(
        "/api/tts_proxy/265944dsk32c1b2a621be5930510bb2cd_en_-_demo.mp3"
    )
    assert response.status == HTTP_NOT_FOUND
async def test_setup_component_test_without_cache(hass, empty_cache_dir):
    """Set up demo platform without cache."""
    media_calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    tts_config = {tts.DOMAIN: {"platform": "demo", "cache": False}}
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(hass, tts.DOMAIN, tts_config)
    await hass.services.async_call(
        tts.DOMAIN,
        "demo_say",
        {
            "entity_id": "media_player.something",
            tts.ATTR_MESSAGE: "There is someone at the door.",
        },
        blocking=True,
    )
    assert len(media_calls) == 1
    await hass.async_block_till_done()
    # With caching disabled nothing may be written to the cache dir.
    assert not (
        empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
    ).is_file()
async def test_setup_component_test_with_cache_call_service_without_cache(
    hass, empty_cache_dir
):
    """Set up demo platform with cache and call service without cache."""
    media_calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    tts_config = {tts.DOMAIN: {"platform": "demo", "cache": True}}
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(hass, tts.DOMAIN, tts_config)
    await hass.services.async_call(
        tts.DOMAIN,
        "demo_say",
        {
            "entity_id": "media_player.something",
            tts.ATTR_MESSAGE: "There is someone at the door.",
            tts.ATTR_CACHE: False,
        },
        blocking=True,
    )
    assert len(media_calls) == 1
    await hass.async_block_till_done()
    # The per-call cache override must win over the platform default.
    assert not (
        empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
    ).is_file()
async def test_setup_component_test_with_cache_dir(
    hass, empty_cache_dir, demo_provider
):
    """Set up demo platform with cache and call service without cache."""
    media_calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    # Pre-seed the cache dir with the expected file before setup.
    _, raw_audio = demo_provider.get_tts_audio("bla", "en")
    cache_file = (
        empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
    )
    cache_file.write_bytes(raw_audio)
    tts_config = {tts.DOMAIN: {"platform": "demo", "cache": True}}
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(hass, tts.DOMAIN, tts_config)
    # Disable the provider: the response must then come from the cache.
    with patch(
        "homeassistant.components.demo.tts.DemoProvider.get_tts_audio",
        return_value=(None, None),
    ):
        await hass.services.async_call(
            tts.DOMAIN,
            "demo_say",
            {
                "entity_id": "media_player.something",
                tts.ATTR_MESSAGE: "There is someone at the door.",
            },
            blocking=True,
        )
    assert len(media_calls) == 1
    assert media_calls[0].data[ATTR_MEDIA_CONTENT_ID] == (
        "http://example.local:8123/api/tts_proxy/"
        "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
    )
async def test_setup_component_test_with_error_on_get_tts(hass):
    """Set up demo platform with wrong get_tts_audio."""
    # Setup must still succeed even if the provider yields no audio data.
    broken_provider = patch(
        "homeassistant.components.demo.tts.DemoProvider.get_tts_audio",
        return_value=(None, None),
    )
    with assert_setup_component(1, tts.DOMAIN), broken_provider:
        assert await async_setup_component(
            hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo"}}
        )
async def test_setup_component_load_cache_retrieve_without_mem_cache(
    hass, demo_provider, empty_cache_dir, hass_client
):
    """Set up component and load cache and get without mem cache."""
    _, raw_audio = demo_provider.get_tts_audio("bla", "en")
    cache_file = (
        empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
    )
    cache_file.write_bytes(raw_audio)
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo", "cache": True}}
        )
    http_client = await hass_client()
    # The proxy must serve the on-disk file even without a mem-cache hit.
    response = await http_client.get(
        "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
    )
    assert response.status == 200
    assert await response.read() == raw_audio
async def test_setup_component_and_web_get_url(hass, hass_client):
    """Set up the demo platform and receive file from web."""
    await async_setup_component(hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo"}})
    http_client = await hass_client()
    payload = {"platform": "demo", "message": "There is someone at the door."}
    response = await http_client.post("/api/tts_get_url", json=payload)
    assert response.status == 200
    body = await response.json()
    # tts_get_url must hand back the proxy URL for the rendered message.
    assert body.get("url") == (
        "http://example.local:8123/api/tts_proxy/"
        "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
    )
async def test_setup_component_and_web_get_url_bad_config(hass, hass_client):
    """Set up the demo platform and receive wrong file from web."""
    await async_setup_component(hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo"}})
    http_client = await hass_client()
    # Request without the mandatory "platform" key must be rejected.
    response = await http_client.post(
        "/api/tts_get_url", json={"message": "There is someone at the door."}
    )
    assert response.status == 400
| StarcoderdataPython |
4881004 | import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
#---------------------------------------------------------
fd_out='./out/a00_tumor_01_plt-case'  # output directory for the figures
f_in='./out/a00_tumor_00_clean/data_case.csv'  # cleaned per-case table
#--------------------------------------------------------
Path(fd_out).mkdir(exist_ok=True, parents=True)
#load the case table; first CSV column becomes the index
df=pd.read_csv(f_in, index_col=0)
# Fix the display/grouping order of the laterality categories.
df['side']=pd.Categorical(df['side'], categories=['None', 'Left', 'Right', 'Both'], ordered=True)
#--------------------------------------------------------
def plt_cnt(dfi, f_out, title=None, sz=(5, 3), y='cnt', cmap='grey', ylbl='Count', ylim=(0, 320)):
    """Bar plot of per-category counts with the value printed above each bar.

    :param dfi: DataFrame indexed by category (index used as x labels),
        holding a numeric column named by `y`
    :param f_out: path of the PNG file to write
    :param title: figure title
    :param sz: figure size in inches
    :param y: name of the count column in `dfi`
    :param cmap: single bar color (passed to seaborn as ``color``)
    :param ylbl: y-axis label
    :param ylim: y-axis limits (tuple instead of a mutable list default)
    """
    sns.set()
    #plot
    fig, ax=plt.subplots(figsize=sz)
    ax=sns.barplot(x=dfi.index, y=y, data=dfi, color=cmap)
    # despine after the axes exist so it acts on this figure (it was
    # previously called before plt.subplots and had no effect here)
    sns.despine()
    #adjust
    ax.set_title(title, x=0.5, fontsize=14, weight='semibold', pad=10)
    ax.set_xlabel('')
    plt.ylabel(ylbl, fontsize=12, labelpad=10, weight='semibold')
    plt.xticks(fontsize=11, rotation=0, weight='semibold')
    plt.yticks(fontsize=8)
    ax.tick_params(axis='x', which='major', pad=4)
    ax.tick_params(axis='y', which='major', pad=-1)
    plt.ylim(ylim)
    #add value labels; .iloc replaces dfi[y][i], which relied on the
    #deprecated integer-position fallback of label-based Series indexing
    #(removed in pandas 2.x, would raise KeyError on the categorical index)
    for i in range(dfi.shape[0]):
        cnt_val=dfi[y].iloc[i]
        plt.text(i, cnt_val+10, str(cnt_val), weight='semibold', fontsize=10, ha='center')
    #save
    plt.tight_layout()
    plt.savefig(f_out, dpi=300)
    plt.close()
    return
##################################################################
#count cases per laterality ('side') and plot as a labelled bar chart
dfi=df.copy()
dfi['cnt']=1
dfi=dfi.groupby('side').agg({'cnt': 'count'})  # one row per side category
#plot
f_out=f'{fd_out}/cnt.png'
title='Case Numbers'
plt_cnt(dfi, f_out, title=title)
| StarcoderdataPython |
11390933 | from celery import Celery
from textblob import TextBlob
# Celery application wired to a local RabbitMQ broker; task results are
# returned through the amqp result backend.
app = Celery('tasks', backend='amqp', broker='amqp://guest@localhost//')
@app.task
def sentiment(row):
    """Return ``(row[0], polarity)`` for the tweet text held in *row*.

    Polarity is TextBlob's sentiment score in [-1.0, 1.0].
    Assumes row[0] is an identifier and row[3] the tweet text — TODO
    confirm against the producer's row layout.
    """
    blob = TextBlob(row[3]) # tweet text
    return row[0], blob.sentiment.polarity
| StarcoderdataPython |
4892464 | # -*- coding:utf-8 -*-
# UerMiniAPP的路由
from flask import request
from app import app
from . import database
import pymysql
import json
@app.route('/signIn', methods=['POST', 'GET'])
def signIn():
    """Log in: check that the given UID/Upswd pair exists in ``User``.

    Query-string parameters: ``UID``, ``Upswd``.
    :return: JSON string {"result": true} on success, {"result": false} otherwise
    """
    db, cursor = database.connect_mysql()
    uid = request.args.get("UID")
    upswd = request.args.get("Upswd")
    # Parameterized query: the previous %-string formatting was open to
    # SQL injection through the request arguments.
    sql = "SELECT UID FROM User WHERE UID=%s AND Upswd=%s"
    try:
        cursor.execute(sql, (uid, upswd))
        data = cursor.fetchall()
        if data:
            return '{"result": true}'
        return '{"result": false}'
    finally:
        database.close_mysql(db, cursor)
@app.route('/signUp', methods=['POST', 'GET'])
def signUp():
    """Register: create a ``User`` row unless the UID is already taken.

    Query-string parameters: ``UID``, ``Upswd``.
    :return: JSON string {"result": true} on success, {"result": false} if
        the UID already exists
    """
    db, cursor = database.connect_mysql()
    uid = request.args.get("UID")
    upswd = request.args.get("Upswd")
    try:
        # Parameterized queries guard against SQL injection.
        cursor.execute("SELECT UID FROM User WHERE UID=%s", (uid,))
        if cursor.fetchall():
            # Already registered.
            return '{"result": false}'
        cursor.execute(
            "INSERT INTO User (UID, Upswd, Balance) VALUES (%s, %s, %s)",
            (uid, upswd, '0'),
        )
        db.commit()
        return '{"result": true}'
    finally:
        database.close_mysql(db, cursor)
@app.route('/getLot', methods=['POST', 'GET'])
def getLot():
    """Return the available space count of the main parking lot as JSON."""
    db, cursor = database.connect_mysql()
    query = "SELECT Num FROM Lot WHERE LID='MAIN'"
    try:
        cursor.execute(query)
        rows = cursor.fetchall()
        if not rows:
            return "false"
        # Map the single selected column name onto its value.
        column_name = cursor.description[0][0]
        return json.dumps({column_name: rows[0][0]})
    finally:
        database.close_mysql(db, cursor)
@app.route('/getLotCar', methods=['POST', 'GET'])
def getLotCar():
    """Return the current user's parked car as JSON (CID and entry time).

    Query-string parameter: ``UID``.
    :return: JSON object {"CID": ..., "Intime": ...} or the string "false"
        when the user's car is not in the lot
    """
    db, cursor = database.connect_mysql()
    uid = request.args.get("UID")
    # Parameterized to prevent SQL injection via the UID argument.
    sql = (
        "SELECT CID,Intime FROM LotCar "
        "WHERE CID=(SELECT CID FROM UserCar WHERE UID=%s)"
    )
    try:
        cursor.execute(sql, (uid,))
        data = cursor.fetchall()
        if data:
            fields = cursor.description
            result = {
                fields[0][0]: data[0][0],
                fields[1][0]: str(data[0][1]),  # Intime stringified for JSON
            }
            return json.dumps(result)
        return "false"
    finally:
        database.close_mysql(db, cursor)
@app.route('/getRecord', methods=['POST', 'GET'])
def getRecord():
    """Return the user's parking transaction records as a JSON array.

    Query-string parameter: ``UID``.
    :return: JSON list of {"CID", "Intime", "Outtime", "Cost"} objects,
        or the string "false" when the user has no records
    """
    db, cursor = database.connect_mysql()
    uid = request.args.get("UID")
    # Parameterized to prevent SQL injection via the UID argument.
    sql = "SELECT CID,Intime,Outtime,Cost FROM Record WHERE UID=%s"
    try:
        cursor.execute(sql, (uid,))
        data = cursor.fetchall()
        if not data:
            return "false"
        fields = cursor.description
        # (the old code also built an unused `name` list — removed)
        results = [
            {
                fields[0][0]: row[0],
                fields[1][0]: str(row[1]),  # datetimes stringified for JSON
                fields[2][0]: str(row[2]),
                fields[3][0]: row[3],
            }
            for row in data
        ]
        return json.dumps(results)
    finally:
        database.close_mysql(db, cursor)
@app.route('/getUserCar', methods=['POST', 'GET'])
def getUserCar():
    """Return the licence plate (CID) bound to the current user as JSON.

    Query-string parameter: ``UID``.
    :return: JSON object {"CID": ...} or the string "false" when no plate
        is bound
    """
    db, cursor = database.connect_mysql()
    uid = request.args.get("UID")
    # Parameterized to prevent SQL injection via the UID argument.
    sql = "SELECT CID FROM UserCar WHERE UID=%s"
    try:
        cursor.execute(sql, (uid,))
        data = cursor.fetchall()
        if data:
            fields = cursor.description
            return json.dumps({fields[0][0]: data[0][0]})
        return "false"
    finally:
        database.close_mysql(db, cursor)
@app.route('/getUser', methods=['POST', 'GET'])
def getUser():
    """Return the current user's profile (name, phone, balance) as JSON.

    Query-string parameter: ``UID``.
    :return: JSON object {"Uname", "Uphone", "Balance"} or the string
        "false" when the user does not exist
    """
    db, cursor = database.connect_mysql()
    uid = request.args.get("UID")
    # Parameterized to prevent SQL injection via the UID argument.
    sql = "SELECT Uname,Uphone,Balance FROM User WHERE UID=%s"
    try:
        cursor.execute(sql, (uid,))
        data = cursor.fetchall()
        if data:
            fields = cursor.description
            result = {
                fields[0][0]: data[0][0],
                fields[1][0]: data[0][1],
                fields[2][0]: data[0][2],
            }
            return json.dumps(result)
        return "false"
    finally:
        database.close_mysql(db, cursor)
@app.route('/changeCar', methods=['POST', 'GET'])
def changeCar():
    """Bind, change or remove the licence plate (CID) of a user.

    Query-string parameters: ``UID``, ``CID``. A CID of the literal string
    "undefined" (sent by the mini-app for an empty field) removes the
    binding.
    :return: JSON string {"result": true/false}
    """
    db, cursor = database.connect_mysql()
    uid = request.args.get("UID")
    cid = request.args.get("CID")
    try:
        if cid == "undefined":
            # No plate supplied: delete the user's binding.
            cursor.execute("DELETE FROM UserCar WHERE (UID = %s)", (uid,))
            db.commit()
            return '{"result": true}'
        # Refuse while the currently-bound car is inside the lot.
        cursor.execute(
            "SELECT CID FROM LotCar WHERE CID = "
            "(SELECT CID FROM UserCar WHERE UID = %s)",
            (uid,),
        )
        if cursor.fetchall():
            return '{"result": false}'
        # Refuse if the requested plate is already bound by someone.
        cursor.execute("SELECT CID FROM UserCar WHERE CID=%s", (cid,))
        if cursor.fetchall():
            return '{"result": false}'
        cursor.execute("SELECT UID FROM UserCar WHERE UID=%s", (uid,))
        if cursor.fetchall():
            # Already has a binding: change it.
            cursor.execute(
                "UPDATE UserCar SET CID = %s WHERE (UID = %s)", (cid, uid)
            )
        else:
            # First binding: create it.
            cursor.execute(
                "INSERT INTO UserCar (UID, CID) VALUES (%s, %s)", (uid, cid)
            )
        db.commit()
        return '{"result": true}'
    finally:
        # Uses the shared helper (like every other route) instead of
        # closing cursor/db by hand; all SQL is parameterized to prevent
        # injection through the request arguments.
        database.close_mysql(db, cursor)
@app.route('/changeInfo', methods=['POST', 'GET'])
def changeInfo():
    """Update the user's display name and phone number.

    Query-string parameters: ``UID``, ``Uname``, ``Uphone``. The literal
    string "undefined" (empty field in the mini-app) is stored as "".
    :return: JSON string {"result": true}
    """
    db, cursor = database.connect_mysql()
    uid = request.args.get("UID")
    uname = request.args.get("Uname")
    uphone = request.args.get("Uphone")
    if uname == "undefined":
        uname = ""
    if uphone == "undefined":
        uphone = ""
    try:
        # Parameterized to prevent SQL injection via the arguments.
        cursor.execute(
            "UPDATE User SET Uname = %s, Uphone = %s WHERE (UID = %s)",
            (uname, uphone, uid),
        )
        db.commit()
        # The original view returned None, which makes Flask raise a
        # TypeError; report success like the other endpoints instead.
        return '{"result": true}'
    finally:
        database.close_mysql(db, cursor)
@app.route('/charge', methods=['POST', 'GET'])
def charge():
    """Add an amount to the user's balance (top-up).

    Query-string parameters: ``UID``, ``Balance`` (amount to add; the
    literal string "undefined" counts as 0).
    :return: JSON string {"result": true} on success, {"result": false}
        when the user does not exist
    """
    db, cursor = database.connect_mysql()
    uid = request.args.get("UID")
    balance = request.args.get("Balance")
    if balance == "undefined":
        balance = 0
    try:
        # Parameterized to prevent SQL injection via the arguments.
        cursor.execute("SELECT Balance FROM User WHERE (UID = %s)", (uid,))
        data = cursor.fetchall()
        if data:
            new_balance = float(balance) + float(data[0][0])
            # The driver serializes the float itself (the old '%f'
            # formatting stored it as a quoted string).
            cursor.execute(
                "UPDATE User SET Balance = %s WHERE (UID = %s)",
                (new_balance, uid),
            )
            db.commit()
            # The original view returned None (a Flask error); report the
            # outcome like the other endpoints instead.
            return '{"result": true}'
        return '{"result": false}'
    finally:
        database.close_mysql(db, cursor)
| StarcoderdataPython |
6654953 | # -*- coding: utf-8 -*-
"""
colMulti.py
Author: SMFSW
Copyright (c) 2016-2021 SMFSW
"""
from colorConv import *
from colorConvTemperature import TEMPtoYxy
from CIEobs import * # CIE parameters class
from colRGB import ColRGB
from colRGBW import ColRGBW
from colRGBDim import ColRGBDim
from colHSL import ColHSL
from colHSV import ColHSV
from colHWB import ColHWB
from colXYZ import ColXYZ
from colYxy import ColYxy
from colHunterLab import ColHunterLab
from colCIELab import ColCIELab
from colCIELCHab import ColCIELCHab
from colCIELuv import ColCIELuv
from colCIELCHuv import ColCIELCHuv
from colCMY import ColCMY
from colCMYK import ColCMYK
from colHEX import ColHEX
from colYUV import ColYUV
from colYIQ import ColYIQ
from colYCbCr import ColYCbCr
from colYDbDr import ColYDbDr
from colYCoCg import ColYCoCg
from colYCC import ColYCC
from colNamedColours import ColHTMLrestricted, ColCSS
from colWebSafe import ColWebSafe
from colBlackBody import ColBlackBody
from colRAL import ColRAL
from colPantone import ColPantone
# TODO: find how to use colNamedColours, ColWebSafe, ColRAL & colPantone imports
from refsTools import RefColorSet
class Color(object):
    """Multi-space color container.

    Holds one color converted into every supported color space at once
    (RGB and derivatives, CIE spaces, TV/video spaces, print spaces,
    named/isolated sets). Setting the color through any space refreshes
    all the others.
    """
    # TODO: reconstruct list at init getting method names and removing _from
    # Names of every settable color space; each must have a matching
    # _from<name> converter method.
    lcolspace_type = ['RGB', 'RGBW', 'RGBDim',
                      'HSL', 'HSV', 'HWB', 'NCS',
                      'CMY', 'CMYK',
                      'HEX',
                      'YUV', 'YIQ', 'YCbCr', 'YDbDr',
                      'YCoCg', 'YCC',
                      'XYZ', 'Yxy', 'RAL',
                      'CIELab', 'CIELCHab', 'CIELuv', 'CIELCHuv', 'HunterLab',
                      'Pantone', 'HTMLrestricted', 'CSS', 'WebSafe',
                      'Ncol', 'Temp', 'BlackBody']
    # Supported gamma curve identifiers.
    lGamma = ['1.0', '1.8', '2.2', 'sRGB', 'L*']
    def __init__(self, ctype='RGB', *col, **kwargs):
        """ Init self following type

        :param ctype: color space name *col is expressed in (see lcolspace_type)
        :param col: channel values for that color space
        :param kwargs: **rgb_space (str): RGB working space (defaults to 'sRGB')
        """
        self.RGBSpace = kwargs['rgb_space'] if 'rgb_space' in kwargs and isinstance(kwargs['rgb_space'], str) else 'sRGB'
        self.type = 'Multi'
        # self.args = ctype, col  # keep former type & args for reference
        self.RGB = ColRGB(**kwargs)             # RGB instance
        self.RGBW = ColRGBW(**kwargs)           # RGBW instance
        self.RGBDim = ColRGBDim(**kwargs)       # RGBDim instance
        self.HSL = ColHSL(**kwargs)             # HSL instance
        self.HSV = ColHSV(**kwargs)             # HSV instance
        self.HWB = ColHWB(**kwargs)             # HWB instance (can represent NCS space)
        self.CMY = ColCMY(**kwargs)             # CMY instance
        self.CMYK = ColCMYK(**kwargs)           # CMYK instance
        self.HEX = ColHEX(**kwargs)             # HEX instance
        self.YUV = ColYUV(**kwargs)             # YUV instance
        self.YIQ = ColYIQ(**kwargs)             # YIQ instance
        self.YCbCr = ColYCbCr(**kwargs)         # YCbCr instance
        self.YDbDr = ColYDbDr(**kwargs)         # YDbDr instance
        self.YCoCg = ColYCoCg(**kwargs)         # YCoCg instance
        self.YCC = ColYCC(**kwargs)             # YCC instance
        self.XYZ = ColXYZ(**kwargs)             # XYZ instance
        self.CIELab = ColCIELab(**kwargs)       # CIE-L*ab instance
        self.CIELCHab = ColCIELCHab(**kwargs)   # CIE-L*CH°ab instance
        self.CIELuv = ColCIELuv(**kwargs)       # CIE-L*uv instance
        self.CIELCHuv = ColCIELCHuv(**kwargs)   # CIE-L*CH°ab instance
        self.HunterLab = ColHunterLab(**kwargs) # Hunter-L*ab instance
        self.Yxy = ColYxy(**kwargs)             # Yxy instance
        self.RAL = None             # reserved for RAL instance (includes Yxy space)
        self.BlackBody = None       # reserved for BlackBody instance (includes Yxy space)
        self.Pantone = None         # reserved for Pantone instance (includes RGB space)
        self.HTMLrestricted = None  # reserved for HTMLrestricted instance (includes HEX space)
        self.CSS = None             # reserved for CSS instance (includes HEX space)
        self.WebSafe = None         # reserved for WebSafe instance (includes HEX space)
        # some other params as hue,dim,Ncol can be added in class
        self.Temp = 0   # correlated color temperature (refreshed on conversion)
        self.Ncol = ''  # natural color notation (refreshed from HWB)
        # White point / gamma looked up from the chosen RGB working space.
        self.WhiteRef = RefColorSet.ref_ColorSet.get(self.RGBSpace)[12]
        self.Gamma = RefColorSet.ref_ColorSet.get(self.RGBSpace)[11]
        self.Adaptation = 'None'
        self.observer = CIEObs(illum=self.WhiteRef)
        # TODO: add a class function to change observer if default observer doesn't fit
        # print("Observer reference: {}, {}".format(self.observer.ref_variant, self.observer.ref_illum))
        # print("XYZ reference: {}, {}, {}".format(self.observer.ref_X, self.observer.ref_Y, self.observer.ref_Z))
        # print("uv reference: {}, {}".format(self.observer.ref_U, self.observer.ref_V))
        # print("")
        self.dcolspace_type = dict(zip(range(len(self.lcolspace_type)), self.lcolspace_type))   # make dict from fields list
        self.refs = lambda: [vars(self)[var] for var in self.lcolspace_type]    # make list from color space members
        try:
            self._set(ctype, *col, **kwargs)    # init with given params
        except ValueError:
            # fall back to black if the given values are not usable
            self._set('RGB', 0, 0, 0, **kwargs)
def get(self, ctype='RGB'):
""" Get color following type """
if ctype in self.lcolspace_type:
if ctype == 'NCS': # special case as Natural Color is included in HWB space
return self.Ncol, self.HWB.W, self.HWB.B
return vars(self)[ctype] # return desired space from self
def set(self, ctype, *col, **kwargs):
""" Set a new color following type and convert to other different spaces
:param ctype: color type (string)
:param col:
*col: Values for color type
:param kwargs:
**rgb_space (str): RGB working space """
if 'rgb_space' in kwargs and isinstance(kwargs['rgb_space'], str):
self.__init__(ctype, *col, **kwargs)
else:
self._set(ctype, *col, **kwargs)
def _set(self, ctype, *col, **kwargs):
""" Internal method to set a new color following type and convert to other different spaces """
self._rst_isolated_spaces()
if ctype in self.lcolspace_type:
method = getattr(self, '_from' + ctype) # get attribute from reconstructed method name
method(*col, **kwargs) # launch method passing color arguments
else:
print("Type given not recognized: {}".format(self.type))
def _rst_isolated_spaces(self):
self.RAL = None # reserved for RAL instance
self.BlackBody = None # reserved for BlackBody instance
self.Pantone = None # reserved for Pantone instance
self.HTMLrestricted = None # reserved for HTMLrestricted instance
self.CSS = None # reserved for CSS instance
self.WebSafe = None # reserved for WebSafe instance
# Internal commmon isolated conversions
def _toRGBDerivedStandards(self, *args, **kwargs):
""" update color spaces from RGB space datas to Derived standards spaces
:return: updated self """
self.RGBW.fromRGB(self.RGB)
self.RGBDim.fromRGB(self.RGB)
self.HSL.fromRGB(self.RGB)
self.HSV.fromRGB(self.RGB)
self.HWB.fromRGB(self.RGB)
self.Ncol = self.HWB.Ncol
self.HEX.fromRGB(self.RGB)
def _toCIEStandards(self, *args, **kwargs):
""" update color spaces from RGB space datas to CIE standards
:return: updated self """
self.XYZ.fromRGB(self.RGB)
self.Temp = self.XYZ.colorTemp
self.Yxy.fromXYZ(self.XYZ)
self.CIELab.fromXYZ(self.XYZ)
self.CIELCHab.fromCIELab(self.CIELab)
self.CIELuv.fromXYZ(self.XYZ)
self.CIELCHuv.fromCIELuv(self.CIELuv)
self.HunterLab.fromXYZ(self.XYZ)
def _toTVStandards(self, *args, **kwargs):
""" update color spaces from RGB space datas to TV standards spaces
:return: updated self """
self.YUV.fromRGB(self.RGB)
self.YIQ.fromRGB(self.RGB)
self.YCbCr.fromRGB(self.RGB)
self.YDbDr.fromRGB(self.RGB)
self.YCoCg.fromRGB(self.RGB)
self.YCC.fromRGB(self.RGB)
    def _toPrintersStandards(self, *args, **kwargs):
        """Refresh the subtractive (print) spaces from the current self.RGB.

        :return: updated self
        """
        self.CMY.fromRGB(self.RGB)
        self.CMYK.fromCMY(self.CMY)  # CMYK derives from CMY, not from RGB directly
# Conversions
    def _fromRGB(self, *col, **kwargs):
        """Update every color space after setting the RGB channels.

        :param col: either an RGB tuple or a ColRGB instance
        :return: updated self
        """
        self.RGB = ColRGB(*col, **kwargs)
        # the trailing `self` argument is swallowed by the helpers' *args
        self._toRGBDerivedStandards(self)
        self._toCIEStandards(self)
        self._toTVStandards(self)
        self._toPrintersStandards(self)
        return self
    def _fromRGBW(self, *col, **kwargs):
        """Update every color space after setting the RGBW channels.

        :param col: either an RGBW tuple or a ColRGBW instance
        :return: updated self
        """
        self.RGBW = ColRGBW(*col, **kwargs)
        # RGB is rebuilt first; all remaining spaces derive from it
        self.RGB.fromRGBW(self.RGBW)
        self.RGBDim.fromRGB(self.RGB)
        self.HSL.fromRGB(self.RGB)
        self.HSV.fromRGB(self.RGB)
        self.HWB.fromRGB(self.RGB)
        self.Ncol = self.HWB.Ncol
        self.HEX.fromRGB(self.RGB)
        self._toCIEStandards(self)
        self._toTVStandards(self)
        self._toPrintersStandards(self)
        return self
    def _fromRGBDim(self, *col, **kwargs):
        """Update every color space after setting the RGBDim channels.

        :param col: either an RGBDim tuple or a ColRGBDim instance
        :return: updated self
        """
        self.RGBDim = ColRGBDim(*col, **kwargs)
        # RGBDim converts through HSV; RGB is then rebuilt from HSV and
        # every remaining space derives from RGB
        self.HSV.fromRGBDim(self.RGBDim)
        self.RGB.fromHSV(self.HSV)
        self.RGBW.fromRGB(self.RGB)
        self.HSL.fromRGB(self.RGB)
        self.HWB.fromRGB(self.RGB)
        self.Ncol = self.HWB.Ncol
        self.HEX.fromRGB(self.RGB)
        self._toCIEStandards(self)
        self._toTVStandards(self)
        self._toPrintersStandards(self)
        return self
    def _fromHSL(self, *col, **kwargs):
        """Update every color space after setting the HSL channels.

        :param col: either an HSL tuple or a ColHSL instance
        :return: updated self
        """
        self.HSL = ColHSL(*col, **kwargs)
        # RGB is rebuilt first; all remaining spaces derive from it
        self.RGB.fromHSL(self.HSL)
        self.RGBW.fromRGB(self.RGB)
        self.RGBDim.fromRGB(self.RGB)
        self.HSV.fromRGB(self.RGB)
        self.HWB.fromRGB(self.RGB)
        self.Ncol = self.HWB.Ncol
        self.HEX.fromRGB(self.RGB)
        self._toCIEStandards(self)
        self._toTVStandards(self)
        self._toPrintersStandards(self)
        return self
def _fromHSV(self, *col, **kwargs):
""" update color spaces from HSV space datas
:param col: either HSV tuple or ColHSL class
:return: updated self """
self.HSV = ColHSV(*col, **kwargs)
self.RGB.fromHSV(self.RGB)
self.RGBW.fromRGB(self.RGB)
self.RGBDim.fromRGB(self.RGB)
self.HSL.fromRGB(self.RGB)
self.HWB.fromRGB(self.RGB)
self.Ncol = self.HWB.Ncol
self.HEX.fromRGB(self.RGB)
self._toCIEStandards(self)
self._toTVStandards(self)
self._toPrintersStandards(self)
return self
    def _fromHWB(self, *col, **kwargs):
        """Update every color space after setting the HWB channels.

        :param col: either an HWB tuple or a ColHWB instance
        :return: updated self
        """
        self.HWB = ColHWB(*col, **kwargs)
        self.Ncol = HUEtoNCOL(self.HWB.H)  # NOTE: overwritten below from self.HWB.Ncol
        # RGB is rebuilt first; all remaining spaces derive from it
        self.RGB.fromHWB(self.HWB)
        self.RGBW.fromRGB(self.RGB)
        self.RGBDim.fromRGB(self.RGB)
        self.HSL.fromRGB(self.RGB)
        self.HSV.fromRGB(self.RGB)
        self.Ncol = self.HWB.Ncol
        self.HEX.fromRGB(self.RGB)
        self._toCIEStandards(self)
        self._toTVStandards(self)
        self._toPrintersStandards(self)
        return self
    def _fromNCS(self, *col, **kwargs):
        """Update every color space after setting Natural Color data.

        :param col: (Ncol, W, B) tuple — natural colour notation,
            whiteness and blackness, stored inside the HWB member
        :return: updated self
        """
        self.HWB.Ncol, self.HWB.W, self.HWB.B = col
        self.Ncol = self.HWB.Ncol
        # in-place refresh of HWB from its NCS members — presumably
        # recomputes H from the Ncol notation; see ColHWB.fromNCS
        self.HWB.fromNCS(self.HWB)
        self.RGB.fromHWB(self.HWB)
        self.RGBW.fromRGB(self.RGB)
        self.RGBDim.fromRGB(self.RGB)
        self.HSL.fromRGB(self.RGB)
        self.HSV.fromRGB(self.RGB)
        self.HEX.fromRGB(self.RGB)
        self._toCIEStandards(self)
        self._toTVStandards(self)
        self._toPrintersStandards(self)
        return self
    def _fromHEX(self, *col, **kwargs):
        """Update every color space after setting the HEX value.

        :param col: either a HEX string or a ColHEX instance
        :return: updated self
        """
        self.HEX = ColHEX(*col, **kwargs)
        # RGB is rebuilt first; all remaining spaces derive from it
        self.RGB.fromHEX(self.HEX)
        self.RGBW.fromRGB(self.RGB)
        self.RGBDim.fromRGB(self.RGB)
        self.HSL.fromRGB(self.RGB)
        self.HSV.fromRGB(self.RGB)
        self.HWB.fromRGB(self.RGB)
        self.Ncol = self.HWB.Ncol
        self._toCIEStandards(self)
        self._toTVStandards(self)
        self._toPrintersStandards(self)
        return self
    def _fromCMY(self, *col, **kwargs):
        """Update every color space after setting the CMY channels.

        :param col: either a CMY tuple or a ColCMY instance
        :return: updated self
        """
        self.CMY = ColCMY(*col, **kwargs)
        self.CMYK.fromCMY(self.CMY)
        self.RGB.fromCMY(self.CMY)
        # no _toPrintersStandards here: CMY/CMYK were set directly above
        self._toRGBDerivedStandards(self)
        self._toCIEStandards(self)
        self._toTVStandards(self)
        return self
    def _fromCMYK(self, *col, **kwargs):
        """Update every color space after setting the CMYK channels.

        :param col: either a CMYK tuple or a ColCMYK instance
        :return: updated self
        """
        self.CMYK = ColCMYK(*col, **kwargs)
        self.CMY.fromCMYK(self.CMYK)
        self.RGB.fromCMY(self.CMY)
        # no _toPrintersStandards here: CMY/CMYK were set directly above
        self._toRGBDerivedStandards(self)
        self._toCIEStandards(self)
        self._toTVStandards(self)
        return self
    def _fromYUV(self, *col, **kwargs):
        """Update every color space after setting the YUV channels.

        :param col: either a YUV tuple or a ColYUV instance
        :return: updated self
        """
        self.YUV = ColYUV(*col, **kwargs)
        self.RGB.fromYUV(self.YUV)
        self.YDbDr.fromYUV(self.YUV)  # YDbDr is derived directly from YUV
        # remaining TV spaces are recomputed from the rebuilt RGB
        self.YIQ.fromRGB(self.RGB)
        self.YCbCr.fromRGB(self.RGB)
        self.YCoCg.fromRGB(self.RGB)
        self.YCC.fromRGB(self.RGB)
        self._toRGBDerivedStandards(self)
        self._toCIEStandards(self)
        self._toPrintersStandards(self)
        return self
    def _fromYIQ(self, *col, **kwargs):
        """Update every color space after setting the YIQ channels.

        :param col: either a YIQ tuple or a ColYIQ instance
        :return: updated self
        """
        self.YIQ = ColYIQ(*col, **kwargs)
        self.RGB.fromYIQ(self.YIQ)
        # other TV spaces are recomputed from the rebuilt RGB
        self.YUV.fromRGB(self.RGB)
        self.YCbCr.fromRGB(self.RGB)
        self.YDbDr.fromRGB(self.RGB)
        self.YCoCg.fromRGB(self.RGB)
        self.YCC.fromRGB(self.RGB)
        self._toRGBDerivedStandards(self)
        self._toCIEStandards(self)
        self._toPrintersStandards(self)
        return self
    def _fromYCbCr(self, *col, **kwargs):
        """Update every color space after setting the YCbCr channels.

        :param col: either a YCbCr tuple or a ColYCbCr instance
        :return: updated self
        """
        self.YCbCr = ColYCbCr(*col, **kwargs)
        self.RGB.fromYCbCr(self.YCbCr)
        # other TV spaces are recomputed from the rebuilt RGB
        self.YUV.fromRGB(self.RGB)
        self.YIQ.fromRGB(self.RGB)
        self.YDbDr.fromRGB(self.RGB)
        self.YCoCg.fromRGB(self.RGB)
        self.YCC.fromRGB(self.RGB)
        self._toRGBDerivedStandards(self)
        self._toCIEStandards(self)
        self._toPrintersStandards(self)
        return self
    def _fromYDbDr(self, *col, **kwargs):
        """Update every color space after setting the YDbDr channels.

        :param col: either a YDbDr tuple or a ColYDbDr instance
        :return: updated self
        """
        self.YDbDr = ColYDbDr(*col, **kwargs)
        self.RGB.fromYDbDr(self.YDbDr)
        self.YUV.fromYDbDr(self.YDbDr)  # YUV is derived directly from YDbDr
        # remaining TV spaces are recomputed from the rebuilt RGB
        self.YIQ.fromRGB(self.RGB)
        self.YCbCr.fromRGB(self.RGB)
        self.YCoCg.fromRGB(self.RGB)
        self.YCC.fromRGB(self.RGB)
        self._toRGBDerivedStandards(self)
        self._toCIEStandards(self)
        self._toPrintersStandards(self)
        return self
    def _fromYCoCg(self, *col, **kwargs):
        """Update every color space after setting the YCoCg channels.

        :param col: either a YCoCg tuple or a ColYCoCg instance
        :return: updated self
        """
        self.YCoCg = ColYCoCg(*col, **kwargs)
        self.RGB.fromYCoCg(self.YCoCg)
        # other TV spaces are recomputed from the rebuilt RGB
        self.YUV.fromRGB(self.RGB)
        self.YIQ.fromRGB(self.RGB)
        self.YCbCr.fromRGB(self.RGB)
        self.YDbDr.fromRGB(self.RGB)
        self.YCC.fromRGB(self.RGB)
        self._toRGBDerivedStandards(self)
        self._toCIEStandards(self)
        self._toPrintersStandards(self)
        return self
def _fromYCC(self, *col, **kwargs):
""" update color spaces from YCC space datas
:param col: either YCC tuple or ColYCC class
:return: updated self """
self.YCC = ColYCC(*col, **kwargs)
self.RGB.fromYCC(self.YCC)
self.YUV.fromRGB(self.RGB)
self.YIQ.fromRGB(self.RGB)
self.YCbCr.fromRGB(self.RGB)
self.YDbDr.fromRGB(self.RGB)
self.YCoCg.fromRGB(self.RGB)
self._toRGBDerivedStandards(self)
self._toCIEStandards(self)
self._toPrintersStandards(self)
return self
def _fromXYZ(self, *col, **kwargs):
    """ update color spaces from XYZ space datas
    :param col: either XYZ tuple or ColXYZ class
    :return: updated self """
    self.XYZ = ColXYZ(*col, **kwargs)
    # Correlated color temperature is computed by the XYZ class itself.
    self.Temp = self.XYZ.colorTemp
    # All CIE-family spaces are derived directly from XYZ ...
    self.Yxy.fromXYZ(self.XYZ)
    self.CIELab.fromXYZ(self.XYZ)
    self.CIELCHab.fromCIELab(self.CIELab)
    self.CIELuv.fromXYZ(self.XYZ)
    self.CIELCHuv.fromCIELuv(self.CIELuv)
    self.HunterLab.fromXYZ(self.XYZ)
    # ... then RGB, from which the remaining space groups are refreshed.
    self.RGB.fromXYZ(self.XYZ)
    self._toRGBDerivedStandards(self)
    self._toTVStandards(self)
    self._toPrintersStandards(self)
    return self
def _fromYxy(self, *col, **kwargs):
""" update color spaces from Yxy space datas
:param col: either Yxy tuple or ColYxy class
:return: updated self """
self.Yxy = ColYxy(*col, **kwargs)
self.XYZ.fromYxy(self.Yxy)
self.Temp = self.XYZ.colorTemp
self.CIELab.fromXYZ(self.XYZ)
self.CIELCHab.fromCIELab(self.CIELab)
self.CIELuv.fromXYZ(self.XYZ)
self.CIELCHuv.fromCIELuv(self.CIELuv)
self.HunterLab.fromXYZ(self.XYZ)
self.RGB.fromXYZ(self.XYZ)
self._toRGBDerivedStandards(self)
self._toTVStandards(self)
self._toPrintersStandards(self)
return self
def _fromHunterLab(self, *col, **kwargs):
""" update color spaces from Hunter-L*ab space datas
:param col: either Hunter-L*ab tuple or ColHunterLab class
:return: updated self """
self.HunterLab = ColHunterLab(*col, **kwargs)
self.XYZ.fromHunterLab(self.HunterLab)
self.Temp = self.XYZ.colorTemp
self.Yxy.fromXYZ(self.XYZ)
self.CIELab.fromXYZ(self.XYZ)
self.CIELCHab.fromCIELab(self.CIELab)
self.CIELuv.fromXYZ(self.XYZ)
self.CIELCHuv.fromCIELuv(self.CIELuv)
self.RGB.fromXYZ(self.XYZ)
self._toRGBDerivedStandards(self)
self._toTVStandards(self)
self._toPrintersStandards(self)
return self
def _fromCIELab(self, *col, **kwargs):
""" update color spaces from CIE-L*ab space datas
:param col: either CIE-L*ab tuple or ColCIELab class
:return: updated self """
self.CIELab = ColCIELab(*col, **kwargs)
self.XYZ.fromCIELab(self.CIELab)
self.Temp = self.XYZ.colorTemp
self.Yxy.fromXYZ(self.XYZ)
self.CIELCHab.fromCIELab(self.CIELab)
self.CIELuv.fromXYZ(self.XYZ)
self.CIELCHuv.fromCIELuv(self.CIELuv)
self.HunterLab.fromXYZ(self.XYZ)
self.RGB.fromXYZ(self.XYZ)
self._toRGBDerivedStandards(self)
self._toTVStandards(self)
self._toPrintersStandards(self)
return self
def _fromCIELuv(self, *col, **kwargs):
""" update color spaces from CIE-L*uv space datas
:param col: either CIE-L*uv tuple or ColCIELuv class
:return: updated self """
self.CIELuv = ColCIELuv(*col, **kwargs)
self.XYZ.fromCIELuv(self.CIELuv)
self.Temp = self.XYZ.colorTemp
self.Yxy.fromXYZ(self.XYZ)
self.CIELab.fromXYZ(self.XYZ)
self.CIELCHab.fromCIELab(self.CIELab)
self.CIELCHuv.fromCIELuv(self.CIELuv)
self.HunterLab.fromXYZ(self.XYZ)
self.RGB.fromXYZ(self.XYZ)
self._toRGBDerivedStandards(self)
self._toTVStandards(self)
self._toPrintersStandards(self)
return self
def _fromCIELCHab(self, *col, **kwargs):
""" update color spaces from CIE-L*CH°ab space datas
:param col: either CIE-L*CH°ab tuple or ColCIELCHab class
:return: updated self """
self.CIELCHab = ColCIELCHab(*col, **kwargs)
self.CIELab.fromCIELCHab(self.CIELCHab)
self.XYZ.fromCIELab(self.CIELab)
self.Temp = self.XYZ.colorTemp
self.Yxy.fromXYZ(self.XYZ)
self.CIELuv.fromXYZ(self.XYZ)
self.CIELCHuv.fromCIELuv(self.CIELuv)
self.HunterLab.fromXYZ(self.XYZ)
self.RGB.fromXYZ(self.XYZ)
self._toRGBDerivedStandards(self)
self._toTVStandards(self)
self._toPrintersStandards(self)
return self
def _fromCIELCHuv(self, *col, **kwargs):
    """ update color spaces from CIE-L*CH°uv space datas
    :param col: either CIE-L*CH°uv tuple or ColCIELCHuv class
    :return: updated self """
    # (Docstring previously referred to CIE-L*CH°ab -- copy/paste error.)
    self.CIELCHuv = ColCIELCHuv(*col, **kwargs)
    # LCHuv -> Luv -> XYZ, then every other space is re-derived from XYZ.
    self.CIELuv.fromCIELCHuv(self.CIELCHuv)
    self.XYZ.fromCIELuv(self.CIELuv)
    self.Temp = self.XYZ.colorTemp
    self.Yxy.fromXYZ(self.XYZ)
    self.CIELab.fromXYZ(self.XYZ)
    self.CIELCHab.fromCIELab(self.CIELab)
    self.HunterLab.fromXYZ(self.XYZ)
    self.RGB.fromXYZ(self.XYZ)
    self._toRGBDerivedStandards(self)
    self._toTVStandards(self)
    self._toPrintersStandards(self)
    return self
# Isolated conversions
def _fromBlackBody(self, *col, **kwargs):
    """ update color spaces from Black Body temperature datas
    :param col: Black Body temperature in Kelvins
    :return: updated self """
    self.BlackBody = ColBlackBody(*col, **kwargs)
    # Delegate to the Yxy path; all other spaces are refreshed there.
    self._fromYxy(*self.BlackBody.refs())  # refs is inherited from Yxy class and should return Yxy list
    # TODO: update WebSafe from other spaces (if applicable)
    return self
def _fromTemp(self, *col, **kwargs):
    """ update color spaces from Temperature in kelvins
    :param col: White temperature in Kelvins
    :return: updated self """
    # TODO: handle exception when Temperature is outside range?
    # Only the first positional value is used; TEMPtoYxy returns a Yxy
    # triple that seeds the full conversion cascade.
    self._fromYxy(*TEMPtoYxy(col[0]))  # Col is usually a Tuple
    return self
def _fromRAL(self, *col, **kwargs):
    """ update color spaces from RAL space datas
    :param col: either RAL string or ColRAL class
    :return: updated self """
    # (Docstring previously said "HEX space datas" -- copy/paste error.)
    self.RAL = ColRAL(*col, **kwargs)
    # presumably ColRAL carries CIE-L*ab reference values -- TODO confirm
    self._fromCIELab(self.RAL)
    # TODO: update RAL from other spaces (if applicable)
    return self
def _fromPantone(self, *col, **kwargs):
    """ update color spaces from Pantone space datas
    :param col: either Pantone color code string or ColPantone class
    :return: updated self """
    self.Pantone = ColPantone(*col, **kwargs)
    # Seed the cascade from the Pantone swatch's RGB reference values.
    self._fromRGB(*self.Pantone.refs())  # refs is inherited from RGB class and should return RGB list
    # TODO: update Pantone from other spaces (if applicable)
    return self
def _fromHTMLrestricted(self, *col, **kwargs):
    """ update color spaces from an HTML-restricted color name (via its
    HEX reference value)
    :param col: either HTMLrestricted color name string or ColHTMLrestricted class
    :return: updated self """
    self.HTMLrestricted = ColHTMLrestricted(*col, **kwargs)
    # Each named color carries its HEX value; delegate to the HEX path.
    self._fromHEX(self.HTMLrestricted.HEX)
    # TODO: update HTMLrestricted from other spaces (if applicable)
    return self
def _fromCSS(self, *col, **kwargs):
    """ update color spaces from a CSS color name (via its HEX reference
    value)
    :param col: either CSS color name string or ColCSS class
    :return: updated self """
    self.CSS = ColCSS(*col, **kwargs)
    # Each named color carries its HEX value; delegate to the HEX path.
    self._fromHEX(self.CSS.HEX)
    # TODO: update CSS from other spaces (if applicable)
    return self
def _fromWebSafe(self, *col, **kwargs):
    """ update color spaces from a WebSafe color (via its HEX reference
    value)
    :param col: either WebSafe string or ColWebSafe class
    :return: updated self """
    self.WebSafe = ColWebSafe(*col, **kwargs)
    # Each web-safe color carries its HEX value; delegate to the HEX path.
    self._fromHEX(self.WebSafe.HEX)
    # TODO: update WebSafe from other spaces (if applicable)
    return self
# Common methods
def print_all(self):
    """ print all color spaces from class in a human readable form """
    # Table of (format template, space names to fetch) pairs; the tab
    # padding in each template reproduces the original column alignment.
    layout = [
        ("RGB\t\t\t{}", ('RGB',)),
        ("RGBW\t\t{}", ('RGBW',)),
        ("RGBDim\t\t{}", ('RGBDim',)),
        ("HSL\t\t\t{}", ('HSL',)),
        ("HSV\t\t\t{}", ('HSV',)),
        ("HWB\t\t\t{} {}", ('HWB', 'Ncol')),
        ("CMY\t\t\t{}", ('CMY',)),
        ("CMYK\t\t{}", ('CMYK',)),
        ("HEX\t\t\t{}", ('HEX',)),
        ("YUV\t\t\t{}", ('YUV',)),
        ("YIQ\t\t\t{}", ('YIQ',)),
        ("YCbCr\t\t{}", ('YCbCr',)),
        ("YDbDr\t\t{}", ('YDbDr',)),
        ("YCoCg\t\t{}", ('YCoCg',)),
        ("YCC\t\t\t{}", ('YCC',)),
        ("XYZ\t\t\t{} {}K", ('XYZ', 'Temp')),
        ("Yxy\t\t\t{}", ('Yxy',)),
        ("CIELuv\t\t{}", ('CIELuv',)),
        ("CIELCHuv\t{}", ('CIELCHuv',)),
        ("CIELab\t\t{}", ('CIELab',)),
        ("CIELCHab\t{}", ('CIELCHab',)),
        ("HunterLab\t{}", ('HunterLab',)),
    ]
    for template, spaces in layout:
        print(template.format(*[self.get(space) for space in spaces]))
    print("")
if __name__ == "__main__":
    # Ad-hoc smoke test: exercise a handful of conversions and print
    # every known color space after each one.
    c = Color()
    # c.set('RGB', 10, 150, 20)
    # c.print_all()
    print("6504K temperature conversion:")
    c.set('Temp', 6504)
    c.print_all()
    print("6504K XYZ conversion (taken from XYZ conversion value from previous test):")
    c.set('XYZ', 93.93442544360434, 99.04590353038346, 104.59307451742508)
    c.print_all()
    print("5000K temperature conversion:")
    c.set('Temp', 5000)
    c.print_all()
    print("RGB White conversion:")
    c.set('RGB', 255, 255, 255)
    c.print_all()
    # NCS notation: hue 'G6' with blackness 4 and chromaticness 41.
    c.set('NCS', 'G6', 4, 41)
    print(c.get('RGB'))
    print(c.get('NCS'))
| StarcoderdataPython |
5048065 | <gh_stars>0
import pytest
import numpy as np
import torch
import random
import gym
from gym.envs.registration import register, registry
from tests.fixtures.net.a2c import net
from prop.algorithms.a2c import Agent
@pytest.fixture
def agent(net):
    """Build an A2C Agent wired to the small test gym environment.

    The custom env is registered lazily so repeated collection of the test
    module does not trigger gym's duplicate-registration error."""
    env_id = 'TestEnv-v0'
    if env_id not in [spec.id for spec in registry.all()]:
        register(
            id=env_id,
            entry_point='tests.fixtures.env:Env',
            reward_threshold=1.5,
        )
    env = gym.make(env_id, obs_size=9, n_actions=3)
    return Agent(
        env=env,
        net=net)
def test_run_episode(agent):
    """A single episode returns a total reward consistent with the
    per-step rewards, plus saved actions and a policy-entropy tensor."""
    ep_reward, step_rewards, saved_actions, entropy = agent.run_episode()
    assert isinstance(ep_reward, np.integer)
    assert isinstance(step_rewards, list) and len(step_rewards) > 0
    # The episode reward must be the sum of the individual step rewards.
    assert ep_reward == sum(step_rewards)
    assert isinstance(saved_actions, list)
    assert isinstance(entropy, torch.Tensor)
def test_evaluate_policy(agent):
    """Smoke test: evaluate_policy runs without raising.

    NOTE(review): nothing is asserted about `stop` or `avg_rewards`; the
    return contract isn't visible here -- consider adding assertions."""
    running_reward = 0
    stop, avg_rewards = agent.evaluate_policy(running_reward)
def test_select_action(agent):
    """Repeatedly selecting actions always yields a legal action and a
    valid probability distribution over the action space."""
    obs = agent.env.reset()
    obs = torch.FloatTensor(obs)
    saved_actions = []
    n_tests = 1000
    for step_n in range(n_tests):
        action, action_dist, dist_entropy = agent.select_action(
            state=obs,
            legal_actions=agent.env.legal_actions,
            saved_actions=saved_actions)
        assert action in range(agent.env.action_space_n)
        assert len(action_dist) == agent.env.action_space_n
        # action distribution sums up to 1 (up to 1e-3 accuracy)
        assert abs(sum(action_dist) - 1) < 1e-3
        # all probabilities >= 0.
        # BUG FIX: the original `assert [n>=0 for n in action_dist]` only
        # checked the truthiness of a non-empty list, which always passes.
        assert all(n >= 0 for n in action_dist)
    assert len(saved_actions) == n_tests
def test_calculate_returns(agent):
    """Discounted returns follow the recurrence R_t = r_t + gamma * R_{t+1},
    computed here by hand from the last step backwards."""
    step_rewards = [-0.1, -0.1, 1]
    rewards = agent.calculate_returns(step_rewards)
    r2 = step_rewards[-1] + 0 * agent.discount
    r1 = step_rewards[-2] + r2 * agent.discount
    r0 = step_rewards[-3] + r1 * agent.discount
    assert rewards == [r0, r1, r2]
def test_standardize_returns(agent):
    """Standardized returns are zero-mean / unit-variance (within float
    tolerance)."""
    returns = [0.7811, 0.89, 1.0]
    std_returns = agent.standardize_returns(returns)
    # mean is 0 (+/- fractions)
    assert abs(std_returns.mean() - 0) < 1e-3
    # std is 1 (+/- fractions)
    assert abs(std_returns.std() - 1.0) < 1e-3
5111884 | from django.db import models
class Author(models.Model):
    # Free-form name parts; no uniqueness constraint is enforced.
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)

    def __str__(self):
        # Human-readable representation (used e.g. by the Django admin).
        return self.first_name + ' ' + self.last_name
class Illustrator(models.Model):
    # Structurally identical to Author; kept separate so each role gets
    # its own table and foreign keys.
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)

    def __str__(self):
        return self.first_name + ' ' + self.last_name
class Book(models.Model):
    name = models.CharField(max_length=255)
    # Optional alternative title.
    alias_name = models.CharField(max_length=255, blank=True)
    pages = models.PositiveSmallIntegerField()
    # Deleting an Author or Illustrator cascades to their books.
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    illustrator = models.ForeignKey(Illustrator, on_delete=models.CASCADE)
    # icon = models.ImageField()
    description = models.CharField(max_length=1000)
    # Set once at insert time, never updated afterwards.
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.name
| StarcoderdataPython |
9680558 | <reponame>Rapen/RapSocket
# Minimal usage example for the RapSocket client library.
from RapSocket import RapSocket

socket = RapSocket()  # NOTE(review): name shadows the stdlib `socket` module
# presumably connect(host, port) -- passing an http:// scheme as the host
# is unusual; verify against the RapSocket API.
socket.connect('http://localhost', 1234)
5007503 | # Generated by Django 4.0.3 on 2022-03-29 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds HomePage.featured_articles after the banners
    # migration.

    dependencies = [
        ('home', '0008_homepage_banners'),
    ]

    operations = [
        migrations.AddField(
            model_name='homepage',
            name='featured_articles',
            # NOTE(review): default=15 (an int) on a TextField looks
            # suspicious -- confirm it wasn't meant to be a count setting.
            field=models.TextField(blank=True, default=15, help_text='featured articles', null=True),
        ),
    ]
| StarcoderdataPython |
4968390 | <filename>examples/avro/py/simulate_cc.py
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import sys
import os
import shutil
import tempfile
from pydoop.mapreduce.simulator import HadoopSimulatorNetwork
from pydoop.mapreduce.pipes import InputSplit
import pydoop.test_support as pts
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT = os.path.join(THIS_DIR, os.pardir)
WD = tempfile.mkdtemp(prefix="pydoop_")
def cp_script(script):
    """Copy *script* into the working dir WD, rewriting its python command
    via pydoop's test support, and mark the copy executable.

    Returns the path of the copied script."""
    dest = os.path.join(WD, os.path.basename(script))
    with open(script) as f, open(dest, "w") as fo:
        fo.write(pts.set_python_cmd(f.read()))
    # rwxr-xr-x so the simulator can exec it directly.
    os.chmod(dest, 0o755)
    return dest
def main(argv):
    """Run the avro_pyrw example through pydoop's Hadoop network simulator.

    argv[1] must be the path to an input Avro file."""
    try:
        data_in = argv[1]
    except IndexError:
        sys.exit("Usage: python %s AVRO_FILE" % argv[0])
    # The example expects the schema and the mapreduce script in the CWD.
    shutil.copy(os.path.join(PARENT, 'schemas', 'stats.avsc'), 'stats.avsc')
    program_name = cp_script(os.path.join(THIS_DIR, 'avro_pyrw.py'))
    path = os.path.realpath(data_in)
    length = os.stat(path).st_size
    # A single input split covering the whole file.
    input_split = InputSplit.to_string('file://' + path, 0, length)
    out_path = os.path.realpath('.')
    conf = {
        "mapreduce.task.partition": "0",
        "mapreduce.task.output.dir": 'file://%s' % out_path,
    }
    hsn = HadoopSimulatorNetwork(program=program_name)
    hsn.run(None, None, conf, input_split=input_split)
main(sys.argv)
| StarcoderdataPython |
5030628 | <reponame>natcap/opal
import os
import json
import hashlib
import logging
from types import UnicodeType
from types import DictType
import multiprocessing
import shutil
import random
import tempfile
import sys
import distutils.sysconfig
import zipfile
from osgeo import gdal
from osgeo import ogr
from natcap.invest.sdr import sdr
from natcap.invest.nutrient import nutrient
from natcap.invest.carbon import carbon_combined as carbon
import pygeoprocessing
import numpy
import scipy
import preprocessing
import utils
LOGGER = logging.getLogger('natcap.opal.static_maps')
NODATA = 999999.0
COLOMBIA_BARE_LUCODE = 301
COLOMBIA_PAVED_LUCODE = 89
MODELS = {
'carbon': {
'module': carbon,
'landcover_key': 'lulc_cur_uri',
'target_raster': os.path.join('output', 'tot_C_cur.tif'),
'watersheds_key': None,
},
'sediment': {
'module': sdr,
'landcover_key': 'lulc_uri',
'target_raster': os.path.join('output', 'sed_export.tif'),
'watersheds_key': 'watersheds_uri',
},
'nutrient': {
'module': nutrient,
'landcover_key': 'lulc_uri',
'target_raster': os.path.join('output', 'n_export.tif'),
'watersheds_key': 'watersheds_uri',
}
}
def _write_future_json(workspace, future_type):
"""Write the future type to a json object at workspace/future_type.json.
workspace - a URI to the static maps workspace.
future_type - a string indicating the future type. One of 'protection'
or 'restoration'
Returns nothing."""
json_uri = os.path.join(workspace, 'future_type.json')
json.dumps({'future_type': future_type}, open(json_uri, 'w'), indent=4,
sort_keys=True)
def execute(args):
"""Entry point for generating static sediment maps.
args - a python dictionary with the following attributes:
workspace_dir (required)
landuse_uri (required)
landcover_code (required)
future_type (required) - either 'protection' or 'restoration'
model_name (required) - either 'carbon' or 'sediment'
do_parallelism (optional) - Boolean. Assumed to be False.
fut_landuse_uri (optional) - URI. If not present, a future
landcover scenario will not be calculated.
If model_name is 'sediment', these keys may be provided. If they
are not provided, default values will be assumed.
dem_uri (optional)
erosivity_uri (optional)
erodibility_uri(optional)
watersheds_uri (optional)
biophysical_table_uri (optional)
threshold_flow_accumulation (optional)
slope_threshold (optional)
sediment_threshold_table_uri (optional)
If model_name is either 'sediment' or 'nutrient', the following
is optional:
num_simulations - an int indicating the number of impact sites
that should be simulated per watershed. If this key is not
provided, static map quality estimates will be skipped.
"""
for key in ['workspace_dir', 'landuse_uri', 'paved_landcover_code',
'bare_landcover_code', 'model_name']:
assert key in args, "Args is missing a key: %s" % key
assert args['model_name'] in ['carbon', 'sediment', 'nutrient'], (
'Model name must be one of "carbon", "sediment", or "nutrient",'
'not %s' % args['model_name'])
if not os.path.exists(args['workspace_dir']):
os.makedirs(args['workspace_dir'])
LOGGER.debug('Creating new workspace: %s', args['workspace_dir'])
# create a logging handler and write its contents out to the logfile.
log_handler = logging.FileHandler(os.path.join(args['workspace_dir'],
'logfile.txt'))
log_formatter = logging.Formatter(
fmt=(
'%(asctime)s %(name)-18s '
'%(threadName)-10s %(levelname)-8s %(message)s'),
datefmt='%m/%d/%Y %H:%M:%S ')
log_handler.setFormatter(log_formatter)
LOGGER.addHandler(log_handler)
temp_dir = os.path.join(args['workspace_dir'], 'tmp')
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
tempfile.tempdir = temp_dir
if args['model_name'] == 'carbon':
args['lulc_cur_uri'] = args['landuse_uri']
elif args['model_name'] in ['nutrient', 'sediment']:
args['lulc_uri'] = args['landuse_uri']
model_args = {}
default_config = get_static_data_json(args['model_name'])
for key, value in default_config.iteritems():
if key in args:
model_args[key] = args[key]
else:
model_args[key] = value
if args['model_name'] == 'sediment':
# we're ok with the landuse dictionary key.
# clip the DEM to the landcover raster
new_dem_uri = os.path.join(
model_args['workspace_dir'],
'clipped_dem.tif')
LOGGER.debug('Current DEM: %s', model_args['dem_uri'])
LOGGER.debug('Saving clipped DEM to %s', new_dem_uri)
_clip_dem(model_args['dem_uri'], model_args['lulc_uri'], new_dem_uri)
model_args['dem_uri'] = new_dem_uri
elif args['model_name'] == 'nutrient':
# filter the watersheds to just those that intersect with the LULC.
new_watersheds_uri = os.path.join(model_args['workspace_dir'],
'watersheds_filtered.shp')
preprocessing.filter_by_raster(
model_args['lulc_uri'],
model_args['watersheds_uri'],
new_watersheds_uri)
model_args['watersheds_uri'] = new_watersheds_uri
model_args['soil_depth_uri'] = model_args[
'depth_to_root_rest_layer_uri']
try:
model_args['eto_uri'] = args['potential_evapotranspiration']
except KeyError:
# MAFE uses the internal eto_uri, so we can skip.
LOGGER.debug('No key "potential_evapotranspiration"')
LOGGER.debug(
'Using these model args:\n%s',
json.dumps(
model_args,
sort_keys=True,
indent=4))
# now, run the sediment model on the input landcover
LOGGER.info('Running the model on the original landscape')
original_workspace = os.path.join(args['workspace_dir'],
'%s_base' % args['model_name'])
if not os.path.exists(original_workspace):
os.makedirs(original_workspace)
LOGGER.debug('Making workspace for base scenario: %s',
original_workspace)
LOGGER.debug('Original workspace: %s', original_workspace)
execute_model(args['model_name'], args['landuse_uri'], original_workspace,
model_args)
base_raster = os.path.join(original_workspace,
MODELS[args['model_name']]['target_raster'])
try:
do_parallelism = args['do_parallelism']
LOGGER.debug('Process-based parallelism enabled')
except KeyError:
LOGGER.debug('Process-based parallelism disabled')
do_parallelism = False
try:
num_simulations = int(args['num_simulations'])
LOGGER.debug('User requested to do %s simulations per watershed',
num_simulations)
except KeyError:
num_simulations = None
LOGGER.debug('Skipping impact simulations')
processes = []
for impact_type in ['paved', 'bare']:
LOGGER.debug('Starting calculations for impact %s', impact_type)
impact_code = int(args['%s_landcover_code' % impact_type])
impact_workspace = os.path.join(args['workspace_dir'], impact_type)
static_map_uri = os.path.join(
args['workspace_dir'], '%s_%s_static_map.tif' %
(args['model_name'], impact_type))
# Carbon is the only known ES that has inverted values.
invert = True if args['model_name'] == 'carbon' else False
if do_parallelism:
process = multiprocessing.Process(
target=build_static_map,
args=(
args['model_name'],
args['landuse_uri'],
impact_code,
static_map_uri,
base_raster,
model_args,
impact_workspace),
kwargs={
'num_simulations': num_simulations,
'invert': invert})
processes.append(process)
process.start()
else:
build_static_map(
args['model_name'],
args['landuse_uri'],
impact_code,
static_map_uri,
base_raster,
model_args,
impact_workspace,
num_simulations=num_simulations,
invert=invert)
# Build the static protection map if the user has provided a future
# landcover scenario.
LOGGER.info('Found a future landcover. Building (%s) map.',
args['future_type'])
if 'fut_landuse_uri' not in args:
LOGGER.debug('No custom future lulc found. Clipping default.')
# if the user has not provided a custom lulc, we should clip the
# existing future lulc to the size of the user-defined current lulc.
common_data = get_common_data_json()
future_landuse_uri = os.path.join(
args['workspace_dir'],
'future_landuse.tif')
_clip_dem(common_data['future_landcover'], args['landuse_uri'],
future_landuse_uri)
else:
future_landuse_uri = args['fut_landuse_uri']
LOGGER.debug('Future landcover %s', future_landuse_uri)
# determine whether the future, converted landcover should be inverted,
# based on protection or restoration.
_write_future_json(args['workspace_dir'], args['future_type'])
if args['future_type'] == 'protection':
invert = True
future_tif_name = 'protect'
else:
invert = False
future_tif_name = 'restore'
# if we're building carbon static maps, the invert flag is opposite of what
# other models would do, for all cases.
if args['model_name'] == 'carbon':
invert = not invert
future_map_uri = os.path.join(
args['workspace_dir'], '%s_%s_static_map.tif' %
(args['model_name'], future_tif_name))
future_workspace = os.path.join(args['workspace_dir'], future_tif_name)
build_static_map(args['model_name'], args['landuse_uri'], future_landuse_uri,
future_map_uri, base_raster, model_args, future_workspace,
convert_landcover=False, # just use the future landcover
num_simulations=num_simulations, invert=invert)
LOGGER.info('Completed the future (%s) static map.', args['future_type'])
# If we aren't doing parallelism, then this list has no elements in it.
for process in processes:
process.join()
# If we just ran the nutrient model, we need to copy the appropriate
# percent-to-stream rasters to the root static maps directory.
# Do this copy by recompressing the GeoTiff using DEFLATE instead of LZW.
# See issue 2910 (code.google.com/p/invest-natcap/issues/detail?id=2910)
if args['model_name'] in ['nutrient']:
fmt_string = os.path.join(args['workspace_dir'], '%s',
'%s_converted' % args['model_name'], 'intermediate')
pts_name = 'n_percent_to_stream.tif'
fmt_string = os.path.join(fmt_string, pts_name)
percent_to_streams = [
(fmt_string % 'paved', '%s_paved_pts.tif' % args['model_name']),
(fmt_string % 'bare', '%s_bare_pts.tif' % args['model_name']),
(fmt_string % future_tif_name, '%s_%s_pts.tif' %
(args['model_name'], future_tif_name)),
]
for source_uri, dest_uri in percent_to_streams:
dest_uri = os.path.join(args['workspace_dir'], dest_uri)
LOGGER.debug('Copying %s to %s', source_uri, dest_uri)
preprocessing.recompress_gtiff(source_uri, dest_uri, 'DEFLATE')
LOGGER.debug('Completed creating the %s static maps', args['model_name'])
def raster_math(args):
    """Perform all of the raster math to create static maps from the input
    model run rasters.

    args - a python dictionary with the following attributes:
        workspace_dir - a URI to the output workspace
        name - a string name to be used in the filename of all output static maps
        base_uri - a URI to a GDAL raster of the base scenario's service values.
        paved_uri - a URI to a GDAL raster of the paved scenario's service values.
        bare_uri - a URI to a GDAL raster of the bare scenario's service values.
        future_uri - a URI to a GDAL raster of the future scenario's
            service values.
        future_type - a string, either 'protection' or 'restoration'

    Returns None, but writes the following files to disk:
        workspace/<name>_bare_static_map.tif
        workspace/<name>_paved_static_map.tif
        workspace/<name>_protect_static_map.tif (future_type 'protection')
            or workspace/<name>_restore_static_map.tif ('restoration')
        workspace/future_type.json"""
    workspace = args['workspace_dir']
    name = args['name']
    base_uri = args['base_uri']
    paved_uri = args['paved_uri']
    bare_uri = args['bare_uri']
    future_uri = args['future_uri']
    future_scenario = args['future_type']

    # BUG FIX: the workspace must exist before _write_future_json() opens a
    # file inside it, so the directory is now created first.
    if not os.path.exists(workspace):
        LOGGER.debug('Creating output workspace %s', workspace)
        os.makedirs(workspace)
    _write_future_json(workspace, future_scenario)

    def _ws_path(static_map_type):
        # Build a static map filename inside the output workspace.
        return os.path.join(workspace,
            '%s_%s_static_map.tif' % (name, static_map_type))

    future_tif_base = 'protect' if future_scenario == 'protection' else 'restore'
    bare_sm_uri = _ws_path('bare')
    paved_sm_uri = _ws_path('paved')
    future_sm_uri = _ws_path(future_tif_base)

    # create the bare static map
    LOGGER.debug('Creating the bare static map %s', bare_sm_uri)
    subtract_rasters(bare_uri, base_uri, bare_sm_uri)

    # create the paved static map
    LOGGER.debug('Creating the paved static map %s', paved_sm_uri)
    subtract_rasters(paved_uri, base_uri, paved_sm_uri)

    # create the future static map.  For protection the lost service is
    # base - future; for restoration the gained service is future - base.
    LOGGER.debug(
        'Creating the %s static map %s',
        future_scenario,
        future_sm_uri)
    if future_scenario == 'protection':
        subtract_rasters(base_uri, future_uri, future_sm_uri)
    else:  # when args['future_type'] is 'restoration'
        subtract_rasters(future_uri, base_uri, future_sm_uri)
    LOGGER.debug('Finished creating the static maps')
def _clip_dem(dem_uri, lulc_uri, out_dem_uri):
    """Clip the input DEM to the LULC and save the resulting raster to the
    out_dem_uri.

    The output keeps the DEM's datatype, nodata value and cell size; only
    the extents change (intersection of DEM and LULC).  Returns nothing."""
    utils.assert_files_exist([dem_uri, lulc_uri])
    nodata = pygeoprocessing.get_nodata_from_uri(dem_uri)
    pixel_size = pygeoprocessing.get_cell_size_from_uri(dem_uri)
    datatype = pygeoprocessing.get_datatype_from_uri(dem_uri)
    # Identity op on the DEM band; 'intersection' restricts the output to
    # the overlapping extents of the two inputs.
    pygeoprocessing.vectorize_datasets([dem_uri, lulc_uri], lambda x, y: x,
        dataset_out_uri=out_dem_uri, datatype_out=datatype, nodata_out=nodata,
        pixel_size_out=pixel_size, bounding_box_mode='intersection',
        vectorize_op=False)
def convert_lulc(lulc_uri, new_code, out_uri):
    """Use vectorize_datasets to convert a land cover raster to a new landcover
    code and save it to an output dataset.

    lulc_uri - a uri to a GDAL landcover raster on disk.
    new_code - an integer landcover code
    out_uri - a uri to which the converted landcover will be written

    Returns nothing."""
    utils.assert_files_exist([lulc_uri])
    # NOTE(review): if the source raster has no nodata value this is None
    # and the != comparison below converts every pixel -- confirm inputs
    # always define nodata.
    nodata = pygeoprocessing.get_nodata_from_uri(lulc_uri)

    # unless there's a nodata value, we want to change all pixels to the new
    # lulc code.
    def _convert(pixels):
        return numpy.where(pixels != nodata, new_code, nodata)

    pixel_size = pygeoprocessing.get_cell_size_from_uri(lulc_uri)
    datatype = pygeoprocessing.get_datatype_from_uri(lulc_uri)
    pygeoprocessing.vectorize_datasets([lulc_uri], _convert,
        dataset_out_uri=out_uri, datatype_out=datatype,
        nodata_out=nodata, pixel_size_out=pixel_size,
        bounding_box_mode='intersection', vectorize_op=False)
def unzip_static_zipfile(zipfile_uri):
    """Unzip the given archive into the static maps data folder.

    zipfile_uri - a URI to a zipfile on disk.

    The archive's contents are extracted into
    ``<CWD>/data/colombia_static_data``.  Returns nothing."""
    utils.assert_files_exist([zipfile_uri])
    static_data_dir = os.path.join(os.getcwd(), 'data',
                                   'colombia_static_data')
    # BUG FIX: the original opened the hard-coded filename 'test.zip'
    # instead of the archive the caller passed in.
    with zipfile.ZipFile(zipfile_uri, 'r') as zip_archive:
        zip_archive.extractall(static_data_dir)
def execute_model(model_name, landcover_uri, workspace_uri, config=None):
    """Run one InVEST model (from MODELS) on the given landcover.

    model_name - a string; must be a key in MODELS.
    landcover_uri - URI to the landcover raster passed as the model's
        landcover input.
    workspace_uri - URI to the model's output workspace.
    config=None - optional dictionary of model arguments.  When None, the
        internal defaults for the model are loaded.

    Returns nothing."""
    assert model_name in MODELS.keys()
    # if config is None, then we load the static_map parameters from the
    # correct internal json file.
    # if it's not None, then it must be a configuration dictionary.
    if config is None:
        config = get_static_data_json(model_name)
    else:
        assert isinstance(config, DictType), ("Found %s: %s" % (
            type(config), config))

    # - loop through each key in the configuration file.
    #    * numeric-looking strings are coerced to float
    #    * relative paths that don't exist are resolved against CWD/..
    #    * everything else is left alone.
    new_config = {}
    for key, value in config.iteritems():
        if isinstance(value, UnicodeType):
            try:
                new_value = float(value)
            except ValueError:
                if len(value) > 0:
                    if not os.path.exists(value):
                        new_value = os.path.join(os.getcwd(), '..', value)
                    else:
                        # if the user-defined file does exist, just use that.
                        new_value = value
                else:
                    new_value = value
        else:
            new_value = value
        new_config[key] = new_value

    # - add the new keys needed for the model run:
    #    * workspace_uri - use the URI passed in to this function.
    #    * landcover_uri - use the URI passed in to this function.
    new_config['workspace_dir'] = workspace_uri
    new_config[MODELS[model_name]['landcover_key']] = landcover_uri
    LOGGER.debug(
        'Executing model with arguments: %s',
        json.dumps(
            new_config,
            sort_keys=True,
            indent=4))
    MODELS[model_name]['module'].execute(new_config)
def build_static_map(
model_name,
landcover_uri,
landcover_code,
static_map_uri,
base_run,
config=None,
workspace=None,
convert_landcover=True,
num_simulations=None,
invert=False):
"""Build the static map for the target ecosystem service. Currently assumes
we're doing sediment only.
model_name - a string. Must be a key in MODELS.
landcover_uri - a URI to the user's input landcover. A raster.
landcover_core - an integer landcover code to convert to.
static_map_uri - a URI to where the output static map should be written
base_run - a URI to the output map from a base run of the target model.
config=None - a python dictionary with arguments for the target model. If
None, the defaults will be loaded from internal defaults.
workspace=None - the output workspace to which the model runs should be
written. If None, they will be saved to a temporary folder and
deleted at the end of the static map generation.
base_run=None - A uri to a workspace of the target model.
num_simulations=None - number of simulations to run per watershed. If
None, no simulations will be run.
invert=False - whether to invert the subtraction. When invert==False,
the static map produced will be the difference of `base_run` -
`converted`, where `converted` is the converted or input landcover.
When invert==True, the static map produced will be the differece
of `converted` - `base_run`.
"""
assert invert in [True, False], '%s found instead' % type(invert)
assert model_name in MODELS.keys()
LOGGER.info('Building static map for the %s model', model_name)
if workspace is not None:
LOGGER.debug('Using workspace %s', workspace)
if not os.path.exists(workspace):
LOGGER.debug('Creating workspace folder %s', workspace)
os.makedirs(workspace)
else:
workspace = pygeoprocessing.temporary_folder()
LOGGER.debug('Writing model workspace data to %s', workspace)
# convert the LULC to the correct landcover code
if convert_landcover:
converted_lulc = os.path.join(workspace, 'converted_lulc.tif')
LOGGER.info('Creating converted landcover raster: %s', converted_lulc)
convert_lulc(landcover_uri, landcover_code, converted_lulc)
landcover_label = str(landcover_code)
else:
converted_lulc = landcover_uri
landcover_label = 'transformed'
LOGGER.info('Running the model on the converted landcover')
# run the sediment model on the converted LULC
target_raster = MODELS[model_name]['target_raster']
converted_workspace = os.path.join(workspace, '%s_converted' % model_name)
LOGGER.debug('Converted workspace: %s', converted_workspace)
execute_model(model_name, converted_lulc, converted_workspace, config)
converted_es_map = os.path.join(converted_workspace, target_raster)
# subtract the two rasters.
# If we're running the carbon model, the service we're measuring (carbon
# storage) has positive values being 'good' For sediment and nutrient,
# however, positive values represent sediment/nutrient export to the steam,
# which is 'bad'. For sediment and nutrient, we want to invert the result.
# This is done by just reversing the order of the subtraction.
LOGGER.info('Subtracting the two rasters. Invert=%s', invert)
if invert is True:
subtract_rasters(converted_es_map, base_run, static_map_uri)
else:
subtract_rasters(base_run, converted_es_map, static_map_uri)
if num_simulations is not None:
if workspace is None:
# if the user created the workspace in a temporary folder, create a
# folder in CWD for the quality workspace.
workspace = os.getcwd()
if config is None:
# in case the user has not provided the config dictionary
config = get_static_data_json(model_name)
watersheds = config[MODELS[model_name]['watersheds_key']]
simulation_workspace = os.path.join(workspace, 'simulations_%s' %
landcover_label)
test_static_map_quality(
base_run,
static_map_uri,
landcover_uri,
landcover_code,
watersheds,
model_name,
simulation_workspace,
config,
num_simulations,
invert=invert)
simulations_csv = os.path.join(simulation_workspace,
'impact_site_simulation.csv')
out_png = os.path.join(simulation_workspace, 'simulations.png')
graph_it(simulations_csv, out_png)
LOGGER.info('Finished')
def subtract_rasters(raster_a, raster_b, out_uri):
    """Write the pixel-wise difference (raster_a - raster_b) to out_uri.

    The rasters are intersected before subtraction; output cell size comes
    from raster_a while nodata and datatype come from raster_b.

    raster_a - a URI to a GDAL raster (minuend).
    raster_b - a URI to a GDAL raster (subtrahend).
    out_uri - a URI where the difference raster will be written.
    """
    utils.assert_files_exist([raster_a, raster_b])
    LOGGER.debug('Subtracting rasters %s and %s', raster_a, raster_b)
    LOGGER.debug('Saving difference to %s', out_uri)
    pixel_size = pygeoprocessing.get_cell_size_from_uri(raster_a)
    nodata = pygeoprocessing.get_nodata_from_uri(raster_b)
    datatype = pygeoprocessing.get_datatype_from_uri(raster_b)
    LOGGER.debug('Output pixel size: %s', pixel_size)
    LOGGER.debug('Output nodata value: %s', nodata)
    LOGGER.debug('Output datatype: %s', datatype)
    # NOTE(review): nodata is read from raster_b but compared against values
    # from raster_a (the lambda's first argument `c`).  Confirm both rasters
    # use the same nodata value, otherwise nodata in raster_a leaks through.
    pygeoprocessing.vectorize_datasets([raster_a, raster_b],
        lambda c, o: numpy.where(c != nodata, numpy.subtract(c, o), nodata),
        dataset_out_uri = out_uri,
        datatype_out=datatype, nodata_out=nodata,
        pixel_size_out=pixel_size, bounding_box_mode='intersection',
        vectorize_op=False)
def get_static_data_json(model_name):
    """Get the absolute path to the static data JSON file for the target model.

    model_name - a python string model name. Must be a key in MODELS.

    Returns a python dictionary with the configuration."""
    # Validate with a real raise instead of `assert`, which is silently
    # stripped when python runs with -O.  AssertionError is kept so any
    # existing callers that catch it keep working.
    if model_name not in MODELS:
        raise AssertionError('Unknown model name: %s' % model_name)
    # The sediment model's parameter file is named after its newer SDR name.
    if model_name == 'sediment':
        model_name = 'sdr'
    json_name = '%s_parameters.json' % model_name
    return _load_json(json_name)
def get_common_data_json(data_dir=None):
    """Return a dictionary with paths to common data (e.g. hydrozones, etc.).

    data_dir=None - the directory that relative paths from the JSON file are
        joined against.  When None, the executable's directory is used in a
        frozen (PyInstaller) build, otherwise the current working directory.

    Returns a python dictionary"""
    common_data_name = 'common_data.json'
    config = _load_json(common_data_name)
    if data_dir is None:
        if getattr(sys, 'frozen', False):
            # Running inside a PyInstaller bundle; data sits next to the exe.
            data_dir = os.path.dirname(sys.executable)
        else:
            data_dir = os.getcwd()
    def _render_dict(dictionary):
        # Recursively join every leaf value (assumed to be a relative path)
        # onto data_dir, preserving the nested dict structure.
        output_dict = {}
        for key, item in dictionary.iteritems():
            if isinstance(item, DictType):
                rendered_item = _render_dict(item)
            else:
                rendered_item = os.path.join(data_dir, item)
            output_dict[key] = rendered_item
        return output_dict
    return _render_dict(config)
def _load_json(filename):
    """Fetch a json file from the adept package's static_data folder.

    filename - the basename of a JSON file inside the static_data folder.

    Returns a python dictionary of the json data found in the target json
    file."""
    if getattr(sys, 'frozen', False):
        # we are running in a |PyInstaller| bundle
        # basedir = sys._MEIPASS #suggested by pyinstaller
        basedir = os.path.join(sys._MEIPASS, 'natcap', 'opal')
    else:
        # Running from a source checkout: static_data sits next to this module.
        basedir = os.path.dirname(__file__)
    LOGGER.debug('__file__ == %s', __file__)
    LOGGER.debug('sys.executable == %s', sys.executable)
    LOGGER.debug('site-packages == %s', distutils.sysconfig.get_python_lib())
    LOGGER.debug('Looking for common static data in %s', basedir)
    config_file = os.path.join(basedir, 'static_data', filename)
    # NOTE(review): the file handle is never closed; harmless in a short-lived
    # process, but a `with` block would be tidier.
    config = json.load(open(config_file))
    return config
def get_json_md5(json_uri):
    """Get an md5 hash for the python dictionary stored in a json file. This
    function tries to be aware of whether a value points to a file on disk.
    When this happens, we fetch an MD5sum for the file and use that in the
    digest instead of the URI.

    json_uri - a URI to a JSON file holding a flat dictionary.

    Returns a python string with an MD5sum digest of the json object."""
    utils.assert_files_exist(json_uri)
    LOGGER.debug('Loading json from %s', json_uri)
    config = json.load(open(json_uri))
    config_md5sum = hashlib.md5()
    # assume that this is a flat dictionary.
    # NOTE(review): dict iteration order is not guaranteed to be stable across
    # interpreter versions/processes, so equal dictionaries may not hash
    # identically -- confirm before relying on cross-run comparisons.
    for key, value in config.iteritems():
        # if the value is a unicode string that is a URI to a file on disk, we
        # want to get the md5sum of the file and use that as the value in the
        # config's md5sum. If it's not a URI to a file on disk, we'll just use
        # the value as is.
        if isinstance(value, UnicodeType):
            if os.path.exists(value):
                LOGGER.debug('Value %s is a URI', value)
                file_handler = open(value, 'rb')
                file_md5 = hashlib.md5()
                # Hash the file in 1MB chunks so huge files stay cheap on RAM.
                for chunk in iter(lambda: file_handler.read(2**20), ''):
                    file_md5.update(chunk)
                value = file_md5.hexdigest()
        LOGGER.debug('Updating digest with %s: %s', key, value)
        config_md5sum.update(key)
        config_md5sum.update(value)
    return config_md5sum.hexdigest()
def clip_raster_to_watershed(in_raster, ws_vector, out_uri, clip_raster=None):
    """Clip the input raster to ws_vector, saving the output raster to out_uri.

    in_raster - a URI to an input GDAL raster.
    ws_vector - a URI to an OGR vector that contains a single polygon of a
        watershed.
    out_uri - a URI to where the output raster should be saved.
    clip_raster=None - an optional URI to a GDAL raster.  When provided, any
        pixel that is nodata in clip_raster is also masked out of the output.

    Returns nothing.
    """
    # The output inherits datatype/nodata/cell size from the input raster.
    datatype = pygeoprocessing.get_datatype_from_uri(in_raster)
    nodata = pygeoprocessing.get_nodata_from_uri(in_raster)
    pixel_size = pygeoprocessing.get_cell_size_from_uri(in_raster)
    if clip_raster is not None:
        rasters = [in_raster, clip_raster]
        clip_nodata = pygeoprocessing.get_nodata_from_uri(clip_raster)
        def operation(in_values, clip_values):
            # Punch out any pixels that are nodata in the clip raster.
            return numpy.where(
                clip_values == clip_nodata,
                clip_nodata,
                in_values)
    else:
        rasters = [in_raster]
        operation = lambda x: x
    pygeoprocessing.vectorize_datasets(
        rasters,
        operation,
        out_uri,
        datatype,
        nodata,
        pixel_size,
        'intersection',
        dataset_to_align_index=0,
        aoi_uri=ws_vector,
        vectorize_op=False)
def make_random_impact_vector(new_vector, base_vector, side_length):
    """Create a new vector with a single, squarish polygon. This polygon will
    be created within the spatial envelope of the first polygon in base_vector.
    The new squarish polygon will have a side length of side_length.

    new_vector - a URI to the new vector to be created. The new vector will
        be an ESRI Shapefile.
    base_vector - a URI to the vector we'll use as a base (for its spatial
        information).
    side_length - a python int or float describing the side length of the
        new polygon to be created.

    Returns nothing."""
    base_datasource = ogr.Open(base_vector)
    base_layer = base_datasource.GetLayer()
    base_feature = base_layer.GetFeature(0)
    base_geometry = base_feature.GetGeometryRef()
    spat_ref = base_layer.GetSpatialRef()
    # feature_extent = [xmin, xmax, ymin, ymax]
    feature_extent = base_geometry.GetEnvelope()
    driver = ogr.GetDriverByName('ESRI Shapefile')
    datasource = driver.CreateDataSource(new_vector)
    uri_basename = os.path.basename(new_vector)
    layer_name = str(os.path.splitext(uri_basename)[0])
    layer = datasource.CreateLayer(layer_name, spat_ref, ogr.wkbPolygon)
    # Add a single ID field
    field = ogr.FieldDefn('id', ogr.OFTInteger)
    layer.CreateField(field)
    # Rejection-sample random squares until one falls fully inside the
    # watershed polygon.
    # NOTE(review): if side_length is too large for the polygon to contain a
    # square of that size, this loop never terminates -- confirm callers bound
    # side_length appropriately.
    while True:
        poly_ring = ogr.Geometry(type=ogr.wkbLinearRing)
        bbox_width = feature_extent[1]-feature_extent[0]
        bbox_height = feature_extent[3]-feature_extent[2]
        rand_width_percent = random.random()
        xmin = feature_extent[0] + bbox_width * rand_width_percent
        xmax = xmin + side_length
        # Make it squarish
        rand_height_percent = random.random()
        ymin = feature_extent[2] + bbox_height * rand_height_percent
        ymax = ymin + side_length
        # Close the ring by repeating the first corner at the end.
        poly_ring.AddPoint(xmin, ymin)
        poly_ring.AddPoint(xmin, ymax)
        poly_ring.AddPoint(xmax, ymax)
        poly_ring.AddPoint(xmax, ymin)
        poly_ring.AddPoint(xmin, ymin)
        polygon = ogr.Geometry(ogr.wkbPolygon)
        polygon.AddGeometry(poly_ring)
        # See if the watershed contains the permitting polygon
        contained = base_geometry.Contains(polygon)
        if contained:
            break
    feature = ogr.Feature(layer.GetLayerDefn())
    feature.SetGeometry(polygon)
    feature.SetField(0, 1)
    layer.CreateFeature(feature)
    # Release OGR references so the shapefile is flushed to disk.
    feature = None
    layer = None
def get_watershed_id(watershed_uri):
    """Get the ID from the watershed specified by the user.

    watershed_uri (string) - A String URI to the watershed vector.  This
        vector is assumed to have exactly one watershed polygon in it.

    Returns an int watershed ID."""
    # Read the 'ws_id' attribute off the single feature in the vector.
    vector = ogr.Open(watershed_uri)
    first_feature = vector.GetLayer().GetFeature(0)
    ws_id = first_feature.GetField('ws_id')
    LOGGER.debug('This watershed\'s ws_id: %s', ws_id)
    return ws_id
def test_static_map_quality(
        base_run,
        base_static_map,
        landuse_uri,
        impact_lucode,
        watersheds_uri,
        model_name,
        workspace,
        config,
        num_iterations=5,
        clean_workspaces=False,
        start_ws=0,
        start_impact=0,
        invert=None):
    """Test the quality of the provided static map.

    Args:
        base_run (filepath): The base run of the target model on the base lulc.
        base_static_map (filepath): The static map generated from the difference
            between the base_run raster and the entire landscape converted to
            the target impact type.
        landuse_uri (filepath): A URI to the LULC used for the base static map.
        impact_lucode (int or float): The numeric land use code to use to
            convert the underlying lulc raster to the target impact type.
        watersheds_uri (filepath): A filepath to the watersheds vector to use
            for testing. Must have a column of integers in its attribute table
            labeled "ws_id".
        model_name (string): The string model name to run.
        workspace (filepath): The path to the folder to use as a workspace. If
            this folder does not already exist, it will be created.
        config (dict): The arguments dictionary to use for running the model.
            See `static_maps.execute_model()` for details, or else
            `natcap/opal/static_data/<model_name>_parameters.json` for sample
            argument dictionaries (albeit serialized as JSON).
        num_iterations=5 (int, optional): The number of simulated impacts to
            run per watershed.
        clean_workspaces=False (boolean, optional): Whether to remove the
            workspace before starting to test the inputs.
        start_ws=0 (int, optional): The watershed index to start on. If 0, all
            watersheds will be tested. Useful for resuming testing after
            failure (such as when running out of disk space).
        start_impact=0 (int, optional): The integer impact ID to start on.
            This must be less than `num_iterations`.
        invert=None (boolean): Whether to invert the static map calculation.

    Returns:
        Nothing.
    """
    assert invert in [True, False], '%s found instead' % type(invert)
    # NOTE(review): tempfile.tempdir is modified globally here and never
    # restored afterwards (old_tempdir is saved but unused).
    old_tempdir = tempfile.tempdir
    temp_dir = os.path.join(workspace, 'tmp')  # for ALL tempfiles
    tempfile.tempdir = temp_dir  # all tempfiles will be saved here.
    # make a copy of the configuration dictionary so that we don't modify it
    # accidentally.
    config = config.copy()
    # make all the folders we know about at the moment
    pygeoprocessing.create_directories([workspace, temp_dir])
    # Open a logfile so we can incrementally write model data we care about
    logfile_uri = os.path.join(workspace, 'impact_site_simulation.csv')
    logfile = open(logfile_uri, 'a')
    labels = ['ws_id', 'Impact ID', 'Impact Area', 'Static Estimate',
        'InVEST Estimate', 'Estimate Ratio']
    logfile.write("%s\n" % ','.join(labels))
    logfile.close()
    lulc_nodata = pygeoprocessing.get_nodata_from_uri(landuse_uri)
    lulc_pixel_size = pygeoprocessing.get_cell_size_from_uri(landuse_uri)
    # limit the watersheds to just those that intersect the input lulc.
    current_watersheds = os.path.join(temp_dir, 'current_watersheds.shp')
    preprocessing.filter_by_raster(landuse_uri, watersheds_uri,
        current_watersheds, clip=True)
    # get the sediment export from the base raster, passed in from the user.
    # calculate for each watershed, so I can access these later.
    #base_export = pygeoprocessing.aggregate_raster_values_uri(
    #    base_run, current_watersheds, 'ws_id', 'sum').total
    #LOGGER.debug('All watershed ids: %s', base_export.keys())
    # split the watersheds so I can use each watershed as an AOI for the
    # correct model later on.
    watersheds_dir = os.path.join(workspace, 'watershed_vectors')
    split_watersheds = split_datasource(
        current_watersheds,
        watersheds_dir,
        ['ws_id'])
    for ws_index, watershed_uri in enumerate(split_watersheds):
        if ws_index < start_ws:
            LOGGER.debug(
                'Watershed %s is less than start index %s. skipping',
                ws_index, start_ws)
            continue
        watershed_workspace = os.path.join(
            workspace, 'watershed_%s' % ws_index)
        if not os.path.exists(watershed_workspace):
            os.makedirs(watershed_workspace)
        # get this watershed's ws_id
        watershed_vector = ogr.Open(watershed_uri)
        watershed_layer = watershed_vector.GetLayer()
        watershed = watershed_layer.GetFeature(0)
        watershed_id = watershed.GetField('ws_id')
        LOGGER.debug('This watershed\'s ws_id: %s', watershed_id)
        watershed_lulc = os.path.join(watershed_workspace,
            'watershed_lulc.tif')
        lulc_datatype = pygeoprocessing.get_datatype_from_uri(landuse_uri)
        # Clip the lulc to just this watershed.
        pygeoprocessing.vectorize_datasets([landuse_uri], lambda x: x,
            watershed_lulc, lulc_datatype, lulc_nodata, lulc_pixel_size,
            'intersection', dataset_to_align_index=0, aoi_uri=watershed_uri,
            vectorize_op=False)
        ws_base_export_uri = os.path.join(watershed_workspace,
            'watershed_' + os.path.basename(base_run))
        base_nodata = pygeoprocessing.get_nodata_from_uri(base_run)
        base_pixel_size = pygeoprocessing.get_cell_size_from_uri(base_run)
        base_export_datatype = pygeoprocessing.get_datatype_from_uri(base_run)
        # Clip the base-run export raster to this watershed as well.
        pygeoprocessing.vectorize_datasets([base_run], lambda x: x,
            ws_base_export_uri, base_export_datatype, base_nodata,
            base_pixel_size, 'intersection', dataset_to_align_index=0,
            aoi_uri=watershed_uri, vectorize_op=False)
        base_ws_export = pygeoprocessing.aggregate_raster_values_uri(
            ws_base_export_uri, watershed_uri, 'ws_id',
            'sum').total[watershed_id]
        # if the model uses watersheds, we only want to run the model using
        # the one current watershed.
        watersheds_key = MODELS[model_name]['watersheds_key']
        if watersheds_key is not None:
            config[watersheds_key] = watershed_uri
        watershed_lulc = os.path.join(watershed_workspace,
            'watershed_lulc.tif')
        clip_raster_to_watershed(landuse_uri, watershed_uri, watershed_lulc)
        watershed_base_workspace = os.path.join(watershed_workspace, 'base')
        execute_model(model_name, watershed_lulc, watershed_base_workspace,
            config)
        ws_base_export_uri = os.path.join(watershed_base_workspace,
            MODELS[model_name]['target_raster'])
        ws_base_static_map = os.path.join(
            watershed_workspace,
            'watershed_' + os.path.basename(base_static_map))
        clip_raster_to_watershed(base_static_map, watershed_uri,
            ws_base_static_map)
        # If we're not in the starting watershed, then reset the starting index
        # of the impact site.
        # BUGFIX: this previously compared ws_index against start_impact, so
        # the resume-from-impact offset was applied to the wrong watersheds.
        start_impact = 0 if ws_index != start_ws else start_impact
        for run_number in range(start_impact, num_iterations):
            impact_site_length = random.uniform(500, 3000)
            impact_workspace = os.path.join(watershed_workspace,
                'random_impact_%s' % run_number)
            if os.path.exists(impact_workspace):
                shutil.rmtree(impact_workspace)
            os.makedirs(impact_workspace)
            # make a random impact vector somewhere in the current watershed.
            impact_site = os.path.join(
                impact_workspace,
                'impact_%s.shp' %
                run_number)
            make_random_impact_vector(impact_site, watershed_uri,
                impact_site_length)
            # convert the area under the impact to the correct landcover
            # code(s), run the target model and analyze the outputs.
            converted_landcover = os.path.join(impact_workspace,
                'converted_lulc.tif')
            # If the landcover is a string, we convert to the area under the
            # impact. If the landcover is a number, that's the conversion
            # type.
            convert_impact(impact_site, watershed_lulc, impact_lucode,
                converted_landcover, impact_workspace)
            execute_model(model_name, converted_landcover, impact_workspace,
                config)
            estimates = aggregate_test_results(
                impact_workspace,
                model_name,
                watershed_uri,
                impact_site,
                ws_base_static_map,
                ws_base_export_uri,
                invert=invert)
            # ability to sort based on area of impact site.
            # also record which watershed this run is in, impact site ID as
            # well.
            impact_site_area = get_polygon_area(impact_site)
            values_to_write = [
                watershed_id,
                run_number,
                impact_site_area,
                estimates['static_est'],
                estimates['invest_est'],
                estimates['export_ratio'],
            ]
            # Append incrementally so partial progress survives a crash.
            logfile = open(logfile_uri, 'a')
            logfile.write("%s\n" % ','.join(map(str, values_to_write)))
            logfile.close()
def compute_impact_stats(impact_dir, model_name, watershed_vector,
        base_ws_export, base_static_map):
    """Take an impact directory and the target model name and extract the
    correct information from it.

    impact_dir - a URI to a folder that has been used as an impact workspace.
    model_name - the string name of the model we're using.
    watershed_vector - a URI to an OGR vector of the watershed this impact
        belongs to.
    base_ws_export - the base watershed export (a number)
    base_static_map - a URI to the static map generated from the difference
        between the base sediment model run and when the landscape is converted
        completely over to the target impact type.

    Returns a python dictionary containing extracted stats about the impact."""
    # The impact vector's filename encodes the run number taken from the
    # workspace directory name (e.g. random_impact_3 -> impact_3.shp).
    impact_vector = 'impact_%s.shp' % os.path.basename(
        impact_dir).split('_')[-1]
    impact_site = os.path.join(impact_dir, impact_vector)
    impact_site_area = get_polygon_area(impact_site)
    export_raster = os.path.join(impact_dir,
        MODELS[model_name]['target_raster'])
    # aggregate this impact over the target watershed.
    impact_ws_export = pygeoprocessing.aggregate_raster_values_uri(
        export_raster,
        watershed_vector, 'ws_id').total.values()[0]  # just get the only ws
    # get the sediment export from the base static map under the impacted area.
    # only 1 feature in the impacted area, so we access this number with index
    # 1.
    static_estimate = pygeoprocessing.aggregate_raster_values_uri(
        base_static_map, impact_site, 'id').total[1]
    # If we're running the nutrient model, multiply the sum of the
    # sed_export by the max percent_to_stream under the impact site.
    if model_name in ['nutrient']:
        LOGGER.info('Adjusting export by the % to stream')
        # the percent-to-stream raster for Nitrogen is named
        # "n_percent_to_stream.tif", sediment is just "percent_to_stream.tif"
        # nutrient percent-to-stream is prefixed by 'n_'
        pts_prefix = 'n_'
        percent_to_stream = os.path.join(impact_dir,
            'intermediate', '%spercent_to_stream.tif' % pts_prefix)
        max_percent = pygeoprocessing.aggregate_raster_values_uri(
            percent_to_stream, impact_site,
            'id').pixel_max[1]
        if max_percent is None:
            # No pixels under the impact site; treat as zero contribution.
            LOGGER.debug('Max percent is None, setting to 0')
            max_percent = 0.0
        static_estimate = static_estimate * max_percent
    else:
        LOGGER.info('Not running a routed model, running %s', model_name)
    invest_estimate = impact_ws_export - base_ws_export
    #invest_estimate = base_ws_export - impact_ws_export
    # NOTE(review): division by zero is possible when the two exports are
    # equal -- confirm upstream guarantees invest_estimate != 0.
    export_ratio = static_estimate / invest_estimate
    return {
        'impact_dir': impact_dir,
        'impact_site_area': impact_site_area,
        'static_estimate': static_estimate,
        'invest_estimate': invest_estimate,
        'export_ratio': export_ratio,
        'impact_ws_export': impact_ws_export,
    }
def get_polygon_area(vector):
    """Return the area of the FIRST polygon in the vector at `vector`."""
    datasource = ogr.Open(vector)
    first_feature = datasource.GetLayer().GetFeature(0)
    polygon_area = first_feature.GetGeometryRef().Area()
    # Drop the OGR references so the underlying datasource handle closes.
    first_feature = None
    datasource = None
    return polygon_area
def graph_it(log_file, out_file):
    """Plot simulation ratios vs. impact area from a results CSV.

    log_file - a URI to the impact_site_simulation.csv written by
        test_static_map_quality.
    out_file - a URI where the PNG figure will be saved.
    """
    # Import here so the Agg backend can be forced before pyplot loads.
    import matplotlib
    matplotlib.use('Agg')  # for rendering plots without $DISPLAY set.
    import matplotlib.pyplot as plt
    LOGGER.info('Creating graph from results at %s', log_file)
    LOGGER.debug('Saving image to %s', out_file)
    all_rows = []
    out_of_bounds = []
    opened_log_file = open(log_file)
    opened_log_file.next()  # skip the column headers.
    for line in opened_log_file:
        try:
            values = map(float, line.split(','))
        except ValueError as error:
            # when there's a column with string data that can't be cast to
            # a float (like a column header), skip the row.
            LOGGER.warn(error)
            continue
        ws_id, run_num, impact_area, static_est, invest_est, ratio = values
        # if ratio > 3 or ratio < -3:
        #     out_of_bounds.append(ratio)
        # else:
        #     all_rows.append((impact_area, ratio))
        all_rows.append((impact_area, ratio))
    # smoother with 95 % confidence intervals
    # Sort by impact area so the regression line plots left-to-right.
    all_rows = sorted(all_rows, key=lambda x: x[0])
    areas = [r[0] for r in all_rows]
    ratios = [r[1] for r in all_rows]
    # LOGGER.debug('These values were outliers: %s', out_of_bounds)
    plt.plot(areas, ratios, 'ro')
    plt.xlabel('Impact Site Area (m^2)')
    plt.ylabel('(Static Est. / InVEST Est)')
    areas_np = numpy.array(areas)
    ratios_np = numpy.array(ratios)
    n = len(ratios_np)
    # NOTE(review): scipy.linspace/polyfit/polyval are deprecated aliases of
    # the numpy functions in modern scipy releases.
    t = scipy.linspace(0, max(areas), n)
    # Linear regressison -polyfit - polyfit can be used other orders polys
    (ar, br) = scipy.polyfit(areas_np, ratios_np, 1)
    xr = scipy.polyval([ar, br], t)
    plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    plt.plot(t, xr, 'g--')  # plot the linear regression line.
    plt.savefig(out_file)
def split_datasource(ds_uri, workspace, include_fields=None,
        template_str='feature_%s.shp'):
    """Split the input OGR datasource into a list of datasources, each with a
    single layer containing a single feature.

    ds_uri - a URI to an OGR datasource.
    workspace - a folder to which the output vectors should be saved.  If it
        exists it is removed and recreated.
    include_fields=None - a list of string fields to be copied from the source
        datsource to the destination datasources.  Defaults to an empty list.
    template_str - a template string with a placeholder for a single value.
        All file uris will be named according to this pattern.

    Returns a list of URIs, one for each new vector created."""
    # BUGFIX: the default used to be a mutable list literal, a shared-state
    # hazard; normalize None to a fresh list instead.
    if include_fields is None:
        include_fields = []
    # NOTE(review): include_fields is currently only logged -- every source
    # field is copied regardless.  Confirm whether filtering was intended.
    if os.path.exists(workspace):
        shutil.rmtree(workspace)
    os.makedirs(workspace)
    LOGGER.debug('Opening vector at %s', ds_uri)
    ds = ogr.Open(ds_uri)
    driver_string = 'ESRI Shapefile'
    LOGGER.debug('Splitting datasource into separate shapefiles')
    LOGGER.debug('Vectors will be saved to %s', workspace)
    output_vectors = []
    for layer in ds:
        layer_defn = layer.GetLayerDefn()
        for feature in layer:
            uri_index = feature.GetFID()
            new_vector_uri = os.path.join(workspace, template_str %
                uri_index)
            output_vectors.append(new_vector_uri)
            LOGGER.debug('Creating new shapefile at %s' % new_vector_uri)
            ogr_driver = ogr.GetDriverByName(driver_string)
            temp_shapefile = ogr_driver.CreateDataSource(new_vector_uri)
            LOGGER.debug('SRS: %s, %s', layer.GetSpatialRef(),
                type(layer.GetSpatialRef()))
            layer_name = os.path.splitext(os.path.basename(
                new_vector_uri))[0]
            if isinstance(layer_name, UnicodeType):
                LOGGER.debug('Decoding layer name %s to ASCII', layer_name)
                #layer_name = layer_name.decode('utf-8')
                layer_name = str(layer_name)
            LOGGER.debug('Layer name: %s', layer_name)
            temp_layer = temp_shapefile.CreateLayer(
                layer_name, layer.GetSpatialRef())
            temp_layer_defn = temp_layer.GetLayerDefn()
            # Duplicate the source layer's field definitions.
            for field_index in range(layer_defn.GetFieldCount()):
                original_field = layer_defn.GetFieldDefn(field_index)
                output_field = ogr.FieldDefn(original_field.GetName(),
                    original_field.GetType())
                temp_layer.CreateField(output_field)
            # Create the obligatory ID field.
            # If I don't create the ID field, I can't properly select other
            # fields later on, when I need to set their values.
            id_field = ogr.FieldDefn('id', ogr.OFTInteger)
            temp_layer.CreateField(id_field)
            # Create the new feature with all of the characteristics of the old
            # field except for the fields. Those are brought along separately.
            LOGGER.debug('Creating new feature with duplicate geometry')
            feature_geom = feature.GetGeometryRef()
            temp_feature = ogr.Feature(temp_layer_defn)
            temp_feature.SetFrom(feature)
            # Since there's only one feature in this shapefile, set id to 0.
            id_field_index = temp_feature.GetFieldIndex('id')
            temp_feature.SetField(id_field_index, 0)
            LOGGER.debug('Copying over fields %s', include_fields)
            for field_index in range(layer_defn.GetFieldCount()):
                field_defn = layer_defn.GetFieldDefn(field_index)
                field = field_defn.GetName()
                LOGGER.debug('Adding field "%s"', field)
                # Create the new field in the temp feature
                field_type = field_defn.GetType()
                LOGGER.debug('Field type=%s', field_type)
                LOGGER.debug('Copying field "%s" value to new feature',
                    field)
                temp_feature.SetField(field, feature.GetField(field))
            temp_layer.CreateFeature(temp_feature)
            temp_layer.SyncToDisk()
            # Release OGR handles so the shapefile flushes to disk.
            temp_layer = None
            temp_shapefile = None
        layer.ResetReading()
        layer = None
    ds = None
    ogr_driver = None
    LOGGER.debug('Finished creating the new shapefiles')
    return output_vectors
def convert_impact(impact_uri, base_lulc, impacted_value, converted_lulc_uri,
        workspace):
    """Convert the area under the impact vector to be the value of
    impact_value.

    impact_uri (string) - a filepath to an impact site vector on disk.
    base_lulc (string) - a filepath to the base lulc on disk.
    impacted_value (string or int) - The value to convert to. If an int,
        the value under the impact site will be this landcover code. If
        a string, the value under the impact site will be the pixel values
        of this raster under the impact site.
    converted_lulc_uri (string) - a filepath to where the converted raster
        should be stored.
    workspace (string) - a filepath to a folder where some output rasters
        will be written.

    Returns nothing."""
    # Create a raster mask for the randomized impact site.
    # Any non-nodata pixels underneath the impact site are marked by 1.
    impact_mask = os.path.join(workspace, 'impact_mask.tif')
    lulc_nodata = pygeoprocessing.get_nodata_from_uri(base_lulc)
    lulc_pixel_size = pygeoprocessing.get_cell_size_from_uri(base_lulc)
    lulc_datatype = pygeoprocessing.get_datatype_from_uri(base_lulc)
    def mask_op(values):
        # 1 under the impact site, nodata elsewhere.
        return numpy.where(values != lulc_nodata, 1.0,
            lulc_nodata)
    pygeoprocessing.vectorize_datasets(
        [base_lulc],
        mask_op,
        impact_mask,
        lulc_datatype,
        lulc_nodata,
        lulc_pixel_size,
        'intersection',
        dataset_to_align_index=0,
        aoi_uri=impact_uri,
        vectorize_op=False)
    # attept to cast to an int, since the UI provides the impacted value as a
    # string.
    try:
        impacted_value = int(impacted_value)
    except ValueError:
        # Not numeric; treat it as a raster URI below.
        pass
    if isinstance(impacted_value, basestring):
        LOGGER.debug('Converting values to those of %s', impacted_value)
        def _convert_impact(mask_values, lulc_values, impacted_lulc_values):
            """Convert values under the mask to the future lulc values."""
            return numpy.where(mask_values == 1, impacted_lulc_values,
                lulc_values)
        rasters_list = [impact_mask, base_lulc, impacted_value]
    else:
        LOGGER.debug('Converting values to scalar: %s', impacted_value)
        def _convert_impact(mask_values, lulc_values):
            """Convert values under the mask to the scalar impacted value."""
            return numpy.where(mask_values == 1, impacted_value,
                lulc_values)
        rasters_list = [impact_mask, base_lulc]
    pygeoprocessing.vectorize_datasets(
        rasters_list, _convert_impact, converted_lulc_uri,
        lulc_datatype, lulc_nodata, lulc_pixel_size, 'union',
        dataset_to_align_index=0, vectorize_op=False)
def aggregate_test_results(impact_workspace, model_name, watershed_uri,
        impact_site, base_static_map, base_export, invert):
    """Compare the static-map estimate against the InVEST model estimate.

    impact_workspace - workspace folder of a single simulated impact run.
    model_name - string model name; key into MODELS.
    watershed_uri - URI to the single-feature watershed vector.
    impact_site - URI to the impact site vector (one feature, field 'id').
    base_static_map - URI to the watershed-clipped static map raster.
    base_export - URI to the watershed's base export raster.
    invert - boolean; flips the sign convention of the InVEST estimate.

    Returns a dict with static/invest estimates, their ratio, and the two
    aggregated exports."""
    # get the target raster for the selected ecosystem service.
    export = os.path.join(impact_workspace,
        MODELS[model_name]['target_raster'])
    def _mask_out_pixels(in_raster, comp_raster, out_raster):
        # Copy in_raster, writing nodata wherever comp_raster is nodata, so
        # the two exports are aggregated over identical pixel footprints.
        comp_nodata = pygeoprocessing.get_nodata_from_uri(comp_raster)
        pixel_size = pygeoprocessing.get_cell_size_from_uri(comp_raster)
        def _pixel_mask(_in_values, _out_values):
            return numpy.where(_in_values == comp_nodata,
                comp_nodata, _out_values)
        pygeoprocessing.vectorize_datasets([comp_raster, in_raster],
            _pixel_mask, out_raster, gdal.GDT_Float32, comp_nodata,
            pixel_size, 'union', dataset_to_align_index=0,
            vectorize_op=False)
    # mutually mask out the impacted/base export rasters.
    masked_impact_export = os.path.join(impact_workspace,
        'masked_impacted_export.tif')
    masked_base_export = os.path.join(impact_workspace,
        'masked_base_export.tif')
    _mask_out_pixels(export, base_export, masked_impact_export)
    _mask_out_pixels(base_export, export, masked_base_export)
    # Aggregate the sediment export from this impact simulation over
    # the target watershed
    impact_ws_export = pygeoprocessing.aggregate_raster_values_uri(
        masked_impact_export, watershed_uri, 'ws_id').total.values()[0]
    # Get the sediment export from the static map under the impacted area.
    # only 1 feature in the impactd area, so we access that number with
    # index 1.
    static_estimate = pygeoprocessing.aggregate_raster_values_uri(
        base_static_map, impact_site, 'id').total[1]
    # Get the watershed's base export from the masked version of the
    # watershed's export raster.
    watershed_id = get_watershed_id(watershed_uri)
    base_ws_export = pygeoprocessing.aggregate_raster_values_uri(
        masked_base_export, watershed_uri, 'ws_id').total[watershed_id]
    LOGGER.warning('NOT adjusting by %%-to-stream. model=%s', model_name)
    # This conditional makes the outputs all
    # represent the same thing: positive values are desireable,
    # negative values are not desireable.
    if invert:
        invest_estimate = impact_ws_export - base_ws_export
    else:
        invest_estimate = base_ws_export - impact_ws_export
    # NOTE(review): raises ZeroDivisionError if the two exports are equal.
    export_ratio = static_estimate / invest_estimate
    return {
        'static_est': static_estimate,
        'invest_est': invest_estimate,
        'export_ratio': export_ratio,
        'base_export': base_ws_export,
        'impacted_export': impact_ws_export,
    }
def clip_static_map(map_uri, aoi_uri, out_uri):
    """Clip the input static map by the single polygon in aoi_uri. Saves the
    output raster to out_uri. Values outside of the aoi will be set to nodata.

    map_uri - a URI to a GDAL raster.
    aoi_uri - a URI to an OGR vector. May only contain one polygon.
    out_uri - the URI to which the output raster should be saved.

    Returns nothing."""
    source_nodata = pygeoprocessing.get_nodata_from_uri(map_uri)
    source_pixel_size = pygeoprocessing.get_cell_size_from_uri(map_uri)

    def _identity(pixel_values):
        # Pixel values pass through untouched; the AOI does the clipping.
        return pixel_values

    pygeoprocessing.vectorize_datasets(
        [map_uri],
        _identity,
        out_uri,
        gdal.GDT_Float32,
        source_nodata,
        source_pixel_size,
        'intersection',
        aoi_uri=aoi_uri,
        vectorize_op=False)
| StarcoderdataPython |
6688341 | i = 0
# Increment i until it passes 5; the loop then exits via break and the
# final value (6) is printed.  NOTE: python 2 `print` statement syntax.
while i < 10:
    i = i + 1
    if (i > 5):
        break
print i
| StarcoderdataPython |
11206556 | <gh_stars>1-10
# Small python 2 arithmetic demo (uses `print` statements).
x = y = 1
print x
print y
print x + y
# Rebinding to floats; the sum below prints 8.0.
y = 3.8
x = 4.2
print x + y
# Python 2 integer division: 1/2 == 0, so 4**0 prints 1.
x = 1/2
print 4**x
| StarcoderdataPython |
1986290 | <filename>visual/static_plot.py
"""This script creates a simple static plot of data from the DtssHost via a DtsClient."""
import sys
import os
from tempfile import NamedTemporaryFile
import logging
from shyft.time_series import DtsClient, UtcPeriod, Calendar, TsVector, utctime_now, TimeSeries
from bokeh.plotting import figure, show, output_file
from bokeh.models import DatetimeTickFormatter, Range1d, LinearAxis
import numpy as np
from visual.utils import bokeh_time_from_timestamp, get_xy
from weather.data_sources.netatmo.domain import NetatmoDomain, types
from weather.data_sources.netatmo.repository import NetatmoEncryptedEnvVarConfig
from weather.data_sources.heartbeat import create_heartbeat_request
# Log INFO and above to the console with a thread-aware message format.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
    handlers=[
        logging.StreamHandler()
    ])
# Heartbeat request used to verify that the DTSS server is responsive.
heartbeat = TimeSeries(create_heartbeat_request('static_plot'))

# Decryption password and salt for the encrypted Netatmo credentials are
# supplied on the command line.
env_pass = sys.argv[1]
env_salt = sys.argv[2]

# SECURITY FIX: the password and salt were previously printed to stdout
# here; secrets must never be echoed to the console or logs.

config = NetatmoEncryptedEnvVarConfig(
    username_var='NETATMO_USER',
    password_var='<PASSWORD>',  # NOTE(review): looks like a redacted env var name -- restore the real one.
    client_id_var='NETATMO_ID',
    client_secret_var='NETATMO_SECRET',
    password=env_pass,  # NOTE(review): original value was redacted; env_pass (read above) is the only plausible source.
    salt=env_salt,
)
# Get measurements form domain:
# Build the Netatmo domain model with the decrypted credentials.
domain = NetatmoDomain(
    username=config.username,
    password=config.password,
    client_id=config.client_id,
    client_secret=config.client_secret
)
station = 'Eftasåsen'
module = 'Stua'
# Measurements to plot, each paired with the line color used for its curve
# and axis.
plot_data = [
    {'data': domain.get_measurement(station_name=station, data_type=types.temperature.name, module_name=module),
     'color': '#E64C3E'},  # red
    {'data': domain.get_measurement(station_name=station, data_type=types.co2.name, module_name=module),
     'color': '#B0CA55'},  # green
    {'data': domain.get_measurement(station_name=station, data_type=types.humidity.name, module_name=module),
     'color': '#0F2933'},  # dark green
]
# ('Pressure', 'mbar', point_fx.POINT_INSTANT_VALUE, '#33120F'),  # brown
# ('Noise', 'db', point_fx.POINT_INSTANT_VALUE, '#E39C30'),  # yellow
# ('Rain', 'mm', point_fx.POINT_INSTANT_VALUE, '#448098'),  # light blue
# ('WindStrength', 'km / h', point_fx.POINT_INSTANT_VALUE, '#8816AB'),  # purple
# Get timeseries from measurements:
# Connect to the DTSS server addressed by environment variables.
client = DtsClient(f'{os.environ["DTSS_SERVER"]}:{os.environ["DTSS_PORT_NUM"]}')
# client = DtsClient(f'{socket.gethostname()}:{os.environ["DTSS_PORT_NUM"]}')
tsv = TsVector([meas['data'].time_series for meas in plot_data])
cal = Calendar('Europe/Oslo')
epsilon = 0.1  # padding fraction for each y-axis range.
now = utctime_now()
# Evaluate the most recent three days of data.
period = UtcPeriod(now - cal.DAY*3, now)
data = client.evaluate(tsv, period)
try:
    fig = figure(title=f'Demo plot {cal.to_string(now)}', height=400, width=1400, x_axis_type='datetime')
    fig.line([1, 2, 3, 4, 5], [5, 3, 4, 2, 1])
    # Hide the default axis; each series gets its own colored axis below.
    fig.yaxis.visible = False
    fig.xaxis.formatter = DatetimeTickFormatter(
        months=["%Y %b"],
        days=["%F %H:%M"],
        hours=["%a %H:%M"],
        minutes=["%H:%M"]
    )
    # Alternate the extra axes between the left and right sides.
    axis_switch = ['left', 'right']
    # Create axes:
    for variable in plot_data:
        axis_side = axis_switch[0]
        axis_switch.reverse()
        fig.extra_y_ranges[variable['data'].data_type.name_lower] = Range1d()
        fig.add_layout(
            obj=LinearAxis(
                y_range_name=variable['data'].data_type.name_lower,
                axis_label=f"{variable['data'].data_type.name} [{variable['data'].data_type.unit}]",
                major_label_text_color=variable['color'],
                major_tick_line_color=variable['color'],
                minor_tick_line_color=variable['color'],
                axis_line_color=variable['color'],
                axis_label_text_color=variable['color'],
                axis_label_text_font_style='bold',
            ),
            place=axis_side
        )
    # Plot data:
    x_ranges = []
    for ts, variable in zip(data, plot_data):
        x, y = get_xy(cal, ts)
        x_ranges.extend([min(x), max(x)])
        fig.line(x=x, y=y,
                 color=variable['color'],
                 legend_label=variable['data'].data_type.name,
                 y_range_name=variable['data'].data_type.name_lower,
                 line_width=3)
        # Pad each series' y-range by epsilon of its value span.
        fig.extra_y_ranges[variable['data'].data_type.name_lower].start = np.nanmin(y) - epsilon * (np.nanmax(y) - np.nanmin(y))
        fig.extra_y_ranges[variable['data'].data_type.name_lower].end = np.nanmax(y) + epsilon * (np.nanmax(y) - np.nanmin(y))
    fig.x_range = Range1d(bokeh_time_from_timestamp(cal, period.start), bokeh_time_from_timestamp(cal, period.end))
    # Render to a throwaway HTML file and open it in the browser.
    output_file(NamedTemporaryFile(prefix='netatmo_demo_plot_', suffix='.html').name)
    show(fig)
finally:
    # Always release the DTSS client connection, even if plotting fails.
    del client
from django.db import models
from django.contrib.auth.models import User


class Post(models.Model):
    """A blog post written by a Django auth ``User``.

    ``title`` is unique; ``slug`` is a plain (non-unique) slug field.
    """
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
    is_published = models.BooleanField(default=False)
    title = models.CharField(max_length=100, unique=True)
    # on_delete is a required argument on Django >= 2.0; CASCADE matches the
    # implicit default behavior of the pre-2.0 ForeignKey(User) form.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    content = models.TextField()
    slug = models.SlugField()
# | StarcoderdataPython |
def done_or_not(board):  # board[i][j]
    """Check whether a 9x9 sudoku ``board`` is completely and correctly solved.

    The board is considered solved when every row, every column, and every
    3x3 box contains nine distinct values.

    :param board: 9x9 nested list of ints.
    :returns: ``'Finished!'`` if solved, ``'Try again!'`` otherwise.
    """
    def units(bd):
        """Yield every row, column, and 3x3 box of ``bd`` in turn."""
        for row in bd:            # rows
            yield row
        for col in zip(*bd):      # columns
            yield col
        for r0 in range(0, 9, 3): # 3x3 boxes
            for c0 in range(0, 9, 3):
                yield [bd[r][c]
                       for r in range(r0, r0 + 3)
                       for c in range(c0, c0 + 3)]

    # Replaces the original exception-driven ``while True`` box scan whose
    # bare ``except:`` doubled as the loop exit and would have silently
    # swallowed any genuine error (e.g. a malformed board).
    for unit in units(board):
        if len(set(unit)) != 9:
            return 'Try again!'
    return 'Finished!'
from abc import ABC, abstractmethod


class ICriteriaPayload(ABC):
    """Abstract contract for objects carrying list-query criteria.

    Implementations expose pagination, filter, and sort settings as plain
    dicts, plus the current URL as a string.  Method names keep the original
    camelCase so existing implementers and callers are unaffected.
    (Fix: removed dataset junk fused onto the final ``pass`` line, which made
    the original class a syntax error.)
    """

    @abstractmethod
    def getPagination(self) -> dict:
        """Return the pagination settings as a dict."""

    @abstractmethod
    def getFilter(self) -> dict:
        """Return the filter criteria as a dict."""

    @abstractmethod
    def getSort(self) -> dict:
        """Return the sort settings as a dict."""

    @abstractmethod
    def getCurrentUrl(self) -> str:
        """Return the current URL as a string."""
# repository: zduguid/slocum-nav
# micron_plotter.py
#
# Plotting utilities for Micron Sonar
# 2020-05-22 <EMAIL> initial implementation
import math
import datetime
import numpy as np
import utm
import pandas as pd
import seaborn as sns
import earthpy.plot as ep
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import BathymetryMap
# Map a glider identifier (the ``glider`` argument used throughout this
# module) to the human-readable unit label shown in plot titles.
unit_name = {"sentinel" : "Unit 250",
             "unit_770" : "Unit 770" }
###############################################################################
# PLOT PROFILE
###############################################################################
def plot_profile(ts, glider, save_name=None):
    """Plot the deployment profile: pitch, depth, an altitude estimate, and
    the four per-beam bottom-track vertical ranges, shading time spans where
    all four ranges fall below a danger threshold.

    :param ts: time series with a DataFrame at ``ts.df`` (``pitch``,
        ``depth``, ``btm_beam[0-3]_range`` columns and epoch ``time``).
    :param glider: key into ``unit_name`` used in the plot title.
    :param save_name: optional path for the saved figure; defaults to a
        scratch file.
    """
    # pitch data
    sns.set(font_scale = 1.5)
    pitch = ts.df['pitch']
    pitch.plot(figsize=(15,8), linewidth=3, color='tab:green')
    # depth data (negated so "down" plots downward)
    depth = -1 * ts.df['depth']
    depth.plot(figsize=(15,8), linewidth=3, color='tab:orange')
    # compute altitude estimate from the four vertical range estimates
    # - does not account for pitch and roll of the vehicle
    h1 = ts.df['btm_beam0_range']
    h2 = ts.df['btm_beam1_range']
    h3 = ts.df['btm_beam2_range']
    h4 = ts.df['btm_beam3_range']
    altitude = depth - ((h1*h2)/(h1 + h2) + (h3*h4)/(h3 + h4))
    altitude.plot(linewidth=3, color='tab:blue', zorder=1)
    # bottom_track slant range data
    bt_ranges = [
        'btm_beam0_range',
        'btm_beam1_range',
        'btm_beam2_range',
        'btm_beam3_range'
    ]
    bt_colors = ['powderblue','darkturquoise','lightsteelblue','deepskyblue']
    for i in range(len(bt_ranges)):
        bt_range = depth - ts.df[bt_ranges[i]]
        bt_range.plot(linewidth=1, color=bt_colors[i], zorder=0)
    # shade moments in time where the glider gets dangerously close to bottom
    # (fix: the threshold used to live in ``danger`` and was immediately
    # shadowed by the DataFrame of dangerous rows below)
    window = 5             # [s] width of each shaded span
    danger_threshold = 20  # [m] all four beam ranges must be below this
    danger = ts.df[(ts.df.btm_beam0_range < danger_threshold) &
                   (ts.df.btm_beam1_range < danger_threshold) &
                   (ts.df.btm_beam2_range < danger_threshold) &
                   (ts.df.btm_beam3_range < danger_threshold)]
    for time_stamp in danger.index:
        plt.axvspan(time_stamp, time_stamp + pd.Timedelta(seconds=window),
                    color='tab:red', alpha=0.05)
    # plotting labels
    dt = datetime.datetime.fromtimestamp(ts.df.time[0]).replace(microsecond=0)
    plt.legend(['Pitch [deg]', 'Depth [m]', 'Altitude [m]',
                'Vertical Ranges [m]'], fontsize='small', loc='lower left',
               framealpha=1)
    plt.suptitle('Deployment Profile', fontweight='bold')
    plt.title('%s Kolumbo Volcano %s' % (unit_name[glider], dt.isoformat(),))
    plt.ylabel('Depth [m]')
    plt.xlabel('Time')
    if save_name: plt.savefig(save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
###############################################################################
# PLOT ODOMETRY
###############################################################################
def plot_odometry(ts, glider, save_name=None):
    """Scatter the DVL odometry track (``rel_pos_x`` vs ``rel_pos_y``),
    colored by depth, and save the figure.

    :param ts: time series with a DataFrame at ``ts.df``.
    :param glider: key into ``unit_name`` used in the plot title.
    :param save_name: optional output path; defaults to a scratch file.
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(figsize=(10,8))
    track = ts.df
    sns.scatterplot(
        x=track.rel_pos_x,
        y=track.rel_pos_y,
        palette='viridis_r',
        hue=track.depth,
        linewidth=0,
        s=10,
        data=track)
    # timestamp of the first sample, trimmed to whole seconds, for the title
    start_dt = datetime.datetime.fromtimestamp(track.time[0])
    start_dt = start_dt.replace(microsecond=0)
    plt.axis('equal')
    plt.suptitle('DVL Odometry', fontweight='bold')
    plt.title('%s Kolumbo Volcano %s' % (unit_name[glider],
                                         start_dt.isoformat(),))
    plt.xlabel('x position [m]')
    plt.ylabel('y position [m]')
    out_path = save_name if save_name else '/Users/zduguid/Desktop/fig/tmp.png'
    plt.savefig(out_path)
###############################################################################
# PLOT ODOMETRY (DEAD-RECKONED)
###############################################################################
def plot_m_odometry_dr(ts_flight, glider, save_name=None):
    """Plot the glider's dead-reckoned trajectory (``m_x_lmc``/``m_y_lmc``,
    colored by depth) with GPS fixes overlaid as red crosses.

    :param ts_flight: flight-computer time series with a DataFrame at ``.df``.
    :param glider: key into ``unit_name`` used in the plot title.
    :param save_name: optional output path; defaults to a scratch file.
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(figsize=(10,8))
    # GPS fixes (fix: x/y passed by keyword -- positional data arguments to
    # seaborn.scatterplot are deprecated since seaborn 0.12)
    sns.scatterplot(
        x=ts_flight.df.m_gps_fix_x_lmc,
        y=ts_flight.df.m_gps_fix_y_lmc,
        marker='X',
        color='tab:red',
        s=300
    )
    # dead-reckoned track, colored by depth
    sns.scatterplot(
        x=ts_flight.df.m_x_lmc,
        y=ts_flight.df.m_y_lmc,
        palette='viridis_r',
        hue=ts_flight.df.m_depth,
        linewidth=0,
        s=10,
        data=ts_flight.df
    )
    dt = datetime.datetime.fromtimestamp(
        ts_flight.df.m_present_time[0]).replace(microsecond=0)
    plt.axis('equal')
    plt.suptitle('Dead Reckoned Trajectory', fontweight='bold')
    plt.title('%s Kolumbo Volcano %s' % (unit_name[glider], dt.isoformat(),))
    plt.xlabel('x position [m]')
    plt.ylabel('y position [m]')
    if save_name: plt.savefig(save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
###############################################################################
# PLOT ODOMETRY AND DEAD-RECKONED
###############################################################################
def plot_odometry_and_dr_utm(df_all, glider, save_name=None):
    """Plot dead-reckoned and DVL-odometry tracks in UTM coordinates, with
    GPS fixes and waypoint targets overlaid.

    :param df_all: time-indexed DataFrame with ``utm_dr_*``, ``utm_odo_*``,
        ``utm_gps_*`` and ``utm_wpt_*`` columns.
    :param glider: key into ``unit_name`` used in the plot title.
    :param save_name: optional output path; defaults to a scratch file.
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(figsize=(10,8))
    sns.scatterplot(
        x=df_all.utm_dr_x,   # fix: was ``df`` (undefined name -> NameError)
        y=df_all.utm_dr_y,   # fix: was ``df``
        color='tab:blue',
        label='Dead-Reckoned',
        linewidth=0,
        s=8,
        data=df_all
    )
    sns.scatterplot(
        x=df_all.utm_odo_x,
        y=df_all.utm_odo_y,
        color='tab:orange',
        label='DVL Odometry',
        linewidth=0,
        s=8,
        data=df_all
    )
    sns.scatterplot(
        x=df_all.utm_gps_x,
        y=df_all.utm_gps_y,
        marker='X',
        color='tab:red',
        label='GPS Fix',
        s=200,
        data=df_all,
    )
    sns.scatterplot(
        x=df_all.utm_wpt_x,
        y=df_all.utm_wpt_y,
        marker='o',
        color='tab:green',
        label='Waypoint Target',
        s=100,
        data=df_all,
    )
    # TODO -- can add marker for when TAN is able to recognize a feature
    # resize legend markers so they are visible at figure scale
    lgnd = ax.legend(frameon=True)
    lgnd.legendHandles[0]._sizes = [60]
    lgnd.legendHandles[1]._sizes = [60]
    lgnd.legendHandles[2]._sizes = [200]
    if len(lgnd.legendHandles) == 4:
        lgnd.legendHandles[3]._sizes = [100]
    dt = df_all.index[0].replace(microsecond=0)  # fix: was ``df``
    plt.axis('equal')
    plt.suptitle('DVL Odometry', fontweight='bold')
    plt.title('%s Kolumbo Volcano %s' % (unit_name[glider], dt.isoformat(),))
    plt.xlabel('x position [m]')
    plt.ylabel('y position [m]')
    if save_name: plt.savefig(save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
###############################################################################
# PLOT ODOMETRY AND DEAD-RECKONED
###############################################################################
def plot_odometry_and_dr(ts_pd0, ts_dbd_all, glider, save_name=None):
    """Plot the DVL-odometry track against the glider's dead-reckoned track
    (both in LMC coordinates) with GPS fixes overlaid.

    :param ts_pd0: DVL time series; ``.df`` has epoch ``time`` and
        ``rel_pos_x``/``rel_pos_y`` columns.
    :param ts_dbd_all: flight-computer time series for the whole deployment;
        sliced below to the DVL time window.
    :param glider: key into ``unit_name`` used in the plot title.
    :param save_name: optional output path; defaults to a scratch file.
    """
    # sub-select a portion of glider flight computer variables
    start_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[0])
    end_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[-1])
    dur = end_t - start_t
    df_dbd = ts_dbd_all.df[str(start_t):str(end_t)].copy()
    # initialize the plot
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(figsize=(10,8))
    sns.scatterplot(
        x=df_dbd.m_x_lmc,
        y=df_dbd.m_y_lmc,
        color='tab:blue',
        label='Dead-Reckoned',
        linewidth=0,
        s=8,
        data=df_dbd
    )
    sns.scatterplot(
        x=ts_pd0.df.rel_pos_x,
        y=ts_pd0.df.rel_pos_y,
        color='tab:orange',
        label='DVL Odometry',
        linewidth=0,
        s=8,
        data=ts_pd0.df
    )
    sns.scatterplot(
        x=df_dbd.m_gps_x_lmc,
        y=df_dbd.m_gps_y_lmc,
        marker='X',
        color='tab:red',
        label='GPS Fix',
        s=200,
        data=df_dbd,
    )
    # TODO -- can add marker for when TAN is able to recognize a feature
    # resize legend markers so they are visible at figure scale
    lgnd = ax.legend(frameon=True)
    lgnd.legendHandles[0]._sizes = [60]
    lgnd.legendHandles[1]._sizes = [60]
    lgnd.legendHandles[2]._sizes = [200]
    if len(lgnd.legendHandles) == 4:
        lgnd.legendHandles[3]._sizes = [100]
    dt = df_dbd.index[0].replace(microsecond=0)
    plt.axis('equal')
    plt.suptitle('DVL Odometry', fontweight='bold')
    plt.title('%s Kolumbo Volcano %s' % (unit_name[glider], dt.isoformat(),))
    plt.xlabel('x position [m]')
    plt.ylabel('y position [m]')
    if save_name: plt.savefig(save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
###############################################################################
# PLOT PROFILE AND ODOMETRY
###############################################################################
def plot_profile_and_odometry(ts, glider, save_name=None):
    """Two-panel figure: dive profile (depth, altitude, per-beam vertical
    ranges) on the left, depth-colored DVL odometry on the right.

    :param ts: time series with a DataFrame at ``ts.df``.
    :param glider: key into ``unit_name`` used in the figure title.
    :param save_name: optional output path; defaults to a scratch file.
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(1,2, figsize=(15,8))
    # profile (depth negated so "down" plots downward)
    depth = -1 * ts.df['depth']
    line_plot = depth.plot(figsize=(15,8), linewidth=3, color='tab:orange', ax=ax[0])
    # compute altitude estimate from the four vertical range estimates
    # - does not account for pitch and roll of the vehicle
    h1 = ts.df['btm_beam0_range']
    h2 = ts.df['btm_beam1_range']
    h3 = ts.df['btm_beam2_range']
    h4 = ts.df['btm_beam3_range']
    altitude = depth - ((h1*h2)/(h1 + h2) + (h3*h4)/(h3 + h4))
    altitude.plot(linewidth=3, color='tab:blue', zorder=1, ax=ax[0])
    # bottom_track slant range data
    bt_ranges = [
        'btm_beam0_range',
        'btm_beam1_range',
        'btm_beam2_range',
        'btm_beam3_range'
    ]
    bt_colors = ['powderblue','darkturquoise','lightsteelblue','deepskyblue']
    for i in range(len(bt_ranges)):
        bt_range = depth - ts.df[bt_ranges[i]]
        bt_range.plot(linewidth=1, color=bt_colors[i], zorder=0, ax=ax[0])
    ax[0].set_ylabel('depth [m]')
    ax[0].set_xlabel('time')
    ax[0].set_title('Dive Profile')
    ax[0].legend(['Depth [m]', 'Altitude [m]'], fontsize='small',
        loc='lower left', framealpha=0.5)
    # odometry, colored by depth
    sns.scatterplot(
        x=ts.df.rel_pos_x,
        y=ts.df.rel_pos_y,
        palette='viridis_r',
        hue=ts.df.depth,
        linewidth=0,
        s=10,
        data=ts.df,
        ax=ax[1])
    dt = datetime.datetime.fromtimestamp(ts.df.time[0]).replace(microsecond=0)
    plt.axis('equal')
    plt.suptitle('%s Kolumbo Volcano %s' % (unit_name[glider], dt.isoformat(),), fontweight='bold')
    plt.title('DVL Odometry')
    plt.xlabel('x position [m]')
    plt.ylabel('y position [m]')
    plt.legend(loc='lower right')
    if save_name: plt.savefig(save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
###############################################################################
# PLOT PROFILE AND ODOMETRY AND DEAD-RECKONED
###############################################################################
def plot_profile_and_odometry_and_dr(ts_pd0, ts_dbd_all, save_name=None):
    """Two-panel figure: dive profile on the left; DVL odometry,
    dead-reckoned track, and GPS fixes (all in LMC) on the right.

    :param ts_pd0: DVL time series with a DataFrame at ``.df``.
    :param ts_dbd_all: flight-computer time series for the whole deployment.
    :param save_name: optional file name under the author's fig directory.
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(1,2, figsize=(15,8))
    #############################################
    # PLOT PROFILE ##############################
    #############################################
    depth = -1 * ts_pd0.df['depth']
    depth.plot(figsize=(15,8), linewidth=3, color='tab:orange', ax=ax[0])
    # compute altitude estimate from the four vertical range estimates
    # - does not account for pitch and roll of the vehicle
    h1 = ts_pd0.df['btm_beam0_range']
    h2 = ts_pd0.df['btm_beam1_range']
    h3 = ts_pd0.df['btm_beam2_range']
    h4 = ts_pd0.df['btm_beam3_range']
    altitude = depth - ((h1*h2)/(h1 + h2) + (h3*h4)/(h3 + h4))
    altitude.plot(linewidth=3, color='tab:blue', zorder=1, ax=ax[0])
    # bottom_track slant range data
    bt_ranges = [
        'btm_beam0_range',
        'btm_beam1_range',
        'btm_beam2_range',
        'btm_beam3_range'
    ]
    bt_colors = ['powderblue','darkturquoise','lightsteelblue','deepskyblue']
    for i in range(len(bt_ranges)):
        bt_range = depth - ts_pd0.df[bt_ranges[i]]
        bt_range.plot(linewidth=1, color=bt_colors[i], zorder=0, ax=ax[0])
    ax[0].set_ylabel('Depth [m]')
    ax[0].set_xlabel('Time')
    ax[0].set_title('Dive Profile')
    ax[0].legend(['Depth [m]', 'Altitude [m]'], loc='best',
        frameon=True, framealpha=0.6, fontsize='small')
    #############################################
    # PLOT ODOMETRY AND DEAD-RECKONED ###########
    #############################################
    # sub-select a portion of glider flight computer variables
    start_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[0])
    end_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[-1])
    dur = end_t - start_t
    df_dbd = ts_dbd_all.df[str(start_t):str(end_t)].copy()
    # extract start_t position "origin" from the glider flight data
    for t in range(len(df_dbd)):
        if not np.isnan(df_dbd.m_x_lmc[t]):
            dbd_origin_x_lmc = df_dbd.m_x_lmc[t]
            dbd_origin_y_lmc = df_dbd.m_y_lmc[t]
            break
    # fix: x/y passed by keyword (positional data arguments to
    # seaborn.scatterplot are deprecated since seaborn 0.12)
    sns.scatterplot(
        x=ts_pd0.df.rel_pos_x,
        y=ts_pd0.df.rel_pos_y,
        color='tab:orange',
        label='DVL Odometry',
        linewidth=0,
        s=8,
        data=ts_pd0.df,
        ax=ax[1],
        zorder=2,
    )
    sns.scatterplot(
        x=df_dbd.m_x_lmc - dbd_origin_x_lmc,
        y=df_dbd.m_y_lmc - dbd_origin_y_lmc,
        color='tab:blue',
        label='Dead-Reckoned',
        linewidth=0,
        s=8,
        data=df_dbd,
        ax=ax[1],
        zorder=1,
    )
    sns.scatterplot(
        x=df_dbd.m_gps_x_lmc - dbd_origin_x_lmc,
        y=df_dbd.m_gps_y_lmc - dbd_origin_y_lmc,
        marker='X',
        color='tab:red',
        label='GPS Fix',
        s=200,
        data=df_dbd,
        ax=ax[1],
        zorder=5,
    )
    # TODO -- can add marker for when TAN is able to recognize a feature
    # resize legend markers so they are visible at figure scale
    lgnd = ax[1].legend(frameon=True, framealpha=0.6, loc='best',
        fontsize='small')
    lgnd.legendHandles[0]._sizes = [60]
    lgnd.legendHandles[1]._sizes = [60]
    lgnd.legendHandles[2]._sizes = [200]
    if len(lgnd.legendHandles) == 4:
        lgnd.legendHandles[3]._sizes = [100]
    dt = df_dbd.index[0].replace(microsecond=0)
    plt.axis('equal')
    plt.suptitle('DVL Odometry with Water Column Sensing', fontweight='bold')
    plt.title('Odometry in LMC')
    plt.xlabel('X position [m]')
    plt.ylabel('Y position [m]')
    plt.subplots_adjust(wspace=0.3)
    if save_name: plt.savefig('/Users/zduguid/Desktop/fig/%s' % save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
###############################################################################
# PLOT PROFILE AND ODOMETRY AND DEAD-RECKONED
###############################################################################
def plot_profile_and_navigation(ts_pd0, ts_dbd_all, save_name=None):
    """Two-panel figure: AUG depth and estimated seafloor depth versus time
    on the left; TAN, DVL-odometry, dead-reckoned tracks and GPS fixes (LMC)
    on the right.

    :param ts_pd0: DVL time series with a DataFrame at ``.df``.
    :param ts_dbd_all: flight-computer time series for the whole deployment.
    :param save_name: optional file name under the author's fig directory.
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(1,2, figsize=(15,8))
    #############################################
    # PLOT PROFILE ##############################
    #############################################
    # fix: renamed from ``linewidth`` -- it is a marker size, and the old
    # name collided conceptually with the ``linewidth=0`` kwargs below.
    # Also: x/y passed by keyword (positional seaborn args are deprecated).
    marker_size = 8
    sns.scatterplot(
        x=ts_pd0.df.time,
        y=-ts_pd0.df.depth,
        ax=ax[0],
        linewidth=0,
        s=marker_size,
        label='AUG Depth',
        color='tab:orange',
    )
    sns.scatterplot(
        x=ts_pd0.df.time,
        y=-ts_pd0.df.pc_bathy_depth,
        ax=ax[0],
        linewidth=0,
        s=marker_size,
        label='Seafloor Depth',
        color='tab:blue',
    )
    #############################################
    # PLOT ODOMETRY AND DEAD-RECKONED ###########
    #############################################
    # sub-select a portion of glider flight computer variables
    start_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[0])
    end_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[-1])
    dur = end_t - start_t
    df_dbd = ts_dbd_all.df[str(start_t):str(end_t)].copy()
    # extract start_t position "origin" from the glider flight data
    for t in range(len(df_dbd)):
        if not np.isnan(df_dbd.m_x_lmc[t]):
            dbd_origin_x_lmc = df_dbd.m_x_lmc[t]
            dbd_origin_y_lmc = df_dbd.m_y_lmc[t]
            break
    sns.scatterplot(
        x=ts_pd0.df.tan_pos_x,
        y=ts_pd0.df.tan_pos_y,
        color='tab:orange',
        label='Multi-Factor TAN',
        linewidth=0,
        s=marker_size,
        data=ts_pd0.df,
        ax=ax[1],
        zorder=2,
    )
    sns.scatterplot(
        x=ts_pd0.df.rel_pos_x,
        y=ts_pd0.df.rel_pos_y,
        color='limegreen',
        label='DVL Odometry',
        linewidth=0,
        s=marker_size,
        data=ts_pd0.df,
        ax=ax[1],
        zorder=2,
    )
    sns.scatterplot(
        x=df_dbd.m_x_lmc - dbd_origin_x_lmc,
        y=df_dbd.m_y_lmc - dbd_origin_y_lmc,
        color='mediumorchid',
        label='Dead Reckoned',
        linewidth=0,
        s=marker_size,
        data=df_dbd,
        ax=ax[1],
        zorder=1,
    )
    sns.scatterplot(
        x=df_dbd.m_gps_x_lmc - dbd_origin_x_lmc,
        y=df_dbd.m_gps_y_lmc - dbd_origin_y_lmc,
        marker='X',
        color='tab:red',
        label='GPS Fix',
        s=100,
        data=df_dbd,
        ax=ax[1],
        zorder=5,
    )
    # TODO -- can add marker for when TAN is able to recognize a feature
    # resize legend markers so they are visible at figure scale
    lgnd = ax[0].legend(frameon=True, framealpha=0.6, loc='best',
        fontsize='small')
    lgnd.legendHandles[0]._sizes = [100]
    lgnd.legendHandles[1]._sizes = [100]
    lgnd = ax[1].legend(frameon=True, framealpha=0.6, loc='best',
        fontsize='small')
    lgnd.legendHandles[0]._sizes = [100]
    lgnd.legendHandles[1]._sizes = [100]
    lgnd.legendHandles[2]._sizes = [100]
    lgnd.legendHandles[3]._sizes = [200]
    # reformat epoch-second tick positions as "hh:mm" wall-clock labels
    ticks = ax[0].get_xticks()
    labels = [str(datetime.datetime.fromtimestamp(l)) for l in ticks]
    labels = [l.split(' ',1)[1].rsplit(':',1)[0] for l in labels]
    ax[0].set_xticklabels(labels)
    dt = df_dbd.index[0].replace(microsecond=0)
    plt.axis('equal')
    plt.suptitle('DVL Odometry with Water Column Sensing', fontweight='bold')
    ax[0].set_title('Dive Profile')
    ax[1].set_title('Odometry in LMC')
    plt.xlabel('X position [m]')
    plt.ylabel('Y position [m]')
    plt.subplots_adjust(wspace=0.3)
    if save_name: plt.savefig('/Users/zduguid/Desktop/fig/%s' % save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
###############################################################################
# PLOT PROFILE AND ODOMETRY AND DEAD-RECKONED AND THREE-FACTORS
###############################################################################
def plot_profile_and_odometry_and_dr_and_three_factors(ts_pd0, ts_dbd_all,
    bathy_df, save_name=None):
    """Six-panel figure: the three bathymetric factors (depth, slope,
    orientation) versus time on the left column; the navigation tracks (TAN,
    DVL odometry, dead-reckoned, GPS) over the corresponding bathymetry
    layer on the right column.

    :param ts_pd0: DVL time series with a DataFrame at ``.df``.
    :param ts_dbd_all: flight-computer time series for the whole deployment.
    :param bathy_df: bathymetry table with ``utm_x_list``/``utm_y_list`` and
        ``depth_list``/``slope_list``/``orient_list`` columns.
    :param save_name: optional file name under the author's fig directory.
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(figsize=(15,15))
    #############################################
    # HELPER ####################################
    #############################################
    def get_utm_coords_from_glider_lat_lon(m_lat, m_lon):
        # glider lat/lon are NMEA-style ddmm.mmm values; split off the
        # minutes, convert to decimal degrees, then to UTM
        SECS_IN_MIN = 60
        MIN_OFFSET = 100
        lat_min = m_lat % MIN_OFFSET
        lon_min = m_lon % MIN_OFFSET
        lat_dec = (m_lat - lat_min)/MIN_OFFSET + lat_min/SECS_IN_MIN
        lon_dec = (m_lon - lon_min)/MIN_OFFSET + lon_min/SECS_IN_MIN
        utm_pos = utm.from_latlon(lat_dec, lon_dec)
        easting = round(utm_pos[0],2)
        northing = round(utm_pos[1],2)
        zone = utm_pos[2]
        return(easting, northing, zone)
    #############################################
    # PLOT PROFILE ##############################
    #############################################
    ax0=plt.subplot(3,2,1)
    ax1=plt.subplot(3,2,3)
    ax2=plt.subplot(3,2,5)
    ax3=plt.subplot(3,2,2)
    ax4=plt.subplot(3,2,4)
    ax5=plt.subplot(3,2,6)
    roll_len = 20
    marker_size = 15
    factor_d = -ts_pd0.df.pc_bathy_depth
    factor_s = ts_pd0.df.pc_bathy_slope
    factor_o = ts_pd0.df.pc_bathy_orient
    # fix: x/y passed by keyword (positional seaborn args are deprecated)
    sns.scatterplot(x=ts_pd0.df.time, y=factor_d, ax=ax0, s=marker_size,
        linewidth=0, color='tab:blue', zorder=3)
    sns.scatterplot(x=ts_pd0.df.time, y=factor_s, ax=ax1, s=marker_size,
        linewidth=0, color='tab:purple')
    sns.scatterplot(x=ts_pd0.df.time, y=factor_o, ax=ax2, s=marker_size,
        linewidth=0, color='tab:red')
    # reformat epoch-second tick positions as "hh:mm" wall-clock labels
    ticks = ax0.get_xticks()
    labels = [str(datetime.datetime.fromtimestamp(l)) for l in ticks]
    labels = [l.split(' ',1)[1].rsplit(':',1)[0] for l in labels]
    ax0.set_title('Three-Factors of Seafloor')
    ax0.set_xticklabels([])
    ax1.set_xticklabels([])
    ax2.set_xticklabels(labels)
    ax0.set_xlabel('')
    ax1.set_xlabel('')
    ax2.set_xlabel('Time [hh:mm]')
    ax0.set_ylabel('Depth [m]')
    ax1.set_ylabel('Slope [deg]')
    ax2.set_ylabel('Orientation [deg]')
    xlim0 = ax0.get_xlim()
    # set axis limits so legends will fit
    # NOTE(review): these read bathy_factor_depth/slope while the plots above
    # read pc_bathy_* -- confirm both column families exist on ts_pd0.df
    max_altitude = np.nanmax(ts_pd0.df.bathy_factor_depth)
    max_slope = np.nanmax(ts_pd0.df.bathy_factor_slope)
    ax1.set_xlim(xlim0)
    ax2.set_xlim(xlim0)
    ax0.set_ylim([-max_altitude*1.05, max_altitude*0.2])
    ax1.set_ylim([-5, 75])
    ax2.set_ylim([-200, 240])
    lgnd = ax0.legend(['Seafloor Depth [m]'],
        fontsize='small', loc='upper left', framealpha=0.8)
    lgnd.legendHandles[0]._sizes = [60]
    lgnd = ax1.legend(['Seafloor Slope [deg]'],
        fontsize='small', loc='upper left', framealpha=0.8)
    lgnd.legendHandles[0]._sizes = [60]
    lgnd = ax2.legend(['Seafloor Orientation [deg]'],
        fontsize='small', loc='upper left', framealpha=0.8)
    lgnd.legendHandles[0]._sizes = [60]
    #############################################
    # PLOT ODOMETRY AND DEAD-RECKONED ###########
    #############################################
    # sub-select a portion of glider flight computer variables
    start_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[0])
    end_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[-1])
    dur = end_t - start_t
    df_dbd = ts_dbd_all.df[str(start_t):str(end_t)].copy()
    # extract start_t position "origin" from the glider flight data
    for t in range(len(df_dbd)):
        if not np.isnan(df_dbd.m_x_lmc[t]):
            dbd_origin_x_lmc = df_dbd.m_x_lmc[t]
            dbd_origin_y_lmc = df_dbd.m_y_lmc[t]
            dbd_origin_m_lat = df_dbd.m_lat[t]
            dbd_origin_m_lon = df_dbd.m_lon[t]
            break
    dbd_utm_x, dbd_utm_y, _ = get_utm_coords_from_glider_lat_lon(
        dbd_origin_m_lat,
        dbd_origin_m_lon
    )
    # TODO temp plotting helper: clip slope so the colormap isn't dominated
    pitch_threshold = 30
    tmp_slope = np.array(bathy_df.slope_list)
    tmp_slope[tmp_slope >= pitch_threshold] = pitch_threshold
    # TODO depth filter: clip bathymetry depth to 3x the max vehicle depth
    tmp_depth = bathy_df.depth_list.copy()
    depth_filter = np.nanmax(ts_pd0.df.depth)*3
    tmp_depth[tmp_depth > depth_filter] = depth_filter
    nav_axs = [ax3, ax4, ax5]
    nav_palletes = ['Blues', 'Purples', 'twilight_shifted']
    nav_hues = [tmp_depth, tmp_slope, bathy_df.orient_list]
    nav_xlims = []
    nav_ylims = []
    for i in range(len(nav_axs)):
        sns.scatterplot(
            x=ts_pd0.df.tan_pos_x,
            y=ts_pd0.df.tan_pos_y,
            color='tab:orange',
            label='MF-TAN',
            linewidth=0,
            s=8,
            data=ts_pd0.df,
            ax=nav_axs[i],
            zorder=2,
        )
        sns.scatterplot(
            x=ts_pd0.df.rel_pos_x,
            y=ts_pd0.df.rel_pos_y,
            color='limegreen',
            label='DVL-Odo',
            linewidth=0,
            s=8,
            data=ts_pd0.df,
            ax=nav_axs[i],
            zorder=2,
        )
        sns.scatterplot(
            x=df_dbd.m_x_lmc - dbd_origin_x_lmc,
            y=df_dbd.m_y_lmc - dbd_origin_y_lmc,
            color='hotpink',
            label='DR-DACC',
            linewidth=0,
            s=8,
            data=df_dbd,
            ax=nav_axs[i],
            zorder=1,
        )
        sns.scatterplot(
            x=df_dbd.m_gps_x_lmc - dbd_origin_x_lmc,
            y=df_dbd.m_gps_y_lmc - dbd_origin_y_lmc,
            marker='X',
            color='tab:red',
            label='GPS Fix',
            s=200,
            data=df_dbd,
            ax=nav_axs[i],
            zorder=5,
        )
        nav_axs[i].axis('equal')
        # remember the track extents before the (much larger) bathymetry
        # layer is drawn, so we can restore them afterwards
        nav_xlims.append(nav_axs[i].get_xlim())
        nav_ylims.append(nav_axs[i].get_ylim())
        sns.scatterplot(
            x=bathy_df.utm_x_list - dbd_utm_x,
            y=bathy_df.utm_y_list - dbd_utm_y,
            hue=nav_hues[i],
            marker='s',
            # s=100,
            palette=nav_palletes[i],
            linewidth=0,
            ax=nav_axs[i],
            zorder=0,
            legend=False,
        )
    for i in range(len(nav_axs)):
        # TODO -- can add marker for when TAN is able to recognize a feature
        lgnd = nav_axs[i].legend(frameon=True, framealpha=1,loc='lower left',
            fontsize='small')
        for j in range(3):
            lgnd.legendHandles[j]._sizes = [60]
        nav_axs[i].set_xlim(nav_xlims[i])
        nav_axs[i].set_ylim(nav_ylims[i])
        nav_axs[i].set_ylabel('Y Position [m]')
        nav_axs[i].set_xlabel('')
    ax3.set_title('Navigation in LMC')
    ax5.set_xlabel('X Position [m]')
    # TODO
    # plt.suptitle('Multi-Factor Terrain-Aided Navigation', fontweight='bold')
    plt.suptitle('Bathymetric Factor Extraction for MF-TAN',fontweight='bold')
    plt.subplots_adjust(wspace=0.3)
    if save_name: plt.savefig('/Users/zduguid/Desktop/fig/%s' % save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
    plt.close()
###############################################################################
# PLOT PROFILE AND ODOMETRY AND DEAD-RECKONED AND SLOPE FACTOR
###############################################################################
def plot_profile_and_odometry_and_dr_and_slope_factor(ts_pd0, ts_dbd_all,
    bathy_df, save_name=None):
    """Two-panel figure: dive profile on the left; navigation tracks over a
    slope-colored bathymetry layer (LMC) on the right.

    :param ts_pd0: DVL time series with a DataFrame at ``.df``.
    :param ts_dbd_all: flight-computer time series for the whole deployment.
    :param bathy_df: bathymetry table with UTM positions and ``slope_list``.
    :param save_name: optional file name under the author's fig directory.
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(1,2, figsize=(15,8))
    #############################################
    # HELPER ####################################
    #############################################
    def get_utm_coords_from_glider_lat_lon(m_lat, m_lon):
        # glider lat/lon are NMEA-style ddmm.mmm values; split off the
        # minutes, convert to decimal degrees, then to UTM
        SECS_IN_MIN = 60
        MIN_OFFSET = 100
        lat_min = m_lat % MIN_OFFSET
        lon_min = m_lon % MIN_OFFSET
        lat_dec = (m_lat - lat_min)/MIN_OFFSET + lat_min/SECS_IN_MIN
        lon_dec = (m_lon - lon_min)/MIN_OFFSET + lon_min/SECS_IN_MIN
        utm_pos = utm.from_latlon(lat_dec, lon_dec)
        easting = round(utm_pos[0],2)
        northing = round(utm_pos[1],2)
        zone = utm_pos[2]
        return(easting, northing, zone)
    #############################################
    # PLOT PROFILE ##############################
    #############################################
    depth = -1 * ts_pd0.df['depth']
    depth.plot(figsize=(15,8), linewidth=3, color='tab:orange', ax=ax[0])
    # compute altitude estimate from the four vertical range estimates
    # - does not account for pitch and roll of the vehicle
    h1 = ts_pd0.df['btm_beam0_range']
    h2 = ts_pd0.df['btm_beam1_range']
    h3 = ts_pd0.df['btm_beam2_range']
    h4 = ts_pd0.df['btm_beam3_range']
    altitude = depth - ((h1*h2)/(h1 + h2) + (h3*h4)/(h3 + h4))
    altitude.plot(linewidth=3, color='tab:blue', zorder=1, ax=ax[0])
    # bottom_track slant range data
    bt_ranges = [
        'btm_beam0_range',
        'btm_beam1_range',
        'btm_beam2_range',
        'btm_beam3_range'
    ]
    bt_colors = ['powderblue','darkturquoise','lightsteelblue','deepskyblue']
    for i in range(len(bt_ranges)):
        bt_range = depth - ts_pd0.df[bt_ranges[i]]
        bt_range.plot(linewidth=1, color=bt_colors[i], zorder=0, ax=ax[0])
    ax[0].set_ylabel('Depth [m]')
    ax[0].set_xlabel('Time')
    ax[0].set_title('Dive Profile')
    ax[0].legend(['Depth [m]', 'Altitude [m]'], loc='best',
        frameon=True, framealpha=0.6, fontsize='small')
    #############################################
    # PLOT ODOMETRY AND DEAD-RECKONED ###########
    #############################################
    # sub-select a portion of glider flight computer variables
    start_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[0])
    end_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[-1])
    dur = end_t - start_t
    df_dbd = ts_dbd_all.df[str(start_t):str(end_t)].copy()
    # extract start_t position "origin" from the glider flight data
    for t in range(len(df_dbd)):
        if not np.isnan(df_dbd.m_x_lmc[t]):
            dbd_origin_x_lmc = df_dbd.m_x_lmc[t]
            dbd_origin_y_lmc = df_dbd.m_y_lmc[t]
            dbd_origin_m_lat = df_dbd.m_lat[t]
            dbd_origin_m_lon = df_dbd.m_lon[t]
            break
    dbd_utm_x, dbd_utm_y, _ = get_utm_coords_from_glider_lat_lon(
        dbd_origin_m_lat,
        dbd_origin_m_lon
    )
    # fix: x/y passed by keyword (positional seaborn args are deprecated)
    sns.scatterplot(
        x=ts_pd0.df.rel_pos_x,
        y=ts_pd0.df.rel_pos_y,
        color='tab:orange',
        label='DVL Odometry',
        linewidth=0,
        s=8,
        data=ts_pd0.df,
        ax=ax[1],
        zorder=2,
    )
    sns.scatterplot(
        x=df_dbd.m_x_lmc - dbd_origin_x_lmc,
        y=df_dbd.m_y_lmc - dbd_origin_y_lmc,
        color='tab:blue',
        label='Dead-Reckoned',
        linewidth=0,
        s=8,
        data=df_dbd,
        ax=ax[1],
        zorder=1,
    )
    sns.scatterplot(
        x=df_dbd.m_gps_x_lmc - dbd_origin_x_lmc,
        y=df_dbd.m_gps_y_lmc - dbd_origin_y_lmc,
        marker='X',
        color='tab:red',
        label='GPS Fix',
        s=200,
        data=df_dbd,
        ax=ax[1],
        zorder=5,
    )
    # TODO temp plotting helper: clip slope so the colormap isn't dominated
    pitch_threshold = 30
    tmp_slope_list = np.array(bathy_df.slope_list)
    tmp_slope_list[tmp_slope_list >= pitch_threshold] = pitch_threshold
    plt.axis('equal')
    # remember the track extents before the (much larger) bathymetry layer
    # is drawn, so we can restore them afterwards
    x_lim = ax[1].get_xlim()
    y_lim = ax[1].get_ylim()
    sns.scatterplot(
        x=bathy_df.utm_x_list - dbd_utm_x,
        y=bathy_df.utm_y_list - dbd_utm_y,
        hue=tmp_slope_list,
        marker='s',
        palette='Purples',
        linewidth=0,
        ax=ax[1],
        zorder=0,
        legend=False,
    )
    ax[1].set_xlim(x_lim)
    ax[1].set_ylim(y_lim)
    # resize legend markers so they are visible at figure scale
    lgnd = ax[1].legend(frameon=True, framealpha=0.6, loc='best',
        fontsize='small')
    lgnd.legendHandles[0]._sizes = [60]
    lgnd.legendHandles[1]._sizes = [60]
    lgnd.legendHandles[2]._sizes = [200]
    if len(lgnd.legendHandles) == 4:
        lgnd.legendHandles[3]._sizes = [100]
    dt = df_dbd.index[0].replace(microsecond=0)
    plt.suptitle('DVL Odometry with Water Column Sensing', fontweight='bold')
    plt.title('Odometry in LMC')
    plt.xlabel('X position [m]')
    plt.ylabel('Y position [m]')
    plt.subplots_adjust(wspace=0.3)
    if save_name: plt.savefig('/Users/zduguid/Desktop/fig/%s' % save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
    plt.close()
###############################################################################
# PLOT PROFILE AND ODOMETRY AND DEAD-RECKONED AND BATHYMETRY
###############################################################################
def plot_profile_and_odometry_and_dr_and_bathymetry(ts_pd0, ts_dbd_all,
    bathy_df, save_name=None):
    """Two-panel figure: dive profile on the left; navigation tracks over a
    depth-colored bathymetry layer (LMC) on the right.

    :param ts_pd0: DVL time series with a DataFrame at ``.df``.
    :param ts_dbd_all: flight-computer time series for the whole deployment.
    :param bathy_df: bathymetry table with UTM positions and ``depth_list``.
    :param save_name: optional file name under the author's fig directory.
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(1,2, figsize=(15,8))
    #############################################
    # HELPER ####################################
    #############################################
    def get_utm_coords_from_glider_lat_lon(m_lat, m_lon):
        # glider lat/lon are NMEA-style ddmm.mmm values; split off the
        # minutes, convert to decimal degrees, then to UTM
        SECS_IN_MIN = 60
        MIN_OFFSET = 100
        lat_min = m_lat % MIN_OFFSET
        lon_min = m_lon % MIN_OFFSET
        lat_dec = (m_lat - lat_min)/MIN_OFFSET + lat_min/SECS_IN_MIN
        lon_dec = (m_lon - lon_min)/MIN_OFFSET + lon_min/SECS_IN_MIN
        utm_pos = utm.from_latlon(lat_dec, lon_dec)
        easting = round(utm_pos[0],2)
        northing = round(utm_pos[1],2)
        zone = utm_pos[2]
        return(easting, northing, zone)
    #############################################
    # PLOT PROFILE ##############################
    #############################################
    depth = -1 * ts_pd0.df['depth']
    depth.plot(figsize=(15,8), linewidth=3, color='tab:orange',
        ax=ax[0])
    # compute altitude estimate from the four vertical range estimates
    # - does not account for pitch and roll of the vehicle
    h1 = ts_pd0.df['btm_beam0_range']
    h2 = ts_pd0.df['btm_beam1_range']
    h3 = ts_pd0.df['btm_beam2_range']
    h4 = ts_pd0.df['btm_beam3_range']
    altitude = depth - ((h1*h2)/(h1 + h2) + (h3*h4)/(h3 + h4))
    altitude.plot(linewidth=3, color='tab:blue', zorder=1, ax=ax[0])
    # bottom_track slant range data
    bt_ranges = [
        'btm_beam0_range',
        'btm_beam1_range',
        'btm_beam2_range',
        'btm_beam3_range'
    ]
    bt_colors = ['powderblue','darkturquoise','lightsteelblue','deepskyblue']
    for i in range(len(bt_ranges)):
        bt_range = depth - ts_pd0.df[bt_ranges[i]]
        bt_range.plot(linewidth=1, color=bt_colors[i], zorder=0, ax=ax[0])
    ax[0].set_ylabel('Depth [m]')
    ax[0].set_xlabel('Time')
    ax[0].set_title('Dive Profile')
    ax[0].legend(['Depth [m]', 'Altitude [m]'], loc='best',
        frameon=True, framealpha=0.6, fontsize='small')
    #############################################
    # PLOT ODOMETRY AND DEAD-RECKONED ###########
    #############################################
    # sub-select a portion of glider flight computer variables
    start_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[0])
    end_t = datetime.datetime.fromtimestamp(ts_pd0.df.time[-1])
    dur = end_t - start_t
    df_dbd = ts_dbd_all.df[str(start_t):str(end_t)].copy()
    # extract start_t position "origin" from the glider flight data
    for t in range(len(df_dbd)):
        if not np.isnan(df_dbd.m_x_lmc[t]):
            dbd_origin_x_lmc = df_dbd.m_x_lmc[t]
            dbd_origin_y_lmc = df_dbd.m_y_lmc[t]
            dbd_origin_m_lat = df_dbd.m_lat[t]
            dbd_origin_m_lon = df_dbd.m_lon[t]
            break
    dbd_utm_x, dbd_utm_y, _ = get_utm_coords_from_glider_lat_lon(
        dbd_origin_m_lat,
        dbd_origin_m_lon
    )
    sns.scatterplot(
        x=ts_pd0.df.rel_pos_x,
        y=ts_pd0.df.rel_pos_y,
        color='tab:orange',
        label='DVL Odometry',
        linewidth=0,
        s=8,
        data=ts_pd0.df,
        ax=ax[1],
        zorder=2,
    )
    sns.scatterplot(
        x=df_dbd.m_x_lmc - dbd_origin_x_lmc,
        y=df_dbd.m_y_lmc - dbd_origin_y_lmc,
        color='tab:blue',
        label='Dead-Reckoned',
        linewidth=0,
        s=8,
        data=df_dbd,
        ax=ax[1],
        zorder=1,
    )
    sns.scatterplot(
        x=df_dbd.m_gps_x_lmc - dbd_origin_x_lmc,
        y=df_dbd.m_gps_y_lmc - dbd_origin_y_lmc,
        marker='X',
        color='tab:red',
        label='GPS Fix',
        s=200,
        data=df_dbd,
        ax=ax[1],
        zorder=5,
    )
    plt.axis('equal')
    # remember the track extents before the (much larger) bathymetry layer
    # is drawn, so we can restore them afterwards
    x_lim = ax[1].get_xlim()
    y_lim = ax[1].get_ylim()
    # clip bathymetry depth at 300 m so the colormap isn't dominated
    tmp_depth = bathy_df.depth_list.copy()
    tmp_depth[tmp_depth>300] = 300
    # fix: x/y/hue passed by keyword (positional seaborn args are deprecated)
    sns.scatterplot(
        x=bathy_df.utm_x_list - dbd_utm_x,
        y=bathy_df.utm_y_list - dbd_utm_y,
        # hue=bathy_df.slope_list,
        # palette='Purples',
        hue=tmp_depth,
        palette='Blues',
        # hue=bathy_df.orient_list,
        # palette='twilight_shifted',
        marker='s',
        linewidth=0,
        ax=ax[1],
        zorder=0,
        legend=False,
    )
    ax[1].set_xlim(x_lim)
    ax[1].set_ylim(y_lim)
    # TODO -- can add marker for when TAN is able to recognize a feature
    # resize legend markers so they are visible at figure scale
    lgnd = ax[1].legend(frameon=True, framealpha=0.8, loc='best',
        fontsize='small')
    lgnd.legendHandles[0]._sizes = [60]
    lgnd.legendHandles[1]._sizes = [60]
    lgnd.legendHandles[2]._sizes = [200]
    if len(lgnd.legendHandles) == 4:
        lgnd.legendHandles[3]._sizes = [100]
    dt = df_dbd.index[0].replace(microsecond=0)
    plt.suptitle('DVL Odometry with Water Column Sensing', fontweight='bold')
    plt.title('Odometry in LMC')
    plt.xlabel('X position [m]')
    plt.ylabel('Y position [m]')
    plt.subplots_adjust(wspace=0.3)
    if save_name: plt.savefig('/Users/zduguid/Desktop/fig/%s' % save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
    plt.close()
###############################################################################
# PLOT WATER COLUMN
###############################################################################
def plot_water_column_currents(voc_u_list, voc_v_list, voc_w_list, voc_z_list,
    save_name=None):
    """Plot water-column current estimates in 2D (u-v plane) and 3D (quiver).

    Parameters:
    - voc_u_list, voc_v_list, voc_w_list : eastward, northward, and vertical
      current estimates [m/s] (may contain NaNs)
    - voc_z_list : depth of each estimate [m]
    - save_name : filename inside the fig directory; defaults to 'tmp.png'
    """
    sns.set(font_scale = 1.5)
    fig = plt.figure(figsize=(15,8))
    max_current = 1.0   # symmetric axis limit for both views [m/s]
    # plot ocean currents in u-v plane, colored by depth
    # (removed an unused `c = np.arctan2(...)` computed here in the original)
    ax = fig.add_subplot(1, 2, 1, aspect='equal')
    sns.scatterplot(
        x=voc_u_list,
        y=voc_v_list,
        hue=voc_z_list,
        s=50,
        palette='inferno_r',
    )
    plt.title('Water Column, 2D View')
    plt.xlabel('Eastward [m/s]')
    plt.ylabel('Northward [m/s]')
    ax.set_xlim(-max_current,max_current)
    ax.set_ylim(-max_current,max_current)
    # drop the last legend entry and shrink the legend title
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(title='Depth [m]', fontsize='small', loc='best',
        framealpha=0.6, handles=handles[:-1],
        labels=labels[:-1]).get_title().set_fontsize('small')
    # plot 3D quiver plot
    ax = fig.add_subplot(1, 2, 2, projection='3d')
    # keep only rows where the eastward estimate is defined
    u = voc_u_list[pd.notnull(voc_u_list)]
    v = voc_v_list[pd.notnull(voc_u_list)]
    w = voc_w_list[pd.notnull(voc_u_list)]
    z = voc_z_list[pd.notnull(voc_u_list)]
    x = np.zeros(u.shape)
    y = np.zeros(u.shape)
    # convert data to an RGB color map for the quiver plot: hue encodes the
    # horizontal direction; colors are repeated x3 because each quiver arrow
    # is drawn as three line segments
    c = (np.arctan2(u,v) + np.pi)/(2*np.pi)
    c = np.concatenate((c, np.repeat(c, 2)))
    c = plt.cm.twilight_shifted(c)
    # generate quiver plot (depth plotted as negative z)
    ax.quiver(x, y, -z, u, v, w, colors=c,length=1,normalize=False)
    ax.patch.set_facecolor('white')
    ax.w_xaxis.set_pane_color((234/255, 234/255, 242/255, 1.0))
    ax.w_yaxis.set_pane_color((234/255, 234/255, 242/255, 1.0))
    ax.w_zaxis.set_pane_color((234/255, 234/255, 242/255, 1.0))
    ax.set_xlabel('\n\nEastward [m/s]')
    ax.set_ylabel('\n\nNorthward [m/s]')
    ax.set_zlabel('\n\nDepth [m]')
    ax.azim = -110  # [deg]
    ax.elev = 30    # [deg]
    plt.xlim(-max_current,max_current)
    plt.ylim(-max_current,max_current)
    plt.title('Water Column, 3D View')
    plt.suptitle('Water Column Currents', fontweight='bold')
    if save_name: plt.savefig('/Users/zduguid/Desktop/fig/%s' % save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
    plt.close()  # free the figure, matching the other plotting helpers
###############################################################################
# PLOT EXPLOITATIVE DEPTH BAND SELECTION
###############################################################################
def plot_exploitative_depth_bands(dive_list, climb_list, TC_list,
    glider_heading, voc_u_list, voc_v_list, voc_w_list, voc_z_list,
    save_name=None):
    """Visualize transport cost over candidate depth bands and the current
    profile used to select them.

    Left panel: transport cost of each (climb, dive) depth pair with the
    optimum starred.  Right panel: 3D quiver of water-column currents plus
    the glider heading arrow and the selected depth band.

    Parameters:
    - dive_list, climb_list : candidate dive/climb depths [m]
    - TC_list : transport cost for each candidate pair
    - glider_heading : heading [deg]; assumed in [0, 360) for the filename
    - voc_u_list, voc_v_list, voc_w_list, voc_z_list : water-column current
      estimates (see plot_water_column_currents)
    - save_name : optional filename inside the fig directory; defaults to
      'depth-band-<heading>.png'.  (The original accepted but ignored it.)
    """
    sns.set(font_scale = 1.5)
    fig = plt.figure(figsize=(15,8))
    max_current = 1.0
    # find optimum from the list
    idx_min = np.argmin(TC_list)
    opt_z_dive = dive_list[idx_min]
    opt_z_climb = climb_list[idx_min]
    # clip particularly bad values at the mean so the color scale stays
    # useful (removed unused TC_std / color_list_log from the original)
    TC_list_plot = np.array(TC_list)
    TC_upper = np.mean(TC_list_plot)
    TC_list_plot[TC_list_plot>TC_upper] = TC_upper
    # plot transport cost per (climb, dive) pair
    ax = fig.add_subplot(1, 2, 1, aspect='equal')
    # pad zero depths with a space so the monospace legend stays aligned
    x_str = '-'
    y_str = '-'
    if opt_z_climb == 0: x_str = ' '
    if opt_z_dive == 0: y_str = ' '
    sns.scatterplot(
        x=np.array(climb_list)*-1,
        y=np.array(dive_list)*-1,
        hue=TC_list_plot,
        s=45, marker='s', linewidth=0,
        palette='viridis_r', legend=False
    )
    sns.scatterplot(
        x=[-opt_z_climb],
        y=[-opt_z_dive],
        color='tab:red',
        s=600,
        marker='*',
        label=r"[%s%2d, %s%2d]" % (x_str, opt_z_climb, y_str, opt_z_dive)
    )
    plt.xlabel("Climb Depth [m]")
    plt.ylabel("Dive Depth [m]")
    plt.axis('equal')
    lgnd = plt.legend(title=r"$[z_{climb}, z_{dive}]^*$", framealpha=1)
    lgnd.legendHandles[-1]._sizes = [300]
    plt.setp(lgnd.texts, family="monospace")
    ax.set_title("Transport Cost of Depth Band", fontfamily='monospace')
    # plot 3D quiver of the water-column currents
    ax = fig.add_subplot(1, 2, 2, projection='3d')
    # keep only rows where the eastward estimate is defined
    u = voc_u_list[pd.notnull(voc_u_list)]
    v = voc_v_list[pd.notnull(voc_u_list)]
    w = voc_w_list[pd.notnull(voc_u_list)]
    z = voc_z_list[pd.notnull(voc_u_list)]
    x = np.zeros(u.shape)
    y = np.zeros(u.shape)
    # RGB color map: hue encodes horizontal direction; repeated x3 because
    # each quiver arrow is drawn as three segments
    c = (np.arctan2(u,v) + np.pi)/(2*np.pi)
    c = np.concatenate((c, np.repeat(c, 2)))
    c = plt.cm.twilight_shifted(c)
    # current arrows, heading arrow (black), and chosen depth band (orange)
    heading_x = np.sin(glider_heading*np.pi/180)
    heading_y = np.cos(glider_heading*np.pi/180)
    ax.quiver(x, y, -z, u, v, w, colors=c,length=1,normalize=False)
    ax.quiver(
        0, 0, -np.max(voc_z_list),
        heading_x, heading_y, 0,
        colors='k', linewidth=5, arrow_length_ratio=0.3
    )
    ax.quiver(
        0, 0, -opt_z_climb,
        0, 0, -(opt_z_dive - opt_z_climb),
        colors='tab:orange', linewidth=10, alpha=0.3,
        arrow_length_ratio=0
    )
    ax.patch.set_facecolor('white')
    sns_gray = (234/255, 234/255, 242/255, 1.0)
    ax.w_xaxis.set_pane_color(sns_gray)
    ax.w_yaxis.set_pane_color(sns_gray)
    ax.w_zaxis.set_pane_color(sns_gray)
    ax.set_xlabel('\n\nEastward [m/s]')
    ax.set_ylabel('\n\nNorthward [m/s]')
    ax.set_zlabel('\n\nDepth [m]')
    ax.azim = -110  # [deg]
    ax.elev = 30    # [deg]
    plt.xlim(-max_current,max_current)
    plt.ylim(-max_current,max_current)
    plt.title(r"AUG Heading: %3d$^\circ$" % glider_heading,
        fontfamily='monospace')
    plt.suptitle('Exploitative Depth Band Selection', fontweight='bold')
    if save_name:
        # honor the caller-provided filename (previously ignored)
        plt.savefig('/Users/zduguid/Desktop/fig/%s' % save_name)
    else:
        # %03d zero-pads the heading so filenames sort lexicographically,
        # replacing the original's manual leading-zeros branching
        plt.savefig('/Users/zduguid/Desktop/fig/depth-band-%03d.png' %
            glider_heading)
    plt.close()
###############################################################################
# PLOT VELOCITIES (BOTTOM TRACK)
###############################################################################
def plot_velocity_bottom_track(ts, glider, save_name=None):
    """Scatter bottom-track east vs. north velocity, colored by heading."""
    sns.set(font_scale=1.5)
    fig, ax = plt.subplots(figsize=(10, 8))
    sns.scatterplot(
        x=ts.df.abs_vel_btm_u,
        y=ts.df.abs_vel_btm_v,
        s=30,
        hue=ts.df.heading,
        palette='viridis_r',
        data=ts.df,
    )
    # deployment start time, truncated to whole seconds for the title
    start = datetime.datetime.fromtimestamp(ts.df.time[0]).replace(microsecond=0)
    plt.axis('equal')
    plt.suptitle('Bottom Track Velocities', fontweight='bold')
    plt.title('%s Kolumbo Volcano %s' % (unit_name[glider], start.isoformat(),))
    plt.xlabel('East Velocity [m/s]')
    plt.ylabel('North Velocity [m/s]')
    # identical ticks on both axes keep the equal-aspect plot square
    ticks = np.arange(-1.2, 1.4, 0.2)
    plt.xticks(ticks)
    plt.yticks(ticks)
    plt.savefig(save_name if save_name else '/Users/zduguid/Desktop/fig/tmp.png')
###############################################################################
# PLOT VELOCITIES (EASTWARD)
###############################################################################
def plot_velocity_eastward(ts, glider, save_name=None):
    """Plot the eastward velocity component over time from several sources:
    four DVL water-column bins, bottom track, a pressure-derived estimate,
    and (scaled) pitch for context.  All series use a 30-sample rolling
    window (mean, except rolling median for pitch, as in the original).

    Parameters:
    - ts : time-series object whose ``df`` holds the velocity columns
    - glider : key into ``unit_name`` for the plot title
    - save_name : full path for the saved figure; defaults to 'tmp.png'
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(figsize=(15,8))
    filter_len = 30  # rolling-window length [samples]
    # water-column bins are negated (beam sign convention -> eastward);
    # the repeated scatterplot calls are collapsed into a data-driven loop
    bin_series = [
        ('vel_bin0_beam0', 'lightblue',      'bin 0'),
        ('vel_bin1_beam0', 'deepskyblue',    'bin 1'),
        ('vel_bin2_beam0', 'cornflowerblue', 'bin 2'),
        ('vel_bin3_beam0', 'royalblue',      'bin 3'),
    ]
    for col, color, label in bin_series:
        sns.scatterplot(
            x=ts.df.time,
            y=-ts.df[col].rolling(filter_len).mean(),
            color=color,
            data=ts.df,
            s=10,
            linewidth=0,
            label=label
        )
    sns.scatterplot(
        x=ts.df.time,
        y=ts.df.abs_vel_btm_u.rolling(filter_len).mean(),
        color='tab:orange',
        data=ts.df,
        s=10,
        linewidth=0,
        label='btm'
    )
    sns.scatterplot(
        x=ts.df.time,
        y=ts.df.rel_vel_pressure_u.rolling(filter_len).mean(),
        color='magenta',
        data=ts.df,
        s=10,
        linewidth=0,
        label=r'$\Delta$z/$\Delta$t'  # raw string: avoids invalid-escape warning
    )
    # pitch/100 overlaid to correlate attitude with velocity
    sns.scatterplot(
        x=ts.df.time,
        y=ts.df.pitch.rolling(filter_len).median()/100,
        color='red',
        data=ts.df,
        s=10,
        linewidth=0,
        label='pitch'
    )
    dt = datetime.datetime.fromtimestamp(ts.df.time[0]).replace(microsecond=0)
    plt.suptitle('Eastward Component of Velocity', fontweight='bold')
    plt.title('%s Kolumbo Volcano %s' % (unit_name[glider], dt.isoformat(),))
    plt.xlabel('Time')
    plt.ylabel('Velocity [m/s]')
    if save_name: plt.savefig(save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
###############################################################################
# PLOT VELOCITIES (NORTHWARD)
###############################################################################
def plot_velocity_northward(ts, glider, save_name=None, roll_size=10,
    plt_pressure=True, plt_pitch=True):
    """Plot the northward velocity component over time from several sources:
    six DVL water-column bins, bottom track, and optionally a
    pressure-derived estimate and (scaled) pitch.  All series use a rolling
    median of ``roll_size`` samples.

    Parameters:
    - ts : time-series object whose ``df`` holds the velocity columns
    - glider : key into ``unit_name`` for the plot title
    - save_name : full path for the saved figure; defaults to 'tmp.png'
    - roll_size : rolling-median window length [samples]
    - plt_pressure, plt_pitch : toggle the optional overlays
    """
    sns.set(font_scale = 1.5)
    fig, ax = plt.subplots(figsize=(15,8))
    # water-column bins are negated (beam sign convention -> northward);
    # the repeated scatterplot calls are collapsed into a data-driven loop
    bin_series = [
        ('vel_bin0_beam1', 'lightblue',      'bin 0'),
        ('vel_bin1_beam1', 'deepskyblue',    'bin 1'),
        ('vel_bin2_beam1', 'cornflowerblue', 'bin 2'),
        ('vel_bin3_beam1', 'royalblue',      'bin 3'),
        ('vel_bin4_beam1', 'blue',           'bin 4'),
        ('vel_bin5_beam1', 'darkblue',       'bin 5'),
    ]
    for col, color, label in bin_series:
        sns.scatterplot(
            x=ts.df.time,
            y=-ts.df[col].rolling(roll_size).median(),
            color=color,
            data=ts.df,
            s=10,
            linewidth=0,
            label=label
        )
    sns.scatterplot(
        x=ts.df.time,
        y=ts.df.abs_vel_btm_v.rolling(roll_size).median(),
        color='tab:orange',
        data=ts.df,
        s=10,
        linewidth=0,
        label='btm'
    )
    if plt_pressure:
        sns.scatterplot(
            x=ts.df.time,
            y=ts.df.rel_vel_pressure_v.rolling(roll_size).median(),
            color='magenta',
            data=ts.df,
            s=10,
            linewidth=0,
            label=r'$\Delta$z/$\Delta$t'  # raw string: avoids invalid escape
        )
    if plt_pitch:
        # pitch/100 overlaid to correlate attitude with velocity
        sns.scatterplot(
            x=ts.df.time,
            y=ts.df.pitch.rolling(roll_size).median()/100,
            color='tab:green',
            data=ts.df,
            s=10,
            linewidth=0,
            label='pitch'
        )
    dt = datetime.datetime.fromtimestamp(ts.df.time[0]).replace(microsecond=0)
    plt.suptitle('Northward Component of Velocity', fontweight='bold')
    plt.title('%s Kolumbo Volcano %s' % (unit_name[glider], dt.isoformat(),))
    plt.xlabel('Time')
    plt.ylabel('Velocity [m/s]')
    # (removed a redundant unconditional savefig to tmp.png that ran before
    # the save_name check and caused a double write)
    if save_name: plt.savefig(save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
###############################################################################
# PLOT CORRELATIONS
###############################################################################
def plot_correlations(ts, glider, save_name=None):
    """Plot a heatmap of pairwise correlations between DVL-derived features.

    Parameters:
    - ts : time-series object whose ``df`` DataFrame holds the columns below
    - glider : glider identifier (unused here; kept for signature
      consistency with the other plotting helpers)
    - save_name : full path for the saved figure; defaults to 'tmp.png'
    """
    cols = [
        'pitch',
        'heading',
        'roll',
        'depth',
        'temperature',
        'speed_of_sound',
        'abs_vel_btm_u',
        'abs_vel_btm_v',
        'abs_vel_btm_w',
        'rel_vel_dvl_u',
        'rel_vel_dvl_v',
        'rel_vel_dvl_w',
        'rel_vel_pressure_u',
        'rel_vel_pressure_v',
        'rel_vel_pressure_w',
        'rel_pos_x',
        'rel_pos_y',
        'rel_pos_z',
    ]
    df = ts.df[cols]
    # compute correlations
    corr = df.corr()
    # upper-triangle mask (diagonal kept); retained for the optional
    # ``mask=`` toggle in the heatmap call below.  Note: the original used
    # dtype=np.bool, an alias removed in NumPy 1.24 -- the builtin bool is
    # the supported spelling.
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    mask[np.diag_indices_from(mask)] = False
    # plot heatmap given the correlations
    fig, (ax) = plt.subplots(1, 1, figsize=(15,8))
    sns.heatmap(
        corr,
        ax=ax,
        #mask=mask,
        cmap="coolwarm",
        #square=True,
        annot=True,
        fmt='.2f',
        #annot_kws={"size": 14},
        linewidths=.05
    )
    # fig.subplots_adjust(top=0.93)
    ax.set_title('DVL Feature Correlations', fontsize=22, fontweight='bold')
    if save_name: plt.savefig(save_name)
    else: plt.savefig('/Users/zduguid/Desktop/fig/tmp.png')
| StarcoderdataPython |
4877862 | """
Construction site -- a place for incrementally complex modules that build up to
something we actually want.
There's a package for it so we can have it available for demo/tutorial purposes later.
"""
| StarcoderdataPython |
254820 | import pygame
from enum import EnumMeta
class ControlType:
    """Namespace of string constants identifying input-control categories.

    Note: the original subclassed ``enum.EnumMeta`` -- the *metaclass* of
    Enum -- which only worked incidentally (no enum machinery runs when
    EnumMeta is used as a base class).  These are plain string constants,
    so a plain class is the correct container and the attribute values are
    unchanged for all existing callers.
    """
    KEYBOARD = "ControlType.KEYBOARD"
    BUTTON = "ControlType.BUTTON"
    HAT = "ControlType.HAT"
    AXIS = "ControlType.AXIS"
class Control:
    """A single physical input (key, button, hat, or axis) and its value."""

    def __init__(self, control_type, number, value):
        self.control_type = control_type
        self.number = number  # device sub-index (e.g. which axis/hat)
        # list values (e.g. hat pairs loaded from JSON) are stored as
        # tuples so they compare equal to the tuples pygame reports
        if type(value) is list:
            self.value = tuple(value)
        else:
            self.value = value

    def __eq__(self, other):
        """Compare controls; axis controls match fuzzily.

        Axes compare equal when they are the same axis, have the same sign,
        and their values differ by less than 0.1.  (Fix: the original
        omitted the axis-number check, so e.g. a trigger axis could match a
        stick axis with a similar deflection.)
        """
        if type(other) is type(self):
            if self.control_type == ControlType.AXIS and other.control_type == ControlType.AXIS:
                if self.number != other.number:
                    return False
                if self.value < 0 and other.value < 0 and abs(self.value - other.value) < 0.1:
                    return True
                elif self.value > 0 and other.value > 0 and abs(self.value - other.value) < 0.1:
                    return True
                return False
            return vars(self) == vars(other)
        return False

    @classmethod
    def from_json(cls, data):
        """Build a Control from a dict of constructor keyword arguments."""
        return cls(**data)
class ControlDetector:
    """Blocks until a single input event occurs and reports it as a Control."""

    @staticmethod
    def detect_control():
        """Initialize pygame input, then wait for the next keyboard, button,
        hat, or (strong enough) axis event and return it as a Control."""
        pygame.init()
        pygame.joystick.init()
        # keep references so the joystick objects stay initialized
        sticks = [pygame.joystick.Joystick(i)
                  for i in range(pygame.joystick.get_count())]
        for stick in sticks:
            stick.init()
        # require a large deflection before an axis event counts, so stick
        # noise does not trigger an accidental binding
        axis_threshold = 0.8
        while True:
            for event in pygame.event.get():
                if event.type == pygame.JOYBUTTONDOWN:
                    return Control(ControlType.BUTTON, 0, event.button)
                if event.type == pygame.JOYHATMOTION:
                    return Control(ControlType.HAT, event.hat, event.value)
                if event.type == pygame.JOYAXISMOTION:
                    if abs(event.value) >= axis_threshold:
                        return Control(ControlType.AXIS, event.axis, event.value)
                if event.type == pygame.KEYDOWN:
                    return Control(ControlType.KEYBOARD, 0, pygame.key.name(event.key))
| StarcoderdataPython |
5151724 | # Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from devsim import *
# basic linear circuit solved by itself
# Two circuit unknowns: source node "n1" and divider midpoint "n2".
add_circuit_node(name="n1", variable_update="default")
add_circuit_node(name="n2", variable_update="default")
# 1 V source between "n1" and ground ("0").
circuit_element(name="V1", n1="n1", n2="0", value=1.0)
def myassemble(what, timemode):
    """Custom-equation assembly callback for a two-conductance divider.

    Parameters:
    - what     : which parts to assemble -- "RHS", "MATRIXONLY", or another
                 value meaning both
    - timemode : "DC" or a transient mode; this circuit has no reactive
                 elements, so nothing is assembled outside DC

    Returns (matrix_entries, rhs_entries, flag) where matrix_entries is a
    flat [row, col, value, ...] list, rhs_entries a flat [row, value, ...]
    list, and flag is False.  (Fix: the original returned a 2-element list
    on the non-DC path, inconsistent with the 3-tuple returned below.)
    """
    n1 = get_circuit_equation_number(node="n1")
    n2 = get_circuit_equation_number(node="n2")
    G1 = 1.0  # conductance n1 -> n2
    G2 = 1.0  # conductance n2 -> ground
    rcv = []  # matrix entries: (row, col, value) triples, flattened
    rv = []   # RHS entries: (row, value) pairs, flattened
    if timemode != "DC":
        return rcv, rv, False
    if what != "MATRIXONLY":
        # RHS: branch currents from the current DC operating point
        v1 = get_circuit_node_value(node="n1", solution="dcop")
        v2 = get_circuit_node_value(node="n2", solution="dcop")
        I1 = G1 * (v1 - v2)
        I2 = G2 * v2
        rv.extend([n1, I1])
        rv.extend([n2, I2])
        rv.extend([n2, -I1])
    if what != "RHS":
        # Jacobian: conductance stamps for both branches
        mG1 = -G1
        mG2 = -G2
        rcv.extend([n1, n1, G1])
        rcv.extend([n2, n2, G1])
        rcv.extend([n1, n2, mG1])
        rcv.extend([n2, n1, mG2])
        rcv.extend([n2, n2, G2])
    # debug output preserved from the original
    print(rcv)
    print(rv)
    return rcv, rv, False
# Register the assembly callback and solve the DC operating point.
custom_equation(name="test1", procedure=myassemble)
solve(type="dc", absolute_error=1.0, relative_error=1e-14, maximum_iterations=3)
# Step the source to 2 V and re-solve.
circuit_alter(name="V1", value=2.0)
solve(type="dc", absolute_error=1.0, relative_error=1e-14, maximum_iterations=3)
| StarcoderdataPython |
40700 | <gh_stars>0
def remove_duplicated_keep_order(value_in_tuple):
    """Return a list of the items of *value_in_tuple* with duplicates
    removed, keeping the first occurrence of each item.

    Items must be hashable.  Runs in O(n); the original membership-list
    scan was O(n^2).
    """
    # dict preserves insertion order (Python 3.7+), so fromkeys both
    # deduplicates and keeps first-seen order
    return list(dict.fromkeys(value_in_tuple))
| StarcoderdataPython |
8079437 | <gh_stars>0
from flask import jsonify, request, make_response
from flask_restful import Resource
from flask_expects_json import expects_json
from werkzeug.security import generate_password_hash, check_password_hash
from instance.config import app_config
import datetime
import jwt
from ..utils.user_validations import User_validator
from ..models.user_model import User_Model
from .token import Token
from .main import Initialize
from .json_schema import USER_LOGIN_JSON, USER_JSON
class SignUp(Resource, Initialize):
    '''Sign-up endpoint: lets an authenticated admin register a new user.'''
    @expects_json(USER_JSON)
    @Token.token_required
    def post(current_user, self):
        '''Create a new user from the JSON request body.

        Note: @Token.token_required injects the authenticated caller as the
        first positional argument, which is why the signature reads
        (current_user, self) -- `self` is bound second here.
        '''
        # caller must be an active user with admin rights
        self.restrict1.checkUserStatus(current_user)
        self.restrict1.checkAdminStatus(current_user)
        data = self.restrict1.getJsonData()
        valid = User_validator(data)
        # each validator raises/aborts on failure -- TODO confirm
        valid.validate_missing_data_signup()
        valid.validate_signup_password()
        valid.check_digits()
        # space_strip() presumably returns the whitespace-normalized
        # payload used for the duplicate-user check -- TODO confirm
        user2 = valid.space_strip()
        valid.validate_user_exists(user2)
        # normalize email, hash the password before storage
        email = data["email"].strip().lower()
        password = generate_password_hash(
            data["password"], method='sha256').strip()
        admin = data["admin"]
        # validate the admin flag value
        self.restrict1.checkAdmin(admin)
        user = User_Model(email, password, admin)
        user.save()
        return make_response(jsonify({
            "Message": "User registered",
            "Email": email,
            "Admin": admin
        }), 201)
class Signout(Resource, Initialize):
    """Endpoint that records a logout for the caller's access token."""

    @Token.token_required
    def post(current_user, self):
        """Blacklist the caller's token (when present) and confirm logout."""
        self.restrict1.checkUserStatus(current_user)
        headers = request.headers
        if 'x-access-token' in headers:
            # record the token together with the logout timestamp
            self.item.logout(headers["x-access-token"], datetime.datetime.now())
        return make_response(jsonify({
            "Message": "Successfully logged out"
        }), 200)
class UpdateUser(Resource, Initialize):
    """Endpoint to promote an existing user to administrator."""
    @Token.token_required
    def put(current_user, self, userId):
        '''Promote the user with id *userId* to admin.

        Note: @Token.token_required injects the caller as the first
        positional argument, so `self` is bound second here.
        '''
        # only admins may promote users
        self.restrict1.checkAdminStatus(current_user)
        users = self.item.get()
        for user in users:
            if user["id"] == userId:
                if not user["admin"]:
                    self.item.update(userId)
                    response = make_response(jsonify({
                        "Message": "User updated to Administrator"
                    }), 201)
                else:
                    # NOTE(review): 401 (Unauthorized) is a questionable
                    # status for "already an admin" -- 409 would fit
                    # better; left unchanged to preserve the API contract.
                    response = make_response(jsonify({
                        "message": "User already an admin"
                    }), 401)
                return response
        # no user with the given id
        return self.no_user
class GetUsers(Resource, Initialize):
    """Endpoint returning the full list of registered users."""

    @Token.token_required
    def get(current_user, self):
        """Return all users, or the no-user response when none exist.

        Note: @Token.token_required injects the caller as the first
        positional argument, so `self` is bound second here.
        """
        self.restrict1.checkUserStatus(current_user)
        users = self.item.get()
        if not users:
            return self.no_user
        # Fix: the original returned HTTP 401 (Unauthorized) together with
        # a "Success" body; a successful fetch must be 200.
        return make_response(jsonify({
            "Message": "Success",
            "Users": users
        }), 200)
class Login(Resource, Initialize):
    '''Login endpoint: authenticates a user and issues a JWT token.'''
    @expects_json(USER_LOGIN_JSON)
    def post(self):
        '''Check the supplied credentials against stored users and, on a
        match, return a signed JWT token.'''
        data = self.restrict1.getJsonData()
        email = data["email"].strip()
        password = data["password"].strip()
        valid = User_validator(data)
        valid.validate_empty_items_login()
        users = self.item.get()
        # linear scan over all users; hash comparison guards the password
        for user in users:
            if email == user["email"] and check_password_hash(user["password"],
                                                              password):
                # NOTE(review): the JWT payload embeds the plaintext
                # password, and the token lives 180000 minutes (~125
                # days); both are security concerns worth revisiting.
                token = jwt.encode({"email": email, "password": password,
                                    'exp': datetime.datetime.utcnow() +
                                    datetime.timedelta(minutes=180000)},
                                   app_config["development"].SECRET_KEY,
                                   algorithm='HS256')
                return make_response(jsonify({
                    "message": "Login success",
                    "token": token.decode("UTF-8"
                                          )}), 200)
        # no matching user/password pair
        return self.fail_login
| StarcoderdataPython |
12863983 | from marshmallow import Schema, fields, validate
class ChangePasswordSchema(Schema):
    """Marshmallow schema for a password-change request payload."""
    # numeric id of the account whose password is being changed
    id = fields.Number(attribute="id")
    # current password; 8-256 characters, required
    oldPassword = fields.String(attribute="old_password", validate=validate.Length(min=8, max=256), required=True)
    # new password; 8-256 characters, required
    password = fields.String(attribute="password", validate=validate.Length(min=8, max=256), required=True)
| StarcoderdataPython |
6409802 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import NeuralLayer
class RevealDimension(NeuralLayer):
    """
    Operation for revealing dimension.

    After some dimension-unclear layers such as convolution, the dimension
    information will be lost.  Use this layer to redeclare the input
    dimension so downstream layers can rely on it.
    """
    def __init__(self, dim):
        # `dim` becomes the declared output dimension of this layer
        super(RevealDimension, self).__init__("reveal_dimension")
        self.dim = dim
    def prepare(self):
        # framework hook: publish the fixed dimension before use
        self.output_dim = self.dim
    def compute_tensor(self, x):
        # identity on the data -- only the dimension metadata changes
        return x
11339392 | <filename>DataObjects/Architecture/ArchitectureClassification.py
# Copyright (c) 2021. <NAME>
# Copyright (c) 2021. University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
#
#
class ArchitectureClassification:
    """Classifies an architecture component as a cache, directory, or memory.

    The classification is normalized to one of the canonical k_* labels,
    accepting either the parser's identifiers or the labels themselves.
    """

    k_cache = "Cache"
    k_dir = "Dir"
    k_mem = "Memory"

    def __init__(self, parser, arch_name: str):
        # normalize arch_name into one of the canonical labels
        self.arch_type = self._test_arch(parser, arch_name)

    def _test_arch(self, parser, arch_name: str):
        """Map *arch_name* to a canonical label or raise ValueError."""
        if arch_name == parser.getCacheIdentifier():
            return self.k_cache
        if arch_name == parser.getDirIdentifier():
            return self.k_dir
        if arch_name == parser.getMemIdentifier():
            return self.k_mem
        if arch_name in (self.k_cache, self.k_dir, self.k_mem):
            return arch_name
        # Fix: the original used `assert 0`, which is stripped under -O;
        # invalid input must raise unconditionally.
        raise ValueError(
            "Unknown architecture classification: %s" % arch_name)

    def test_cache(self) -> bool:
        """True when the component was classified as a cache."""
        return self.arch_type == self.k_cache

    def test_dir(self) -> bool:
        """True when the component was classified as a directory."""
        return self.arch_type == self.k_dir

    def test_mem(self) -> bool:
        """True when the component was classified as a memory."""
        return self.arch_type == self.k_mem
| StarcoderdataPython |
327861 | <reponame>asidwell/parcellation_fragmenter
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 3 15:31:06 2016
@author: kristianeschenburg
"""
import networkx as nx
import numpy as np
class SurfaceAdjacency(object):
    """
    Class to generate an adjacency list of a surface mesh representation
    of the brain.

    Parameters:
    - - - - -
    vertices : array
        vertex coordinates
    faces : list
        list of faces in surface (each face is a sequence of vertex indices)
    """
    def __init__(self, vertices, faces):
        self.vertices = vertices
        self.faces = faces
    def generate(self, indices=None):
        """
        Method to create surface adjacency list.

        Builds ``self.adj``: a dict mapping each vertex index of interest
        to the unique list of its neighbors, where two vertices are
        neighbors if they share a face.

        Parameters:
        - - - - -
        indices : array, optional
            subset of vertex indices to restrict the adjacency list to;
            when omitted, every vertex appearing in a face is used.
            NOTE(review): the ``np.any(indices)`` test also treats an
            all-zero index array as "no indices" -- confirm callers never
            pass such an array.
        """
        # Get faces attribute
        faces = self.faces.tolist()
        accepted = np.zeros((self.vertices.shape[0]))
        # get indices of interest
        if not np.any(indices):
            indices = list(np.unique(np.concatenate(faces)))
        indices = np.sort(indices)
        # create array of whether indices are included
        # cancels out search time (O(1) membership test instead of O(n))
        accepted[indices] = 1
        accepted = accepted.astype(bool)
        # Initialize adjacency list
        adj = {k: [] for k in indices}
        # loop over faces in mesh; for each accepted vertex of a face,
        # collect the face's other accepted vertices as its neighbors
        for face in faces:
            for j, vertex in enumerate(face):
                idx = (np.asarray(face) != vertex)
                if accepted[vertex]:
                    nbs = [n for n in np.asarray(face)[idx] if accepted[n]]
                    adj[face[j]].append(nbs)
        # flatten and deduplicate the per-vertex neighbor lists
        for k in adj.keys():
            if adj[k]:
                adj[k] = list(set(np.concatenate(adj[k])))
        # Set adjacency list field
        self.adj = adj
    def filtration(self, filter_indices=None, toArray=False, remap=False):
        """
        Generate a local adjacency list, constrained to a subset of vertices on
        the surface. For each vertex in 'vertices', retain neighbors
        only if they also exist in 'vertices'.

        Parameters:
        - - - - -
        filter_indices : array
            indices to include in sub-graph. If none, returns original graph.
        toArray : bool
            return adjacency matrix of filter_indices
        remap : bool
            remap indices to 0-len(filter_indices).
            NOTE(review): remap only takes effect when filter_indices is
            given -- confirm that is the intended behavior.

        Returns:
        - - - -
        G : array / dictionary
            down-sampled adjacency list / matrix
        """
        # generate() must have been called first
        assert hasattr(self, 'adj')
        if not np.any(filter_indices):
            G = self.adj.copy()
        else:
            filter_indices = np.sort(filter_indices)
            G = {}.fromkeys(filter_indices)
            # keep only neighbors that are themselves in the filter set
            for v in filter_indices:
                G[v] = list(set(self.adj[v]).intersection(set(filter_indices)))
            # map original vertex ids onto 0..len(filter_indices)-1
            ind2sort = dict(zip(
                filter_indices,
                np.arange(len(filter_indices))))
            if remap:
                remapped = {
                    ind2sort[fi]: [ind2sort[nb] for nb in G[fi]]
                    for fi in filter_indices}
                G = remapped
        if toArray:
            # convert to a dense adjacency matrix with rows/cols in
            # sorted node order
            G = nx.from_dict_of_lists(G)
            nodes = G.nodes()
            nodes = np.argsort(nodes)
            G = nx.to_numpy_array(G)
            G = G[nodes, :][:, nodes]
        return G
| StarcoderdataPython |
12827847 | <reponame>ovs-code/HRNet-Human-Pose-Estimation
from . import config, core, models, utils
| StarcoderdataPython |
12832401 | <reponame>crschmidt/crapssim
from sim import TABLE_MIN
def place_6_8(player, point):
    """Place $18 on the 6 and 8 if those bets are not already up.

    *point* is unused but kept for the common strategy-callback signature.
    (Rewritten as a loop for consistency with place_inside; call order and
    amounts are unchanged.)
    """
    for num in (6, 8):
        bet_name = 'place-%s' % num
        if bet_name not in player.current_bets:
            player.bet(bet_name, 18)
def place_inside(player, point):
    """Place the inside numbers: $18 each on 6 and 8, $15 each on 5 and 10.

    *point* is unused but kept for the common strategy-callback signature.
    """
    for num, amount in ((6, 18), (8, 18), (5, 15), (10, 15)):
        name = 'place-%s' % num
        if name not in player.current_bets:
            player.bet(name, amount)
11340477 | <reponame>IDEHCO3/kanban-backend
import requests, os, sys
#se'rvidor = ''
#servidor = 'http://LUC00557347.ibge.gov.br/'
# Base URL prepended to every test URI; alternates above/below are kept
# commented out for other test environments.
SERVER = 'http://LUC00557196:8000/'
#SERVER = "http://172.30.11.72:8000/"
class RequestTest():
    """A single HTTP request expectation: method, full URI, expected status."""

    def __init__(self, uri, expec_status_code, method='GET', default_server=SERVER):
        # the stored URI is always absolute: server prefix + relative path
        self.uri = default_server + uri
        self.expec_status_code = expec_status_code
        self.method = method
# GET requests against plain (non-spatial) resources, with and without
# attribute projection.
arr_get_for_non_spatial_resource = [
    RequestTest("controle-list/usuario-list/1/", 200),
    RequestTest("controle-list/usuario-list/1/nome,email", 200),
    RequestTest("controle-list/usuario-list/1/projection/nome,email", 200),
]
# GET requests against collection resources: counting, paging, grouping,
# attribute filters, and spatial filters (within/contains/overlaps),
# including filters that reference other resources by URL.
arr_get_for_collection = [
    RequestTest('controle-list/gasto-list/count-resource', 200),
    RequestTest('controle-list/gasto-list/offset-limit/1&10', 200),
    RequestTest('controle-list/gasto-list/offset-limit/1&10/data,valor', 400),
    RequestTest('controle-list/gasto-list/group-by-count/tipo_gasto', 200),
    RequestTest('controle-list/gasto-list/filter/tipo_gasto/eq/3', 200),
    RequestTest('api/bcim/unidades-federativas/filter/geom/within/' + SERVER + 'api/bcim/municipios/3159407/geom/*', 200),
    RequestTest('api/bcim/unidades-federativas/?*contains=POINT(-42 -21)', 200),
    RequestTest('api/bcim/unidades-federativas/?*contains=POINT(-42 -21)&sigla=RJ', 200),
    RequestTest('api/bcim/unidades-federativas/?*contains=URL&sigla=RJ', 200),
    RequestTest('api/bcim/unidades-federativas/contains/POINT(-42 -21)', 200),
    RequestTest('api/bcim/aldeias-indigenas/within/POLYGON((-41.8 -21.2,-41.8 -17.8,-28.8 -17.8,-28.8 -21.,-41.8 -21.2))/', 200),
    RequestTest('api/bcim/aldeias-indigenas/within/' + SERVER + 'api/bcim/unidades-federativas/ES/*', 200),
    RequestTest('api/bcim/aldeias-indigenas/within/' + SERVER + 'api/bcim/unidades-federativas/PA/*', 200),
    RequestTest('api/bcim/unidades-federativas/filter/sigla/in/ES&PA/', 200),
    RequestTest('api/bcim/aldeias-indigenas/within/' + SERVER + 'api/bcim/unidades-federativas/ES/*or/within/' + SERVER + 'api/bcim/unidades-federativas/PA/*', 200),
    RequestTest('api/bcim/aldeias-indigenas/filter/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/*or/geom/within/' + SERVER + 'api/bcim/unidades-federativas/PA/*', 200),
    RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*and/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*', 200),
    RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*and/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*', 200),
    RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*and/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*', 200),
    RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*or/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*', 200),
    RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*or/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*', 200),
    RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*or/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*or/' + SERVER + 'api/bcim/unidades-federativas/PR/*', 200),
    RequestTest('api/bcim/municipios/within/{"type":"Polygon","coordinates":[[[-48.759514611370854,-28.3426735036349],[-48.631647133384185,-28.3426735036349],[-48.631647133384185,-28.082673631081306],[-48.759514611370854,-28.082673631081306],[-48.759514611370854,-28.3426735036349]]]}', 200),
    RequestTest('api/bcim/municipios/within/' + SERVER + 'api/bcim/unidades-federativas/ES/*', 200),
    RequestTest('api/bcim/municipios/filter/geom/overlaps/' + SERVER + 'api/bcim/unidades-federativas/ES/*or/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/*and/geocodigo/startswith/32/', 200),
    RequestTest('api/bcim/aldeias-indigenas/within/' + SERVER + 'api/bcim/unidades-federativas/PA/', 200),
    RequestTest('api/bcim/aldeias-indigenas/within/' + SERVER + 'api/bcim/unidades-federativas/PA', 200),
    RequestTest('api/bcim/aldeias-indigenas/collect/nome&geom/buffer/0.5', 200),
    RequestTest('api/bcim/unidades-federativas/filter/sigla/in/RJ&ES/*collect/nome&geom/buffer/0.2', 200),
    RequestTest('api/bcim/aldeias-indigenas/offset-limit/0&2/nome,geom,nomeabrev/*collect/nome&geom/buffer/0.5', 400), # wrong syntax: the server executes only the offset-limit part and ignores the rest
    RequestTest('api/bcim/aldeias-indigenas/offset-limit/0&2/nome,geom/*collect/geom/buffer/0.5', 400), # wrong syntax: the server executes only the offset-limit part and ignores the rest
]
# GET requests exercising the per-feature spatial operations (GEOS-style:
# area, buffer, centroid, relate, transform, ...), some of which reference
# a second feature by URL.
arr_get_for_spatial_operations = [
    RequestTest("api/bcim/unidades-federativas/ES/area", 200),
    RequestTest("api/bcim/unidades-federativas/ES/boundary", 200),
    RequestTest("api/bcim/unidades-federativas/ES/buffer/0.2", 200),
    RequestTest("api/bcim/unidades-federativas/ES/centroid", 200),
    RequestTest("api/bcim/unidades-federativas/ES/contains/" + SERVER + "api/bcim/aldeias-indigenas/587/", 200),
    RequestTest("api/bcim/unidades-federativas/ES/convex_hull", 200),
    RequestTest("api/bcim/aldeias-indigenas/587/coords", 200),
    RequestTest("api/bcim/trechos-hidroviarios/59121/crosses/" + SERVER + "api/bcim/municipios/3126406", 200),
    RequestTest("api/bcim/unidades-federativas/RJ/difference/" + SERVER + "api/bcim/municipios/3304300/", 200),
    RequestTest("api/bcim/unidades-federativas/ES/dims", 200),
    RequestTest("api/bcim/aldeias-indigenas/589/disjoint/" + SERVER + "api/bcim/unidades-federativas/RJ/", 200),
    RequestTest("api/bcim/unidades-federativas/ES/distance/" + SERVER + "api/bcim/unidades-federativas/AM/", 200),
    RequestTest("api/bcim/unidades-federativas/ES/empty", 200),
    RequestTest("api/bcim/unidades-federativas/ES/envelope", 200),
    RequestTest("api/bcim/unidades-federativas/ES/equals/" + SERVER + "api/bcim/unidades-federativas/ES/", 200),
    RequestTest("api/bcim/unidades-federativas/ES/equals_exact/" + SERVER + "api/bcim/unidades-federativas/ES/", 200),
    RequestTest("api/bcim/unidades-federativas/ES/ewkb", 200),
    RequestTest("api/bcim/unidades-federativas/ES/ewkt", 200),
    RequestTest("api/bcim/unidades-federativas/ES/extent", 200),
    RequestTest("api/bcim/unidades-federativas/ES/geom_type", 200),
    RequestTest("api/bcim/unidades-federativas/ES/geom_typeid", 200),
    RequestTest("api/bcim/unidades-federativas/ES/hasz", 200),
    RequestTest("api/bcim/unidades-federativas/ES/hex", 200),
    RequestTest("api/bcim/unidades-federativas/ES/hexewkb", 200),
    RequestTest("api/bcim/unidades-federativas/ES/intersection/" + SERVER + "api/bcim/unidades-federativas/RJ/envelope/", 200),
    RequestTest("api/bcim/unidades-federativas/ES/intersects/" + SERVER + "api/bcim/unidades-federativas/RJ/", 200),
    RequestTest("api/bcim/aldeias-indigenas/587/json", 200),
    RequestTest("api/bcim/aldeias-indigenas/587/kml", 200),
    RequestTest("api/bcim/trechos-hidroviarios/59121/length", 200),
    RequestTest("api/bcim/unidades-federativas/ES/num_geom", 200),
    RequestTest("api/bcim/municipios/3301009/overlaps/" + SERVER + "api/bcim/unidades-federativas/ES", 200),
    RequestTest("api/bcim/unidades-federativas/ES/point_on_surface", 200),
    RequestTest("api/bcim/unidades-federativas/ES/relate/" + SERVER + "api/bcim/unidades-federativas/GO/", 200),
    RequestTest("api/bcim/unidades-federativas/ES/relate_pattern/" + SERVER + "api/bcim/unidades-federativas/GO/&FF*FF****", 200),
    RequestTest("api/bcim/trechos-hidroviarios/59121/ring", 200),
    RequestTest("api/bcim/unidades-federativas/ES/simple", 200),
    RequestTest("api/bcim/unidades-federativas/ES/simplify/0.0&False", 200),
    RequestTest("api/bcim/unidades-federativas/ES/srid", 200),
    RequestTest("api/bcim/unidades-federativas/ES/srs", 200),
    RequestTest("api/bcim/vegetacoes-de-restinga/2947/sym_difference/" + SERVER + "api/bcim/unidades-federativas/ES", 200),
    RequestTest("api/bcim/unidades-federativas/AM/touches/" + SERVER + "api/bcim/unidades-federativas/RJ/", 200),
    RequestTest("api/bcim/unidades-federativas/ES/transform/4326&false", 200),
    RequestTest("api/bcim/unidades-federativas/ES/union/" + SERVER + "api/bcim/unidades-federativas/RJ", 200),
    RequestTest("api/bcim/unidades-federativas/ES/valid", 200),
    RequestTest("api/bcim/unidades-federativas/ES/valid_reason", 200),
    RequestTest("api/bcim/aldeias-indigenas/587/within/" + SERVER + "api/bcim/unidades-federativas/ES/", 200),
    RequestTest("api/bcim/unidades-federativas/ES/wkb", 200),
    RequestTest("api/bcim/unidades-federativas/ES/wkt", 200),
    RequestTest("api/bcim/aldeias-indigenas/589/x", 200),
    RequestTest("api/bcim/aldeias-indigenas/589/y", 200),
    RequestTest("api/bcim/aldeias-indigenas/589/z", 200),
    RequestTest("api/bcim/trechos-hidroviarios/59121/x", 200),
    RequestTest("api/bcim/trechos-hidroviarios/59121/y", 200),
    RequestTest("api/bcim/trechos-hidroviarios/59121/z", 200),
]
# Fixture: GET requests exercising attribute projection together with
# filter / collect / count-resource / offset-limit / distinct chains.
# Expected status codes encode which combinations the server must reject (400).
arr_get_for_projection = [
    # only attributes
    RequestTest("api/bcim/unidades-federativas/nome", 200),
    RequestTest("api/bcim/unidades-federativas/nome/", 200),
    RequestTest("api/bcim/unidades-federativas/nome,geom", 200),
    RequestTest("api/bcim/unidades-federativas/nome,geom/", 200),
    RequestTest("api/bcim/unidades-federativas/projection/nome,geocodigo", 200), # attributes and projection
    RequestTest("api/bcim/unidades-federativas/projection/nome,geocodigo/", 200),
    # filter
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES", 200),
    RequestTest("api/bcim/unidades-federativas/projection/nome,geocodigo/filter/sigla/in/RJ&ES", 200),
    # collect
    RequestTest("api/bcim/unidades-federativas/collect/geom&nome/upper", 200),
    RequestTest("api/bcim/unidades-federativas/projection/geom,nome/collect/geom&nome/upper", 200),
    RequestTest("api/bcim/unidades-federativas/projection/sigla,geocodigo/collect/geom&nome/upper", 400), # collected attributes not in projection (must fail)
    RequestTest("api/bcim/unidades-federativas/projection/sigla,geocodigo/collect/geom&sigla/lower", 400), # operated attribute in projection but lists differs (priorize projection in this case)
    # count_resource
    RequestTest("api/bcim/unidades-federativas/count-resource", 200),
    RequestTest("api/bcim/unidades-federativas/projection/nome,geocodigo/count-resource", 200),
    # filter_and_collect
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES/*collect/geocodigo&sigla/lower", 200),
    RequestTest("api/bcim/unidades-federativas/projection/geocodigo,sigla/filter/sigla/in/RJ&ES/*collect/geocodigo&sigla/lower", 200),
    RequestTest("api/bcim/unidades-federativas/projection/geocodigo,sigla/filter/sigla/in/RJ&ES/*collect/sigla&geom/buffer/0.2", 400), # (must return status code 400)
    # filter_and_count_resource
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES/*count-resource", 200),
    RequestTest("api/bcim/unidades-federativas/projection/nome,geocodigo/filter/sigla/in/RJ&ES/*count-resource", 200),
    # offset_limit
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/", 200),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome,geocodigo/", 400),
    RequestTest("api/bcim/unidades-federativas/projection/geocodigo,sigla/offset-limit/0&2/", 200),
    RequestTest("api/bcim/unidades-federativas/projection/geocodigo,sigla/offset-limit/0&2/sigla,geocodigo/", 400),
    RequestTest("api/bcim/unidades-federativas/projection/geocodigo,sigla/offset-limit/0&2/nome,geocodigo,sigla/", 400), # WRONG SINTAX (SERVER EXECUTE ONLY api/bcim/unidades-federativas/projection/geocodigo,sigla/offset-limit/0/2/ and ignore the rest - act as offset-limit operation)
    # distinct
    RequestTest("controle-list/usuario-list/distinct/email", 200),
    RequestTest("controle-list/usuario-list/distinct/id&nome&email", 200),
    RequestTest("controle-list/usuario-list/projection/nome,email,data_nascimento/distinct/nome&email", 200),
    # offset_limit_and_collect
    RequestTest("api/bcim/unidades-federativas/offset-limit/5&2/collect/sigla&geom/buffer/0.8", 200),
    RequestTest("api/bcim/unidades-federativas/offset-limit/5&2/geom,sigla/*collect/sigla&geom/buffer/0.8", 400),
    RequestTest("api/bcim/unidades-federativas/offset-limit/5&2/sigla,geom,nome/*collect/sigla&geom/buffer/0.8", 400), # WRONG SINTAX (SERVER EXECUTE ONLY api/bcim/unidades-federativas/offset-limit/5/2/ and ignore the rest - act as offset-limit operation)
    RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5&2/collect/sigla&geom/buffer/0.8", 200),
    RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5&2/sigla,geocodigo/*collect/sigla&geom/buffer/0.8", 400), # projection list == collect list != offset_limit list # WRONG SINTAX (SERVER EXECUTE ONLY api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5/2/ and ignore the rest - act as offset-limit operation)
    RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5&2/sigla,geom/*collect/nome&sigla&geom/buffer/0.8", 400), # projection list == offset_limit list != collect list # WRONG SINTAX (SERVER EXECUTE ONLY api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5/2/ and ignore the rest - act as offset-limit operation)
    RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5&2/sigla,geom/collect/sigla&geom/buffer/0.8", 400), # projection list == offset_limit list == collect list # WRONG SINTAX (SERVER EXECUTE ONLY api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5/2/ and ignore the rest - act as offset-limit operation)
    #FeatureCollection operations
    RequestTest("api/bcim/aldeias-indigenas/within/" + SERVER + "api/bcim/unidades-federativas/ES/", 200),
    RequestTest("api/bcim/aldeias-indigenas/projection/nome,nomeabrev/within/" + SERVER + "api/bcim/unidades-federativas/ES/", 200),
    RequestTest("api/bcim/unidades-federativas/contains/" + SERVER + "api/bcim/aldeias-indigenas/623", 200),
    RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/contains/" + SERVER + "api/bcim/aldeias-indigenas/623", 200),
]
# Fixture: chained (complex) requests combining filter + collect + !union! with
# a second, nested URL or an inline WKT polygon.
arr_get_for_complex_requests = [
    #("api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/ES/*collect/geom/buffer/0.2/!union/(" + SERVER + "api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/AM/*collect/geom/buffer/0.2), 200),"
    RequestTest("api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/ES/*collect/geom/buffer/0.2/!union!/" + SERVER + "api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/AM/*collect/geom/buffer/0.2", 200),
    RequestTest("api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/ES/*collect/nome&geom/buffer/0.2/!union!/" + SERVER + "api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/AM/*collect/nome&geom/buffer/0.2", 200),
    RequestTest("api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/AM/*collect/nome&geom/buffer/0.2/!union!/" + SERVER + "api/bcim/unidades-federativas/MG/envelope/", 200),
    RequestTest("api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/AM/*collect/nome&geom/buffer/0.2/!union!/Polygon((-51.04196101779323 -22.915330279829785, -39.86109832699603 -22.915330279829785, -39.86109832699603 -14.227537498798952, -51.04196101779323 -14.227537498798952, -51.04196101779323 -22.915330279829785))", 200),
]
# Fixture: spatial operations applied to whole FeatureCollections (within +
# count-resource / collect), including projection/collect compatibility checks.
arr_get_for_geometry_collection_operation = [
    RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/", 200),
    RequestTest("api/bcim/aldeias-indigenas/projection/nome/within/"+ SERVER +"api/bcim/unidades-federativas/ES/", 200),
    RequestTest("api/bcim/aldeias-indigenas/projection/geom,nome/within/"+ SERVER +"api/bcim/unidades-federativas/ES/", 200),
    RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*count-resource", 200),
    RequestTest("api/bcim/aldeias-indigenas/projection/nome/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*count-resource", 200),
    RequestTest("api/bcim/aldeias-indigenas/projection/nome,geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*count-resource", 200),
    RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/nome/upper", 200),
    RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/geom&nome/upper", 200),
    RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/geom/buffer/1.2", 200),
    RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/nome&geom/buffer/1.2", 200),
    RequestTest("api/bcim/aldeias-indigenas/projection/nome/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/nome/upper", 200),
    RequestTest("api/bcim/aldeias-indigenas/projection/geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/geom/buffer/1.2", 200),
    RequestTest("api/bcim/aldeias-indigenas/projection/geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/nome&geom/buffer/1.2", 400),
    RequestTest("api/bcim/aldeias-indigenas/projection/nome,geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/geom&nome/upper", 200),
    RequestTest("api/bcim/aldeias-indigenas/projection/nome,geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/nome/upper", 400),
    RequestTest("api/bcim/aldeias-indigenas/projection/geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/geom&nome/upper", 400),
]
# Fixture: join operation between Feature/NonSpatial/Collection resources.
# NOTE(review): several entries point at the hard-coded host 172.30.10.86 and
# are environment-dependent; the commented-out variants were earlier attempts.
arr_get_for_join_operation = [
    # NonSpatialResource (1 resource) join FeatureResource (1 resource) (Not joinable)
    #RequestTest("controle-list/usuario-list/1/join/data_nascimento&geocodigo/" + SERVER + "api/bcim/unidades-federativas/ES", 400),
    # NonSpatialResource (1 resource) join FeatureResource (n resources) (Not joinable)
    #RequestTest("controle-list/usuario-list/1/join/data_nascimento&geocodigo/" + SERVER + "api/bcim/unidades-federativas/", 400),
    # FeatureResource (1 resource) join NonSpatialResource (1 resource)
    RequestTest("api/bcim/municipios/3304557/join/geocodigo&geocodigo/http://172.30.10.86/api/munic-2015/planejamento-urbano-list/3243/", 200),
    RequestTest('api/bcim/unidades-federativas/ES/join/geocodigo&uf_geocodigo/{"uf_geocodigo":"32","pib_estimado":1000000000}', 200),
    #("api/bcim/unidades-federativas/ES/join/geocodigo&geocodigo/http://gabriel:8880/estados-list/unidade-federativa-list/2/", 200),
    # FeatureResource (1 resource) join CollectionResource (n resources)
    RequestTest("api/bcim/municipios/3304557/join/geocodigo&cod_municipio/http://172.30.10.86/api/pib-municipio/faturamento-list/filter/cod_municipio/eq/3304557", 200),
    #("api/bcim/unidades-federativas/ES/join/geocodigo&geocodigo/http://gabriel:8880/estados-list/unidade-federativa-list/", 200),
    # FeatureResource join NonSpatialResource (Not joinable)
    RequestTest("api/bcim/municipios/3304557/join/geocodigo&nome/http://172.30.10.86/api/munic-2015/planejamento-urbano-list/3243/", 400),
    #("api/bcim/unidades-federativas/ES/join/geocodigo&nome/http://gabriel:8880/estados-list/unidade-federativa-list/2/", 400),
    # FeatureCollection (n resources) join CollectionResource (n resources)
    RequestTest("api/bcim/unidades-federativas/join/geocodigo&cod_estado/http://172.30.10.86/esporte-list/cond-funcionamento-list/", 200),
    #("api/bcim/unidades-federativas/join/geocodigo&geocodigo/http://gabriel:8880/estados-list/unidade-federativa-list/", 200),
    # CollectionResource (n resources) join FeatureCollection (n resources)
    #("esporte-list/cond-funcionamento-list/join/cod_estado&geocodigo/http://172.30.10.86/api/bcim/unidades-federativas/offset_limit/0&2/geocodigo,nome,geom", 200),
    # FeatureCollection (n resources) join CollectionResource (n resources)
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG/join/geocodigo&cod_estado/http://172.30.10.86/esporte-list/cond-funcionamento-list/filter/cod_estado/in/31&32&33&35/", 200),
]
# Fixture: OPTIONS requests for collection operations, both on plain
# collections (controle-list) and on FeatureCollections (api/bcim).
arr_options_for_collection_operation = [
    RequestTest("controle-list/usuario-list/filter/id/gt/5/", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/collect/nome/upper", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/collect/id&email/upper", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/count-resource", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/offset-limit/0&2", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/offset-limit/0&2/nome", 400, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/offset-limit/0&2/nome,email", 400, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/distinct/nome", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/group-by/nome", 400, method="OPTIONS"), # the operation 'group_by' doesn't exists anymore
    RequestTest("controle-list/usuario-list/group-by-count/nome", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/filter/id/gt/5/*collect/nome/upper", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/filter/id/gt/5/*collect/id&email/upper", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/filter/id/gt/5/*count-resource", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/offset-limit/0&2/collect/nome/upper", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/offset-limit/0&2/collect/id&nome/upper", 200, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/offset-limit/0&2/nome/collect/nome/upper", 400, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/offset-limit/0&2/nome,id/collect/id&nome/upper", 400, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/offset-limit/0&2/nome/collect/id&nome/upper", 400, method="OPTIONS"),
    RequestTest("controle-list/usuario-list/filter/id/gt/5/*count-resource", 200, method="OPTIONS"),
    # Collection operation used by FeatureCollection
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/collect/nome/upper", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/collect/nome&sigla/lower", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/collect/geom&sigla/lower", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/buffer/0.2", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/collect/geom/buffer/0.2", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/collect/geom/area", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/area", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/point_on_surface", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/collect/geom/point_on_surface", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/count-resource", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome,sigla", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome,geom", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/geom", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/distinct/nome", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/group-by/nome", 400, method="OPTIONS"), # the operation 'group_by' doesn't exists anymore
    RequestTest("api/bcim/unidades-federativas/group-by-count/nome", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/nome/upper", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/nome&sigla/lower", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/geom&sigla/lower", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/sigla&geom/buffer/0.2", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/geom/buffer/0.2", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/geom/area", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/sigla&geom/area", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/sigla&geom/point_on_surface", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/geom/point_on_surface", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/nome/upper", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/nome&sigla/lower", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/geom&sigla/lower", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/sigla&geom/buffer/0.2", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/geom/buffer/0.2", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/geom/area", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/sigla&geom/area", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/sigla&geom/point_on_surface", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/geom/point_on_surface", 200, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome/collect/nome/upper", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome,sigla/collect/nome&sigla/lower", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/sigla,geom/collect/geom&sigla/lower", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/sigla,geom/collect/sigla&geom/buffer/0.2", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/geom/collect/geom/buffer/0.2", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/geom/collect/geom/area", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/sigla,geom/collect/sigla&geom/area", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/sigla,geom/collect/sigla&geom/point_on_surface", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/geom/collect/geom/point_on_surface", 400, method="OPTIONS"),
    RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*count-resource", 200, method="OPTIONS"),
]
# The suffixed requests just need simple tests (once requests suffixed with '.jsonld' is just repassed to options() method)
# More complex tests must be applied in OPTIONS requests (without suffix)
arr_get_for_collect_operation_context = [
    RequestTest("controle-list/usuario-list/filter/id/gt/5.jsonld", 200),
    RequestTest("controle-list/usuario-list/collect/nome/upper.jsonld", 200),
    RequestTest("controle-list/usuario-list/collect/id&email/upper.jsonld", 200),
    RequestTest("controle-list/usuario-list/projection/id,email/collect/id&email/upper.jsonld", 200),
    RequestTest("controle-list/usuario-list/projection/email/collect/id&email/upper.jsonld", 400),
    RequestTest("api/bcim/unidades-federativas/collect/nome/upper.jsonld", 200),
    RequestTest("api/bcim/unidades-federativas/collect/nome&sigla/lower.jsonld", 200),
    RequestTest("api/bcim/unidades-federativas/collect/geom&sigla/lower.jsonld", 200),
    RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/buffer/0.2.jsonld", 200),
    RequestTest("api/bcim/unidades-federativas/collect/geom/buffer/0.2.jsonld", 200),
    RequestTest("api/bcim/unidades-federativas/collect/geom/area.jsonld", 200),
    RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/area.jsonld", 200),
    RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/point_on_surface.jsonld", 200),
    RequestTest("api/bcim/unidades-federativas/collect/geom/point_on_surface.jsonld", 200),
    RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/collect/sigla&geom/area.jsonld", 200),
    RequestTest("api/bcim/unidades-federativas/projection/sigla/collect/sigla&geom/area.jsonld", 400),
]
# Fixture: GET requests for each attribute/operation of a raster (Tiff) resource.
arr_get_for_tiff_resource = [
    RequestTest('raster/imagem-exemplo-tile1-list/61/', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/bands', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/destructor', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/driver', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/extent', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/geotransform', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/height', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/info', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/metadata', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/name', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/origin', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/ptr', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/ptr_type', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/scale', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/skew', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/srid', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/srs', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/transform/3086', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/vsi_buffer', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/warp', 200),
    RequestTest('raster/imagem-exemplo-tile1-list/61/width', 200),
]
# Fixture: OPTIONS request for a raster (Tiff) resource.
arr_options_for_tiff_resource = [
    RequestTest('raster/imagem-exemplo-tile1-list/61/', 200, method='OPTIONS')
]
def test_requests(request_test_list, test_label=''):
    """Run every RequestTest against the live server and report mismatches.

    Each entry's URI is requested with its HTTP method (OPTIONS is honoured,
    anything else is issued as GET) and the response status code is compared
    with the expected one.  All mismatches are collected while the run
    progresses and printed again as a summary at the end.

    :param request_test_list: iterable of RequestTest objects exposing
        ``uri``, ``expec_status_code`` and ``method``.
    :param test_label: human-readable label shown in the start/end banners.
    """
    def _print_banner(prefix):
        # Banner width fits "* <prefix> <label> *" between two asterisk rows.
        width = len(test_label) + len(prefix) + 5
        print("\n\n" + width * "*" + "\n* " + prefix + " " + test_label + " *\n" + width * "*" + "\n\n")

    _print_banner("Initializing test set:")
    requests_with_error = []
    for request_test in request_test_list:
        print('Executing: ' + request_test.uri)
        # Only OPTIONS is special-cased; every other method falls back to GET.
        if request_test.method == 'OPTIONS':
            res = requests.options(request_test.uri)
        else:
            res = requests.get(request_test.uri)
        if res.status_code != request_test.expec_status_code:
            print('Failed: ' + request_test.uri + ' ' + str(res.status_code) + ' != ' + str(request_test.expec_status_code) + ' (Expected)')
            requests_with_error.append((request_test.uri, request_test.expec_status_code, res.status_code))
    if requests_with_error:
        print("***************The urls below failed****************")
        for uri, expected_code, received_code in requests_with_error:
            print(uri + ' ' + str(received_code) + ' != ' + str(expected_code) + ' (Expected)')
        print("***************failed urls****************")
    else:
        # Typo fixed: message previously read "Sucess".
        print("*********Success***********")
    _print_banner("End of test set:")
# NOTE: the live HTTP test-set execution below is deliberately disabled by
# wrapping the calls in a triple-quoted string; remove the quotes to run the
# suites against a running server.
'''
test_requests(arr_get_for_non_spatial_resource, test_label = "Tests for NonSpatialResource")
test_requests(arr_get_for_collection, test_label="Generic tests to collection operations")
test_requests(arr_get_for_spatial_operations, test_label="Tests for spatial operations")
test_requests(arr_get_for_complex_requests, test_label="Tests for complex requests")
test_requests(arr_get_for_projection, test_label="Tests for FeatureCollection with and without projection")
test_requests(arr_get_for_geometry_collection_operation, test_label="Tests for spatial collection operations")
#test_requests(arr_get_for_join_operation, test_label="Tests for join operation")
test_requests(arr_options_for_collection_operation, test_label = "Tests OPTIONS for Collection operations")
test_requests(arr_get_for_collect_operation_context, test_label = "Tests GET for Collect operation context")
test_requests(arr_get_for_tiff_resource, test_label = "Tests GET for TiffResource")
test_requests(arr_options_for_tiff_resource, test_label = "Tests OPTIONS for TiffResource")
'''
# Final banner; printed even though the live test sets above are disabled.
print("\n\n" + 25 * "X" + "\nX End of all test sets X\n" + 25 * "X" + "\n\n")
# Command-line driver: passing `-a` runs the Django unit-test suites, each in
# a fresh interpreter via `manage.py test`; NoDbTestRunner avoids creating a
# test database.
args = sys.argv
if '-a' in args:
    # NOTE: the sintax-check and GET suites below are currently disabled
    # (kept inside a triple-quoted string).
    '''
    print("\n\n\n<<< INITIALIZING SINTAX CHECK TEST SET >>>\n")
    print("\n\n<<< Testing GenericOperationsSintaxTest >>>")
    os.system("python manage.py test hyper_resource.tests.GenericOperationsSintaxTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing CollectionOperationsSintaxTest >>>")
    os.system("python manage.py test hyper_resource.tests.CollectionOperationsSintaxTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    # GET Tests
    print("\n\n\n<<< INITIALIZING GET TEST SET >>>\n")
    print("\n\n<<< Testing CollectOperationTest >>>")
    os.system("python manage.py test hyper_resource.tests.CollectOperationTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing GroupBySumOperationTest >>>")
    os.system("python manage.py test hyper_resource.tests.GroupBySumOperationTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing ProjectionOperationTest >>>")
    os.system("python manage.py test hyper_resource.tests.ProjectionOperationTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing FilterOperationTest >>>")
    os.system("python manage.py test hyper_resource.tests.FilterOperationTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    #print("\n\n<<< Testing JoinOperationTest >>>")
    #os.system("python manage.py test hyper_resource.tests.JoinOperationTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing EntryPointTest >>>")
    os.system("python manage.py test hyper_resource.tests.EntryPointTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing RasterTest >>>")
    os.system("python manage.py test hyper_resource.tests.RasterTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing FeatureCollectionTest >>>")
    os.system("python manage.py test hyper_resource.tests.FeatureCollectionTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing FeatureResourceTest >>>")
    os.system("python manage.py test hyper_resource.tests.FeatureResourceTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    '''
    # OPTIONS Tests
    print("\n\n\n<<< INITIALIZING OPTIONS TEST SET >>>\n")
    print("\n\n<<< Testing OptionsForCollectOperationTest >>>")
    os.system("python manage.py test hyper_resource.tests.OptionsForCollectOperationTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    #print("\n\n<<< Testing OptionsForProjectionOperation >>>")
    #os.system("python manage.py test hyper_resource.tests.OptionsForProjectionOperation --testrunner=hyper_resource.tests.NoDbTestRunner")
    #print("\n\n<<< Testing OptionsForJoinOperationTest >>>")
    #os.system("python manage.py test hyper_resource.tests.OptionsForJoinOperationTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing OptionsEntryPointTest >>>")
    os.system("python manage.py test hyper_resource.tests.OptionsEntryPointTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing OptionsForRasterTest >>>")
    os.system("python manage.py test hyper_resource.tests.OptionsForRasterTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    #print("\n\n<<< Testing OptionsFeatureCollectionTest >>>")
    #os.system("python manage.py test hyper_resource.tests.OptionsFeatureCollectionTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing RequestOptionsTest >>>")
    os.system("python manage.py test hyper_resource.tests.RequestOptionsTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    #print("\n\n<<< Testing GetRequestContextTest >>>")
    #os.system("python manage.py test hyper_resource.tests.GetRequestContextTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    #print("\n\n<<< Testing OptionsFeatureResourceTest >>>")
    #os.system("python manage.py test hyper_resource.tests.OptionsFeatureResourceTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing OptionsCollectionResource >>>")
    os.system("python manage.py test hyper_resource.tests.OptionsCollectionResource --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing OptionsNonSpatialResource >>>")
    os.system("python manage.py test hyper_resource.tests.OptionsNonSpatialResource --testrunner=hyper_resource.tests.NoDbTestRunner")
    # HEAD Tests
    print("\n\n\n<<< INITIALIZING HEAD TEST SET >>>\n")
    print("\n\n<<< Testing HeadEntryPointTest >>>")
    os.system("python manage.py test hyper_resource.tests.HeadEntryPointTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing HeadFeatureCollectionTest >>>")
    os.system("python manage.py test hyper_resource.tests.HeadFeatureCollectionTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing AllowedMethodsForEntryPoint >>>")
    os.system("python manage.py test hyper_resource.tests.AllowedMethodsForEntryPoint --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing AllowedMethodsForNonSpatialResource >>>")
    os.system("python manage.py test hyper_resource.tests.AllowedMethodsForNonSpatialResource --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing AllowedMethodsForCollectionResource >>>")
    os.system("python manage.py test hyper_resource.tests.AllowedMethodsForCollectionResource --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing AllowedMethodsForTiffCollectionResource >>>")
    os.system("python manage.py test hyper_resource.tests.AllowedMethodsForTiffCollectionResource --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing AllowedMethodsForTiffResourceTest >>>")
    os.system("python manage.py test hyper_resource.tests.AllowedMethodsForTiffResourceTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing AllowedMethodsForFeatureResourceTest >>>")
    os.system("python manage.py test hyper_resource.tests.AllowedMethodsForFeatureResourceTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing AllowedMethodsForFeatureCollectionResourceTest >>>")
    os.system("python manage.py test hyper_resource.tests.AllowedMethodsForFeatureCollectionResourceTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing HeadFeatureResourceTest >>>")
    os.system("python manage.py test hyper_resource.tests.HeadFeatureResourceTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    # Not classified
    print("\n\n\n<<< INITIALIZING NOT CLASSIFIED TEST SET >>>\n")
    print("\n\n<<< Testing PaginationTest >>>")
    os.system("python manage.py test hyper_resource.tests.PaginationTest --testrunner=hyper_resource.tests.NoDbTestRunner")
    print("\n\n<<< Testing LinkHeaderTest >>>")
    os.system("python manage.py test hyper_resource.tests.LinkHeaderTest --testrunner=hyper_resource.tests.NoDbTestRunner")
import random
import unittest
import uuid
from collections import defaultdict
from pathlib import Path
from typing import List
import numpy as np
from mapel.main.features.common import extract_selected_coordinates_from_experiment, extract_selected_distances, \
extract_calculated_distances, MockExperiment
from mapel.main.objects.Experiment import Experiment
def _close_zero(number, e=1e-6):
return e if number <= e else number
def calculate_distortion(experiment: Experiment, election_ids: List[str] = None, max_distance_percentage=1.0):
    """
    Vectorised (numpy) mean embedding distortion per election.

    For every pair of elections, distortion = max(target, embedded) /
    min(target, embedded) distance; pairs whose target distance exceeds
    ``max(target distances) * max_distance_percentage`` are excluded.

    :param max_distance_percentage: fraction of the maximum target distance
        above which pairs are ignored
    :param experiment
    :param election_ids: list of elections to take into consideration. If none, takes all.
    :return: dict {election_id: mean distortion}
    """
    if election_ids is None:
        election_ids = list(experiment.distances.keys())
    n = len(election_ids)
    coordinates = extract_selected_coordinates_from_experiment(experiment, election_ids)
    distances = extract_selected_distances(experiment, election_ids)
    # Pairwise Euclidean distances between the embedded points, shape (n, n).
    calculated_distances = np.linalg.norm(coordinates[:, np.newaxis] - coordinates[np.newaxis, :], axis=2)
    max_distance_matrix = np.max([distances, calculated_distances], axis=0)
    min_distance_matrix = np.min([distances, calculated_distances], axis=0)
    max_distance = np.max(distances)
    # Pairs too far apart (in target space) to be counted.
    bad_distances_mask = distances > max_distance * max_distance_percentage
    # The diagonal would be 0/0: set the denominator to 1 so division is
    # defined, then zero the diagonal of the result below.
    np.fill_diagonal(min_distance_matrix, 1)
    distortion_matrix = max_distance_matrix / min_distance_matrix
    np.fill_diagonal(distortion_matrix, 0)
    distortion_matrix[bad_distances_mask] = 0
    # Average over the pairs actually counted (self and masked pairs excluded).
    mean_distortion = np.sum(distortion_matrix, axis=1) / (n - 1 - bad_distances_mask.sum(axis=1))
    return {
        election: mean_distortion[i] for i, election in enumerate(election_ids)
    }
def calculate_distortion_naive(experiment: Experiment, election_ids: List[str] = None, max_distance_percentage=1.0):
    """
    Naive O(n^2) reference implementation of :func:`calculate_distortion`.

    :param experiment: experiment providing target distances and coordinates
    :param election_ids: list of elections to take into consideration.
        If None, takes all (mirrors :func:`calculate_distortion`).
    :param max_distance_percentage: pairs whose target distance exceeds
        ``max(distances) * max_distance_percentage`` are ignored
    :return: dict {election_id: mean distortion}
    """
    # Bug fix: the declared default of None previously crashed on
    # len(election_ids); resolve it the same way calculate_distortion does.
    if election_ids is None:
        election_ids = list(experiment.distances.keys())
    coordinates = extract_selected_coordinates_from_experiment(experiment, election_ids)
    desired_distances = extract_selected_distances(experiment, election_ids)
    calculated_distances = extract_calculated_distances(coordinates)
    max_distance = np.max(desired_distances)
    allowed_distance = max_distance * max_distance_percentage
    distortions = defaultdict(list)
    n = len(election_ids)
    for i in range(n):
        for j in range(i + 1, n):
            d1 = desired_distances[i, j]
            if d1 <= allowed_distance:
                d2 = calculated_distances[i, j]
                # NOTE(review): a zero distance divides by zero here; the
                # unused _close_zero helper looks intended for this — confirm
                # before clamping, since clamping would diverge from the
                # vectorised implementation compared against in the tests.
                if d1 > d2:
                    my_distortion = d1 / d2
                else:
                    my_distortion = d2 / d1
                distortions[i].append(my_distortion)
                distortions[j].append(my_distortion)
    return {
        election: np.mean(distortions[i]) for i, election in enumerate(election_ids)
    }
class TestDistortion(unittest.TestCase):
    """Checks that the vectorised distortion matches the naive reference."""

    def test_calculate_monotonicity(self):
        # Both implementations must agree (per election) on a random subset.
        n = 500
        election_ids = [str(uuid.uuid4()) for _ in range(n)]
        experiment = MockExperiment(election_ids)
        elections_subset = random.sample(election_ids, 300)
        m1 = calculate_distortion(experiment, elections_subset, 0.9)
        print("m1 done")
        m2 = calculate_distortion_naive(experiment, elections_subset, 0.9)
        print("m2 done")
        for el_id in elections_subset:
            self.assertAlmostEqual(m1[el_id], m2[el_id])
| StarcoderdataPython |
6401795 | from flyplanner import geom
class City:
    """A named city located at *center*."""

    def __init__(self, name: str, center: geom.Point):
        self.name = name      # display name of the city
        self.center = center  # the city's center point (flyplanner geometry)
class Cities:
    """A thin container holding a set of :class:`City` objects."""

    def __init__(self, cities: set[City]):
        # `set[City]` builtin-generic annotation requires Python 3.9+.
        self.cities = cities
| StarcoderdataPython |
68161 | # -*- coding: utf-8 -*-
import pytest
from mtpylon import long, int128
from mtpylon.messages import UnencryptedMessage, EncryptedMessage
from mtpylon.serialization import CallableFunc
from mtpylon.message_handler.strategies.utils import (
is_unencrypted_message,
is_rpc_call_message,
is_container_message,
is_msgs_ack,
)
from mtpylon.service_schema.functions import req_pq, ping
from mtpylon.service_schema.constructors import (
MsgsAck,
MessageContainer,
Message
)
from tests.simpleschema import set_task
# Positive case: an UnencryptedMessage whose payload is a CallableFunc for a
# service-schema function (req_pq) counts as an unencrypted message.
@pytest.mark.parametrize(
    'message',
    [
        pytest.param(
            UnencryptedMessage(
                message_id=long(0x51e57ac42770964a),
                message_data=CallableFunc(
                    func=req_pq,
                    params={'nonce': int128(234234)}
                ),
            ),
            id='unencrypted message'
        ),
    ]
)
def test_is_unencrypted_message_true(message):
    assert is_unencrypted_message(message)
# Negative cases: plain-string payloads, non-service rpc calls (set_task) and
# encrypted messages must all be rejected by is_unencrypted_message.
@pytest.mark.parametrize(
    'message',
    [
        pytest.param(
            UnencryptedMessage(
                message_id=long(0x51e57ac42770964a),
                message_data='wrong data',
            ),
            # id fixed: this case carries plain (non-CallableFunc) data and
            # previously duplicated the id of the next case.
            id='unencrypted message non rpc data'
        ),
        pytest.param(
            UnencryptedMessage(
                message_id=long(0x51e57ac42770964a),
                message_data=CallableFunc(
                    func=set_task,
                    params={'content': 'hello world!'}
                ),
            ),
            id='unencrypted message wrong rpc call'
        ),
        pytest.param(
            EncryptedMessage(
                message_id=long(0x51e57ac42770964a),
                session_id=long(1),
                salt=long(2),
                seq_no=0,
                message_data='Wrong message data'
            ),
            id='encrypted message'
        )
    ]
)
def test_is_unencrypted_message_false(message):
    assert not is_unencrypted_message(message)
# Positive cases: an EncryptedMessage, or an inner Message constructor, whose
# body is a CallableFunc for a schema function counts as an rpc call.
@pytest.mark.parametrize(
    'message',
    [
        pytest.param(
            EncryptedMessage(
                message_id=long(0x51e57ac42770964a),
                session_id=long(1),
                salt=long(2),
                seq_no=0,
                message_data=CallableFunc(
                    func=set_task,
                    params={'content': 'hello world!'}
                )
            ),
            id='encrypted message'
        ),
        pytest.param(
            Message(
                msg_id=long(0x60a4d9830000001c),
                seqno=9,
                bytes=16,
                body=CallableFunc(
                    func=set_task,
                    params={'content': 'hello world'}
                )
            ),
            id='message constructor'
        ),
    ]
)
def test_is_rpc_call_true(message):
    assert is_rpc_call_message(message)
# Negative cases: unencrypted messages, non-CallableFunc payloads and
# service-schema calls (ping) are not rpc-call messages.
@pytest.mark.parametrize(
    'message',
    [
        pytest.param(
            UnencryptedMessage(
                message_id=long(0x51e57ac42770964a),
                message_data=CallableFunc(
                    func=req_pq,
                    params={'nonce': int128(234234)}
                ),
            ),
            id='unencrypted message'
        ),
        pytest.param(
            EncryptedMessage(
                message_id=long(0x51e57ac42770964a),
                session_id=long(1),
                salt=long(2),
                seq_no=0,
                message_data='Wrong message data'
            ),
            id='encrypted message wrong data'
        ),
        pytest.param(
            EncryptedMessage(
                message_id=long(0x51e57ac42770964a),
                session_id=long(1),
                salt=long(2),
                seq_no=0,
                message_data='some un expected data'
            ),
            # id fixed: this case carries unexpected plain data; it previously
            # duplicated the id of the genuine ping-call case below.
            id='encrypted message unexpected data'
        ),
        pytest.param(
            EncryptedMessage(
                message_id=long(0x51e57ac42770964a),
                session_id=long(1),
                salt=long(2),
                seq_no=0,
                message_data=CallableFunc(
                    func=ping,
                    params={'ping_id': long(111)},
                )
            ),
            id='encrypted message ping call'
        )
    ]
)
def test_is_rpc_call_message_false(message):
    assert not is_rpc_call_message(message)
# Negative cases for is_container_message: neither an unencrypted message nor
# an encrypted single rpc call is a MessageContainer.
@pytest.mark.parametrize(
    'message',
    [
        # Bug fix: this pytest.param was previously wrapped in a second,
        # nested pytest.param, so the test received a ParameterSet object
        # instead of the UnencryptedMessage.
        pytest.param(
            UnencryptedMessage(
                message_id=long(0x51e57ac42770964a),
                message_data=CallableFunc(
                    func=req_pq,
                    params={'nonce': int128(234234)}
                ),
            ),
            id='unencrypted message'
        ),
        pytest.param(
            EncryptedMessage(
                message_id=long(0x51e57ac42770964a),
                session_id=long(1),
                salt=long(2),
                seq_no=0,
                message_data=CallableFunc(
                    func=ping,
                    params={'ping_id': long(111)},
                )
            ),
            id='encrypted message ping call'
        )
    ]
)
def test_is_not_container_message(message):
    assert not is_container_message(message)
# Positive case: an EncryptedMessage whose payload is a MessageContainer
# (here: a msgs_ack plus an rpc call) is a container message.
@pytest.mark.parametrize(
    'message',
    [
        pytest.param(
            EncryptedMessage(
                message_id=long(0x51e57ac42770964a),
                session_id=long(1),
                salt=long(2),
                seq_no=0,
                message_data=MessageContainer(
                    messages=[
                        Message(
                            msg_id=long(0x5e0b700a00000000),
                            seqno=7,
                            bytes=20,
                            body=MsgsAck(
                                msg_ids=[
                                    long(1621416313)
                                ]
                            ),
                        ),
                        Message(
                            msg_id=long(0x60a4d9830000001c),
                            seqno=9,
                            bytes=16,
                            body=CallableFunc(
                                func=set_task,
                                params={'content': 'hello world'}
                            )
                        ),
                    ]
                )
            ),
        ),
    ]
)
def test_is_container_message(message):
    assert is_container_message(message)
# Negative cases: rpc-call payloads (service or not) are not MsgsAck messages.
@pytest.mark.parametrize(
    'message',
    [
        pytest.param(
            UnencryptedMessage(
                message_id=long(0x51e57ac42770964a),
                message_data=CallableFunc(
                    func=req_pq,
                    params={'nonce': int128(234234)}
                ),
            ),
            id='unencrypted message'
        ),
        pytest.param(
            EncryptedMessage(
                message_id=long(0x51e57ac42770964a),
                session_id=long(1),
                salt=long(2),
                seq_no=0,
                message_data=CallableFunc(
                    func=ping,
                    params={'ping_id': long(111)},
                )
            ),
            id='encrypted message ping call'
        )
    ]
)
def test_is_not_msgs_ack(message):
    assert not is_msgs_ack(message)
# Positive case: an EncryptedMessage carrying a MsgsAck constructor.
@pytest.mark.parametrize(
    'message',
    [
        pytest.param(
            EncryptedMessage(
                message_id=long(0x51e57ac42770964a),
                session_id=long(1),
                salt=long(2),
                seq_no=0,
                message_data=MsgsAck(
                    msg_ids=[
                        long(0x51e57ac42770964a),
                        long(0x60a4d9830000001c),
                    ]
                )
            ),
            id='encrypted msgs ack'
        )
    ]
)
def test_is_msgs_ack(message):
    assert is_msgs_ack(message)
| StarcoderdataPython |
9618468 | import setuptools
# Long description shown on PyPI, taken straight from the README.
with open('README.md') as f:
    readme = f.read()

# Read the license text into a name that does not shadow the `license`
# builtin (the original bound the file contents to `license` itself).
with open('LICENSE') as f:
    license_text = f.read()

# Runtime dependencies, one requirement per line; the redundant
# `requirements = []` pre-initialisation was removed.
with open('requirements.txt') as f:
    requirements = f.read().splitlines()

setuptools.setup(
    name='chatzy.py',
    version='0.0.1',
    description='A chatzy interface in Python',
    long_description=readme,
    long_description_content_type="text/markdown",
    author='Spooky',
    author_email='<EMAIL>',
    url='https://github.com/NeonWizard/chatzy.py',
    license=license_text,
    packages=setuptools.find_packages(exclude=('tests', 'docs')),
    install_requires=requirements,
    python_requires='>=3'
)
| StarcoderdataPython |
11352170 | <filename>test/test_integration_text_to_speech_v1.py<gh_stars>1-10
import pytest
import unittest
import watson_developer_cloud
import os
@pytest.mark.skip("These are destructive, so run them manually")
@pytest.mark.skip("These are destructive, so run them manually")
class TestIntegrationTextToSpeechV1(unittest.TestCase):
    """Integration tests against the live Watson Text to Speech service.

    Skipped by default: each run creates (and deletes in tearDown) a real
    voice customization on the account identified by the
    TEXT_TO_SPEECH_USERNAME / TEXT_TO_SPEECH_PASSWORD environment variables.
    """

    def setUp(self):
        self.text_to_speech = watson_developer_cloud.TextToSpeechV1(username=os.getenv('TEXT_TO_SPEECH_USERNAME'),
                                                                    password=os.getenv('TEXT_TO_SPEECH_PASSWORD'))
        # Snapshot the pre-existing customizations, then create one for tests.
        self.original_customizations = self.text_to_speech.customizations()
        self.created_customization = self.text_to_speech.create_customization(name="test_integration_customization",
                                                                              description="customization for tests")

    def tearDown(self):
        # Remove the customization created in setUp.
        custid = self.created_customization['customization_id']
        self.text_to_speech.delete_customization(customization_id=custid)

    def test_customizations(self):
        # setUp added exactly one customization to the account.
        old_length = len(self.original_customizations['customizations'])
        new_length = len(self.text_to_speech.customizations()['customizations'])
        assert new_length - old_length == 1

    def test_speak(self):
        output = self.text_to_speech.synthesize(text="my voice is my passport")
        # NOTE(review): asserting a *falsy* result from synthesize() looks
        # suspicious — confirm whether `assert output` was intended.
        assert not output
| StarcoderdataPython |
6555097 | import os
import sys
import unittest
import numpy as np
sys.path.append(os.path.abspath("../src"))
import utility as ut
class TestUtility(unittest.TestCase):
    """Unit tests for the helpers in src/utility.py."""

    def test_complex_matrix(self):
        # complex_matrix(n, a, b[, dtype]) yields an n-row matrix whose dtype
        # follows the optional dtype argument.
        a = 5
        b = 10
        n = np.random.default_rng().integers(low = 1, high = 10)
        self.assertEqual(ut.complex_matrix(n, a, b).shape[0], n)
        self.assertEqual(ut.complex_matrix(n, a, b, np.complex128).dtype, np.complex128)

    def test_sign(self):
        # sign() returns the unit complex number for complex input and +/-1
        # for reals, with sign(0) == 1.
        z = 3 + 4j
        a = 0
        b = -3.4
        self.assertEqual(ut.sign(z), 3/5 + (4/5) * 1j)
        self.assertEqual(ut.sign(a), 1)
        self.assertEqual(ut.sign(b), -1)

    def test_closeness(self):
        # Mismatched lengths must raise; the tolerance controls the verdict
        # for both real and complex inputs.
        with self.assertRaises(ValueError) as ctx:
            ut.closeness([-1], [-2 + 6j, 0], 1e-6)
        self.assertTrue("Length of input arrays do not match" in str(ctx.exception))
        a = [1.00, 2.00, 3.00]
        b = [1.01, 2.02, 3.00]
        self.assertTrue(ut.closeness(a, b, 1e-1)[0])
        self.assertFalse(ut.closeness(a, b, 1e-3)[0])
        c = [-1.8 + 2.7j, 3.1890 + 4.2j]
        d = [-1.8 + 2.734j, 3.1 + 4.2j]
        self.assertTrue(ut.closeness(c, d, 1e-1)[0])
        self.assertFalse(ut.closeness(c, d, 1e-2)[0])
# Allow running this test module directly: `python test_utility.py`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1883678 | from typing import List
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import automation
from esphome.components import mqtt
from esphome.const import (
CONF_ID,
CONF_ON_VALUE,
CONF_OPTION,
CONF_TRIGGER_ID,
CONF_MQTT_ID,
CONF_CYCLE,
CONF_MODE,
CONF_OPERATION,
CONF_INDEX,
)
from esphome.core import CORE, coroutine_with_priority
from esphome.cpp_helpers import setup_entity
CODEOWNERS = ["@esphome/core"]
IS_PLATFORM_COMPONENT = True

# C++ namespace / classes generated for the select domain.
select_ns = cg.esphome_ns.namespace("select")
Select = select_ns.class_("Select", cg.EntityBase)
SelectPtr = Select.operator("ptr")

# Triggers
SelectStateTrigger = select_ns.class_(
    "SelectStateTrigger",
    automation.Trigger.template(cg.std_string, cg.size_t),
)

# Actions
SelectSetAction = select_ns.class_("SelectSetAction", automation.Action)
SelectSetIndexAction = select_ns.class_("SelectSetIndexAction", automation.Action)
SelectOperationAction = select_ns.class_("SelectOperationAction", automation.Action)

# Enums
SelectOperation = select_ns.enum("SelectOperation")
SELECT_OPERATION_OPTIONS = {
    "NEXT": SelectOperation.SELECT_OP_NEXT,
    "PREVIOUS": SelectOperation.SELECT_OP_PREVIOUS,
    "FIRST": SelectOperation.SELECT_OP_FIRST,
    "LAST": SelectOperation.SELECT_OP_LAST,
}

# Re-exported validator. NOTE(review): purpose not visible here — presumably
# kept so select platforms can validate icons via this module; confirm.
icon = cv.icon

# Base config schema for all select platforms: entity basics, optional MQTT
# command support, and an optional on_value automation.
SELECT_SCHEMA = cv.ENTITY_BASE_SCHEMA.extend(cv.MQTT_COMMAND_COMPONENT_SCHEMA).extend(
    {
        cv.OnlyWith(CONF_MQTT_ID, "mqtt"): cv.declare_id(mqtt.MQTTSelectComponent),
        cv.GenerateID(): cv.declare_id(Select),
        cv.Optional(CONF_ON_VALUE): automation.validate_automation(
            {
                cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(SelectStateTrigger),
            }
        ),
    }
)
async def setup_select_core_(var, config, *, options: List[str]):
    """Common setup for a select entity: base entity fields, the option list,
    on_value automations and the optional MQTT component."""
    await setup_entity(var, config)
    cg.add(var.traits.set_options(options))
    for conf in config.get(CONF_ON_VALUE, []):
        trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID], var)
        # on_value automations receive the option string (x) and its index (i).
        await automation.build_automation(
            trigger, [(cg.std_string, "x"), (cg.size_t, "i")], conf
        )
    if CONF_MQTT_ID in config:
        mqtt_ = cg.new_Pvariable(config[CONF_MQTT_ID], var)
        await mqtt.register_mqtt_component(mqtt_, config)
async def register_select(var, config, *, options: List[str]):
    """Register *var* with the App (declaring the variable if it has no ID
    yet) and run the common select setup."""
    if not CORE.has_id(config[CONF_ID]):
        var = cg.Pvariable(config[CONF_ID], var)
    cg.add(cg.App.register_select(var))
    await setup_select_core_(var, config, options=options)
async def new_select(config, *, options: List[str]):
    """Create a new select variable from *config*, register it, return it."""
    var = cg.new_Pvariable(config[CONF_ID])
    await register_select(var, config, options=options)
    return var
@coroutine_with_priority(40.0)
async def to_code(config):
    """One-time global code generation for the select domain."""
    cg.add_define("USE_SELECT")
    cg.add_global(select_ns.using)
# Schema fragment shared by all select actions: the target select's ID.
OPERATION_BASE_SCHEMA = cv.Schema(
    {
        cv.Required(CONF_ID): cv.use_id(Select),
    }
)
@automation.register_action(
    "select.set",
    SelectSetAction,
    OPERATION_BASE_SCHEMA.extend(
        {
            cv.Required(CONF_OPTION): cv.templatable(cv.string_strict),
        }
    ),
)
async def select_set_to_code(config, action_id, template_arg, args):
    """Generate code for `select.set` (select an option by its string)."""
    paren = await cg.get_variable(config[CONF_ID])
    var = cg.new_Pvariable(action_id, template_arg, paren)
    template_ = await cg.templatable(config[CONF_OPTION], args, cg.std_string)
    cg.add(var.set_option(template_))
    return var
@automation.register_action(
    "select.set_index",
    SelectSetIndexAction,
    OPERATION_BASE_SCHEMA.extend(
        {
            cv.Required(CONF_INDEX): cv.templatable(cv.positive_int),
        }
    ),
)
async def select_set_index_to_code(config, action_id, template_arg, args):
    """Generate code for `select.set_index` (select an option by position)."""
    paren = await cg.get_variable(config[CONF_ID])
    var = cg.new_Pvariable(action_id, template_arg, paren)
    template_ = await cg.templatable(config[CONF_INDEX], args, cg.size_t)
    cg.add(var.set_index(template_))
    return var
# `select.operation` and the `select.next` / `select.previous` /
# `select.first` / `select.last` shorthands all build the same
# SelectOperationAction; the shorthands pin CONF_MODE via schema defaults,
# so all five decorators intentionally target the one function below.
@automation.register_action(
    "select.operation",
    SelectOperationAction,
    OPERATION_BASE_SCHEMA.extend(
        {
            cv.Required(CONF_OPERATION): cv.templatable(
                cv.enum(SELECT_OPERATION_OPTIONS, upper=True)
            ),
            cv.Optional(CONF_CYCLE, default=True): cv.templatable(cv.boolean),
        }
    ),
)
@automation.register_action(
    "select.next",
    SelectOperationAction,
    automation.maybe_simple_id(
        OPERATION_BASE_SCHEMA.extend(
            {
                cv.Optional(CONF_MODE, default="NEXT"): cv.one_of("NEXT", upper=True),
                cv.Optional(CONF_CYCLE, default=True): cv.boolean,
            }
        )
    ),
)
@automation.register_action(
    "select.previous",
    SelectOperationAction,
    automation.maybe_simple_id(
        OPERATION_BASE_SCHEMA.extend(
            {
                cv.Optional(CONF_MODE, default="PREVIOUS"): cv.one_of(
                    "PREVIOUS", upper=True
                ),
                cv.Optional(CONF_CYCLE, default=True): cv.boolean,
            }
        )
    ),
)
@automation.register_action(
    "select.first",
    SelectOperationAction,
    automation.maybe_simple_id(
        OPERATION_BASE_SCHEMA.extend(
            {
                cv.Optional(CONF_MODE, default="FIRST"): cv.one_of("FIRST", upper=True),
            }
        )
    ),
)
@automation.register_action(
    "select.last",
    SelectOperationAction,
    automation.maybe_simple_id(
        OPERATION_BASE_SCHEMA.extend(
            {
                cv.Optional(CONF_MODE, default="LAST"): cv.one_of("LAST", upper=True),
            }
        )
    ),
)
async def select_operation_to_code(config, action_id, template_arg, args):
    """Generate code for all select operation actions (operation / next /
    previous / first / last)."""
    paren = await cg.get_variable(config[CONF_ID])
    var = cg.new_Pvariable(action_id, template_arg, paren)
    if CONF_OPERATION in config:
        # `select.operation` form: operation (and cycle) may be templated.
        op_ = await cg.templatable(config[CONF_OPERATION], args, SelectOperation)
        cg.add(var.set_operation(op_))
        if CONF_CYCLE in config:
            cycle_ = await cg.templatable(config[CONF_CYCLE], args, bool)
            cg.add(var.set_cycle(cycle_))
    if CONF_MODE in config:
        # Shorthand forms: mode is fixed by the action name's schema default.
        cg.add(var.set_operation(SELECT_OPERATION_OPTIONS[config[CONF_MODE]]))
        if CONF_CYCLE in config:
            cg.add(var.set_cycle(config[CONF_CYCLE]))
    return var
| StarcoderdataPython |
6669297 | import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from image import SuspiciousImage
from base import BaseDetectorMachine, BaseFeatureExtractor, DrawFlags
class NoiseFeatureExtractor(BaseFeatureExtractor):
    """
    Extracts a per-pixel noise feature vector from the truncated absolute
    Laplacian of a grayscale image.

    Parameters
    ----------
    size : int, (default=256)
        Length of one side of image.
    t : int, (default=4)
        Truncation threshold.
    """

    def __init__(self, size=256, t=4):
        self.size = size
        self.t = t

    def extract(self, imgs):
        """
        :param list | np.ndarray | SuspiciousImage imgs: suspicious image(s)
        :return: X, feature matrix of suspicious image(s), one row per image
        :rtype: np.ndarray
        """
        X = np.stack([self.feature(i.gray) for i in imgs])
        return X

    def feature(self, img):
        """
        :param np.ndarray img: grayscale suspicious image
        :return: X, feature vector of suspect image
        :rtype: np.ndarray
        """
        lap = cv.resize(img, dsize=(self.size, self.size))
        # Absolute Laplacian response; delta=100 shifts the output up so
        # negative responses survive the filter, then the shift is removed.
        lap = abs(cv.filter2D(lap, -1,
                              np.array([[0, 1, 0],
                                        [1, -4, 1],
                                        [0, 1, 0]], np.float32), delta=100).astype('int') - 100)
        # Truncate at t, scale to [0, 1], and flatten to a feature vector.
        lap = np.where(lap > self.t, self.t, lap) / self.t
        lap = lap.flatten()
        return lap
class Noise(BaseDetectorMachine):
    """
    Noise-based detector: flags images as suspicious from their
    truncated-Laplacian noise features using a pre-trained one-class model.

    Parameters
    ----------
    feature_extractor : FeatureExtractor class, (default=NoiseFeatureExtractor)
    model_name : str,
        Path to trained model.
    trainable : bool, (default=False)
    size : int, (default=256)
        Length of one side of image.
    color : Tuple[int, int, int], (default=(0,255,255))
        BGR frame colour for images flagged as suspicious.

    Attributes
    ----------
    dist_ : array-like, shape (n_samples,)
        Signed distance to the separating hyperplane.
    image_ : list
        Visualisation images (populated only with SHOW_FULL_RESULT).
    """

    def __init__(
            self,
            feature_extractor=NoiseFeatureExtractor,
            model_name='./model/noise_oneclass_42.sav',
            trainable=False,
            size=256,
            color=(0, 255, 255),
            flags=DrawFlags.SHOW_RESULT):
        super().__init__(feature_extractor, model_name, trainable, flags)
        self.size = size
        self.color = color

    def detect(self, imgs):
        """
        :param imgs: list of SuspiciousImage
        :return: Suspect(1) or not(0) per image
        """
        self.image_ = []
        X = super().detect(imgs)
        pred = self.clf.predict(X)
        # One-class classifiers report outliers as -1; map them to 0.
        pred = np.where(pred == -1, 0, pred)
        if self.flags != DrawFlags.RETURN_RESULT:
            self.dist_ = self.clf.decision_function(X)
        if self.flags == DrawFlags.SHOW_FULL_RESULT:
            for i, p in enumerate(pred):
                img = imgs[i]
                # Render the truncated Laplacian as an inverted grayscale
                # noise map (darker = stronger response).
                img_noise = np.where(img.lap > 4, 4, img.lap) / 4 * 255
                img_noise = (255 - img_noise)
                img_noise = cv.cvtColor(
                    img_noise.astype('uint8'), cv.COLOR_GRAY2BGR)
                if p:
                    # Frame images flagged as suspicious.
                    img_noise = cv.rectangle(
                        img_noise, (0, 0), img.gray.T.shape, self.color, thickness=5)
                self.image_.append(img_noise)
        plt.close()
        return pred

    def fit_X(self, train_X, train_y, test_X, test_y,
              model='onesvm', gamma=0.0001, nu=0.01, **kwargs):
        """Fit the underlying one-class model on precomputed feature matrices."""
        super().fit_X(train_X, train_y, test_X, test_y,
                      model=model, gamma=gamma, nu=nu, **kwargs)
| StarcoderdataPython |
1922847 | <gh_stars>0
from flask import Flask, render_template, request, abort
import logging
import os
import sched
import yaml
import sys
from datetime import datetime, timedelta
from asset_manager import Spreadsheet, AssetFolder, authenticate_gdrive
app = Flask(__name__)

# load configuration
with open('config.yaml', 'r') as stream:
    # safe_load: yaml.load without an explicit Loader is deprecated and can
    # construct arbitrary Python objects from the config file.
    config = yaml.safe_load(stream)

# authenticate user (only needed when pulling live data from Google Drive)
if config['live_update']:
    credentials = authenticate_gdrive()
else:
    # Bug fix: credentials was previously left undefined when live_update is
    # false, so the Spreadsheet() calls below raised NameError on import.
    credentials = None

assets_base_dir = os.path.join(os.getcwd(), 'static/assets')

staff_datasheet = Spreadsheet(config['staff_datasheet'], credentials, True, 60*60)
lecture_datasheet = Spreadsheet(config['lecture_datasheet'], credentials, True, 60*60)
news_datasheet = Spreadsheet(config['news_datasheet'], credentials, True, 60*60)
sponsor_datasheet = Spreadsheet(config['sponsor_datasheet'], credentials, True, 60*60)
#
# API Endpoints
#
@app.route("/webhook", methods=["POST"])
def webhook():
    """GitHub push webhook: shut down the dev server on a master push so a
    supervisor can restart it with the freshly deployed code.

    Returns HTTP 400 for payloads without a `ref`; other branches are logged
    and ignored.
    """
    if not request.json or "ref" not in request.json:
        return abort(400)
    elif request.json["ref"] == "refs/heads/master":
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            # Only the Werkzeug dev server exposes the shutdown hook; give
            # the bare RuntimeError an explanatory message.
            raise RuntimeError("Not running with the Werkzeug Server")
        func()
    else:
        logging.error("non-master branch updated; no reload")
    return "success"
@app.route("/")
def home():
    """Render the home page: news feed and sponsor cards from the sheets."""
    # format news data
    if config['live_update']:
        news_data = news_datasheet.get_data()
    else:
        news_data = []
    news_arr = []
    for news in news_data:
        news_arr.append("""
        <div>
            <b>%s</b> : <span>%s</span>
        </div>
        """ % (news['date'], news['text']))
    news_formatted = "\n".join(news_arr)
    # format sponsor data
    if config['live_update']:
        sponsor_data = sponsor_datasheet.get_data()
    else:
        sponsor_data = []
    sponsor_arr = []
    for sponsor in sponsor_data:
        sponsor_arr.append("""
        <section class="col-sm-3">
            <a href = "%s" style="color:white;text-decoration: none">
                <img src="%s"/>
                <h2>%s</h2>
            </a>
        </section>
        """ % (sponsor['link'], sponsor['image'], sponsor['name']))
    sponsor_formatted = "\n".join(sponsor_arr)
    # NOTE(review): sheet values are interpolated into raw HTML without
    # escaping — acceptable only while the spreadsheets are fully trusted.
    return render_template(
        "home.html",
        news_formatted=news_formatted,
        sponsor_formatted=sponsor_formatted,
    )
@app.route("/resources")
def resources():
    """Render the lecture-resources table; missing links are styled red."""
    # format lecture data
    if config['live_update']:
        lecture_data = lecture_datasheet.get_data()
    else:
        lecture_data = []  # for local development only
    lecture_arr = []
    for lecture in lecture_data:
        # mark empty resources in red (CSS class per resource column)
        color = {}
        for field in ['prelecture', 'slides', 'video', 'demo']:
            color[field] = 'resource_missing' if (lecture[field] == "") else 'resource_present'
        lecture_arr.append("""
        <tr>
            <td><span><b>Lecture %s: </b>%s</span></td>
            <td><a href="%s" class="%s"><i class="fas fa-book"></i></a></td>
            <td><a href="%s" class="%s"><i class="fas fa-chalkboard"></i></a></td>
            <td><a href="%s" class="%s"><i class="fas fa-video"></i></a></td>
            <td><a href="%s" class="%s"><i class="fas fa-code"></i></a></td>
        </tr>
        """ % (lecture['lecture_id'], lecture['title'], lecture['prelecture'],
               color['prelecture'], lecture['slides'], color['slides'],
               lecture['video'], color['video'], lecture['demo'], color['demo']))
    lecture_formatted = "\n".join(lecture_arr)
    return render_template(
        "resources.html",
        lecture_formatted=lecture_formatted,
    )
@app.route("/staff")
def staff():
    """Render the staff page: one card per person, grouped in rows of four.

    Officers get the `instructor-bg` card style, everyone else `staff-bg`.
    """
    # format staff data
    if config['live_update']:
        staff_data = staff_datasheet.get_data()
    else:
        staff_data = []  # for local development only
    staff_arr = []
    # Renamed the loop variable (was `staff`) so it no longer shadows this
    # view function's own name.
    for index, member in enumerate(staff_data):
        start_row = ""
        end_row = ""
        if index % 4 == 0:
            start_row = """<div class="row">"""
        elif index % 4 == 3:
            end_row = """</div>"""
        border = "instructor-bg" if member["officer"] else "staff-bg"
        staff_arr.append("""
        %s
        <div class="col-sm-3">
            <div class="%s">
                <img src = "%s"/>
                <h4>%s</h4>
                <p>%s</p>
            </div>
        </div>
        %s
        """ % (start_row, border, member['profile'], member['name'], member['desc'], end_row))
    # Bug fix: the loop only closes a row on every 4th card, so the final
    # `<div class="row">` was left unclosed whenever the staff count is not
    # a multiple of four.
    if len(staff_data) % 4 != 0:
        staff_arr.append("</div>")
    staff_formatted = "\n".join(staff_arr)
    return render_template(
        "staff.html",
        staff_formatted=staff_formatted,
    )
@app.route("/projects")
def project():
    """Render the static projects page."""
    return render_template(
        "projects.html",
    )
if __name__ == "__main__":
    # Listen on all interfaces on port 80 (requires privileges); threaded so
    # requests are served concurrently.
    app.run("0.0.0.0", port=80, threaded=True)
| StarcoderdataPython |
3222382 | type(Key.F4, KeyModifier.ALT)
exit(0) | StarcoderdataPython |
9789896 | #!/usr/bin/env python
"""
untangle
Converts xml to python objects.
The only method you need to call is parse()
Partially inspired by xml2obj
(http://code.activestate.com/recipes/149368-xml2obj/)
Author: <NAME> (http://0chris.com)
License: MIT License - http://www.opensource.org/licenses/mit-license.php
"""
import os
from xml.sax import make_parser, handler
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__version__ = '1.1.0'
class Element():
    """
    Representation of an XML element.

    Children are reachable positionally via :meth:`get_elements`, by
    attribute access (``element.child_tag``), and XML attributes via
    subscription (``element['attr']``).
    """

    def __init__(self, name, attributes):
        self._name = name              # tag name (None only for the synthetic root)
        self._attributes = attributes  # dict of XML attributes (None for the root)
        self.children = []             # child Element objects, in document order
        self.is_root = False           # True only for the parser's synthetic root
        self.cdata = ''                # accumulated character data (text content)

    def add_child(self, element):
        """Append *element* as the last child."""
        self.children.append(element)

    def add_cdata(self, cdata):
        """Append character data to this element's text content."""
        self.cdata = self.cdata + cdata

    def get_attribute(self, key):
        """Return the XML attribute *key*, or None if absent."""
        return self._attributes.get(key)

    def get_elements(self, name=None):
        """Return children whose tag is *name*, or all children if None."""
        if name:
            return [e for e in self.children if e._name == name]
        else:
            return self.children

    def __getitem__(self, key):
        return self.get_attribute(key)

    def __getattr__(self, key):
        # Called only for names not found normally: resolve child elements by
        # tag name and cache the result on the instance for later lookups.
        matching_children = [x for x in self.children if x._name == key]
        if matching_children:
            if len(matching_children) == 1:
                self.__dict__[key] = matching_children[0]
                return matching_children[0]
            else:
                self.__dict__[key] = matching_children
                return matching_children
        else:
            raise IndexError('Unknown key <%s>' % key)

    def __iter__(self):
        yield self

    def __str__(self):
        return (
            "Element <%s> with attributes %s and children %s" %
            (self._name, self._attributes, self.children)
        )

    def __repr__(self):
        return (
            "Element(name = %s, attributes = %s, cdata = %s)" %
            (self._name, self._attributes, self.cdata)
        )

    def __nonzero__(self):
        # Python 2 truthiness hook: an element is truthy once it has a name
        # (or is the root).
        return self.is_root or self._name is not None

    # Bug fix: Python 3 calls __bool__, not __nonzero__; without this alias
    # every Element (including unnamed ones) was truthy under Python 3.
    __bool__ = __nonzero__

    def __eq__(self, val):
        return self.cdata == val

    def __dir__(self):
        children_names = [x._name for x in self.children]
        return children_names
class Handler(handler.ContentHandler):
    """
    SAX handler which creates the Python object structure out of ``Element``s
    """

    def __init__(self):
        # Synthetic root parenting the document's top-level element.
        self.root = Element(None, None)
        self.root.is_root = True
        # Stack of currently-open elements; the top is the innermost one.
        self.elements = []

    def startElement(self, name, attributes):
        # Sanitise tag names so they are valid Python attribute names.
        name = name.replace('-', '_')
        name = name.replace('.', '_')
        name = name.replace(':', '_')
        attrs = dict()
        for k, v in attributes.items():
            attrs[k] = v
        element = Element(name, attrs)
        if len(self.elements) > 0:
            self.elements[-1].add_child(element)
        else:
            self.root.add_child(element)
        self.elements.append(element)

    def endElement(self, name):
        self.elements.pop()

    def characters(self, cdata):
        # Text content is attached to the innermost open element.
        self.elements[-1].add_cdata(cdata)
def parse(filename):
    """
    Interprets the given string as a filename, URL or XML data string,
    parses it and returns a Python object which represents the given
    document.
    Raises ``ValueError`` if the argument is None / empty string.
    Raises ``xml.sax.SAXParseException`` if something goes wrong
    during parsing.
    """
    if filename is None or filename.strip() == '':
        raise ValueError('parse() takes a filename, URL or XML string')
    parser = make_parser()
    sax_handler = Handler()
    parser.setContentHandler(sax_handler)
    # Existing file paths and URLs are handed to SAX directly; any other
    # string is treated as literal XML content.
    if os.path.exists(filename) or is_url(filename):
        parser.parse(filename)
    else:
        parser.parse(StringIO(filename))
    return sax_handler.root
def is_url(string):
    """Return True if *string* looks like an HTTP or HTTPS URL."""
    return string.startswith(('http://', 'https://'))
# vim: set expandtab ts=4 sw=4:
| StarcoderdataPython |
8059362 | <filename>sweetpea/core/__init__.py
"""This module provides the fundamental functionality needed for SweetPea to
actually *do* anything. Primarily, this involves handling data representation
and making calls to external utilities for solving logic problems via
SAT-solving.
Data Representation
===================
SweetPea works by representing constraints on experimental designs as
`propositional logic formulas
<https://en.wikipedia.org/wiki/Propositional_formula>`_. These formulas are
converted into `conjunctive normal form
<https://en.wikipedia.org/wiki/Conjunctive_normal_form>`_ and are then passed
to an external SAT solver to either be solved or sampled.
Internally to :mod:`.core`, these formulas are represented as :class:`.CNF`
instances. These are comprised of :class:`Clauses <.Clause>`, which are in turn
comprised of :class:`Vars <.Var>`. The :class:`.Var`, :class:`.Clause`, and
:class:`.CNF` classes are very expressive and can easily be used to manipulate
advanced logic problems.
External Utilities
==================
Once the data is in a compatible formulaic representation, it must be shipped
to an external utility to be solved or sampled from. SweetPea Core makes use of
the following utilities:
* `CryptoMiniSAT <https://github.com/msoos/cryptominisat>`_, an advanced
incremental SAT solver.
* `Unigen <https://github.com/meelgroup/unigen>`_, a state-of-the-art,
almost-uniform sampler that uses CryptoMiniSAT.
Using Core
==========
There are only a few functions exported from :mod:`.core`, as well as a small
number of classes to support using those functions.
Functions
---------
* :func:`~sweetpea.core.generate.is_satisfiable.cnf_is_satisfiable`
* :func:`~sweetpea.core.generate.sample_non_uniform.sample_non_uniform`
* :func:`~sweetpea.core.generate.sample_non_uniform.sample_non_uniform_from_specification`
* :func:`~sweetpea.core.generate.sample_uniform.sample_uniform`
* :func:`~sweetpea.core.generate.utility.combine_cnf_with_requests`
Classes
-------
* :class:`~sweetpea.core.cnf.Var`
* :class:`~sweetpea.core.cnf.Clause`
* :class:`~sweetpea.core.cnf.CNF`
* :class:`~sweetpea.core.generate.utility.AssertionType`
* :class:`~sweetpea.core.generate.utility.GenerationRequest`
* :class:`~sweetpea.core.generate.utility.Solution`
"""
from .cnf import Clause, CNF, Var
from .generate import (
AssertionType, GenerationRequest, Solution,
cnf_is_satisfiable, sample_non_uniform, sample_non_uniform_from_specification, sample_uniform,
combine_cnf_with_requests
)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.