| id | content |
|---|---|
99469
|
from sqlalchemy import Boolean, Column, String
from sqlalchemy.dialects.mysql import INTEGER as Integer, TINYINT as TinyInteger
from ichnaea.models.base import _Model
class ApiKey(_Model):
"""
ApiKey model.
    The allow_fallback and fallback columns determine whether a fallback
    location provider should be used and, if so, which one.
    The url specifies the external endpoint supporting the
    :ref:`api_geolocate_latest` API.
    Requests to the fallback service can optionally be rate limited.
    Two settings control the rate limit:
    ``ratelimit`` specifies how many requests are allowed to be made.
    ``ratelimit_interval`` specifies the interval in seconds for which
    the ``ratelimit`` number applies. For example, one could configure
    60 requests per 60 seconds, or 86400 requests per 86400 seconds
    (one day); both would on average allow one request per second.
    Finally, the fallback service may allow caching of results inside
    the project's own Redis cache. ``cache_expire`` specifies the number
    of seconds for which entries are allowed to be cached.
"""
__tablename__ = "api_key"
valid_key = Column(String(40), primary_key=True) # UUID API key.
maxreq = Column(Integer) # Maximum number of requests per day.
allow_fallback = Column(Boolean) # Use the fallback source?
allow_locate = Column(Boolean) # Allow locate queries?
allow_region = Column(Boolean) # Allow region queries?
fallback_name = Column(String(40)) # Fallback metric name.
fallback_schema = Column(String(64)) # Fallback API schema.
fallback_url = Column(String(256)) # URL of the fallback provider.
fallback_ratelimit = Column(Integer) # Fallback rate limit count.
fallback_ratelimit_interval = Column(Integer) # Interval in seconds.
fallback_cache_expire = Column(Integer) # Cache expiry in seconds.
store_sample_locate = Column(TinyInteger) # Sample rate 0-100.
store_sample_submit = Column(TinyInteger) # Sample rate 0-100.
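# A minimal sketch (not part of ichnaea itself) of how the fallback rate limit
# described in the docstring above could be enforced with Redis. The
# ``redis_client`` argument and the key layout are assumptions for illustration.
def fallback_request_allowed(redis_client, api_key):
    """Return True if another fallback request fits in the current window."""
    key = "fallback_ratelimit:%s" % api_key.valid_key
    current = redis_client.incr(key)
    if current == 1:
        # First request of a fresh window: start the interval timer.
        redis_client.expire(key, api_key.fallback_ratelimit_interval)
    return current <= api_key.fallback_ratelimit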
|
99501
|
import codecs
from distutils.core import setup
def read(fname):
'''
Read a file from the directory where setup.py resides
'''
with codecs.open(fname, encoding='utf-8') as rfh:
return rfh.read()
try:
    description = read('README.txt')
except (IOError, OSError):
    description = read('README.md')
setup(
name='python-ldap-test',
version='0.3.1',
author='<NAME>',
author_email='<EMAIL>',
packages=['ldap_test', 'ldap_test.test'],
url='https://github.com/zoldar/python-ldap-test/',
license='LICENSE.txt',
description=('Tool for testing code that speaks with an LDAP server. Allows one'
' to easily configure and run an embedded, in-memory LDAP server. Uses'
' UnboundID LDAP SDK through Py4J.'),
keywords=['testing', 'tests', 'test', 'ldap'],
long_description=description,
install_requires=[
"py4j >= 0.10.2.1",
],
package_data={
'': ['*.txt'],
'ldap_test': ['*.jar'],
},
options={
'bdist_rpm': {
'build_requires':[
'python',
'python-setuptools',
'py4j',
],
'requires':[
'python',
'py4j',
],
},
},
)
|
99589
|
def test():
    a = 10
    fun1 = lambda: a  # the closure captures the variable 'a', not its value at definition time
    fun1()  # returns 10
    print(a)
    a += 1
    fun1()  # returns 11: the lambda looks up 'a' at call time
    print(a)
    return fun1
fun = test()
print(f"Fun: {fun()}")  # prints "Fun: 11"; the closure cell keeps 'a' alive after test() returns
|
99591
|
import jinja2
from sanic import response
from .util import get_banner
j2 = jinja2.Environment(
loader=jinja2.PackageLoader(__name__, "templates"),
autoescape=jinja2.select_autoescape(["html", "xml"]),
)
j2.globals.update({"len": len, "get_banner": get_banner})
def renderpost_filter(t, show_thread_link=False):
return jinja2.Markup(
j2.get_template("post.html").render(show_thread_link=show_thread_link, **t)
)
j2.filters.update({"renderpost": renderpost_filter})
def render_template(name, **kwargs):
return response.html(j2.get_template(name).render(**kwargs))
|
99659
|
description = 'system setup'
group = 'lowlevel'
sysconfig = dict(
cache='localhost',
instrument='ESTIA',
experiment='Exp',
datasinks=['conssink', 'filesink', 'daemonsink'],
)
modules = ['nicos.commands.standard']
includes = ['temp']
devices = dict(
ESTIA=device('nicos.devices.instrument.Instrument',
description='instrument object',
instrument='estia',
responsible='<NAME> <<EMAIL>>',
website='https://confluence.esss.lu.se/display/ESTIA',
operators=['ESS', 'PSI'],
facility='Paul Scherrer Institut (PSI)',
),
Sample=device('nicos.devices.sample.Sample',
description='The currently used sample',
),
Exp=device('nicos.devices.experiment.Experiment',
description='experiment object',
dataroot='/opt/nicos-data',
sendmail=True,
serviceexp='p0',
sample='Sample',
),
filesink=device('nicos.devices.datasinks.AsciiScanfileSink',
),
conssink=device('nicos.devices.datasinks.ConsoleScanSink',
),
daemonsink=device('nicos.devices.datasinks.DaemonSink',
),
Space=device('nicos.devices.generic.FreeSpace',
description='The amount of free space for storing data',
path='/opt/nicos-data',
minfree=5,
),
)
|
99673
|
import numpy as np
import torch
import torch.nn as nn
import torchtestcase
import unittest
from survae.transforms.bijections.coupling import *
from survae.nn.layers import ElementwiseParams, ElementwiseParams2d
from survae.tests.transforms.bijections import BijectionTest
class AdditiveCouplingBijectionTest(BijectionTest):
def test_bijection_is_well_behaved(self):
batch_size = 10
self.eps = 1e-6
for shape in [(6,),
(6,8,8)]:
for num_condition in [None, 1]:
with self.subTest(shape=shape, num_condition=num_condition):
x = torch.randn(batch_size, *shape)
if num_condition is None:
if len(shape) == 1: net = nn.Linear(3,3)
if len(shape) == 3: net = nn.Conv2d(3,3, kernel_size=3, padding=1)
else:
if len(shape) == 1: net = nn.Linear(1,5)
if len(shape) == 3: net = nn.Conv2d(1,5, kernel_size=3, padding=1)
bijection = AdditiveCouplingBijection(net, num_condition=num_condition)
self.assert_bijection_is_well_behaved(bijection, x, z_shape=(batch_size, *shape))
z, _ = bijection.forward(x)
if num_condition is None:
self.assertEqual(x[:,:3], z[:,:3])
else:
self.assertEqual(x[:,:1], z[:,:1])
class AffineCouplingBijectionTest(BijectionTest):
def test_bijection_is_well_behaved(self):
batch_size = 10
self.eps = 5e-6
for shape in [(6,),
(6,8,8)]:
for num_condition in [None, 1]:
with self.subTest(shape=shape, num_condition=num_condition):
x = torch.randn(batch_size, *shape)
if num_condition is None:
if len(shape) == 1: net = nn.Sequential(nn.Linear(3,3*2), ElementwiseParams(2))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(3,3*2, kernel_size=3, padding=1), ElementwiseParams2d(2))
else:
if len(shape) == 1: net = nn.Sequential(nn.Linear(1,5*2), ElementwiseParams(2))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(1,5*2, kernel_size=3, padding=1), ElementwiseParams2d(2))
bijection = AffineCouplingBijection(net, num_condition=num_condition)
self.assert_bijection_is_well_behaved(bijection, x, z_shape=(batch_size, *shape))
z, _ = bijection.forward(x)
if num_condition is None:
self.assertEqual(x[:,:3], z[:,:3])
else:
self.assertEqual(x[:,:1], z[:,:1])
if __name__ == '__main__':
unittest.main()
|
99681
|
import asyncio
import logging
import socket
import websockets
from gabriel_protocol import gabriel_pb2
from collections import namedtuple
URI_FORMAT = 'ws://{host}:{port}'
logger = logging.getLogger(__name__)
websockets_logger = logging.getLogger(websockets.__name__)
# The entire payload will be printed if this is allowed to be DEBUG
websockets_logger.setLevel(logging.INFO)
ProducerWrapper = namedtuple('ProducerWrapper', ['producer', 'source_name'])
# It isn't necessary to do this as of Python 3.6 because
# "The socket option TCP_NODELAY is set by default for all TCP connections"
# per https://docs.python.org/3/library/asyncio-eventloop.html
# However, this seems worth keeping in case the default behavior changes.
class NoDelayProtocol(websockets.client.WebSocketClientProtocol):
def connection_made(self, transport: asyncio.BaseTransport) -> None:
super().connection_made(transport)
sock = transport.get_extra_info('socket')
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
class WebsocketClient:
def __init__(self, host, port, producer_wrappers, consumer):
'''
producer should take no arguments and return an instance of
gabriel_pb2.InputFrame.
consumer should take one gabriel_pb2.ResultWrapper and does not need to
return anything.
'''
self._welcome_event = asyncio.Event()
self._sources = {}
self._running = True
self._uri = URI_FORMAT.format(host=host, port=port)
self.producer_wrappers = producer_wrappers
self.consumer = consumer
def launch(self, message_max_size=None):
event_loop = asyncio.get_event_loop()
try:
self._websocket = event_loop.run_until_complete(
websockets.connect(self._uri, create_protocol=NoDelayProtocol,
max_size=message_max_size))
except ConnectionRefusedError:
logger.error('Could not connect to server')
return
# We don't waste time checking TCP_NODELAY in production.
# Note that websocket.transport is an undocumented property.
# sock = self._websocket.transport.get_extra_info('socket')
# assert(sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) == 1)
consumer_task = asyncio.ensure_future(self._consumer_handler())
tasks = [
asyncio.ensure_future(self._producer_handler(
producer_wrapper.producer, producer_wrapper.source_name))
for producer_wrapper in self.producer_wrappers
]
tasks.append(consumer_task)
_, pending = event_loop.run_until_complete(asyncio.wait(
tasks, return_when=asyncio.FIRST_COMPLETED))
for task in pending:
task.cancel()
logger.info('Disconnected From Server')
def get_source_names(self):
return self._sources.keys()
def stop(self):
self._running = False
        logger.info('stopping client')
async def _consumer_handler(self):
while self._running:
try:
raw_input = await self._websocket.recv()
except websockets.exceptions.ConnectionClosed:
return # stop the handler
            logger.debug('Received input from server')
to_client = gabriel_pb2.ToClient()
to_client.ParseFromString(raw_input)
if to_client.HasField('welcome'):
self._process_welcome(to_client.welcome)
elif to_client.HasField('response'):
self._process_response(to_client.response)
else:
raise Exception('Empty to_client message')
def _process_welcome(self, welcome):
for source_name in welcome.sources_consumed:
self._sources[source_name] = _Source(welcome.num_tokens_per_source)
self._welcome_event.set()
def _process_response(self, response):
result_wrapper = response.result_wrapper
if (result_wrapper.status == gabriel_pb2.ResultWrapper.SUCCESS):
self.consumer(result_wrapper)
elif (result_wrapper.status ==
gabriel_pb2.ResultWrapper.NO_ENGINE_FOR_SOURCE):
raise Exception('No engine for source')
else:
status = result_wrapper.Status.Name(result_wrapper.status)
logger.error('Output status was: %s', status)
if response.return_token:
self._sources[response.source_name].return_token()
async def _producer_handler(self, producer, source_name):
'''
Loop waiting until there is a token available. Then calls producer to
get the gabriel_pb2.InputFrame to send.
'''
await self._welcome_event.wait()
source = self._sources.get(source_name)
assert source is not None, (
"No engines consume frames from source: {}".format(source_name))
while self._running:
await source.get_token()
input_frame = await producer()
if input_frame is None:
source.return_token()
logger.info('Received None from producer')
continue
from_client = gabriel_pb2.FromClient()
from_client.frame_id = source.get_frame_id()
from_client.source_name = source_name
from_client.input_frame.CopyFrom(input_frame)
try:
await self._send_from_client(from_client)
except websockets.exceptions.ConnectionClosed:
return # stop the handler
logger.debug('num_tokens for %s is now %d', source_name,
source.get_num_tokens())
source.next_frame()
async def _send_from_client(self, from_client):
# Removing this method will break measurement_client
await self._websocket.send(from_client.SerializeToString())
class _Source:
def __init__(self, num_tokens):
self._num_tokens = num_tokens
self._event = asyncio.Event()
self._frame_id = 0
def return_token(self):
self._num_tokens += 1
self._event.set()
async def get_token(self):
while self._num_tokens < 1:
logger.debug('Waiting for token')
self._event.clear() # Clear because we definitely want to wait
await self._event.wait()
self._num_tokens -= 1
def get_num_tokens(self):
return self._num_tokens
def get_frame_id(self):
return self._frame_id
def next_frame(self):
self._frame_id += 1
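# Hedged usage sketch (all names below are illustrative, not part of this
# module): a producer returning a gabriel_pb2.InputFrame and a consumer
# receiving a gabriel_pb2.ResultWrapper, wired up per the contract described
# in WebsocketClient.__init__.
#
#     async def producer():
#         return gabriel_pb2.InputFrame()
#
#     def consumer(result_wrapper):
#         logger.info('Got result with status %s', result_wrapper.status)
#
#     client = WebsocketClient(
#         'localhost', 9099,
#         [ProducerWrapper(producer=producer, source_name='camera')],
#         consumer)
#     client.launch()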
|
99697
|
from parler.models import TranslatableModel
from solo.models import SingletonModel
class SiteConfig(SingletonModel, TranslatableModel):
    def __str__(self) -> str:
        return "Site Config"
|
99711
|
import json
import logging
import time
from selenium.common.exceptions import NoSuchElementException
from seleniumwire import webdriver
import config
def get_valid_cookies(cookies_amount):
options = webdriver.FirefoxOptions()
options.headless = True
driver = webdriver.Firefox(options=options, executable_path='drivers/geckodriver.exe')
valid_cookies_index = []
for i in range(1, cookies_amount + 1):
        logging.info('Checking %s cookies', i)
driver.delete_all_cookies()
driver.get('https://all-access.wax.io')
with open(f'cookies/{i}.json', 'r') as f:
for cookie in json.loads(f.read()):
del cookie['sameSite']
driver.add_cookie(cookie)
driver.get(config.WORK_SITE_DIR)
try:
driver.execute_script('wax.login()')
except Exception as e:
            logging.info('Cookies %s failed verification. (%s)', i, e)
continue
while len(driver.window_handles) < 2:
time.sleep(1)
for window in driver.window_handles:
driver.switch_to.window(window)
if driver.current_url == 'https://all-access.wax.io/cloud-wallet/login/':
break
while driver.execute_script('return document.readyState;') != 'complete':
time.sleep(2)
time.sleep(1)
try:
checked_element = driver.find_element_by_xpath('//span[@class="action-title"]')
except NoSuchElementException:
checked_element = None
if checked_element and checked_element.text == 'You must login into WAX Cloud Wallet first':
            logging.info('Cookies %s failed verification.', i)
continue
for window in driver.window_handles[1:]:
driver.switch_to.window(window)
driver.close()
driver.switch_to.window(driver.window_handles[0])
logging.info('Cookies %s passed the check.', i)
valid_cookies_index.append(i)
logging.info('Checking the cookie for validity is over.')
driver.close()
return valid_cookies_index
|
99731
|
def extractThehlifestyleCom(item):
'''
Parser for 'thehlifestyle.com'
'''
tstr = str(item['tags']).lower()
if 'review' in tstr:
return None
if 'actors' in tstr:
return None
if 'game' in tstr:
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('The Beloved Imperial Consort translation', 'The Beloved Imperial Consort', 'translated'),
('Good Morning, Miss Undercover Translation', 'Good Morning, Miss Undercover', 'translated'),
('Hilarous Pampered Consort Translation', 'Hilarous Pampered Consort', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
99732
|
import os
import json
import pytest
from test_api.run import create_app
@pytest.fixture(scope="session")
def app():
abs_file_path = os.path.abspath(os.path.dirname(__file__))
openapi_path = os.path.join(abs_file_path, "../", "openapi")
os.environ["SPEC_PATH"] = openapi_path
app = create_app()
return app
@pytest.fixture(scope="session", autouse=True)
def clean_up():
yield
default_pets = {
"1": {"name": "ginger", "breed": "bengal", "price": 100},
"2": {"name": "sam", "breed": "husky", "price": 10},
"3": {"name": "guido", "breed": "python", "price": 518},
}
abs_file_path = os.path.abspath(os.path.dirname(__file__))
json_path = os.path.join(abs_file_path, "../", "test_api", "core", "pets.json")
with open(json_path, "w") as pet_store:
json.dump(default_pets, pet_store, indent=4)
|
99832
|
from typing import Hashable
import pandas_flavor as pf
import pandas as pd
from janitor.utils import deprecated_alias
@pf.register_dataframe_method
@deprecated_alias(column="column_name")
def to_datetime(
df: pd.DataFrame, column_name: Hashable, **kwargs
) -> pd.DataFrame:
"""Convert column to a datetime type, in-place.
Intended to be the method-chaining equivalent of:
df[column_name] = pd.to_datetime(df[column_name], **kwargs)
This method mutates the original DataFrame.
Example: Converting a string column to datetime type with custom format.
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({'date': ['20200101', '20200202', '20200303']})
>>> df
date
0 20200101
1 20200202
2 20200303
>>> df.to_datetime('date', format='%Y%m%d')
date
0 2020-01-01
1 2020-02-02
2 2020-03-03
Read the pandas documentation for [`to_datetime`][pd_docs] for more information.
[pd_docs]: https://pandas.pydata.org/docs/reference/api/pandas.to_datetime.html
:param df: A pandas DataFrame.
:param column_name: Column name.
:param kwargs: Provide any kwargs that `pd.to_datetime` can take.
:returns: A pandas DataFrame with updated datetime data.
""" # noqa: E501
df[column_name] = pd.to_datetime(df[column_name], **kwargs)
return df
|
99848
|
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from tf_metric_learning.utils.index import AnnoyDataIndex
class AnnoyEvaluatorCallback(AnnoyDataIndex):
"""
Callback, extracts embeddings, add them to AnnoyIndex and evaluate them as recall.
"""
def __init__(
self,
model,
data_store,
data_search,
save_dir=None,
eb_size=256,
metric="euclidean",
freq=1,
batch_size=None,
normalize_eb=True,
normalize_fn=None,
progress=True,
**kwargs
):
super().__init__(eb_size, data_store["labels"], metric=metric, save_dir=save_dir, progress=progress)
self.base_model = model
self.data_store = data_store
self.data_search = data_search
self.batch_size = batch_size
self.freq = int(freq)
self.normalize_eb = normalize_eb
self.normalize_fn = normalize_fn
self.results = {}
def on_epoch_begin(self, epoch, logs=None):
if self.freq and epoch % self.freq == 0:
self.compute_data()
def batch(self, iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
def compute_data(self):
self.create_index()
i = 0
with tqdm(total=len(self.data_store["images"]), desc="Indexing ... ") as pbar:
for batch in self.batch(self.data_store["images"], n=self.batch_size*10):
store_images = self.normalize_fn(batch) if self.normalize_fn is not None else batch
embeddings_store = self.base_model.predict(store_images, batch_size=self.batch_size)
if self.normalize_eb:
embeddings_store = tf.nn.l2_normalize(embeddings_store, axis=1).numpy()
for embedding in embeddings_store:
self.add_to_index(i, embedding)
i += 1
pbar.update(len(batch))
self.build(k=5)
self.evaluate(self.data_search["images"])
def evaluate(self, images):
self.results = {"default": []}
i = 0
with tqdm(total=len(images), desc="Evaluating ... ") as pbar:
for batch in self.batch(images, n=self.batch_size*10):
search_images = self.normalize_fn(batch) if self.normalize_fn is not None else batch
embeddings_search = self.base_model.predict(search_images, batch_size=self.batch_size)
if self.normalize_eb:
embeddings_search = tf.nn.l2_normalize(embeddings_search, axis=1).numpy()
for embedding in embeddings_search:
annoy_results = self.search(embedding, n=20, include_distances=False)
annoy_results = [self.get_label(result) for result in annoy_results]
recalls = self.eval_recall(annoy_results, self.data_search["labels"][i], [1, 5, 10, 20])
self.results["default"].append(recalls)
i += 1
pbar.update(len(batch))
print("\nRecall@[1, 3, 5, 10, 20] Computed:", np.mean(np.asarray(self.results["default"]), axis=0), "\n")
def eval_recall(self, annoy_results, label, recalls):
return [1 if label in annoy_results[:recall_n] else 0 for recall_n in recalls]
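# Hedged worked example of eval_recall (values are illustrative): with nearest
# neighbours ['cat', 'dog', 'cat'] and true label 'dog', recall at [1, 2] is
# [0, 1], because 'dog' first appears at rank 2.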
|
99864
|
from HABApp.core.events import ValueChangeEventFilter, ValueUpdateEventFilter
from . import MqttValueChangeEvent, MqttValueUpdateEvent
class MqttValueUpdateEventFilter(ValueUpdateEventFilter):
_EVENT_TYPE = MqttValueUpdateEvent
class MqttValueChangeEventFilter(ValueChangeEventFilter):
_EVENT_TYPE = MqttValueChangeEvent
|
99891
|
version https://git-lfs.github.com/spec/v1
oid sha256:995a5a4cc97102e151664561338b41fb57c93314e63da5958bc2a641355d7cc3
size 11926
|
99923
|
import argparse
def get_arg_parser(parser=None):
"""Parse the command line arguments for merge using argparse
Args:
parser (argparse.ArgumentParser or CompliantArgumentParser):
an argument parser instance
Returns:
ArgumentParser: the argument parser instance
Notes:
if parser is None, creates a new parser instance
"""
# add arguments that are specific to the component
if parser is None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--value",
required=False,
type=int,
default=100,
help="the value on which we will operate",
)
return parser
def main():
"""The main function"""
# get the arguments
parser = get_arg_parser()
args = parser.parse_args()
args = vars(args)
# this shows a basic operation on the value passed as parameter
value = args["value"]
operand = 1000
result = value + operand
    print(
        f"The value passed as parameter is: {value}. "
        f"We computed: {value} + {operand} = {result}."
    )
if __name__ == "__main__":
main()
|
99976
|
from kivymd.uix.screen import MDScreen
class HomeScreen(MDScreen):
"""
Example Screen.
"""
|
99978
|
import django_filters
from django.db.models import Q
from devices.enums import PasswordAlgorithm
from devices.models import Configuration, Platform
from utils.filters import (
BaseFilterSet,
CreatedUpdatedFilterSet,
NameSlugSearchFilterSet,
TagFilter,
)
class ConfigurationFilterSet(BaseFilterSet, CreatedUpdatedFilterSet):
q = django_filters.CharFilter(method="search", label="Search")
tag = TagFilter()
class Meta:
model = Configuration
fields = ["id", "jinja2_trim", "jinja2_lstrip"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(name__icontains=value) | Q(template__icontains=value))
class PlatformFilterSet(
BaseFilterSet, CreatedUpdatedFilterSet, NameSlugSearchFilterSet
):
password_algorithm = django_filters.MultipleChoiceFilter(
choices=PasswordAlgorithm.choices, null_value=None
)
class Meta:
model = Platform
fields = ["id", "name", "slug", "napalm_driver", "description"]
|
99990
|
from RestrictedPython import compile_restricted_eval
from RestrictedPython import compile_restricted_exec
import RestrictedPython.Guards
def _compile(compile_func, source):
"""Compile some source with a compile func."""
result = compile_func(source)
assert result.errors == (), result.errors
assert result.code is not None
return result.code
def _execute(code, glb, exc_func):
"""Execute compiled code using `exc_func`.
glb ... globals, gets injected with safe_builtins
"""
if glb is None:
glb = {}
if '__builtins__' not in glb:
glb['__builtins__'] = RestrictedPython.Guards.safe_builtins.copy()
if exc_func == 'eval':
return eval(code, glb)
else:
exec(code, glb)
return glb
def restricted_eval(source, glb=None):
"""Call compile_restricted_eval and actually eval it."""
code = _compile(compile_restricted_eval, source)
return _execute(code, glb, 'eval')
def restricted_exec(source, glb=None):
"""Call compile_restricted_eval and actually exec it."""
code = _compile(compile_restricted_exec, source)
return _execute(code, glb, 'exec')
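# Hedged usage sketch of the helpers above (the snippets are illustrative,
# not part of any test suite):
#
#     assert restricted_eval('a + 1', {'a': 1}) == 2
#     glb = restricted_exec('result = 2 * 3')
#     assert glb['result'] == 6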
|
100017
|
import queue
import struct
class CanIDService:
"""
Can ID service interface class.
    This interface class has to be used to implement any CAN-ID based service that shall be able to be
    registered with the communication module :class:`test_framework.communication.communication`
"""
# ---------------------------------------------------------------------------------------------------------------- #
# function: initialization #
# ---------------------------------------------------------------------------------------------------------------- #
def __init__(self):
"""
The function initializes the basic values of the class.
"""
self.recvQueue = None
# ---------------------------------------------------------------------------------------------------------------- #
# function: getRXQueue #
# ---------------------------------------------------------------------------------------------------------------- #
def getRXQueue(self):
"""
The function returns the memory information of the queue.
Returns
-------
queue
"""
return self.recvQueue
# ---------------------------------------------------------------------------------------------------------------- #
# function: clearQueue #
# ---------------------------------------------------------------------------------------------------------------- #
def clearQueue(self):
"""
Flushes the recvQueue.
Returns
-------
None
"""
        while self.isEmpty() is False:
            self.recvQueue.get()  # drain one queued message
# ---------------------------------------------------------------------------------------------------------------- #
# function: isEmpty #
# ---------------------------------------------------------------------------------------------------------------- #
def isEmpty(self):
"""
Checks if the queue is empty or not.
Returns
-------
retValue : bool
True queue is empty. False queue is not empty.
"""
retValue = self.recvQueue.empty()
return retValue
# ---------------------------------------------------------------------------------------------------------------- #
# function: decode #
# ---------------------------------------------------------------------------------------------------------------- #
@classmethod
def decode(cls, msg, extractRule):
"""
The function provides the basic rule to decode messages.
Parameters
----------
msg : binary
the message is a binary format of interested information
extractRule : dict
the dictionary is a set of information to decode the information from the message
Returns
-------
retValue : value
the value represents the extracted information
"""
data = cls._extractBytes(msg, extractRule)
if 'resolution' in extractRule:
resolution = extractRule['resolution']
else:
resolution = 1
if 'offset' in extractRule:
offset = extractRule['offset']
else:
offset = 0
retValue = (data - offset) * resolution
return retValue
# ---------------------------------------------------------------------------------------------------------------- #
# function: _extractBytes #
# ---------------------------------------------------------------------------------------------------------------- #
@classmethod
def _extractBytes(cls, msg, extractRule):
"""
The function extracts the requested bytes from the array.
Parameters
----------
msg : bytearray
msg with the necessary data
extractRule : dictionary
dictionary with the extraction rule
Returns
-------
data : integer
extracted data
"""
# get message length
msgLength = len(msg)
# append data to message
if msgLength < 8:
msg += bytearray(8 - msgLength)
convertedData = struct.unpack("<Q", msg)[0]
startBitShift = 8 * extractRule['start_byte'] + extractRule['start_bit']
endBitShift = 8 * (8 - extractRule['end_byte']) - (extractRule['end_bit'] + 1)
referenceShiftHigh = 64 - endBitShift
deactivationMaskTemp = convertedData >> referenceShiftHigh
deactivationMaskHigh = deactivationMaskTemp << referenceShiftHigh
data = (convertedData & ~deactivationMaskHigh) >> startBitShift
return data
# ---------------------------------------------------------------------------------------------------------------- #
    # function: encode                                                                                               #
# ---------------------------------------------------------------------------------------------------------------- #
@staticmethod
def encode(msg, packingRule, value):
"""
        The function provides the basic rule to encode data into a message.
Parameters
----------
msg : byte
currently used message
packingRule : dictionary
rule to encode the given data
value : int or float
value to be packed by the function
Returns
-------
msg : byte
currently used message with added data
"""
# decode already packed data
packedData = struct.unpack("<Q", msg)[0]
# calculate number of bits
endBit = packingRule['end_byte'] * 8 + packingRule['end_bit']
startBit = packingRule['start_byte'] * 8 + packingRule['start_bit']
lengthOfData = endBit - startBit + 1
codingMask = 0
# set coding mask
for bitNumber in range (0, lengthOfData):
codingMask <<= 1
codingMask |= 1
# calculate number to pack
packingValue = ( int( value / packingRule['resolution'] ) ) + packingRule['offset']
# ensure that the value is not bigger than the coding mask
packingValue &= codingMask
# configure pre data mask
preDataMask = 0
# set coding mask
for bitNumber in range (0, startBit):
preDataMask <<= 1
preDataMask |= 1
# configure pre data mask
postDataMask = 0
# set coding mask
for bitNumber in range( 0, ( 64 - endBit ) ):
postDataMask <<= 1
postDataMask |= 1
postDataMask <<= endBit
# save existing data
preDataSet = packedData & preDataMask
currDataSet = packingValue << startBit
postDataSet = packedData & postDataMask
packedData = postDataSet | currDataSet | preDataSet
return packedData.to_bytes(8, byteorder='little')
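if __name__ == '__main__':
    # Hedged example (the extract rule is illustrative, not from any real CAN
    # database): a 16-bit field spanning bytes 0..1 of a little-endian frame,
    # scaled by resolution 0.5. Raw value 16 decodes to 8.0.
    rule = {'start_byte': 0, 'start_bit': 0, 'end_byte': 1, 'end_bit': 7,
            'resolution': 0.5, 'offset': 0}
    print(CanIDService.decode(bytearray(b'\x10\x00'), rule))  # prints 8.0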
|
100024
|
from ..factory import Method
class getGroupsInCommon(Method):
user_id = None # type: "int32"
offset_chat_id = None # type: "int53"
limit = None # type: "int32"
|
100038
|
import pytest
from usbq.plugins.hexdump import Hexdump
from usbq.usbmitm_proto import USBMessageDevice
from usbq.usbmitm_proto import USBMessageHost
@pytest.mark.parametrize('cls', [USBMessageDevice, USBMessageHost])
def test_hexdump(capsys, cls):
pkt = cls()
assert hasattr(pkt, 'content')
Hexdump().usbq_log_pkt(pkt)
captured = capsys.readouterr()
assert len(captured.out) > 0
|
100053
|
import platform
from pathlib import Path
import numpy as np
import torch
from spconv.pytorch import ops
from spconv.pytorch.conv import (SparseConv2d, SparseConv3d, SparseConvTranspose2d,
SparseConvTranspose3d, SparseInverseConv2d,
SparseInverseConv3d, SubMConv2d, SubMConv3d)
from spconv.pytorch.core import SparseConvTensor
from spconv.pytorch.identity import Identity
from spconv.pytorch.modules import SparseModule, SparseSequential
from spconv.pytorch.ops import ConvAlgo
from spconv.pytorch.pool import SparseMaxPool2d, SparseMaxPool3d
from spconv.pytorch.tables import AddTable, ConcatTable, JoinTable
class ToDense(SparseModule):
"""convert SparseConvTensor to NCHW dense tensor.
"""
def forward(self, x: SparseConvTensor):
return x.dense()
class RemoveGrid(SparseModule):
"""remove pre-allocated grid buffer.
"""
def forward(self, x: SparseConvTensor):
x.grid = None
return x
|
100056
|
from .async_find_opportunities import *
from .async_build_markets import *
from .bellman_multi_graph import bellman_ford_multi, NegativeWeightFinderMulti
from .bellmannx import bellman_ford, calculate_profit_ratio_for_path, NegativeWeightFinder, NegativeWeightDepthFinder, \
find_opportunities_on_exchange, get_starting_volume
from .utils import *
from .fetch_exchange_tickers import *
from .settings import *
from .multi_graph_builder import *
|
100085
|
from slack_sdk.web.async_client import AsyncWebClient
class AsyncUpdate:
"""`update()` utility to tell Slack the processing results of a `save` listener.
async def save(ack, view, update):
await ack()
values = view["state"]["values"]
task_name = values["task_name_input"]["name"]
task_description = values["task_description_input"]["description"]
inputs = {
"task_name": {"value": task_name["value"]},
"task_description": {"value": task_description["value"]}
}
outputs = [
{
"type": "text",
"name": "task_name",
"label": "Task name",
},
{
"type": "text",
"name": "task_description",
"label": "Task description",
}
]
await update(inputs=inputs, outputs=outputs)
ws = AsyncWorkflowStep(
callback_id="add_task",
edit=edit,
save=save,
execute=execute,
)
app.step(ws)
    This utility is a thin wrapper of the workflows.updateStep API method.
    Refer to https://api.slack.com/methods/workflows.updateStep for details.
"""
def __init__(self, *, client: AsyncWebClient, body: dict):
self.client = client
self.body = body
async def __call__(self, **kwargs) -> None:
await self.client.workflows_updateStep(
workflow_step_edit_id=self.body["workflow_step"]["workflow_step_edit_id"],
**kwargs,
)
|
100103
|
import wx
class AuiManagerConfig(wx.Config):
"""
    Custom wxConfig class to handle the main frame size and position, plus
    all the panes of the AUI manager.
"""
def __init__(self, auiMgr, *args, **kwargs):
wx.Config.__init__(self, *args, **kwargs)
self.auiMgr = auiMgr
self.win = self.auiMgr.GetManagedWindow()
# Key constants
if self.win is not None:
winName = self.win.GetName()
self._keyWinPosX = winName + 'PosX'
self._keyWinPosY = winName + 'PosY'
self._keyWinSizeX = winName + 'SizeX'
self._keyWinSizeY = winName + 'SizeY'
self._keyWinMax = winName + 'Max'
self._keyPerspDefault = 'perspDefault'
def Save(self):
"""Save all panel layouts for the aui manager."""
# Get old window position and size. We'll use these instead of the
# maximized window's size and position.
winPosX = self.ReadInt(self._keyWinPosX)
winPosY = self.ReadInt(self._keyWinPosY)
winSizeX = self.ReadInt(self._keyWinSizeX)
winSizeY = self.ReadInt(self._keyWinSizeY)
self.DeleteAll()
if self.win is not None:
# Don't save maximized window properties
if not self.win.IsMaximized():
winPosX, winPosY = self.win.GetPosition()
winSizeX, winSizeY = self.win.GetSize()
# Save the managed window position and size
self.SavePosition(winPosX, winPosY)
self.SaveSize(winSizeX, winSizeY)
# Save the managed window state
winMax = self.win.IsMaximized()
self.WriteBool(self._keyWinMax, winMax)
# Save the current perspective as the default
self.Write(self._keyPerspDefault, self.auiMgr.SavePerspective())
def SavePosition(self, x, y):
"""Save the managed window's position."""
self.WriteInt(self._keyWinPosX, x)
self.WriteInt(self._keyWinPosY, y)
def SaveSize(self, x, y):
"""Save the managed window's size."""
self.WriteInt(self._keyWinSizeX, x)
self.WriteInt(self._keyWinSizeY, y)
def Load(self):
"""Load all panel layouts for the aui manager."""
if self.win is not None:
# Load the managed window state
winMax = self.ReadBool(self._keyWinMax)
self.win.Maximize(winMax)
# Load the managed window size
winSizeX = self.ReadInt(self._keyWinSizeX)
winSizeY = self.ReadInt(self._keyWinSizeY)
if winSizeX and winSizeY:
self.win.SetSize((winSizeX, winSizeY))
# Load the managed window position
winPosX = self.ReadInt(self._keyWinPosX)
winPosY = self.ReadInt(self._keyWinPosY)
if winPosX and winPosY:
self.win.SetPosition((winPosX, winPosY))
# Load the default perspective
winPersp = self.Read(self._keyPerspDefault)
self.auiMgr.LoadPerspective(winPersp)
|
100107
|
import struct
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
        if root is None:
            return struct.pack('i', 0)
        # Layout: [4-byte length of left blob][4-byte node value][left blob][right blob];
        # a zero length prefix marks an empty subtree.
        l = self.serialize(root.left)
        r = self.serialize(root.right)
        return struct.pack('i', len(l)) + struct.pack('i', root.val) + l + r
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
len_l = struct.unpack('i', data[:4])[0]
if len_l == 0:
return None
data = data[4:]
# val
val = struct.unpack('i', data[:4])[0]
data = data[4:]
# left
l = self.deserialize(data[:len_l])
data = data[len_l:]
# right
r = self.deserialize(data)
# build the node
node = TreeNode(val)
node.left = l
node.right = r
return node
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
def node(x, l=None, r=None):
n = TreeNode(x)
n.left = l
n.right = r
return n
def trees_equal(a, b):
if a is None and b is None:
return True
if a is None or b is None:
return False
return (
a.val == b.val and
trees_equal(a.left, b.left) and
trees_equal(a.right, b.right)
)
def test():
root = node(
1,
node(2),
node(
3,
node(4),
node(5),
),
)
codec = Codec()
got = codec.deserialize(codec.serialize(root))
assert trees_equal(root, got)
if __name__ == '__main__':
test()
|
100111
|
from tir import Webapp
import unittest
class GFEA030(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup('SIGAGFE','05/12/2020','T1','D MG 01 ','78')
inst.oHelper.Program('GFEA030')
def test_GFEA030_CT001(self):
self.oHelper.SetButton('Incluir')
self.oHelper.SetValue('Tipo ', '900001')
self.oHelper.SetValue('Descricao ', 'TIPO OCORRENCIA TESTE AUTOMACAO')
self.oHelper.SetValue('Evento ', '3')
self.oHelper.SetButton('Confirmar')
self.oHelper.SetButton('Fechar')
self.oHelper.SearchBrowse(' 900001')
self.oHelper.SetButton('Alterar')
self.oHelper.SetValue('Descricao ', 'TIPO OCORR ALTERADO TESTE AUTOMACAO')
self.oHelper.ClickGridCell('Motivo', 1)
self.oHelper.SetValue('Motivo', '000001', grid=True)
self.oHelper.LoadGrid()
self.oHelper.SetButton('Confirmar')
self.oHelper.SetButton('Fechar')
self.oHelper.SearchBrowse(' 900001')
self.oHelper.SetButton('Visualizar')
self.oHelper.SetButton('Fechar')
self.oHelper.SearchBrowse(' 900001')
self.oHelper.SetButton('Outras Ações','Copiar')
self.oHelper.SetValue('Tipo ', '900002')
self.oHelper.SetValue('Descricao ', 'TIPO OCORR 2 TESTE AUTOMACAO')
self.oHelper.SetButton('Confirmar')
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
|
100125
|
from sfaira.versions.topologies.mouse.embedding.ae import AE_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.linear import LINEAR_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.nmf import NMF_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vae import VAE_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vaeiaf import VAEIAF_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vaevamp import VAEVAMP_TOPOLOGIES
|
100155
|
from abc import ABC, abstractmethod
from enum import Enum
from typing import TYPE_CHECKING, Any, Callable, Union
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.key_binding.vi_state import InputMode
if TYPE_CHECKING:
from .application import Application
__all__ = [
"CursorShape",
"CursorShapeConfig",
"SimpleCursorShapeConfig",
"ModalCursorShapeConfig",
"DynamicCursorShapeConfig",
"to_cursor_shape_config",
]
class CursorShape(Enum):
# Default value that should tell the output implementation to never send
# cursor shape escape sequences. This is the default right now, because
# before this `CursorShape` functionality was introduced into
# prompt_toolkit itself, people had workarounds to send cursor shapes
# escapes into the terminal, by monkey patching some of prompt_toolkit's
    # internals. We don't want the default prompt_toolkit implementation to
    # interfere with that. E.g., IPython patches the `ViState.input_mode`
# property. See: https://github.com/ipython/ipython/pull/13501/files
_NEVER_CHANGE = "_NEVER_CHANGE"
BLOCK = "BLOCK"
BEAM = "BEAM"
UNDERLINE = "UNDERLINE"
BLINKING_BLOCK = "BLINKING_BLOCK"
BLINKING_BEAM = "BLINKING_BEAM"
BLINKING_UNDERLINE = "BLINKING_UNDERLINE"
class CursorShapeConfig(ABC):
@abstractmethod
def get_cursor_shape(self, application: "Application[Any]") -> CursorShape:
"""
Return the cursor shape to be used in the current state.
"""
AnyCursorShapeConfig = Union[CursorShape, CursorShapeConfig, None]
class SimpleCursorShapeConfig(CursorShapeConfig):
"""
Always show the given cursor shape.
"""
def __init__(self, cursor_shape: CursorShape = CursorShape._NEVER_CHANGE) -> None:
self.cursor_shape = cursor_shape
def get_cursor_shape(self, application: "Application[Any]") -> CursorShape:
return self.cursor_shape
class ModalCursorShapeConfig(CursorShapeConfig):
"""
Show cursor shape according to the current input mode.
"""
def get_cursor_shape(self, application: "Application[Any]") -> CursorShape:
if application.editing_mode == EditingMode.VI:
if application.vi_state.input_mode == InputMode.INSERT:
return CursorShape.BEAM
if application.vi_state.input_mode == InputMode.REPLACE:
return CursorShape.UNDERLINE
# Default
return CursorShape.BLOCK
class DynamicCursorShapeConfig(CursorShapeConfig):
def __init__(
self, get_cursor_shape_config: Callable[[], AnyCursorShapeConfig]
) -> None:
self.get_cursor_shape_config = get_cursor_shape_config
def get_cursor_shape(self, application: "Application[Any]") -> CursorShape:
return to_cursor_shape_config(self.get_cursor_shape_config()).get_cursor_shape(
application
)
def to_cursor_shape_config(value: AnyCursorShapeConfig) -> CursorShapeConfig:
"""
Take a `CursorShape` instance or `CursorShapeConfig` and turn it into a
`CursorShapeConfig`.
"""
if value is None:
return SimpleCursorShapeConfig()
if isinstance(value, CursorShape):
return SimpleCursorShapeConfig(value)
return value
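# Hedged usage sketch: all three accepted forms normalize to a
# CursorShapeConfig instance.
#
#     to_cursor_shape_config(None)                      # never sends shape escapes
#     to_cursor_shape_config(CursorShape.BEAM)          # fixed beam cursor
#     to_cursor_shape_config(ModalCursorShapeConfig())  # follows vi input mode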
|
100164
|
import random
from .samples import Sample
from .synthesizers import SAW
from .notes import C5
from .internals.chords import _CHORD_QUALITY
from .internals.scales import _SCALE_MODE
from .synth_server import (
SonicPi,
use_synth,
)
__debug = False
def synth(name, note=None, attack=None, decay=None,
sustain_level=None, sustain=None, release=None,
cutoff=None, cutoff_attack=None, amp=None, pan=None):
arguments = locals()
arguments.pop('name')
parameters = ['{0}: {1}'.format(k, v) for k, v in arguments.items() if v is not None]
parameter = ''
if len(parameters) > 0: parameter = ',' + ','.join(parameters)
command = 'synth :{0}{1}'.format(name.name, parameter)
_debug('synth command={}'.format(command))
synth_server.synth(command)
def play(note, attack=None, decay=None,
sustain_level=None, sustain=None, release=None,
cutoff=None, cutoff_attack=None, amp=None, pan=None):
arguments = locals()
arguments.pop('note')
parameters = ['{0}: {1}'.format(k, v) for k, v in arguments.items() if v is not None]
parameter = ''
if len(parameters) > 0: parameter = ',' + ','.join(parameters)
command = 'play {0}{1}'.format(note, parameter)
_debug('play command={}'.format(command))
synth_server.play(command)
def play_pattern_timed(notes, times, release=None):
"""play notes
:param notes:
:param times:
:return:
"""
if not type(notes) is list: notes = [notes]
if not type(times) is list: times = [times]
for t in times:
for i in notes:
play(i, release=release)
sleep(t)
def play_pattern(notes):
""":param notes:
:return:
"""
play_pattern_timed(notes, 1)
def sample(sample, rate=None, attack=None, sustain=None,
release=None, beat_stretch=None, start=None,
finish=None, amp=None, pan=None):
arguments = locals()
arguments.pop('sample')
parameters = ['{0}: {1}'.format(k, v) for k, v in arguments.items() if v is not None]
parameter = ''
command = ''
if len(parameters) > 0: parameter = ',' + ','.join(parameters)
if type(sample) == Sample:
command = 'sample :{0}{1}'.format(sample.name, parameter)
else:
command = 'sample "{0}"{1}'.format(sample, parameter)
_debug('sample command={}'.format(command))
synth_server.sample(command)
def sleep(duration):
"""the same as time.sleep
:param duration:
:return:
"""
synth_server.sleep(duration)
_debug('sleep', duration)
def sample_duration(sample):
"""Returns the duration of the sample (in seconds)
:param sample:
:return: number
"""
return sample.duration
def one_in(max):
"""random function returns True in one of max cases
:param max:
:return: boolean
"""
return random.randint(1, max) == 1
def invert_pitches(pitches, inversion):
"""Inverts a list of pitches, wrapping the top pitches an octave below the root
:param pitches: list
:param inversion: int
:return: list
"""
for i in range(1, (inversion % (len(pitches)))+1):
pitches[-i] = pitches[-i] - 12
pitches.sort()
return pitches
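# Hedged worked example: the first inversion of a C major triad in MIDI
# numbers drops the top pitch an octave:
#     invert_pitches([60, 64, 67], 1)  ->  [55, 60, 64]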
def chord(root_note, chord_quality, inversion=None):
"""Generates a list of notes of a chord
:param root_note:
:param chord_quality:
:param inversion:
:return: list
"""
result = []
n = root_note
half_tone_steps = _CHORD_QUALITY[chord_quality]
for i in half_tone_steps:
q = n + i
result.append(q)
if inversion:
result = invert_pitches(result, inversion)
return result
def scale(root_note, scale_mode, num_octaves=1):
"""Genarates a liste of notes of scale
:param root_note:
:param scale_mode:
:param num_octaves:
:return: list
"""
result = []
n = root_note
half_tone_steps = _SCALE_MODE[scale_mode]
for o in range(num_octaves):
n = root_note + o * 12
result.append(n)
for i in half_tone_steps:
n = n + i
result.append(n)
return result
def run(command):
synth_server.run(command)
def stop():
synth_server.stop()
def send_message(message, *parameter):
synth_server.send_message(message, *parameter)
def start_recording():
synth_server.start_recording()
def stop_recording():
synth_server.stop_recording()
def save_recording(name):
synth_server.save_recording(name)
synth_server = SonicPi()
def set_server_parameter(udp_ip="", udp_port=-1, udp_port_osc_message=-1):
synth_server.set_parameter(udp_ip, udp_port, udp_port_osc_message)
def _debug(*args):
if __debug: print(args)
if __name__ == '__main__':
use_synth(SAW)
play(C5, amp=2, pan=-1)
|
100208
|
import vcs
import cdms2
import os
import MV2
f = cdms2.open(os.path.join(vcs.sample_data, "clt.nc"))
s = f("clt", time=slice(0, 1), squeeze=1)
s = MV2.masked_less(s, 65.)
x = vcs.init()
gm = x.createisofill()
gm.missing = 252
x.plot(s, gm)
x.png(gm.g_name)
x.interact()
|
100231
|
import numpy as np
class ClusterProcessor(object):
def __init__(self, dataset):
self.dataset = dataset
self.dtype = np.float32
def __len__(self):
return self.dataset.size
def build_adj(self, node, edge):
node = list(node)
abs2rel = {}
rel2abs = {}
for i, n in enumerate(node):
abs2rel[n] = i
rel2abs[i] = n
size = len(node)
adj = np.eye(size)
for e in edge:
w = 1.
if len(e) == 2:
e1, e2 = e
elif len(e) == 3:
e1, e2, dist = e
if not self.dataset.wo_weight:
w = 1. - dist
else:
raise ValueError('Unknown length of e: {}'.format(e))
v1 = abs2rel[e1]
v2 = abs2rel[e2]
adj[v1][v2] = w
adj[v2][v1] = w
if self.dataset.is_norm_adj:
adj /= adj.sum(axis=1, keepdims=True)
return adj, abs2rel, rel2abs
def build_features(self, node):
if self.dataset.featureless:
features = np.ones(len(node)).reshape(-1, 1)
else:
features = self.dataset.features[node, :]
return features
def __getitem__(self, idx):
raise NotImplementedError
|
100236
|
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor
from itertools import combinations
from flyingsquid.helpers import *
import numpy as np
import math
from tqdm import tqdm
import sys
import random
class Mixin:
'''
Functions to compute observable properties.
'''
def _compute_class_balance(self, class_balance=None, Y_dev=None):
# generate class balance of Ys
Ys_ordered = [ 'Y_{}'.format(i) for i in range(self.v) ]
cardinalities = [ 2 for i in range(self.v) ]
if class_balance is not None:
class_balance = class_balance / sum(class_balance)
cb = JointProbabilityDistribution(
Ys_ordered, cardinalities, class_balance
)
elif Y_dev is not None:
vals = { Y: (-1, 1) for Y in Ys_ordered }
Y_vecs = sorted([
[ vec_dict[Y] for Y in Ys_ordered ]
for vec_dict in dict_product(vals)
])
counts = {
tuple(Y_vec): 0
for Y_vec in Y_vecs
}
for data_point in Y_dev:
counts[tuple(data_point)] += 1
cb = JointProbabilityDistribution(
Ys_ordered, cardinalities,
[
float(counts[tuple(Y_vec)]) / len(Y_dev)
for Y_vec in Y_vecs
])
else:
num_combinations = 2 ** self.v
cb = JointProbabilityDistribution(
Ys_ordered, cardinalities, [
1. / num_combinations for i in range(num_combinations)
])
return cb
def _compute_Y_marginals(self, Y_marginals):
for marginal in Y_marginals:
nodes = [ 'Y_{}'.format(idx) for idx in marginal ]
Y_marginals[marginal] = self.cb.marginal_distribution(
nodes,
inplace=False
)
return Y_marginals
def _compute_Y_equals_one(self, Y_equals_one):
# compute from class balance
for factor in Y_equals_one:
nodes = [ 'Y_{}'.format(idx) for idx in factor ]
Y_marginal = self.cb.marginal_distribution(
nodes,
inplace=False
)
vals = { Y: (-1, 1) for Y in nodes }
Y_vecs = sorted([
[ vec_dict[Y] for Y in nodes ]
for vec_dict in dict_product(vals)
])
# add up the probabilities of all the vectors whose values multiply to +1
total_prob = 0
for Y_vec in Y_vecs:
if np.prod(Y_vec) == 1:
vector_prob = Y_marginal.reduce(
[
(Y_i, Y_val if Y_val == 1 else 0)
for Y_i, Y_val in zip(nodes, Y_vec)
],
inplace=False
).values
total_prob += vector_prob
Y_equals_one[factor] = total_prob
return Y_equals_one
|
100237
|
import numpy as np
from hand import Hand
iterations = 250000
starting_size = 8 #inclusive
mullto = 7 #inclusive
hand = Hand("decklists/affinity.txt")
hand_types = ["t1 2-drop", "t1 3-drop"]
hand_counts = np.zeros(((starting_size + 1) - mullto,len(hand_types)))
totals = np.zeros(((starting_size + 1) - mullto,1))
zero_creatures = ["Memnite", "Ornithopter"]
zero_others = "Welding Jar"
ones = ["Signal Pest", "Vault Skirge"]
twos = ["Arcbound Ravager", "Cranial Plating", "Steel Overseer"]
threes = ["Etched Champion", "Master of Etherium"]
lands = ["Darksteel Citadel", "Spire of Industry", "Glimmervoid", "Inkmoth Nexus", "Blinkmoth Nexus", "Island"]
for i in range(iterations):
for j in range(0,(starting_size + 1) - mullto):
hand.new_hand(starting_size - j)
count_opal = hand.count_of("Mox Opal")
count_lands = hand.count_of(lands)
has_drum = hand.contains("Springleaf Drum")
count_zero_creatures = hand.count_of(zero_creatures)
count_zeros = count_zero_creatures + hand.count_of(zero_others) + hand.contains("Darksteel Citadel")
count_ones = hand.count_of(ones) + has_drum * 1
has_two = hand.contains(twos)
has_three = hand.contains(threes)
t1_opal = (count_zeros >= 2) or (has_drum and (count_zero_creatures > 0) and (count_lands > 0))
t1_pay_opal = (count_ones > 0) and (count_zeros > 0) and (count_lands > 0)
t1_mana = count_opal * t1_opal + (count_lands > 0) + ((not t1_opal) * t1_pay_opal * max((count_opal - 1),0))
# t2_accel = t1_accel or (has_drum and (count_zero_creatures > 0)) or ((count_ones > 0) and (count_zeros > 0) and (count_opal > 0)) \
results = [(t1_mana >= 2) and has_two, (t1_mana >= 3) and has_three]
hand_counts[starting_size - j - mullto,:] += results
totals[starting_size - j - mullto,:] += 1
# print(np.flip(hand_counts, axis = 0))
# print(np.flip(totals, axis = 0))
p_hands = hand_counts / totals
with open("output/affinity_output.csv","wb") as file:
file.write(str(iterations) + " iterations\n\n")
# hand size headers
file.write("hand sizes/types,")
for type in hand_types:
file.write(str(type) + ",")
#results
file.write("\n")
for size in reversed(range(len(p_hands))):
file.write(str(size + mullto) + ",")
for p in p_hands[size,:]:
file.write(str(p) + ",")
file.write("\n")
print(np.flip(p_hands, axis = 0))
|
100288
|
from django.db import models
from django.template.defaultfilters import title
from django.utils.datastructures import SortedDict
from calaccess_campaign_browser.templatetags.calaccesscampaignbrowser import (
jsonify
)
class BaseModel(models.Model):
class Meta:
abstract = True
def meta(self):
return self._meta
def klass(self):
return self.__class__
def doc(self):
return self.__doc__
def to_dict(self):
d = SortedDict({})
for f in self._meta.fields:
d[f.verbose_name] = getattr(self, f.name)
return d
def to_json(self):
return jsonify(self)
class AllCapsNameMixin(BaseModel):
"""
Abstract model with name cleaners we can reuse across models.
"""
class Meta:
abstract = True
def __unicode__(self):
return self.clean_name
@property
def short_name(self, character_limit=60):
if len(self.clean_name) > character_limit:
return self.clean_name[:character_limit] + "..."
return self.clean_name
@property
def clean_name(self):
"""
A cleaned up version of the ALL CAPS names that are provided by
the source data.
"""
n = self.name
n = n.strip()
n = n.lower()
n = title(n)
n = n.replace("A. D.", "A.D.")
        force_lowercase = ['Of', 'For', 'To', 'By']
        s = []
        for p in n.split(" "):
            if p in force_lowercase:
                s.append(p.lower())
            else:
                s.append(p)
        n = " ".join(s)
        force_uppercase = [
            'Usaf', 'Pac', 'Ca', 'Ad', 'Rcc', 'Cdp', 'Aclu',
            'Cbpa-Pac', 'Aka', 'Aflac',
        ]
        s = []
        for p in n.split(" "):
            if p in force_uppercase:
                s.append(p.upper())
            else:
                s.append(p)
        n = " ".join(s)
n = n.replace("Re-Elect", "Re-elect")
n = n.replace("Political Action Committee", "PAC")
return n
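# Hedged worked example of AllCapsNameMixin.clean_name (the name is
# illustrative): 'COMMITTEE FOR CA' becomes 'Committee for CA', since 'For'
# is forced lowercase and 'Ca' is forced to the 'CA' abbreviation.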
|
100344
|
import os
import pytest
from ozy import OzyError
from ozy.files import walk_up_dirs, get_ozy_dir
def test_ozy_dirs():
ozy_dir = get_ozy_dir()
assert ozy_dir is not None
    home = os.environ['HOME']
    del os.environ['HOME']
    try:
        with pytest.raises(OzyError):
            get_ozy_dir()
    finally:
        # Restore HOME even if the assertion fails, so later tests are unaffected.
        os.environ['HOME'] = home
def test_walk_up_dirs():
test_path = os.path.join(os.path.sep, 'one', 'two', 'three')
assert [
os.path.join(os.path.sep, 'one', 'two', 'three'),
os.path.join(os.path.sep, 'one', 'two'),
os.path.join(os.path.sep, 'one'),
os.path.sep
] == [x for x in walk_up_dirs(test_path)]
|
100360
|
class CommandError(Exception):
"""
Exception class indicating a problem while executing a stapler command.
"""
pass
OPTIONS = None # optparse options
def main(arguments=None):
from . import stapler
stapler.main(arguments)
|
100378
|
import logging
from openpyxl import Workbook
from output.ReportBase import ReportBase
class RawMaturityAssessmentReport(ReportBase):
def createWorkbook(self, jobs, controllerData, jobFileName):
for reportType in ["apm", "brum", "mrum"]:
logging.info(f"Creating {reportType} Maturity Assessment Raw Report")
# Create Report with Raw Maturity Assessment Report
workbook = Workbook()
filteredJobs = [job for job in jobs if job.componentType == reportType]
if filteredJobs:
del workbook["Sheet"]
else:
summarySheet = workbook["Sheet"]
summarySheet.title = "Summary"
for jobStep in filteredJobs:
jobStep.reportData(workbook, controllerData, type(jobStep).__name__, False, False)
logging.debug(f"Saving Raw MaturityAssessment-{reportType} Workbook")
workbook.save(f"output/{jobFileName}/{jobFileName}-MaturityAssessmentRaw-{reportType}.xlsx")
|
100404
|
import SimpleHTTPServer
import SocketServer
import os
PORT = 8000
class Allow(SimpleHTTPServer.SimpleHTTPRequestHandler):
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
if ctype.startswith('text/'):
mode = 'r'
else:
mode = 'rb'
try:
f = open(path, mode)
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
self.send_header("Access-Control-Allow-Origin", "*")
if path.endswith("png") or path.endswith("gif") or path.endswith("jpg"):
# 2 minutes cache
self.send_header("Cache-Control", "max-age=120");
self.end_headers()
return f
SocketServer.ThreadingTCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer(("", PORT), Allow)
print "serving at port", PORT
httpd.serve_forever()
|
100425
|
import logging
from pyfcm import FCMNotification
from spacelaunchnow.config import keys
from spacelaunchnow import config
logger = logging.getLogger(__name__)
class EventNotificationHandler:
def __init__(self, debug=None):
if debug is None:
self.DEBUG = config.DEBUG
else:
self.DEBUG = debug
def send_ten_minute_notification(self, event):
self.send_notification(event, 'event_notification')
def send_webcast_notification(self, event):
self.send_notification(event, 'event_webcast', webcast=True)
def build_data(self, event, type):
if event.video_url:
webcast = True
else:
webcast = False
feature_image = None
if event.feature_image and hasattr(event.feature_image, 'url'):
feature_image = event.feature_image.url
return {
"notification_type": type,
"click_action": "FLUTTER_NOTIFICATION_CLICK",
"event": {
"id": event.id,
"name": event.name,
"description": event.description,
"type": {
"id": event.type.id,
"name": event.type.name,
},
"date": event.date.strftime("%B %d, %Y %H:%M:%S %Z"),
"location": event.location,
"news_url": event.news_url,
"video_url": event.video_url,
"webcast_live": event.webcast_live,
"feature_image": feature_image,
},
"webcast": webcast
}
def build_v2_topics(self):
if self.DEBUG:
topic = "'debug_v2' in topics && 'events' in topics"
else:
topic = "'prod_v2' in topics && 'events' in topics"
return topic
def build_v3_topics(self):
if self.DEBUG:
topic = "'debug_v3' in topics && 'events' in topics"
else:
topic = "'prod_v3' in topics && 'events' in topics"
return topic
def build_flutter_v2_topics(self):
if self.DEBUG:
topic = "'flutter_debug_v2' in topics && 'events' in topics"
else:
topic = "'flutter_production_v2' in topics && 'events' in topics"
return topic
def build_flutter_v3_topics(self):
if self.DEBUG:
topic = "'flutter_debug_v3' in topics && 'events' in topics"
else:
topic = "'flutter_production_v3' in topics && 'events' in topics"
return topic
def send_notification(self, event, event_type, webcast: bool = False):
data = self.build_data(event, event_type)
# Send Android notif
self.send_to_fcm(self.build_v2_topics(), data)
self.send_to_fcm(self.build_v3_topics(), data)
# Send Flutter notif
self.send_flutter_to_fcm(self.build_flutter_v2_topics(), data, webcast)
self.send_flutter_to_fcm(self.build_flutter_v3_topics(), data, webcast)
def send_to_fcm(self, topics, data):
logger.info('----------------------------------------------------------')
logger.info('Notification Data: %s' % data)
logger.info('Topics: %s' % topics)
push_service = FCMNotification(api_key=keys['FCM_KEY'])
notification = push_service.notify_topic_subscribers(data_message=data,
condition=topics,
time_to_live=86400)
logger.info(notification)
logger.info('----------------------------------------------------------')
def send_flutter_to_fcm(self, topics, data, webcast: bool = False):
logger.info('----------------------------------------------------------')
logger.info('Flutter Notification')
logger.info('Notification Data: %s' % data)
logger.info('Topics: %s' % topics)
push_service = FCMNotification(api_key=keys['FCM_KEY'])
if webcast:
message_body = "Live webcast is available!"
else:
message_body = data['event']['description']
notification = push_service.notify_topic_subscribers(data_message=data,
condition=topics,
time_to_live=86400,
message_title=data['event']['name'],
message_body=message_body)
logger.info(notification)
logger.info('----------------------------------------------------------')
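# Minimal sketch: exercising build_data with a hypothetical stub event.
# Real callers pass a Django model instance exposing these attributes.
if __name__ == "__main__":
    from types import SimpleNamespace
    from datetime import datetime, timezone
    stub = SimpleNamespace(
        id=1, name="Launch", description="Test event", location="Pad 39A",
        news_url=None, video_url=None, webcast_live=False, feature_image=None,
        type=SimpleNamespace(id=1, name="Static Fire"),
        date=datetime.now(timezone.utc),
    )
    print(EventNotificationHandler(debug=True).build_data(stub, "event_notification"))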
|
100529
|
from setuptools import setup, Extension
from torch.utils import cpp_extension
import os
stonne_src_dir='../../stonne/src'
my_pwd=os.path.abspath(".")
include_dir=my_pwd+'/../../stonne/include/'
external_dir=my_pwd+'/../../stonne/external/'
list_of_src_files_to_link=['torch_stonne.cpp', '../../stonne/stonne_linker_src/stonne_linker.cpp']
#Appending STONNE code to the list in order to link the sources
for filename in os.listdir(stonne_src_dir):
if((filename != "main.cpp") and (filename.endswith("cpp"))):
filename_path = os.path.join(stonne_src_dir, filename)
#print(filename_path)
list_of_src_files_to_link.append(filename_path)
print(list_of_src_files_to_link)
setup(name='torch_stonne',
ext_modules=[cpp_extension.CppExtension('torch_stonne', list_of_src_files_to_link, include_dirs=[include_dir, external_dir])],
cmdclass={'build_ext': cpp_extension.BuildExtension})
|
100535
|
import numpy as np
from sklearn import model_selection
import typing as t
from copy import copy
from ..mltypes import RandomState
from ..data.dataset import Dataset
class DataSplit:
def get_splits(self, dataset: Dataset) -> t.Generator[t.Tuple[Dataset, Dataset], None, None]:
raise NotImplementedError
class TrainTestSplit(DataSplit):
def __init__(self,
train_size: t.Union[float, int] = 0.7,
random_state: RandomState = None,
shuffle: bool = True
):
self.train_size = train_size
self.random_state = random_state
self.shuffle = shuffle
def get_splits(self, dataset: Dataset) -> t.Generator[t.Tuple[Dataset, Dataset], None, None]:
all_rows = dataset.ids
train_rows, test_rows = model_selection.train_test_split(all_rows,
train_size=self.train_size,
random_state=self.random_state,
shuffle=self.shuffle)
train_dataset = copy(dataset)
train_dataset.ids = train_rows
test_dataset = copy(dataset)
test_dataset.ids = test_rows
yield train_dataset, test_dataset
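# Usage sketch: get_splits yields exactly one (train, test) pair, assuming a
# Dataset whose `ids` attribute lists its row identifiers:
# splitter = TrainTestSplit(train_size=0.8, random_state=42)
# train_ds, test_ds = next(splitter.get_splits(dataset))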
|
100563
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from app import sf_manager, app
from panels import opportunities, cases, leads
server = app.server
app.layout = html.Div(
[
html.Div(
className="row header",
children=[
html.Button(id="menu", children=dcc.Markdown("≡")),
html.Span(
className="app-title",
children=[
dcc.Markdown("**CRM App**"),
html.Span(
id="subtitle",
children=dcc.Markdown("  using Salesforce API"),
style={"font-size": "1.8rem", "margin-top": "15px"},
),
],
),
html.Img(src=app.get_asset_url("logo.png")),
html.A(
id="learn_more",
children=html.Button("Learn More"),
href="https://plot.ly/dash/",
),
],
),
html.Div(
id="tabs",
className="row tabs",
children=[
dcc.Link("Opportunities", href="/"),
dcc.Link("Leads", href="/"),
dcc.Link("Cases", href="/"),
],
),
html.Div(
id="mobile_tabs",
className="row tabs",
style={"display": "none"},
children=[
dcc.Link("Opportunities", href="/"),
dcc.Link("Leads", href="/"),
dcc.Link("Cases", href="/"),
],
),
dcc.Store( # opportunities df
id="opportunities_df",
data=sf_manager.get_opportunities().to_json(orient="split"),
),
dcc.Store( # leads df
id="leads_df", data=sf_manager.get_leads().to_json(orient="split")
),
dcc.Store(
id="cases_df", data=sf_manager.get_cases().to_json(orient="split")
), # cases df
dcc.Location(id="url", refresh=False),
html.Div(id="tab_content"),
html.Link(
href="https://use.fontawesome.com/releases/v5.2.0/css/all.css",
rel="stylesheet",
),
html.Link(
href="https://fonts.googleapis.com/css?family=Dosis", rel="stylesheet"
),
html.Link(
href="https://fonts.googleapis.com/css?family=Open+Sans", rel="stylesheet"
),
html.Link(
href="https://fonts.googleapis.com/css?family=Ubuntu", rel="stylesheet"
),
],
className="row",
style={"margin": "0%"},
)
# Update the index
@app.callback(
[
Output("tab_content", "children"),
Output("tabs", "children"),
Output("mobile_tabs", "children"),
],
[Input("url", "pathname")],
)
def display_page(pathname):
tabs = [
dcc.Link("Opportunities", href="/dash-salesforce-crm/opportunities"),
dcc.Link("Leads", href="/dash-salesforce-crm/leads"),
dcc.Link("Cases", href="/dash-salesforce-crm/cases"),
]
if pathname == "/dash-salesforce-crm/opportunities":
tabs[0] = dcc.Link(
dcc.Markdown("**■ Opportunities**"),
href="/dash-salesforce-crm/opportunities",
)
return opportunities.layout, tabs, tabs
elif pathname == "/dash-salesforce-crm/cases":
tabs[2] = dcc.Link(
dcc.Markdown("**■ Cases**"), href="/dash-salesforce-crm/cases"
)
return cases.layout, tabs, tabs
tabs[1] = dcc.Link(
dcc.Markdown("**■ Leads**"), href="/dash-salesforce-crm/leads"
)
return leads.layout, tabs, tabs
@app.callback(
Output("mobile_tabs", "style"),
[Input("menu", "n_clicks")],
[State("mobile_tabs", "style")],
)
def show_menu(n_clicks, tabs_style):
if n_clicks:
if tabs_style["display"] == "none":
tabs_style["display"] = "flex"
else:
tabs_style["display"] = "none"
return tabs_style
if __name__ == "__main__":
app.run_server(debug=True)
|
100566
|
import numpy as np
from itertools import product
from deep_rlsp.envs.gridworlds.env import Env, Direction, get_grid_representation
class BasicRoomEnv(Env):
"""
Basic empty room with stochastic transitions. Used for debugging.
"""
def __init__(self, prob, use_pixels_as_observations=True):
self.height = 3
self.width = 3
self.init_state = (1, 1)
self.prob = prob
self.nS = self.height * self.width
self.nA = 5
super().__init__(1, use_pixels_as_observations=use_pixels_as_observations)
self.num_features = 2
self.default_action = Direction.get_number_from_direction(Direction.STAY)
self.num_features = len(self.s_to_f(self.init_state))
self.reset()
states = self.enumerate_states()
self.make_transition_matrices(states, range(self.nA), self.nS, self.nA)
self.make_f_matrix(self.nS, self.num_features)
def enumerate_states(self):
return product(range(self.width), range(self.height))
def get_num_from_state(self, state):
return np.ravel_multi_index(state, (self.width, self.height))
def get_state_from_num(self, num):
return np.unravel_index(num, (self.width, self.height))
def s_to_f(self, s):
return s
def _obs_to_f(self, obs):
return np.unravel_index(obs[0].argmax(), obs[0].shape)
def _s_to_obs(self, s):
layers = [[s]]
obs = get_grid_representation(self.width, self.height, layers)
return np.array(obs, dtype=np.float32)
# render_width = 64
# render_height = 64
# x, y = s
# obs = np.zeros((3, render_height, render_width), dtype=np.float32)
# obs[
# :,
# y * render_height : (y + 1) * render_height,
# x * render_width : (x + 1) * render_width,
# ] = 1
# return obs
def get_next_states(self, state, action):
# next_states = []
# for a in range(self.nA):
# next_s = self.get_next_state(state, a)
# p = 1 - self.prob if a == action else self.prob / (self.nA - 1)
# next_states.append((p, next_s, 0))
next_s = self.get_next_state(state, action)
next_states = [(self.prob, next_s, 0), (1 - self.prob, state, 0)]
return next_states
def get_next_state(self, state, action):
"""Returns the next state given a state and an action."""
action = int(action)
if action == Direction.get_number_from_direction(Direction.STAY):
pass
elif action < len(Direction.ALL_DIRECTIONS):
move_x, move_y = Direction.move_in_direction_number(state, action)
# New position is legal
if 0 <= move_x < self.width and 0 <= move_y < self.height:
state = move_x, move_y
else:
# Move only changes orientation, which we already handled
pass
else:
raise ValueError("Invalid action {}".format(action))
return state
def s_to_ansi(self, state):
return str(self.s_to_obs(state))
if __name__ == "__main__":
from gym.utils.play import play
env = BasicRoomEnv(1)
play(env, fps=5)
|
100609
|
import datetime
import pytest
from tartiflette.scalar.builtins.time import ScalarTime
@pytest.mark.parametrize(
"value,should_raise_exception,expected",
[
(None, True, "Time cannot represent value: < None >."),
(True, True, "Time cannot represent value: < True >."),
(False, True, "Time cannot represent value: < False >."),
("", True, "Time cannot represent value: < >."),
(0, True, "Time cannot represent value: < 0 >."),
(1, True, "Time cannot represent value: < 1 >."),
(3, True, "Time cannot represent value: < 3 >."),
(0.0, True, "Time cannot represent value: < 0.0 >."),
(1.0, True, "Time cannot represent value: < 1.0 >."),
(3.0, True, "Time cannot represent value: < 3.0 >."),
(0.1, True, "Time cannot represent value: < 0.1 >."),
(1.1, True, "Time cannot represent value: < 1.1 >."),
(3.1, True, "Time cannot represent value: < 3.1 >."),
("0", True, "Time cannot represent value: < 0 >."),
("1", True, "Time cannot represent value: < 1 >."),
("3", True, "Time cannot represent value: < 3 >."),
("0.0", True, "Time cannot represent value: < 0.0 >."),
("1.0", True, "Time cannot represent value: < 1.0 >."),
("3.0", True, "Time cannot represent value: < 3.0 >."),
("0.1", True, "Time cannot represent value: < 0.1 >."),
("1.1", True, "Time cannot represent value: < 1.1 >."),
("3.1", True, "Time cannot represent value: < 3.1 >."),
("0e0", True, "Time cannot represent value: < 0e0 >."),
("1e0", True, "Time cannot represent value: < 1e0 >."),
("3e0", True, "Time cannot represent value: < 3e0 >."),
("0e1", True, "Time cannot represent value: < 0e1 >."),
("1e1", True, "Time cannot represent value: < 1e1 >."),
("3e1", True, "Time cannot represent value: < 3e1 >."),
("0.1e1", True, "Time cannot represent value: < 0.1e1 >."),
("1.1e1", True, "Time cannot represent value: < 1.1e1 >."),
("3.1e1", True, "Time cannot represent value: < 3.1e1 >."),
("0.11e1", True, "Time cannot represent value: < 0.11e1 >."),
("1.11e1", True, "Time cannot represent value: < 1.11e1 >."),
("3.11e1", True, "Time cannot represent value: < 3.11e1 >."),
(float("inf"), True, "Time cannot represent value: < inf >."),
("A", True, "Time cannot represent value: < A >."),
("{}", True, "Time cannot represent value: < {} >."),
({}, True, "Time cannot represent value: < {} >."),
(Exception("LOL"), True, "Time cannot represent value: < LOL >."),
(
Exception,
True,
"Time cannot represent value: < <class 'Exception'> >.",
),
("15:52:52", True, "Time cannot represent value: < 15:52:52 >."),
(datetime.datetime(1970, 11, 10, 15, 52, 52), False, "15:52:52"),
],
)
def test_scalar_time_coerce_output(value, should_raise_exception, expected):
if should_raise_exception:
with pytest.raises(TypeError, match=expected):
ScalarTime().coerce_output(value)
else:
assert ScalarTime().coerce_output(value) == expected
@pytest.mark.parametrize(
"value,should_raise_exception,expected",
[
(None, True, "Time cannot represent value: < None >."),
(True, True, "Time cannot represent value: < True >."),
(False, True, "Time cannot represent value: < False >."),
("", True, "Time cannot represent value: < >."),
(0, True, "Time cannot represent value: < 0 >."),
(1, True, "Time cannot represent value: < 1 >."),
(3, True, "Time cannot represent value: < 3 >."),
(0.0, True, "Time cannot represent value: < 0.0 >."),
(1.0, True, "Time cannot represent value: < 1.0 >."),
(3.0, True, "Time cannot represent value: < 3.0 >."),
(0.1, True, "Time cannot represent value: < 0.1 >."),
(1.1, True, "Time cannot represent value: < 1.1 >."),
(3.1, True, "Time cannot represent value: < 3.1 >."),
("0", True, "Time cannot represent value: < 0 >."),
("1", True, "Time cannot represent value: < 1 >."),
("3", True, "Time cannot represent value: < 3 >."),
("0.0", True, "Time cannot represent value: < 0.0 >."),
("1.0", True, "Time cannot represent value: < 1.0 >."),
("3.0", True, "Time cannot represent value: < 3.0 >."),
("0.1", True, "Time cannot represent value: < 0.1 >."),
("1.1", True, "Time cannot represent value: < 1.1 >."),
("3.1", True, "Time cannot represent value: < 3.1 >."),
("0e0", True, "Time cannot represent value: < 0e0 >."),
("1e0", True, "Time cannot represent value: < 1e0 >."),
("3e0", True, "Time cannot represent value: < 3e0 >."),
("0e1", True, "Time cannot represent value: < 0e1 >."),
("1e1", True, "Time cannot represent value: < 1e1 >."),
("3e1", True, "Time cannot represent value: < 3e1 >."),
("0.1e1", True, "Time cannot represent value: < 0.1e1 >."),
("1.1e1", True, "Time cannot represent value: < 1.1e1 >."),
("3.1e1", True, "Time cannot represent value: < 3.1e1 >."),
("0.11e1", True, "Time cannot represent value: < 0.11e1 >."),
("1.11e1", True, "Time cannot represent value: < 1.11e1 >."),
("3.11e1", True, "Time cannot represent value: < 3.11e1 >."),
(float("inf"), True, "Time cannot represent value: < inf >."),
("A", True, "Time cannot represent value: < A >."),
("{}", True, "Time cannot represent value: < {} >."),
({}, True, "Time cannot represent value: < {} >."),
(Exception("LOL"), True, "Time cannot represent value: < LOL >."),
(
Exception,
True,
"Time cannot represent value: < <class 'Exception'> >.",
),
("15:52:52", False, datetime.datetime(1900, 1, 1, 15, 52, 52)),
(
datetime.datetime(1970, 11, 10, 15, 52, 52),
True,
"Time cannot represent value: < 1970-11-10 15:52:52 >.",
),
],
)
def test_scalar_time_coerce_input(value, should_raise_exception, expected):
if should_raise_exception:
with pytest.raises(TypeError, match=expected):
ScalarTime().coerce_input(value)
else:
assert ScalarTime().coerce_input(value) == expected
|
100617
|
import time
def newJson(json):
print(op('container1/record')[0])
if op('container1/record')[0] == 1:
jsonRecord = op('json_record')
jsonRecord[0,0] = int(time.time())
jsonRecord[0,1] = json
op('json_record_out').par.write.pulse()
return
|
100624
|
from future.builtins import range
from .utils import memoize
from . import context
def redis_key(name, *args):
prefix = context.get_current_config()["redis_prefix"]
if name == "known_subqueues":
return "%s:ksq:%s" % (prefix, args[0].root_id)
elif name == "queue":
return "%s:q:%s" % (prefix, args[0].id)
elif name == "started_jobs":
return "%s:s:started" % prefix
elif name == "paused_queues":
return "%s:s:paused" % prefix
elif name == "notify":
return "%s:notify:%s" % (prefix, args[0].root_id)
@memoize
def redis_zaddbyscore():
""" Increments multiple keys in a sorted set & returns them """
return context.connections.redis.register_script("""
local zset = KEYS[1]
local min = ARGV[1]
local max = ARGV[2]
local offset = ARGV[3]
local count = ARGV[4]
local score = ARGV[5]
local data = redis.call('zrangebyscore', zset, min, max, 'LIMIT', offset, count)
for i, member in pairs(data) do
redis.call('zadd', zset, score, member)
end
return data
""")
@memoize
def redis_zpopbyscore():
""" Pops multiple keys by score """
return context.connections.redis.register_script("""
local zset = KEYS[1]
local min = ARGV[1]
local max = ARGV[2]
local offset = ARGV[3]
local count = ARGV[4]
local data = redis.call('zrangebyscore', zset, min, max, 'LIMIT', offset, count)
if #data > 0 then
redis.call('zremrangebyrank', zset, 0, #data - 1)
end
return data
""")
@memoize
def redis_lpopsafe():
""" Safe version of LPOP that also adds the key in a "started" zset """
return context.connections.redis.register_script("""
local key = KEYS[1]
local zset_started = KEYS[2]
local count = ARGV[1]
local now = ARGV[2]
local left = ARGV[3]
local data = {}
local current = nil
for i=1, count do
if left == '1' then
current = redis.call('lpop', key)
else
current = redis.call('rpop', key)
end
if current == false then
return data
end
data[i] = current
redis.call('zadd', zset_started, now, current)
end
return data
""")
def redis_group_command(command, cnt, redis_key):
with context.connections.redis.pipeline(transaction=False) as pipe:
for _ in range(cnt):
getattr(pipe, command)(redis_key)
return [x for x in pipe.execute() if x]
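# Usage sketch (hypothetical key names): the registered scripts are redis-py
# Script objects invoked with keys/args, e.g. pop up to 10 queued jobs from
# the left while recording them as started at the current timestamp:
# import time
# jobs = redis_lpopsafe()(keys=[queue_key, started_key], args=[10, time.time(), '1'])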
|
100637
|
from django.db import models
class AudioFileMixin(models.Model):
audio_file = models.FileField()
class Meta:
abstract = True
|
100678
|
import sys, os, imp
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..')))
from pypy.tool import slaveproc
class IsolateSlave(slaveproc.Slave):
mod = None
def do_cmd(self, cmd):
cmd, data = cmd
if cmd == 'load':
assert self.mod is None
mod = data
if isinstance(mod, str):
mod = __import__(mod, {}, {}, ['__doc__'])
else:
dir, name = mod
file, pathname, description = imp.find_module(name, [dir])
try:
mod = imp.load_module(name, file, pathname, description)
finally:
if file:
file.close()
self.mod = mod
return 'loaded'
elif cmd == 'invoke':
assert self.mod is not None
func, args = data
try:
res = getattr(self.mod, func)(*args)
except KeyboardInterrupt:
raise
except:
exc_type = sys.exc_info()[0]
return ('exc', (exc_type.__module__, exc_type.__name__))
else:
return ('ok', res)
else:
return 'no-clue'
if __name__ == '__main__':
IsolateSlave().do()
|
100698
|
from sys import platform as sys_pf
if sys_pf == 'darwin':
import matplotlib
matplotlib.use("TkAgg")
import unittest
import numpy.testing as np_test
from scripts.algorithms.polynomial_predictor import PolynomialPredictor
class PolynomialPredictorTests(unittest.TestCase):
def test_static_sequence(self):
time_series = [1.0, 1.0, 1.0, 1.0, 1.0]
num_predicted_periods = 3
expected_prediction = [1] * num_predicted_periods
predictor = PolynomialPredictor(time_series, num_predicted_periods)
actual_prediction = predictor.predict_counts()
np_test.assert_almost_equal(actual_prediction, expected_prediction, decimal=4)
def test_linearly_increasing_sequence(self):
time_series = [8.9, 11.0, 13.0, 15.1, 17.0, 18.9, 21.0]
num_predicted_periods = 4
expected_prediction = [23.0, 25.0, 27.0, 29.0]
predictor = PolynomialPredictor(time_series, num_predicted_periods)
actual_prediction = predictor.predict_counts()
np_test.assert_almost_equal(actual_prediction, expected_prediction, decimal=0)
def test_quadratically_increasing_sequence(self):
values = list(map(lambda x: (x ** 2) - (3 * x) + 2, range(15)))
num_predicted_periods = 4
time_series = values[:-num_predicted_periods]
expected_prediction = values[-num_predicted_periods:]
predictor = PolynomialPredictor(time_series, num_predicted_periods)
actual_prediction = predictor.predict_counts()
np_test.assert_almost_equal(actual_prediction, expected_prediction, decimal=1)
|
100719
|
import time
import numpy
def getNotes():
return {
"id1": {
"noteId": "id1",
"userId": "user1",
"content": str(numpy.array([1,2,3,4])),
"createdAt": int(time.time()),
},
"id2": {
"noteId": "id2",
"userId": "user2",
"content": str(numpy.array([5,6,7,8])),
"createdAt": int(time.time()-1000),
},
}
|
100742
|
from nipype.interfaces.ants import N4BiasFieldCorrection
import sys
import os
import ast
if len(sys.argv) < 3:
    print("INPUT from ipython: run n4_bias_correction input_image dimension n_iterations(optional, form:[n_1,n_2,n_3,n_4]) output_image(optional)")
    sys.exit(1)
# if output_image is given (argv[4])
if len(sys.argv) > 4:
    n4 = N4BiasFieldCorrection(output_image=sys.argv[4])
else:
    n4 = N4BiasFieldCorrection()
# dimension of input image, input image
n4.inputs.dimension = int(sys.argv[2])
n4.inputs.input_image = sys.argv[1]
# if n_iterations arg is given (argv[3])
if len(sys.argv) > 3:
    n4.inputs.n_iterations = ast.literal_eval(sys.argv[3])
n4.run()
|
100744
|
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
import tensorflow as tf
def convertMetaModelToPbModel(meta_model, pb_model):
# Step 1
# import the model metagraph
saver = tf.train.import_meta_graph(meta_model + '.meta', clear_devices=True)
# make that as the default graph
graph = tf.get_default_graph()
sess = tf.Session()
# now restore the variables
saver.restore(sess, meta_model)
# Step 2
# Find the output name
for op in graph.get_operations():
print(op.name)
# Step 3
output_graph_def = graph_util.convert_variables_to_constants(
sess, # The session
sess.graph_def, # input_graph_def is useful for retrieving the nodes
["Placeholder", "output/Sigmoid"])
# Step 4
# output folder
output_fld = './'
# output pb file name
output_model_file = 'model.pb'
# write the graph
graph_io.write_graph(output_graph_def, pb_model + output_fld, output_model_file, as_text=False)
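# Usage sketch: freeze a checkpoint saved as model.ckpt.meta/.index/.data
# into ./model.pb. The output node names passed in Step 3 are model-specific
# and must match what Step 2 prints for your graph.
# convertMetaModelToPbModel('model.ckpt', './')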
|
100758
|
import errno
import os
import shutil
import numpy as np
import pandas as pd
import scipy.integrate, scipy.stats, scipy.optimize, scipy.signal
from scipy.stats import mannwhitneyu
import statsmodels.formula.api as smf
import pystan
def clean_folder(folder):
"""Create a new folder, or if the folder already exists,
delete all containing files
Args:
folder (string): Path to folder
"""
if os.path.isdir(folder):
shutil.rmtree(folder)
try:
os.makedirs(folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_data_for_stan(y):
"""Convenience function for
collecting data for STAN estimation
Args:
y (np vector): Data series for Bayesian filtering
Returns:
dict: Data for Stan estimation
"""
assert y.ndim == 1, \
"y must be a vector"
assert len(y) > 0, \
"y must have positive length"
assert isinstance(y, np.ndarray), \
"y must be a numpy array"
N_obs = len(pd.Series(y).dropna())
N_mis = np.sum(np.isnan(y))
ii_obs = list(range(1, N_obs + N_mis + 1))
ii_mis = []
if N_mis > 0:
for ii in np.argwhere(np.isnan(y)):
ii_mis.append(ii[0] + 1)
ii_obs.remove(ii[0] + 1)
return {'N_obs': N_obs,
'N_mis': N_mis,
'ii_obs': ii_obs,
'ii_mis': ii_mis,
'y_obs': pd.Series(y).dropna()}
def estimate_R(y, gamma, stm_missing, stm_no_missing, num_iter, num_chains, num_warmup, rng, sig_levels, full_output = False):
"""Estimate R using Bayesian Kalman
smoothing
Args:
y (np array): Data series for the growth rate of infected individuals
gamma (double): Inverse of average infectiousness duration
stm_missing (pickle): Stan model (for case with missing data)
stm_no_missing (pickle): Stan model (for case without missing data)
num_iter (int): Number of iterations
num_chains (int): Number of MCMC chains
num_warmup (int): Number of warmup periods
rng (obj): Numpy random state
sig_levels (list): List of significance levels for credible bounds
full_output (bool, optional): If True, return full output from Stan
Returns:
dict: Estimation results (point estimate of R, credible bounds, signal-to-noise diagnostics, and MCMC convergence statistics)
"""
assert y.ndim == 1, \
"y must be a vector"
assert len(y) > 0, \
"y must have positive length"
assert isinstance(y, np.ndarray), \
"y must be a numpy array"
assert isinstance(num_chains, int) and isinstance(num_iter, int) and isinstance(num_warmup, int), \
"num_chains, num_iter, and num_warmup must be integers"
assert num_chains > 0 and num_iter > 0 and num_warmup > 0, \
"num_chains, num_iter, and num_warmup must be positive"
assert len(sig_levels) >= 1 and all(isinstance(x, int) for x in sig_levels), \
"sig_levels must be a list with only integers"
# Get data in Stan format
s_data = get_data_for_stan(y)
# Estimate model
if np.sum(np.isnan(y)) > 0:
fit = stm_missing.sampling(data = s_data,
iter = num_iter,
chains = num_chains,
warmup = num_warmup,
verbose = False,
seed = rng)
else:
fit = stm_no_missing.sampling(data = s_data,
iter = num_iter,
chains = num_chains,
warmup = num_warmup,
verbose = False,
seed = rng)
fit_res = fit.extract(permuted = True)
# Collect results
res = {}
res['R'] = 1 + 1 / gamma * fit_res['mu'].mean(axis = 0)
for aa in sig_levels:
ub = 1 + 1 / gamma * np.percentile(fit_res['mu'], axis = 0, q = 100 - aa / 2.0)
lb = np.maximum(1 + 1 / gamma * np.percentile(fit_res['mu'], axis = 0, q = aa / 2.0), 0.0)
res['ub_{}'.format(100 - aa)] = ub
res['lb_{}'.format(100 - aa)] = lb
res['signal_to_noise'] = fit_res['signal_to_noise'].mean()
res['var_irregular'] = (1 / fit_res['precision_irregular']).mean()
# Extract convergence statistics
fit_summary = fit.summary()
df_conv_stats = pd.DataFrame(fit_summary['summary'])
df_conv_stats.columns = fit_summary['summary_colnames']
df_conv_stats['var_name'] = fit_summary['summary_rownames']
mask = df_conv_stats['var_name'].apply(lambda x: 'mu' in x)
df_conv_stats = df_conv_stats.loc[mask, ]
res['n_eff_pct'] = df_conv_stats['n_eff'].min() / float(num_chains * (num_iter - num_warmup))
res['Rhat_diff'] = (df_conv_stats['Rhat'] - 1).abs().max()
# If requested, extract full Stan fit
if full_output:
res['stan_fit'] = fit
return res
def mean_se(x, robust = True):
"""Aggregation function for
pandas to calculate standard errors
for the mean
Args:
x (series): pandas Series
robust (bool, optional): if True, calculate
heteroskedasticity-robust standard errors
Returns:
float: standard error
"""
x = pd.DataFrame(x)
x.columns = ['x']
if robust:
mod = smf.ols('x ~ 1', data = x).fit(cov_type = 'HC2')
else:
mod = smf.ols('x ~ 1', data = x).fit()
return mod.bse['Intercept']
def simulate_AR1(rho, sigma, T, shocks = None):
"""Simulate a time series for
an AR(1) process with
x_{t + 1} = rho x_t + eps_{t+1}
where
eps_{t + 1} ~ N(0, sigma ^ 2).
Initial condition is
x_0 ~ N(0, sigma ^ 2 / (1 - rho ^ 2))
Persistence parameter must lie in (-1, 1)
for an AR(1) to be simulated.
Args:
rho (float): AR(1) persistence parameter
sigma (float): Standard deviation of shocks
T (int): Length of simulated time series
shocks (array, optional): If provided,
use the time series in shocks for the disturbances (eps)
Returns:
dict: Dictionary, contains:
shocks (float): Simulated shocks (eps)
x (float): Simulated time series
"""
assert rho > - 1 and rho < 1, \
'Persistence parameter should be in (-1, 1).'
if shocks is None:
shocks = np.random.randn(1, T).flatten() * sigma
shocks[0] = np.random.randn(1, 1) * sigma / np.sqrt(1 - rho ** 2)
return {'shocks': shocks,
'x': scipy.signal.lfilter([1], [1, -rho], shocks)}
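# Quick sanity sketch: simulate a persistent AR(1) and check that the sample
# standard deviation is close to its stationary value sigma / sqrt(1 - rho^2).
if __name__ == '__main__':
    sim = simulate_AR1(rho=0.9, sigma=1.0, T=10000)
    print(np.std(sim['x']), 1.0 / np.sqrt(1 - 0.9 ** 2))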
|
100768
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.enqueue(assign, queue='long')
def assign():
users = frappe.get_all("User", {"user_type":"System User"})
for user in users:
user_roles = frappe.get_roles(user.name)
User = frappe.get_doc("User", {"name":user.name})
User.add_roles("Penalty Recipient")
if "Site Supervisor" in user_roles or "Shift Supervisor" in user_roles or "Project Manager" in user_roles:
User.add_roles("Penalty Issuer")
User.save(ignore_permissions=True)
frappe.db.commit()
|
100798
|
from small_text.integrations.pytorch.exceptions import PytorchNotFoundError
try:
from small_text.integrations.transformers.datasets import TransformersDataset
from small_text.integrations.transformers.classifiers.classification import (
TransformerModelArguments,
TransformerBasedClassification,
TransformerBasedEmbeddingMixin)
except PytorchNotFoundError:
pass
|
100846
|
from abc import ABCMeta, abstractmethod
class EqualizerTuning(object):
def __init__(self, playback_function, result_extractor, comparator, comparison_data_extractor=None):
"""
:param playback_function: A function that plays back the operation using the recording in the given id
:type playback_function: function
:param result_extractor: Extracts result from the recording and playback
:type result_extractor: function
:param comparator: A function used to create the equality status by comparing the expected vs actual result
:type comparator: function
:param comparison_data_extractor: Extracts optional data from the recording that will be passed to the
comparator
:type comparison_data_extractor: function
"""
self.playback_function = playback_function
self.result_extractor = result_extractor
self.comparator = comparator
self.comparison_data_extractor = comparison_data_extractor
class EqualizerTuner(object):
__metaclass__ = ABCMeta
@abstractmethod
def create_category_tuning(self, category):
"""
:param category: Category
:type category: basestring
:return: Tuning for category
:rtype: EqualizerTuning
"""
pass
|
100882
|
from django.contrib import admin
from django.urls import path, include
from .views import *
urlpatterns = [
path('tours/', TourView.as_view()),
path('tours/<int:tour_id>/', about_tour),
path('auth/', auth),
path('register/', register)
]
|
100907
|
import nltk
from nltk.collocations import BigramCollocationFinder
from nltk.corpus import webtext
from nltk.metrics import BigramAssocMeasures
# Lowercase the corpus tokens, build a bigram finder over them, and print the
# ten bigrams ranked highest by the likelihood-ratio association measure.
tokens = [t.lower() for t in webtext.words('grail.txt')]
finder = BigramCollocationFinder.from_words(tokens)
print(finder.nbest(BigramAssocMeasures.likelihood_ratio, 10))
|
100924
|
class Solution:
def decodeString(self, s: str) -> str:
St = []
num = 0
curr = ''
for c in s:
if c.isdigit():
num = num*10 + int(c)
elif c == '[':
St.append([num, curr])
num = 0
curr = ''
elif c == ']':
count, prev = St.pop()
curr = prev + count*curr
else:
curr += c
return curr
class Solution2:
def decodeString(self, s: str) -> str:
i = 0
def decode(s):
nonlocal i
result = []
while i < len(s) and s[i] != ']':
if s[i].isdigit():
num = 0
while i < len(s) and s[i].isdigit():
num = num*10 + int(s[i])
i += 1
i += 1
temp = decode(s)
i += 1
result += temp*num
else:
result.append(s[i])
i += 1
return result
return ''.join(decode(s))
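# Sanity sketch: both implementations agree on nested and sequential inputs.
assert Solution().decodeString("3[a2[c]]") == "accaccacc"
assert Solution2().decodeString("2[abc]3[cd]ef") == "abcabccdcdcdef"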
|
100930
|
import os.path
import re
from numpy.distutils.core import setup, Extension
from numpy.distutils.system_info import get_info
def find_version(*paths):
fname = os.path.join(os.path.dirname(__file__), *paths)
with open(fname) as fp:
code = fp.read()
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", code, re.M)
if match:
return match.group(1)
raise RuntimeError("Unable to find version string.")
# ext_cpwt = Extension(name='plateflex.cpwt',
# sources=['src/cpwt/cpwt.f90', 'src/cpwt/cpwt_sub.f90'],
# libraries=['gfortran'],
# library_dirs=get_info('gfortran').get('library_dirs'))
# ext_flex = Extension(name='plateflex.flex',
# sources=['src/flex/flex.f90'],
# libraries=['gfortran'],
# library_dirs=get_info('gfortran').get('library_dirs'))
ext_cpwt = Extension(name='plateflex.cpwt',
sources=['src/cpwt/cpwt.f90', 'src/cpwt/cpwt_sub.f90'])
ext_flex = Extension(name='plateflex.flex',
sources=['src/flex/flex.f90'])
setup(
name='plateflex',
version=find_version('plateflex', '__init__.py'),
description='Python package for estimating lithospheric elastic thickness',
author='<NAME>',
maintainer='<NAME>',
author_email='<EMAIL>',
url='https://github.com/paudetseis/PlateFlex',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Fortran',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'],
install_requires=['numpy>=1.15', 'pymc3', 'matplotlib', 'seaborn'],
python_requires='>=3.5',
tests_require=['pytest'],
ext_modules=[ext_cpwt, ext_flex],
packages=['plateflex'],
package_data={
'plateflex': [
'examples/data.zip',
'examples/Notebooks/*.ipynb']
}
)
|
100954
|
from coincurve import PrivateKey
from coincurve import PublicKey
from coincurve import verify_signature
SECPK1_N = (
115792089237316195423570985008687907852837564279074904382605163141518161494337
)
def sign_ecdsa(message, priv):
"""Gets ECDSA signature for message from private key."""
if not isinstance(message, (bytes, bytearray)):
raise TypeError(f"Invalid message")
if len(message) != 32:
raise ValueError(f"Invalid length message: {len(message)} != 32")
if not isinstance(priv, (bytes, bytearray)):
raise TypeError(f"Invalid private key")
if len(priv) != 32:
raise ValueError(f"Invalid length private key: {len(priv)} != 32")
privkey = PrivateKey(priv)
sig_check = privkey.sign_recoverable(message, hasher=None)
return sig_check
def verify_ecdsa(message, signature, pub):
"""Checks if the signature from signEcdsa is correct."""
if not isinstance(message, (bytes, bytearray)):
raise TypeError(f"Invalid message")
if len(message) != 32:
raise ValueError(f"Invalid length message: {len(message)} != 32")
if not isinstance(signature, (bytes, bytearray)):
raise TypeError("Invalid signature")
if len(signature) != 65:
raise ValueError(f"Invalid length signature: {len(signature)} != 65")
if not isinstance(pub, (bytes, bytearray)):
raise TypeError("Invalid public key")
if len(pub) != 64:
raise ValueError(f"Invalid length public key: {len(pub)} != 64")
pubkey = PublicKey(b"\x04" + pub)
r = _big_endian_to_int(signature[0:32])
s = _big_endian_to_int(signature[32:64])
low_s = _coerce_low_s(s)
der_encoded_signature = bytes(_two_int_sequence_encoder(r, low_s))
return verify_signature(
der_encoded_signature, message, pubkey.format(compressed=False), hasher=None
)
def recover_ecdsa(message, signature):
"""Gets public key from the message and ECDSA signature."""
if not isinstance(message, (bytes, bytearray)):
raise TypeError(f"Invalid message")
if len(message) != 32:
raise ValueError(f"Invalid length message: {len(message)} != 32")
if not isinstance(signature, (bytes, bytearray)):
raise TypeError("Invalid signature")
if len(signature) != 65:
raise ValueError(f"Invalid length signature: {len(signature)} != 65")
return PublicKey.from_signature_and_message(signature, message, hasher=None).format(
compressed=False
)[1:]
def _int_to_big_endian(value: int) -> bytes:
return value.to_bytes((value.bit_length() + 7) // 8 or 1, "big")
def _big_endian_to_int(value: bytes) -> int:
return int.from_bytes(value, "big")
def _coerce_low_s(value: int) -> int:
"""Coerce the s component of an ECDSA signature into its low-s form.
See https://bitcoin.stackexchange.com/questions/83408/in-ecdsa-why-is-r-%E2%88%92s-mod-n-complementary-to-r-s # noqa: W501
or https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md.
"""
return min(value, -value % SECPK1_N)
def _encode_int(primitive: int):
# See: https://docs.microsoft.com/en-us/windows/desktop/seccertenroll/about-integer
# Integer tag
yield 0x02
encoded = _int_to_big_endian(primitive)
if encoded[0] >= 128:
# Indicate that integer is positive
yield len(encoded) + 1
yield 0x00
else:
yield len(encoded)
yield from encoded
def _two_int_sequence_encoder(signature_r: int, signature_s: int):
"""
Encode two integers using DER, defined as:
::
ECDSASpec DEFINITIONS ::= BEGIN
ECDSASignature ::= SEQUENCE {
r INTEGER,
s INTEGER
}
END
Only a subset of integers are supported: positive, 32-byte ints.
See: https://docs.microsoft.com/en-us/windows/desktop/seccertenroll/about-sequence
"""
# Sequence tag
yield 0x30
encoded1 = bytes(_encode_int(signature_r))
encoded2 = bytes(_encode_int(signature_s))
# Sequence length
yield len(encoded1) + len(encoded2)
yield from encoded1
yield from encoded2
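# Round-trip sketch: sign a 32-byte digest, recover the public key from the
# recoverable signature, and verify it (toy key; never use in practice).
if __name__ == "__main__":
    import hashlib
    digest = hashlib.sha256(b"hello").digest()
    priv = bytes(range(1, 33))
    sig = sign_ecdsa(digest, priv)
    pub = recover_ecdsa(digest, sig)
    assert verify_ecdsa(digest, sig, pub)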
|
100955
|
import random
import time
import datetime
from common.constants import DATE_ONLY_FORMAT
def tomorrow():
return datetime.datetime.now() + datetime.timedelta(days=1)
def generate_random_date(start, end, format=DATE_ONLY_FORMAT):
etime = time.mktime(time.strptime(end, format))
stime = time.mktime(time.strptime(start, format))
ptime = stime + random.random() * (etime - stime)
return time.strftime(format, time.localtime(ptime))
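# Usage sketch (assuming DATE_ONLY_FORMAT is a day-level format such as
# "%Y-%m-%d"): samples a uniformly distributed date string between endpoints.
# generate_random_date("2020-01-01", "2020-12-31")  # e.g. "2020-07-14"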
|
100970
|
arr = input("Enter array elements: ").split(' ')
arr = [int(x) for x in arr]
for i in range(len(arr)):
    for j in range(len(arr) - 1 - i):
        if arr[j] > arr[j + 1]:
            arr[j], arr[j + 1] = arr[j + 1], arr[j]
print("Sorted array is:", arr)
"""
Problem Statement: Sort array using bubble sort technique
Sample Input/Output:
Input: 4 2 5 3 1
Output: 1,2,3,4,5
Time Complexity: O(n^2) (worst)
Space Complexity: O(1)
"""
|
100993
|
import os
import sys
import periphery
from .test import ptest, pokay, passert, AssertRaises
if sys.version_info[0] == 3:
raw_input = input
pwm_chip = None
pwm_channel = None
def test_arguments():
ptest()
# Invalid open types
with AssertRaises("invalid open types", TypeError):
periphery.PWM("foo", 0)
with AssertRaises("invalid open types", TypeError):
periphery.PWM(0, "foo")
def test_open_close():
ptest()
# Open non-existent PWM chip
with AssertRaises("non-existent PWM chip", LookupError):
periphery.PWM(9999, pwm_channel)
# Open non-existent PWM channel
with AssertRaises("non-existent PWM channel", periphery.PWMError):
periphery.PWM(pwm_chip, 9999)
# Open legitimate PWM chip/channel
pwm = periphery.PWM(pwm_chip, pwm_channel)
passert("property chip", pwm.chip == pwm_chip)
passert("property channel", pwm.channel == pwm_channel)
# Initialize period and duty cycle
pwm.period = 5e-3
pwm.duty_cycle = 0
# Set period, check period, check period_ns, check frequency
pwm.period = 1e-3
passert("period is correct", abs(pwm.period - 1e-3) < 1e-4)
passert("period_ns is correct", abs(pwm.period_ns - 1000000) < 1e5)
passert("frequency is correct", abs(pwm.frequency - 1000) < 100)
pwm.period = 5e-4
passert("period is correct", abs(pwm.period - 5e-4) < 1e-5)
passert("period_ns is correct", abs(pwm.period_ns - 500000) < 1e4)
passert("frequency is correct", abs(pwm.frequency - 2000) < 100)
# Set frequency, check frequency, check period, check period_ns
pwm.frequency = 1000
passert("frequency is correct", abs(pwm.frequency - 1000) < 100)
passert("period is correct", abs(pwm.period - 1e-3) < 1e-4)
passert("period_ns is correct", abs(pwm.period_ns - 1000000) < 1e5)
pwm.frequency = 2000
passert("frequency is correct", abs(pwm.frequency - 2000) < 100)
passert("period is correct", abs(pwm.period - 5e-4) < 1e-5)
passert("period_ns is correct", abs(pwm.period_ns - 500000) < 1e4)
# Set period_ns, check period_ns, check period, check frequency
pwm.period_ns = 1000000
passert("period_ns is correct", abs(pwm.period_ns - 1000000) < 1e5)
passert("period is correct", abs(pwm.period - 1e-3) < 1e-4)
passert("frequency is correct", abs(pwm.frequency - 1000) < 100)
pwm.period_ns = 500000
passert("period_ns is correct", abs(pwm.period_ns - 500000) < 1e4)
passert("period is correct", abs(pwm.period - 5e-4) < 1e-5)
passert("frequency is correct", abs(pwm.frequency - 2000) < 100)
pwm.period_ns = 1000000
# Set duty cycle, check duty cycle, check duty_cycle_ns
pwm.duty_cycle = 0.25
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.25) < 1e-3)
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 250000) < 1e4)
pwm.duty_cycle = 0.50
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.50) < 1e-3)
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 500000) < 1e4)
pwm.duty_cycle = 0.75
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.75) < 1e-3)
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 750000) < 1e4)
# Set duty_cycle_ns, check duty_cycle_ns, check duty_cycle
pwm.duty_cycle_ns = 250000
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 250000) < 1e4)
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.25) < 1e-3)
pwm.duty_cycle_ns = 500000
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 500000) < 1e4)
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.50) < 1e-3)
pwm.duty_cycle_ns = 750000
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 750000) < 1e4)
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.75) < 1e-3)
# Set polarity, check polarity
pwm.polarity = "normal"
passert("polarity is normal", pwm.polarity == "normal")
pwm.polarity = "inversed"
passert("polarity is inversed", pwm.polarity == "inversed")
# Set enabled, check enabled
pwm.enabled = True
passert("pwm is enabled", pwm.enabled == True)
pwm.enabled = False
passert("pwm is disabled", pwm.enabled == False)
# Use enable()/disable(), check enabled
pwm.enable()
passert("pwm is enabled", pwm.enabled == True)
pwm.disable()
passert("pwm is disabled", pwm.enabled == False)
# Set invalid polarity
with AssertRaises("set invalid polarity", ValueError):
pwm.polarity = "foo"
pwm.close()
def test_loopback():
ptest()
def test_interactive():
ptest()
pwm = periphery.PWM(pwm_chip, pwm_channel)
print("Starting interactive test. Get out your oscilloscope, buddy!")
raw_input("Press enter to continue...")
# Set initial parameters and enable PWM
pwm.duty_cycle = 0.0
pwm.frequency = 1e3
pwm.polarity = "normal"
pwm.enabled = True
# Check tostring
print("PWM description: {}".format(str(pwm)))
passert("interactive success", raw_input("PWM description looks ok? y/n ") == "y")
# Set 1 kHz frequency, 0.25 duty cycle
pwm.frequency = 1e3
pwm.duty_cycle = 0.25
passert("interactive success", raw_input("Frequency is 1 kHz, duty cycle is 25%? y/n ") == "y")
# Set 1 kHz frequency, 0.50 duty cycle
pwm.frequency = 1e3
pwm.duty_cycle = 0.50
passert("interactive success", raw_input("Frequency is 1 kHz, duty cycle is 50%? y/n ") == "y")
# Set 2 kHz frequency, 0.25 duty cycle
pwm.frequency = 2e3
pwm.duty_cycle = 0.25
passert("interactive success", raw_input("Frequency is 2 kHz, duty cycle is 25%? y/n ") == "y")
# Set 2 kHz frequency, 0.50 duty cycle
pwm.frequency = 2e3
pwm.duty_cycle = 0.50
passert("interactive success", raw_input("Frequency is 2 kHz, duty cycle is 50%? y/n ") == "y")
pwm.duty_cycle = 0.0
pwm.enabled = False
pwm.close()
if __name__ == "__main__":
if os.environ.get("CI") == "true":
test_arguments()
sys.exit(0)
if len(sys.argv) < 3:
print("Usage: python -m tests.test_pwm <PWM chip> <PWM channel>")
print("")
print("[1/4] Arguments test: No requirements.")
print("[2/4] Open/close test: PWM channel should be real.")
print("[3/4] Loopback test: No test.")
print("[4/4] Interactive test: PWM channel should be observed with an oscilloscope or logic analyzer.")
print("")
print("Hint: for Raspberry Pi 3, enable PWM0 and PWM1 with:")
print(" $ echo \"dtoverlay=pwm-2chan,pin=18,func=2,pin2=13,func2=4\" | sudo tee -a /boot/config.txt")
print(" $ sudo reboot")
print("Monitor GPIO 18 (header pin 12), and run this test with:")
print(" python -m tests.test_pwm 0 0")
print("or, monitor GPIO 13 (header pin 33), and run this test with:")
print(" python -m tests.test_pwm 0 1")
print("")
sys.exit(1)
pwm_chip = int(sys.argv[1])
pwm_channel = int(sys.argv[2])
test_arguments()
pokay("Arguments test passed.")
test_open_close()
pokay("Open/close test passed.")
test_loopback()
pokay("Loopback test passed.")
test_interactive()
pokay("Interactive test passed.")
pokay("All tests passed!")
|
101001
|
from django.contrib.auth.models import Group
from django.db.models import signals
from dms_plugins.pluginpoints import BeforeStoragePluginPoint, BeforeRetrievalPluginPoint, BeforeRemovalPluginPoint
from dms_plugins.workers import Plugin, PluginError
SECURITY_GROUP_NAME = 'security'
class GroupSecurityStore(Plugin, BeforeStoragePluginPoint):
title = 'Security Group on storage'
description = 'Security group member only [storage]'
plugin_type = 'security'
def work(self, document):
return GroupSecurity().work(document)
class GroupSecurityRetrieval(Plugin, BeforeRetrievalPluginPoint):
title = 'Security Group on retrieval'
description = 'Security group member only [retrieval]'
plugin_type = 'security'
def work(self, document):
return GroupSecurity().work(document)
class GroupSecurityRemoval(Plugin, BeforeRemovalPluginPoint):
title = 'Security Group on removal'
description = 'Security group member only [removal]'
plugin_type = 'security'
def work(self, document):
return GroupSecurity().work(document)
class GroupSecurity(object):
def work(self, document):
user = document.user
if not user:
raise PluginError("Not a logged in user.", 403)
security_group, created = Group.objects.get_or_create(name=SECURITY_GROUP_NAME)
if security_group not in user.groups.all() and not user.is_superuser:
raise PluginError("You're not in %s group" % SECURITY_GROUP_NAME, 403)
return document
def create_security_group(**kwargs):
"""Create user groups required for processing of security in MUI"""
Group.objects.get_or_create(name=SECURITY_GROUP_NAME)
# Attached this to recreate group for each syncdb
signals.post_syncdb.connect(create_security_group)
|
101022
|
from lk_utils import relpath
from rich_click import Path
from ._vendor.rich_click_ext import click
@click.grp(help='PyPortable Installer command line interface.')
def cli():
pass
@click.cmd()
@click.arg('directory', default='.', type=Path())
def init(directory='.'):
"""
Initialize project with template config file.
args:
directory:
Create a [cyan]"pyproject.json"[/] under this directory.
If directory not exists, will create it.
If directory parameter is not given, will use [magenta]current
directory[/].
"""
import os
from shutil import copyfile
file_i = relpath('./template/pyproject.json')
file_o = f'{directory}/pyproject.json'
if not os.path.exists(directory):
os.mkdir(directory)
elif os.path.exists(file_o):
os.remove(file_o)
copyfile(file_i, file_o)
@click.cmd()
@click.arg('pyproject-file', default='./pyproject.json', type=Path(exists=True))
def build(pyproject_file='./pyproject.json'):
"""
Start building application from pyproject config file.
args:
pyproject-file:
Choose a pyproject config file. Accepts ".json", ".yaml", ".toml"
formats.
If parameter is not given, will use [magenta]"./pyproject.json"[/]
as default.
"""
from .main import full_build
full_build(pyproject_file)
@click.cmd()
def gui():
"""
Launch PyPortable Installer GUI. [dim](experimental feature)[/]
"""
from .user_interface import gui_on_psg
gui_on_psg.main()
if __name__ == '__main__':
cli()
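# Example invocations (assuming the package installs this module under a
# console entry point named `pyportable-installer`):
#   pyportable-installer init ./myproject
#   pyportable-installer build ./myproject/pyproject.json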
|
101025
|
from toee import *
def OnBeginSpellCast(spell):
print "Ironthunder Horn OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
def OnSpellEffect(spell):
print "Ironthunder Horn OnSpellEffect"
targetsToRemove = []
spell.duration = 0
game.particles('sp-Ironthunder Horn', spell.caster)
for spellTarget in spell.target_list:
#Saving Throw to negate
if spellTarget.obj.saving_throw_spell(spell.dc, D20_Save_Reflex, D20STD_F_NONE, spell.caster, spell.id): #success
spellTarget.obj.float_mesfile_line('mes\\spell.mes', 30001)
game.particles('Fizzle', spellTarget.obj)
else:
spellTarget.partsys_id = game.particles('sp-Shout-Hit', spellTarget.obj)
spellTarget.obj.fall_down()
spellTarget.obj.condition_add("Prone")
spellTarget.obj.float_mesfile_line('mes\\combat.mes', 18, 1) #ID18: Knockdown message
targetsToRemove.append(spellTarget.obj)
spell.target_list.remove_list(targetsToRemove)
spell.spell_end(spell.id)
def OnBeginRound(spell):
print "Ironthunder Horn OnBeginRound"
def OnEndSpellCast(spell):
print "Ironthunder Horn OnEndSpellCast"
|
101031
|
def perfect_square(x):
    # Returns the largest integer i with i*i <= x, i.e. floor(sqrt(x)).
    if (x == 0 or x == 1):
return x
i = 1
result = 1
while (result <= x):
i += 1
result = i * i
return i - 1
x = int(input('Enter no.'))
print(perfect_square(x))
|
101072
|
class Solution:
def compareVersion(self, version1: str, version2: str) -> int:
l1 = [int(s) for s in version1.split(".")]
l2 = [int(s) for s in version2.split(".")]
len1, len2 = len(l1), len(l2)
if len1 > len2:
l2 += [0] * (len1 - len2)
elif len1 < len2:
l1 += [0] * (len2 - len1)
return (l1 > l2) - (l1 < l2)
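# Sanity sketch: zero-padding makes trailing ".0" segments compare equal.
assert Solution().compareVersion("1.01", "1.001") == 0
assert Solution().compareVersion("1.0", "1.0.0") == 0
assert Solution().compareVersion("0.1", "1.1") == -1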
|
101140
|
import os
import unittest
import numpy as np
from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.mesh_utils.mass_properties import (
mass_properties, mass_properties_nsm) #mass_properties_breakdown
from pyNastran.bdf.cards.test.utils import save_load_deck
from pyNastran.bdf.mesh_utils.loads import sum_forces_moments, sum_forces_moments_elements
class TestBars(unittest.TestCase):
"""test CBAR/PBAR/PBARL classes"""
def test_pbar_1(self):
"""tests the PBAR BDF add"""
area = 0.0
i11 = 4.9e-2
i22 = 5.5e-2
i12 = 6.6e-2
j = 7.7e-2
nsm = 1.0
fields = [
u'PBAR', 1510998, 1520998, area, i11,
i22, j, nsm, None, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, None, None, i12,
]
card = print_card_8(fields)
#print(card)
card = print_card_8(fields)
lines = card.split('\n')
model = BDF(debug=False)
card = model._process_card(lines)
cardi = BDFCard(card)
pbar = PBAR.add_card(cardi)
pbar.raw_fields()
self.assertEqual(pbar.A, area)
self.assertEqual(pbar.i1, i11)
self.assertEqual(pbar.i2, i22)
self.assertEqual(pbar.i12, i12)
self.assertEqual(pbar.j, j)
self.assertEqual(pbar.k1, None)
self.assertEqual(pbar.k2, None)
self.assertEqual(pbar.nsm, nsm)
assert np.allclose(pbar.Area(), area)
assert np.allclose(pbar.I11(), i11)
assert np.allclose(pbar.I22(), i22)
assert np.allclose(pbar.I12(), i12)
assert np.allclose(pbar.J(), j)
assert np.allclose(pbar.Nsm(), nsm)
def test_pbar_2(self):
"""tests the PBAR BDF add"""
pid = 1
mid = 2
A = None
I1 = I2 = None
J = None
nsm = None
c1 = c2 = d1 = d2 = e1 = e2 = f1 = f2 = None
k1 = k2 = None
i12 = 3.
fields = [
'PBAR', pid, mid, A, I1, I2, J, nsm, None,
c1, c2, d1, d2, e1, e2, f1, f2,
k1, k2, i12
]
card = print_card_8(fields)
lines = card.split('\n')
model = BDF(debug=False)
card = model._process_card(lines)
cardi = BDFCard(card)
pbar = PBAR.add_card(cardi)
self.assertEqual(pbar.pid, 1)
self.assertEqual(pbar.mid, 2)
self.assertEqual(pbar.A, 0.0)
self.assertEqual(pbar.i1, 0.0)
self.assertEqual(pbar.i2, 0.0)
self.assertEqual(pbar.j, 0.0)
self.assertEqual(pbar.nsm, 0.0)
self.assertEqual(pbar.i12, 3.0)
self.assertEqual(pbar.c1, 0.0)
self.assertEqual(pbar.c2, 0.0)
self.assertEqual(pbar.d1, 0.0)
self.assertEqual(pbar.d2, 0.0)
self.assertEqual(pbar.e1, 0.0)
self.assertEqual(pbar.e2, 0.0)
self.assertEqual(pbar.k1, None)
self.assertEqual(pbar.k2, None)
#--------------------------------------------------------
A = 6.
I1 = 5.
I2 = 4.
J = 3.
nsm = 2.
c1 = c2 = d1 = d2 = e1 = e2 = f1 = f2 = None
k1 = k2 = 1e2
i12 = 0.
fields = [
'PBAR', pid, mid, A, I1, I2, J, nsm, None,
c1, c2, d1, d2, e1, e2, f1, f2,
k1, k2, i12]
card = print_card_8(fields)
lines = card.split('\n')
model = BDF(debug=False)
card = model._process_card(lines)
cardi = BDFCard(card)
pbar = PBAR.add_card(cardi)
self.assertEqual(pbar.pid, 1)
self.assertEqual(pbar.mid, 2)
self.assertEqual(pbar.A, 6.0)
self.assertEqual(pbar.i1, 5.0)
self.assertEqual(pbar.i2, 4.0)
self.assertEqual(pbar.j, 3.0)
self.assertEqual(pbar.nsm, 2.0)
self.assertEqual(pbar.i12, 0.0)
self.assertEqual(pbar.c1, 0.0)
self.assertEqual(pbar.c2, 0.0)
self.assertEqual(pbar.d1, 0.0)
self.assertEqual(pbar.d2, 0.0)
self.assertEqual(pbar.e1, 0.0)
self.assertEqual(pbar.e2, 0.0)
self.assertEqual(pbar.k1, 1e2)
self.assertEqual(pbar.k2, 1e2)
def test_pbar_3(self):
"""tests the PBAR validate"""
pid = 42
mid = 10
i1 = -1.
i2 = -2.
i12 = -3.
j = -4.
pbar = PBAR(pid, mid, A=0., i1=i1, i2=i2, i12=i12, j=j, nsm=0., c1=0., c2=0.,
d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8,
k2=1.e8, comment='pbar')
with self.assertRaises(ValueError):
pbar.validate()
pbar.i1 = 1.
with self.assertRaises(ValueError):
pbar.validate()
pbar.i2 = 2.
with self.assertRaises(ValueError):
pbar.validate()
pbar.j = 4.
pbar.validate()
model = BDF(debug=False)
pbar = model.add_pbar(pid, mid, A=0., i1=2., i2=2., i12=1., j=4., nsm=0., c1=0., c2=0.,
d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8,
k2=1.e8, comment='pbar')
pbar.validate()
nids = [100, 101]
eid = 1000
x = [0., 0., 1.]
g0 = None
model.add_cbar(eid, pid, nids, x, g0, comment='cbar')
model.add_grid(100, [0., 0., 0.])
model.add_grid(101, [1., 0., 0.])
E = 3.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu)
save_load_deck(model)
def test_cbar_g0(self):
"""modification of test_cbeam_01"""
model = BDF(debug=False)
pid = 200
mid = 6
model.add_pbar(pid, mid, A=0., i1=2., i2=2., i12=1., j=4., nsm=0., c1=0., c2=0.,
d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8,
k2=1.e8, comment='pbar')
eid = 100
nids = [10, 20]
x = None
g0 = 30
cbar = model.add_cbar(eid, pid, nids, x, g0, comment='cbar')
cbar.write_card_16(is_double=False)
E = 1.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu)
model.add_grid(10, [0., 0., 0.])
model.add_grid(20, [0., 1., 0.])
model.add_grid(30, [0., 2., 0.])
model.cross_reference()
save_load_deck(model)
def test_pbarl_1(self):
"""tests the PBARL"""
model = BDF(log=None, debug=False)
pid = 4
mid = 40
group = 'group'
Type = 'bad_type'
dim = 42
nsm = 0.5
pbarl = PBARL(pid, mid, Type, dim, group=group, nsm=nsm, comment='comment')
with self.assertRaises(ValueError): # Type
pbarl.validate()
pbarl.Type = 'TUBE'
with self.assertRaises(TypeError): # dim
pbarl.validate()
pbarl.dim = [20.]
with self.assertRaises(RuntimeError):
pbarl.validate()
pbarl.dim = [2., 1.]
#with self.assertRaises(ValueError):
#pbarl.validate()
#pbarl.group = 'MSCBML0'
pbarl.validate()
str(pbarl)
pbarl.write_card(size=8, is_double=False)
pbarl.write_card(size=16, is_double=False)
pbarl.write_card(size=16, is_double=True)
model.properties[pid] = pbarl
nid1 = 52
xyz1 = [0., 0., 0.]
model.nodes[nid1] = GRID(nid1, cp=0, xyz=xyz1)
nid2 = 53
xyz2 = [1., 0., 0.]
model.nodes[nid2] = GRID(nid2, cp=0, xyz=xyz2)
E = 30.0e7
G = None
nu = 0.3
mat = MAT1(mid, E, G, nu, rho=1.0)
model.materials[mid] = mat
eid = 42
x = None
g0 = None
cbar = CBAR(eid, pid, [nid1, nid2], x, g0, offt='GGG',
pa=0, pb=0, wa=None, wb=None, comment='')
with self.assertRaises(ValueError):
cbar.validate()
cbar.x = [0., 1., 2.]
cbar.validate()
model.elements[eid] = cbar
pbarl._verify(xref=False)
model.validate()
model.cross_reference()
pbarl._verify(xref=True)
assert np.allclose(cbar.Mass(), 9.9247779608), cbar.Mass()
mat.rho = 0.
assert np.allclose(cbar.Mass(), 0.5), cbar.Mass()
scale = 'FR'
x = [0.2, 0.4, 0.6, 0.8]
model.add_cbarao(eid, scale, x, comment='cbarao')
model.add_card(['CBARAO', eid+1, 'RF', 6, 0.1, 0.2], 'CBARAO')
save_load_deck(model, run_quality=False, run_test_bdf=False)
def test_bar_mass_1(self):
"""tests CBAR/PBAR mass"""
model = BDF(debug=False)
#model.case_control_deck = CaseControlDeck(case_control_lines)
spc = ['SPC1', 123456, 123456, 1]
grid1 = ['GRID', 1, None, 0., 0., 0.]
grid2 = ['GRID', 2, None, 1., 0., 0.]
#grid3 = ['GRID', 3, None, 1., 0., 0.]
force = ['FORCE', 100, 1, 0, 2., 3., 4.]
pid = 11
mid = 12
cbar = [
'CBAR', 10, pid, 1, 2, 0., 1., 0., None,
]
k1 = k2 = None
area = 2.0
rho = 3.
nu = 0.3
i1 = 2.1
i2 = 1.2
i12 = 0.1
j = None
nsm = 0.1
pbar = [
'PBAR', pid, mid, area, i1, i2, j, nsm,
None, None, None, None, None, None, None, None,
k1, k2, i12
]
mat1 = ['MAT1', mid, 3.0e7, None, nu, rho]
model.add_card(grid1, 'GRID')
model.add_card(grid2, 'GRID')
model.add_card(cbar, 'CBAR')
model.add_card(pbar, 'PBAR')
model.add_card(mat1, 'MAT1')
model.add_card(spc, 'SPC1')
model.add_card(force, 'FORCE')
model.validate()
model.cross_reference()
mass, unused_cg, unused_I = mass_properties(
model,
element_ids=None, mass_ids=None,
reference_point=None,
sym_axis=None,
scale=None)
#print('cg* =', cg)
L = 1.0
mass_per_length = area * rho + nsm
mass = L * mass_per_length
#xcg = (0.0 * mass_a + 1.0 * mass_b) / (mass_a + mass_b)
#print(mass_a, mass_b, xcg, mass_a + mass_b)
#print('mass =', mass)
#cbar = CBEAM()
cbar = model.elements[10]
pbar = model.properties[11]
assert pbar.Nu() == nu, 'pbar.Nu()=%s nu=%s' % (pbar.Nu(), nu)
assert pbar.Rho() == rho, 'pbar.Rho()=%s rho=%s' % (pbar.Rho(), rho)
assert np.allclose(cbar.Length(), 1.0), cbar.Length()
#assert np.allclose(cbar.Mass(), 10.25), cbar.Mass()
#assert np.allclose(cbar.MassPerLength(), 10.25), cbar.MassPerLength()
#assert np.allclose(mass, 10.25), mass
case_control_lines = (
'SOL 101\n'
'CEND\n'
'SUBCASE 1\n'
' STRESS(PLOT,SORT1,REAL) = ALL\n'
' SPC = 123456\n'
' LOAD = 100\n'
'BEGIN BULK\n'
'PARAM,GRDPNT,0\n'
'PARAM,POST,-1\n'
'PARAM POSTEXT YES\n'
)
with open('cbar.bdf', 'w') as bdf_file:
bdf_file.write(case_control_lines)
model.write_bdf(bdf_file, enddata=True)
model2 = BDF(debug=False)
model2.read_bdf('cbar.bdf')
model2._verify_bdf(xref=True)
if not os.path.exists('cbar.op2') and 0:
os.system('nastran scr=yes bat=no old=no cbar.bdf')
os.remove('cbar.bdf')
if 0: # pragma: no cover
from pyNastran.op2.op2 import OP2
op2 = OP2()
op2.read_op2('cbar.op2')
#os.remove('cbar.op2')
gpw = op2.grid_point_weight
op2_mass = gpw.mass.max()
assert np.allclose(op2_mass, mass), 'op2_mass=%s mass=%s' % (op2_mass, mass)
#print('op2_mass=%s mass=%s' % (op2_mass, mass))
unused_op2_cg = gpw.cg
unused_cg = np.array([0.5, 0., 0.], dtype='float32')
#print('cg =', op2_cg)
def test_bar_mass_2(self):
"""CBAR/PBARL"""
model = BDF(debug=False)
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [1., 0., 0.])
model.add_grid(3, [0., 1., 0.])
mid = 1
E = 3.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu, rho=1.)
#---------------------------------------------------------------
eid = 1
pid = 101
nids = [1, 2]
x = [0., 0., 1.]
g0 = None
unused_cbar = model.add_cbar(eid, pid, nids, x, g0, offt='GGG',
pa=0, pb=0, wa=None, wb=None,
comment='CBAR')
Type = 'BOX'
dim = [1., 2., 0.1, 0.1]
#pbeaml = model.add_pbeaml(pid, mid, Type, xxb, dims, nsm=None,
#so=None, comment='PBEAML')
unused_pbarl = model.add_pbarl(pid, mid, Type, dim, group='MSCBML0', nsm=0.,
comment='PBARL')
#---------------------------------------------------------------
eid = 2
pid = 102
x = None
g0 = 3
unused_cbar = model.add_cbar(eid, pid, nids, x, g0, offt='GGG',
pa=0, pb=0, wa=None, wb=None,
comment='CBAR')
Type = 'BOX'
dim = [1., 2., 0.1, 0.1]
unused_pbarl = model.add_pbarl(pid, mid, Type, dim, group='MSCBML0', nsm=0.,
comment='PBARL')
#---------------------------------------------------------------
eid = 3
pid = 103
#cbar = model.add_cbar(eid, pid, nids, x, g0, offt='GGG',
#pa=42, pb=5, wa=None, wb=None,
#comment='CBAR')
unused_pbar = model.add_pbar(pid, mid, A=1., i1=0., i2=0., i12=0., j=0., nsm=0.1,
c1=0., c2=0.,
d1=0., d2=0.,
e1=0., e2=0.,
f1=0., f2=0.,
k1=1.e8, k2=1.e8,
comment='pbar')
#G = 3.0e7
#E = None
#nu = 0.3
#model.add_mat1(mid, E, G, nu, rho=0.0, a=0.0, tref=0.0, ge=0.0,
#St=0.0, Sc=0.0, Ss=0.0, mcsid=0,
#comment='')
#---------------------------------------------------------------
model.validate()
model.pop_parse_errors()
model._verify_bdf(xref=False)
model.cross_reference()
model.pop_xref_errors()
model._verify_bdf(xref=True)
model.uncross_reference()
def test_pbar_nsm(self):
model = BDF(debug=False)
pid = 1
mid = 1
nsm = 1.
area = 2.0
pbar = model.add_pbar(pid, mid, A=area, i1=0., i2=0., i12=0., j=0.,
nsm=nsm,
c1=0., c2=0., d1=0., d2=0.,
e1=0., e2=0., f1=0., f2=0.,
k1=1.e8, k2=1.e8,
comment='')
E = 1.0
G = None
nu = 0.3
mat1 = model.add_mat1(mid, E, G, nu)
#----------------
card_lines = [
'PBAR 2 1 2. 1.',
]
model.add_card(card_lines, 'PBAR', comment='', is_list=False,
has_none=True)
pbar2 = model.properties[2]
#------------------
model.cross_reference()
assert pbar.Nsm() == 1.0
assert pbar.Area() == 2.0
# mass/L = area*rho + nsm
assert pbar.MassPerLength() == 1.0
# area = 2.0
mat1.rho = 10.0
assert pbar.MassPerLength() == 21.0, pbar.MassPerLength()
assert pbar2.MassPerLength() == 21.0, pbar2.MassPerLength()
def test_pbarl_nsm(self):
model = BDF(debug=False)
pid = 1
mid = 1
bar_type = 'BAR'
dim = [1., 2.] # area = 2.0
pbarl = model.add_pbarl(pid, mid, bar_type, dim, group='MSCBML0', nsm=1.,
comment='')
E = 1.0
G = None
nu = 0.3
mat1 = model.add_mat1(mid, E, G, nu)
#----------------
card_lines = [
'PBARL 2 1 BAR',
' 1.0 2.0 1.0',
]
model.add_card(card_lines, 'PBARL', comment='', is_list=False,
has_none=True)
pbarl2 = model.properties[2]
#------------------
model.cross_reference()
assert pbarl.Nsm() == 1.0
assert pbarl.Area() == 2.0
# mass/L = area*rho + nsm
assert pbarl.MassPerLength() == 1.0
# area = 2.0
mat1.rho = 10.0
assert pbarl.MassPerLength() == 21.0, pbarl.MassPerLength()
assert pbarl2.MassPerLength() == 21.0, pbarl2.MassPerLength()
loadcase_id = 10
eid = 11
load_type = 'FZ'
x1 = 0.
x2 = None
p1 = 10.
scale = 'FR'
model.add_pload1(loadcase_id, eid, load_type, scale, x1, p1,
x2=x2, p2=None, comment='pload1')
scale = 'LE'
model.add_pload1(loadcase_id, eid, load_type, scale, x1, p1,
x2=x2, p2=None, comment='')
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [1., 0., 0.])
model.add_grid(3, [0., 1., 0.])
x = None
g0 = 3
model.add_cbar(eid, pid, [1, 2], x, g0)
model.cross_reference()
p0 = 1
eids = None
nids = None
force1, moment1 = sum_forces_moments(model, p0, loadcase_id,
include_grav=False, xyz_cid0=None)
force2, moment2 = sum_forces_moments_elements(model, p0, loadcase_id, eids, nids,
include_grav=False, xyz_cid0=None)
#print(force1, force2)
assert np.allclose(force1, force2), force1
assert np.allclose(moment1, moment2), moment1
save_load_deck(model, xref='standard', punch=True)
def test_baror(self):
"""tests a BAROR"""
model = BDF(debug=False)
n1 = 10
n2 = 20
model.add_grid(n1, [0., 0., 0.])
model.add_grid(n2, [1., 0., 0.])
pid = 2
mid = 1
bar_type = 'BAR'
dim = [1., 2.] # area = 2.0
unused_pbarl = model.add_pbarl(pid, mid, bar_type, dim, group='MSCBML0', nsm=1.,
comment='')
E = 3.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu, rho=1.)
card_lines = ['BAROR', None, pid, None, None, 0.6, 2.9, -5.87, 'GOG']
model.add_card(card_lines, 'BAROR', comment='BAROR', is_list=True,
has_none=True)
eid = 1
card_lines = ['CBAR', eid, pid, n1, n2]
model.add_card(card_lines, 'CBAR', comment='', is_list=True, has_none=True)
model.pop_parse_errors()
save_load_deck(model)
def test_baror_2(self):
model = BDF(debug=False)
pid = 12
is_g0 = True
g0 = 42
x = None
baror = model.add_baror(pid, is_g0, g0, x, offt='GGG', comment='baror')
baror.raw_fields()
baror.write_card(size=8)
baror.write_card(size=16)
save_load_deck(model)
def test_cbend(self):
"""tests a CBEND"""
model = BDF(debug=False)
eid = 7
pid = 10
nids = [2, 3]
g0 = 5
x = None
geom = 1
cbend = model.add_cbend(eid, pid, nids, g0, x, geom, comment='cbend')
model.add_grid(2, [0., 0., 0.])
model.add_grid(3, [0., 0., 0.])
model.add_grid(5, [0., 0., 0.])
#pbend = model.add_pbend(pid, mid, beam_type, A, i1, i2, j,
#c1, c2, d1, d2, e1, e2, f1, f2,
#k1, k2, nsm, rc, zc, delta_n, fsi,
#rm, t, p, rb, theta_b, comment='')
cbend.validate()
cbend.raw_fields()
cbend.write_card()
cbend.write_card(size=16)
model.validate()
model._verify_bdf(xref=False)
model.pop_parse_errors()
#model.cross_reference()
#model.pop_xref_errors()
#model._verify_bdf(xref=True)
#model.uncross_reference()
def test_cbeam3(self):
"""tests a CBEAM3"""
model = BDF(debug=False)
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [0., 0., 0.])
model.add_grid(3, [0., 0., 0.])
model.add_grid(4, [0., 0., 0.])
eid = 1
pid = 2
nids = [1, 2, 3]
x = None
g0 = 4
cbeam3 = model.add_cbeam3(eid, pid, nids, x, g0, wa=None, wb=None, wc=None, tw=None, s=None, comment='cbeam3')
cbeam3.raw_fields()
A = 1.
iz = 2.
iy = 3.
mid = 4
pbeam3 = model.add_pbeam3(pid, mid, A, iz, iy, iyz=0., j=None,
nsm=0., cy=0., cz=0., dy=0., dz=0., ey=0., ez=0., fy=0., fz=0., comment='')
E = 3.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu, rho=0.1)
str(cbeam3)
pbeam3s = str(pbeam3)
#print(pbeam3s)
str(pbeam3s)
card_lines = pbeam3s.split('\n')
cbeam3._verify(xref=False)
model.cross_reference()
model.uncross_reference()
del model.properties[pid]
model.cards_to_read.add('PBEAM3')
model.add_card(card_lines, 'PBEAM3', comment='', ifile=None, is_list=False, has_none=True)
model.pop_parse_errors()
model.pop_xref_errors()
assert pbeam3 == model.properties[pid]
def test_bar_area(self):
"""tests the PBARL"""
model = BDF(log=None, debug=False)
mid = 40
group = 'group'
nsm = 0.0
shape_dims_area = [
# name, dims, area, i1
('ROD', [2.], 4. * np.pi, 0.),
('TUBE', [5., 1.], 24. * np.pi, 0.),
('BAR', [2., 3.], 6., 0.),
('BOX', [2., 3., 0.5, 0.5], 4., 0.),
('L', [2., 3., 1., 1.], 4., 0.),
('CHAN', [10., 10., 1., 1.], 28., None),
('CHAN1', [9., 0.1, 8., 10.], 19., None),
('CHAN2', [1, 1., 9., 10.], 26., None),
# new
('I', [1., 1., 1., 0.1, 0.1, 0.1], 0.28, None),
('I1', [0.1, 1., 0.5, 1.], 1.05, None),
('H', [1.0, 0.1, 1.0, 0.1], 0.2, None),
('Z', [0.5, 0.5, 0.5, 1.], 0.75, None),
('Z', [0.8, 0.5, 0.5, 1.], 0.90, None),
('Z', [0.5, 0.8, 0.5, 1.], 1.05, None),
('Z', [0.5, 0.5, 0.8, 1.], 0.60, None),
('Z', [0.5, 0.5, 0.5, 2.], 1.75, None),
('CHAN', [1., 1., 0.1, 0.1], 0.28, None),
('CHAN1', [0.5, 0.5, 0.5, 1.], 0.75, None),
('CHAN2', [0.1, 0.1, 1., 1.], 0.28, None),
('CROSS', [0.1, 0.1, 1., 0.1], 0.11, None),
('HEXA', [0.1, 1., 1.], 0.90, None),
('HEXA', [0.2, 1., 1.], 0.80, None),
('HEXA', [0.1, 2., 1.], 1.90, None),
('HEXA', [0.1, 1., 2.], 1.80, None),
('HAT', [1., 0.1, 1., 0.1], 0.30, None),
('HAT', [2., 0.1, 1., 0.1], 0.50, None),
('HAT', [1., 0.2, 1., 0.1], 0.56, None),
('HAT', [1., 0.1, 2., 0.1], 0.40, None),
('HAT', [1., 0.1, 1., 0.2], 0.32, None),
('HAT1', [3., 1., 1., 0.1, 0.1], 0.76, None),
('HAT1', [3., 2., 1., 0.1, 0.1], 0.96, None),
('HAT1', [3., 1., 2., 0.1, 0.1], 0.76, None),
('HAT1', [3., 1., 1., 0.2, 0.1], 1.18, None),
('HAT1', [3., 1., 1., 0.1, 0.2], 1.04, None),
('T', [10., 10., 3., 0.5], 33.5, None),
('T2', [10., 5., 0.5, 2.0], 14., None), # ball,hall,tflange,tweb
('T', [1., 1., 0.1, 0.1], 0.19, None),
('T1', [1., 1., 0.1, 0.1], 0.20, None),
('T2', [1., 1., 0.1, 0.1], 0.19, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.64, None),
('DBOX', [2., 2., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.94, None),
('DBOX', [2., 1., 2., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.64, None),
('DBOX', [2., 1., 1., 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.72, None),
('DBOX', [2., 1., 1., 0.1, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.72, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, 0.1, ], 0.72, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, ], 0.725, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, ], 0.725, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.1, ], 0.725, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, ], 0.725, None),
]
pid = 1
for bar_type, dims, areai, i1 in shape_dims_area:
pbarl = PBARL(pid, mid, bar_type, dims, group=group, nsm=nsm, comment='comment')
pbarl.validate()
area2 = pbarl.Area()
if i1 is not None:
pbarl.I1()
pbarl.I2()
pbarl.I12()
assert np.allclose(areai, area2), 'bar_type=%r dims=%s area=%s area_expected=%s' % (bar_type, dims, area2, areai)
pid += 1
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
101145
|
from moviepy.editor import VideoFileClip
from davg.lanefinding.Pipeline import Pipeline
left_line = None
right_line = None
pipeline = Pipeline()
def lane_line_diag(img):
global left_line, right_line, pipeline
screen, left_line, right_line = pipeline.visualize_lanes_using_diagnostic_screen(img, left_line, right_line)
return screen
def writeout_lane_finding_video_with_diag_screen(src, dst, start=0, end=None):
clip = VideoFileClip(src).subclip(start, end)
diag_clip = clip.fl_image( lane_line_diag )
diag_clip.write_videofile(dst)
# UNCOMMENT TO RUN
# writeout_lane_finding_video_with_diag_screen('video/stream1.mpg', 'video/stream1_test-20180304.mp4', start=0, end=None)
|
101150
|
import requests
class RapidProClient:
def __init__(self, thread):
self.thread = thread
def send_reply(self, text):
response = requests.get(url=self.thread.chatbot.request_url, params={
'from': self.thread.uuid,
'text': text,
}, timeout=10)
return response
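# Hedged usage sketch: 'thread' is a hypothetical stand-in exposing the two
# attributes the client reads (a uuid and a chatbot.request_url).
if __name__ == '__main__':
    from types import SimpleNamespace
    thread = SimpleNamespace(
        uuid='demo-uuid',
        chatbot=SimpleNamespace(request_url='https://example.com/receive'),
    )
    client = RapidProClient(thread)
    # client.send_reply('hello')  # would issue the GET request above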
|
101151
|
prompt = """Translate to French (fr)
From English (en)
==========
Bramshott is a village with mediaeval origins in the East Hampshire district of Hampshire, England. It lies 0.9 miles (1.4 km) north of Liphook. The nearest railway station, Liphook, is 1.3 miles (2.1 km) south of the village.
----------
Bramshott est un village avec des origines médiévales dans le quartier East Hampshire de Hampshire, en Angleterre. Il se trouve à 0,9 miles (1,4 km) au nord de Liphook. La gare la plus proche, Liphook, est à 1,3 km (2,1 km) au sud du village.
==========
Translate to Russian (ru)
From German (de)
==========
Der Gewöhnliche Strandhafer (Ammophila arenaria (L.) Link; Syn: Calamagrostis arenaria (L.) Roth) – auch als Gemeiner Strandhafer, Sandrohr, Sandhalm, Seehafer oder Helm (niederdeutsch) bezeichnet – ist eine zur Familie der Süßgräser (Poaceae) gehörige Pionierpflanze.
----------
Обычная пляжная овсянка (аммофила ареалия (л.) соединение; сина: каламагростисная анария (л.) Рот, также называемая обычной пляжной овцой, песчаной, сандалмой, морской орой или шлемом (нижний немецкий) - это кукольная станция, принадлежащая семье сладких трав (поа).
==========
"""
def generate_prompt(text, from_name, from_code, to_name, to_code):
    """Append a new few-shot block asking to translate `text` from the source
    language (from_name/from_code) to the target language (to_name/to_code)."""
    to_return = prompt
    to_return += "Translate to "
    if to_name:
        to_return += to_name
    if to_code:
        to_return += " (" + to_code + ")"
    to_return += "\nFrom "
    if from_name:
        to_return += from_name
    if from_code:
        to_return += " (" + from_code + ")"
    to_return += "\n" + "=" * 10 + "\n"
    to_return += text
    to_return += "\n" + "-" * 10 + "\n"
    return to_return
def parse_inference(output):
    """Return the model output up to the first ========== or ---------- delimiter."""
    end_index = output.find("=" * 10)
    if end_index != -1:
        return output[:end_index]
    end_index = output.find("-" * 10)
    if end_index != -1:
        return output[:end_index]
    return output
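# Hedged usage sketch: build a prompt for a new language pair and trim a raw
# completion back to just the translation.
if __name__ == '__main__':
    p = generate_prompt('Hello world.', 'English', 'en', 'German', 'de')
    assert p.endswith('Hello world.\n' + '-' * 10 + '\n')
    assert parse_inference('Hallo Welt.\n' + '=' * 10) == 'Hallo Welt.\n'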
|
101191
|
import os
from aztk.models.plugins.plugin_configuration import PluginConfiguration, PluginPort, PluginTargetRole
from aztk.models.plugins.plugin_file import PluginFile
dir_path = os.path.dirname(os.path.realpath(__file__))
def JupyterPlugin():
return PluginConfiguration(
name="jupyter",
ports=[PluginPort(internal=8888, public=True)],
target_role=PluginTargetRole.All,
execute="jupyter.sh",
files=[PluginFile("jupyter.sh", os.path.join(dir_path, "jupyter.sh"))],
)
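# Hedged usage sketch: build the plugin definition; the result is meant to be
# handed to an aztk cluster configuration's plugin list (the exact wiring
# depends on the aztk API version in use).
if __name__ == '__main__':
    plugin = JupyterPlugin()
    print(plugin)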
|
101233
|
import json, re
ontologies = ['ontocompchem', 'ontokin', 'wiki']
def process_punctuation(string):
    # Remove punctuation, digits, and newlines
    string_temp = re.sub(r'[-\n,.!?()\[\]0-9]', '', string)
    # Convert the titles to lowercase
    string_temp = string_temp.lower()
    return string_temp
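# Quick sanity check of the cleaning step (expected spacing follows from the
# regex above, which deletes characters without collapsing whitespace).
assert process_punctuation('Hello, World! (2024)') == 'hello world '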
arrays = []
for o in ontologies:
f_name = '%s_corpus' % o
with open(f_name) as f:
content = json.loads(f.read())
        content = [process_punctuation(x) for x in content]
arrays.append(content)
with open('corpus', 'w') as f:
f.write(json.dumps(arrays))
|
101244
|
import pytest
from valid8.validation_lib import gt, gts, lt, lts, between, NotInRange, TooSmall, TooBig
def test_gt():
""" tests that the gt() function works """
assert gt(1)(1)
with pytest.raises(TooSmall):
gt(-1)(-1.1)
def test_gts():
""" tests that the gts() function works """
with pytest.raises(TooSmall):
gts(1)(1)
assert gts(-1)(-0.9)
def test_lt():
""" tests that the lt() function works """
assert lt(1)(1)
with pytest.raises(TooBig):
lt(-1)(-0.9)
def test_lts():
""" tests that the lts() function works """
with pytest.raises(TooBig):
lts(1)(1)
assert lts(-1)(-1.1)
def test_between():
""" tests that the between() function works """
assert between(0, 1)(0)
assert between(0, 1)(1)
with pytest.raises(NotInRange):
between(0, 1)(-0.1)
with pytest.raises(NotInRange):
between(0, 1)(1.1)
def test_numpy_nan():
""" Test that a numpy nan is correctly handled """
import numpy as np
    with pytest.raises(TooSmall):
        gt(5.1)(np.nan)
    with pytest.raises(TooBig):
        lt(5.1)(np.nan)
    with pytest.raises(NotInRange):
        between(5.1, 5.2)(np.nan)
|
101254
|
import tensorflow as tf
from utils.bert import bert_utils
from loss import loss_utils
from utils.bert import albert_modules
from metric import tf_metrics
def classifier(config, seq_output,
input_ids,
sampled_ids,
input_mask,
num_labels,
dropout_prob,
**kargs):
"""
input_ids: original input ids
sampled_ids: generated fake ids
"""
output_layer = seq_output
hidden_size = output_layer.shape[-1].value
unk_mask = tf.cast(tf.math.equal(input_ids, 100), tf.float32) # not replace unk
cls_mask = tf.cast(tf.math.equal(input_ids, 101), tf.float32) # not replace cls
sep_mask = tf.cast(tf.math.equal(input_ids, 102), tf.float32) # not replace sep
none_replace_mask = unk_mask + cls_mask + sep_mask
input_mask = tf.cast(input_mask, tf.int32)
input_mask *= tf.cast(1 - none_replace_mask, tf.int32) # cls, unk, sep are not considered as replace or original
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
if config.get('ln_type', 'postln') == 'preln':
output_layer = albert_modules.layer_norm(output_layer)
print('====preln transformer====')
elif config.get('ln_type', 'postln') == 'postln':
output_layer = output_layer
print('====postln transformer====')
else:
output_layer = output_layer
print('====no layer layer_norm====')
output_layer = tf.nn.dropout(output_layer, keep_prob=1 - dropout_prob)
logits = tf.einsum("abc,dc->abd", output_layer, output_weights)
logits = tf.nn.bias_add(logits, output_bias) # batch x seq_length x 2
input_ids = tf.cast(input_ids, tf.int32)
input_shape_list = bert_utils.get_shape_list(sampled_ids, expected_rank=[2,3])
if len(input_shape_list) == 3:
tmp_sampled_ids = tf.argmax(sampled_ids, axis=-1) # batch x seq x vocab
tmp_sampled_ids = tf.cast(tmp_sampled_ids, tf.int32)
tf.logging.info("****** gumbel 3-D sampled_ids *******")
elif len(input_shape_list) == 2:
tmp_sampled_ids = sampled_ids
tmp_sampled_ids = tf.cast(tmp_sampled_ids, tf.int32)
tf.logging.info("****** normal 2-D sampled_ids *******")
ori_sampled_ids = kargs.get('ori_sampled_ids', None)
if ori_sampled_ids is not None:
input_shape_list = bert_utils.get_shape_list(ori_sampled_ids, expected_rank=[2,3])
if len(input_shape_list) == 3:
tmp_ori_sampled_ids = tf.argmax(ori_sampled_ids, axis=-1) # batch x seq x vocab
            tmp_ori_sampled_ids = tf.cast(tmp_ori_sampled_ids, tf.int32)
tf.logging.info("****** gumbel 3-D sampled_ids *******")
elif len(input_shape_list) == 2:
tmp_ori_sampled_ids = tf.cast(ori_sampled_ids, tf.int32)
tf.logging.info("****** normal 2-D sampled_ids *******")
masked_not_equal_mask = tf.cast(tf.not_equal(input_ids, tmp_ori_sampled_ids), tf.int32)
masked_not_equal_mask *= tf.cast(input_mask, tf.int32)
else:
masked_not_equal_mask = None
if masked_not_equal_mask is not None:
tf.logging.info("****** loss mask using masked token mask for masked tokens *******")
loss_mask = masked_not_equal_mask
else:
tf.logging.info("****** loss mask using input_mask for all tokens *******")
loss_mask = input_mask
# original:0, replace:1
not_equal_label_ids = tf.cast(tf.not_equal(input_ids, tmp_sampled_ids), tf.int32)
not_equal_label_ids *= tf.cast(input_mask, tf.int32)
if kargs.get('loss', 'cross_entropy') == 'cross_entropy':
per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,
labels=tf.stop_gradient(not_equal_label_ids))
elif kargs.get('loss', 'cross_entropy') == 'focal_loss':
input_shape_list = bert_utils.get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape_list[0]
seq_length = input_shape_list[1]
not_equal_label_ids_ = tf.reshape(not_equal_label_ids, [batch_size*seq_length])
logits_ = tf.reshape(logits, [batch_size*seq_length, -1])
per_example_loss, _ = loss_utils.focal_loss_binary_v2(config, logits_, not_equal_label_ids_)
per_example_loss = tf.reshape(per_example_loss, [batch_size, seq_length])
# loss = per_example_loss * tf.cast(loss_mask, tf.float32)
# loss = tf.reduce_sum(loss) / (1e-10 + tf.reduce_sum(tf.cast(loss_mask, tf.float32)))
equal_label_ids = (1 - tf.cast(not_equal_label_ids, tf.float32)) * tf.cast(loss_mask, tf.float32)
equal_loss = tf.reduce_sum(per_example_loss * equal_label_ids)
equal_loss_output = equal_loss / (1e-10 + tf.reduce_sum(equal_label_ids))
not_equal_loss = tf.reduce_sum(per_example_loss * tf.cast(not_equal_label_ids, tf.float32)) # not equal:1, equal:0
not_equal_loss_output = not_equal_loss / (1e-10 + tf.reduce_sum(tf.cast(not_equal_label_ids, tf.float32)))
loss = (equal_loss + not_equal_loss) / (1e-10 + tf.reduce_sum(tf.cast(loss_mask, tf.float32)))
tf.logging.info("====discriminator classifier use_tpu %s ====", str(kargs.get('use_tpu', True)))
if not kargs.get('use_tpu', True):
tf.logging.info("====logging discriminator loss ====")
tf.summary.scalar('mask_based_loss',
loss)
tf.summary.scalar('equal_loss',
equal_loss/(1e-10 + tf.reduce_sum(tf.cast(input_mask, tf.float32))))
tf.summary.scalar('not_equal_loss',
not_equal_loss/(1e-10 + tf.reduce_sum(tf.cast(input_mask, tf.float32))))
tf.summary.scalar('loss_decomposition',
loss - (equal_loss+not_equal_loss)/(1e-10 + tf.reduce_sum(tf.cast(input_mask, tf.float32))))
return (loss, logits, per_example_loss)
def nce_loss(true_model_dict, true_features_dict,
fake_model_dict, fake_features_dict):
true_input_ids = tf.cast(true_features_dict['input_ids'], tf.int32)
sampled_input_ids = tf.cast(fake_features_dict['input_ids'], tf.int32)
unk_mask = tf.cast(tf.math.equal(true_input_ids, 100), tf.float32) # not replace unk
cls_mask = tf.cast(tf.math.equal(true_input_ids, 101), tf.float32) # not replace cls
sep_mask = tf.cast(tf.math.equal(true_input_ids, 102), tf.float32) # not replace sep
none_replace_mask = unk_mask + cls_mask + sep_mask
input_mask = tf.cast(true_features_dict['input_mask'], tf.int32)
input_mask *= tf.cast(1 - none_replace_mask, tf.int32) # cls, unk, sep are not considered as replace or original
true_logits = true_model_dict['logits']
fake_logits = fake_model_dict['logits']
input_shape_list = bert_utils.get_shape_list(sampled_input_ids, expected_rank=[2,3])
if len(input_shape_list) == 3:
tmp_sampled_ids = tf.argmax(sampled_input_ids, axis=-1) # batch x seq x vocab
tmp_sampled_ids = tf.cast(tmp_sampled_ids, tf.int32)
tf.logging.info("****** gumbel 3-D sampled_ids *******")
elif len(input_shape_list) == 2:
tmp_sampled_ids = sampled_input_ids
tmp_sampled_ids = tf.cast(tmp_sampled_ids, tf.int32)
tf.logging.info("****** normal 2-D sampled_ids *******")
masked_not_equal_mask = tf.cast(tf.not_equal(true_input_ids, tmp_sampled_ids), tf.int32)
loss_mask = masked_not_equal_mask * tf.cast(input_mask, tf.int32)
true_labels = tf.zeros_like(loss_mask)
fake_labels = tf.ones_like(loss_mask)
# nce positive part, batch x seq
true_per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=true_logits,
labels=tf.stop_gradient(true_labels))
# nce genative part, batch x seq
fake_per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=fake_logits,
labels=tf.stop_gradient(fake_labels))
loss = tf.reduce_sum((true_per_example_loss + fake_per_example_loss) * tf.cast(loss_mask, tf.float32))
loss /= tf.reduce_sum(1e-10 + tf.cast(loss_mask, tf.float32))
return loss
def discriminator_metric_train(per_example_loss, logits, input_ids, sampled_ids,
input_mask):
# original:0, replace:1
discriminator_label_ids = tf.not_equal(
tf.cast(input_ids, tf.int32),
tf.cast(sampled_ids, tf.int32)
)
unk_mask = tf.cast(tf.math.equal(input_ids, 100), tf.float32) # not replace unk
cls_mask = tf.cast(tf.math.equal(input_ids, 101), tf.float32) # not replace cls
sep_mask = tf.cast(tf.math.equal(input_ids, 102), tf.float32) # not replace sep
none_replace_mask = unk_mask + cls_mask + sep_mask
input_mask = tf.cast(input_mask, tf.int32)
input_mask *= tf.cast(1 - none_replace_mask, tf.int32) # cls, unk, sep are not considered as replace or original
discriminator_lm_predictions = tf.argmax(
logits, axis=-1, output_type=tf.int32)
discriminator_mean_loss = per_example_loss * tf.cast(input_mask, tf.float32)
discriminator_mean_loss = tf.reduce_sum(discriminator_mean_loss) / (1e-10 + tf.reduce_sum(tf.cast(input_mask, tf.float32)))
discriminator_lm_accuracy = tf.equal(
tf.cast(discriminator_lm_predictions, tf.int32),
tf.cast(discriminator_label_ids, tf.int32)
)
discriminator_lm_accuracy = tf.cast(discriminator_lm_accuracy, tf.float32)
discriminator_lm_accuracy_original = tf.reduce_sum(discriminator_lm_accuracy * tf.cast(discriminator_label_ids, tf.float32)) / (1e-10 + tf.reduce_sum(tf.cast(input_mask, tf.float32)))
discriminator_lm_accuracy_diff = tf.reduce_sum(discriminator_lm_accuracy * tf.cast(discriminator_label_ids, tf.float32)) / (1e-10 + tf.reduce_sum(tf.cast(discriminator_label_ids, tf.float32)))
discriminator_lm_accuracy = tf.reduce_sum(discriminator_lm_accuracy * tf.cast(input_mask, tf.float32)) / (1e-10 + tf.reduce_sum(tf.cast(input_mask, tf.float32)))
return {
"discriminator_lm_accuracy": discriminator_lm_accuracy,
"discriminator_lm_loss": discriminator_mean_loss,
"discriminator_lm_accuracy_diff":discriminator_lm_accuracy_diff,
"discriminator_lm_accuracy_original":discriminator_lm_accuracy_original,
}
def discriminator_metric_eval(per_example_loss, logits, input_ids, sampled_ids,
input_mask):
# original:0, replace:1
discriminator_label_ids = tf.not_equal(
tf.cast(input_ids, tf.int32),
tf.cast(sampled_ids, tf.int32)
)
unk_mask = tf.cast(tf.math.equal(input_ids, 100), tf.float32) # not replace unk
cls_mask = tf.cast(tf.math.equal(input_ids, 101), tf.float32) # not replace cls
sep_mask = tf.cast(tf.math.equal(input_ids, 102), tf.float32) # not replace sep
none_replace_mask = unk_mask + cls_mask + sep_mask
input_mask = tf.cast(input_mask, tf.int32)
input_mask *= tf.cast(1 - none_replace_mask, tf.int32) # cls, unk, sep are not considered as replace or original
discriminator_lm_predictions = tf.argmax(
logits, axis=-1, output_type=tf.int32)
discriminator_label_ids = tf.reshape(discriminator_label_ids, [-1])
discriminator_lm_predictions = tf.reshape(discriminator_lm_predictions, [-1])
discriminator_mask = tf.reshape(input_mask, [-1])
discriminator_accuracy = tf.metrics.accuracy(
labels=discriminator_label_ids,
predictions=discriminator_lm_predictions,
weights=discriminator_mask)
discriminator_per_example_loss = tf.reshape(per_example_loss, [-1])
discriminator_mean_loss = tf.metrics.mean(
values=discriminator_per_example_loss,
weights=discriminator_mask)
discriminator_recall = tf.compat.v1.metrics.recall(discriminator_label_ids,
discriminator_lm_predictions,
weights=discriminator_mask)
discriminator_precision = tf.compat.v1.metrics.precision(discriminator_label_ids,
discriminator_lm_predictions,
weights=discriminator_mask)
# discriminator_f1 = 2*(discriminator_recall * discriminator_precision) / ( discriminator_recall + discriminator_precision)
return {
"discriminator_accuracy":discriminator_accuracy,
"discriminator_loss":discriminator_mean_loss,
"discriminator_recall":discriminator_recall,
"discriminator_precision":discriminator_precision,
}
|
101294
|
import torch
import shutil
import numpy as np
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
# import cv2
from skimage.transform import resize
import torchvision.transforms as transforms
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, filename='checkpoint.pth.tar'):
torch.save(state, filename)
def adjust_learning_rate(optimizer, epoch, args, interval):
"""Sets the learning rate to the initial LR decayed by 10 every 100 epochs"""
lr = args.lr
if epoch < interval[0]:
lr = args.lr
elif epoch >= interval[0] and epoch < interval[1]:
lr = args.lr * 0.1
else:
lr = args.lr * 0.01
#lr = args.lr * (0.1 ** (epoch // 100))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def multi_class_auc(all_target, all_output, num_c = None):
from sklearn.preprocessing import label_binarize
# all_output = np.stack(all_output)
all_target = label_binarize(all_target, classes=list(range(0, num_c)))
all_output = label_binarize(all_output, classes=list(range(0, num_c)))
auc_sum = []
for num_class in range(0, num_c):
try:
auc = roc_auc_score(all_target[:, num_class], all_output[:, num_class])
auc_sum.append(auc)
except ValueError:
pass
auc = sum(auc_sum) / (float(len(auc_sum))+1e-8)
return auc
def evaluation_metrics(label, pred, C):
if C==2:
auc = roc_auc_score(label, pred)
else:
auc = multi_class_auc(label, pred, num_c=C)
corrects = np.equal(np.array(label), np.array(pred))
acc = float(sum(corrects)) / len(corrects)
# mean class
precision = precision_score(label, pred, average='macro')
recall = recall_score(label, pred, average='macro')
f1score = f1_score(label, pred, average='macro')
return round(auc, 4), round(acc, 4), round(precision, 4), round(recall, 4), round(f1score, 4)
def showfeature(x, savename):
# trun to numpy
x = x.data.cpu().numpy()
    print(x.shape)
box = []
for item in range(0, x.shape[0]):
x_patch = x[item, :, :]
box.append(x_patch)
x_patch = np.stack(box)
x_patch = np.max(x_patch, axis=0)
x_patch = resize(x_patch, (224, 224), order=3, mode='constant',
cval=0, clip=True, preserve_range=True)
x_patch = (x_patch - np.min(x_patch)) / (np.max(x_patch) - np.min(x_patch) + 1e-11)
x_patch = x_patch * 255
x_patch = np.array(x_patch, dtype="uint8")
plt.plot(1), plt.imshow(x_patch, cmap='jet')
plt.axis('off')
plt.savefig(savename, bbox_inches='tight', pad_inches=0)
def showimage(x, savename):
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
z = x * torch.tensor(std).view(3, 1, 1).cuda()
z = z + torch.tensor(mean).view(3, 1, 1).cuda()
z = z.cpu()
z = z[[2,1,0], : ,:]
img2 = transforms.ToPILImage()(z)
img2.save(savename)
def get_color_distortion(s=1.0):
color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)
rnd_gray = transforms.RandomGrayscale(p=0.2)
color_distort = transforms.Compose([rnd_color_jitter, rnd_gray])
return color_distort
def gaussian_blur(x):
from PIL.ImageFilter import GaussianBlur
if np.random.randint(0, 2) == 1:
x = x.filter(GaussianBlur(radius=np.random.uniform(0.1, 2.0)))
return x
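if __name__ == '__main__':
    # Hedged smoke test: top-1 accuracy on a toy batch, accumulated in a meter.
    meter = AverageMeter()
    logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    target = torch.tensor([1, 0])
    top1, = accuracy(logits, target, topk=(1,))
    meter.update(top1.item(), n=target.size(0))
    print(meter.avg)  # 100.0 for this toy batch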
|
101315
|
import unittest
from src.graph import Graph
from src.breadth_first_search import bfs
class BreadthFirstSearchTest(unittest.TestCase):
def test_bfs_parses_the_graph_in_order(self):
"""
Correctly explore the following graph:
_(a)--(c)--(e)
/ | / \ |
(s)--(b)-------(d)
"""
edges = [('s', 'a'), ('s', 'b'), ('a', 'b'), ('a', 'c'), ('b', 'd'),
('c', 'e'), ('c', 'd'), ('e', 'd')]
graph = Graph.build(edges=edges)
expected = ['s', 'a', 'b', 'c', 'd', 'e']
actual = bfs(graph, 's')
self.assertEqual(actual, expected,
'should have visited the graph in correct order')
|
101341
|
from rest_framework.permissions import BasePermission, SAFE_METHODS, IsAdminUser
class IsAdminOrReadOnly(IsAdminUser):
def has_permission(self, request, view):
if request.method in SAFE_METHODS:
return True
return super().has_permission(request, view)
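# Hedged sanity check: safe methods always pass, regardless of the user; a
# SimpleNamespace stands in for the DRF request object.
if __name__ == '__main__':
    from types import SimpleNamespace
    request = SimpleNamespace(method='GET')
    print(IsAdminOrReadOnly().has_permission(request, view=None))  # True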
|
101345
|
import torch
from torch.serialization import normalize_storage_type, location_tag, _should_read_directly
import io
import pickle
import pickletools
from .find_file_dependencies import find_files_source_depends_on
from ._custom_import_pickler import CustomImportPickler
from ._importlib import _normalize_path
import types
import importlib
from typing import List, Any, Callable, Dict, Tuple
from distutils.sysconfig import get_python_lib
from pathlib import Path
import linecache
import sys
from tempfile import NamedTemporaryFile
class PackageExporter:
""" Exporters allow you to write packages of code, pickled python data, and
arbitrary binary and text resources into a self-contained package.
Imports can load this code in a hermetic way, such that code is loaded
from the package rather than the normal python import system. This allows
for the packaging of PyTorch model code and data so that it can be run
on a server or used in the future for transfer learning.
The code contained in packages is copied file-by-file from the original
source when it is created, and the file format is a specially organized
zip file. Future users of the package can unzip the package, and edit the code
in order to perform custom modifications to it.
The importer for packages ensures that code in the module can only be loaded from
within the package, except for modules explicitly listed as external using :method:`extern_module`.
The file `extern_modules` in the zip archive lists all the modules that a package externally depends on.
This prevents "implicit" dependencies where the package runs locally because it is importing
a locally-installed package, but then fails when the package is copied to another machine.
Dependencies
------------
When source code is added to the package, the exporter optionally can scan it
for further code dependencies (`dependencies=True`). It looks for import statements,
resolves relative references to qualified module names, and calls :method:`require_module`
on each it finds, recursively resolving dependencies.
"""
importers: List[Callable[[str], Any]]
""" A list of functions that will be called in order to find the module assocated
with module names referenced by other modules or by pickled objects. Initialized to
`[importlib.import_module]` by default. When pickling code or objects that was loaded
from an imported packaged, that `importer.import_module` should be put into the importer list.
When a name conflict occurs between importers, the first importer in the list takes precedence,
and only objects that refer to this first importers class can be saved
"""
def __init__(self, filename: str, verbose: bool = True):
"""
Create an exporter.
Args:
filename: e.g. my_package.zip
verbose: Print information about dependency resolution to stdout.
Useful for tracking down why certain files get included.
"""
self.zip_file = torch._C.PyTorchFileWriter(filename)
self.serialized_storages : Dict[str, Any] = {}
self.external : List[str] = []
self.provided : Dict[str, bool] = {}
self.verbose = verbose
self.importers = [importlib.import_module]
self.debug_deps : List[Tuple[str, str]] = []
def save_source_file(self, module_name: str, file_or_directory: str, dependencies=True):
"""Adds the local file system `file_or_directory` to the source package to provide the code
for `module_name`.
Args:
module_name (str): e.g. `my_package.my_subpackage`, code will be saved to provide code for this package.
            file_or_directory (str): the path to a file or directory of code. When a directory, all python files in the directory
                are recursively copied using :meth:`save_source_file`. If a file is named "__init__.py" the code is treated
                as a package.
dependencies (bool, optional): If True, we scan the source for dependencies (see :ref:`Dependencies`).
"""
path = Path(file_or_directory)
if path.is_dir():
to_save = [] # list of tuples with arguments to save_source_string
module_path = module_name.replace('.', '/')
for filename in path.glob('**/*.py'):
relative_path = filename.relative_to(path).as_posix()
archivename = module_path + '/' + relative_path
if filename.is_dir():
self.provided[archivename] = True
else:
submodule_name = None
if filename.name == '__init__.py':
submodule_name = archivename[:-len('/__init__.py')].replace('/', '.')
is_package = True
else:
submodule_name = archivename[:-len('.py')].replace('/', '.')
is_package = False
self.provided[submodule_name] = True
# we delay the call to save_source_string so that we record all the source files
# being provided by this directory structure _before_ attempting to resolve the dependencies
# on the source. This makes sure we don't try to copy over modules that will just get
# overwritten by this directory blob
to_save.append((submodule_name, _read_file(str(filename)), is_package, dependencies, str(filename)))
for item in to_save:
self.save_source_string(*item)
else:
is_package = path.name == '__init__.py'
self.save_source_string(module_name, _read_file(file_or_directory), is_package, dependencies, file_or_directory)
def save_source_string(self, module_name: str, src: str, is_package: bool = False,
dependencies: bool = True, orig_file_name: str = None):
"""Adds `src` as the source code for `module_name` in the exported package.
Args:
module_name (str): e.g. `my_package.my_subpackage`, code will be saved to provide code for this package.
src (str): The python source code to save for this package
is_package (bool, optional): If True, this module is treated as a package. Packages are allowed to have submodules
(e.g. my_package.my_subpackage.my_subsubpackage), and resources can be saved inside them. Defaults to False.
dependencies (bool, optional): If True, we scan the source for dependencies (see :ref:`Dependencies`).
            orig_file_name (str, optional): If present, used in logging to identify where the source came from. Defaults to None.
"""
self.provided[module_name] = True
extension = '/__init__.py' if is_package else '.py'
filename = module_name.replace('.', '/') + extension
self._write(filename, src)
if dependencies:
package = module_name if is_package else module_name.rsplit('.', maxsplit=1)[0]
dep_pairs = find_files_source_depends_on(src, package)
dep_list = {}
for dep_module_name, dep_module_obj in dep_pairs:
# handle the case where someone did something like `from pack import sub`
# where `sub` is a submodule. In this case we don't have to save pack, just sub.
# this ensures we don't pick up additional dependencies on pack.
# However, in the case where `sub` is not a submodule but an object, then we do have
# to save pack.
if dep_module_obj is not None:
possible_submodule = f'{dep_module_name}.{dep_module_obj}'
if self._module_exists(possible_submodule):
dep_list[possible_submodule] = True
# we don't need to save `pack`
continue
if self._module_exists(dep_module_name):
dep_list[dep_module_name] = True
for dep in dep_list.keys():
self.debug_deps.append((module_name, dep))
if self.verbose:
dep_str = ''.join(f' {dep}\n' for dep in dep_list.keys())
file_info = f'(from file {orig_file_name}) ' if orig_file_name is not None else ''
print(f"{module_name} {file_info}depends on:\n{dep_str}\n")
for dep in dep_list.keys():
self.require_module_if_not_provided(dep)
def _module_exists(self, module_name: str) -> bool:
try:
self._import_module(module_name)
return True
except Exception:
return False
def _write_dep_graph(self, failing_module=None, output_file=None):
depended_on : Dict[str, List[str]] = {}
for f, t in self.debug_deps:
if t not in depended_on:
depended_on[t] = []
if f not in depended_on:
depended_on[f] = []
depended_on[t].append(f)
level : Dict[str, int] = {}
def visit(x: str):
if x in level:
return level[x]
level[x] = 0
for e in depended_on[x]:
level[x] = max(level[x], visit(e) + 1)
return level[x]
for x in depended_on.keys():
visit(x)
nodes = []
node_to_id = {}
n = 0
for ft in self.debug_deps:
for e in ft:
if e not in node_to_id:
node_to_id[e] = n
extra = ''
if e == failing_module:
extra = ", color: 'red'"
nodes.append(f" {{id: {n}, label: '{e}', level: {level[e]}, shape: 'box'{extra}}},\n")
n += 1
edges = []
for f, t in self.debug_deps:
fn, tn = node_to_id[f], node_to_id[t]
edges.append(f" {{from: {fn}, to: {tn}, arrows: 'to'}},\n")
nodes_s, edges_s = ''.join(nodes), ''.join(edges)
template = f"""\
<html>
<head>
<script type="text/javascript" src="https://almende.github.io/vis/dist/vis.js"></script>
<link href="https://almende.github.io/vis/dist/vis.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="mynetwork"></div>
<script type="text/javascript">
var nodes = new vis.DataSet([
{nodes_s}
]);
var edges = new vis.DataSet([
{edges_s}
]);
var options = {{
layout: {{
hierarchical: {{
direction: "LR",
levelSeparation: 400,
}},
}},
}};
// create a network
var container = document.getElementById('mynetwork');
var network = new vis.Network(container, {{nodes: nodes, edges: edges}}, options);
</script>
</body>
</html>
"""
if output_file:
output_file.write(template)
return None
with NamedTemporaryFile(mode='w', suffix='.html', delete=False) as tf:
tf.write(template)
return tf.name
def _get_source_of_module(self, module: types.ModuleType) -> str:
filename = getattr(module, '__file__', None)
result = None if filename is None or not filename.endswith('.py') else linecache.getlines(filename, module.__dict__)
if result is None:
extra = ''
if self.verbose:
extra = f' See the dependency graph for more info: {self._write_dep_graph(module.__name__)}'
raise ValueError(f'cannot save source for module "{module.__name__}" because '
f'its source file "{filename}" could not be found.{extra}')
return ''.join(result)
def require_module_if_not_provided(self, module_name: str, dependencies=True):
if self._module_is_already_provided(module_name):
return
self.require_module(module_name, dependencies)
def require_module(self, module_name: str, dependencies=True):
"""This is called by dependencies resolution when it finds that something in the package
depends on the module and it is not already present. It then decides how to provide that module.
The default resolution rules will mark the module as extern if it is part of the standard library,
and call `save_module` otherwise. Clients can subclass this object
and override this method to provide other behavior, such as automatically mocking out a whole class
of modules"""
root_name = module_name.split('.', maxsplit=1)[0]
if self._can_implicitly_extern(root_name):
if self.verbose:
print(f'implicitly adding {root_name} to external modules '
f'since it is part of the standard library and is a dependency.')
self.extern_module(root_name)
return
self.save_module(module_name, dependencies)
def save_module(self, module_name: str, dependencies=True):
"""Save the code for `module_name` into the package. Code for the module is resolved using the `importers` path to find the
module object, and then using its `__file__` attribute to find the source code.
Args:
module_name (str): e.g. `my_package.my_subpackage`, code will be saved to provide code for this package.
dependencies (bool, optional): If True, we scan the source for dependencies (see :ref:`Dependencies`).
"""
module = self._import_module(module_name)
source = self._get_source_of_module(module)
self.save_source_string(module_name, source, hasattr(module, '__path__'), dependencies, module.__file__)
def _import_module(self, module_name):
last_err = None
for import_module in self.importers:
try:
return import_module(module_name)
except ModuleNotFoundError as err:
last_err = err
if last_err is not None:
raise last_err
else:
raise ModuleNotFoundError(module_name)
def _create_pickler(self, data_buf):
if self.importers == [importlib.import_module]:
# if we are using the normal import library system, then
# we can use the C implementation of pickle which is faster
return pickle.Pickler(data_buf, protocol=3)
else:
return CustomImportPickler(self._import_module, data_buf, protocol=3)
def save_pickle(self, package: str, resource: str, obj: Any, dependencies: bool = True):
"""Save a python object to the archive using pickle. Equivalent to :func:`torch.save` but saving into
        the archive rather than a stand-alone file. Standard pickle does not save the code, only the objects.
If `dependencies` is true, this method will also scan the pickled objects for which modules are required
to reconstruct them and save the relevant code.
        To be able to save an object whose class is `my_module.MyObject`,
        `my_module.MyObject` must resolve to the class of the object according to the `importer` order. When saving objects that
have previously been packaged, the importer's `import_module` method will need to be present in the `importer` list
for this to work.
Args:
            package (str): The name of the module package this resource should go in (e.g. "my_package.my_subpackage")
            resource (str): A unique name for the resource, used to identify it when loading.
obj (Any): The object to save, must be picklable.
dependencies (bool, optional): If True, we scan the source for dependencies (see :ref:`Dependencies`).
"""
filename = self._filename(package, resource)
# Write the pickle data for `obj`
data_buf = io.BytesIO()
pickler = self._create_pickler(data_buf)
pickler.persistent_id = self._persistent_id
pickler.dump(obj)
data_value = data_buf.getvalue()
if dependencies:
all_dependencies = []
for opcode, arg, pos in pickletools.genops(data_value):
if opcode.name == 'GLOBAL': # a global reference
assert isinstance(arg, str)
module, field = arg.split(' ')
if module not in all_dependencies:
all_dependencies.append(module)
for dep in all_dependencies:
self.debug_deps.append((package + '.' + resource, dep))
if self.verbose:
dep_string = ''.join(f' {dep}\n' for dep in all_dependencies)
print(f"{resource} depends on:\n{dep_string}\n")
for module_name in all_dependencies:
self.require_module_if_not_provided(module_name)
self._write(filename, data_value)
def save_text(self, package: str, resource: str, text: str):
"""Save text data to the package
Args:
            package (str): The name of the module package this resource should go in (e.g. "my_package.my_subpackage")
            resource (str): A unique name for the resource, used to identify it when loading.
text (str): The contents to save
"""
return self.save_binary(package, resource, text.encode('utf-8'))
def save_binary(self, package, resource, binary: bytes):
"""Save raw bytes to the package.
Args:
            package (str): The name of the module package this resource should go in (e.g. "my_package.my_subpackage")
            resource (str): A unique name for the resource, used to identify it when loading.
            binary (bytes): The data to save.
"""
filename = self._filename(package, resource)
self._write(filename, binary)
def extern_module(self, module_name: str):
"""Include `module` in the list of external modules the package can import.
        This will prevent dependency discovery from saving
it in the package. The importer will load an external module directly from the standard import system.
Code for extern modules must also exist in the process loading the package.
Args:
module_name (str): e.g. "my_package.my_subpackage" the name of the external module
"""
if module_name not in self.external:
self.external.append(module_name)
def extern_modules(self, module_names: List[str]):
"""Extern a list of modules. Convience wrapper for calling :meth:`extern_module` on many items.
Args:
module_names (List[str]): List of module names
"""
for m in module_names:
self.extern_module(m)
def mock_module(self, module_name: str):
"""Replace the code for `module_name` in the package with a fake implementation. This module will return a fake
object for any attribute accessed from it. Because we copy file-by-file, the dependency resolution will sometimes
find files that are imported by model files but whose functionality is never used
(e.g. custom serialization code or training helpers).
Use this function to mock this functionality out without having to modify the original code.
Args:
module_name (str): e.g. "my_package.my_subpackage" the name of the module to be mocked out.
"""
if '_mock' not in self.provided:
self.save_source_file('_mock', str(Path(__file__).parent / '_mock.py'), dependencies=False)
is_package = hasattr(self._import_module(module_name), '__path__')
self.save_source_string(module_name, _MOCK_IMPL, is_package, dependencies=False)
def mock_modules(self, module_names):
"""Mock a list of modules. Convience wrapper for calling :meth:`mock_module` on many items.
Args:
module_names (List[str]): List of module names
"""
for module_name in module_names:
self.mock_module(module_name)
def _module_is_already_provided(self, qualified_name: str) -> bool:
for mod in self.external:
if qualified_name == mod or qualified_name.startswith(mod + '.'):
return True
return qualified_name in self.provided
def _persistent_id(self, obj):
        # FIXME: the docs say that persistent_id should only return a string,
        # but torch storage serialization returns tuples. This works only in the binary protocol
# see
# https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
# https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
if torch.is_storage(obj):
storage_type = normalize_storage_type(type(obj))
obj_key = str(obj._cdata)
location = location_tag(obj)
self.serialized_storages[obj_key] = obj
return ('storage',
storage_type,
obj_key,
location,
obj.size())
return None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _write(self, filename, str_or_bytes):
if isinstance(str_or_bytes, str):
str_or_bytes = str_or_bytes.encode('utf-8')
self.zip_file.write_record(filename, str_or_bytes, len(str_or_bytes))
def close(self):
"""Write the package to the filesystem. Any calls after close are now invalid.
It is preferable to use resource guard syntax instead:
with PackageExporter("file.zip") as e:
...
"""
if self.verbose:
print(f"Dependency graph for exported package: {self._write_dep_graph()}")
        # Write each storage to a file named data/<key> in the zip archive
for key in sorted(self.serialized_storages.keys()):
name = 'data/{}'.format(key)
storage = self.serialized_storages[key]
if storage.device.type == 'cpu':
# If it's on the CPU we can directly copy it into the zip file
num_bytes = storage.size() * storage.element_size()
self.zip_file.write_record(name, storage.data_ptr(), num_bytes)
else:
# Copy to a buffer, then serialize that
buf = io.BytesIO()
storage._write_file(buf, _should_read_directly(buf))
buf_value = buf.getvalue()
self._write(name, buf_value)
contents = ('\n'.join(self.external) + '\n')
self._write('extern_modules', contents)
del self.zip_file
def _filename(self, package, resource):
package_path = package.replace('.', '/')
resource = _normalize_path(resource)
return f'{package_path}/{resource}'
def _can_implicitly_extern(self, module_name: str):
return module_name == 'torch' or (module_name not in _DISALLOWED_MODULES
and _is_builtin_or_stdlib_module(self._import_module(module_name)))
# even though these are in the standard library, we do not allow them to be
# automatically externed since they offer a lot of system level access
_DISALLOWED_MODULES = ['sys', 'io']
def _is_builtin_or_stdlib_module(module: types.ModuleType) -> bool:
if module.__name__ in sys.builtin_module_names:
return True
filename = getattr(module, '__file__', None)
if filename is None:
return False
standard_lib = get_python_lib(standard_lib=True)
# this is often a subdirectory of standard_lib so we have to check
# that the file is in the standard_lib directory but not in this one
installed_libs = get_python_lib(standard_lib=False)
in_standard_lib = filename.startswith(standard_lib + '/')
in_installed_libs = filename.startswith(installed_libs + '/')
return in_standard_lib and not in_installed_libs
_MOCK_IMPL = """\
from _mock import MockedObject
def __getattr__(attr: str):
return MockedObject(__name__ + '.' + attr)
"""
def _read_file(filename: str) -> str:
with open(filename, 'rb') as f:
b = f.read()
return b.decode('utf-8')
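if __name__ == '__main__':
    # Hedged usage sketch: package a pickled dict (which carries no code
    # dependencies) into a self-contained zip; 'demo_package.zip' is a
    # hypothetical output name.
    with PackageExporter('demo_package.zip', verbose=False) as exporter:
        exporter.save_pickle('demo', 'data.pkl', {'weights': [1.0, 2.0, 3.0]})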
|
101380
|
import math
import numpy as np
def quaternion_to_rotation_matrix(q):
# Original C++ Method defined in pba/src/pba/DataInterface.h
qq = math.sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3])
qw = qx = qy = qz = 0
if qq > 0: # NORMALIZE THE QUATERNION
qw = q[0] / qq
qx = q[1] / qq
qy = q[2] / qq
qz = q[3] / qq
else:
qw = 1
qx = qy = qz = 0
m = np.zeros((3, 3), dtype=float)
m[0][0] = float(qw * qw + qx * qx - qz * qz - qy * qy)
m[0][1] = float(2 * qx * qy - 2 * qz * qw)
m[0][2] = float(2 * qy * qw + 2 * qz * qx)
m[1][0] = float(2 * qx * qy + 2 * qw * qz)
m[1][1] = float(qy * qy + qw * qw - qz * qz - qx * qx)
m[1][2] = float(2 * qz * qy - 2 * qx * qw)
m[2][0] = float(2 * qx * qz - 2 * qy * qw)
m[2][1] = float(2 * qy * qz + 2 * qw * qx)
m[2][2] = float(qz * qz + qw * qw - qy * qy - qx * qx)
return m
def rotation_matrix_to_quaternion(m):
# Original C++ Method defined in pba/src/pba/DataInterface.h
q = np.array([0, 0, 0, 0], dtype=float)
q[0] = 1 + m[0][0] + m[1][1] + m[2][2]
if q[0] > 0.000000001:
q[0] = math.sqrt(q[0]) / 2.0
q[1] = (m[2][1] - m[1][2]) / (4.0 * q[0])
q[2] = (m[0][2] - m[2][0]) / (4.0 * q[0])
q[3] = (m[1][0] - m[0][1]) / (4.0 * q[0])
else:
if m[0][0] > m[1][1] and m[0][0] > m[2][2]:
s = 2.0 * math.sqrt(1.0 + m[0][0] - m[1][1] - m[2][2])
q[1] = 0.25 * s
q[2] = (m[0][1] + m[1][0]) / s
q[3] = (m[0][2] + m[2][0]) / s
q[0] = (m[1][2] - m[2][1]) / s
elif m[1][1] > m[2][2]:
s = 2.0 * math.sqrt(1.0 + m[1][1] - m[0][0] - m[2][2])
q[1] = (m[0][1] + m[1][0]) / s
q[2] = 0.25 * s
q[3] = (m[1][2] + m[2][1]) / s
q[0] = (m[0][2] - m[2][0]) / s
else:
s = 2.0 * math.sqrt(1.0 + m[2][2] - m[0][0] - m[1][1])
q[1] = (m[0][2] + m[2][0]) / s
q[2] = (m[1][2] + m[2][1]) / s
q[3] = 0.25 * s
q[0] = (m[0][1] - m[1][0]) / s
return q
class Extrinsics:
def __init__(self):
# center is the coordinate of the camera center with respect to the
# world coordinate frame (t = -R C)
self._center = np.array([0, 0, 0], dtype=float)
# the translation vector is the vector used to transform points in
# world coordinates to camera coordinates (C = -R^T t)
self._translation_vec = np.array([0, 0, 0], dtype=float)
# use for these attributes the getter and setter methods
self._quaternion = np.array([0, 0, 0, 0], dtype=float)
# for rotations the inverse is equal to the transpose
# self._rotation_inv_mat = np.linalg.transpose(self._rotation_mat)
self._rotation_mat = np.zeros((3, 3), dtype=float)
@staticmethod
def invert_transformation_mat(trans_mat):
# Exploit that the inverse of the rotation part is equal to the
# transposed of the rotation part. This should be more robust than
# trans_mat_inv = np.linalg.inv(trans_mat)
trans_mat_inv = np.zeros_like(trans_mat)
rotation_part_inv = trans_mat[0:3, 0:3].T
trans_mat_inv[0:3, 0:3] = rotation_part_inv
trans_mat_inv[0:3, 3] = -np.dot(rotation_part_inv, trans_mat[0:3, 3])
trans_mat_inv[3, 3] = 1
return trans_mat_inv
def is_rotation_mat_valid(self, some_mat):
# TEST if rotation_mat is really a rotation matrix
# (i.e. det = -1 or det = 1)
det = np.linalg.det(some_mat)
is_close = np.isclose(det, 1) or np.isclose(det, -1)
# if not is_close:
# logger.vinfo('some_mat', some_mat)
# logger.vinfo('determinante', det)
return is_close
def set_quaternion(self, quaternion):
self._quaternion = quaternion
# we must change the rotation matrixes as well
self._rotation_mat = quaternion_to_rotation_matrix(quaternion)
def set_rotation_mat(self, rotation_mat):
assert self.is_rotation_mat_valid(rotation_mat)
self._rotation_mat = rotation_mat
# we must change the quaternion as well
self._quaternion = rotation_matrix_to_quaternion(rotation_mat)
def set_camera_center_after_rotation(self, center):
assert self.is_rotation_mat_valid(self._rotation_mat)
self._center = center
self._translation_vec = -np.dot(self._rotation_mat, center)
def set_camera_translation_vector_after_rotation(self, translation_vector):
# translation_vector: trans_vec = -Rc
assert self.is_rotation_mat_valid(self._rotation_mat)
self._translation_vec = translation_vector
self._center = -np.dot(
self._rotation_mat.transpose(), translation_vector
)
def get_quaternion(self):
return self._quaternion
def get_rotation_mat(self):
# Note:
# self._rotation_mat.T or self._rotation_mat.transpose()
# DO NOT CHANGE THE MATRIX
return self._rotation_mat
def get_translation_vec(self):
return self._translation_vec
def get_camera_center(self):
return self._center
def get_4x4_world_to_cam_mat(self):
# This matrix can be used to convert points given in world coordinates
# into points given in camera coordinates
# M = [R -Rc]
# [0 1],
# https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg
homogeneous_mat = np.identity(4, dtype=float)
homogeneous_mat[0:3, 0:3] = self.get_rotation_mat()
homogeneous_mat[0:3, 3] = -self.get_rotation_mat().dot(
self.get_camera_center()
)
return homogeneous_mat
def set_4x4_cam_to_world_mat(self, cam_to_world_mat):
# This matrix can be used to convert points given in camera coordinates
# into points given in world coordinates
# M = [R^T c]
# [0 1]
#
# https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg
rotation_part = cam_to_world_mat[0:3, 0:3]
translation_part = cam_to_world_mat[0:3, 3]
self.set_rotation_mat(rotation_part.transpose())
self.set_camera_center_after_rotation(translation_part)
def get_4x4_cam_to_world_mat(self):
# This matrix can be used to convert points given in camera coordinates
# into points given in world coordinates
# M = [R^T c]
# [0 1]
# :return:
#
# https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg
homogeneous_mat = np.identity(4, dtype=float)
homogeneous_mat[0:3, 0:3] = self.get_rotation_mat().transpose()
homogeneous_mat[0:3, 3] = self.get_camera_center()
return homogeneous_mat
def cam_to_world_coord_multiple_coords(self, cam_coords):
num_coords = cam_coords.shape[0]
hom_entries = np.ones(num_coords).reshape((num_coords, 1))
cam_coords_hom = np.hstack((cam_coords, hom_entries))
world_coords_hom = (
self.get_4x4_cam_to_world_mat().dot(cam_coords_hom.T).T
)
world_coords = np.delete(world_coords_hom, 3, 1)
return world_coords
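if __name__ == '__main__':
    # Hedged round-trip check: a 90-degree rotation about z survives the
    # quaternion <-> matrix conversions, and world_to_cam composed with
    # cam_to_world is the identity (quaternion convention here is [w, x, y, z]).
    q_in = np.array([math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)])
    mat = quaternion_to_rotation_matrix(q_in)
    assert np.allclose(q_in, rotation_matrix_to_quaternion(mat), atol=1e-6)
    extrinsics = Extrinsics()
    extrinsics.set_rotation_mat(mat)
    extrinsics.set_camera_center_after_rotation(np.array([1.0, 2.0, 3.0]))
    round_trip = extrinsics.get_4x4_world_to_cam_mat().dot(
        extrinsics.get_4x4_cam_to_world_mat()
    )
    assert np.allclose(round_trip, np.identity(4), atol=1e-6)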
|
101388
|
import numpy as np
import matplotlib.pyplot as plt
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(8,12))
# Plot pow with different settings
t = 1e-2
rcut = 5
settings = [
[2, rcut/(1/t*(1-t)) ** (1 / 2), 1],
[4, rcut/(1/t*(1-t)) ** (1 / 4), 1],
[8, rcut/(1/t*(1-t)) ** (1 / 8), 1],
]
rmin = 0
rmax = 5.2
for setting in settings:
m = setting[0]
r0 = setting[1]
c = setting[2]
d = c
r = np.arange(rmin, rmax, 0.01)
polym = c / (d + (r / r0) ** m)
ax2.plot(r, polym, label="m = {}, r0 = {:.3}, c = d = {}".format(m, r0, c))
ax2.axvline(rcut, color='k', linestyle='--')
ax2.text(
rcut*0.99,
0.5,
"rcut inferred from threshold",
verticalalignment='center',
horizontalalignment='right',
rotation="vertical",
)
ax2.set_title("pow")
ax2.set_xlabel("r")
ax2.set_ylabel("w(r)")
# Plot poly with different settings
settings = [
[rcut, 3],
[rcut, 2],
[rcut, 1],
]
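# The poly weighting c * (1 + 2*(r/r0)**3 - 3*(r/r0)**2) ** m equals c at r = 0
# and exactly 0 at r = r0 (since 1 + 2 - 3 = 0), so rcut here is simply r0.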
for setting in settings:
r0 = setting[0]
m = setting[1]
c = 1
    r = np.arange(rmin, rmax, 0.01)  # define r explicitly instead of reusing the value left over from the pow loop
    poly3m = []
for ri in r:
if ri < r0:
poly3m.append(c*(1 + 2 * (ri / r0) ** 3 - 3 * (ri / r0) ** 2) ** m)
else:
poly3m.append(0)
ax1.plot(r, poly3m, label="m = {}, r0 = {}, c={}".format(m, r0, c))
ax1.axvline(rcut, color='k', linestyle='--')
ax1.text(
rcut*0.99,
0.5,
"rcut inferred from r0".format(t),
verticalalignment='center',
horizontalalignment='right',
rotation="vertical",
)
ax1.set_title("poly")
ax1.set_xlabel("r")
ax1.set_ylabel("w(r)")
# Plot exp with different settings
settings = [
[rcut/np.log(1/t - 0), 1, 0],
[rcut/np.log(10/t - 9), 10, 9],
[rcut/np.log(100/t - 99), 100, 99],
]
for setting in settings:
r = np.arange(rmin, rmax, 0.01)
r0 = setting[0]
c = setting[1]
d = setting[2]
exp = c/(d + np.exp(r/r0))
ax3.plot(r, exp, label="r0={:.3}, c={}, d={}".format(r0, c, d))
ax3.axvline(rcut, color='k', linestyle='--')
ax3.text(
rcut*0.99,
0.5,
"rcut inferred from threshold",
verticalalignment='center',
horizontalalignment='right',
rotation="vertical",
)
ax3.set_title("exp")
ax3.set_xlabel("r")
ax3.set_ylabel("w(r)")
l = "upper right"
anchor = (0.9, 1)
ax1.set_xlim(rmin, rmax)
ax1.set_ylim(0, 1)
ax2.set_ylim(0, 1)
ax3.set_ylim(0, 1)
ax1.legend(loc=legend_loc, bbox_to_anchor=anchor)
ax2.legend(loc=legend_loc, bbox_to_anchor=anchor)
ax3.legend(loc=legend_loc, bbox_to_anchor=anchor)
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.05)
plt.show()
|
101485
|
from ._base import PDFItem
from .PDFShape import PDFShape
from .PDFImage import PDFImage
from .PDFText import PDFText
from .PDFXObject import PDFXObject
|
101493
|
from fireworks import FiretaskBase, FWAction, explicit_serialize, Workflow
from atomate.utils.utils import env_chk
from atomate.vasp.database import VaspCalcDb
from atomate.vasp.fireworks.approx_neb import ImageFW
from atomate.common.powerups import powerup_by_kwargs
__author__ = "<NAME>"
__email__ = "<EMAIL>"
@explicit_serialize
class GetImageFireworks(FiretaskBase):
"""
Adds ImageFWs to the workflow for the provided images_key
according to the scheme specified by launch_mode. Optional
parameters such as "handler_group", "add_additional_fields",
and "add_tags" can be used to modify the resulting ImageFWs.
Args:
db_file (str): path to file containing the database
credentials.
approx_neb_wf_uuid (str): unique id for approx neb workflow
record keeping.
        images_key (str): specifies a key corresponding to the images
field of the approx_neb collection which specifies the
desired combination of end points to interpolate images
between. images_key should be a string of format "0+1",
"0+2", etc. matching end_points_combo input of
PathfinderToDb Firetask or pathfinder_key input of
AddSelectiveDynamics Firetask. If images_key is not
provided images will be launched for all paths/keys in
the approx_neb collection images field.
launch_mode (str): "all" or "screening"
vasp_cmd (str): the name of the full executable for running
VASP.
Optional Params:
vasp_input_set (VaspInputSet class): can use to
define VASP input parameters.
See pymatgen.io.vasp.sets module for more
information. MPRelaxSet() and
override_default_vasp_params are used if
vasp_input_set = None.
override_default_vasp_params (dict): if provided,
vasp_input_set is disregarded and the Vasp Input
Set is created by passing
override_default_vasp_params to MPRelaxSet().
Allows for easy modification of MPRelaxSet().
For example, to set ISIF=2 in the INCAR use:
{"user_incar_settings":{"ISIF":2}}
handler_group (str or [ErrorHandler]): group of handlers to
use for RunVaspCustodian firetask. See handler_groups
dict in the code for the groups and complete list of
handlers in each group. Alternatively, you can specify a
list of ErrorHandler objects.
add_additional_fields (dict): dict of additional fields to
add to task docs (by additional_fields of VaspToDb).
add_tags (list of strings): added to the "tags" field of the
task docs.
"""
required_params = [
"db_file",
"approx_neb_wf_uuid",
"images_key",
"launch_mode",
"vasp_cmd",
]
optional_params = [
"vasp_input_set",
"override_default_vasp_params",
"handler_group",
"add_additional_fields",
"add_tags",
]
def run_task(self, fw_spec):
# get the database connection
db_file = env_chk(self["db_file"], fw_spec)
mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
mmdb.collection = mmdb.db["approx_neb"]
wf_uuid = self["approx_neb_wf_uuid"]
launch_mode = self["launch_mode"]
images_key = self["images_key"]
approx_neb_doc = mmdb.collection.find_one({"wf_uuid": wf_uuid}, {"images": 1})
all_images = approx_neb_doc["images"]
# get structure_path of desired images and sort into structure_paths
        if images_key and isinstance(all_images, dict):
images = all_images[images_key]
max_n = len(images)
if launch_mode == "all":
structure_paths = [
"images." + images_key + "." + str(n) + ".input_structure"
for n in range(0, max_n)
]
elif launch_mode == "screening":
structure_paths = self.get_and_sort_paths(
max_n=max_n, images_key=images_key
)
        elif isinstance(all_images, dict):
structure_paths = dict()
if launch_mode == "all":
for key, images in all_images.items():
max_n = len(images)
structure_paths[key] = [
"images." + key + "." + str(n) + ".input_structure"
for n in range(0, max_n)
]
elif launch_mode == "screening":
for key, images in all_images.items():
structure_paths[key] = self.get_and_sort_paths(
max_n=len(images), images_key=key
)
# get list of fireworks to launch
        if isinstance(structure_paths, list):
            if isinstance(structure_paths[0], str):
relax_image_fws = []
for path in structure_paths:
relax_image_fws.append(self.get_fw(structure_path=path))
else:
relax_image_fws = self.get_screening_fws(sorted_paths=structure_paths)
        elif isinstance(structure_paths, dict):
relax_image_fws = []
if launch_mode == "all":
for key in structure_paths.keys():
for path in structure_paths[key]:
relax_image_fws.append(self.get_fw(structure_path=path))
elif launch_mode == "screening":
for key in structure_paths.keys():
sorted_paths = structure_paths[key]
relax_image_fws.extend(
self.get_screening_fws(sorted_paths=sorted_paths)
)
# place fws in temporary wf in order to use powerup_by_kwargs
# to apply powerups to image fireworks
if "vasp_powerups" in fw_spec.keys():
temp_wf = Workflow(relax_image_fws)
powerup_dicts = fw_spec["vasp_powerups"]
temp_wf = powerup_by_kwargs(temp_wf, powerup_dicts)
relax_image_fws = temp_wf.fws
return FWAction(additions=relax_image_fws)
def get_and_sort_paths(self, max_n, images_key=""):
sorted_paths = [[], [], []]
mid_n = int(max_n / 2)
q1 = int((max_n - mid_n) / 2) # for second screening pass
q3 = int((max_n + mid_n) / 2) # for second screening pass
for n in range(0, max_n):
path = "images." + images_key + "." + str(n) + ".input_structure"
if n == mid_n: # path for first screening pass (center image index)
sorted_paths[0].append(path)
elif n in [q1, q3]:
sorted_paths[1].append(path)
else:
sorted_paths[-1].append(path)
return sorted_paths
def get_fw(self, structure_path, parents=None):
add_tags = self.get("add_tags")
fw = ImageFW(
approx_neb_wf_uuid=self["approx_neb_wf_uuid"],
structure_path=structure_path,
db_file=self["db_file"],
vasp_input_set=self.get("vasp_input_set"),
vasp_cmd=self["vasp_cmd"],
override_default_vasp_params=self.get("override_default_vasp_params"),
handler_group=self.get("handler_group"),
parents=parents,
add_additional_fields=self.get("add_additional_fields"),
add_tags=add_tags,
)
        if isinstance(add_tags, list):
if "tags" in fw.spec.keys():
fw.spec["tags"].extend(add_tags)
else:
fw.spec["tags"] = add_tags
return fw
def get_screening_fws(self, sorted_paths):
        if (
            not isinstance(sorted_paths, list)
            or len(sorted_paths) != 3
            or not all(isinstance(i, list) for i in sorted_paths)
        ):
            raise TypeError("sorted_paths must be a list containing 3 lists")
s1_fw = self.get_fw(structure_path=sorted_paths[0][0])
# ToDo: modify this firework to add firetask that checks whether to run/defuse children
s2_fws = []
for path in sorted_paths[1]:
s2_fws.append(self.get_fw(structure_path=path, parents=s1_fw))
# ToDo: modify this firework to add firetask that checks whether to run/defuse children
remaining_fws = []
for path in sorted_paths[-1]:
remaining_fws.append(self.get_fw(structure_path=path, parents=s2_fws))
return [s1_fw] + s2_fws + remaining_fws
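# A hypothetical usage sketch (wf_uuid is assumed to come from an earlier
# ApproxNEB setup step; ">>db_file<<" and ">>vasp_cmd<<" follow the usual
# atomate env_chk placeholder convention):
#
#     get_images_task = GetImageFireworks(
#         db_file=">>db_file<<",
#         approx_neb_wf_uuid=wf_uuid,
#         images_key="0+1",
#         launch_mode="screening",
#         vasp_cmd=">>vasp_cmd<<",
#     )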
|
101502
|
from collections import deque
from csp.variable import Variable
from csp.constraint_problem import ConstraintProblem
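# PC-2 path consistency: for each ordered (variable, neighbor) pair, check via
# every third variable whether their value pairs can be extended consistently;
# values that cannot are pruned and the affected triplets are re-queued until a
# fixed point is reached. pc2 returns False if any domain is wiped out.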
def pc2(constraint_problem: ConstraintProblem) -> bool:
variable_diffvariable_neighbor_triplets = deque()
variables = constraint_problem.get_variables()
for var in variables:
for neighbor in constraint_problem.get_neighbors(var):
for different_variable in variables - {var, neighbor}:
variable_diffvariable_neighbor_triplets.append((var, different_variable, neighbor))
while variable_diffvariable_neighbor_triplets:
var, different_variable, neighbor = variable_diffvariable_neighbor_triplets.popleft()
if __revise3(constraint_problem, var, neighbor, different_variable):
for different_variable in variables - {var, neighbor}:
variable_diffvariable_neighbor_triplets.append((different_variable, var, neighbor))
variable_diffvariable_neighbor_triplets.append((different_variable, neighbor, var))
for var in variables:
if not var.domain or not constraint_problem.get_consistent_domain(var):
return False
return True
def __revise3(constraints_problem: ConstraintProblem, variable: Variable, neighbor: Variable,
different_variable: Variable) -> bool:
any_revised = False
for variable_value in variable.domain:
        variable_was_assigned = variable.value is not None
if not variable_was_assigned:
variable.assign(variable_value)
curr_revised = False
inconsistent_neighbor_values = list()
        neighbor_was_assigned = neighbor.value is not None
for neighbor_value in neighbor.domain:
if not neighbor_was_assigned:
neighbor.assign(neighbor_value)
if not constraints_problem.get_consistent_domain(different_variable):
inconsistent_neighbor_values.append(neighbor_value)
curr_revised, any_revised = True, True
if not neighbor_was_assigned:
neighbor.unassign()
if not variable_was_assigned:
variable.unassign()
if curr_revised:
variable.remove_from_domain(variable_value)
for value in inconsistent_neighbor_values:
neighbor.remove_from_domain(value)
return any_revised
|
101504
|
import dataloader as dl
# Path to the ANI-1x data set
path_to_h5file = '/home/jujuman/Scratch/Research/ANI-1x1ccx/FINAL_CLEANED_DATA/ani1x-20190925_rz.h5'
# List of keys to point to requested data
data_keys = ['<KEY>','<KEY>'] # Original ANI-1x data (https://doi.org/10.1063/1.5023802)
#data_keys = ['<KEY>','<KEY>'] # CHNO portion of the data set used in AIM-Net (https://doi.org/10.1126/sciadv.aav6490)
#data_keys = ['ccsd(t)_cbs.energy'] # The coupled cluster ANI-1ccx data set (https://doi.org/10.1038/s41467-019-10827-4)
#data_keys = ['<KEY>'] # A subset of this data was used for training the ACA charge model (https://doi.org/10.1021/acs.jpclett.8b01939)
# Example for extracting DFT/DZ energies and forces
for data in dl.iter_data_buckets(path_to_h5file,keys=data_keys):
X = data['coordinates']
Z = data['atomic_numbers']
E = data['wb97x_dz.energy']
F = data['wb97x_dz.forces']
|
101505
|
example_schema_array = {"type": "array", "items": {"type": "string"}}
example_array = ["string"]
example_schema_integer = {"type": "integer", "minimum": 3, "maximum": 5}
example_integer = 3
example_schema_number = {"type": "number", "minimum": 3, "maximum": 5}
example_number = 3.2
example_schema_object = {"type": "object", "properties": {"value": {"type": "integer"}}, "required": ["value"]}
example_object = {"value": 1}
example_schema_string = {"type": "string", "minLength": 3, "maxLength": 5}
example_string = "str"
example_response_types = [example_array, example_integer, example_number, example_object, example_string]
example_schema_types = [
example_schema_array,
example_schema_integer,
example_schema_number,
example_schema_object,
example_schema_string,
]
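# A minimal sketch pairing each schema with its example instance, assuming the
# third-party `jsonschema` package is available (it is not imported above):
if __name__ == "__main__":
    import jsonschema

    for schema, instance in zip(example_schema_types, example_response_types):
        jsonschema.validate(instance=instance, schema=schema)  # raises on mismatch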
|
101521
|
import torch
import torch.nn as nn
import torchvision
class HopeNet(nn.Module):
# Hopenet with 3 output layers for yaw, pitch and roll
# Predicts Euler angles by binning and regression with the expected value
def __init__(self, block, layers, num_bins):
super(HopeNet, self).__init__()
if block == 'resnet':
block = torchvision.models.resnet.Bottleneck
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc_yaw = nn.Linear(512 * block.expansion, num_bins)
self.fc_pitch = nn.Linear(512 * block.expansion, num_bins)
self.fc_roll = nn.Linear(512 * block.expansion, num_bins)
        self.idx_tensor = torch.arange(66).float()  # hard-coded 66 bins; assumes num_bins == 66
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
@staticmethod
def softmax_temperature(tensor, temperature):
result = torch.exp(tensor / temperature)
result = torch.div(result, torch.sum(result, 1).unsqueeze(1).expand_as(result))
return result
def bin2degree(self, predict):
predict = self.softmax_temperature(predict, 1)
return torch.sum(predict * self.idx_tensor.type_as(predict), 1) * 3 - 99
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
pre_yaw = self.fc_yaw(x)
pre_pitch = self.fc_pitch(x)
pre_roll = self.fc_roll(x)
yaw = self.bin2degree(pre_yaw)
pitch = self.bin2degree(pre_pitch)
roll = self.bin2degree(pre_roll)
return yaw, pitch, roll
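# A minimal forward-pass sketch (assumptions: a ResNet-50-style [3, 4, 6, 3]
# layer layout and 224x224 inputs so AvgPool2d(7) sees a 7x7 feature map; the
# hard-coded idx_tensor above ties the model to num_bins = 66):
if __name__ == "__main__":
    model = HopeNet('resnet', [3, 4, 6, 3], num_bins=66)
    dummy = torch.randn(1, 3, 224, 224)
    yaw, pitch, roll = model(dummy)
    print(yaw.shape, pitch.shape, roll.shape)  # each is shape (1,), in degrees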
|
101538
|
from requests import Request
from backend.database.objects import Game
from backend.blueprints.spa_api.service_layers.replay.json_tag import JsonTag
from backend.database.wrapper.tag_wrapper import TagWrapper
from tests.utils.location_utils import LOCAL_URL
from tests.utils.test_utils import check_array_equal
class TestReplayHistory:
def test_get_replays_no_params_fails(self, test_client):
r = Request('GET', LOCAL_URL + '/api/replay', params={})
response = test_client.send(r)
assert(response.status_code == 400)
def test_get_replays_not_logged_in_fails(self, test_client, mock_user):
mock_user.logout()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': ['one']})
response = test_client.send(r)
assert(response.status_code == 401)
def test_get_replays_none_in_server(self, test_client):
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 0
def test_get_all_replays(self, initialize_database_tags, test_client):
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == len(games)
def test_get_all_replays_with_player(self, initialize_database_tags, test_client):
query_player = ['76561197998150808']
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'player_ids': query_player})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
# Check that every player we are querying exists in the replay, and no extras.
for replay in data['replays']:
player_count = []
for players in replay['players']:
if players['id'] in query_player:
player_count.append(players['id'])
check_array_equal(player_count, query_player)
assert data['totalCount'] == len(data['replays']) == 22
def test_get_all_replays_with_players(self, initialize_database_tags, test_client):
query_player = ['76561197998150808', '76561198041178440']
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'player_ids': query_player})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
# Check that every player we are querying exists in the replay, and no extras.
for replay in data['replays']:
player_count = []
for players in replay['players']:
if players['id'] in query_player:
player_count.append(players['id'])
check_array_equal(player_count, query_player)
assert data['totalCount'] == len(data['replays']) == 5
def test_get_all_replays_with_date_before(self, initialize_database_tags, test_client):
# before '2018-09-30T00:25:29'
# '2018-09-30T23:28:39'
timestamp = 1538303129
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'date_before': timestamp})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 13
def test_get_all_replays_with_date_after(self, initialize_database_tags, test_client):
timestamp = 1538303129
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'date_after': timestamp})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 13
def test_get_all_replays_with_date_range(self, initialize_database_tags, test_client):
# before '2018-09-30T00:25:29'
# '2018-09-30T23:28:39'
timestamp_before = 1538784000
timestamp_after = 1538303129
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'date_before': timestamp_before,
'date_after': timestamp_after})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 11
def test_get_all_replays_with_team_size(self, initialize_database_tags, test_client):
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'team_size': 2})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 3
def test_get_all_replays_with_tags(self, initialize_database_tags, test_client, mock_user):
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
tag_name = tags[0][0]
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': tag_name})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
# Check that every player we are querying exists in the replay, and no extras.
for replay in data['replays']:
assert replay['id'] in tagged_games[tag_name]
assert data['totalCount'] == len(data['replays']) == 5
def test_get_all_replays_with_tags_do_union(self, initialize_database_tags, test_client, mock_user):
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': [tags[-1][0], tags[-2][0]]})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] != len(data['replays']) == 5
def test_get_all_replays_with_tags_inside(self, initialize_database_tags, test_client, mock_user):
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': [tags[0][0], tags[1][0]]})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] != len(data['replays']) == 5
def test_get_all_replays_with_tags_no_overlap(self, initialize_database_tags, test_client, mock_user):
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': [tags[0][0], tags[3][0]]})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 9
def test_get_all_replays_with_tags_private_id(self, initialize_database_tags, test_client, mock_user):
session = initialize_database_tags.get_session()
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
encoded_key_0 = JsonTag.get_encoded_private_key(tags[0][0], session=session)
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'private_tag_keys': [encoded_key_0]})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 5
def test_get_all_replays_with_tags_private_id_and_name(self, initialize_database_tags, test_client, mock_user):
session = initialize_database_tags.get_session()
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
encoded_key_0 = JsonTag.get_encoded_private_key(tags[0][0], session=session)
encoded_key_2 = JsonTag.get_encoded_private_key(tags[2][0], session=session)
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': [tags[3][0]],
'private_tag_keys': [encoded_key_0, encoded_key_2]})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] != len(data['replays']) == 10
def test_get_all_replays_with_tags_invalid_private_id(self, initialize_database_tags, test_client, mock_user):
session = initialize_database_tags.get_session()
tags = initialize_database_tags.get_tags()
tag_id = TagWrapper.get_tag_by_name(session, mock_user.get_user().platformid, tags[0][0]).id
invalid_private_id = JsonTag.encode_tag(tag_id, 'invalid_key')
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'private_tag_keys': [invalid_private_id]})
response = test_client.send(r)
assert(response.status_code == 400)
|
101540
|
from threading import Thread
from time import sleep
from PythonCard import model
import urllib
import re
def get_quote(symbol):
global quote
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('class="pr".*?>(.*?)<', content)
if m:
quote = m.group(1)
else:
quote = 'N/A'
return quote
def get_change(symbol):
global change
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('class="chg".*?>(.*?)<', content)
if m:
change = m.group(1)
else:
change = 'N/A'
return change
def get_open(symbol):
global opens
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?op".*?>(.*?)<', content)
if m:
opens = m.group(1)
else:
opens = 'N/A'
return opens
def get_high(symbol):
global high
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?hi".*?>(.*?)<', content)
if m:
high = m.group(1)
else:
high = 'N/A'
return high
def get_high52(symbol):
global high52
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?hi52".*?>(.*?)<', content)
if m:
high52 = m.group(1)
else:
high52 = 'N/A'
return high52
def get_low(symbol):
global low
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?lo".*?>(.*?)<', content)
if m:
low = m.group(1)
else:
low = 'N/A'
return low
def get_vol(symbol):
global vol
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?vo".*?>(.*?)<', content)
if m:
vol = m.group(1)
else:
vol = 'N/A'
return vol
def get_mc(symbol):
global mc
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?mc".*?>(.*?)<', content)
if m:
mc = m.group(1)
else:
mc = 'N/A'
return mc
def get_lo52(symbol):
global lo52
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?lo52".*?>(.*?)<', content)
if m:
lo52 = m.group(1)
else:
lo52 = 'N/A'
return lo52
def get_pe(symbol):
global pe
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?lo52".*?>(.*?)<', content)
if m:
pe = m.group(1)
else:
pe = 'N/A'
return pe
def get_beta(symbol):
global beta
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?beta".*?>(.*?)<', content)
if m:
beta = m.group(1)
else:
beta = 'N/A'
return beta
def get_div(symbol):
global div
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?div".*?>(.*?)<', content)
if m:
div = m.group(1)
else:
div = 'N/A'
return div
def get_yield(symbol):
global yield1
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?yield".*?>(.*?)<', content)
if m:
yield1 = m.group(1)
else:
        yield1 = 'N/A'
return yield1
def get_shares(symbol):
global shares
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?shares".*?>(.*?)<', content)
if m:
shares = m.group(1)
else:
        shares = 'N/A'
return shares
def get_own(symbol):
global own
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?own".*?>(.*?)<', content)
if m:
own = m.group(1)
else:
        own = 'N/A'
return own
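# The getters above differ only in the CSS class they scrape. A shared helper
# along these lines (a sketch only, not wired into the GUI code below) would
# remove the duplication:
#
#     def get_field(symbol, css_class):
#         base_url = 'http://finance.google.com/finance?q='
#         content = urllib.urlopen(base_url + symbol).read()
#         m = re.search('".*?%s".*?>(.*?)<' % css_class, content)
#         return m.group(1) if m else 'N/A'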
class stock(model.Background):
def on_getQuote_mouseClick(self, event):
global symbol
symbol = self.components.quotese.text
class Repeater(Thread):
def __init__(self,interval,fun,*args,**kw):
Thread.__init__(self)
self.interval=interval
self.fun=fun
self.args=args
self.kw=kw
self.keep_going=True
def run(self):
while(self.keep_going):
sleep(self.interval)
self.fun(*self.args,**self.kw)
def Refresh(*a):
get_quote(symbol)
get_change(symbol)
self.components.current.text = quote
self.components.change.text = change
r=Repeater(1.0, Refresh)
r.start()
def on_stockinfo_mouseClick(self, event):
global symbol
symbol = self.components.quotese.text
get_open(symbol)
get_high(symbol)
get_high52(symbol)
get_low(symbol)
get_vol(symbol)
get_mc(symbol)
get_lo52(symbol)
get_pe(symbol)
get_beta(symbol)
get_div(symbol)
get_yield(symbol)
get_shares(symbol)
get_own(symbol)
self.components.inst.text = own
self.components.shares.text = shares
self.components.yield1.text = yield1
self.components.div.text = div
self.components.beta.text = beta
self.components.pe.text = pe
self.components.lo52.text = lo52
self.components.mkt.text = mc
self.components.vol.text = vol
self.components.opens.text = opens
self.components.high.text = high
self.components.hi52.text = high52
self.components.low.text = low
def on_save_mouseClick(self, event):
stock1 = open('stock1.txt', 'w')
stock1.write(self.components.stock1.text)
stock1.close()
stock2 = open('stock2.txt', 'w')
stock2.write(self.components.stock2.text)
stock2.close()
stock3 = open('stock3.txt', 'w')
stock3.write(self.components.stock3.text)
stock3.close()
stock4 = open('stock4.txt', 'w')
stock4.write(self.components.stock4.text)
stock4.close()
def on_load_mouseClick(self, event):
load1 = open('stock1.txt' , 'r').read()
self.components.stock1.text = load1
load2 = open('stock2.txt' , 'r').read()
self.components.stock2.text = load2
load3 = open('stock3.txt' , 'r').read()
self.components.stock3.text = load3
load4 = open('stock4.txt' , 'r').read()
self.components.stock4.text = load4
def on_update_mouseClick(self, event):
symbol = self.components.stock1.text
get_quote(symbol)
self.components.change1.text = quote
symbol = self.components.stock2.text
get_quote(symbol)
self.components.change2.text = quote
symbol = self.components.stock3.text
get_quote(symbol)
self.components.change3.text = quote
symbol = self.components.stock4.text
get_quote(symbol)
self.components.change4.text = quote
def on_clear_mouseClick(self, event):
self.components.stock1.text = ""
self.components.stock2.text = ""
self.components.stock3.text = ""
self.components.stock4.text = ""
self.components.change1.text = ""
self.components.change2.text = ""
self.components.change3.text = ""
self.components.change4.text = ""
if __name__ == '__main__':
app = model.Application(stock)
app.MainLoop()
_______________________________________________________________________________
Save the following as stock.rsrc.py:
{'application':{'type':'Application',
'name':'Template',
'backgrounds': [
{'type':'Background',
'name':'bgTemplate',
'title':'Standard Template with File->Exit menu',
'size':(711, 634),
'style':['resizeable'],
'menubar': {'type':'MenuBar',
'menus': [
{'type':'Menu',
'name':'menuFile',
'label':'&File',
'items': [
{'type':'MenuItem',
'name':'save',
'label':u'Save',
},
{'type':'MenuItem',
'name':'menuFileExit',
'label':'E&xit',
'command':'exit',
},
]
},
]
},
'components': [
{'type':'Button',
'name':'clear',
'position':(333, 555),
'label':u'Clear',
},
{'type':'Button',
'name':'save',
'position':(523, 444),
'size':(117, -1),
'label':u'Save',
},
{'type':'StaticText',
'name':'statictext10',
'position':(40, 510),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 12},
'foregroundColor':(0, 0, 160, 255),
'text':u'Update to show price',
},
{'type':'Button',
'name':'load',
'position':(523, 420),
'size':(117, -1),
'label':u'Load',
},
{'type':'StaticText',
'name':'StaticText5',
'position':(26, 488),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 12},
'foregroundColor':(0, 0, 160, 255),
'text':u'Or load to load previous',
},
{'type':'Button',
'name':'update',
'position':(522, 470),
'size':(120, 80),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 11},
'label':u'Update',
},
{'type':'StaticText',
'name':'StaticText4',
'position':(31, 465),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 12},
'foregroundColor':(0, 0, 160, 255),
'text':u'Then click save to save',
},
{'type':'StaticText',
'name':'StaticText3',
'position':(10, 445),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 12},
'foregroundColor':(0, 0, 160, 255),
'text':u'Enter name of stocks to monitor',
},
{'type':'StaticBox',
'name':'StaticBox1',
'position':(343, 109),
'size':(349, 242),
},
{'type':'StaticLine',
'name':'StaticLine1',
'position':(2, 400),
'size':(697, -1),
'layout':'horizontal',
},
{'type':'StaticText',
'name':'StaticText2',
'position':(395, 414),
'text':u'Current Price',
},
{'type':'StaticText',
'name':'StaticText1',
'position':(268, 414),
'text':u'Name Of Stock',
},
{'type':'TextField',
'name':'change4',
'position':(378, 530),
'editable':False,
},
{'type':'TextField',
'name':'stock4',
'position':(258, 530),
},
{'type':'TextField',
'name':'change3',
'position':(378, 500),
'editable':False,
},
{'type':'TextField',
'name':'stock3',
'position':(258, 500),
},
{'type':'TextField',
'name':'change2',
'position':(378, 471),
'editable':False,
},
{'type':'TextField',
'name':'stock2',
'position':(258, 470),
},
{'type':'TextField',
'name':'change1',
'position':(378, 440),
'editable':False,
},
{'type':'TextField',
'name':'stock1',
'position':(258, 440),
},
{'type':'Button',
'name':'stockinfo',
'position':(170, 92),
'label':u'Get Info',
},
{'type':'HtmlWindow',
'name':'HtmlWindow1',
'position':(348, 120),
'size':(339, 225),
'backgroundColor':(255, 255, 255, 255),
'text':u'tickers.html',
},
{'type':'StaticText',
'name':'stockCheckversion',
'position':(213, 0),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 36},
'foregroundColor':(255, 128, 0, 255),
'text':u'Stock Check V2.0',
},
{'type':'StaticText',
'name':'Changelbl',
'position':(14, 84),
'foregroundColor':(128, 0, 0, 255),
'text':u'Change',
},
{'type':'TextField',
'name':'change',
'position':(53, 74),
'size':(-1, 21),
'border':'none',
'editable':False,
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 10},
'foregroundColor':(128, 0, 0, 255),
},
{'type':'TextField',
'name':'current',
'position':(12, 33),
'size':(194, 33),
'border':'none',
'editable':False,
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 24},
'foregroundColor':(0, 128, 0, 255),
},
{'type':'StaticText',
'name':'Currentlbl',
'position':(81, 10),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 18},
'foregroundColor':(0, 128, 0, 255),
'text':u'Current',
},
{'type':'StaticText',
'name':'wkLowlbl',
'position':(8, 364),
'text':u'52Wk Low',
},
{'type':'StaticText',
'name':'instOwnlbl',
'position':(183, 325),
'text':u'Inst. Own',
},
{'type':'StaticText',
'name':'Shareslbl',
'position':(186, 284),
'text':u'Shares',
},
{'type':'StaticText',
'name':'PElbl',
'position':(193, 124),
'text':u'P/E',
},
{'type':'StaticText',
'name':'Openlbl',
'position':(12, 124),
'text':u'Open',
},
{'type':'StaticText',
'name':'Highlbl',
'position':(16, 164),
'text':u'High',
},
{'type':'TextField',
'name':'pe',
'position':(260, 120),
'size':(81, -1),
'editable':False,
},
{'type':'TextField',
'name':'opens',
'position':(80, 120),
'size':(81, -1),
'editable':False,
},
{'type':'TextField',
'name':'high',
'position':(80, 160),
'size':(80, -1),
'editable':False,
},
{'type':'TextField',
'name':'inst',
'position':(260, 320),
'size':(81, -1),
'editable':False,
},
{'type':'TextField',
'name':'shares',
'position':(260, 280),
'size':(81, -1),
'editable':False,
},
{'type':'StaticText',
'name':'Yieldlbl',
'position':(191, 244),
'text':u'Yield',
},
{'type':'StaticText',
'name':'Dividendlbl',
'position':(183, 163),
'text':u'Dividend',
},
{'type':'TextField',
'name':'lo52',
'position':(80, 360),
'size':(81, -1),
'editable':False,
},
{'type':'TextField',
'name':'yield1',
'position':(260, 240),
'size':(81, -1),
'editable':False,
},
{'type':'TextField',
'name':'div',
'position':(260, 160),
'size':(81, -1),
'editable':False,
},
{'type':'StaticText',
'name':'Betalbl',
'position':(193, 204),
'text':u'Beta',
},
{'type':'TextField',
'name':'beta',
'position':(260, 200),
'size':(81, -1),
'editable':False,
},
{'type':'StaticText',
'name':'wkHighlbl',
'position':(6, 323),
'text':u'52Wk High',
},
{'type':'TextField',
'name':'hi52',
'position':(80, 320),
'size':(80, -1),
'editable':False,
},
{'type':'StaticText',
'name':'mktCaplbl',
'position':(11, 283),
'size':(-1, 16),
'text':u'Mkt Cap',
},
{'type':'TextField',
'name':'mkt',
'position':(80, 280),
'size':(80, -1),
'editable':False,
},
{'type':'StaticText',
'name':'Vollbl',
'position':(19, 244),
'text':u'Vol',
},
{'type':'StaticText',
'name':'Lowlbl',
'position':(16, 204),
'text':u'Low',
},
{'type':'TextField',
'name':'vol',
'position':(80, 240),
'size':(80, -1),
'editable':False,
},
{'type':'TextField',
'name':'low',
'position':(80, 200),
'size':(80, -1),
'editable':False,
},
{'type':'Button',
'name':'getQuote',
'position':(313, 63),
'label':u'Get Quote',
},
{'type':'TextField',
'name':'quotese',
'position':(209, 64),
},
] # end components
} # end background
] # end backgrounds
} }
|
101541
|
from __future__ import annotations
import yaml
from importlib.resources import read_text
from ...config.junos import JunosMetricConfiguration
from ...devices import junosdevice
from ...utitlities import create_list_from_dict
from .. import junos
from ..base import Collector
from . import base
class BGPCollector(Collector):
default = yaml.load(read_text(junos, "bgp.yaml"), Loader=yaml.SafeLoader)
name = "bgp"
base_name = "{0}_{1}".format(base, name)
    def __init__(
        self, device: junosdevice.JuniperNetworkDevice, config_path: str = None
    ) -> None:
config = self.default
if config_path is not None:
with open(config_path, "r") as file:
config = yaml.load(file, Loader=yaml.SafeLoader)
super(BGPCollector, self).__init__(self.base_name, device, config)
self._init_prometheus_metrics(metric_configuration=JunosMetricConfiguration)
def collect(self):
bgp = self.device.get_bgp()
if bgp:
bgp_list = create_list_from_dict(bgp, "peeraddr")
for prometheus in self.prometheus_metrics.values():
for interface in bgp_list:
prometheus.metric.add_metric(
labels=self.get_labels(interface),
value=prometheus.function(interface.get(prometheus.json_key)),
)
yield prometheus.metric
prometheus.flush()
|
101558
|
import unittest
import transaction
from pyramid import testing
from dbas.database import DBDiscussionSession
from dbas.database.discussion_model import StatementReference, Statement
from dbas.tests.utils import construct_dummy_request
from dbas.views import set_references, get_reference
class AjaxReferencesTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.config.include('pyramid_chameleon')
self.config.testing_securitypolicy(userid='Tobias', permissive=True)
    # test every ajax method that is not covered by other test classes
def test_get_references_empty(self):
request = construct_dummy_request(json_body={
'uids': [14],
'is_argument': False
})
response = get_reference(request)
self.assertIsNotNone(response)
for uid in response['data']:
self.assertTrue(len(response['data'][uid]) == 0)
self.assertTrue(len(response['text'][uid]) != 0)
def test_get_references(self):
request = construct_dummy_request(json_body={
'uids': [15],
'is_argument': False
})
response = get_reference(request)
self.assertIsNotNone(response)
for uid in response['data']:
self.assertTrue(len(response['data'][uid]) != 0)
self.assertTrue(len(response['text'][uid]) != 0)
def test_get_references_failure(self):
request = construct_dummy_request(json_body={
'uids': 'ab',
'is_argument': False
})
response = get_reference(request)
self.assertIsNotNone(response)
self.assertEqual(400, response.status_code)
def test_set_references(self):
self.config.testing_securitypolicy(userid='Tobias', permissive=True)
statement: Statement = DBDiscussionSession.query(Statement).get(17)
request = construct_dummy_request(json_body={
'statement_id': statement.uid,
'issue': statement.issues[0].uid,
'text': 'This is a source',
'ref_source': 'http://www.google.de/some_source',
})
self.assertTrue(set_references(request))
request = construct_dummy_request(json_body={
'uids': [17],
'is_argument': False
})
response = get_reference(request)
self.assertIsNotNone(response)
for uid in response['data']:
            self.assertEqual(17, int(uid))
self.assertTrue(len(response['data'][uid]) != 0)
self.assertTrue(len(response['text'][uid]) != 0)
DBDiscussionSession.query(StatementReference).filter_by(statement_uid=17).delete()
transaction.commit()
|
101574
|
import pytest
from tagbot.web import app
@pytest.fixture
def client():
with app.test_client() as client:
yield client
@app.route("/die")
def die():
raise Exception("!")
|
101588
|
import gym
import numpy as np
import tensorflow as tf
from tensorflow import saved_model as sm
from easy_rl.utils.window_stat import WindowStat
from easy_rl.utils.gym_wrapper.atari_wrapper import make_atari, wrap_deepmind
import time
def main():
gym_env = gym.make("CartPole-v0")
atari_env = make_atari("PongNoFrameskip-v4")
atari_env = wrap_deepmind(
env=atari_env,
frame_stack=True,
clip_rewards=False,
episode_life=True,
wrap_frame=True,
frame_resize=42)
# replace the following env according to your saved_model
# env = atari_env
env = gym_env
with tf.Session() as sess:
path = 'dump_dir'
MetaGraphDef = tf.saved_model.loader.load(
sess, tags=[sm.tag_constants.SERVING], export_dir=path)
# get SignatureDef protobuf
SignatureDef_d = MetaGraphDef.signature_def
SignatureDef = SignatureDef_d["predict_results"]
# get inputs/outputs TensorInfo protobuf
ph_inputs = {}
for name, ts_info in SignatureDef.inputs.items():
ph_inputs[name] = sm.utils.get_tensor_from_tensor_info(
ts_info, sess.graph)
outputs = {}
for name, ts_info in SignatureDef.outputs.items():
outputs[name] = sm.utils.get_tensor_from_tensor_info(
ts_info, sess.graph)
for name, ph in ph_inputs.items():
print(name, ph)
for name, ts in outputs.items():
print(name, ts)
len_window = WindowStat("length", 50)
reward_window = WindowStat("reward", 50)
for i in range(100):
ob = env.reset()
env.render()
time.sleep(0.2)
done = False
episode_len = 0
episode_reward = .0
while not done:
action = sess.run(
outputs["output_actions"],
feed_dict={
ph_inputs["obs_ph"]: [np.asarray(ob)],
ph_inputs["deterministic_ph"]: True
})
next_ob, reward, done, info = env.step(action[0])
env.render()
time.sleep(0.1)
episode_reward += reward
episode_len += 1
ob = next_ob
len_window.push(episode_len)
reward_window.push(episode_reward)
print(reward_window)
print(len_window)
if __name__ == '__main__':
main()
|
101633
|
import glob
import os
import numpy as np
import scipy
from scipy import signal
import fabio
import matplotlib.pyplot as plt
import center_approx
import integration
import peakfindingrem
import peakfinding
import scipy.optimize as optimize
import scipy.stats
from scipy import interpolate
from lmfit import minimize, Parameters
import cv2
from scipy.ndimage import filters
from scipy.fftpack import rfft, irfft
from skimage.restoration import denoise_bilateral
demo = True
# def find_arcs(img, cen):
# img = img * (img > 1)
# img = np.log(img + 1) / np.log(np.max(img)) * 255
# img = np.array(img, dtype=np.uint8)
# img = img * Pilatus2M_Mask()
#
# plt.imshow(img)
# plt.show()
# # plt.imshow(img)
# # plt.show()
# # search theta
# Nradial = 25
#
# arclist = []
#
# for Theta in np.linspace(-180.0, 0.0, Nradial, endpoint=False):
# mask = arcmask(img, cen, [0, img.shape[0]], [Theta, Theta + 180.0 / Nradial])
#
# if demo:
# plt.imshow(img * mask)
# plt.show()
#
# thetaprofile = radial_integrate(mask * img, cen)
# # print thetaprofile
#
# peakRs = findpeaks(thetaprofile)
#
# print peakRs
#
# if demo:
# fig = plt.figure()
#
# h = fig.add_subplot(211)
# plt.plot(thetaprofile)
# plt.plot(peakRs, thetaprofile[peakRs], 'r*')
# limits = h.axis()
# h = fig.add_subplot(212)
# plt.imshow(signal.cwt(thetaprofile, signal.ricker, np.arange(5, 100)))
# cwtlimits = h.axis()
# h.axis([limits[0], limits[1], cwtlimits[2], cwtlimits[3], ])
# fig.tight_layout()
# plt.show()
#
# for peakR in peakRs:
# Rwidth = 20
# mask = arcmask(img, cen, (-0.5 * Rwidth + peakR, 0.5 * Rwidth + peakR), (-180.0, 0.0))
# if demo:
# plt.imshow(img * mask)
# plt.show()
#
# Rprofile = alt_integrate(img * mask, cen)
#
# Nangle = 50.0
# peakThetas = np.array(findpeaks(Rprofile))
#
# if demo:
# plt.plot(np.linspace(0, np.pi * 2, Rprofile.__len__()), Rprofile)
# plt.plot(peakThetas / Nangle, Rprofile[peakThetas], 'r*')
# plt.show()
#
# peakThetas = peakThetas / Nangle
# print 'Here', peakThetas
#
# #add peak to arclist
# for peakTheta in peakThetas:
# arclist.append([peakR * np.cos(peakTheta), peakR * np.sin(peakTheta)])
#
# arclist = np.array(arclist)
# plt.imshow(img)
# plt.plot(cen[0] - arclist[:, 0], cen[1] - arclist[:, 1], 'r*')
# plt.show()
def findpeaks(Y):
# Find peaks using continuous wavelet transform; parameter is the range of peak widths (may need tuning)
# plt.plot(Y)
# plt.show()
Y = np.nan_to_num(Y)
peakindices = scipy.signal.find_peaks_cwt(Y, np.arange(20, 100), noise_perc=5, min_length=10)
# peakindices=peakutils.indexes(Y, thres=0.6,min_dist=20)
return peakindices
# def radial_integrate(img, cen):
# # Radial integration
# y, x = np.indices(img.shape)
# r = np.sqrt((x - cen[0]) ** 2 + (y - cen[1]) ** 2)
# r = np.rint(r).astype(np.int)
#
# tbin = np.bincount(r.ravel(), img.ravel())
#
# nr = np.bincount(r.ravel(), (img > 0).ravel())
# radialprofile = tbin / nr
#
# return radialprofile
# def alt_integrate(img, cen):
# # Radial integration
# N = 50
# y, x = np.indices((img.shape))
# r = (np.arctan2(y - cen[1], x - cen[0]) + np.pi) * N
# r = np.rint(r).astype(np.int)
# tbin = np.bincount(r.ravel(), img.ravel())
# nr = np.bincount(r.ravel(), (img > 0).ravel())
# radialprofile = tbin / nr
#
# return radialprofile
# def Pilatus2M_Mask():
# row = 1679
# col = 1475
# mask = np.zeros((row, col))
#
# row_start = 196
# row_gap_size = 16
# row_num_gaps = 7
#
# col_start = 488
# col_gap_size = 6
# col_num_gaps = 2
#
# start = row_start
# for i in range(1, row_num_gaps + 1):
# mask[start:start + row_gap_size, :] = 1
# start = start + row_gap_size + row_start
# start = col_start
# for j in range(1, col_num_gaps + 1):
# mask[:, start:start + col_gap_size] = 1
# start = start + col_gap_size + col_start
#
# return 1 - mask
# def arcmask(img, cen, Rrange, Thetarange):
# mask = np.zeros_like(img)
# #print cen, Rrange,Thetarange
# if min(Rrange)==0:
# cv2.ellipse(mask, (int(cen[0]),int(cen[1])), (int(max(Rrange)),int(max(Rrange))), 0, min(Thetarange), int(max(Thetarange)), 255, -1)
# else:
# cv2.ellipse(mask, (int(cen[0]),int(cen[1])), (int(min(Rrange)),int(min(Rrange))), 0, min(Thetarange), int(max(Thetarange)), 255, int(max(Rrange)-min(Rrange)))
# #cv2.ellipse(mask,(256,256),(100,50),0,0,180,255,-1)
#
# plt.imshow(mask)
# plt.show()
# return mask/255
def arcmask(img, cen, Rrange, Thetarange):
y, x = np.indices((img.shape))
r = np.sqrt((x - cen[0]) ** 2 + (y - cen[1]) ** 2)
theta = np.arctan2(y - cen[1], x - cen[0]) / (2 * np.pi) * 360.0
mask = ((min(Rrange) < r) & (r < max(Rrange)) & (min(Thetarange) < theta) & (theta < max(Thetarange)))
#plt.imshow(mask)
#plt.show()
return mask
def scanforarcs(radialprofile, cen):
# h = 35
# radialprofile=signal.convolve(radialprofile,signal.gaussian(h, std=8))
# test = np.max(radialprofile) / h
#print 't', test
peakmax, peakmin = peakfindingrem.peakdet(range(len(radialprofile)), radialprofile, 10)
peakind = peakmax[:, 0]
# for i in range(np.size(peakind)):
# plt.axvline(peakind[i],color='b')
# plt.plot(radialprofile)
# plt.show()
# accurancy = 50
# x = np.zeros((np.size(peakind), accurancy))
#y = np.zeros((np.size(peakind), accurancy))
#xinf = np.zeros((np.size(peakind), accurancy))
#yinf = np.zeros((np.size(peakind), accurancy))
#xsup = np.zeros((np.size(peakind), accurancy))
#ysup = np.zeros((np.size(peakind), accurancy))
# for i in range(0, np.size(peakind)):
#Delta = peakind[i] / 10
#theta = np.linspace(0, 2 * np.pi, accurancy)
#x[i] = cen[0] + (peakind[i]) * np.cos(theta)
#y[i] = cen[1] + (peakind[i]) * np.sin(theta)
#xinf[i] = cen[0] + (peakind[i] - Delta) * np.cos(theta)
#xsup[i] = cen[0] + (peakind[i] + Delta) * np.cos(theta)
#yinf[i] = cen[1] + (peakind[i] - Delta) * np.sin(theta)
#ysup[i] = cen[1] + (peakind[i] + Delta) * np.sin(theta)
return peakind
def mirroredgaussian(theta, a, b, c, d):
val = (gaussian(theta, a, b, c, d) + gaussian(2 * np.pi - theta, a, b, c, d)) / 2.
return val
def gaussian(x, a, b, c, d):
val = abs(a) * np.exp(-(x - b) ** 2. / c ** 2.) + abs(d)
return val
def vonmises(x, A, mu, kappa):
return A * scipy.stats.vonmises.pdf(2 * (x - mu), kappa)
def mirroredvonmises(x, A, mu, kappa, floor):
    # average of the von Mises peak and its mirror image about chi = pi/2
    return A * (scipy.stats.vonmises.pdf(2 * (mu - x), kappa)
                + scipy.stats.vonmises.pdf(2 * (mu - (np.pi - x)), kappa)) / 2 + floor
tworoot2ln2 = 2. * np.sqrt(2. * np.log(2.))
def residual(params, x, data):
A = params['A'].value
mu = params['mu'].value
kappa = params['kappa'].value
floor = params['floor'].value
model = mirroredvonmises(x, A, mu, kappa, floor)
return data - model
def gaussianresidual(params, x, data, sig=1):
A = params['A'].value
mu = params['mu'].value
sigma = params['sigma'].value
floor = params['floor'].value
model = gaussian(x, A, mu, sigma, floor)
resids = data - model
# print resids
weighted = np.sqrt(resids ** 2 / sig ** 2)
return weighted
def findgisaxsarcs(img, cen, experiment):
radialprofile = integration.pixel_2Dintegrate(img, (cen[1], cen[0]), experiment.mask)
# arcs = scanforarcs(radialprofile, cen)
arcs = peakfinding.findpeaks(None, radialprofile, (100, 50), gaussianwidthsigma=3, minimumsigma=100)
# print arcs
plt.plot(radialprofile)
plt.plot(arcs[0], arcs[1], 'ok')
#plt.show()
arcs = arcs[0]
output = []
_, unique = np.unique(arcs, return_index=True)
for qmu in arcs[unique]:
chiprofile = np.nan_to_num(integration.chi_2Dintegrate(img, (cen[1], cen[0]), qmu, mask=experiment.mask))
plt.plot(np.arange(0, np.pi, 1 / 30.), chiprofile, 'r')
# filter out missing chi
missingpointfloor = np.percentile(chiprofile, 15)
badpoints = np.where(chiprofile < missingpointfloor)[0]
goodpoints = np.where(chiprofile >= missingpointfloor)[0]
chiprofile[badpoints] = np.interp(badpoints, goodpoints, chiprofile[goodpoints])
plt.plot(np.arange(0, np.pi, 1 / 30.), chiprofile, 'k')
# f=rfft(chiprofile)
# plt.plot(f)
# f[-20:]=0
# chiprofile=irfft(chiprofile)
try:
params = Parameters()
params.add('A', value=np.max(chiprofile), min=0)
params.add('mu', value=np.pi / 2, min=0, max=np.pi)
params.add('kappa', value=0.1, min=0)
params.add('floor', value=0.1, min=0)
x = np.arange(0, np.pi, 1 / 30.)
out = minimize(residual, params, args=(x, chiprofile))
print params
# print params['A'].stderr
# popt, pcov = optimize.curve_fit(vonmises, np.arange(0, np.pi, 1 / 30.), np.nan_to_num(chiprofile),
#
# print(popt)
except RuntimeError:
            print('Fit failed at ' + str(qmu))
continue
if params['kappa'].stderr > 100 or params['A'].stderr > 100:
isring = True
else:
isring = False
popt = [params['A'].value, params['mu'].value, params['kappa'].value, params['floor'].value]
A, chimu, kappa, baseline = popt
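        # np.log(.5*np.exp(kappa) + .5*np.exp(-kappa)) is log(cosh(kappa)); the
        # arccos below is the angle at which exp(kappa*cos(theta)) falls to
        # cosh(kappa), the midpoint between its max exp(kappa) and min exp(-kappa).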
FWHM = np.arccos(np.log(.5 * np.exp(kappa) + .5 * np.exp(-kappa)) / kappa)
output.append([qmu, A, chimu, FWHM, baseline, isring])
# plt.plot(np.arange(0, np.pi, 1 / 30.), chiprofile)
# plt.plot(np.arange(0, np.pi, 1 / 30.), mirroredvonmises(np.arange(0, np.pi, 1 / 30.), *popt))
# plt.show()
return output
def inpaint(img,mask):
filled = None
if False:
img = img / (2 ^ 16 - 1) * 255
plt.imshow(img.astype(np.uint8))
plt.show()
plt.imshow(mask) #TODO: check that mask corners are correct
plt.show()
kernel = np.ones((3, 3),np.uint8)
mask = cv2.dilate(mask.astype(np.uint8), kernel, iterations=1)
filled = cv2.inpaint(img.astype(np.uint8), mask.astype(np.uint8), 3, cv2.INPAINT_TELEA)
plt.imshow(img)
plt.show()
return
elif True:
valid = ~mask.astype(np.bool)
coords = np.array(np.nonzero(valid)).T
values = img[valid]
it = interpolate.LinearNDInterpolator(coords, values)
filled = it(list(np.ndindex(img.shape))).reshape(img.shape)
plt.imshow(np.rot90(filled))
plt.show()
return filled
def findmaxs(orig):
img = orig.copy()
img = filters.gaussian_filter(img, 3)
img = filters.minimum_filter(img, 4)
img = filters.median_filter(img, 4)
img -= np.min(img)
img = denoise_bilateral(img, sigma_range=0.5, sigma_spatial=15)
# plt.imshow(np.rot90(img))
#plt.show()
#img = filters.percentile_filter(img,50,50)
maxima = ((img == filters.maximum_filter(img, (10, 10))) & (
filters.maximum_filter(img, (50, 50)) > 1.5 * filters.minimum_filter(img, (50, 50))) & (img > 2))
maximachis, maximaqs = np.where(maxima == 1)
plt.imshow(np.rot90(orig), interpolation='nearest')
plt.plot(maximachis, 1000 - maximaqs, 'o', markersize=10, markeredgecolor='red', markerfacecolor="None", mew="4")
plt.ylim([1000, 0])
plt.xlim([0, 1000])
plt.show()
return maximachis, maximaqs
def fitarc(chiprofile):
try:
params = Parameters()
params.add('A', value=np.max(chiprofile), min=0)
params.add('mu', value=np.pi / 2, min=0, max=np.pi)
params.add('kappa', value=0.1, min=0)
params.add('floor', value=0.1, min=0)
x = np.arange(0, np.pi, 1 / 30.)
out = minimize(residual, params, args=(x, chiprofile))
print params
# print params['A'].stderr
# popt, pcov = optimize.curve_fit(vonmises, np.arange(0, np.pi, 1 / 30.), np.nan_to_num(chiprofile),
# p0=[np.max(np.nan_to_num(chiprofile)), np.pi / 2, .1, 0])
# print(popt)
except RuntimeError:
print('Fit failed.')
if params['kappa'].stderr > 100 or params['A'].stderr > 100:
isring = True
else:
isring = False
popt = [params['A'].value, params['mu'].value, params['kappa'].value, params['floor'].value]
A, chimu, kappa, baseline = popt
FWHM = np.arccos(np.log(.5 * np.exp(kappa) + .5 * np.exp(-kappa)) / kappa)
return A, chimu, FWHM, baseline, isring
def fitarcgaussian(chiprofile, chi):
try:
params = Parameters()
x = np.arange(np.size(chiprofile))
roi = np.ones_like(chiprofile)
roi[chi - 30:chi + 30] = .0001
# roi/=1000
# plt.plot(chiprofile,'')
#plt.plot(roi * np.max(chiprofile * roi), 'g')
#plt.plot(roi*chiprofile,'k')
params.add('A', value=np.max(chiprofile * (1 - roi)), min=0)
params.add('mu', value=chi, min=0, max=len(chiprofile))
params.add('sigma', value=20, min=0)
params.add('floor', value=0.1, min=0)
out = minimize(gaussianresidual, params, args=(x, chiprofile, roi), method='nelder')
#print params
# print params['A'].stderr
# popt, pcov = optimize.curve_fit(vonmises, np.arange(0, np.pi, 1 / 30.), np.nan_to_num(chiprofile),
# p0=[np.max(np.nan_to_num(chiprofile)), np.pi / 2, .1, 0])
# print(popt)
except RuntimeError:
print('Fit failed.')
popt = [params['A'].value, params['mu'].value, params['sigma'].value, params['floor'].value]
#plt.plot(x, gaussian(x, *popt), 'r')
# plt.show()
# A, chimu, sigma, baseline = popt
# FWHM = sigma * tworoot2ln2
    return params  # callers treat the result as an lmfit Parameters object (params['mu'].value, params.add('q', ...))
def findgisaxsarcs2(img, experiment):
img = img.T.copy()
cake, _, _ = integration.cake(img, experiment, mask=experiment.mask) # TODO: refactor these parameters and check .T
maskcake, _, _ = integration.cake(experiment.mask.T, experiment)
from fabio import edfimage
fabimg = edfimage.edfimage(cake)
filename = 'cake.edf'
fabimg.write(filename)
fabimg = edfimage.edfimage(maskcake)
filename = 'cake_MASK.edf'
fabimg.write(filename)
img = inpaint(cake, maskcake)
fabimg = edfimage.edfimage(img)
filename = 'cake_LINEAR_INFILL.edf'
fabimg.write(filename)
maxchis, maxqs = findmaxs(img)
    out = []
for chi, q in zip(maxchis, maxqs):
# roi=np.ones_like(img)
#roi[chi - 10:chi + 10, q - 5:q + 5]=10
#roi=np.sum(roi,axis=1)
slice = img[:, q - 5:q + 5]
if np.max(slice) / np.min(slice) < 2:
pass # continue
chiprofile = np.sum(slice, axis=1)
x = np.arange(np.size(chiprofile))
#plt.plot(chiprofile)
params = fitarcgaussian(chiprofile, chi)
        if params['mu'].value > chi + 5 or params['mu'].value < chi - 5:
continue
params.add('q', value=q)
out.append(params)
#plt.show()
# plt.imshow(np.log(img))
#plt.show()
return out
if __name__ == "__main__":
import xicam.config
experiment = xicam.config.experiment()
experiment.setvalue('Detector', 'pilatus2m')
experiment.setvalue('Pixel Size X',172e-6)
experiment.setvalue('Pixel Size Y', 172e-6)
experiment.mask = experiment.getDetector().calc_mask()
for imgpath in glob.glob(os.path.join("../GISAXS samples/", '*.edf')):
print "Opening", imgpath
# read image
img = fabio.open(imgpath).data
# find center
# cen = center_approx.center_approx(img)
cen = center_approx.gisaxs_center_approx(img)
experiment.setcenter(cen)
arcs = findgisaxsarcs2(img, experiment)
# print cen
# print arcs
ax = plt.gca()
plt.axvline(cen[0], color='r')
plt.axhline(cen[1], color='r')
plt.imshow(np.log(img))
from matplotlib.patches import Arc
        qratio = 1.78
for arc in arcs:
print arc
if not np.isnan(arc['sigma'].value):
if False:
arcartist = [Arc(xy=cen, width=arc['q'] * 2, height=arc['q'] * 2, angle=-90, theta1=0,
theta2=360)] # Arc
ax.add_artist(arcartist[0])
arcartist[0].set_lw(3)
else:
angle = -arc['mu'].value / 1000 * 360
theta1 = -abs(arc['sigma'].value * tworoot2ln2) / 1000 * 360 / 2
theta2 = abs(arc['sigma'].value * tworoot2ln2) / 1000 * 360 / 2
arcartist = [
Arc(xy=cen, width=arc['q'].value * 2 * qratio, height=arc['q'].value * 2 * qratio, angle=angle,
theta1=theta1,
theta2=theta2)] # Arc
for artist in arcartist:
ax.add_artist(artist)
artist.set_lw(3)
# for i in range(1, np.size(x, 0)):
# plt.plot(y[i], x[i], color='g')
# plt.plot(yinf[i], xinf[i], color='r')
# plt.plot(ysup[i], xsup[i], color='r')
plt.show()
# popt, pcov = optimize.curve_fit(gaussian, np.arange(np.size(a)), np.nan_to_num(a))
# print("Scale = %.3f +/- %.3f" % (popt[0], np.sqrt(pcov[0, 0])))
#print("Offset = %.3f +/- %.3f" % (popt[1], np.sqrt(pcov[1, 1])))
#print("Sigma = %.3f +/- %.3f" % (popt[2], np.sqrt(pcov[2, 2])))
# print(vimodel.A)
# print vimodel.mu
# print vimodel.FWHM
# find arcs
# arcs = find_arcs(img, cen)
#draw arcs
#drawarcs(img,arcs)
101635
import numpy as np
import datetime
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
import sklearn.metrics
from ops import add_features, augment_data
from sklearn.model_selection import train_test_split
from capture_data import DataObserver
from sklearn.preprocessing import StandardScaler
import warnings
import time
import pickle
warnings.filterwarnings("ignore")
np.set_printoptions(suppress=True)
## CONFIG
root_path = './data/'
timestamp = datetime.datetime.now().isoformat().split('.')[0].replace(':', '_')
model_dir = './experiments/model-' + timestamp + '/'

# Parameters
vocabulary = 'PEAWSB'
n_classes = len(vocabulary) + 1  # one class per symbol, plus a 'None' class
data_scaler = StandardScaler()
n_features = 12  # total features per timestep (used for padding)
def prepare_data(augment_iter=0):
X = []
y = []
for i in range(n_classes):
if i == n_classes - 1:
char = 'None'
else:
char = vocabulary[i]
res_x = pickle.load(open(root_path + char + ".pkl", 'rb'))
res_y = np.tile(i, (len(res_x), 1)).tolist()
X += res_x
y += res_y
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=42, stratify=y)
X_train, y_train = augment_data(X_train, y_train, iterations=augment_iter)
    # Add features and normalize data to zero mean and unit variance.
    # The pen-up flag (column 2) is split off first so it is not scaled.
    pen_up = []
    for i in range(len(X_train)):
        sequence = np.asarray(X_train[i])
        pen_up.append(sequence[:, 2])
        sequence = sequence[:, 0:2]
        sequence = add_features(sequence)
        X_train[i] = sequence
    data_scaler.fit(np.vstack(X_train))
    for i in range(len(X_train)):
        sequence = np.asarray(X_train[i])
        sequence = data_scaler.transform(sequence)
        X_train[i] = np.column_stack((sequence, pen_up[i])).tolist()
    # The test set reuses the scaler fitted on the training data only.
    for i in range(len(X_test)):
        sequence = np.asarray(X_test[i])
        test_pen_up = sequence[:, 2]
        sequence = sequence[:, 0:2]
        sequence = add_features(sequence)
        sequence = data_scaler.transform(sequence)
        X_test[i] = np.column_stack((sequence, test_pen_up)).tolist()
max_seqLen = max(len(max(X_train, key=len)), len(max(X_test, key=len)))
# Pad sequences for dimension consistency
padding_mask = np.zeros(n_features).tolist()
for i in range(len(X_train)):
X_train[i] += [padding_mask for _ in range(max_seqLen - len(X_train[i]))]
for i in range(len(X_test)):
X_test[i] += [padding_mask for _ in range(max_seqLen - len(X_test[i]))]
    # Flatten each padded sequence into a single fixed-length feature vector.
X_train = np.asarray(X_train)
shape = np.shape(X_train)
X_train = np.reshape(X_train, (shape[0], shape[1] * shape[2]))
X_test = np.asarray(X_test)
shape = np.shape(X_test)
X_test = np.reshape(X_test, (shape[0], shape[1] * shape[2]))
return X_train, X_test, y_train, y_test
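
# A minimal shape sanity-check sketch (illustrative, with made-up lengths):
# two sequences of lengths 3 and 5 with n_features columns are padded to the
# longer length and flattened into a (2, 5 * n_features) matrix, mirroring
# the padding and flattening done in prepare_data above.
def _demo_pad_and_flatten():
    seqs = [np.zeros((3, n_features)).tolist(), np.zeros((5, n_features)).tolist()]
    max_len = len(max(seqs, key=len))
    pad = np.zeros(n_features).tolist()
    for i in range(len(seqs)):
        seqs[i] += [pad for _ in range(max_len - len(seqs[i]))]
    flat = np.reshape(np.asarray(seqs), (len(seqs), max_len * n_features))
    assert flat.shape == (2, 5 * n_features)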
def train(model, X, y):
    # Perform training
    print("Starting training...")
    model.fit(X, y)
    print("Training done.")
def test(model, X, y):
# prediction sample for every entry of test set
prediction = model.predict(X)
test_confusion_matrix = sklearn.metrics.confusion_matrix(y, prediction, labels=range(n_classes))
test_accuracy = np.sum(np.diagonal(test_confusion_matrix)) / np.sum(test_confusion_matrix)
print("Test Accuracy: ", test_accuracy)
print("Test Confusion Matrix:")
print(test_confusion_matrix)
if __name__ == '__main__':
X_train, X_test, y_train, y_test = prepare_data(augment_iter=4)
#model = RandomForestClassifier(n_estimators=20, max_depth=12) # Test Accuracy: 0.7238
#model = SVC() # linear: Test Accuracy: 0.7428, rbf: 0.7142
model = MLPClassifier(hidden_layer_sizes=(300,), learning_rate='adaptive', random_state=1) # Test Accuracy: 0.8
# train models
train(model, X_train, y_train)
# evaluate models
test(model, X_test, y_test)
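
    # Persisting the fitted scaler together with the model is an assumption
    # (model_dir is defined above but never written to); a minimal sketch:
    import os
    os.makedirs(model_dir, exist_ok=True)
    with open(os.path.join(model_dir, 'model.pkl'), 'wb') as f:
        pickle.dump({'model': model, 'scaler': data_scaler}, f)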
101657
import collections
import warnings
import jax
import jax.numpy as np
FixedPointSolution = collections.namedtuple(
"FixedPointSolution",
"value converged iterations previous_value"
)
def unrolled(i, init_x, func, num_iter, return_last_two=False):
"""Repeatedly apply a function using a regular python loop.
Args:
i (int): the current iteration count.
init_x: The initial values fed to `func`.
func (callable): The function which is repeatedly called.
num_iter (int): The number of times to apply the function `func`.
return_last_two (bool, optional): When `True`, return the two last
outputs of `func`.
Returns:
The last output of `func`, or, if `return_last_two` is `True`, a tuple
with the last two outputs of `func`.
"""
x = init_x
x_old = None
for _ in range(num_iter):
x_old = x
x = func(x_old)
i = i + 1
if return_last_two:
return i, x, x_old
else:
return i, x
def fixed_point_iteration(init_x, func, convergence_test, max_iter,
batched_iter_size=1, unroll=False):
"""Find a fixed point of `func` by repeatedly applying `func`.
Use this function to find a fixed point of `func` by repeatedly applying
`func` to a candidate solution. This is done until the solution converges
or until the maximum number of iterations, `max_iter` is reached.
NOTE: if the maximum number of iterations is reached, the convergence
will not be checked on the final application of `func` and the solution
will always be marked as not converged when `unroll` is `False`.
Args:
init_x: The initial values to be used in `func`.
func (callable): The function for which we want to find a fixed point.
`func` should be of type `a -> a` where `a` is the type of
`init_x`.
convergence_test (callable): A two argument function of type
`(a, a) -> bool` that takes in the newest solution and the previous
solution and returns `True` if they have converged. The fixed point
iteration will stop and return when `True` is returned.
max_iter (int or None): The maximum number of iterations.
        batched_iter_size (int, optional): The number of iterations to be
            unrolled and executed per iteration of the `while_loop` op.
            Convergence is only tested at the beginning of each batch. Set
            this to a number larger than 1 to reduce the number of times
            convergence is checked and to potentially allow the graph of
            the unrolled batch to be more aggressively optimized.
        unroll (bool): If True, use `jax.lax.scan` instead of
            `jax.lax.while_loop`. This enables back-propagating through the
            iterations. NOTE: due to current limitations in JAX, when
            `unroll` is `True`, convergence is ignored and the loop always
            runs for the maximum number of iterations.
Returns:
FixedPointSolution: A named tuple containing the results of the
fixed point iteration. The tuple contains the attributes `value`
(the final solution), `converged` (a bool indicating whether
convergence was achieved), `iterations` (the number of iterations
used), and `previous_value` (the value of the solution on the
previous iteration). The previous value satisfies
            `sol.value = func(sol.previous_value)` and allows us to log the
            size of the last step if desired.
"""
    if batched_iter_size < 1:
        raise ValueError(
            "Argument `batched_iter_size` must be greater than zero.")
if max_iter is not None and batched_iter_size > max_iter:
raise ValueError((
"Argument `batched_iter_size` must be smaller or equal to "
"`max_iter`."))
    if max_iter is not None and max_iter % batched_iter_size != 0:
        warnings.warn(
            "Argument `max_iter` should be a multiple of `batched_iter_size` "
            "to guarantee that no more than `max_iter` iterations are used.")
max_batched_iter = None
if max_iter is not None:
max_batched_iter = max_iter // batched_iter_size
def cond(args):
i, x_new, x_old = args
converged = convergence_test(x_new, x_old)
if max_iter is not None:
converged = converged | (max_iter <= i)
return np.logical_not(converged)
def body(args):
i, x_new, _ = args
i_new, x_new, x_old = unrolled(i, x_new, func, batched_iter_size,
return_last_two=True)
return i_new, x_new, x_old
init_vals = unrolled(0, init_x, func, batched_iter_size,
return_last_two=True)
if unroll:
if max_batched_iter is None:
raise ValueError("`max_iter` must be not None when using `unroll`.")
def scan_step(args, idx):
del idx
return body(args), None
if max_batched_iter < 2:
iterations, sol, prev_sol = init_vals
else:
(iterations, sol, prev_sol), _ = jax.lax.scan(
f=scan_step,
init=init_vals,
xs=np.arange(max_batched_iter - 1),
)
converged = convergence_test(sol, prev_sol)
else:
iterations, sol, prev_sol = jax.lax.while_loop(
cond,
body,
init_vals,
)
converged = max_iter is None or iterations < max_iter
return FixedPointSolution(
value=sol,
converged=converged,
iterations=iterations,
previous_value=prev_sol,
)
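

if __name__ == "__main__":
    # A minimal usage sketch (the update map, tolerance, and iteration counts
    # here are illustrative assumptions): find the fixed point of
    # x -> cos(x), i.e. the Dottie number, roughly 0.739.
    sol = fixed_point_iteration(
        init_x=np.array(1.0),
        func=np.cos,
        convergence_test=lambda x_new, x_old: np.max(np.abs(x_new - x_old)) < 1e-6,
        max_iter=100,
        batched_iter_size=10,
    )
    print(sol.value, sol.converged, sol.iterations)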